author    Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
committer Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
commit    8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree      a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /drivers/mmc
parent    406089d01562f1e2bf9f089fd7637009ebaad589 (diff)

Patched in Tegra support.

Diffstat (limited to 'drivers/mmc'): 87 files changed, 6733 insertions, 13279 deletions
diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile index 400756ec7c4..12eef393e21 100644 --- a/drivers/mmc/Makefile +++ b/drivers/mmc/Makefile | |||
@@ -6,4 +6,5 @@ subdir-ccflags-$(CONFIG_MMC_DEBUG) := -DDEBUG | |||
6 | 6 | ||
7 | obj-$(CONFIG_MMC) += core/ | 7 | obj-$(CONFIG_MMC) += core/ |
8 | obj-$(CONFIG_MMC) += card/ | 8 | obj-$(CONFIG_MMC) += card/ |
9 | obj-$(subst m,y,$(CONFIG_MMC)) += host/ | 9 | obj-$(CONFIG_MMC) += host/ |
10 | |||
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig index 3b1f783bf92..ebb4afe6c70 100644 --- a/drivers/mmc/card/Kconfig +++ b/drivers/mmc/card/Kconfig | |||
@@ -50,6 +50,15 @@ config MMC_BLOCK_BOUNCE | |||
50 | 50 | ||
51 | If unsure, say Y here. | 51 | If unsure, say Y here. |
52 | 52 | ||
53 | config MMC_BLOCK_DEFERRED_RESUME | ||
54 | bool "Defer MMC layer resume until I/O is requested" | ||
55 | depends on MMC_BLOCK | ||
56 | default n | ||
57 | help | ||
58 | Say Y here to enable deferred MMC resume until I/O | ||
59 | is requested. This will reduce overall resume latency and | ||
60 | save power when there's an SD card inserted but not being used. | ||
61 | |||
53 | config SDIO_UART | 62 | config SDIO_UART |
54 | tristate "SDIO UART/GPS class support" | 63 | tristate "SDIO UART/GPS class support" |
55 | help | 64 | help |
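
The option added above only takes effect where the block driver checks it before servicing a request. Below is a minimal sketch of that gate, mirroring the #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME hunk this same patch adds to mmc_blk_issue_rq() in block.c further down; mmc_blk_maybe_resume() is a hypothetical wrapper name, while the helpers it calls are the ones used in that hunk.

```c
/* Sketch only: mirrors the #ifdef block added to mmc_blk_issue_rq()
 * later in this patch.  mmc_bus_needs_resume() and mmc_resume_bus()
 * are assumed to be provided by the core when this option is set. */
static void mmc_blk_maybe_resume(struct mmc_blk_data *md,
				 struct mmc_card *card)
{
#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
	/* Resume the bus lazily, on the first request after suspend. */
	if (mmc_bus_needs_resume(card->host)) {
		mmc_resume_bus(card->host);
		mmc_blk_set_blksize(md, card);
	}
#endif
}
```
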
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 21056b9ef0a..2bd93d7a517 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/mmc/mmc.h> | 41 | #include <linux/mmc/mmc.h> |
42 | #include <linux/mmc/sd.h> | 42 | #include <linux/mmc/sd.h> |
43 | 43 | ||
44 | #include <asm/system.h> | ||
44 | #include <asm/uaccess.h> | 45 | #include <asm/uaccess.h> |
45 | 46 | ||
46 | #include "queue.h" | 47 | #include "queue.h" |
@@ -57,7 +58,8 @@ MODULE_ALIAS("mmc:block"); | |||
57 | #define INAND_CMD38_ARG_SECERASE 0x80 | 58 | #define INAND_CMD38_ARG_SECERASE 0x80 |
58 | #define INAND_CMD38_ARG_SECTRIM1 0x81 | 59 | #define INAND_CMD38_ARG_SECTRIM1 0x81 |
59 | #define INAND_CMD38_ARG_SECTRIM2 0x88 | 60 | #define INAND_CMD38_ARG_SECTRIM2 0x88 |
60 | #define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ | 61 | |
62 | #define MMC_CMD_RETRIES 10 | ||
61 | 63 | ||
62 | static DEFINE_MUTEX(block_mutex); | 64 | static DEFINE_MUTEX(block_mutex); |
63 | 65 | ||
@@ -94,11 +96,6 @@ struct mmc_blk_data { | |||
94 | unsigned int read_only; | 96 | unsigned int read_only; |
95 | unsigned int part_type; | 97 | unsigned int part_type; |
96 | unsigned int name_idx; | 98 | unsigned int name_idx; |
97 | unsigned int reset_done; | ||
98 | #define MMC_BLK_READ BIT(0) | ||
99 | #define MMC_BLK_WRITE BIT(1) | ||
100 | #define MMC_BLK_DISCARD BIT(2) | ||
101 | #define MMC_BLK_SECDISCARD BIT(3) | ||
102 | 99 | ||
103 | /* | 100 | /* |
104 | * Only set in main mmc_blk_data associated | 101 | * Only set in main mmc_blk_data associated |
@@ -107,8 +104,6 @@ struct mmc_blk_data { | |||
107 | */ | 104 | */ |
108 | unsigned int part_curr; | 105 | unsigned int part_curr; |
109 | struct device_attribute force_ro; | 106 | struct device_attribute force_ro; |
110 | struct device_attribute power_ro_lock; | ||
111 | int area_type; | ||
112 | }; | 107 | }; |
113 | 108 | ||
114 | static DEFINE_MUTEX(open_lock); | 109 | static DEFINE_MUTEX(open_lock); |
@@ -116,21 +111,16 @@ static DEFINE_MUTEX(open_lock); | |||
116 | enum mmc_blk_status { | 111 | enum mmc_blk_status { |
117 | MMC_BLK_SUCCESS = 0, | 112 | MMC_BLK_SUCCESS = 0, |
118 | MMC_BLK_PARTIAL, | 113 | MMC_BLK_PARTIAL, |
119 | MMC_BLK_CMD_ERR, | ||
120 | MMC_BLK_RETRY, | 114 | MMC_BLK_RETRY, |
121 | MMC_BLK_ABORT, | 115 | MMC_BLK_RETRY_SINGLE, |
122 | MMC_BLK_DATA_ERR, | 116 | MMC_BLK_DATA_ERR, |
123 | MMC_BLK_ECC_ERR, | 117 | MMC_BLK_CMD_ERR, |
124 | MMC_BLK_NOMEDIUM, | 118 | MMC_BLK_ABORT, |
125 | }; | 119 | }; |
126 | 120 | ||
127 | module_param(perdev_minors, int, 0444); | 121 | module_param(perdev_minors, int, 0444); |
128 | MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device"); | 122 | MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device"); |
129 | 123 | ||
130 | static inline int mmc_blk_part_switch(struct mmc_card *card, | ||
131 | struct mmc_blk_data *md); | ||
132 | static int get_card_status(struct mmc_card *card, u32 *status, int retries); | ||
133 | |||
134 | static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) | 124 | static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) |
135 | { | 125 | { |
136 | struct mmc_blk_data *md; | 126 | struct mmc_blk_data *md; |
@@ -148,11 +138,7 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) | |||
148 | 138 | ||
149 | static inline int mmc_get_devidx(struct gendisk *disk) | 139 | static inline int mmc_get_devidx(struct gendisk *disk) |
150 | { | 140 | { |
151 | int devmaj = MAJOR(disk_devt(disk)); | 141 | int devidx = disk->first_minor / perdev_minors; |
152 | int devidx = MINOR(disk_devt(disk)) / perdev_minors; | ||
153 | |||
154 | if (!devmaj) | ||
155 | devidx = disk->first_minor / perdev_minors; | ||
156 | return devidx; | 142 | return devidx; |
157 | } | 143 | } |
158 | 144 | ||
@@ -172,70 +158,6 @@ static void mmc_blk_put(struct mmc_blk_data *md) | |||
172 | mutex_unlock(&open_lock); | 158 | mutex_unlock(&open_lock); |
173 | } | 159 | } |
174 | 160 | ||
175 | static ssize_t power_ro_lock_show(struct device *dev, | ||
176 | struct device_attribute *attr, char *buf) | ||
177 | { | ||
178 | int ret; | ||
179 | struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); | ||
180 | struct mmc_card *card = md->queue.card; | ||
181 | int locked = 0; | ||
182 | |||
183 | if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN) | ||
184 | locked = 2; | ||
185 | else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN) | ||
186 | locked = 1; | ||
187 | |||
188 | ret = snprintf(buf, PAGE_SIZE, "%d\n", locked); | ||
189 | |||
190 | return ret; | ||
191 | } | ||
192 | |||
193 | static ssize_t power_ro_lock_store(struct device *dev, | ||
194 | struct device_attribute *attr, const char *buf, size_t count) | ||
195 | { | ||
196 | int ret; | ||
197 | struct mmc_blk_data *md, *part_md; | ||
198 | struct mmc_card *card; | ||
199 | unsigned long set; | ||
200 | |||
201 | if (kstrtoul(buf, 0, &set)) | ||
202 | return -EINVAL; | ||
203 | |||
204 | if (set != 1) | ||
205 | return count; | ||
206 | |||
207 | md = mmc_blk_get(dev_to_disk(dev)); | ||
208 | card = md->queue.card; | ||
209 | |||
210 | mmc_claim_host(card->host); | ||
211 | |||
212 | ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, | ||
213 | card->ext_csd.boot_ro_lock | | ||
214 | EXT_CSD_BOOT_WP_B_PWR_WP_EN, | ||
215 | card->ext_csd.part_time); | ||
216 | if (ret) | ||
217 | pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret); | ||
218 | else | ||
219 | card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN; | ||
220 | |||
221 | mmc_release_host(card->host); | ||
222 | |||
223 | if (!ret) { | ||
224 | pr_info("%s: Locking boot partition ro until next power on\n", | ||
225 | md->disk->disk_name); | ||
226 | set_disk_ro(md->disk, 1); | ||
227 | |||
228 | list_for_each_entry(part_md, &md->part, part) | ||
229 | if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) { | ||
230 | pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name); | ||
231 | set_disk_ro(part_md->disk, 1); | ||
232 | } | ||
233 | } | ||
234 | |||
235 | mmc_blk_put(md); | ||
236 | return count; | ||
237 | } | ||
238 | |||
239 | static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr, | 161 | static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr, |
240 | char *buf) | 162 | char *buf) |
241 | { | 163 | { |
@@ -337,9 +259,6 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user( | |||
337 | goto idata_err; | 259 | goto idata_err; |
338 | } | 260 | } |
339 | 261 | ||
340 | if (!idata->buf_bytes) | ||
341 | return idata; | ||
342 | |||
343 | idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL); | 262 | idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL); |
344 | if (!idata->buf) { | 263 | if (!idata->buf) { |
345 | err = -ENOMEM; | 264 | err = -ENOMEM; |
@@ -362,38 +281,6 @@ out: | |||
362 | return ERR_PTR(err); | 281 | return ERR_PTR(err); |
363 | } | 282 | } |
364 | 283 | ||
365 | static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status, | ||
366 | u32 retries_max) | ||
367 | { | ||
368 | int err; | ||
369 | u32 retry_count = 0; | ||
370 | |||
371 | if (!status || !retries_max) | ||
372 | return -EINVAL; | ||
373 | |||
374 | do { | ||
375 | err = get_card_status(card, status, 5); | ||
376 | if (err) | ||
377 | break; | ||
378 | |||
379 | if (!R1_STATUS(*status) && | ||
380 | (R1_CURRENT_STATE(*status) != R1_STATE_PRG)) | ||
381 | break; /* RPMB programming operation complete */ | ||
382 | |||
383 | /* | ||
384 | * Rechedule to give the MMC device a chance to continue | ||
385 | * processing the previous command without being polled too | ||
386 | * frequently. | ||
387 | */ | ||
388 | usleep_range(1000, 5000); | ||
389 | } while (++retry_count < retries_max); | ||
390 | |||
391 | if (retry_count == retries_max) | ||
392 | err = -EPERM; | ||
393 | |||
394 | return err; | ||
395 | } | ||
396 | |||
397 | static int mmc_blk_ioctl_cmd(struct block_device *bdev, | 284 | static int mmc_blk_ioctl_cmd(struct block_device *bdev, |
398 | struct mmc_ioc_cmd __user *ic_ptr) | 285 | struct mmc_ioc_cmd __user *ic_ptr) |
399 | { | 286 | { |
@@ -402,11 +289,9 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, | |||
402 | struct mmc_card *card; | 289 | struct mmc_card *card; |
403 | struct mmc_command cmd = {0}; | 290 | struct mmc_command cmd = {0}; |
404 | struct mmc_data data = {0}; | 291 | struct mmc_data data = {0}; |
405 | struct mmc_request mrq = {NULL}; | 292 | struct mmc_request mrq = {0}; |
406 | struct scatterlist sg; | 293 | struct scatterlist sg; |
407 | int err; | 294 | int err; |
408 | int is_rpmb = false; | ||
409 | u32 status = 0; | ||
410 | 295 | ||
411 | /* | 296 | /* |
412 | * The caller must have CAP_SYS_RAWIO, and must be calling this on the | 297 | * The caller must have CAP_SYS_RAWIO, and must be calling this on the |
@@ -420,80 +305,61 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, | |||
420 | if (IS_ERR(idata)) | 305 | if (IS_ERR(idata)) |
421 | return PTR_ERR(idata); | 306 | return PTR_ERR(idata); |
422 | 307 | ||
423 | md = mmc_blk_get(bdev->bd_disk); | ||
424 | if (!md) { | ||
425 | err = -EINVAL; | ||
426 | goto cmd_err; | ||
427 | } | ||
428 | |||
429 | if (md->area_type & MMC_BLK_DATA_AREA_RPMB) | ||
430 | is_rpmb = true; | ||
431 | |||
432 | card = md->queue.card; | ||
433 | if (IS_ERR(card)) { | ||
434 | err = PTR_ERR(card); | ||
435 | goto cmd_done; | ||
436 | } | ||
437 | |||
438 | cmd.opcode = idata->ic.opcode; | 308 | cmd.opcode = idata->ic.opcode; |
439 | cmd.arg = idata->ic.arg; | 309 | cmd.arg = idata->ic.arg; |
440 | cmd.flags = idata->ic.flags; | 310 | cmd.flags = idata->ic.flags; |
441 | 311 | ||
442 | if (idata->buf_bytes) { | 312 | data.sg = &sg; |
443 | data.sg = &sg; | 313 | data.sg_len = 1; |
444 | data.sg_len = 1; | 314 | data.blksz = idata->ic.blksz; |
445 | data.blksz = idata->ic.blksz; | 315 | data.blocks = idata->ic.blocks; |
446 | data.blocks = idata->ic.blocks; | ||
447 | |||
448 | sg_init_one(data.sg, idata->buf, idata->buf_bytes); | ||
449 | |||
450 | if (idata->ic.write_flag) | ||
451 | data.flags = MMC_DATA_WRITE; | ||
452 | else | ||
453 | data.flags = MMC_DATA_READ; | ||
454 | 316 | ||
455 | /* data.flags must already be set before doing this. */ | 317 | sg_init_one(data.sg, idata->buf, idata->buf_bytes); |
456 | mmc_set_data_timeout(&data, card); | ||
457 | 318 | ||
458 | /* Allow overriding the timeout_ns for empirical tuning. */ | 319 | if (idata->ic.write_flag) |
459 | if (idata->ic.data_timeout_ns) | 320 | data.flags = MMC_DATA_WRITE; |
460 | data.timeout_ns = idata->ic.data_timeout_ns; | 321 | else |
322 | data.flags = MMC_DATA_READ; | ||
461 | 323 | ||
462 | if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { | 324 | mrq.cmd = &cmd; |
463 | /* | 325 | mrq.data = &data; |
464 | * Pretend this is a data transfer and rely on the | ||
465 | * host driver to compute timeout. When all host | ||
466 | * drivers support cmd.cmd_timeout for R1B, this | ||
467 | * can be changed to: | ||
468 | * | ||
469 | * mrq.data = NULL; | ||
470 | * cmd.cmd_timeout = idata->ic.cmd_timeout_ms; | ||
471 | */ | ||
472 | data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000; | ||
473 | } | ||
474 | 326 | ||
475 | mrq.data = &data; | 327 | md = mmc_blk_get(bdev->bd_disk); |
328 | if (!md) { | ||
329 | err = -EINVAL; | ||
330 | goto cmd_done; | ||
476 | } | 331 | } |
477 | 332 | ||
478 | mrq.cmd = &cmd; | 333 | card = md->queue.card; |
334 | if (IS_ERR(card)) { | ||
335 | err = PTR_ERR(card); | ||
336 | goto cmd_done; | ||
337 | } | ||
479 | 338 | ||
480 | mmc_claim_host(card->host); | 339 | mmc_claim_host(card->host); |
481 | 340 | ||
482 | err = mmc_blk_part_switch(card, md); | ||
483 | if (err) | ||
484 | goto cmd_rel_host; | ||
485 | |||
486 | if (idata->ic.is_acmd) { | 341 | if (idata->ic.is_acmd) { |
487 | err = mmc_app_cmd(card->host, card); | 342 | err = mmc_app_cmd(card->host, card); |
488 | if (err) | 343 | if (err) |
489 | goto cmd_rel_host; | 344 | goto cmd_rel_host; |
490 | } | 345 | } |
491 | 346 | ||
492 | if (is_rpmb) { | 347 | /* data.flags must already be set before doing this. */ |
493 | err = mmc_set_blockcount(card, data.blocks, | 348 | mmc_set_data_timeout(&data, card); |
494 | idata->ic.write_flag & (1 << 31)); | 349 | /* Allow overriding the timeout_ns for empirical tuning. */ |
495 | if (err) | 350 | if (idata->ic.data_timeout_ns) |
496 | goto cmd_rel_host; | 351 | data.timeout_ns = idata->ic.data_timeout_ns; |
352 | |||
353 | if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { | ||
354 | /* | ||
355 | * Pretend this is a data transfer and rely on the host driver | ||
356 | * to compute timeout. When all host drivers support | ||
357 | * cmd.cmd_timeout for R1B, this can be changed to: | ||
358 | * | ||
359 | * mrq.data = NULL; | ||
360 | * cmd.cmd_timeout = idata->ic.cmd_timeout_ms; | ||
361 | */ | ||
362 | data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000; | ||
497 | } | 363 | } |
498 | 364 | ||
499 | mmc_wait_for_req(card->host, &mrq); | 365 | mmc_wait_for_req(card->host, &mrq); |
@@ -531,24 +397,11 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, | |||
531 | } | 397 | } |
532 | } | 398 | } |
533 | 399 | ||
534 | if (is_rpmb) { | ||
535 | /* | ||
536 | * Ensure RPMB command has completed by polling CMD13 | ||
537 | * "Send Status". | ||
538 | */ | ||
539 | err = ioctl_rpmb_card_status_poll(card, &status, 5); | ||
540 | if (err) | ||
541 | dev_err(mmc_dev(card->host), | ||
542 | "%s: Card Status=0x%08X, error %d\n", | ||
543 | __func__, status, err); | ||
544 | } | ||
545 | |||
546 | cmd_rel_host: | 400 | cmd_rel_host: |
547 | mmc_release_host(card->host); | 401 | mmc_release_host(card->host); |
548 | 402 | ||
549 | cmd_done: | 403 | cmd_done: |
550 | mmc_blk_put(md); | 404 | mmc_blk_put(md); |
551 | cmd_err: | ||
552 | kfree(idata->buf); | 405 | kfree(idata->buf); |
553 | kfree(idata); | 406 | kfree(idata); |
554 | return err; | 407 | return err; |
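
For context, mmc_blk_ioctl_cmd() above is reached from user space through the MMC_IOC_CMD ioctl on the whole-disk node. The following is a hedged user-space sketch of such a call: /dev/mmcblk0 is an example path, and the response-flag value is a local assumption that mirrors the kernel's MMC_RSP_R1 | MMC_CMD_ADTC encoding and should be checked against the running kernel.

```c
/* User-space sketch: read the EXT_CSD of an eMMC device (CMD8,
 * SEND_EXT_CSD) through the ioctl handled by mmc_blk_ioctl_cmd()
 * above.  Needs CAP_SYS_RAWIO and the whole-disk node. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/mmc/ioctl.h>

#define RSP_R1_ADTC 0x35	/* assumed: MMC_RSP_R1 | MMC_CMD_ADTC */

int main(void)
{
	uint8_t ext_csd[512];
	struct mmc_ioc_cmd ic;
	int fd = open("/dev/mmcblk0", O_RDWR);	/* example device node */

	if (fd < 0)
		return 1;

	memset(&ic, 0, sizeof(ic));
	ic.opcode = 8;		/* SEND_EXT_CSD */
	ic.arg = 0;
	ic.flags = RSP_R1_ADTC;
	ic.blksz = 512;
	ic.blocks = 1;
	ic.write_flag = 0;	/* this is a read */
	mmc_ioc_cmd_set_data(ic, ext_csd);

	if (ioctl(fd, MMC_IOC_CMD, &ic) < 0)
		perror("MMC_IOC_CMD");
	else
		printf("EXT_CSD revision: %u\n", ext_csd[192]);

	close(fd);
	return 0;
}
```
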
@@ -587,24 +440,19 @@ static inline int mmc_blk_part_switch(struct mmc_card *card, | |||
587 | { | 440 | { |
588 | int ret; | 441 | int ret; |
589 | struct mmc_blk_data *main_md = mmc_get_drvdata(card); | 442 | struct mmc_blk_data *main_md = mmc_get_drvdata(card); |
590 | |||
591 | if (main_md->part_curr == md->part_type) | 443 | if (main_md->part_curr == md->part_type) |
592 | return 0; | 444 | return 0; |
593 | 445 | ||
594 | if (mmc_card_mmc(card)) { | 446 | if (mmc_card_mmc(card)) { |
595 | u8 part_config = card->ext_csd.part_config; | 447 | card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; |
596 | 448 | card->ext_csd.part_config |= md->part_type; | |
597 | part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; | ||
598 | part_config |= md->part_type; | ||
599 | 449 | ||
600 | ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | 450 | ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
601 | EXT_CSD_PART_CONFIG, part_config, | 451 | EXT_CSD_PART_CONFIG, card->ext_csd.part_config, |
602 | card->ext_csd.part_time); | 452 | card->ext_csd.part_time); |
603 | if (ret) | 453 | if (ret) |
604 | return ret; | 454 | return ret; |
605 | 455 | } | |
606 | card->ext_csd.part_config = part_config; | ||
607 | } | ||
608 | 456 | ||
609 | main_md->part_curr = md->part_type; | 457 | main_md->part_curr = md->part_type; |
610 | return 0; | 458 | return 0; |
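
One behavioural difference worth noting in the hunk above: the left-hand version builds the new PART_CONFIG value in a local variable and writes it back to the cached card->ext_csd.part_config only after mmc_switch() succeeds, while the right-hand version updates the cache before issuing the switch, so a failed switch leaves the cache out of sync with the card. A sketch of the commit-on-success ordering, using the names from the hunk:

```c
/* Commit-on-success ordering, as in the left-hand column above. */
u8 part_config = card->ext_csd.part_config;

part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
part_config |= md->part_type;

ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
		 EXT_CSD_PART_CONFIG, part_config,
		 card->ext_csd.part_time);
if (ret)
	return ret;	/* cached value still matches the card */

card->ext_csd.part_config = part_config;
```
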
@@ -616,9 +464,10 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) | |||
616 | u32 result; | 464 | u32 result; |
617 | __be32 *blocks; | 465 | __be32 *blocks; |
618 | 466 | ||
619 | struct mmc_request mrq = {NULL}; | 467 | struct mmc_request mrq = {0}; |
620 | struct mmc_command cmd = {0}; | 468 | struct mmc_command cmd = {0}; |
621 | struct mmc_data data = {0}; | 469 | struct mmc_data data = {0}; |
470 | unsigned int timeout_us; | ||
622 | 471 | ||
623 | struct scatterlist sg; | 472 | struct scatterlist sg; |
624 | 473 | ||
@@ -638,12 +487,23 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) | |||
638 | cmd.arg = 0; | 487 | cmd.arg = 0; |
639 | cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; | 488 | cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; |
640 | 489 | ||
490 | data.timeout_ns = card->csd.tacc_ns * 100; | ||
491 | data.timeout_clks = card->csd.tacc_clks * 100; | ||
492 | |||
493 | timeout_us = data.timeout_ns / 1000; | ||
494 | timeout_us += data.timeout_clks * 1000 / | ||
495 | (card->host->ios.clock / 1000); | ||
496 | |||
497 | if (timeout_us > 100000) { | ||
498 | data.timeout_ns = 100000000; | ||
499 | data.timeout_clks = 0; | ||
500 | } | ||
501 | |||
641 | data.blksz = 4; | 502 | data.blksz = 4; |
642 | data.blocks = 1; | 503 | data.blocks = 1; |
643 | data.flags = MMC_DATA_READ; | 504 | data.flags = MMC_DATA_READ; |
644 | data.sg = &sg; | 505 | data.sg = &sg; |
645 | data.sg_len = 1; | 506 | data.sg_len = 1; |
646 | mmc_set_data_timeout(&data, card); | ||
647 | 507 | ||
648 | mrq.cmd = &cmd; | 508 | mrq.cmd = &cmd; |
649 | mrq.data = &data; | 509 | mrq.data = &data; |
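
The open-coded timeout above replaces the removed mmc_set_data_timeout() call: it allows 100 times the card's TAAC figures from the CSD and then caps the result at 100 ms. A standalone worked example of that arithmetic (the TAAC and clock figures are made up for illustration):

```c
#include <stdio.h>

int main(void)
{
	/* Example figures only: tacc_ns = 1.5 ms, no clock component,
	 * host clock 50 MHz.  Mirrors the capping logic in the hunk. */
	unsigned int tacc_ns = 1500000, tacc_clks = 0, clock_hz = 50000000;
	unsigned int timeout_ns = tacc_ns * 100;	/* 150,000,000 ns */
	unsigned int timeout_clks = tacc_clks * 100;
	unsigned int timeout_us;

	timeout_us = timeout_ns / 1000;
	timeout_us += timeout_clks * 1000 / (clock_hz / 1000);

	if (timeout_us > 100000) {	/* more than 100 ms: clamp */
		timeout_ns = 100000000;
		timeout_clks = 0;
	}

	/* Prints 100000000 -- the 150 ms request is capped to 100 ms. */
	printf("timeout_ns = %u\n", timeout_ns);
	return 0;
}
```
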
@@ -693,7 +553,6 @@ static int get_card_status(struct mmc_card *card, u32 *status, int retries) | |||
693 | return err; | 553 | return err; |
694 | } | 554 | } |
695 | 555 | ||
696 | #define ERR_NOMEDIUM 3 | ||
697 | #define ERR_RETRY 2 | 556 | #define ERR_RETRY 2 |
698 | #define ERR_ABORT 1 | 557 | #define ERR_ABORT 1 |
699 | #define ERR_CONTINUE 0 | 558 | #define ERR_CONTINUE 0 |
@@ -714,18 +573,22 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error, | |||
714 | req->rq_disk->disk_name, "timed out", name, status); | 573 | req->rq_disk->disk_name, "timed out", name, status); |
715 | 574 | ||
716 | /* If the status cmd initially failed, retry the r/w cmd */ | 575 | /* If the status cmd initially failed, retry the r/w cmd */ |
717 | if (!status_valid) | 576 | if (!status_valid) { |
577 | pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name); | ||
718 | return ERR_RETRY; | 578 | return ERR_RETRY; |
719 | 579 | } | |
720 | /* | 580 | /* |
721 | * If it was a r/w cmd crc error, or illegal command | 581 | * If it was a r/w cmd crc error, or illegal command |
722 | * (eg, issued in wrong state) then retry - we should | 582 | * (eg, issued in wrong state) then retry - we should |
723 | * have corrected the state problem above. | 583 | * have corrected the state problem above. |
724 | */ | 584 | */ |
725 | if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) | 585 | if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) { |
586 | pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name); | ||
726 | return ERR_RETRY; | 587 | return ERR_RETRY; |
588 | } | ||
727 | 589 | ||
728 | /* Otherwise abort the command */ | 590 | /* Otherwise abort the command */ |
591 | pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name); | ||
729 | return ERR_ABORT; | 592 | return ERR_ABORT; |
730 | 593 | ||
731 | default: | 594 | default: |
@@ -755,15 +618,12 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error, | |||
755 | * Otherwise we don't understand what happened, so abort. | 618 | * Otherwise we don't understand what happened, so abort. |
756 | */ | 619 | */ |
757 | static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, | 620 | static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, |
758 | struct mmc_blk_request *brq, int *ecc_err) | 621 | struct mmc_blk_request *brq) |
759 | { | 622 | { |
760 | bool prev_cmd_status_valid = true; | 623 | bool prev_cmd_status_valid = true; |
761 | u32 status, stop_status = 0; | 624 | u32 status, stop_status = 0; |
762 | int err, retry; | 625 | int err, retry; |
763 | 626 | ||
764 | if (mmc_card_removed(card)) | ||
765 | return ERR_NOMEDIUM; | ||
766 | |||
767 | /* | 627 | /* |
768 | * Try to get card status which indicates both the card state | 628 | * Try to get card status which indicates both the card state |
769 | * and why there was no response. If the first attempt fails, | 629 | * and why there was no response. If the first attempt fails, |
@@ -780,18 +640,8 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, | |||
780 | } | 640 | } |
781 | 641 | ||
782 | /* We couldn't get a response from the card. Give up. */ | 642 | /* We couldn't get a response from the card. Give up. */ |
783 | if (err) { | 643 | if (err) |
784 | /* Check if the card is removed */ | ||
785 | if (mmc_detect_card_removed(card->host)) | ||
786 | return ERR_NOMEDIUM; | ||
787 | return ERR_ABORT; | 644 | return ERR_ABORT; |
788 | } | ||
789 | |||
790 | /* Flag ECC errors */ | ||
791 | if ((status & R1_CARD_ECC_FAILED) || | ||
792 | (brq->stop.resp[0] & R1_CARD_ECC_FAILED) || | ||
793 | (brq->cmd.resp[0] & R1_CARD_ECC_FAILED)) | ||
794 | *ecc_err = 1; | ||
795 | 645 | ||
796 | /* | 646 | /* |
797 | * Check the current card state. If it is in some data transfer | 647 | * Check the current card state. If it is in some data transfer |
@@ -810,8 +660,6 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, | |||
810 | */ | 660 | */ |
811 | if (err) | 661 | if (err) |
812 | return ERR_ABORT; | 662 | return ERR_ABORT; |
813 | if (stop_status & R1_CARD_ECC_FAILED) | ||
814 | *ecc_err = 1; | ||
815 | } | 663 | } |
816 | 664 | ||
817 | /* Check for set block count errors */ | 665 | /* Check for set block count errors */ |
@@ -824,10 +672,6 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, | |||
824 | return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error, | 672 | return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error, |
825 | prev_cmd_status_valid, status); | 673 | prev_cmd_status_valid, status); |
826 | 674 | ||
827 | /* Data errors */ | ||
828 | if (!brq->stop.error) | ||
829 | return ERR_CONTINUE; | ||
830 | |||
831 | /* Now for stop errors. These aren't fatal to the transfer. */ | 675 | /* Now for stop errors. These aren't fatal to the transfer. */ |
832 | pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n", | 676 | pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n", |
833 | req->rq_disk->disk_name, brq->stop.error, | 677 | req->rq_disk->disk_name, brq->stop.error, |
@@ -844,45 +688,12 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, | |||
844 | return ERR_CONTINUE; | 688 | return ERR_CONTINUE; |
845 | } | 689 | } |
846 | 690 | ||
847 | static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, | ||
848 | int type) | ||
849 | { | ||
850 | int err; | ||
851 | |||
852 | if (md->reset_done & type) | ||
853 | return -EEXIST; | ||
854 | |||
855 | md->reset_done |= type; | ||
856 | err = mmc_hw_reset(host); | ||
857 | /* Ensure we switch back to the correct partition */ | ||
858 | if (err != -EOPNOTSUPP) { | ||
859 | struct mmc_blk_data *main_md = mmc_get_drvdata(host->card); | ||
860 | int part_err; | ||
861 | |||
862 | main_md->part_curr = main_md->part_type; | ||
863 | part_err = mmc_blk_part_switch(host->card, md); | ||
864 | if (part_err) { | ||
865 | /* | ||
866 | * We have failed to get back into the correct | ||
867 | * partition, so we need to abort the whole request. | ||
868 | */ | ||
869 | return -ENODEV; | ||
870 | } | ||
871 | } | ||
872 | return err; | ||
873 | } | ||
874 | |||
875 | static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) | ||
876 | { | ||
877 | md->reset_done &= ~type; | ||
878 | } | ||
879 | |||
880 | static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) | 691 | static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) |
881 | { | 692 | { |
882 | struct mmc_blk_data *md = mq->data; | 693 | struct mmc_blk_data *md = mq->data; |
883 | struct mmc_card *card = md->queue.card; | 694 | struct mmc_card *card = md->queue.card; |
884 | unsigned int from, nr, arg; | 695 | unsigned int from, nr, arg; |
885 | int err = 0, type = MMC_BLK_DISCARD; | 696 | int err = 0; |
886 | 697 | ||
887 | if (!mmc_can_erase(card)) { | 698 | if (!mmc_can_erase(card)) { |
888 | err = -EOPNOTSUPP; | 699 | err = -EOPNOTSUPP; |
@@ -892,13 +703,11 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) | |||
892 | from = blk_rq_pos(req); | 703 | from = blk_rq_pos(req); |
893 | nr = blk_rq_sectors(req); | 704 | nr = blk_rq_sectors(req); |
894 | 705 | ||
895 | if (mmc_can_discard(card)) | 706 | if (mmc_can_trim(card)) |
896 | arg = MMC_DISCARD_ARG; | ||
897 | else if (mmc_can_trim(card)) | ||
898 | arg = MMC_TRIM_ARG; | 707 | arg = MMC_TRIM_ARG; |
899 | else | 708 | else |
900 | arg = MMC_ERASE_ARG; | 709 | arg = MMC_ERASE_ARG; |
901 | retry: | 710 | |
902 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { | 711 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { |
903 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | 712 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
904 | INAND_CMD38_ARG_EXT_CSD, | 713 | INAND_CMD38_ARG_EXT_CSD, |
@@ -911,11 +720,9 @@ retry: | |||
911 | } | 720 | } |
912 | err = mmc_erase(card, from, nr, arg); | 721 | err = mmc_erase(card, from, nr, arg); |
913 | out: | 722 | out: |
914 | if (err == -EIO && !mmc_blk_reset(md, card->host, type)) | 723 | spin_lock_irq(&md->lock); |
915 | goto retry; | 724 | __blk_end_request(req, err, blk_rq_bytes(req)); |
916 | if (!err) | 725 | spin_unlock_irq(&md->lock); |
917 | mmc_blk_reset_success(md, type); | ||
918 | blk_end_request(req, err, blk_rq_bytes(req)); | ||
919 | 726 | ||
920 | return err ? 0 : 1; | 727 | return err ? 0 : 1; |
921 | } | 728 | } |
@@ -925,10 +732,10 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, | |||
925 | { | 732 | { |
926 | struct mmc_blk_data *md = mq->data; | 733 | struct mmc_blk_data *md = mq->data; |
927 | struct mmc_card *card = md->queue.card; | 734 | struct mmc_card *card = md->queue.card; |
928 | unsigned int from, nr, arg, trim_arg, erase_arg; | 735 | unsigned int from, nr, arg; |
929 | int err = 0, type = MMC_BLK_SECDISCARD; | 736 | int err = 0; |
930 | 737 | ||
931 | if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) { | 738 | if (!mmc_can_secure_erase_trim(card)) { |
932 | err = -EOPNOTSUPP; | 739 | err = -EOPNOTSUPP; |
933 | goto out; | 740 | goto out; |
934 | } | 741 | } |
@@ -936,24 +743,11 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, | |||
936 | from = blk_rq_pos(req); | 743 | from = blk_rq_pos(req); |
937 | nr = blk_rq_sectors(req); | 744 | nr = blk_rq_sectors(req); |
938 | 745 | ||
939 | /* The sanitize operation is supported at v4.5 only */ | 746 | if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr)) |
940 | if (mmc_can_sanitize(card)) { | 747 | arg = MMC_SECURE_TRIM1_ARG; |
941 | erase_arg = MMC_ERASE_ARG; | 748 | else |
942 | trim_arg = MMC_TRIM_ARG; | 749 | arg = MMC_SECURE_ERASE_ARG; |
943 | } else { | ||
944 | erase_arg = MMC_SECURE_ERASE_ARG; | ||
945 | trim_arg = MMC_SECURE_TRIM1_ARG; | ||
946 | } | ||
947 | 750 | ||
948 | if (mmc_erase_group_aligned(card, from, nr)) | ||
949 | arg = erase_arg; | ||
950 | else if (mmc_can_trim(card)) | ||
951 | arg = trim_arg; | ||
952 | else { | ||
953 | err = -EINVAL; | ||
954 | goto out; | ||
955 | } | ||
956 | retry: | ||
957 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { | 751 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { |
958 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | 752 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
959 | INAND_CMD38_ARG_EXT_CSD, | 753 | INAND_CMD38_ARG_EXT_CSD, |
@@ -962,42 +756,24 @@ retry: | |||
962 | INAND_CMD38_ARG_SECERASE, | 756 | INAND_CMD38_ARG_SECERASE, |
963 | 0); | 757 | 0); |
964 | if (err) | 758 | if (err) |
965 | goto out_retry; | 759 | goto out; |
966 | } | 760 | } |
967 | |||
968 | err = mmc_erase(card, from, nr, arg); | 761 | err = mmc_erase(card, from, nr, arg); |
969 | if (err == -EIO) | 762 | if (!err && arg == MMC_SECURE_TRIM1_ARG) { |
970 | goto out_retry; | ||
971 | if (err) | ||
972 | goto out; | ||
973 | |||
974 | if (arg == MMC_SECURE_TRIM1_ARG) { | ||
975 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { | 763 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { |
976 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | 764 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
977 | INAND_CMD38_ARG_EXT_CSD, | 765 | INAND_CMD38_ARG_EXT_CSD, |
978 | INAND_CMD38_ARG_SECTRIM2, | 766 | INAND_CMD38_ARG_SECTRIM2, |
979 | 0); | 767 | 0); |
980 | if (err) | 768 | if (err) |
981 | goto out_retry; | 769 | goto out; |
982 | } | 770 | } |
983 | |||
984 | err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); | 771 | err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); |
985 | if (err == -EIO) | ||
986 | goto out_retry; | ||
987 | if (err) | ||
988 | goto out; | ||
989 | } | 772 | } |
990 | |||
991 | if (mmc_can_sanitize(card)) | ||
992 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | ||
993 | EXT_CSD_SANITIZE_START, 1, 0); | ||
994 | out_retry: | ||
995 | if (err && !mmc_blk_reset(md, card->host, type)) | ||
996 | goto retry; | ||
997 | if (!err) | ||
998 | mmc_blk_reset_success(md, type); | ||
999 | out: | 773 | out: |
1000 | blk_end_request(req, err, blk_rq_bytes(req)); | 774 | spin_lock_irq(&md->lock); |
775 | __blk_end_request(req, err, blk_rq_bytes(req)); | ||
776 | spin_unlock_irq(&md->lock); | ||
1001 | 777 | ||
1002 | return err ? 0 : 1; | 778 | return err ? 0 : 1; |
1003 | } | 779 | } |
@@ -1005,16 +781,16 @@ out: | |||
1005 | static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) | 781 | static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) |
1006 | { | 782 | { |
1007 | struct mmc_blk_data *md = mq->data; | 783 | struct mmc_blk_data *md = mq->data; |
1008 | struct mmc_card *card = md->queue.card; | ||
1009 | int ret = 0; | ||
1010 | 784 | ||
1011 | ret = mmc_flush_cache(card); | 785 | /* |
1012 | if (ret) | 786 | * No-op, only service this because we need REQ_FUA for reliable |
1013 | ret = -EIO; | 787 | * writes. |
1014 | 788 | */ | |
1015 | blk_end_request_all(req, ret); | 789 | spin_lock_irq(&md->lock); |
790 | __blk_end_request_all(req, 0); | ||
791 | spin_unlock_irq(&md->lock); | ||
1016 | 792 | ||
1017 | return ret ? 0 : 1; | 793 | return 1; |
1018 | } | 794 | } |
1019 | 795 | ||
1020 | /* | 796 | /* |
@@ -1051,11 +827,11 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq, | |||
1051 | static int mmc_blk_err_check(struct mmc_card *card, | 827 | static int mmc_blk_err_check(struct mmc_card *card, |
1052 | struct mmc_async_req *areq) | 828 | struct mmc_async_req *areq) |
1053 | { | 829 | { |
830 | enum mmc_blk_status ret = MMC_BLK_SUCCESS; | ||
1054 | struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req, | 831 | struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req, |
1055 | mmc_active); | 832 | mmc_active); |
1056 | struct mmc_blk_request *brq = &mq_mrq->brq; | 833 | struct mmc_blk_request *brq = &mq_mrq->brq; |
1057 | struct request *req = mq_mrq->req; | 834 | struct request *req = mq_mrq->req; |
1058 | int ecc_err = 0; | ||
1059 | 835 | ||
1060 | /* | 836 | /* |
1061 | * sbc.error indicates a problem with the set block count | 837 | * sbc.error indicates a problem with the set block count |
@@ -1067,15 +843,12 @@ static int mmc_blk_err_check(struct mmc_card *card, | |||
1067 | * stop.error indicates a problem with the stop command. Data | 843 | * stop.error indicates a problem with the stop command. Data |
1068 | * may have been transferred, or may still be transferring. | 844 | * may have been transferred, or may still be transferring. |
1069 | */ | 845 | */ |
1070 | if (brq->sbc.error || brq->cmd.error || brq->stop.error || | 846 | if (brq->sbc.error || brq->cmd.error || brq->stop.error) { |
1071 | brq->data.error) { | 847 | switch (mmc_blk_cmd_recovery(card, req, brq)) { |
1072 | switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) { | ||
1073 | case ERR_RETRY: | 848 | case ERR_RETRY: |
1074 | return MMC_BLK_RETRY; | 849 | return MMC_BLK_RETRY; |
1075 | case ERR_ABORT: | 850 | case ERR_ABORT: |
1076 | return MMC_BLK_ABORT; | 851 | return MMC_BLK_ABORT; |
1077 | case ERR_NOMEDIUM: | ||
1078 | return MMC_BLK_NOMEDIUM; | ||
1079 | case ERR_CONTINUE: | 852 | case ERR_CONTINUE: |
1080 | break; | 853 | break; |
1081 | } | 854 | } |
@@ -1099,27 +872,13 @@ static int mmc_blk_err_check(struct mmc_card *card, | |||
1099 | */ | 872 | */ |
1100 | if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { | 873 | if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { |
1101 | u32 status; | 874 | u32 status; |
1102 | unsigned long timeout; | ||
1103 | |||
1104 | timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS); | ||
1105 | do { | 875 | do { |
1106 | int err = get_card_status(card, &status, 5); | 876 | int err = get_card_status(card, &status, 5); |
1107 | if (err) { | 877 | if (err) { |
1108 | pr_err("%s: error %d requesting status\n", | 878 | printk(KERN_ERR "%s: error %d requesting status\n", |
1109 | req->rq_disk->disk_name, err); | 879 | req->rq_disk->disk_name, err); |
1110 | return MMC_BLK_CMD_ERR; | 880 | return MMC_BLK_CMD_ERR; |
1111 | } | 881 | } |
1112 | |||
1113 | /* Timeout if the device never becomes ready for data | ||
1114 | * and never leaves the program state. | ||
1115 | */ | ||
1116 | if (time_after(jiffies, timeout)) { | ||
1117 | pr_err("%s: Card stuck in programming state!"\ | ||
1118 | " %s %s\n", mmc_hostname(card->host), | ||
1119 | req->rq_disk->disk_name, __func__); | ||
1120 | |||
1121 | return MMC_BLK_CMD_ERR; | ||
1122 | } | ||
1123 | /* | 882 | /* |
1124 | * Some cards mishandle the status bits, | 883 | * Some cards mishandle the status bits, |
1125 | * so make sure to check both the busy | 884 | * so make sure to check both the busy |
@@ -1137,21 +896,23 @@ static int mmc_blk_err_check(struct mmc_card *card, | |||
1137 | brq->cmd.resp[0], brq->stop.resp[0]); | 896 | brq->cmd.resp[0], brq->stop.resp[0]); |
1138 | 897 | ||
1139 | if (rq_data_dir(req) == READ) { | 898 | if (rq_data_dir(req) == READ) { |
1140 | if (ecc_err) | 899 | if (brq->data.blocks > 1) { |
1141 | return MMC_BLK_ECC_ERR; | 900 | /* Redo read one sector at a time */ |
901 | pr_warning("%s: retrying using single block read\n", | ||
902 | req->rq_disk->disk_name); | ||
903 | return MMC_BLK_RETRY_SINGLE; | ||
904 | } | ||
1142 | return MMC_BLK_DATA_ERR; | 905 | return MMC_BLK_DATA_ERR; |
1143 | } else { | 906 | } else { |
1144 | return MMC_BLK_CMD_ERR; | 907 | return MMC_BLK_CMD_ERR; |
1145 | } | 908 | } |
1146 | } | 909 | } |
1147 | 910 | ||
1148 | if (!brq->data.bytes_xfered) | 911 | if (ret == MMC_BLK_SUCCESS && |
1149 | return MMC_BLK_RETRY; | 912 | blk_rq_bytes(req) != brq->data.bytes_xfered) |
913 | ret = MMC_BLK_PARTIAL; | ||
1150 | 914 | ||
1151 | if (blk_rq_bytes(req) != brq->data.bytes_xfered) | 915 | return ret; |
1152 | return MMC_BLK_PARTIAL; | ||
1153 | |||
1154 | return MMC_BLK_SUCCESS; | ||
1155 | } | 916 | } |
1156 | 917 | ||
1157 | static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, | 918 | static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, |
@@ -1163,7 +924,6 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, | |||
1163 | struct mmc_blk_request *brq = &mqrq->brq; | 924 | struct mmc_blk_request *brq = &mqrq->brq; |
1164 | struct request *req = mqrq->req; | 925 | struct request *req = mqrq->req; |
1165 | struct mmc_blk_data *md = mq->data; | 926 | struct mmc_blk_data *md = mq->data; |
1166 | bool do_data_tag; | ||
1167 | 927 | ||
1168 | /* | 928 | /* |
1169 | * Reliable writes are used to implement Forced Unit Access and | 929 | * Reliable writes are used to implement Forced Unit Access and |
@@ -1185,6 +945,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, | |||
1185 | if (!mmc_card_blockaddr(card)) | 945 | if (!mmc_card_blockaddr(card)) |
1186 | brq->cmd.arg <<= 9; | 946 | brq->cmd.arg <<= 9; |
1187 | brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; | 947 | brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; |
948 | brq->cmd.retries = MMC_CMD_RETRIES; | ||
1188 | brq->data.blksz = 512; | 949 | brq->data.blksz = 512; |
1189 | brq->stop.opcode = MMC_STOP_TRANSMISSION; | 950 | brq->stop.opcode = MMC_STOP_TRANSMISSION; |
1190 | brq->stop.arg = 0; | 951 | brq->stop.arg = 0; |
@@ -1199,20 +960,13 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, | |||
1199 | if (brq->data.blocks > card->host->max_blk_count) | 960 | if (brq->data.blocks > card->host->max_blk_count) |
1200 | brq->data.blocks = card->host->max_blk_count; | 961 | brq->data.blocks = card->host->max_blk_count; |
1201 | 962 | ||
1202 | if (brq->data.blocks > 1) { | 963 | /* |
1203 | /* | 964 | * After a read error, we redo the request one sector at a time |
1204 | * After a read error, we redo the request one sector | 965 | * in order to accurately determine which sectors can be read |
1205 | * at a time in order to accurately determine which | 966 | * successfully. |
1206 | * sectors can be read successfully. | 967 | */ |
1207 | */ | 968 | if (disable_multi && brq->data.blocks > 1) |
1208 | if (disable_multi) | 969 | brq->data.blocks = 1; |
1209 | brq->data.blocks = 1; | ||
1210 | |||
1211 | /* Some controllers can't do multiblock reads due to hw bugs */ | ||
1212 | if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ && | ||
1213 | rq_data_dir(req) == READ) | ||
1214 | brq->data.blocks = 1; | ||
1215 | } | ||
1216 | 970 | ||
1217 | if (brq->data.blocks > 1 || do_rel_wr) { | 971 | if (brq->data.blocks > 1 || do_rel_wr) { |
1218 | /* SPI multiblock writes terminate using a special | 972 | /* SPI multiblock writes terminate using a special |
@@ -1240,16 +994,6 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, | |||
1240 | mmc_apply_rel_rw(brq, card, req); | 994 | mmc_apply_rel_rw(brq, card, req); |
1241 | 995 | ||
1242 | /* | 996 | /* |
1243 | * Data tag is used only during writing meta data to speed | ||
1244 | * up write and any subsequent read of this meta data | ||
1245 | */ | ||
1246 | do_data_tag = (card->ext_csd.data_tag_unit_size) && | ||
1247 | (req->cmd_flags & REQ_META) && | ||
1248 | (rq_data_dir(req) == WRITE) && | ||
1249 | ((brq->data.blocks * brq->data.blksz) >= | ||
1250 | card->ext_csd.data_tag_unit_size); | ||
1251 | |||
1252 | /* | ||
1253 | * Pre-defined multi-block transfers are preferable to | 997 | * Pre-defined multi-block transfers are preferable to |
1254 | * open ended-ones (and necessary for reliable writes). | 998 | * open ended-ones (and necessary for reliable writes). |
1255 | * However, it is not sufficient to just send CMD23, | 999 | * However, it is not sufficient to just send CMD23, |
@@ -1267,13 +1011,13 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, | |||
1267 | * We'll avoid using CMD23-bounded multiblock writes for | 1011 | * We'll avoid using CMD23-bounded multiblock writes for |
1268 | * these, while retaining features like reliable writes. | 1012 | * these, while retaining features like reliable writes. |
1269 | */ | 1013 | */ |
1270 | if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) && | 1014 | |
1271 | (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) || | 1015 | if ((md->flags & MMC_BLK_CMD23) && |
1272 | do_data_tag)) { | 1016 | mmc_op_multi(brq->cmd.opcode) && |
1017 | (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) { | ||
1273 | brq->sbc.opcode = MMC_SET_BLOCK_COUNT; | 1018 | brq->sbc.opcode = MMC_SET_BLOCK_COUNT; |
1274 | brq->sbc.arg = brq->data.blocks | | 1019 | brq->sbc.arg = brq->data.blocks | |
1275 | (do_rel_wr ? (1 << 31) : 0) | | 1020 | (do_rel_wr ? (1 << 31) : 0); |
1276 | (do_data_tag ? (1 << 29) : 0); | ||
1277 | brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; | 1021 | brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; |
1278 | brq->mrq.sbc = &brq->sbc; | 1022 | brq->mrq.sbc = &brq->sbc; |
1279 | } | 1023 | } |
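
The SET_BLOCK_COUNT argument built above packs the transfer length and feature bits into a single word: the block count in the low bits, bit 31 for a reliable write and, in the left-hand version only, bit 29 for the data tag. A small helper sketch of that layout; the 16-bit width of the count field comes from the eMMC specification, not from this hunk.

```c
/* CMD23 (SET_BLOCK_COUNT) argument, bit positions as in the hunk above. */
static u32 cmd23_arg(unsigned int blocks, bool rel_wr, bool data_tag)
{
	return (blocks & 0xffff) |		/* block count (low 16 bits) */
	       (rel_wr ? 1u << 31 : 0) |	/* reliable write request */
	       (data_tag ? 1u << 29 : 0);	/* data tag (removed version) */
}
```
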
@@ -1308,40 +1052,15 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, | |||
1308 | mmc_queue_bounce_pre(mqrq); | 1052 | mmc_queue_bounce_pre(mqrq); |
1309 | } | 1053 | } |
1310 | 1054 | ||
1311 | static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, | ||
1312 | struct mmc_blk_request *brq, struct request *req, | ||
1313 | int ret) | ||
1314 | { | ||
1315 | /* | ||
1316 | * If this is an SD card and we're writing, we can first | ||
1317 | * mark the known good sectors as ok. | ||
1318 | * | ||
1319 | * If the card is not SD, we can still ok written sectors | ||
1320 | * as reported by the controller (which might be less than | ||
1321 | * the real number of written sectors, but never more). | ||
1322 | */ | ||
1323 | if (mmc_card_sd(card)) { | ||
1324 | u32 blocks; | ||
1325 | |||
1326 | blocks = mmc_sd_num_wr_blocks(card); | ||
1327 | if (blocks != (u32)-1) { | ||
1328 | ret = blk_end_request(req, 0, blocks << 9); | ||
1329 | } | ||
1330 | } else { | ||
1331 | ret = blk_end_request(req, 0, brq->data.bytes_xfered); | ||
1332 | } | ||
1333 | return ret; | ||
1334 | } | ||
1335 | |||
1336 | static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) | 1055 | static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) |
1337 | { | 1056 | { |
1338 | struct mmc_blk_data *md = mq->data; | 1057 | struct mmc_blk_data *md = mq->data; |
1339 | struct mmc_card *card = md->queue.card; | 1058 | struct mmc_card *card = md->queue.card; |
1340 | struct mmc_blk_request *brq = &mq->mqrq_cur->brq; | 1059 | struct mmc_blk_request *brq = &mq->mqrq_cur->brq; |
1341 | int ret = 1, disable_multi = 0, retry = 0, type; | 1060 | int ret = 1, disable_multi = 0, retry = 0; |
1342 | enum mmc_blk_status status; | 1061 | enum mmc_blk_status status; |
1343 | struct mmc_queue_req *mq_rq; | 1062 | struct mmc_queue_req *mq_rq; |
1344 | struct request *req = rqc; | 1063 | struct request *req; |
1345 | struct mmc_async_req *areq; | 1064 | struct mmc_async_req *areq; |
1346 | 1065 | ||
1347 | if (!rqc && !mq->mqrq_prev->req) | 1066 | if (!rqc && !mq->mqrq_prev->req) |
@@ -1349,16 +1068,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) | |||
1349 | 1068 | ||
1350 | do { | 1069 | do { |
1351 | if (rqc) { | 1070 | if (rqc) { |
1352 | /* | ||
1353 | * When 4KB native sector is enabled, only 8 blocks | ||
1354 | * multiple read or write is allowed | ||
1355 | */ | ||
1356 | if ((brq->data.blocks & 0x07) && | ||
1357 | (card->ext_csd.data_sector_size == 4096)) { | ||
1358 | pr_err("%s: Transfer size is not 4KB sector size aligned\n", | ||
1359 | req->rq_disk->disk_name); | ||
1360 | goto cmd_abort; | ||
1361 | } | ||
1362 | mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); | 1071 | mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); |
1363 | areq = &mq->mqrq_cur->mmc_active; | 1072 | areq = &mq->mqrq_cur->mmc_active; |
1364 | } else | 1073 | } else |
@@ -1370,7 +1079,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) | |||
1370 | mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); | 1079 | mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); |
1371 | brq = &mq_rq->brq; | 1080 | brq = &mq_rq->brq; |
1372 | req = mq_rq->req; | 1081 | req = mq_rq->req; |
1373 | type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; | ||
1374 | mmc_queue_bounce_post(mq_rq); | 1082 | mmc_queue_bounce_post(mq_rq); |
1375 | 1083 | ||
1376 | switch (status) { | 1084 | switch (status) { |
@@ -1379,16 +1087,18 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) | |||
1379 | /* | 1087 | /* |
1380 | * A block was successfully transferred. | 1088 | * A block was successfully transferred. |
1381 | */ | 1089 | */ |
1382 | mmc_blk_reset_success(md, type); | 1090 | spin_lock_irq(&md->lock); |
1383 | ret = blk_end_request(req, 0, | 1091 | ret = __blk_end_request(req, 0, |
1384 | brq->data.bytes_xfered); | 1092 | brq->data.bytes_xfered); |
1385 | /* | 1093 | spin_unlock_irq(&md->lock); |
1386 | * If the blk_end_request function returns non-zero even | ||
1387 | * though all data has been transferred and no errors | ||
1388 | * were returned by the host controller, it's a bug. | ||
1389 | */ | ||
1390 | if (status == MMC_BLK_SUCCESS && ret) { | 1094 | if (status == MMC_BLK_SUCCESS && ret) { |
1391 | pr_err("%s BUG rq_tot %d d_xfer %d\n", | 1095 | /* |
1096 | * The blk_end_request has returned non zero | ||
1097 | * even though all data is transfered and no | ||
1098 | * erros returned by host. | ||
1099 | * If this happen it's a bug. | ||
1100 | */ | ||
1101 | printk(KERN_ERR "%s BUG rq_tot %d d_xfer %d\n", | ||
1392 | __func__, blk_rq_bytes(req), | 1102 | __func__, blk_rq_bytes(req), |
1393 | brq->data.bytes_xfered); | 1103 | brq->data.bytes_xfered); |
1394 | rqc = NULL; | 1104 | rqc = NULL; |
@@ -1396,53 +1106,33 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) | |||
1396 | } | 1106 | } |
1397 | break; | 1107 | break; |
1398 | case MMC_BLK_CMD_ERR: | 1108 | case MMC_BLK_CMD_ERR: |
1399 | ret = mmc_blk_cmd_err(md, card, brq, req, ret); | 1109 | goto cmd_err; |
1400 | if (!mmc_blk_reset(md, card->host, type)) | 1110 | case MMC_BLK_RETRY_SINGLE: |
1401 | break; | 1111 | disable_multi = 1; |
1402 | goto cmd_abort; | 1112 | break; |
1403 | case MMC_BLK_RETRY: | 1113 | case MMC_BLK_RETRY: |
1404 | if (retry++ < 5) | 1114 | if (retry++ < 5) |
1405 | break; | 1115 | break; |
1406 | /* Fall through */ | ||
1407 | case MMC_BLK_ABORT: | 1116 | case MMC_BLK_ABORT: |
1408 | if (!mmc_blk_reset(md, card->host, type)) | ||
1409 | break; | ||
1410 | goto cmd_abort; | 1117 | goto cmd_abort; |
1411 | case MMC_BLK_DATA_ERR: { | 1118 | case MMC_BLK_DATA_ERR: |
1412 | int err; | ||
1413 | |||
1414 | err = mmc_blk_reset(md, card->host, type); | ||
1415 | if (!err) | ||
1416 | break; | ||
1417 | if (err == -ENODEV) | ||
1418 | goto cmd_abort; | ||
1419 | /* Fall through */ | ||
1420 | } | ||
1421 | case MMC_BLK_ECC_ERR: | ||
1422 | if (brq->data.blocks > 1) { | ||
1423 | /* Redo read one sector at a time */ | ||
1424 | pr_warning("%s: retrying using single block read\n", | ||
1425 | req->rq_disk->disk_name); | ||
1426 | disable_multi = 1; | ||
1427 | break; | ||
1428 | } | ||
1429 | /* | 1119 | /* |
1430 | * After an error, we redo I/O one sector at a | 1120 | * After an error, we redo I/O one sector at a |
1431 | * time, so we only reach here after trying to | 1121 | * time, so we only reach here after trying to |
1432 | * read a single sector. | 1122 | * read a single sector. |
1433 | */ | 1123 | */ |
1434 | ret = blk_end_request(req, -EIO, | 1124 | spin_lock_irq(&md->lock); |
1125 | ret = __blk_end_request(req, -EIO, | ||
1435 | brq->data.blksz); | 1126 | brq->data.blksz); |
1127 | spin_unlock_irq(&md->lock); | ||
1436 | if (!ret) | 1128 | if (!ret) |
1437 | goto start_new_req; | 1129 | goto start_new_req; |
1438 | break; | 1130 | break; |
1439 | case MMC_BLK_NOMEDIUM: | ||
1440 | goto cmd_abort; | ||
1441 | } | 1131 | } |
1442 | 1132 | ||
1443 | if (ret) { | 1133 | if (ret) { |
1444 | /* | 1134 | /* |
1445 | * In case of an incomplete request | 1135 | * In case of a non-complete request |
1446 | * prepare it again and resend. | 1136 | * prepare it again and resend. |
1447 | */ | 1137 | */ |
1448 | mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq); | 1138 | mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq); |
@@ -1450,13 +1140,40 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) | |||
1450 | } | 1140 | } |
1451 | } while (ret); | 1141 | } while (ret); |
1452 | 1142 | ||
1143 | if (brq->cmd.resp[0] & R1_URGENT_BKOPS) | ||
1144 | mmc_card_set_need_bkops(card); | ||
1145 | |||
1453 | return 1; | 1146 | return 1; |
1454 | 1147 | ||
1148 | cmd_err: | ||
1149 | /* | ||
1150 | * If this is an SD card and we're writing, we can first | ||
1151 | * mark the known good sectors as ok. | ||
1152 | * | ||
1153 | * If the card is not SD, we can still ok written sectors | ||
1154 | * as reported by the controller (which might be less than | ||
1155 | * the real number of written sectors, but never more). | ||
1156 | */ | ||
1157 | if (mmc_card_sd(card)) { | ||
1158 | u32 blocks; | ||
1159 | |||
1160 | blocks = mmc_sd_num_wr_blocks(card); | ||
1161 | if (blocks != (u32)-1) { | ||
1162 | spin_lock_irq(&md->lock); | ||
1163 | ret = __blk_end_request(req, 0, blocks << 9); | ||
1164 | spin_unlock_irq(&md->lock); | ||
1165 | } | ||
1166 | } else { | ||
1167 | spin_lock_irq(&md->lock); | ||
1168 | ret = __blk_end_request(req, 0, brq->data.bytes_xfered); | ||
1169 | spin_unlock_irq(&md->lock); | ||
1170 | } | ||
1171 | |||
1455 | cmd_abort: | 1172 | cmd_abort: |
1456 | if (mmc_card_removed(card)) | 1173 | spin_lock_irq(&md->lock); |
1457 | req->cmd_flags |= REQ_QUIET; | ||
1458 | while (ret) | 1174 | while (ret) |
1459 | ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); | 1175 | ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); |
1176 | spin_unlock_irq(&md->lock); | ||
1460 | 1177 | ||
1461 | start_new_req: | 1178 | start_new_req: |
1462 | if (rqc) { | 1179 | if (rqc) { |
@@ -1467,21 +1184,28 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) | |||
1467 | return 0; | 1184 | return 0; |
1468 | } | 1185 | } |
1469 | 1186 | ||
1187 | static int | ||
1188 | mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card); | ||
1189 | |||
1470 | static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | 1190 | static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) |
1471 | { | 1191 | { |
1472 | int ret; | 1192 | int ret; |
1473 | struct mmc_blk_data *md = mq->data; | 1193 | struct mmc_blk_data *md = mq->data; |
1474 | struct mmc_card *card = md->queue.card; | 1194 | struct mmc_card *card = md->queue.card; |
1475 | 1195 | ||
1196 | #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME | ||
1197 | if (mmc_bus_needs_resume(card->host)) { | ||
1198 | mmc_resume_bus(card->host); | ||
1199 | mmc_blk_set_blksize(md, card); | ||
1200 | } | ||
1201 | #endif | ||
1202 | |||
1476 | if (req && !mq->mqrq_prev->req) | 1203 | if (req && !mq->mqrq_prev->req) |
1477 | /* claim host only for the first request */ | 1204 | /* claim host only for the first request */ |
1478 | mmc_claim_host(card->host); | 1205 | mmc_claim_host(card->host); |
1479 | 1206 | ||
1480 | ret = mmc_blk_part_switch(card, md); | 1207 | ret = mmc_blk_part_switch(card, md); |
1481 | if (ret) { | 1208 | if (ret) { |
1482 | if (req) { | ||
1483 | blk_end_request_all(req, -EIO); | ||
1484 | } | ||
1485 | ret = 0; | 1209 | ret = 0; |
1486 | goto out; | 1210 | goto out; |
1487 | } | 1211 | } |
@@ -1490,8 +1214,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
1490 | /* complete ongoing async transfer before issuing discard */ | 1214 | /* complete ongoing async transfer before issuing discard */ |
1491 | if (card->host->areq) | 1215 | if (card->host->areq) |
1492 | mmc_blk_issue_rw_rq(mq, NULL); | 1216 | mmc_blk_issue_rw_rq(mq, NULL); |
1493 | if (req->cmd_flags & REQ_SECURE && | 1217 | if (req->cmd_flags & REQ_SECURE) |
1494 | !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN)) | ||
1495 | ret = mmc_blk_issue_secdiscard_rq(mq, req); | 1218 | ret = mmc_blk_issue_secdiscard_rq(mq, req); |
1496 | else | 1219 | else |
1497 | ret = mmc_blk_issue_discard_rq(mq, req); | 1220 | ret = mmc_blk_issue_discard_rq(mq, req); |
@@ -1501,6 +1224,10 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
1501 | mmc_blk_issue_rw_rq(mq, NULL); | 1224 | mmc_blk_issue_rw_rq(mq, NULL); |
1502 | ret = mmc_blk_issue_flush(mq, req); | 1225 | ret = mmc_blk_issue_flush(mq, req); |
1503 | } else { | 1226 | } else { |
1227 | /* Abort any current bk ops of eMMC card by issuing HPI */ | ||
1228 | if (mmc_card_mmc(mq->card) && mmc_card_doing_bkops(mq->card)) | ||
1229 | mmc_interrupt_hpi(mq->card); | ||
1230 | |||
1504 | ret = mmc_blk_issue_rw_rq(mq, req); | 1231 | ret = mmc_blk_issue_rw_rq(mq, req); |
1505 | } | 1232 | } |
1506 | 1233 | ||
@@ -1521,8 +1248,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, | |||
1521 | struct device *parent, | 1248 | struct device *parent, |
1522 | sector_t size, | 1249 | sector_t size, |
1523 | bool default_ro, | 1250 | bool default_ro, |
1524 | const char *subname, | 1251 | const char *subname) |
1525 | int area_type) | ||
1526 | { | 1252 | { |
1527 | struct mmc_blk_data *md; | 1253 | struct mmc_blk_data *md; |
1528 | int devidx, ret; | 1254 | int devidx, ret; |
@@ -1547,12 +1273,11 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, | |||
1547 | if (!subname) { | 1273 | if (!subname) { |
1548 | md->name_idx = find_first_zero_bit(name_use, max_devices); | 1274 | md->name_idx = find_first_zero_bit(name_use, max_devices); |
1549 | __set_bit(md->name_idx, name_use); | 1275 | __set_bit(md->name_idx, name_use); |
1550 | } else | 1276 | } |
1277 | else | ||
1551 | md->name_idx = ((struct mmc_blk_data *) | 1278 | md->name_idx = ((struct mmc_blk_data *) |
1552 | dev_to_disk(parent)->private_data)->name_idx; | 1279 | dev_to_disk(parent)->private_data)->name_idx; |
1553 | 1280 | ||
1554 | md->area_type = area_type; | ||
1555 | |||
1556 | /* | 1281 | /* |
1557 | * Set the read-only status based on the supported commands | 1282 | * Set the read-only status based on the supported commands |
1558 | * and the write protect switch. | 1283 | * and the write protect switch. |
@@ -1583,8 +1308,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, | |||
1583 | md->disk->queue = md->queue.queue; | 1308 | md->disk->queue = md->queue.queue; |
1584 | md->disk->driverfs_dev = parent; | 1309 | md->disk->driverfs_dev = parent; |
1585 | set_disk_ro(md->disk, md->read_only || default_ro); | 1310 | set_disk_ro(md->disk, md->read_only || default_ro); |
1586 | if (area_type & MMC_BLK_DATA_AREA_RPMB) | 1311 | md->disk->flags = GENHD_FL_EXT_DEVT; |
1587 | md->disk->flags |= GENHD_FL_NO_PART_SCAN; | ||
1588 | 1312 | ||
1589 | /* | 1313 | /* |
1590 | * As discussed on lkml, GENHD_FL_REMOVABLE should: | 1314 | * As discussed on lkml, GENHD_FL_REMOVABLE should: |
@@ -1601,12 +1325,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, | |||
1601 | snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), | 1325 | snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), |
1602 | "mmcblk%d%s", md->name_idx, subname ? subname : ""); | 1326 | "mmcblk%d%s", md->name_idx, subname ? subname : ""); |
1603 | 1327 | ||
1604 | if (mmc_card_mmc(card)) | 1328 | blk_queue_logical_block_size(md->queue.queue, 512); |
1605 | blk_queue_logical_block_size(md->queue.queue, | ||
1606 | card->ext_csd.data_sector_size); | ||
1607 | else | ||
1608 | blk_queue_logical_block_size(md->queue.queue, 512); | ||
1609 | |||
1610 | set_capacity(md->disk, size); | 1329 | set_capacity(md->disk, size); |
1611 | 1330 | ||
1612 | if (mmc_host_cmd23(card->host)) { | 1331 | if (mmc_host_cmd23(card->host)) { |
@@ -1653,8 +1372,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) | |||
1653 | size = card->csd.capacity << (card->csd.read_blkbits - 9); | 1372 | size = card->csd.capacity << (card->csd.read_blkbits - 9); |
1654 | } | 1373 | } |
1655 | 1374 | ||
1656 | md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL, | 1375 | md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL); |
1657 | MMC_BLK_DATA_AREA_MAIN); | ||
1658 | return md; | 1376 | return md; |
1659 | } | 1377 | } |
1660 | 1378 | ||
@@ -1663,14 +1381,13 @@ static int mmc_blk_alloc_part(struct mmc_card *card, | |||
1663 | unsigned int part_type, | 1381 | unsigned int part_type, |
1664 | sector_t size, | 1382 | sector_t size, |
1665 | bool default_ro, | 1383 | bool default_ro, |
1666 | const char *subname, | 1384 | const char *subname) |
1667 | int area_type) | ||
1668 | { | 1385 | { |
1669 | char cap_str[10]; | 1386 | char cap_str[10]; |
1670 | struct mmc_blk_data *part_md; | 1387 | struct mmc_blk_data *part_md; |
1671 | 1388 | ||
1672 | part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro, | 1389 | part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro, |
1673 | subname, area_type); | 1390 | subname); |
1674 | if (IS_ERR(part_md)) | 1391 | if (IS_ERR(part_md)) |
1675 | return PTR_ERR(part_md); | 1392 | return PTR_ERR(part_md); |
1676 | part_md->part_type = part_type; | 1393 | part_md->part_type = part_type; |
@@ -1678,53 +1395,60 @@ static int mmc_blk_alloc_part(struct mmc_card *card, | |||
1678 | 1395 | ||
1679 | string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2, | 1396 | string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2, |
1680 | cap_str, sizeof(cap_str)); | 1397 | cap_str, sizeof(cap_str)); |
1681 | pr_info("%s: %s %s partition %u %s\n", | 1398 | printk(KERN_INFO "%s: %s %s partition %u %s\n", |
1682 | part_md->disk->disk_name, mmc_card_id(card), | 1399 | part_md->disk->disk_name, mmc_card_id(card), |
1683 | mmc_card_name(card), part_md->part_type, cap_str); | 1400 | mmc_card_name(card), part_md->part_type, cap_str); |
1684 | return 0; | 1401 | return 0; |
1685 | } | 1402 | } |
1686 | 1403 | ||
1687 | /* MMC Physical partitions consist of two boot partitions and | ||
1688 | * up to four general purpose partitions. | ||
1689 | * For each partition enabled in EXT_CSD a block device will be allocated | ||
1690 | * to provide access to the partition. | ||
1691 | */ | ||
1692 | |||
1693 | static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) | 1404 | static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) |
1694 | { | 1405 | { |
1695 | int idx, ret = 0; | 1406 | int ret = 0; |
1696 | 1407 | ||
1697 | if (!mmc_card_mmc(card)) | 1408 | if (!mmc_card_mmc(card)) |
1698 | return 0; | 1409 | return 0; |
1699 | 1410 | ||
1700 | for (idx = 0; idx < card->nr_parts; idx++) { | 1411 | if (card->ext_csd.boot_size) { |
1701 | if (card->part[idx].size) { | 1412 | ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0, |
1702 | ret = mmc_blk_alloc_part(card, md, | 1413 | card->ext_csd.boot_size >> 9, |
1703 | card->part[idx].part_cfg, | 1414 | true, |
1704 | card->part[idx].size >> 9, | 1415 | "boot0"); |
1705 | card->part[idx].force_ro, | 1416 | if (ret) |
1706 | card->part[idx].name, | 1417 | return ret; |
1707 | card->part[idx].area_type); | 1418 | ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1, |
1708 | if (ret) | 1419 | card->ext_csd.boot_size >> 9, |
1709 | return ret; | 1420 | true, |
1710 | } | 1421 | "boot1"); |
1422 | if (ret) | ||
1423 | return ret; | ||
1711 | } | 1424 | } |
1712 | 1425 | ||
1713 | return ret; | 1426 | return ret; |
1714 | } | 1427 | } |
1715 | 1428 | ||
1716 | static void mmc_blk_remove_req(struct mmc_blk_data *md) | 1429 | static int |
1430 | mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card) | ||
1717 | { | 1431 | { |
1718 | struct mmc_card *card; | 1432 | int err; |
1719 | 1433 | ||
1434 | mmc_claim_host(card->host); | ||
1435 | err = mmc_set_blocklen(card, 512); | ||
1436 | mmc_release_host(card->host); | ||
1437 | |||
1438 | if (err) { | ||
1439 | printk(KERN_ERR "%s: unable to set block size to 512: %d\n", | ||
1440 | md->disk->disk_name, err); | ||
1441 | return -EINVAL; | ||
1442 | } | ||
1443 | |||
1444 | return 0; | ||
1445 | } | ||
1446 | |||
1447 | static void mmc_blk_remove_req(struct mmc_blk_data *md) | ||
1448 | { | ||
1720 | if (md) { | 1449 | if (md) { |
1721 | card = md->queue.card; | ||
1722 | if (md->disk->flags & GENHD_FL_UP) { | 1450 | if (md->disk->flags & GENHD_FL_UP) { |
1723 | device_remove_file(disk_to_dev(md->disk), &md->force_ro); | 1451 | device_remove_file(disk_to_dev(md->disk), &md->force_ro); |
1724 | if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && | ||
1725 | card->ext_csd.boot_ro_lockable) | ||
1726 | device_remove_file(disk_to_dev(md->disk), | ||
1727 | &md->power_ro_lock); | ||
1728 | 1452 | ||
1729 | /* Stop new requests from getting into the queue */ | 1453 | /* Stop new requests from getting into the queue */ |
1730 | del_gendisk(md->disk); | 1454 | del_gendisk(md->disk); |
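Editor's note: throughout the hunk above, sizes are tracked in 512-byte sectors — card->ext_csd.boot_size is shifted right by 9 before being handed to mmc_blk_alloc_part(), get_capacity() results are shifted left by 9 before pretty-printing, and mmc_blk_set_blksize() pins the transfer block length at 512 bytes. A quick standalone check of that arithmetic (the 4 MiB boot-area size below is just an example value, not taken from any card):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t boot_size_bytes = 4 * 1024 * 1024;	/* example: a 4 MiB boot area */
	uint64_t sectors = boot_size_bytes >> 9;	/* 512-byte sectors, as in boot_size >> 9 */

	printf("%llu bytes -> %llu sectors -> %llu bytes\n",
	       (unsigned long long)boot_size_bytes,
	       (unsigned long long)sectors,
	       (unsigned long long)(sectors << 9));
	return 0;
}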
@@ -1753,7 +1477,6 @@ static void mmc_blk_remove_parts(struct mmc_card *card, | |||
1753 | static int mmc_add_disk(struct mmc_blk_data *md) | 1477 | static int mmc_add_disk(struct mmc_blk_data *md) |
1754 | { | 1478 | { |
1755 | int ret; | 1479 | int ret; |
1756 | struct mmc_card *card = md->queue.card; | ||
1757 | 1480 | ||
1758 | add_disk(md->disk); | 1481 | add_disk(md->disk); |
1759 | md->force_ro.show = force_ro_show; | 1482 | md->force_ro.show = force_ro_show; |
@@ -1763,55 +1486,18 @@ static int mmc_add_disk(struct mmc_blk_data *md) | |||
1763 | md->force_ro.attr.mode = S_IRUGO | S_IWUSR; | 1486 | md->force_ro.attr.mode = S_IRUGO | S_IWUSR; |
1764 | ret = device_create_file(disk_to_dev(md->disk), &md->force_ro); | 1487 | ret = device_create_file(disk_to_dev(md->disk), &md->force_ro); |
1765 | if (ret) | 1488 | if (ret) |
1766 | goto force_ro_fail; | 1489 | del_gendisk(md->disk); |
1767 | |||
1768 | if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && | ||
1769 | card->ext_csd.boot_ro_lockable) { | ||
1770 | umode_t mode; | ||
1771 | |||
1772 | if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS) | ||
1773 | mode = S_IRUGO; | ||
1774 | else | ||
1775 | mode = S_IRUGO | S_IWUSR; | ||
1776 | |||
1777 | md->power_ro_lock.show = power_ro_lock_show; | ||
1778 | md->power_ro_lock.store = power_ro_lock_store; | ||
1779 | sysfs_attr_init(&md->power_ro_lock.attr); | ||
1780 | md->power_ro_lock.attr.mode = mode; | ||
1781 | md->power_ro_lock.attr.name = | ||
1782 | "ro_lock_until_next_power_on"; | ||
1783 | ret = device_create_file(disk_to_dev(md->disk), | ||
1784 | &md->power_ro_lock); | ||
1785 | if (ret) | ||
1786 | goto power_ro_lock_fail; | ||
1787 | } | ||
1788 | return ret; | ||
1789 | |||
1790 | power_ro_lock_fail: | ||
1791 | device_remove_file(disk_to_dev(md->disk), &md->force_ro); | ||
1792 | force_ro_fail: | ||
1793 | del_gendisk(md->disk); | ||
1794 | 1490 | ||
1795 | return ret; | 1491 | return ret; |
1796 | } | 1492 | } |
1797 | 1493 | ||
1798 | #define CID_MANFID_SANDISK 0x2 | ||
1799 | #define CID_MANFID_TOSHIBA 0x11 | ||
1800 | #define CID_MANFID_MICRON 0x13 | ||
1801 | #define CID_MANFID_SAMSUNG 0x15 | ||
1802 | |||
1803 | static const struct mmc_fixup blk_fixups[] = | 1494 | static const struct mmc_fixup blk_fixups[] = |
1804 | { | 1495 | { |
1805 | MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk, | 1496 | MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), |
1806 | MMC_QUIRK_INAND_CMD38), | 1497 | MMC_FIXUP("SEM04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), |
1807 | MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk, | 1498 | MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), |
1808 | MMC_QUIRK_INAND_CMD38), | 1499 | MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), |
1809 | MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk, | 1500 | MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), |
1810 | MMC_QUIRK_INAND_CMD38), | ||
1811 | MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk, | ||
1812 | MMC_QUIRK_INAND_CMD38), | ||
1813 | MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk, | ||
1814 | MMC_QUIRK_INAND_CMD38), | ||
1815 | 1501 | ||
1816 | /* | 1502 | /* |
1817 | * Some MMC cards experience performance degradation with CMD23 | 1503 | * Some MMC cards experience performance degradation with CMD23 |
@@ -1821,48 +1507,19 @@ static const struct mmc_fixup blk_fixups[] = | |||
1821 | * | 1507 | * |
1822 | * N.B. This doesn't affect SD cards. | 1508 | * N.B. This doesn't affect SD cards. |
1823 | */ | 1509 | */ |
1824 | MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, | 1510 | MMC_FIXUP("MMC08G", 0x11, CID_OEMID_ANY, add_quirk_mmc, |
1825 | MMC_QUIRK_BLK_NO_CMD23), | 1511 | MMC_QUIRK_BLK_NO_CMD23), |
1826 | MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, | 1512 | MMC_FIXUP("MMC16G", 0x11, CID_OEMID_ANY, add_quirk_mmc, |
1827 | MMC_QUIRK_BLK_NO_CMD23), | 1513 | MMC_QUIRK_BLK_NO_CMD23), |
1828 | MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, | 1514 | MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc, |
1829 | MMC_QUIRK_BLK_NO_CMD23), | 1515 | MMC_QUIRK_BLK_NO_CMD23), |
1830 | |||
1831 | /* | ||
1832 | * Some Micron MMC cards needs longer data read timeout than | ||
1833 | * indicated in CSD. | ||
1834 | */ | ||
1835 | MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, | ||
1836 | MMC_QUIRK_LONG_READ_TIME), | ||
1837 | |||
1838 | /* | ||
1839 | * On these Samsung MoviNAND parts, performing secure erase or | ||
1840 | * secure trim can result in unrecoverable corruption due to a | ||
1841 | * firmware bug. | ||
1842 | */ | ||
1843 | MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
1844 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
1845 | MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
1846 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
1847 | MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
1848 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
1849 | MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
1850 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
1851 | MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
1852 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
1853 | MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
1854 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
1855 | MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
1856 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
1857 | MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
1858 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
1859 | |||
1860 | END_FIXUP | 1516 | END_FIXUP |
1861 | }; | 1517 | }; |
1862 | 1518 | ||
1863 | static int mmc_blk_probe(struct mmc_card *card) | 1519 | static int mmc_blk_probe(struct mmc_card *card) |
1864 | { | 1520 | { |
1865 | struct mmc_blk_data *md, *part_md; | 1521 | struct mmc_blk_data *md, *part_md; |
1522 | int err; | ||
1866 | char cap_str[10]; | 1523 | char cap_str[10]; |
1867 | 1524 | ||
1868 | /* | 1525 | /* |
@@ -1875,9 +1532,13 @@ static int mmc_blk_probe(struct mmc_card *card) | |||
1875 | if (IS_ERR(md)) | 1532 | if (IS_ERR(md)) |
1876 | return PTR_ERR(md); | 1533 | return PTR_ERR(md); |
1877 | 1534 | ||
1535 | err = mmc_blk_set_blksize(md, card); | ||
1536 | if (err) | ||
1537 | goto out; | ||
1538 | |||
1878 | string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2, | 1539 | string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2, |
1879 | cap_str, sizeof(cap_str)); | 1540 | cap_str, sizeof(cap_str)); |
1880 | pr_info("%s: %s %s %s %s\n", | 1541 | printk(KERN_INFO "%s: %s %s %s %s\n", |
1881 | md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), | 1542 | md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), |
1882 | cap_str, md->read_only ? "(ro)" : ""); | 1543 | cap_str, md->read_only ? "(ro)" : ""); |
1883 | 1544 | ||
@@ -1887,6 +1548,9 @@ static int mmc_blk_probe(struct mmc_card *card) | |||
1887 | mmc_set_drvdata(card, md); | 1548 | mmc_set_drvdata(card, md); |
1888 | mmc_fixup_device(card, blk_fixups); | 1549 | mmc_fixup_device(card, blk_fixups); |
1889 | 1550 | ||
1551 | #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME | ||
1552 | mmc_set_bus_resume_policy(card->host, 1); | ||
1553 | #endif | ||
1890 | if (mmc_add_disk(md)) | 1554 | if (mmc_add_disk(md)) |
1891 | goto out; | 1555 | goto out; |
1892 | 1556 | ||
@@ -1899,7 +1563,7 @@ static int mmc_blk_probe(struct mmc_card *card) | |||
1899 | out: | 1563 | out: |
1900 | mmc_blk_remove_parts(card, md); | 1564 | mmc_blk_remove_parts(card, md); |
1901 | mmc_blk_remove_req(md); | 1565 | mmc_blk_remove_req(md); |
1902 | return 0; | 1566 | return err; |
1903 | } | 1567 | } |
1904 | 1568 | ||
1905 | static void mmc_blk_remove(struct mmc_card *card) | 1569 | static void mmc_blk_remove(struct mmc_card *card) |
@@ -1912,10 +1576,13 @@ static void mmc_blk_remove(struct mmc_card *card) | |||
1912 | mmc_release_host(card->host); | 1576 | mmc_release_host(card->host); |
1913 | mmc_blk_remove_req(md); | 1577 | mmc_blk_remove_req(md); |
1914 | mmc_set_drvdata(card, NULL); | 1578 | mmc_set_drvdata(card, NULL); |
1579 | #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME | ||
1580 | mmc_set_bus_resume_policy(card->host, 0); | ||
1581 | #endif | ||
1915 | } | 1582 | } |
1916 | 1583 | ||
1917 | #ifdef CONFIG_PM | 1584 | #ifdef CONFIG_PM |
1918 | static int mmc_blk_suspend(struct mmc_card *card) | 1585 | static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state) |
1919 | { | 1586 | { |
1920 | struct mmc_blk_data *part_md; | 1587 | struct mmc_blk_data *part_md; |
1921 | struct mmc_blk_data *md = mmc_get_drvdata(card); | 1588 | struct mmc_blk_data *md = mmc_get_drvdata(card); |
@@ -1935,6 +1602,10 @@ static int mmc_blk_resume(struct mmc_card *card) | |||
1935 | struct mmc_blk_data *md = mmc_get_drvdata(card); | 1602 | struct mmc_blk_data *md = mmc_get_drvdata(card); |
1936 | 1603 | ||
1937 | if (md) { | 1604 | if (md) { |
1605 | #ifndef CONFIG_MMC_BLOCK_DEFERRED_RESUME | ||
1606 | mmc_blk_set_blksize(md, card); | ||
1607 | #endif | ||
1608 | |||
1938 | /* | 1609 | /* |
1939 | * Resume involves the card going into idle state, | 1610 | * Resume involves the card going into idle state, |
1940 | * so current partition is always the main one. | 1611 | * so current partition is always the main one. |
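Editor's note: the CONFIG_MMC_BLOCK_DEFERRED_RESUME hunks above only wire the policy up — mmc_set_bus_resume_policy() is toggled at probe and remove, and mmc_blk_set_blksize() is skipped on resume when deferral is enabled; the deferral itself presumably lives in the core bus-resume code, which is not part of this diff. As a rough user-space sketch of the idea only (struct card, card_resume_hw() and the other names are invented for illustration, not kernel APIs), resume work is skipped at system resume and paid on the first I/O instead:

#include <stdbool.h>
#include <stdio.h>

struct card {
	bool suspended;		/* set while the card is suspended */
	bool defer_resume;	/* deferred-resume policy enabled */
};

static void card_resume_hw(struct card *c)
{
	printf("re-initialising card\n");	/* the expensive part */
	c->suspended = false;
}

/* System resume: skip the work if the policy says to defer it. */
static void card_system_resume(struct card *c)
{
	if (!c->defer_resume)
		card_resume_hw(c);
}

/* Block layer issue path: pay the resume cost on the first I/O only. */
static void card_issue_request(struct card *c, int sector)
{
	if (c->suspended)
		card_resume_hw(c);
	printf("request for sector %d\n", sector);
}

int main(void)
{
	struct card c = { .suspended = false, .defer_resume = true };

	c.suspended = true;		/* system suspend */
	card_system_resume(&c);		/* cheap: nothing re-initialised yet */
	card_issue_request(&c, 0);	/* first I/O triggers the real resume */
	card_issue_request(&c, 8);
	return 0;
}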
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index 759714ed6be..440b97d9e44 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/debugfs.h> | 22 | #include <linux/debugfs.h> |
23 | #include <linux/uaccess.h> | 23 | #include <linux/uaccess.h> |
24 | #include <linux/seq_file.h> | 24 | #include <linux/seq_file.h> |
25 | #include <linux/module.h> | ||
26 | 25 | ||
27 | #define RESULT_OK 0 | 26 | #define RESULT_OK 0 |
28 | #define RESULT_FAIL 1 | 27 | #define RESULT_FAIL 1 |
@@ -170,6 +169,96 @@ struct mmc_test_async_req { | |||
170 | struct mmc_test_card *test; | 169 | struct mmc_test_card *test; |
171 | }; | 170 | }; |
172 | 171 | ||
172 | struct mmc_test_parameter { | ||
173 | const char *name; | ||
174 | long value; | ||
175 | long (*exec)(struct mmc_test_card *); | ||
176 | const char *input; | ||
177 | }; | ||
178 | |||
179 | static long mmc_test_set_testcase(struct mmc_test_card *test); | ||
180 | static long mmc_test_set_clock(struct mmc_test_card *test); | ||
181 | static long mmc_test_set_bus_width(struct mmc_test_card *test); | ||
182 | static long mmc_test_set_timing(struct mmc_test_card *test); | ||
183 | |||
184 | |||
185 | static struct mmc_test_parameter mmc_test_parameter[] = { | ||
186 | { | ||
187 | .name = "Testcase Number", | ||
188 | .value = 1, | ||
189 | .exec = mmc_test_set_testcase, | ||
190 | .input = "-n", | ||
191 | }, | ||
192 | { | ||
193 | .name = "Clock Rate", | ||
194 | .value = -1, | ||
195 | .exec = mmc_test_set_clock, | ||
196 | .input = "-c", | ||
197 | }, | ||
198 | { | ||
199 | .name = "Bus Width", | ||
200 | .value = -1, | ||
201 | .exec = mmc_test_set_bus_width, | ||
202 | .input = "-b", | ||
203 | }, | ||
204 | { | ||
205 | .name = "Timing", | ||
206 | .value = -1, | ||
207 | .exec = mmc_test_set_timing, | ||
208 | .input = "-t", | ||
209 | }, | ||
210 | }; | ||
211 | |||
212 | static long mmc_test_set_testcase(struct mmc_test_card *test) | ||
213 | { | ||
214 | return 0; | ||
215 | } | ||
216 | |||
217 | static long mmc_test_set_clock(struct mmc_test_card *test) | ||
218 | { | ||
219 | long clock = mmc_test_parameter[1].value; | ||
220 | if (-1 == clock) | ||
221 | return test->card->host->ios.clock; | ||
222 | WARN_ON(clock < test->card->host->f_min); | ||
223 | if (clock > test->card->host->f_max) | ||
224 | clock = test->card->host->f_max; | ||
225 | |||
226 | test->card->host->ios.clock = clock; | ||
227 | |||
228 | return test->card->host->ios.clock; | ||
229 | } | ||
230 | |||
231 | static long mmc_test_set_bus_width(struct mmc_test_card *test) | ||
232 | { | ||
233 | long bus_width = mmc_test_parameter[2].value; | ||
234 | if (-1 == bus_width) | ||
235 | return test->card->host->ios.bus_width; | ||
236 | |||
237 | test->card->host->ios.bus_width = bus_width; | ||
238 | |||
239 | return test->card->host->ios.bus_width = bus_width; | ||
240 | } | ||
241 | |||
242 | static long mmc_test_set_timing(struct mmc_test_card *test) | ||
243 | { | ||
244 | long timing = mmc_test_parameter[3].value; | ||
245 | if (-1 == timing) | ||
246 | return test->card->host->ios.timing; | ||
247 | test->card->host->ios.timing = timing; | ||
248 | |||
249 | return test->card->host->ios.timing; | ||
250 | } | ||
251 | |||
252 | static void mmc_test_set_parameters(struct mmc_test_card *test) | ||
253 | { | ||
254 | int i; | ||
255 | for (i = 0; i < ARRAY_SIZE(mmc_test_parameter); i++) { | ||
256 | printk(KERN_INFO "Parameter[%s] set to [%ld]\n", | ||
257 | mmc_test_parameter[i].name, | ||
258 | mmc_test_parameter[i].exec(test)); | ||
259 | } | ||
260 | } | ||
261 | |||
173 | /*******************************************************************/ | 262 | /*******************************************************************/ |
174 | /* General helper functions */ | 263 | /* General helper functions */ |
175 | /*******************************************************************/ | 264 | /*******************************************************************/ |
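Editor's note: the new mmc_test code is table-driven — each parameter carries a name, a value, a handler and a command-line style option string, and mmc_test_set_parameters() simply walks the table calling each handler. A minimal user-space sketch of the same shape, with the clock handler clamping the requested rate to the host's limits much as mmc_test_set_clock() does (host_clock, host_f_min and host_f_max are stand-ins for the host fields, not kernel symbols):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Stand-ins for the host state the handlers consult. */
static long host_clock = 50000000, host_f_min = 400000, host_f_max = 52000000;

struct test_parameter {
	const char *name;
	long value;		/* -1 means "keep the current setting" */
	long (*exec)(long value);
	const char *input;	/* option string, e.g. "-c" */
};

static long set_testcase(long value) { return value; }

static long set_clock(long value)
{
	if (value == -1)
		return host_clock;
	if (value < host_f_min)
		value = host_f_min;
	if (value > host_f_max)
		value = host_f_max;
	return host_clock = value;
}

static struct test_parameter parameters[] = {
	{ "Testcase Number", 1,  set_testcase, "-n" },
	{ "Clock Rate",      -1, set_clock,    "-c" },
};

int main(void)
{
	size_t i;

	parameters[1].value = 2500000;	/* as if "-c 2500000" had been parsed */
	for (i = 0; i < ARRAY_SIZE(parameters); i++)
		printf("Parameter[%s] set to [%ld]\n", parameters[i].name,
		       parameters[i].exec(parameters[i].value));
	return 0;
}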
@@ -251,7 +340,7 @@ static int mmc_test_wait_busy(struct mmc_test_card *test) | |||
251 | if (!busy && mmc_test_busy(&cmd)) { | 340 | if (!busy && mmc_test_busy(&cmd)) { |
252 | busy = 1; | 341 | busy = 1; |
253 | if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) | 342 | if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) |
254 | pr_info("%s: Warning: Host did not " | 343 | printk(KERN_INFO "%s: Warning: Host did not " |
255 | "wait for busy state to end.\n", | 344 | "wait for busy state to end.\n", |
256 | mmc_hostname(test->card->host)); | 345 | mmc_hostname(test->card->host)); |
257 | } | 346 | } |
@@ -553,7 +642,7 @@ static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes, | |||
553 | rate = mmc_test_rate(bytes, &ts); | 642 | rate = mmc_test_rate(bytes, &ts); |
554 | iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */ | 643 | iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */ |
555 | 644 | ||
556 | pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu " | 645 | printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu " |
557 | "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n", | 646 | "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n", |
558 | mmc_hostname(test->card->host), sectors, sectors >> 1, | 647 | mmc_hostname(test->card->host), sectors, sectors >> 1, |
559 | (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec, | 648 | (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec, |
@@ -579,7 +668,7 @@ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes, | |||
579 | rate = mmc_test_rate(tot, &ts); | 668 | rate = mmc_test_rate(tot, &ts); |
580 | iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */ | 669 | iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */ |
581 | 670 | ||
582 | pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took " | 671 | printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took " |
583 | "%lu.%09lu seconds (%u kB/s, %u KiB/s, " | 672 | "%lu.%09lu seconds (%u kB/s, %u KiB/s, " |
584 | "%u.%02u IOPS, sg_len %d)\n", | 673 | "%u.%02u IOPS, sg_len %d)\n", |
585 | mmc_hostname(test->card->host), count, sectors, count, | 674 | mmc_hostname(test->card->host), count, sectors, count, |
@@ -1409,7 +1498,7 @@ static int mmc_test_multi_read_high(struct mmc_test_card *test) | |||
1409 | 1498 | ||
1410 | static int mmc_test_no_highmem(struct mmc_test_card *test) | 1499 | static int mmc_test_no_highmem(struct mmc_test_card *test) |
1411 | { | 1500 | { |
1412 | pr_info("%s: Highmem not configured - test skipped\n", | 1501 | printk(KERN_INFO "%s: Highmem not configured - test skipped\n", |
1413 | mmc_hostname(test->card->host)); | 1502 | mmc_hostname(test->card->host)); |
1414 | return 0; | 1503 | return 0; |
1415 | } | 1504 | } |
@@ -1436,7 +1525,7 @@ static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz, | |||
1436 | t->max_seg_sz, &t->sg_len, min_sg_len); | 1525 | t->max_seg_sz, &t->sg_len, min_sg_len); |
1437 | } | 1526 | } |
1438 | if (err) | 1527 | if (err) |
1439 | pr_info("%s: Failed to map sg list\n", | 1528 | printk(KERN_INFO "%s: Failed to map sg list\n", |
1440 | mmc_hostname(test->card->host)); | 1529 | mmc_hostname(test->card->host)); |
1441 | return err; | 1530 | return err; |
1442 | } | 1531 | } |
@@ -1581,7 +1670,6 @@ static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill) | |||
1581 | 1670 | ||
1582 | t->max_segs = test->card->host->max_segs; | 1671 | t->max_segs = test->card->host->max_segs; |
1583 | t->max_seg_sz = test->card->host->max_seg_size; | 1672 | t->max_seg_sz = test->card->host->max_seg_size; |
1584 | t->max_seg_sz -= t->max_seg_sz % 512; | ||
1585 | 1673 | ||
1586 | t->max_tfr = t->max_sz; | 1674 | t->max_tfr = t->max_sz; |
1587 | if (t->max_tfr >> 9 > test->card->host->max_blk_count) | 1675 | if (t->max_tfr >> 9 > test->card->host->max_blk_count) |
@@ -2137,7 +2225,7 @@ static int mmc_test_rw_multiple(struct mmc_test_card *test, | |||
2137 | 2225 | ||
2138 | return ret; | 2226 | return ret; |
2139 | err: | 2227 | err: |
2140 | pr_info("[%s] error\n", __func__); | 2228 | printk(KERN_INFO "[%s] error\n", __func__); |
2141 | return ret; | 2229 | return ret; |
2142 | } | 2230 | } |
2143 | 2231 | ||
@@ -2151,7 +2239,7 @@ static int mmc_test_rw_multiple_size(struct mmc_test_card *test, | |||
2151 | 2239 | ||
2152 | if (rw->do_nonblock_req && | 2240 | if (rw->do_nonblock_req && |
2153 | ((!pre_req && post_req) || (pre_req && !post_req))) { | 2241 | ((!pre_req && post_req) || (pre_req && !post_req))) { |
2154 | pr_info("error: only one of pre/post is defined\n"); | 2242 | printk(KERN_INFO "error: only one of pre/post is defined\n"); |
2155 | return -EINVAL; | 2243 | return -EINVAL; |
2156 | } | 2244 | } |
2157 | 2245 | ||
@@ -2330,31 +2418,6 @@ static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test) | |||
2330 | return mmc_test_rw_multiple_sg_len(test, &test_data); | 2418 | return mmc_test_rw_multiple_sg_len(test, &test_data); |
2331 | } | 2419 | } |
2332 | 2420 | ||
2333 | /* | ||
2334 | * eMMC hardware reset. | ||
2335 | */ | ||
2336 | static int mmc_test_hw_reset(struct mmc_test_card *test) | ||
2337 | { | ||
2338 | struct mmc_card *card = test->card; | ||
2339 | struct mmc_host *host = card->host; | ||
2340 | int err; | ||
2341 | |||
2342 | err = mmc_hw_reset_check(host); | ||
2343 | if (!err) | ||
2344 | return RESULT_OK; | ||
2345 | |||
2346 | if (err == -ENOSYS) | ||
2347 | return RESULT_FAIL; | ||
2348 | |||
2349 | if (err != -EOPNOTSUPP) | ||
2350 | return err; | ||
2351 | |||
2352 | if (!mmc_can_reset(card)) | ||
2353 | return RESULT_UNSUP_CARD; | ||
2354 | |||
2355 | return RESULT_UNSUP_HOST; | ||
2356 | } | ||
2357 | |||
2358 | static const struct mmc_test_case mmc_test_cases[] = { | 2421 | static const struct mmc_test_case mmc_test_cases[] = { |
2359 | { | 2422 | { |
2360 | .name = "Basic write (no data verification)", | 2423 | .name = "Basic write (no data verification)", |
@@ -2677,11 +2740,6 @@ static const struct mmc_test_case mmc_test_cases[] = { | |||
2677 | .run = mmc_test_profile_sglen_r_nonblock_perf, | 2740 | .run = mmc_test_profile_sglen_r_nonblock_perf, |
2678 | .cleanup = mmc_test_area_cleanup, | 2741 | .cleanup = mmc_test_area_cleanup, |
2679 | }, | 2742 | }, |
2680 | |||
2681 | { | ||
2682 | .name = "eMMC hardware reset", | ||
2683 | .run = mmc_test_hw_reset, | ||
2684 | }, | ||
2685 | }; | 2743 | }; |
2686 | 2744 | ||
2687 | static DEFINE_MUTEX(mmc_test_lock); | 2745 | static DEFINE_MUTEX(mmc_test_lock); |
@@ -2692,25 +2750,27 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase) | |||
2692 | { | 2750 | { |
2693 | int i, ret; | 2751 | int i, ret; |
2694 | 2752 | ||
2695 | pr_info("%s: Starting tests of card %s...\n", | 2753 | printk(KERN_INFO "%s: Starting tests of card %s...\n", |
2696 | mmc_hostname(test->card->host), mmc_card_id(test->card)); | 2754 | mmc_hostname(test->card->host), mmc_card_id(test->card)); |
2697 | 2755 | ||
2698 | mmc_claim_host(test->card->host); | 2756 | mmc_claim_host(test->card->host); |
2699 | 2757 | ||
2758 | mmc_test_set_parameters(test); | ||
2759 | |||
2700 | for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) { | 2760 | for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) { |
2701 | struct mmc_test_general_result *gr; | 2761 | struct mmc_test_general_result *gr; |
2702 | 2762 | ||
2703 | if (testcase && ((i + 1) != testcase)) | 2763 | if (testcase && ((i + 1) != testcase)) |
2704 | continue; | 2764 | continue; |
2705 | 2765 | ||
2706 | pr_info("%s: Test case %d. %s...\n", | 2766 | printk(KERN_INFO "%s: Test case %d. %s...\n", |
2707 | mmc_hostname(test->card->host), i + 1, | 2767 | mmc_hostname(test->card->host), i + 1, |
2708 | mmc_test_cases[i].name); | 2768 | mmc_test_cases[i].name); |
2709 | 2769 | ||
2710 | if (mmc_test_cases[i].prepare) { | 2770 | if (mmc_test_cases[i].prepare) { |
2711 | ret = mmc_test_cases[i].prepare(test); | 2771 | ret = mmc_test_cases[i].prepare(test); |
2712 | if (ret) { | 2772 | if (ret) { |
2713 | pr_info("%s: Result: Prepare " | 2773 | printk(KERN_INFO "%s: Result: Prepare " |
2714 | "stage failed! (%d)\n", | 2774 | "stage failed! (%d)\n", |
2715 | mmc_hostname(test->card->host), | 2775 | mmc_hostname(test->card->host), |
2716 | ret); | 2776 | ret); |
@@ -2740,25 +2800,25 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase) | |||
2740 | ret = mmc_test_cases[i].run(test); | 2800 | ret = mmc_test_cases[i].run(test); |
2741 | switch (ret) { | 2801 | switch (ret) { |
2742 | case RESULT_OK: | 2802 | case RESULT_OK: |
2743 | pr_info("%s: Result: OK\n", | 2803 | printk(KERN_INFO "%s: Result: OK\n", |
2744 | mmc_hostname(test->card->host)); | 2804 | mmc_hostname(test->card->host)); |
2745 | break; | 2805 | break; |
2746 | case RESULT_FAIL: | 2806 | case RESULT_FAIL: |
2747 | pr_info("%s: Result: FAILED\n", | 2807 | printk(KERN_INFO "%s: Result: FAILED\n", |
2748 | mmc_hostname(test->card->host)); | 2808 | mmc_hostname(test->card->host)); |
2749 | break; | 2809 | break; |
2750 | case RESULT_UNSUP_HOST: | 2810 | case RESULT_UNSUP_HOST: |
2751 | pr_info("%s: Result: UNSUPPORTED " | 2811 | printk(KERN_INFO "%s: Result: UNSUPPORTED " |
2752 | "(by host)\n", | 2812 | "(by host)\n", |
2753 | mmc_hostname(test->card->host)); | 2813 | mmc_hostname(test->card->host)); |
2754 | break; | 2814 | break; |
2755 | case RESULT_UNSUP_CARD: | 2815 | case RESULT_UNSUP_CARD: |
2756 | pr_info("%s: Result: UNSUPPORTED " | 2816 | printk(KERN_INFO "%s: Result: UNSUPPORTED " |
2757 | "(by card)\n", | 2817 | "(by card)\n", |
2758 | mmc_hostname(test->card->host)); | 2818 | mmc_hostname(test->card->host)); |
2759 | break; | 2819 | break; |
2760 | default: | 2820 | default: |
2761 | pr_info("%s: Result: ERROR (%d)\n", | 2821 | printk(KERN_INFO "%s: Result: ERROR (%d)\n", |
2762 | mmc_hostname(test->card->host), ret); | 2822 | mmc_hostname(test->card->host), ret); |
2763 | } | 2823 | } |
2764 | 2824 | ||
@@ -2769,7 +2829,7 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase) | |||
2769 | if (mmc_test_cases[i].cleanup) { | 2829 | if (mmc_test_cases[i].cleanup) { |
2770 | ret = mmc_test_cases[i].cleanup(test); | 2830 | ret = mmc_test_cases[i].cleanup(test); |
2771 | if (ret) { | 2831 | if (ret) { |
2772 | pr_info("%s: Warning: Cleanup " | 2832 | printk(KERN_INFO "%s: Warning: Cleanup " |
2773 | "stage failed! (%d)\n", | 2833 | "stage failed! (%d)\n", |
2774 | mmc_hostname(test->card->host), | 2834 | mmc_hostname(test->card->host), |
2775 | ret); | 2835 | ret); |
@@ -2779,7 +2839,7 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase) | |||
2779 | 2839 | ||
2780 | mmc_release_host(test->card->host); | 2840 | mmc_release_host(test->card->host); |
2781 | 2841 | ||
2782 | pr_info("%s: Tests completed.\n", | 2842 | printk(KERN_INFO "%s: Tests completed.\n", |
2783 | mmc_hostname(test->card->host)); | 2843 | mmc_hostname(test->card->host)); |
2784 | } | 2844 | } |
2785 | 2845 | ||
@@ -2809,6 +2869,23 @@ static void mmc_test_free_result(struct mmc_card *card) | |||
2809 | 2869 | ||
2810 | static LIST_HEAD(mmc_test_file_test); | 2870 | static LIST_HEAD(mmc_test_file_test); |
2811 | 2871 | ||
2872 | static void mmc_test_usage(struct seq_file *sf) | ||
2873 | { | ||
2874 | int i = 0; | ||
2875 | |||
2876 | seq_printf(sf, "\nHow to run test:" | ||
2877 | "\necho <testcase> [[param1 value1].... ] > test" | ||
2878 | "\nExample:: echo 1 -b 4 -c 2500000 -t 2" | ||
2879 | "\n\nSupported parameters in sequence\n"); | ||
2880 | |||
2881 | for (i = 0; i < ARRAY_SIZE(mmc_test_parameter); i++) { | ||
2882 | seq_printf(sf, "Parameter%d Name:[%s] option:[%s]\n", | ||
2883 | i + 1, mmc_test_parameter[i].name, | ||
2884 | mmc_test_parameter[i].input); | ||
2885 | } | ||
2886 | seq_printf(sf, "Pass \'-1\' to keep the default value\n\n\n"); | ||
2887 | } | ||
2888 | |||
2812 | static int mtf_test_show(struct seq_file *sf, void *data) | 2889 | static int mtf_test_show(struct seq_file *sf, void *data) |
2813 | { | 2890 | { |
2814 | struct mmc_card *card = (struct mmc_card *)sf->private; | 2891 | struct mmc_card *card = (struct mmc_card *)sf->private; |
@@ -2843,24 +2920,92 @@ static int mtf_test_open(struct inode *inode, struct file *file) | |||
2843 | return single_open(file, mtf_test_show, inode->i_private); | 2920 | return single_open(file, mtf_test_show, inode->i_private); |
2844 | } | 2921 | } |
2845 | 2922 | ||
2923 | static int mmc_test_extract_parameters(char *data_buf) | ||
2924 | { | ||
2925 | char *running = NULL; | ||
2926 | char *token = NULL; | ||
2927 | const char delimiters[] = " "; | ||
2928 | long value; | ||
2929 | int i; | ||
2930 | int set = 0; | ||
2931 | |||
2932 | running = data_buf; | ||
2933 | |||
2934 | /*Example: | ||
2935 | * echo <testcasenumber> [[param1 value1] [param1 value1]] > test | ||
2936 | * $] echo 1 > test | Execute testcase 1 | ||
2937 | * $] echo 1 -c 2500000 | execute testcase 1 and set clock to 2500000 | ||
2938 | * $] echo 1 -b 4 -c 2500000 -t 2 | | ||
2939 | * execute testcase 1, set clock to 2500000, set bus_width 4, | ||
2940 | * and set timing to 2 | ||
2941 | */ | ||
2942 | |||
2943 | while ((token = strsep(&running, delimiters))) { | ||
2944 | if (strict_strtol(token, 10, &value)) { | ||
2945 | /* [Param1 value1] combination | ||
2946 | * Compare with available param list | ||
2947 | */ | ||
2948 | for (i = 0; i < ARRAY_SIZE(mmc_test_parameter); i++) { | ||
2949 | if (!strcmp(mmc_test_parameter[i].input, | ||
2950 | token)) { | ||
2951 | /* Valid Option, extract following | ||
2952 | * value and save it | ||
2953 | */ | ||
2954 | token = strsep(&running, delimiters); | ||
2955 | if (strict_strtol(token, 10, | ||
2956 | &(mmc_test_parameter[i].value))) { | ||
2957 | |||
2958 | printk(KERN_ERR "wrong parameter value\n"); | ||
2959 | return -EINVAL; | ||
2960 | } else { | ||
2961 | break; | ||
2962 | } | ||
2963 | } | ||
2964 | } | ||
2965 | if (i == ARRAY_SIZE(mmc_test_parameter)) { | ||
2966 | printk(KERN_ERR "unknown mmc_test option\n"); | ||
2967 | return -EINVAL; | ||
2968 | } | ||
2969 | } else { | ||
2970 | /* Testcase number */ | ||
2971 | if (!set) { | ||
2972 | mmc_test_parameter[0].value = value; | ||
2973 | set = 1; | ||
2974 | } else { | ||
2975 | printk(KERN_ERR "invalid options"); | ||
2976 | return -EINVAL; | ||
2977 | } | ||
2978 | } | ||
2979 | } | ||
2980 | return 0; | ||
2981 | } | ||
2982 | |||
2846 | static ssize_t mtf_test_write(struct file *file, const char __user *buf, | 2983 | static ssize_t mtf_test_write(struct file *file, const char __user *buf, |
2847 | size_t count, loff_t *pos) | 2984 | size_t count, loff_t *pos) |
2848 | { | 2985 | { |
2849 | struct seq_file *sf = (struct seq_file *)file->private_data; | 2986 | struct seq_file *sf = (struct seq_file *)file->private_data; |
2850 | struct mmc_card *card = (struct mmc_card *)sf->private; | 2987 | struct mmc_card *card = (struct mmc_card *)sf->private; |
2851 | struct mmc_test_card *test; | 2988 | struct mmc_test_card *test; |
2852 | char lbuf[12]; | 2989 | char *data_buf = NULL; |
2853 | long testcase; | 2990 | long testcase; |
2854 | 2991 | ||
2855 | if (count >= sizeof(lbuf)) | 2992 | data_buf = kzalloc(count, GFP_KERNEL); |
2856 | return -EINVAL; | 2993 | if (data_buf == NULL) |
2994 | return -ENOMEM; | ||
2857 | 2995 | ||
2858 | if (copy_from_user(lbuf, buf, count)) | 2996 | if (copy_from_user(data_buf, buf, count)) { |
2997 | kfree(data_buf); | ||
2859 | return -EFAULT; | 2998 | return -EFAULT; |
2860 | lbuf[count] = '\0'; | 2999 | } |
3000 | data_buf[strlen(data_buf) - 1] = '\0'; | ||
3001 | if (mmc_test_extract_parameters(data_buf)) { | ||
3002 | mmc_test_usage(sf); | ||
3003 | return -EFAULT; | ||
3004 | } | ||
2861 | 3005 | ||
2862 | if (strict_strtol(lbuf, 10, &testcase)) | 3006 | kfree(data_buf); |
2863 | return -EINVAL; | 3007 | |
3008 | testcase = mmc_test_parameter[0].value; | ||
2864 | 3009 | ||
2865 | test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL); | 3010 | test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL); |
2866 | if (!test) | 3011 | if (!test) |
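Editor's note: mmc_test_extract_parameters() splits the string written to the debugfs file with strsep() — a bare number selects the testcase, and an option such as "-c" consumes the following token as its value. A user-space approximation of that parser, using strtol() in place of the kernel's strict_strtol() (the option names mirror the parameter table above; the parser itself is only a sketch):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct option_value {
	const char *input;
	long value;
};

static struct option_value options[] = {
	{ "-n", 1 }, { "-c", -1 }, { "-b", -1 }, { "-t", -1 },
};

#define NOPTIONS (sizeof(options) / sizeof(options[0]))

static int parse_line(char *line)
{
	char *running = line, *token, *end;
	size_t i;
	int testcase_set = 0;

	while ((token = strsep(&running, " ")) != NULL) {
		long value;

		if (*token == '\0')
			continue;		/* skip empty tokens */
		value = strtol(token, &end, 10);
		if (*end == '\0') {		/* bare number: the testcase */
			if (testcase_set)
				return -1;
			options[0].value = value;
			testcase_set = 1;
			continue;
		}
		for (i = 0; i < NOPTIONS; i++) {
			if (strcmp(options[i].input, token) == 0) {
				token = strsep(&running, " ");
				if (token == NULL)
					return -1;	/* option without a value */
				options[i].value = strtol(token, NULL, 10);
				break;
			}
		}
		if (i == NOPTIONS)
			return -1;		/* unknown option */
	}
	return 0;
}

int main(void)
{
	char line[] = "1 -b 4 -c 2500000 -t 2";

	if (parse_line(line) == 0)
		printf("testcase %ld, clock %ld, bus width %ld, timing %ld\n",
		       options[0].value, options[1].value,
		       options[2].value, options[3].value);
	return 0;
}

With the real debugfs file in place, echo 1 -b 4 -c 2500000 -t 2 > test would run test case 1 with a 4-bit bus, a 2.5 MHz clock and timing mode 2, matching the example in the comment above.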
@@ -2950,7 +3095,7 @@ static void mmc_test_free_dbgfs_file(struct mmc_card *card) | |||
2950 | } | 3095 | } |
2951 | 3096 | ||
2952 | static int __mmc_test_register_dbgfs_file(struct mmc_card *card, | 3097 | static int __mmc_test_register_dbgfs_file(struct mmc_card *card, |
2953 | const char *name, umode_t mode, const struct file_operations *fops) | 3098 | const char *name, mode_t mode, const struct file_operations *fops) |
2954 | { | 3099 | { |
2955 | struct dentry *file = NULL; | 3100 | struct dentry *file = NULL; |
2956 | struct mmc_test_dbgfs_file *df; | 3101 | struct mmc_test_dbgfs_file *df; |
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index fadf52eb5d7..5db38cbcea6 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c | |||
@@ -29,8 +29,6 @@ | |||
29 | */ | 29 | */ |
30 | static int mmc_prep_request(struct request_queue *q, struct request *req) | 30 | static int mmc_prep_request(struct request_queue *q, struct request *req) |
31 | { | 31 | { |
32 | struct mmc_queue *mq = q->queuedata; | ||
33 | |||
34 | /* | 32 | /* |
35 | * We only like normal block requests and discards. | 33 | * We only like normal block requests and discards. |
36 | */ | 34 | */ |
@@ -39,9 +37,6 @@ static int mmc_prep_request(struct request_queue *q, struct request *req) | |||
39 | return BLKPREP_KILL; | 37 | return BLKPREP_KILL; |
40 | } | 38 | } |
41 | 39 | ||
42 | if (mq && mmc_card_removed(mq->card)) | ||
43 | return BLKPREP_KILL; | ||
44 | |||
45 | req->cmd_flags |= REQ_DONTPREP; | 40 | req->cmd_flags |= REQ_DONTPREP; |
46 | 41 | ||
47 | return BLKPREP_OK; | 42 | return BLKPREP_OK; |
@@ -68,17 +63,13 @@ static int mmc_queue_thread(void *d) | |||
68 | if (req || mq->mqrq_prev->req) { | 63 | if (req || mq->mqrq_prev->req) { |
69 | set_current_state(TASK_RUNNING); | 64 | set_current_state(TASK_RUNNING); |
70 | mq->issue_fn(mq, req); | 65 | mq->issue_fn(mq, req); |
71 | 66 | } else { | |
72 | /* | 67 | /* |
73 | * Current request becomes previous request | 68 | * Since the queue is empty, start synchronous |
74 | * and vice versa. | 69 | * background ops if there is a request for it. |
75 | */ | 70 | */ |
76 | mq->mqrq_prev->brq.mrq.data = NULL; | 71 | if (mmc_card_need_bkops(mq->card)) |
77 | mq->mqrq_prev->req = NULL; | 72 | mmc_bkops_start(mq->card, true); |
78 | tmp = mq->mqrq_prev; | ||
79 | mq->mqrq_prev = mq->mqrq_cur; | ||
80 | mq->mqrq_cur = tmp; | ||
81 | } else { | ||
82 | if (kthread_should_stop()) { | 73 | if (kthread_should_stop()) { |
83 | set_current_state(TASK_RUNNING); | 74 | set_current_state(TASK_RUNNING); |
84 | break; | 75 | break; |
@@ -87,6 +78,13 @@ static int mmc_queue_thread(void *d) | |||
87 | schedule(); | 78 | schedule(); |
88 | down(&mq->thread_sem); | 79 | down(&mq->thread_sem); |
89 | } | 80 | } |
81 | |||
82 | /* Current request becomes previous request and vice versa. */ | ||
83 | mq->mqrq_prev->brq.mrq.data = NULL; | ||
84 | mq->mqrq_prev->req = NULL; | ||
85 | tmp = mq->mqrq_prev; | ||
86 | mq->mqrq_prev = mq->mqrq_cur; | ||
87 | mq->mqrq_cur = tmp; | ||
90 | } while (1); | 88 | } while (1); |
91 | up(&mq->thread_sem); | 89 | up(&mq->thread_sem); |
92 | 90 | ||
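Editor's note: the reworked mmc_queue_thread() now swaps the current and previous request slots at the end of every loop iteration rather than only after issuing a request, and starts synchronous background ops when the queue goes idle. The slot swap itself is plain double buffering — the two slots exist so that one request can be prepared while another is still in flight. A tiny standalone illustration of that swap (the request strings are made up):

#include <stdio.h>

struct req_slot {
	const char *pending;	/* request currently owned by this slot */
};

int main(void)
{
	struct req_slot slot_a = { NULL }, slot_b = { NULL };
	struct req_slot *cur = &slot_a, *prev = &slot_b, *tmp;
	const char *incoming[] = { "read 0", "write 8", NULL, "read 16" };
	size_t i;

	for (i = 0; i < sizeof(incoming) / sizeof(incoming[0]); i++) {
		cur->pending = incoming[i];
		if (cur->pending)
			printf("issue %s (previous slot holds %s)\n",
			       cur->pending,
			       prev->pending ? prev->pending : "nothing");
		else
			printf("queue idle: run background ops here\n");

		/* Current request becomes previous request and vice versa. */
		prev->pending = NULL;
		tmp = prev;
		prev = cur;
		cur = tmp;
	}
	return 0;
}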
@@ -99,7 +97,7 @@ static int mmc_queue_thread(void *d) | |||
99 | * on any queue on this host, and attempt to issue it. This may | 97 | * on any queue on this host, and attempt to issue it. This may |
100 | * not be the queue we were asked to process. | 98 | * not be the queue we were asked to process. |
101 | */ | 99 | */ |
102 | static void mmc_request_fn(struct request_queue *q) | 100 | static void mmc_request(struct request_queue *q) |
103 | { | 101 | { |
104 | struct mmc_queue *mq = q->queuedata; | 102 | struct mmc_queue *mq = q->queuedata; |
105 | struct request *req; | 103 | struct request *req; |
@@ -116,7 +114,7 @@ static void mmc_request_fn(struct request_queue *q) | |||
116 | wake_up_process(mq->thread); | 114 | wake_up_process(mq->thread); |
117 | } | 115 | } |
118 | 116 | ||
119 | static struct scatterlist *mmc_alloc_sg(int sg_len, int *err) | 117 | struct scatterlist *mmc_alloc_sg(int sg_len, int *err) |
120 | { | 118 | { |
121 | struct scatterlist *sg; | 119 | struct scatterlist *sg; |
122 | 120 | ||
@@ -142,13 +140,13 @@ static void mmc_queue_setup_discard(struct request_queue *q, | |||
142 | 140 | ||
143 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); | 141 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); |
144 | q->limits.max_discard_sectors = max_discard; | 142 | q->limits.max_discard_sectors = max_discard; |
145 | if (card->erased_byte == 0 && !mmc_can_discard(card)) | 143 | if (card->erased_byte == 0) |
146 | q->limits.discard_zeroes_data = 1; | 144 | q->limits.discard_zeroes_data = 1; |
147 | q->limits.discard_granularity = card->pref_erase << 9; | 145 | q->limits.discard_granularity = card->pref_erase << 9; |
148 | /* granularity must not be greater than max. discard */ | 146 | /* granularity must not be greater than max. discard */ |
149 | if (card->pref_erase > max_discard) | 147 | if (card->pref_erase > max_discard) |
150 | q->limits.discard_granularity = 0; | 148 | q->limits.discard_granularity = 0; |
151 | if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card)) | 149 | if (mmc_can_secure_erase_trim(card)) |
152 | queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q); | 150 | queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q); |
153 | } | 151 | } |
154 | 152 | ||
@@ -174,10 +172,12 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, | |||
174 | limit = *mmc_dev(host)->dma_mask; | 172 | limit = *mmc_dev(host)->dma_mask; |
175 | 173 | ||
176 | mq->card = card; | 174 | mq->card = card; |
177 | mq->queue = blk_init_queue(mmc_request_fn, lock); | 175 | mq->queue = blk_init_queue(mmc_request, lock); |
178 | if (!mq->queue) | 176 | if (!mq->queue) |
179 | return -ENOMEM; | 177 | return -ENOMEM; |
180 | 178 | ||
179 | memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur)); | ||
180 | memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev)); | ||
181 | mq->mqrq_cur = mqrq_cur; | 181 | mq->mqrq_cur = mqrq_cur; |
182 | mq->mqrq_prev = mqrq_prev; | 182 | mq->mqrq_prev = mqrq_prev; |
183 | mq->queue->queuedata = mq; | 183 | mq->queue->queuedata = mq; |
@@ -203,13 +203,13 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, | |||
203 | if (bouncesz > 512) { | 203 | if (bouncesz > 512) { |
204 | mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); | 204 | mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); |
205 | if (!mqrq_cur->bounce_buf) { | 205 | if (!mqrq_cur->bounce_buf) { |
206 | pr_warning("%s: unable to " | 206 | printk(KERN_WARNING "%s: unable to " |
207 | "allocate bounce cur buffer\n", | 207 | "allocate bounce cur buffer\n", |
208 | mmc_card_name(card)); | 208 | mmc_card_name(card)); |
209 | } | 209 | } |
210 | mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); | 210 | mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); |
211 | if (!mqrq_prev->bounce_buf) { | 211 | if (!mqrq_prev->bounce_buf) { |
212 | pr_warning("%s: unable to " | 212 | printk(KERN_WARNING "%s: unable to " |
213 | "allocate bounce prev buffer\n", | 213 | "allocate bounce prev buffer\n", |
214 | mmc_card_name(card)); | 214 | mmc_card_name(card)); |
215 | kfree(mqrq_cur->bounce_buf); | 215 | kfree(mqrq_cur->bounce_buf); |
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c index bd57a11acc7..c8c9edb3d7c 100644 --- a/drivers/mmc/card/sdio_uart.c +++ b/drivers/mmc/card/sdio_uart.c | |||
@@ -66,6 +66,8 @@ struct uart_icount { | |||
66 | 66 | ||
67 | struct sdio_uart_port { | 67 | struct sdio_uart_port { |
68 | struct tty_port port; | 68 | struct tty_port port; |
69 | struct kref kref; | ||
70 | struct tty_struct *tty; | ||
69 | unsigned int index; | 71 | unsigned int index; |
70 | struct sdio_func *func; | 72 | struct sdio_func *func; |
71 | struct mutex func_lock; | 73 | struct mutex func_lock; |
@@ -91,6 +93,7 @@ static int sdio_uart_add_port(struct sdio_uart_port *port) | |||
91 | { | 93 | { |
92 | int index, ret = -EBUSY; | 94 | int index, ret = -EBUSY; |
93 | 95 | ||
96 | kref_init(&port->kref); | ||
94 | mutex_init(&port->func_lock); | 97 | mutex_init(&port->func_lock); |
95 | spin_lock_init(&port->write_lock); | 98 | spin_lock_init(&port->write_lock); |
96 | if (kfifo_alloc(&port->xmit_fifo, FIFO_SIZE, GFP_KERNEL)) | 99 | if (kfifo_alloc(&port->xmit_fifo, FIFO_SIZE, GFP_KERNEL)) |
@@ -120,15 +123,23 @@ static struct sdio_uart_port *sdio_uart_port_get(unsigned index) | |||
120 | spin_lock(&sdio_uart_table_lock); | 123 | spin_lock(&sdio_uart_table_lock); |
121 | port = sdio_uart_table[index]; | 124 | port = sdio_uart_table[index]; |
122 | if (port) | 125 | if (port) |
123 | tty_port_get(&port->port); | 126 | kref_get(&port->kref); |
124 | spin_unlock(&sdio_uart_table_lock); | 127 | spin_unlock(&sdio_uart_table_lock); |
125 | 128 | ||
126 | return port; | 129 | return port; |
127 | } | 130 | } |
128 | 131 | ||
132 | static void sdio_uart_port_destroy(struct kref *kref) | ||
133 | { | ||
134 | struct sdio_uart_port *port = | ||
135 | container_of(kref, struct sdio_uart_port, kref); | ||
136 | kfifo_free(&port->xmit_fifo); | ||
137 | kfree(port); | ||
138 | } | ||
139 | |||
129 | static void sdio_uart_port_put(struct sdio_uart_port *port) | 140 | static void sdio_uart_port_put(struct sdio_uart_port *port) |
130 | { | 141 | { |
131 | tty_port_put(&port->port); | 142 | kref_put(&port->kref, sdio_uart_port_destroy); |
132 | } | 143 | } |
133 | 144 | ||
134 | static void sdio_uart_port_remove(struct sdio_uart_port *port) | 145 | static void sdio_uart_port_remove(struct sdio_uart_port *port) |
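Editor's note: this hunk replaces the tty_port refcounting with an explicit struct kref embedded in sdio_uart_port — sdio_uart_port_get() takes a reference under the table lock, sdio_uart_port_put() drops it, and the destroy callback frees the kfifo and the port itself. The same get/put/destroy shape in a self-contained sketch built on C11 atomics (the kernel's kref has more to it; this only shows the idea, and the names are illustrative):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct port {
	atomic_int refcount;
	char *xmit_fifo;	/* stand-in for the port's kfifo buffer */
};

static void port_destroy(struct port *p)
{
	free(p->xmit_fifo);
	free(p);
	printf("port destroyed\n");
}

static void port_get(struct port *p)
{
	atomic_fetch_add(&p->refcount, 1);
}

static void port_put(struct port *p)
{
	/* Whoever drops the last reference frees the port. */
	if (atomic_fetch_sub(&p->refcount, 1) == 1)
		port_destroy(p);
}

int main(void)
{
	struct port *p = calloc(1, sizeof(*p));

	if (p == NULL)
		return 1;
	atomic_init(&p->refcount, 1);	/* reference held by the port table */
	p->xmit_fifo = malloc(1024);

	port_get(p);	/* e.g. a tty being installed on the port */
	port_put(p);	/* tty released again */
	port_put(p);	/* port removed from the table: last ref, frees it */
	return 0;
}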
@@ -507,7 +518,7 @@ static void sdio_uart_check_modem_status(struct sdio_uart_port *port) | |||
507 | if (status & UART_MSR_DCTS) { | 518 | if (status & UART_MSR_DCTS) { |
508 | port->icount.cts++; | 519 | port->icount.cts++; |
509 | tty = tty_port_tty_get(&port->port); | 520 | tty = tty_port_tty_get(&port->port); |
510 | if (tty && (tty->termios.c_cflag & CRTSCTS)) { | 521 | if (tty && (tty->termios->c_cflag & CRTSCTS)) { |
511 | int cts = (status & UART_MSR_CTS); | 522 | int cts = (status & UART_MSR_CTS); |
512 | if (tty->hw_stopped) { | 523 | if (tty->hw_stopped) { |
513 | if (cts) { | 524 | if (cts) { |
@@ -660,12 +671,12 @@ static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty) | |||
660 | port->ier = UART_IER_RLSI|UART_IER_RDI|UART_IER_RTOIE|UART_IER_UUE; | 671 | port->ier = UART_IER_RLSI|UART_IER_RDI|UART_IER_RTOIE|UART_IER_UUE; |
661 | port->mctrl = TIOCM_OUT2; | 672 | port->mctrl = TIOCM_OUT2; |
662 | 673 | ||
663 | sdio_uart_change_speed(port, &tty->termios, NULL); | 674 | sdio_uart_change_speed(port, tty->termios, NULL); |
664 | 675 | ||
665 | if (tty->termios.c_cflag & CBAUD) | 676 | if (tty->termios->c_cflag & CBAUD) |
666 | sdio_uart_set_mctrl(port, TIOCM_RTS | TIOCM_DTR); | 677 | sdio_uart_set_mctrl(port, TIOCM_RTS | TIOCM_DTR); |
667 | 678 | ||
668 | if (tty->termios.c_cflag & CRTSCTS) | 679 | if (tty->termios->c_cflag & CRTSCTS) |
669 | if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS)) | 680 | if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS)) |
670 | tty->hw_stopped = 1; | 681 | tty->hw_stopped = 1; |
671 | 682 | ||
@@ -726,14 +737,6 @@ static void sdio_uart_shutdown(struct tty_port *tport) | |||
726 | sdio_uart_release_func(port); | 737 | sdio_uart_release_func(port); |
727 | } | 738 | } |
728 | 739 | ||
729 | static void sdio_uart_port_destroy(struct tty_port *tport) | ||
730 | { | ||
731 | struct sdio_uart_port *port = | ||
732 | container_of(tport, struct sdio_uart_port, port); | ||
733 | kfifo_free(&port->xmit_fifo); | ||
734 | kfree(port); | ||
735 | } | ||
736 | |||
737 | /** | 740 | /** |
738 | * sdio_uart_install - install method | 741 | * sdio_uart_install - install method |
739 | * @driver: the driver in use (sdio_uart in our case) | 742 | * @driver: the driver in use (sdio_uart in our case) |
@@ -747,12 +750,15 @@ static int sdio_uart_install(struct tty_driver *driver, struct tty_struct *tty) | |||
747 | { | 750 | { |
748 | int idx = tty->index; | 751 | int idx = tty->index; |
749 | struct sdio_uart_port *port = sdio_uart_port_get(idx); | 752 | struct sdio_uart_port *port = sdio_uart_port_get(idx); |
750 | int ret = tty_standard_install(driver, tty); | 753 | int ret = tty_init_termios(tty); |
751 | 754 | ||
752 | if (ret == 0) | 755 | if (ret == 0) { |
756 | tty_driver_kref_get(driver); | ||
757 | tty->count++; | ||
753 | /* This is the ref sdio_uart_port get provided */ | 758 | /* This is the ref sdio_uart_port get provided */ |
754 | tty->driver_data = port; | 759 | tty->driver_data = port; |
755 | else | 760 | driver->ttys[idx] = tty; |
761 | } else | ||
756 | sdio_uart_port_put(port); | 762 | sdio_uart_port_put(port); |
757 | return ret; | 763 | return ret; |
758 | } | 764 | } |
@@ -847,7 +853,7 @@ static void sdio_uart_throttle(struct tty_struct *tty) | |||
847 | { | 853 | { |
848 | struct sdio_uart_port *port = tty->driver_data; | 854 | struct sdio_uart_port *port = tty->driver_data; |
849 | 855 | ||
850 | if (!I_IXOFF(tty) && !(tty->termios.c_cflag & CRTSCTS)) | 856 | if (!I_IXOFF(tty) && !(tty->termios->c_cflag & CRTSCTS)) |
851 | return; | 857 | return; |
852 | 858 | ||
853 | if (sdio_uart_claim_func(port) != 0) | 859 | if (sdio_uart_claim_func(port) != 0) |
@@ -858,7 +864,7 @@ static void sdio_uart_throttle(struct tty_struct *tty) | |||
858 | sdio_uart_start_tx(port); | 864 | sdio_uart_start_tx(port); |
859 | } | 865 | } |
860 | 866 | ||
861 | if (tty->termios.c_cflag & CRTSCTS) | 867 | if (tty->termios->c_cflag & CRTSCTS) |
862 | sdio_uart_clear_mctrl(port, TIOCM_RTS); | 868 | sdio_uart_clear_mctrl(port, TIOCM_RTS); |
863 | 869 | ||
864 | sdio_uart_irq(port->func); | 870 | sdio_uart_irq(port->func); |
@@ -869,7 +875,7 @@ static void sdio_uart_unthrottle(struct tty_struct *tty) | |||
869 | { | 875 | { |
870 | struct sdio_uart_port *port = tty->driver_data; | 876 | struct sdio_uart_port *port = tty->driver_data; |
871 | 877 | ||
872 | if (!I_IXOFF(tty) && !(tty->termios.c_cflag & CRTSCTS)) | 878 | if (!I_IXOFF(tty) && !(tty->termios->c_cflag & CRTSCTS)) |
873 | return; | 879 | return; |
874 | 880 | ||
875 | if (sdio_uart_claim_func(port) != 0) | 881 | if (sdio_uart_claim_func(port) != 0) |
@@ -884,7 +890,7 @@ static void sdio_uart_unthrottle(struct tty_struct *tty) | |||
884 | } | 890 | } |
885 | } | 891 | } |
886 | 892 | ||
887 | if (tty->termios.c_cflag & CRTSCTS) | 893 | if (tty->termios->c_cflag & CRTSCTS) |
888 | sdio_uart_set_mctrl(port, TIOCM_RTS); | 894 | sdio_uart_set_mctrl(port, TIOCM_RTS); |
889 | 895 | ||
890 | sdio_uart_irq(port->func); | 896 | sdio_uart_irq(port->func); |
@@ -895,12 +901,12 @@ static void sdio_uart_set_termios(struct tty_struct *tty, | |||
895 | struct ktermios *old_termios) | 901 | struct ktermios *old_termios) |
896 | { | 902 | { |
897 | struct sdio_uart_port *port = tty->driver_data; | 903 | struct sdio_uart_port *port = tty->driver_data; |
898 | unsigned int cflag = tty->termios.c_cflag; | 904 | unsigned int cflag = tty->termios->c_cflag; |
899 | 905 | ||
900 | if (sdio_uart_claim_func(port) != 0) | 906 | if (sdio_uart_claim_func(port) != 0) |
901 | return; | 907 | return; |
902 | 908 | ||
903 | sdio_uart_change_speed(port, &tty->termios, old_termios); | 909 | sdio_uart_change_speed(port, tty->termios, old_termios); |
904 | 910 | ||
905 | /* Handle transition to B0 status */ | 911 | /* Handle transition to B0 status */ |
906 | if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD)) | 912 | if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD)) |
@@ -1042,7 +1048,6 @@ static const struct tty_port_operations sdio_uart_port_ops = { | |||
1042 | .carrier_raised = uart_carrier_raised, | 1048 | .carrier_raised = uart_carrier_raised, |
1043 | .shutdown = sdio_uart_shutdown, | 1049 | .shutdown = sdio_uart_shutdown, |
1044 | .activate = sdio_uart_activate, | 1050 | .activate = sdio_uart_activate, |
1045 | .destruct = sdio_uart_port_destroy, | ||
1046 | }; | 1051 | }; |
1047 | 1052 | ||
1048 | static const struct tty_operations sdio_uart_ops = { | 1053 | static const struct tty_operations sdio_uart_ops = { |
@@ -1077,7 +1082,7 @@ static int sdio_uart_probe(struct sdio_func *func, | |||
1077 | return -ENOMEM; | 1082 | return -ENOMEM; |
1078 | 1083 | ||
1079 | if (func->class == SDIO_CLASS_UART) { | 1084 | if (func->class == SDIO_CLASS_UART) { |
1080 | pr_warning("%s: need info on UART class basic setup\n", | 1085 | printk(KERN_WARNING "%s: need info on UART class basic setup\n", |
1081 | sdio_func_id(func)); | 1086 | sdio_func_id(func)); |
1082 | kfree(port); | 1087 | kfree(port); |
1083 | return -ENOSYS; | 1088 | return -ENOSYS; |
@@ -1096,23 +1101,23 @@ static int sdio_uart_probe(struct sdio_func *func, | |||
1096 | break; | 1101 | break; |
1097 | } | 1102 | } |
1098 | if (!tpl) { | 1103 | if (!tpl) { |
1099 | pr_warning( | 1104 | printk(KERN_WARNING |
1100 | "%s: can't find tuple 0x91 subtuple 0 (SUBTPL_SIOREG) for GPS class\n", | 1105 | "%s: can't find tuple 0x91 subtuple 0 (SUBTPL_SIOREG) for GPS class\n", |
1101 | sdio_func_id(func)); | 1106 | sdio_func_id(func)); |
1102 | kfree(port); | 1107 | kfree(port); |
1103 | return -EINVAL; | 1108 | return -EINVAL; |
1104 | } | 1109 | } |
1105 | pr_debug("%s: Register ID = 0x%02x, Exp ID = 0x%02x\n", | 1110 | printk(KERN_DEBUG "%s: Register ID = 0x%02x, Exp ID = 0x%02x\n", |
1106 | sdio_func_id(func), tpl->data[2], tpl->data[3]); | 1111 | sdio_func_id(func), tpl->data[2], tpl->data[3]); |
1107 | port->regs_offset = (tpl->data[4] << 0) | | 1112 | port->regs_offset = (tpl->data[4] << 0) | |
1108 | (tpl->data[5] << 8) | | 1113 | (tpl->data[5] << 8) | |
1109 | (tpl->data[6] << 16); | 1114 | (tpl->data[6] << 16); |
1110 | pr_debug("%s: regs offset = 0x%x\n", | 1115 | printk(KERN_DEBUG "%s: regs offset = 0x%x\n", |
1111 | sdio_func_id(func), port->regs_offset); | 1116 | sdio_func_id(func), port->regs_offset); |
1112 | port->uartclk = tpl->data[7] * 115200; | 1117 | port->uartclk = tpl->data[7] * 115200; |
1113 | if (port->uartclk == 0) | 1118 | if (port->uartclk == 0) |
1114 | port->uartclk = 115200; | 1119 | port->uartclk = 115200; |
1115 | pr_debug("%s: clk %d baudcode %u 4800-div %u\n", | 1120 | printk(KERN_DEBUG "%s: clk %d baudcode %u 4800-div %u\n", |
1116 | sdio_func_id(func), port->uartclk, | 1121 | sdio_func_id(func), port->uartclk, |
1117 | tpl->data[7], tpl->data[8] | (tpl->data[9] << 8)); | 1122 | tpl->data[7], tpl->data[8] | (tpl->data[9] << 8)); |
1118 | } else { | 1123 | } else { |
@@ -1130,8 +1135,8 @@ static int sdio_uart_probe(struct sdio_func *func, | |||
1130 | kfree(port); | 1135 | kfree(port); |
1131 | } else { | 1136 | } else { |
1132 | struct device *dev; | 1137 | struct device *dev; |
1133 | dev = tty_port_register_device(&port->port, | 1138 | dev = tty_register_device(sdio_uart_tty_driver, |
1134 | sdio_uart_tty_driver, port->index, &func->dev); | 1139 | port->index, &func->dev); |
1135 | if (IS_ERR(dev)) { | 1140 | if (IS_ERR(dev)) { |
1136 | sdio_uart_port_remove(port); | 1141 | sdio_uart_port_remove(port); |
1137 | ret = PTR_ERR(dev); | 1142 | ret = PTR_ERR(dev); |
@@ -1173,6 +1178,7 @@ static int __init sdio_uart_init(void) | |||
1173 | if (!tty_drv) | 1178 | if (!tty_drv) |
1174 | return -ENOMEM; | 1179 | return -ENOMEM; |
1175 | 1180 | ||
1181 | tty_drv->owner = THIS_MODULE; | ||
1176 | tty_drv->driver_name = "sdio_uart"; | 1182 | tty_drv->driver_name = "sdio_uart"; |
1177 | tty_drv->name = "ttySDIO"; | 1183 | tty_drv->name = "ttySDIO"; |
1178 | tty_drv->major = 0; /* dynamically allocated */ | 1184 | tty_drv->major = 0; /* dynamically allocated */ |
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig index ef103871517..85c2e1acd15 100644 --- a/drivers/mmc/core/Kconfig +++ b/drivers/mmc/core/Kconfig | |||
@@ -27,3 +27,20 @@ config MMC_CLKGATE | |||
27 | support handling this in order for it to be of any use. | 27 | support handling this in order for it to be of any use. |
28 | 28 | ||
29 | If unsure, say N. | 29 | If unsure, say N. |
30 | |||
31 | config MMC_EMBEDDED_SDIO | ||
32 | boolean "MMC embedded SDIO device support (EXPERIMENTAL)" | ||
33 | depends on EXPERIMENTAL | ||
34 | help | ||
35 | If you say Y here, support will be added for embedded SDIO | ||
36 | devices which do not contain the necessary enumeration | ||
37 | support in hardware to be properly detected. | ||
38 | |||
39 | config MMC_PARANOID_SD_INIT | ||
40 | bool "Enable paranoid SD card initialization (EXPERIMENTAL)" | ||
41 | depends on EXPERIMENTAL | ||
42 | help | ||
43 | If you say Y here, the MMC layer will be extra paranoid | ||
44 | about re-trying SD init requests. This can be a useful | ||
45 | work-around for buggy controllers and hardware. Enable | ||
46 | if you are experiencing issues with SD detection. | ||
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile index 38ed210ce2f..639501970b4 100644 --- a/drivers/mmc/core/Makefile +++ b/drivers/mmc/core/Makefile | |||
@@ -7,6 +7,6 @@ mmc_core-y := core.o bus.o host.o \ | |||
7 | mmc.o mmc_ops.o sd.o sd_ops.o \ | 7 | mmc.o mmc_ops.o sd.o sd_ops.o \ |
8 | sdio.o sdio_ops.o sdio_bus.o \ | 8 | sdio.o sdio_ops.o sdio_bus.o \ |
9 | sdio_cis.o sdio_io.o sdio_irq.o \ | 9 | sdio_cis.o sdio_io.o sdio_irq.o \ |
10 | quirks.o slot-gpio.o | 10 | quirks.o |
11 | 11 | ||
12 | mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o | 12 | mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o |
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index 420cb6753c1..f4bdbe6982c 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c | |||
@@ -11,11 +11,9 @@ | |||
11 | * MMC card bus driver model | 11 | * MMC card bus driver model |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/export.h> | ||
15 | #include <linux/device.h> | 14 | #include <linux/device.h> |
16 | #include <linux/err.h> | 15 | #include <linux/err.h> |
17 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
18 | #include <linux/stat.h> | ||
19 | #include <linux/pm_runtime.h> | 17 | #include <linux/pm_runtime.h> |
20 | 18 | ||
21 | #include <linux/mmc/card.h> | 19 | #include <linux/mmc/card.h> |
@@ -27,6 +25,10 @@ | |||
27 | 25 | ||
28 | #define to_mmc_driver(d) container_of(d, struct mmc_driver, drv) | 26 | #define to_mmc_driver(d) container_of(d, struct mmc_driver, drv) |
29 | 27 | ||
28 | #ifdef CONFIG_MMC_TEST | ||
29 | static struct mmc_driver *mmc_test_drv; | ||
30 | #endif | ||
31 | |||
30 | static ssize_t mmc_type_show(struct device *dev, | 32 | static ssize_t mmc_type_show(struct device *dev, |
31 | struct device_attribute *attr, char *buf) | 33 | struct device_attribute *attr, char *buf) |
32 | { | 34 | { |
@@ -109,6 +111,13 @@ static int mmc_bus_probe(struct device *dev) | |||
109 | struct mmc_driver *drv = to_mmc_driver(dev->driver); | 111 | struct mmc_driver *drv = to_mmc_driver(dev->driver); |
110 | struct mmc_card *card = mmc_dev_to_card(dev); | 112 | struct mmc_card *card = mmc_dev_to_card(dev); |
111 | 113 | ||
114 | #ifdef CONFIG_MMC_TEST | ||
115 | /* | ||
116 | * Hack: Explicitly invoking mmc_test probe to co-exist with mmcblk driver. | ||
117 | */ | ||
118 | mmc_test_drv->probe(card); | ||
119 | #endif | ||
120 | |||
112 | return drv->probe(card); | 121 | return drv->probe(card); |
113 | } | 122 | } |
114 | 123 | ||
@@ -122,19 +131,19 @@ static int mmc_bus_remove(struct device *dev) | |||
122 | return 0; | 131 | return 0; |
123 | } | 132 | } |
124 | 133 | ||
125 | #ifdef CONFIG_PM_SLEEP | 134 | static int mmc_bus_pm_suspend(struct device *dev) |
126 | static int mmc_bus_suspend(struct device *dev) | ||
127 | { | 135 | { |
128 | struct mmc_driver *drv = to_mmc_driver(dev->driver); | 136 | struct mmc_driver *drv = to_mmc_driver(dev->driver); |
129 | struct mmc_card *card = mmc_dev_to_card(dev); | 137 | struct mmc_card *card = mmc_dev_to_card(dev); |
130 | int ret = 0; | 138 | int ret = 0; |
139 | pm_message_t state = { PM_EVENT_SUSPEND }; | ||
131 | 140 | ||
132 | if (dev->driver && drv->suspend) | 141 | if (dev->driver && drv->suspend) |
133 | ret = drv->suspend(card); | 142 | ret = drv->suspend(card, state); |
134 | return ret; | 143 | return ret; |
135 | } | 144 | } |
136 | 145 | ||
137 | static int mmc_bus_resume(struct device *dev) | 146 | static int mmc_bus_pm_resume(struct device *dev) |
138 | { | 147 | { |
139 | struct mmc_driver *drv = to_mmc_driver(dev->driver); | 148 | struct mmc_driver *drv = to_mmc_driver(dev->driver); |
140 | struct mmc_card *card = mmc_dev_to_card(dev); | 149 | struct mmc_card *card = mmc_dev_to_card(dev); |
@@ -144,10 +153,8 @@ static int mmc_bus_resume(struct device *dev) | |||
144 | ret = drv->resume(card); | 153 | ret = drv->resume(card); |
145 | return ret; | 154 | return ret; |
146 | } | 155 | } |
147 | #endif | ||
148 | 156 | ||
149 | #ifdef CONFIG_PM_RUNTIME | 157 | #ifdef CONFIG_PM_RUNTIME |
150 | |||
151 | static int mmc_runtime_suspend(struct device *dev) | 158 | static int mmc_runtime_suspend(struct device *dev) |
152 | { | 159 | { |
153 | struct mmc_card *card = mmc_dev_to_card(dev); | 160 | struct mmc_card *card = mmc_dev_to_card(dev); |
@@ -166,13 +173,11 @@ static int mmc_runtime_idle(struct device *dev) | |||
166 | { | 173 | { |
167 | return pm_runtime_suspend(dev); | 174 | return pm_runtime_suspend(dev); |
168 | } | 175 | } |
169 | 176 | #endif /* CONFIG_PM_RUNTIME */ | |
170 | #endif /* !CONFIG_PM_RUNTIME */ | ||
171 | 177 | ||
172 | static const struct dev_pm_ops mmc_bus_pm_ops = { | 178 | static const struct dev_pm_ops mmc_bus_pm_ops = { |
173 | SET_RUNTIME_PM_OPS(mmc_runtime_suspend, mmc_runtime_resume, | 179 | SET_SYSTEM_SLEEP_PM_OPS(mmc_bus_pm_suspend, mmc_bus_pm_resume) |
174 | mmc_runtime_idle) | 180 | SET_RUNTIME_PM_OPS(mmc_runtime_suspend, mmc_runtime_resume, mmc_runtime_idle) |
175 | SET_SYSTEM_SLEEP_PM_OPS(mmc_bus_suspend, mmc_bus_resume) | ||
176 | }; | 181 | }; |
177 | 182 | ||
178 | static struct bus_type mmc_bus_type = { | 183 | static struct bus_type mmc_bus_type = { |
@@ -202,6 +207,10 @@ void mmc_unregister_bus(void) | |||
202 | int mmc_register_driver(struct mmc_driver *drv) | 207 | int mmc_register_driver(struct mmc_driver *drv) |
203 | { | 208 | { |
204 | drv->drv.bus = &mmc_bus_type; | 209 | drv->drv.bus = &mmc_bus_type; |
210 | #ifdef CONFIG_MMC_TEST | ||
211 | if (!strcmp(drv->drv.name, "mmc_test")) | ||
212 | mmc_test_drv = drv; | ||
213 | #endif | ||
205 | return driver_register(&drv->drv); | 214 | return driver_register(&drv->drv); |
206 | } | 215 | } |
207 | 216 | ||
@@ -225,7 +234,8 @@ static void mmc_release_card(struct device *dev) | |||
225 | 234 | ||
226 | sdio_free_common_cis(card); | 235 | sdio_free_common_cis(card); |
227 | 236 | ||
228 | kfree(card->info); | 237 | if (card->info) |
238 | kfree(card->info); | ||
229 | 239 | ||
230 | kfree(card); | 240 | kfree(card); |
231 | } | 241 | } |
@@ -260,15 +270,6 @@ int mmc_add_card(struct mmc_card *card) | |||
260 | { | 270 | { |
261 | int ret; | 271 | int ret; |
262 | const char *type; | 272 | const char *type; |
263 | const char *uhs_bus_speed_mode = ""; | ||
264 | static const char *const uhs_speeds[] = { | ||
265 | [UHS_SDR12_BUS_SPEED] = "SDR12 ", | ||
266 | [UHS_SDR25_BUS_SPEED] = "SDR25 ", | ||
267 | [UHS_SDR50_BUS_SPEED] = "SDR50 ", | ||
268 | [UHS_SDR104_BUS_SPEED] = "SDR104 ", | ||
269 | [UHS_DDR50_BUS_SPEED] = "DDR50 ", | ||
270 | }; | ||
271 | |||
272 | 273 | ||
273 | dev_set_name(&card->dev, "%s:%04x", mmc_hostname(card->host), card->rca); | 274 | dev_set_name(&card->dev, "%s:%04x", mmc_hostname(card->host), card->rca); |
274 | 275 | ||
@@ -298,24 +299,19 @@ int mmc_add_card(struct mmc_card *card) | |||
298 | break; | 299 | break; |
299 | } | 300 | } |
300 | 301 | ||
301 | if (mmc_sd_card_uhs(card) && | ||
302 | (card->sd_bus_speed < ARRAY_SIZE(uhs_speeds))) | ||
303 | uhs_bus_speed_mode = uhs_speeds[card->sd_bus_speed]; | ||
304 | |||
305 | if (mmc_host_is_spi(card->host)) { | 302 | if (mmc_host_is_spi(card->host)) { |
306 | pr_info("%s: new %s%s%s card on SPI\n", | 303 | printk(KERN_INFO "%s: new %s%s%s card on SPI\n", |
307 | mmc_hostname(card->host), | 304 | mmc_hostname(card->host), |
308 | mmc_card_highspeed(card) ? "high speed " : "", | 305 | mmc_card_highspeed(card) ? "high speed " : "", |
309 | mmc_card_ddr_mode(card) ? "DDR " : "", | 306 | mmc_card_ddr_mode(card) ? "DDR " : "", |
310 | type); | 307 | type); |
311 | } else { | 308 | } else { |
312 | pr_info("%s: new %s%s%s%s%s card at address %04x\n", | 309 | printk(KERN_INFO "%s: new %s%s%s card at address %04x\n", |
313 | mmc_hostname(card->host), | 310 | mmc_hostname(card->host), |
314 | mmc_card_uhs(card) ? "ultra high speed " : | 311 | mmc_sd_card_uhs(card) ? "ultra high speed " : |
315 | (mmc_card_highspeed(card) ? "high speed " : ""), | 312 | (mmc_card_highspeed(card) ? "high speed " : ""), |
316 | (mmc_card_hs200(card) ? "HS200 " : ""), | ||
317 | mmc_card_ddr_mode(card) ? "DDR " : "", | 313 | mmc_card_ddr_mode(card) ? "DDR " : "", |
318 | uhs_bus_speed_mode, type, card->rca); | 314 | type, card->rca); |
319 | } | 315 | } |
320 | 316 | ||
321 | #ifdef CONFIG_DEBUG_FS | 317 | #ifdef CONFIG_DEBUG_FS |
@@ -343,10 +339,10 @@ void mmc_remove_card(struct mmc_card *card) | |||
343 | 339 | ||
344 | if (mmc_card_present(card)) { | 340 | if (mmc_card_present(card)) { |
345 | if (mmc_host_is_spi(card->host)) { | 341 | if (mmc_host_is_spi(card->host)) { |
346 | pr_info("%s: SPI card removed\n", | 342 | printk(KERN_INFO "%s: SPI card removed\n", |
347 | mmc_hostname(card->host)); | 343 | mmc_hostname(card->host)); |
348 | } else { | 344 | } else { |
349 | pr_info("%s: card %04x removed\n", | 345 | printk(KERN_INFO "%s: card %04x removed\n", |
350 | mmc_hostname(card->host), card->rca); | 346 | mmc_hostname(card->host), card->rca); |
351 | } | 347 | } |
352 | device_del(&card->dev); | 348 | device_del(&card->dev); |
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index aaed7687cf0..2a288e936a8 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c | |||
@@ -5,6 +5,7 @@ | |||
5 | * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved. | 5 | * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved. |
6 | * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. | 6 | * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. |
7 | * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. | 7 | * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. |
8 | * Copyright (c) 2012 NVIDIA Corporation, All Rights Reserved. | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License version 2 as | 11 | * it under the terms of the GNU General Public License version 2 as |
@@ -24,9 +25,7 @@ | |||
24 | #include <linux/regulator/consumer.h> | 25 | #include <linux/regulator/consumer.h> |
25 | #include <linux/pm_runtime.h> | 26 | #include <linux/pm_runtime.h> |
26 | #include <linux/suspend.h> | 27 | #include <linux/suspend.h> |
27 | #include <linux/fault-inject.h> | 28 | #include <linux/wakelock.h> |
28 | #include <linux/random.h> | ||
29 | #include <linux/slab.h> | ||
30 | 29 | ||
31 | #include <linux/mmc/card.h> | 30 | #include <linux/mmc/card.h> |
32 | #include <linux/mmc/host.h> | 31 | #include <linux/mmc/host.h> |
@@ -42,24 +41,14 @@ | |||
42 | #include "sd_ops.h" | 41 | #include "sd_ops.h" |
43 | #include "sdio_ops.h" | 42 | #include "sdio_ops.h" |
44 | 43 | ||
45 | /* If the device is not responding */ | ||
46 | #define MMC_CORE_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ | ||
47 | |||
48 | /* | ||
49 | * Background operations can take a long time, depending on the housekeeping | ||
50 | * operations the card has to perform. | ||
51 | */ | ||
52 | #define MMC_BKOPS_MAX_TIMEOUT (4 * 60 * 1000) /* max time to wait in ms */ | ||
53 | |||
54 | static struct workqueue_struct *workqueue; | 44 | static struct workqueue_struct *workqueue; |
55 | static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; | ||
56 | 45 | ||
57 | /* | 46 | /* |
58 | * Enabling software CRCs on the data blocks can be a significant (30%) | 47 | * Enabling software CRCs on the data blocks can be a significant (30%) |
59 | * performance cost, and for other reasons may not always be desired. | 48 | * performance cost, and for other reasons may not always be desired. |
60 | * So we allow it it to be disabled. | 49 | * So we allow it it to be disabled. |
61 | */ | 50 | */ |
62 | bool use_spi_crc = 1; | 51 | int use_spi_crc = 1; |
63 | module_param(use_spi_crc, bool, 0); | 52 | module_param(use_spi_crc, bool, 0); |
64 | 53 | ||
65 | /* | 54 | /* |
@@ -69,9 +58,9 @@ module_param(use_spi_crc, bool, 0); | |||
69 | * overridden if necessary. | 58 | * overridden if necessary. |
70 | */ | 59 | */ |
71 | #ifdef CONFIG_MMC_UNSAFE_RESUME | 60 | #ifdef CONFIG_MMC_UNSAFE_RESUME |
72 | bool mmc_assume_removable; | 61 | int mmc_assume_removable; |
73 | #else | 62 | #else |
74 | bool mmc_assume_removable = 1; | 63 | int mmc_assume_removable = 1; |
75 | #endif | 64 | #endif |
76 | EXPORT_SYMBOL(mmc_assume_removable); | 65 | EXPORT_SYMBOL(mmc_assume_removable); |
77 | module_param_named(removable, mmc_assume_removable, bool, 0644); | 66 | module_param_named(removable, mmc_assume_removable, bool, 0644); |
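As background for the use_spi_crc knob above: in SPI mode the host computes CRC7 over command frames (and CRC16 over data blocks) in software, which is where the quoted ~30% cost comes from. Below is a self-contained, illustrative CRC7 (polynomial x^7 + x^3 + 1); it checks itself against the well-known CMD0 CRC byte 0x95 and is not the kernel's table-driven implementation.

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Bit-serial CRC7 over an augmented message, polynomial x^7 + x^3 + 1. */
    static uint8_t crc7(const uint8_t *buf, size_t len)
    {
        uint8_t crc = 0;
        size_t i;
        int bit;

        for (i = 0; i < len; i++) {
            for (bit = 7; bit >= 0; bit--) {
                crc = (uint8_t)((crc << 1) | ((buf[i] >> bit) & 1));
                if (crc & 0x80)
                    crc ^= 0x89;             /* reduce once degree 7 is reached */
            }
        }
        for (bit = 0; bit < 7; bit++) {      /* flush with 7 zero bits */
            crc = (uint8_t)(crc << 1);
            if (crc & 0x80)
                crc ^= 0x89;
        }
        return crc & 0x7f;
    }

    int main(void)
    {
        /* CMD0 with a zero argument; the expected final CRC byte is 0x95 */
        uint8_t cmd0[5] = { 0x40, 0x00, 0x00, 0x00, 0x00 };
        uint8_t byte = (uint8_t)((crc7(cmd0, sizeof(cmd0)) << 1) | 1);

        printf("CMD0 CRC byte: 0x%02X\n", byte);
        return byte == 0x95 ? 0 : 1;
    }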
@@ -96,43 +85,6 @@ static void mmc_flush_scheduled_work(void) | |||
96 | flush_workqueue(workqueue); | 85 | flush_workqueue(workqueue); |
97 | } | 86 | } |
98 | 87 | ||
99 | #ifdef CONFIG_FAIL_MMC_REQUEST | ||
100 | |||
101 | /* | ||
102 | * Internal function. Inject random data errors. | ||
103 | * If mmc_data is NULL no errors are injected. | ||
104 | */ | ||
105 | static void mmc_should_fail_request(struct mmc_host *host, | ||
106 | struct mmc_request *mrq) | ||
107 | { | ||
108 | struct mmc_command *cmd = mrq->cmd; | ||
109 | struct mmc_data *data = mrq->data; | ||
110 | static const int data_errors[] = { | ||
111 | -ETIMEDOUT, | ||
112 | -EILSEQ, | ||
113 | -EIO, | ||
114 | }; | ||
115 | |||
116 | if (!data) | ||
117 | return; | ||
118 | |||
119 | if (cmd->error || data->error || | ||
120 | !should_fail(&host->fail_mmc_request, data->blksz * data->blocks)) | ||
121 | return; | ||
122 | |||
123 | data->error = data_errors[random32() % ARRAY_SIZE(data_errors)]; | ||
124 | data->bytes_xfered = (random32() % (data->bytes_xfered >> 9)) << 9; | ||
125 | } | ||
126 | |||
127 | #else /* CONFIG_FAIL_MMC_REQUEST */ | ||
128 | |||
129 | static inline void mmc_should_fail_request(struct mmc_host *host, | ||
130 | struct mmc_request *mrq) | ||
131 | { | ||
132 | } | ||
133 | |||
134 | #endif /* CONFIG_FAIL_MMC_REQUEST */ | ||
135 | |||
136 | /** | 88 | /** |
137 | * mmc_request_done - finish processing an MMC request | 89 | * mmc_request_done - finish processing an MMC request |
138 | * @host: MMC host which completed request | 90 | * @host: MMC host which completed request |
@@ -151,16 +103,19 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) | |||
151 | cmd->retries = 0; | 103 | cmd->retries = 0; |
152 | } | 104 | } |
153 | 105 | ||
154 | if (err && cmd->retries && !mmc_card_removed(host->card)) { | 106 | if (err && cmd->retries) { |
155 | /* | 107 | pr_debug("%s: req failed (CMD%u): %d, retrying...\n", |
156 | * Request starter must handle retries - see | 108 | mmc_hostname(host), cmd->opcode, err); |
157 | * mmc_wait_for_req_done(). | ||
158 | */ | ||
159 | if (mrq->done) | ||
160 | mrq->done(mrq); | ||
161 | } else { | ||
162 | mmc_should_fail_request(host, mrq); | ||
163 | 109 | ||
110 | cmd->retries--; | ||
111 | cmd->error = 0; | ||
112 | if (mrq->data) { | ||
113 | mrq->data->error = 0; | ||
114 | if (mrq->stop) | ||
115 | mrq->stop->error = 0; | ||
116 | } | ||
117 | host->ops->request(host, mrq); | ||
118 | } else { | ||
164 | led_trigger_event(host->led, LED_OFF); | 119 | led_trigger_event(host->led, LED_OFF); |
165 | 120 | ||
166 | pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n", | 121 | pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n", |
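The retry branch introduced above clears every recorded error (command, data and stop phases) before resubmitting the request to the host driver; missing any of those resets would make the retry fail immediately. A toy model of that reset-then-resubmit shape, with hypothetical structures standing in for the mmc request types:

    #include <stdio.h>

    /* Hypothetical stand-ins for the mmc command/data/request structures. */
    struct cmd  { int opcode; int error; int retries; };
    struct data { int error; };
    struct req  { struct cmd cmd; struct data *data; struct cmd *stop; };

    /* fake host op: succeeds once the stale error state has been cleared */
    static void host_request(struct req *r)
    {
        r->cmd.error = (r->cmd.error || (r->data && r->data->error)) ? -5 : 0;
    }

    static void request_done(struct req *r)
    {
        while (r->cmd.error && r->cmd.retries--) {
            printf("CMD%d failed (%d), retrying\n", r->cmd.opcode, r->cmd.error);
            r->cmd.error = 0;                /* reset every recorded error */
            if (r->data) {
                r->data->error = 0;
                if (r->stop)
                    r->stop->error = 0;
            }
            host_request(r);                 /* hand the request back to the host */
        }
        printf("CMD%d done: %d\n", r->cmd.opcode, r->cmd.error);
    }

    int main(void)
    {
        struct data d = { .error = -84 };
        struct req r = { .cmd = { 18, -110, 3 }, .data = &d, .stop = NULL };

        request_done(&r);
        return 0;
    }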
@@ -199,12 +154,6 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) | |||
199 | struct scatterlist *sg; | 154 | struct scatterlist *sg; |
200 | #endif | 155 | #endif |
201 | 156 | ||
202 | if (mrq->sbc) { | ||
203 | pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n", | ||
204 | mmc_hostname(host), mrq->sbc->opcode, | ||
205 | mrq->sbc->arg, mrq->sbc->flags); | ||
206 | } | ||
207 | |||
208 | pr_debug("%s: starting CMD%u arg %08x flags %08x\n", | 157 | pr_debug("%s: starting CMD%u arg %08x flags %08x\n", |
209 | mmc_hostname(host), mrq->cmd->opcode, | 158 | mmc_hostname(host), mrq->cmd->opcode, |
210 | mrq->cmd->arg, mrq->cmd->flags); | 159 | mrq->cmd->arg, mrq->cmd->flags); |
@@ -255,107 +204,22 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) | |||
255 | host->ops->request(host, mrq); | 204 | host->ops->request(host, mrq); |
256 | } | 205 | } |
257 | 206 | ||
258 | /** | ||
259 | * mmc_start_bkops - start BKOPS for supported cards | ||
260 | * @card: MMC card to start BKOPS | ||
261 | * @from_exception: A flag to indicate if this function was | ||

262 | * called due to an exception raised by the card | ||
263 | * | ||
264 | * Start background operations whenever requested. | ||
265 | * When the urgent BKOPS bit is set in a R1 command response | ||
266 | * then background operations should be started immediately. | ||
267 | */ | ||
268 | void mmc_start_bkops(struct mmc_card *card, bool from_exception) | ||
269 | { | ||
270 | int err; | ||
271 | int timeout; | ||
272 | bool use_busy_signal; | ||
273 | |||
274 | BUG_ON(!card); | ||
275 | |||
276 | if (!card->ext_csd.bkops_en || mmc_card_doing_bkops(card)) | ||
277 | return; | ||
278 | |||
279 | err = mmc_read_bkops_status(card); | ||
280 | if (err) { | ||
281 | pr_err("%s: Failed to read bkops status: %d\n", | ||
282 | mmc_hostname(card->host), err); | ||
283 | return; | ||
284 | } | ||
285 | |||
286 | if (!card->ext_csd.raw_bkops_status) | ||
287 | return; | ||
288 | |||
289 | if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 && | ||
290 | from_exception) | ||
291 | return; | ||
292 | |||
293 | mmc_claim_host(card->host); | ||
294 | if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) { | ||
295 | timeout = MMC_BKOPS_MAX_TIMEOUT; | ||
296 | use_busy_signal = true; | ||
297 | } else { | ||
298 | timeout = 0; | ||
299 | use_busy_signal = false; | ||
300 | } | ||
301 | |||
302 | err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | ||
303 | EXT_CSD_BKOPS_START, 1, timeout, use_busy_signal); | ||
304 | if (err) { | ||
305 | pr_warn("%s: Error %d starting bkops\n", | ||
306 | mmc_hostname(card->host), err); | ||
307 | goto out; | ||
308 | } | ||
309 | |||
310 | /* | ||
311 | * For urgent bkops status (LEVEL_2 and more) | ||
312 | * bkops executed synchronously, otherwise | ||
313 | * the operation is in progress | ||
314 | */ | ||
315 | if (!use_busy_signal) | ||
316 | mmc_card_set_doing_bkops(card); | ||
317 | out: | ||
318 | mmc_release_host(card->host); | ||
319 | } | ||
320 | EXPORT_SYMBOL(mmc_start_bkops); | ||
321 | |||
322 | static void mmc_wait_done(struct mmc_request *mrq) | 207 | static void mmc_wait_done(struct mmc_request *mrq) |
323 | { | 208 | { |
324 | complete(&mrq->completion); | 209 | complete(&mrq->completion); |
325 | } | 210 | } |
326 | 211 | ||
327 | static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq) | 212 | static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq) |
328 | { | 213 | { |
329 | init_completion(&mrq->completion); | 214 | init_completion(&mrq->completion); |
330 | mrq->done = mmc_wait_done; | 215 | mrq->done = mmc_wait_done; |
331 | if (mmc_card_removed(host->card)) { | ||
332 | mrq->cmd->error = -ENOMEDIUM; | ||
333 | complete(&mrq->completion); | ||
334 | return -ENOMEDIUM; | ||
335 | } | ||
336 | mmc_start_request(host, mrq); | 216 | mmc_start_request(host, mrq); |
337 | return 0; | ||
338 | } | 217 | } |
339 | 218 | ||
340 | static void mmc_wait_for_req_done(struct mmc_host *host, | 219 | static void mmc_wait_for_req_done(struct mmc_host *host, |
341 | struct mmc_request *mrq) | 220 | struct mmc_request *mrq) |
342 | { | 221 | { |
343 | struct mmc_command *cmd; | 222 | wait_for_completion(&mrq->completion); |
344 | |||
345 | while (1) { | ||
346 | wait_for_completion(&mrq->completion); | ||
347 | |||
348 | cmd = mrq->cmd; | ||
349 | if (!cmd->error || !cmd->retries || | ||
350 | mmc_card_removed(host->card)) | ||
351 | break; | ||
352 | |||
353 | pr_debug("%s: req failed (CMD%u): %d, retrying...\n", | ||
354 | mmc_hostname(host), cmd->opcode, cmd->error); | ||
355 | cmd->retries--; | ||
356 | cmd->error = 0; | ||
357 | host->ops->request(host, mrq); | ||
358 | } | ||
359 | } | 223 | } |
360 | 224 | ||
361 | /** | 225 | /** |
@@ -372,11 +236,8 @@ static void mmc_wait_for_req_done(struct mmc_host *host, | |||
372 | static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq, | 236 | static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq, |
373 | bool is_first_req) | 237 | bool is_first_req) |
374 | { | 238 | { |
375 | if (host->ops->pre_req) { | 239 | if (host->ops->pre_req) |
376 | mmc_host_clk_hold(host); | ||
377 | host->ops->pre_req(host, mrq, is_first_req); | 240 | host->ops->pre_req(host, mrq, is_first_req); |
378 | mmc_host_clk_release(host); | ||
379 | } | ||
380 | } | 241 | } |
381 | 242 | ||
382 | /** | 243 | /** |
@@ -391,11 +252,8 @@ static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq, | |||
391 | static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq, | 252 | static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq, |
392 | int err) | 253 | int err) |
393 | { | 254 | { |
394 | if (host->ops->post_req) { | 255 | if (host->ops->post_req) |
395 | mmc_host_clk_hold(host); | ||
396 | host->ops->post_req(host, mrq, err); | 256 | host->ops->post_req(host, mrq, err); |
397 | mmc_host_clk_release(host); | ||
398 | } | ||
399 | } | 257 | } |
400 | 258 | ||
401 | /** | 259 | /** |
@@ -418,7 +276,6 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host, | |||
418 | struct mmc_async_req *areq, int *error) | 276 | struct mmc_async_req *areq, int *error) |
419 | { | 277 | { |
420 | int err = 0; | 278 | int err = 0; |
421 | int start_err = 0; | ||
422 | struct mmc_async_req *data = host->areq; | 279 | struct mmc_async_req *data = host->areq; |
423 | 280 | ||
424 | /* Prepare a new request */ | 281 | /* Prepare a new request */ |
@@ -428,31 +285,24 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host, | |||
428 | if (host->areq) { | 285 | if (host->areq) { |
429 | mmc_wait_for_req_done(host, host->areq->mrq); | 286 | mmc_wait_for_req_done(host, host->areq->mrq); |
430 | err = host->areq->err_check(host->card, host->areq); | 287 | err = host->areq->err_check(host->card, host->areq); |
431 | /* | 288 | if (err) { |
432 | * Check BKOPS urgency for each R1 response | 289 | mmc_post_req(host, host->areq->mrq, 0); |
433 | */ | 290 | if (areq) |
434 | if (host->card && mmc_card_mmc(host->card) && | 291 | mmc_post_req(host, areq->mrq, -EINVAL); |
435 | ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) || | 292 | |
436 | (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) && | 293 | host->areq = NULL; |
437 | (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) | 294 | goto out; |
438 | mmc_start_bkops(host->card, true); | 295 | } |
439 | } | 296 | } |
440 | 297 | ||
441 | if (!err && areq) | 298 | if (areq) |
442 | start_err = __mmc_start_req(host, areq->mrq); | 299 | __mmc_start_req(host, areq->mrq); |
443 | 300 | ||
444 | if (host->areq) | 301 | if (host->areq) |
445 | mmc_post_req(host, host->areq->mrq, 0); | 302 | mmc_post_req(host, host->areq->mrq, 0); |
446 | 303 | ||
447 | /* Cancel a prepared request if it was not started. */ | 304 | host->areq = areq; |
448 | if ((err || start_err) && areq) | 305 | out: |
449 | mmc_post_req(host, areq->mrq, -EINVAL); | ||
450 | |||
451 | if (err) | ||
452 | host->areq = NULL; | ||
453 | else | ||
454 | host->areq = areq; | ||
455 | |||
456 | if (error) | 306 | if (error) |
457 | *error = err; | 307 | *error = err; |
458 | return data; | 308 | return data; |
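mmc_start_req() is a two-slot pipeline: the caller hands in the next prepared request, the function waits for the one already in flight, post-processes it and returns it, so preparation of request N+1 overlaps the transfer of request N. A toy model of that hand-off (the transfer itself is only simulated, and all names are stand-ins):

    #include <stdio.h>
    #include <stddef.h>

    struct areq { int id; int prepared; };

    static void pre_req(struct areq *r)   { if (r) r->prepared = 1; }  /* e.g. map DMA */
    static void post_req(struct areq *r)  { if (r) r->prepared = 0; }  /* e.g. unmap  */
    static void issue(struct areq *r)     { if (r) printf("issue %d\n", r->id); }
    static void wait_done(struct areq *r) { if (r) printf("done %d\n", r->id); }

    /* returns the request that just completed (or NULL), like mmc_start_req() */
    static struct areq *start_req(struct areq **slot, struct areq *next)
    {
        struct areq *prev = *slot;

        pre_req(next);               /* prepare the new request up front */
        wait_done(prev);             /* block on the one already in flight */
        issue(next);                 /* start the new one */
        post_req(prev);              /* clean up the completed one */
        *slot = next;
        return prev;
    }

    int main(void)
    {
        struct areq a = { 1, 0 }, b = { 2, 0 };
        struct areq *in_flight = NULL;

        start_req(&in_flight, &a);   /* nothing in flight yet */
        start_req(&in_flight, &b);   /* b's preparation overlaps waiting for a */
        start_req(&in_flight, NULL); /* flush: wait for b */
        return 0;
    }

The two versions of the function in this hunk differ mainly in which of the two slots still needs mmc_post_req() once err_check() reports a failure.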
@@ -476,20 +326,63 @@ void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq) | |||
476 | EXPORT_SYMBOL(mmc_wait_for_req); | 326 | EXPORT_SYMBOL(mmc_wait_for_req); |
477 | 327 | ||
478 | /** | 328 | /** |
329 | * mmc_bkops_start - Issue start for mmc background ops | ||
330 | * @card: the MMC card associated with bkops | ||
331 | * @is_synchronous: is the bkops synchronous | ||
332 | * | ||
333 | * Issues background ops without the busy wait. | ||
334 | */ | ||
335 | int mmc_bkops_start(struct mmc_card *card, bool is_synchronous) | ||
336 | { | ||
337 | int err; | ||
338 | unsigned long flags; | ||
339 | |||
340 | BUG_ON(!card); | ||
341 | |||
342 | if (!card->ext_csd.bk_ops_en || mmc_card_doing_bkops(card)) | ||
343 | return 1; | ||
344 | |||
345 | mmc_claim_host(card->host); | ||
346 | err = mmc_send_bk_ops_cmd(card, is_synchronous); | ||
347 | if (err) | ||
348 | pr_err("%s: abort bk ops (%d error)\n", | ||
349 | mmc_hostname(card->host), err); | ||
350 | |||
351 | /* | ||
352 | * In case of asynchronous bkops, set card state | ||
353 | * to doing bk ops to ensure that HPI is issued before | ||
354 | * handling any new request in the queue. | ||
355 | */ | ||
356 | spin_lock_irqsave(&card->host->lock, flags); | ||
357 | mmc_card_clr_need_bkops(card); | ||
358 | if (!is_synchronous) | ||
359 | mmc_card_set_doing_bkops(card); | ||
360 | spin_unlock_irqrestore(&card->host->lock, flags); | ||
361 | |||
362 | mmc_release_host(card->host); | ||
363 | |||
364 | return err; | ||
365 | } | ||
366 | EXPORT_SYMBOL(mmc_bkops_start); | ||
367 | |||
368 | /** | ||
479 | * mmc_interrupt_hpi - Issue for High priority Interrupt | 369 | * mmc_interrupt_hpi - Issue for High priority Interrupt |
480 | * @card: the MMC card associated with the HPI transfer | 370 | * @card: the MMC card associated with the HPI transfer |
481 | * | 371 | * |
482 | * Issued High Priority Interrupt, and check for card status | 372 | * Issued High Priority Interrupt, and check for card status |
483 | * until out-of prg-state. | 373 | * util out-of prg-state. |
484 | */ | 374 | */ |
485 | int mmc_interrupt_hpi(struct mmc_card *card) | 375 | int mmc_interrupt_hpi(struct mmc_card *card) |
486 | { | 376 | { |
487 | int err; | 377 | int err; |
488 | u32 status; | 378 | u32 status; |
489 | unsigned long prg_wait; | 379 | unsigned long flags; |
490 | 380 | ||
491 | BUG_ON(!card); | 381 | BUG_ON(!card); |
492 | 382 | ||
383 | if (!mmc_card_mmc(card)) | ||
384 | return 1; | ||
385 | |||
493 | if (!card->ext_csd.hpi_en) { | 386 | if (!card->ext_csd.hpi_en) { |
494 | pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host)); | 387 | pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host)); |
495 | return 1; | 388 | return 1; |
@@ -502,41 +395,35 @@ int mmc_interrupt_hpi(struct mmc_card *card) | |||
502 | goto out; | 395 | goto out; |
503 | } | 396 | } |
504 | 397 | ||
505 | switch (R1_CURRENT_STATE(status)) { | 398 | /* |
506 | case R1_STATE_IDLE: | 399 | * If the card status is in PRG-state, we can send the HPI command. |
507 | case R1_STATE_READY: | 400 | */ |
508 | case R1_STATE_STBY: | 401 | if (R1_CURRENT_STATE(status) == R1_STATE_PRG) { |
509 | case R1_STATE_TRAN: | 402 | do { |
510 | /* | 403 | /* |
511 | * In idle and transfer states, HPI is not needed and the caller | 404 | * We don't know when the HPI command will finish |
512 | * can issue the next intended command immediately | 405 | * processing, so we need to resend HPI until out |
513 | */ | 406 | * of prg-state, and keep checking the card status |
514 | goto out; | 407 | * with SEND_STATUS. If a timeout error occurs when |
515 | case R1_STATE_PRG: | 408 | * sending the HPI command, we are already out of |
516 | break; | 409 | * prg-state. |
517 | default: | 410 | */ |
518 | /* In all other states, it's illegal to issue HPI */ | 411 | err = mmc_send_hpi_cmd(card, &status); |
519 | pr_debug("%s: HPI cannot be sent. Card state=%d\n", | 412 | if (err) |
520 | mmc_hostname(card->host), R1_CURRENT_STATE(status)); | 413 | pr_debug("%s: abort HPI (%d error)\n", |
521 | err = -EINVAL; | 414 | mmc_hostname(card->host), err); |
522 | goto out; | ||
523 | } | ||
524 | |||
525 | err = mmc_send_hpi_cmd(card, &status); | ||
526 | if (err) | ||
527 | goto out; | ||
528 | |||
529 | prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time); | ||
530 | do { | ||
531 | err = mmc_send_status(card, &status); | ||
532 | 415 | ||
533 | if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN) | 416 | err = mmc_send_status(card, &status); |
534 | break; | 417 | if (err) |
535 | if (time_after(jiffies, prg_wait)) | 418 | break; |
536 | err = -ETIMEDOUT; | 419 | } while (R1_CURRENT_STATE(status) == R1_STATE_PRG); |
537 | } while (!err); | 420 | } else |
421 | pr_debug("%s: Left prg-state\n", mmc_hostname(card->host)); | ||
538 | 422 | ||
539 | out: | 423 | out: |
424 | spin_lock_irqsave(&card->host->lock, flags); | ||
425 | mmc_card_clr_doing_bkops(card); | ||
426 | spin_unlock_irqrestore(&card->host->lock, flags); | ||
540 | mmc_release_host(card->host); | 427 | mmc_release_host(card->host); |
541 | return err; | 428 | return err; |
542 | } | 429 | } |
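The replacement HPI path above resends the HPI command and polls SEND_STATUS until the card leaves the programming state. The shape of that loop, modelled in plain C against a fake card that needs a few polls to finish (every name here is a stand-in):

    #include <stdio.h>

    enum { STATE_TRAN = 4, STATE_PRG = 7 };   /* subset of the R1 states */

    static int card_state = STATE_PRG;
    static int polls_left = 3;                /* fake card: busy for 3 polls */

    static int send_hpi(void) { return 0; }   /* pretend the HPI was accepted */

    static int send_status(int *state)
    {
        if (--polls_left <= 0)
            card_state = STATE_TRAN;
        *state = card_state;
        return 0;
    }

    int main(void)
    {
        int err = 0, state = STATE_PRG;

        while (state == STATE_PRG) {
            err = send_hpi();                 /* resend HPI while still in PRG */
            if (err)
                printf("HPI error %d, probably already out of PRG\n", err);

            err = send_status(&state);        /* then re-check the card state */
            if (err)
                break;
            printf("card state: %d\n", state);
        }
        return err;
    }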
@@ -554,7 +441,7 @@ EXPORT_SYMBOL(mmc_interrupt_hpi); | |||
554 | */ | 441 | */ |
555 | int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries) | 442 | int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries) |
556 | { | 443 | { |
557 | struct mmc_request mrq = {NULL}; | 444 | struct mmc_request mrq = {0}; |
558 | 445 | ||
559 | WARN_ON(!host->claimed); | 446 | WARN_ON(!host->claimed); |
560 | 447 | ||
@@ -572,64 +459,6 @@ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries | |||
572 | EXPORT_SYMBOL(mmc_wait_for_cmd); | 459 | EXPORT_SYMBOL(mmc_wait_for_cmd); |
573 | 460 | ||
574 | /** | 461 | /** |
575 | * mmc_stop_bkops - stop ongoing BKOPS | ||
576 | * @card: MMC card to check BKOPS | ||
577 | * | ||
578 | * Send HPI command to stop ongoing background operations to | ||
579 | * allow rapid servicing of foreground operations, e.g. read/ | ||
580 | * writes. Wait until the card comes out of the programming state | ||
581 | * to avoid errors in servicing read/write requests. | ||
582 | */ | ||
583 | int mmc_stop_bkops(struct mmc_card *card) | ||
584 | { | ||
585 | int err = 0; | ||
586 | |||
587 | BUG_ON(!card); | ||
588 | err = mmc_interrupt_hpi(card); | ||
589 | |||
590 | /* | ||
591 | * If err is EINVAL, we can't issue an HPI. | ||
592 | * It should complete the BKOPS. | ||
593 | */ | ||
594 | if (!err || (err == -EINVAL)) { | ||
595 | mmc_card_clr_doing_bkops(card); | ||
596 | err = 0; | ||
597 | } | ||
598 | |||
599 | return err; | ||
600 | } | ||
601 | EXPORT_SYMBOL(mmc_stop_bkops); | ||
602 | |||
603 | int mmc_read_bkops_status(struct mmc_card *card) | ||
604 | { | ||
605 | int err; | ||
606 | u8 *ext_csd; | ||
607 | |||
608 | /* | ||
609 | * In future work, we should consider storing the entire ext_csd. | ||
610 | */ | ||
611 | ext_csd = kmalloc(512, GFP_KERNEL); | ||
612 | if (!ext_csd) { | ||
613 | pr_err("%s: could not allocate buffer to receive the ext_csd.\n", | ||
614 | mmc_hostname(card->host)); | ||
615 | return -ENOMEM; | ||
616 | } | ||
617 | |||
618 | mmc_claim_host(card->host); | ||
619 | err = mmc_send_ext_csd(card, ext_csd); | ||
620 | mmc_release_host(card->host); | ||
621 | if (err) | ||
622 | goto out; | ||
623 | |||
624 | card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS]; | ||
625 | card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS]; | ||
626 | out: | ||
627 | kfree(ext_csd); | ||
628 | return err; | ||
629 | } | ||
630 | EXPORT_SYMBOL(mmc_read_bkops_status); | ||
631 | |||
632 | /** | ||
633 | * mmc_set_data_timeout - set the timeout for a data command | 462 | * mmc_set_data_timeout - set the timeout for a data command |
634 | * @data: data phase for command | 463 | * @data: data phase for command |
635 | * @card: the MMC card associated with the data transfer | 464 | * @card: the MMC card associated with the data transfer |
@@ -678,14 +507,10 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) | |||
678 | 507 | ||
679 | if (data->flags & MMC_DATA_WRITE) | 508 | if (data->flags & MMC_DATA_WRITE) |
680 | /* | 509 | /* |
681 | * The MMC spec "It is strongly recommended | 510 | * The limit is really 250 ms, but that is |
682 | * for hosts to implement more than 500ms | 511 | * insufficient for some crappy cards. |
683 | * timeout value even if the card indicates | ||
684 | * the 250ms maximum busy length." Even the | ||
685 | * previous value of 300ms is known to be | ||
686 | * insufficient for some cards. | ||
687 | */ | 512 | */ |
688 | limit_us = 3000000; | 513 | limit_us = 300000; |
689 | else | 514 | else |
690 | limit_us = 100000; | 515 | limit_us = 100000; |
691 | 516 | ||
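The hunk above is where the busy-wait budget gets applied: whatever timeout the card advertises is clamped to the host policy value limit_us, and once clamped the clock-count component is dropped. A freestanding sketch of that clamp with made-up numbers:

    #include <stdio.h>

    struct timeout { unsigned long long ns; unsigned int clks; };

    static void clamp_timeout(struct timeout *t, unsigned int clock_hz,
                              unsigned int limit_us)
    {
        /* total time the card asked for, in microseconds */
        unsigned long long us = t->ns / 1000 +
            (unsigned long long)t->clks * 1000000 / clock_hz;

        if (us > limit_us) {
            t->ns = (unsigned long long)limit_us * 1000;
            t->clks = 0;                      /* clock count folded into ns */
        }
    }

    int main(void)
    {
        struct timeout t = { 250ULL * 1000 * 1000, 5000 };  /* made-up values */

        clamp_timeout(&t, 25000000, 300000);  /* 25 MHz clock, 300 ms cap */
        printf("timeout: %llu ns + %u clks\n", t.ns, t.clks);

        t.ns = 900ULL * 1000 * 1000;          /* a card asking for 900 ms */
        clamp_timeout(&t, 25000000, 300000);
        printf("timeout: %llu ns + %u clks\n", t.ns, t.clks);
        return 0;
    }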
@@ -697,18 +522,6 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) | |||
697 | data->timeout_clks = 0; | 522 | data->timeout_clks = 0; |
698 | } | 523 | } |
699 | } | 524 | } |
700 | |||
701 | /* | ||
702 | * Some cards require longer data read timeout than indicated in CSD. | ||
703 | * Address this by setting the read timeout to a "reasonably high" | ||
704 | * value. For the cards tested, 300ms has proven enough. If necessary, | ||
705 | * this value can be increased if other problematic cards require this. | ||
706 | */ | ||
707 | if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) { | ||
708 | data->timeout_ns = 300000000; | ||
709 | data->timeout_clks = 0; | ||
710 | } | ||
711 | |||
712 | /* | 525 | /* |
713 | * Some cards need very high timeouts if driven in SPI mode. | 526 | * Some cards need very high timeouts if driven in SPI mode. |
714 | * The worst observed timeout was 900ms after writing a | 527 | * The worst observed timeout was 900ms after writing a |
@@ -755,6 +568,101 @@ unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz) | |||
755 | EXPORT_SYMBOL(mmc_align_data_size); | 568 | EXPORT_SYMBOL(mmc_align_data_size); |
756 | 569 | ||
757 | /** | 570 | /** |
571 | * mmc_host_enable - enable a host. | ||
572 | * @host: mmc host to enable | ||
573 | * | ||
574 | * Hosts that support power saving can use the 'enable' and 'disable' | ||
575 | * methods to exit and enter power saving states. For more information | ||
576 | * see comments for struct mmc_host_ops. | ||
577 | */ | ||
578 | int mmc_host_enable(struct mmc_host *host) | ||
579 | { | ||
580 | if (!(host->caps & MMC_CAP_DISABLE)) | ||
581 | return 0; | ||
582 | |||
583 | if (host->en_dis_recurs) | ||
584 | return 0; | ||
585 | |||
586 | if (host->nesting_cnt++) | ||
587 | return 0; | ||
588 | |||
589 | cancel_delayed_work_sync(&host->disable); | ||
590 | |||
591 | if (host->enabled) | ||
592 | return 0; | ||
593 | |||
594 | if (host->ops->enable) { | ||
595 | int err; | ||
596 | |||
597 | host->en_dis_recurs = 1; | ||
598 | err = host->ops->enable(host); | ||
599 | host->en_dis_recurs = 0; | ||
600 | |||
601 | if (err) { | ||
602 | pr_debug("%s: enable error %d\n", | ||
603 | mmc_hostname(host), err); | ||
604 | return err; | ||
605 | } | ||
606 | } | ||
607 | host->enabled = 1; | ||
608 | return 0; | ||
609 | } | ||
610 | EXPORT_SYMBOL(mmc_host_enable); | ||
611 | |||
612 | static int mmc_host_do_disable(struct mmc_host *host, int lazy) | ||
613 | { | ||
614 | if (host->ops->disable) { | ||
615 | int err; | ||
616 | |||
617 | host->en_dis_recurs = 1; | ||
618 | err = host->ops->disable(host, lazy); | ||
619 | host->en_dis_recurs = 0; | ||
620 | |||
621 | if (err < 0) { | ||
622 | pr_debug("%s: disable error %d\n", | ||
623 | mmc_hostname(host), err); | ||
624 | return err; | ||
625 | } | ||
626 | if (err > 0) { | ||
627 | unsigned long delay = msecs_to_jiffies(err); | ||
628 | |||
629 | mmc_schedule_delayed_work(&host->disable, delay); | ||
630 | } | ||
631 | } | ||
632 | host->enabled = 0; | ||
633 | return 0; | ||
634 | } | ||
635 | |||
636 | /** | ||
637 | * mmc_host_disable - disable a host. | ||
638 | * @host: mmc host to disable | ||
639 | * | ||
640 | * Hosts that support power saving can use the 'enable' and 'disable' | ||
641 | * methods to exit and enter power saving states. For more information | ||
642 | * see comments for struct mmc_host_ops. | ||
643 | */ | ||
644 | int mmc_host_disable(struct mmc_host *host) | ||
645 | { | ||
646 | int err; | ||
647 | |||
648 | if (!(host->caps & MMC_CAP_DISABLE)) | ||
649 | return 0; | ||
650 | |||
651 | if (host->en_dis_recurs) | ||
652 | return 0; | ||
653 | |||
654 | if (--host->nesting_cnt) | ||
655 | return 0; | ||
656 | |||
657 | if (!host->enabled) | ||
658 | return 0; | ||
659 | |||
660 | err = mmc_host_do_disable(host, 0); | ||
661 | return err; | ||
662 | } | ||
663 | EXPORT_SYMBOL(mmc_host_disable); | ||
664 | |||
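mmc_host_enable() and mmc_host_disable() above form a nesting counter: only the first enable and the last disable touch the controller, and en_dis_recurs keeps the enable/disable callbacks from re-entering the helpers. A reference-count sketch of the same idea in plain C (no locking, illustrative names):

    #include <stdio.h>

    struct host { int nesting_cnt; int enabled; };

    static void hw_enable(struct host *h)  { h->enabled = 1; puts("hw on");  }
    static void hw_disable(struct host *h) { h->enabled = 0; puts("hw off"); }

    static void host_enable(struct host *h)
    {
        if (h->nesting_cnt++)       /* already enabled by an outer caller */
            return;
        if (!h->enabled)
            hw_enable(h);
    }

    static void host_disable(struct host *h)
    {
        if (--h->nesting_cnt)       /* still needed by an outer caller */
            return;
        if (h->enabled)
            hw_disable(h);
    }

    int main(void)
    {
        struct host h = { 0, 0 };

        host_enable(&h);            /* hw on */
        host_enable(&h);            /* nested: no hardware access */
        host_disable(&h);           /* nested: no hardware access */
        host_disable(&h);           /* hw off */
        return 0;
    }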
665 | /** | ||
758 | * __mmc_claim_host - exclusively claim a host | 666 | * __mmc_claim_host - exclusively claim a host |
759 | * @host: mmc host to claim | 667 | * @host: mmc host to claim |
760 | * @abort: whether or not the operation should be aborted | 668 | * @abort: whether or not the operation should be aborted |
@@ -792,8 +700,8 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort) | |||
792 | wake_up(&host->wq); | 700 | wake_up(&host->wq); |
793 | spin_unlock_irqrestore(&host->lock, flags); | 701 | spin_unlock_irqrestore(&host->lock, flags); |
794 | remove_wait_queue(&host->wq, &wait); | 702 | remove_wait_queue(&host->wq, &wait); |
795 | if (host->ops->enable && !stop && host->claim_cnt == 1) | 703 | if (!stop) |
796 | host->ops->enable(host); | 704 | mmc_host_enable(host); |
797 | return stop; | 705 | return stop; |
798 | } | 706 | } |
799 | 707 | ||
@@ -818,28 +726,21 @@ int mmc_try_claim_host(struct mmc_host *host) | |||
818 | claimed_host = 1; | 726 | claimed_host = 1; |
819 | } | 727 | } |
820 | spin_unlock_irqrestore(&host->lock, flags); | 728 | spin_unlock_irqrestore(&host->lock, flags); |
821 | if (host->ops->enable && claimed_host && host->claim_cnt == 1) | ||
822 | host->ops->enable(host); | ||
823 | return claimed_host; | 729 | return claimed_host; |
824 | } | 730 | } |
825 | EXPORT_SYMBOL(mmc_try_claim_host); | 731 | EXPORT_SYMBOL(mmc_try_claim_host); |
826 | 732 | ||
827 | /** | 733 | /** |
828 | * mmc_release_host - release a host | 734 | * mmc_do_release_host - release a claimed host |
829 | * @host: mmc host to release | 735 | * @host: mmc host to release |
830 | * | 736 | * |
831 | * Release a MMC host, allowing others to claim the host | 737 | * If you successfully claimed a host, this function will |
832 | * for their operations. | 738 | * release it again. |
833 | */ | 739 | */ |
834 | void mmc_release_host(struct mmc_host *host) | 740 | void mmc_do_release_host(struct mmc_host *host) |
835 | { | 741 | { |
836 | unsigned long flags; | 742 | unsigned long flags; |
837 | 743 | ||
838 | WARN_ON(!host->claimed); | ||
839 | |||
840 | if (host->ops->disable && host->claim_cnt == 1) | ||
841 | host->ops->disable(host); | ||
842 | |||
843 | spin_lock_irqsave(&host->lock, flags); | 744 | spin_lock_irqsave(&host->lock, flags); |
844 | if (--host->claim_cnt) { | 745 | if (--host->claim_cnt) { |
845 | /* Release for nested claim */ | 746 | /* Release for nested claim */ |
@@ -851,6 +752,67 @@ void mmc_release_host(struct mmc_host *host) | |||
851 | wake_up(&host->wq); | 752 | wake_up(&host->wq); |
852 | } | 753 | } |
853 | } | 754 | } |
755 | EXPORT_SYMBOL(mmc_do_release_host); | ||
756 | |||
757 | void mmc_host_deeper_disable(struct work_struct *work) | ||
758 | { | ||
759 | struct mmc_host *host = | ||
760 | container_of(work, struct mmc_host, disable.work); | ||
761 | |||
762 | /* If the host is claimed then we do not want to disable it anymore */ | ||
763 | if (!mmc_try_claim_host(host)) | ||
764 | return; | ||
765 | mmc_host_do_disable(host, 1); | ||
766 | mmc_do_release_host(host); | ||
767 | } | ||
768 | |||
769 | /** | ||
770 | * mmc_host_lazy_disable - lazily disable a host. | ||
771 | * @host: mmc host to disable | ||
772 | * | ||
773 | * Hosts that support power saving can use the 'enable' and 'disable' | ||
774 | * methods to exit and enter power saving states. For more information | ||
775 | * see comments for struct mmc_host_ops. | ||
776 | */ | ||
777 | int mmc_host_lazy_disable(struct mmc_host *host) | ||
778 | { | ||
779 | if (!(host->caps & MMC_CAP_DISABLE)) | ||
780 | return 0; | ||
781 | |||
782 | if (host->en_dis_recurs) | ||
783 | return 0; | ||
784 | |||
785 | if (--host->nesting_cnt) | ||
786 | return 0; | ||
787 | |||
788 | if (!host->enabled) | ||
789 | return 0; | ||
790 | |||
791 | if (host->disable_delay) { | ||
792 | mmc_schedule_delayed_work(&host->disable, | ||
793 | msecs_to_jiffies(host->disable_delay)); | ||
794 | return 0; | ||
795 | } else | ||
796 | return mmc_host_do_disable(host, 1); | ||
797 | } | ||
798 | EXPORT_SYMBOL(mmc_host_lazy_disable); | ||
799 | |||
800 | /** | ||
801 | * mmc_release_host - release a host | ||
802 | * @host: mmc host to release | ||
803 | * | ||
804 | * Release a MMC host, allowing others to claim the host | ||
805 | * for their operations. | ||
806 | */ | ||
807 | void mmc_release_host(struct mmc_host *host) | ||
808 | { | ||
809 | WARN_ON(!host->claimed); | ||
810 | |||
811 | mmc_host_lazy_disable(host); | ||
812 | |||
813 | mmc_do_release_host(host); | ||
814 | } | ||
815 | |||
854 | EXPORT_SYMBOL(mmc_release_host); | 816 | EXPORT_SYMBOL(mmc_release_host); |
855 | 817 | ||
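The claim/release pair above behaves like a recursive lock built from a spinlock, a wait queue and a claim counter: __mmc_claim_host() sleeps until the host is free or already owned by the caller, and only the outermost release wakes the waiters. A pthread-based sketch of that discipline (simplified, with invented names; the real code additionally hooks the enable/disable helpers here):

    #include <stdio.h>
    #include <pthread.h>

    struct host {
        pthread_mutex_t lock;
        pthread_cond_t  wq;
        int             claimed;
        int             claim_cnt;
        pthread_t       claimer;
    };

    static void claim_host(struct host *h)
    {
        pthread_mutex_lock(&h->lock);
        while (h->claimed && !pthread_equal(h->claimer, pthread_self()))
            pthread_cond_wait(&h->wq, &h->lock);   /* someone else owns it */
        h->claimed = 1;
        h->claimer = pthread_self();
        h->claim_cnt++;                            /* nested claims allowed */
        pthread_mutex_unlock(&h->lock);
    }

    static void release_host(struct host *h)
    {
        pthread_mutex_lock(&h->lock);
        if (--h->claim_cnt == 0) {                 /* outermost release */
            h->claimed = 0;
            pthread_cond_broadcast(&h->wq);        /* let a waiter claim it */
        }
        pthread_mutex_unlock(&h->lock);
    }

    int main(void)
    {
        struct host h = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER };

        claim_host(&h);
        claim_host(&h);        /* nested claim by the same thread */
        release_host(&h);      /* still claimed */
        release_host(&h);      /* now released; waiters would wake here */
        puts("claimed and released with nesting");
        return 0;
    }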
856 | /* | 818 | /* |
@@ -1091,7 +1053,7 @@ int mmc_regulator_get_ocrmask(struct regulator *supply) | |||
1091 | 1053 | ||
1092 | return result; | 1054 | return result; |
1093 | } | 1055 | } |
1094 | EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask); | 1056 | EXPORT_SYMBOL(mmc_regulator_get_ocrmask); |
1095 | 1057 | ||
1096 | /** | 1058 | /** |
1097 | * mmc_regulator_set_ocr - set regulator to match host->ios voltage | 1059 | * mmc_regulator_set_ocr - set regulator to match host->ios voltage |
@@ -1116,8 +1078,7 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc, | |||
1116 | int tmp; | 1078 | int tmp; |
1117 | int voltage; | 1079 | int voltage; |
1118 | 1080 | ||
1119 | /* | 1081 | /* REVISIT mmc_vddrange_to_ocrmask() may have set some |
1120 | * REVISIT mmc_vddrange_to_ocrmask() may have set some | ||
1121 | * bits this regulator doesn't quite support ... don't | 1082 | * bits this regulator doesn't quite support ... don't |
1122 | * be too picky, most cards and regulators are OK with | 1083 | * be too picky, most cards and regulators are OK with |
1123 | * a 0.1V range goof (it's a small error percentage). | 1084 | * a 0.1V range goof (it's a small error percentage). |
@@ -1131,15 +1092,10 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc, | |||
1131 | max_uV = min_uV + 100 * 1000; | 1092 | max_uV = min_uV + 100 * 1000; |
1132 | } | 1093 | } |
1133 | 1094 | ||
1134 | /* | 1095 | /* avoid needless changes to this voltage; the regulator |
1135 | * If we're using a fixed/static regulator, don't call | 1096 | * might not allow this operation |
1136 | * regulator_set_voltage; it would fail. | ||
1137 | */ | 1097 | */ |
1138 | voltage = regulator_get_voltage(supply); | 1098 | voltage = regulator_get_voltage(supply); |
1139 | |||
1140 | if (regulator_count_voltages(supply) == 1) | ||
1141 | min_uV = max_uV = voltage; | ||
1142 | |||
1143 | if (voltage < 0) | 1099 | if (voltage < 0) |
1144 | result = voltage; | 1100 | result = voltage; |
1145 | else if (voltage < min_uV || voltage > max_uV) | 1101 | else if (voltage < min_uV || voltage > max_uV) |
@@ -1163,30 +1119,7 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc, | |||
1163 | "could not set regulator OCR (%d)\n", result); | 1119 | "could not set regulator OCR (%d)\n", result); |
1164 | return result; | 1120 | return result; |
1165 | } | 1121 | } |
1166 | EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr); | 1122 | EXPORT_SYMBOL(mmc_regulator_set_ocr); |
1167 | |||
1168 | int mmc_regulator_get_supply(struct mmc_host *mmc) | ||
1169 | { | ||
1170 | struct device *dev = mmc_dev(mmc); | ||
1171 | struct regulator *supply; | ||
1172 | int ret; | ||
1173 | |||
1174 | supply = devm_regulator_get(dev, "vmmc"); | ||
1175 | mmc->supply.vmmc = supply; | ||
1176 | mmc->supply.vqmmc = devm_regulator_get(dev, "vqmmc"); | ||
1177 | |||
1178 | if (IS_ERR(supply)) | ||
1179 | return PTR_ERR(supply); | ||
1180 | |||
1181 | ret = mmc_regulator_get_ocrmask(supply); | ||
1182 | if (ret > 0) | ||
1183 | mmc->ocr_avail = ret; | ||
1184 | else | ||
1185 | dev_warn(mmc_dev(mmc), "Failed getting OCR mask: %d\n", ret); | ||
1186 | |||
1187 | return 0; | ||
1188 | } | ||
1189 | EXPORT_SYMBOL_GPL(mmc_regulator_get_supply); | ||
1190 | 1123 | ||
1191 | #endif /* CONFIG_REGULATOR */ | 1124 | #endif /* CONFIG_REGULATOR */ |
1192 | 1125 | ||
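mmc_regulator_set_ocr() above maps an OCR voltage bit onto a min/max microvolt window with roughly 0.1 V of slack. For orientation, OCR bits 8..23 each stand for one 0.1 V band between 2.0 V and 3.6 V, with bit 7 covering 1.65-1.95 V; a rough sketch of deriving an OCR mask from a supply range under that assumption (not the kernel helper itself):

    #include <stdio.h>

    /* Assumed OCR layout: bit 8 = 2.0-2.1 V ... bit 23 = 3.5-3.6 V,
     * bit 7 = 1.65-1.95 V (low-voltage band). */
    static unsigned int ocr_mask_for_range(int min_mv, int max_mv)
    {
        unsigned int mask = 0;
        int bit;

        if (min_mv <= 1650 && max_mv >= 1950)
            mask |= 1u << 7;

        for (bit = 8; bit <= 23; bit++) {
            int lo = 2000 + (bit - 8) * 100;   /* band covered by this bit */
            int hi = lo + 100;

            if (min_mv <= lo && max_mv >= hi)
                mask |= 1u << bit;
        }
        return mask;
    }

    int main(void)
    {
        /* a 3.2 V - 3.4 V supply sets the 3.2-3.3 and 3.3-3.4 bits */
        printf("ocr mask: 0x%08x\n", ocr_mask_for_range(3200, 3400));
        return 0;
    }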
@@ -1245,11 +1178,8 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11 | |||
1245 | 1178 | ||
1246 | host->ios.signal_voltage = signal_voltage; | 1179 | host->ios.signal_voltage = signal_voltage; |
1247 | 1180 | ||
1248 | if (host->ops->start_signal_voltage_switch) { | 1181 | if (host->ops->start_signal_voltage_switch) |
1249 | mmc_host_clk_hold(host); | ||
1250 | err = host->ops->start_signal_voltage_switch(host, &host->ios); | 1182 | err = host->ops->start_signal_voltage_switch(host, &host->ios); |
1251 | mmc_host_clk_release(host); | ||
1252 | } | ||
1253 | 1183 | ||
1254 | return err; | 1184 | return err; |
1255 | } | 1185 | } |
@@ -1291,9 +1221,6 @@ static void mmc_power_up(struct mmc_host *host) | |||
1291 | { | 1221 | { |
1292 | int bit; | 1222 | int bit; |
1293 | 1223 | ||
1294 | if (host->ios.power_mode == MMC_POWER_ON) | ||
1295 | return; | ||
1296 | |||
1297 | mmc_host_clk_hold(host); | 1224 | mmc_host_clk_hold(host); |
1298 | 1225 | ||
1299 | /* If ocr is set, we use it */ | 1226 | /* If ocr is set, we use it */ |
@@ -1303,19 +1230,18 @@ static void mmc_power_up(struct mmc_host *host) | |||
1303 | bit = fls(host->ocr_avail) - 1; | 1230 | bit = fls(host->ocr_avail) - 1; |
1304 | 1231 | ||
1305 | host->ios.vdd = bit; | 1232 | host->ios.vdd = bit; |
1306 | if (mmc_host_is_spi(host)) | 1233 | if (mmc_host_is_spi(host)) { |
1307 | host->ios.chip_select = MMC_CS_HIGH; | 1234 | host->ios.chip_select = MMC_CS_HIGH; |
1308 | else | 1235 | host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; |
1236 | } else { | ||
1309 | host->ios.chip_select = MMC_CS_DONTCARE; | 1237 | host->ios.chip_select = MMC_CS_DONTCARE; |
1310 | host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; | 1238 | host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; |
1239 | } | ||
1311 | host->ios.power_mode = MMC_POWER_UP; | 1240 | host->ios.power_mode = MMC_POWER_UP; |
1312 | host->ios.bus_width = MMC_BUS_WIDTH_1; | 1241 | host->ios.bus_width = MMC_BUS_WIDTH_1; |
1313 | host->ios.timing = MMC_TIMING_LEGACY; | 1242 | host->ios.timing = MMC_TIMING_LEGACY; |
1314 | mmc_set_ios(host); | 1243 | mmc_set_ios(host); |
1315 | 1244 | ||
1316 | /* Set signal voltage to 3.3V */ | ||
1317 | mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, false); | ||
1318 | |||
1319 | /* | 1245 | /* |
1320 | * This delay should be sufficient to allow the power supply | 1246 | * This delay should be sufficient to allow the power supply |
1321 | * to reach the minimum voltage. | 1247 | * to reach the minimum voltage. |
@@ -1338,15 +1264,11 @@ static void mmc_power_up(struct mmc_host *host) | |||
1338 | 1264 | ||
1339 | void mmc_power_off(struct mmc_host *host) | 1265 | void mmc_power_off(struct mmc_host *host) |
1340 | { | 1266 | { |
1341 | if (host->ios.power_mode == MMC_POWER_OFF) | ||
1342 | return; | ||
1343 | |||
1344 | mmc_host_clk_hold(host); | 1267 | mmc_host_clk_hold(host); |
1345 | 1268 | ||
1346 | host->ios.clock = 0; | 1269 | host->ios.clock = 0; |
1347 | host->ios.vdd = 0; | 1270 | host->ios.vdd = 0; |
1348 | 1271 | ||
1349 | |||
1350 | /* | 1272 | /* |
1351 | * Reset ocr mask to be the highest possible voltage supported for | 1273 | * Reset ocr mask to be the highest possible voltage supported for |
1352 | * this mmc host. This value will be used at next power up. | 1274 | * this mmc host. This value will be used at next power up. |
@@ -1362,13 +1284,6 @@ void mmc_power_off(struct mmc_host *host) | |||
1362 | host->ios.timing = MMC_TIMING_LEGACY; | 1284 | host->ios.timing = MMC_TIMING_LEGACY; |
1363 | mmc_set_ios(host); | 1285 | mmc_set_ios(host); |
1364 | 1286 | ||
1365 | /* | ||
1366 | * Some configurations, such as the 802.11 SDIO card in the OLPC | ||
1367 | * XO-1.5, require a short delay after poweroff before the card | ||
1368 | * can be successfully turned on again. | ||
1369 | */ | ||
1370 | mmc_delay(1); | ||
1371 | |||
1372 | mmc_host_clk_release(host); | 1287 | mmc_host_clk_release(host); |
1373 | } | 1288 | } |
1374 | 1289 | ||
@@ -1411,6 +1326,36 @@ static inline void mmc_bus_put(struct mmc_host *host) | |||
1411 | spin_unlock_irqrestore(&host->lock, flags); | 1326 | spin_unlock_irqrestore(&host->lock, flags); |
1412 | } | 1327 | } |
1413 | 1328 | ||
1329 | int mmc_resume_bus(struct mmc_host *host) | ||
1330 | { | ||
1331 | unsigned long flags; | ||
1332 | |||
1333 | if (!mmc_bus_needs_resume(host)) | ||
1334 | return -EINVAL; | ||
1335 | |||
1336 | printk("%s: Starting deferred resume\n", mmc_hostname(host)); | ||
1337 | spin_lock_irqsave(&host->lock, flags); | ||
1338 | host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME; | ||
1339 | host->rescan_disable = 0; | ||
1340 | spin_unlock_irqrestore(&host->lock, flags); | ||
1341 | |||
1342 | mmc_bus_get(host); | ||
1343 | if (host->bus_ops && !host->bus_dead) { | ||
1344 | mmc_power_up(host); | ||
1345 | BUG_ON(!host->bus_ops->resume); | ||
1346 | host->bus_ops->resume(host); | ||
1347 | } | ||
1348 | |||
1349 | if (host->bus_ops->detect && !host->bus_dead) | ||
1350 | host->bus_ops->detect(host); | ||
1351 | |||
1352 | mmc_bus_put(host); | ||
1353 | printk("%s: Deferred resume completed\n", mmc_hostname(host)); | ||
1354 | return 0; | ||
1355 | } | ||
1356 | |||
1357 | EXPORT_SYMBOL(mmc_resume_bus); | ||
1358 | |||
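mmc_resume_bus() is the consumer side of the deferred-resume scheme: system resume merely flags the host as needing a resume, and the first I/O that arrives pays for powering the card back up. A compact model of that resume-on-first-request flag, using placeholder functions:

    #include <stdio.h>

    struct host {
        int needs_resume;          /* set at system resume instead of resuming */
        int powered;
    };

    static void bus_resume(struct host *h) { h->powered = 1; puts("card resumed"); }

    static void system_resume(struct host *h)
    {
        h->needs_resume = 1;       /* defer the expensive part */
        h->powered = 0;
        puts("system resume: card resume deferred");
    }

    static void submit_io(struct host *h, int block)
    {
        if (h->needs_resume) {     /* first request pays the resume cost */
            h->needs_resume = 0;
            bus_resume(h);
        }
        printf("read block %d (powered=%d)\n", block, h->powered);
    }

    int main(void)
    {
        struct host h = { 0, 1 };

        system_resume(&h);
        submit_io(&h, 0);          /* triggers the deferred resume */
        submit_io(&h, 1);          /* already resumed, no extra work */
        return 0;
    }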
1414 | /* | 1359 | /* |
1415 | * Assign a mmc bus handler to a host. Only one bus handler may control a | 1360 | * Assign a mmc bus handler to a host. Only one bus handler may control a |
1416 | * host at any given time. | 1361 | * host at any given time. |
@@ -1475,7 +1420,8 @@ void mmc_detect_change(struct mmc_host *host, unsigned long delay) | |||
1475 | WARN_ON(host->removed); | 1420 | WARN_ON(host->removed); |
1476 | spin_unlock_irqrestore(&host->lock, flags); | 1421 | spin_unlock_irqrestore(&host->lock, flags); |
1477 | #endif | 1422 | #endif |
1478 | host->detect_change = 1; | 1423 | |
1424 | wake_lock(&host->detect_wake_lock); | ||
1479 | mmc_schedule_delayed_work(&host->detect, delay); | 1425 | mmc_schedule_delayed_work(&host->detect, delay); |
1480 | } | 1426 | } |
1481 | 1427 | ||
@@ -1535,10 +1481,7 @@ static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card, | |||
1535 | { | 1481 | { |
1536 | unsigned int erase_timeout; | 1482 | unsigned int erase_timeout; |
1537 | 1483 | ||
1538 | if (arg == MMC_DISCARD_ARG || | 1484 | if (card->ext_csd.erase_group_def & 1) { |
1539 | (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) { | ||
1540 | erase_timeout = card->ext_csd.trim_timeout; | ||
1541 | } else if (card->ext_csd.erase_group_def & 1) { | ||
1542 | /* High Capacity Erase Group Size uses HC timeouts */ | 1485 | /* High Capacity Erase Group Size uses HC timeouts */ |
1543 | if (arg == MMC_TRIM_ARG) | 1486 | if (arg == MMC_TRIM_ARG) |
1544 | erase_timeout = card->ext_csd.trim_timeout; | 1487 | erase_timeout = card->ext_csd.trim_timeout; |
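The hunk above is about choosing the erase timeout class: the newer code (left column) takes the trim timeout for DISCARD, and for TRIM on rev >= 6 parts, while the older code only distinguishes high-capacity from legacy erase groups. A small decision-function sketch of that selection with invented timeout values:

    #include <stdio.h>

    enum erase_arg { ERASE_ARG, TRIM_ARG, DISCARD_ARG };

    struct ext_csd {
        int rev;
        int erase_group_def;             /* bit 0: high-capacity erase groups */
        unsigned int trim_timeout_ms;
        unsigned int hc_erase_timeout_ms;
        unsigned int legacy_erase_timeout_ms;
    };

    static unsigned int erase_timeout_ms(const struct ext_csd *c, enum erase_arg arg)
    {
        if (arg == DISCARD_ARG || (arg == TRIM_ARG && c->rev >= 6))
            return c->trim_timeout_ms;
        if (c->erase_group_def & 1)
            return arg == TRIM_ARG ? c->trim_timeout_ms : c->hc_erase_timeout_ms;
        return c->legacy_erase_timeout_ms;   /* real code derives this from the CSD */
    }

    int main(void)
    {
        struct ext_csd c = { 6, 1, 300, 600, 1000 };   /* invented numbers */

        printf("discard: %u ms\n", erase_timeout_ms(&c, DISCARD_ARG));
        printf("trim:    %u ms\n", erase_timeout_ms(&c, TRIM_ARG));
        printf("erase:   %u ms\n", erase_timeout_ms(&c, ERASE_ARG));
        return 0;
    }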
@@ -1634,7 +1577,6 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from, | |||
1634 | { | 1577 | { |
1635 | struct mmc_command cmd = {0}; | 1578 | struct mmc_command cmd = {0}; |
1636 | unsigned int qty = 0; | 1579 | unsigned int qty = 0; |
1637 | unsigned long timeout; | ||
1638 | int err; | 1580 | int err; |
1639 | 1581 | ||
1640 | /* | 1582 | /* |
@@ -1675,9 +1617,9 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from, | |||
1675 | cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; | 1617 | cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; |
1676 | err = mmc_wait_for_cmd(card->host, &cmd, 0); | 1618 | err = mmc_wait_for_cmd(card->host, &cmd, 0); |
1677 | if (err) { | 1619 | if (err) { |
1678 | pr_err("mmc_erase: group start error %d, " | 1620 | printk(KERN_ERR "mmc_erase: group start error %d, " |
1679 | "status %#x\n", err, cmd.resp[0]); | 1621 | "status %#x\n", err, cmd.resp[0]); |
1680 | err = -EIO; | 1622 | err = -EINVAL; |
1681 | goto out; | 1623 | goto out; |
1682 | } | 1624 | } |
1683 | 1625 | ||
@@ -1690,9 +1632,9 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from, | |||
1690 | cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; | 1632 | cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; |
1691 | err = mmc_wait_for_cmd(card->host, &cmd, 0); | 1633 | err = mmc_wait_for_cmd(card->host, &cmd, 0); |
1692 | if (err) { | 1634 | if (err) { |
1693 | pr_err("mmc_erase: group end error %d, status %#x\n", | 1635 | printk(KERN_ERR "mmc_erase: group end error %d, status %#x\n", |
1694 | err, cmd.resp[0]); | 1636 | err, cmd.resp[0]); |
1695 | err = -EIO; | 1637 | err = -EINVAL; |
1696 | goto out; | 1638 | goto out; |
1697 | } | 1639 | } |
1698 | 1640 | ||
@@ -1703,7 +1645,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from, | |||
1703 | cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty); | 1645 | cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty); |
1704 | err = mmc_wait_for_cmd(card->host, &cmd, 0); | 1646 | err = mmc_wait_for_cmd(card->host, &cmd, 0); |
1705 | if (err) { | 1647 | if (err) { |
1706 | pr_err("mmc_erase: erase error %d, status %#x\n", | 1648 | printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n", |
1707 | err, cmd.resp[0]); | 1649 | err, cmd.resp[0]); |
1708 | err = -EIO; | 1650 | err = -EIO; |
1709 | goto out; | 1651 | goto out; |
@@ -1712,7 +1654,6 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from, | |||
1712 | if (mmc_host_is_spi(card->host)) | 1654 | if (mmc_host_is_spi(card->host)) |
1713 | goto out; | 1655 | goto out; |
1714 | 1656 | ||
1715 | timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS); | ||
1716 | do { | 1657 | do { |
1717 | memset(&cmd, 0, sizeof(struct mmc_command)); | 1658 | memset(&cmd, 0, sizeof(struct mmc_command)); |
1718 | cmd.opcode = MMC_SEND_STATUS; | 1659 | cmd.opcode = MMC_SEND_STATUS; |
@@ -1721,24 +1662,13 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from, | |||
1721 | /* Do not retry else we can't see errors */ | 1662 | /* Do not retry else we can't see errors */ |
1722 | err = mmc_wait_for_cmd(card->host, &cmd, 0); | 1663 | err = mmc_wait_for_cmd(card->host, &cmd, 0); |
1723 | if (err || (cmd.resp[0] & 0xFDF92000)) { | 1664 | if (err || (cmd.resp[0] & 0xFDF92000)) { |
1724 | pr_err("error %d requesting status %#x\n", | 1665 | printk(KERN_ERR "error %d requesting status %#x\n", |
1725 | err, cmd.resp[0]); | 1666 | err, cmd.resp[0]); |
1726 | err = -EIO; | 1667 | err = -EIO; |
1727 | goto out; | 1668 | goto out; |
1728 | } | 1669 | } |
1729 | |||
1730 | /* Timeout if the device never becomes ready for data and | ||
1731 | * never leaves the program state. | ||
1732 | */ | ||
1733 | if (time_after(jiffies, timeout)) { | ||
1734 | pr_err("%s: Card stuck in programming state! %s\n", | ||
1735 | mmc_hostname(card->host), __func__); | ||
1736 | err = -EIO; | ||
1737 | goto out; | ||
1738 | } | ||
1739 | |||
1740 | } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || | 1670 | } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || |
1741 | (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG)); | 1671 | R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG); |
1742 | out: | 1672 | out: |
1743 | return err; | 1673 | return err; |
1744 | } | 1674 | } |
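The left column above also adds a deadline around this status-poll loop so a card stuck in the programming state cannot stall the erase forever. The same idiom in portable C, polling a fake busy flag against a wall-clock budget (values are arbitrary):

    #include <stdio.h>
    #include <time.h>

    static int busy_polls = 5;                  /* fake card: busy for 5 polls */
    static int card_busy(void) { return busy_polls-- > 0; }

    int main(void)
    {
        /* 2-second budget, same role as jiffies + msecs_to_jiffies(...) */
        time_t deadline = time(NULL) + 2;
        int err = 0;

        while (card_busy()) {
            if (time(NULL) > deadline) {
                fprintf(stderr, "card stuck in programming state\n");
                err = -1;
                break;
            }
        }
        printf("erase poll finished: %d\n", err);
        return err ? 1 : 0;
    }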
@@ -1827,28 +1757,6 @@ int mmc_can_trim(struct mmc_card *card) | |||
1827 | } | 1757 | } |
1828 | EXPORT_SYMBOL(mmc_can_trim); | 1758 | EXPORT_SYMBOL(mmc_can_trim); |
1829 | 1759 | ||
1830 | int mmc_can_discard(struct mmc_card *card) | ||
1831 | { | ||
1832 | /* | ||
1833 | * As there's no way to detect the discard support bit at v4.5 | ||
1834 | * use the s/w feature support filed. | ||
1835 | */ | ||
1836 | if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE) | ||
1837 | return 1; | ||
1838 | return 0; | ||
1839 | } | ||
1840 | EXPORT_SYMBOL(mmc_can_discard); | ||
1841 | |||
1842 | int mmc_can_sanitize(struct mmc_card *card) | ||
1843 | { | ||
1844 | if (!mmc_can_trim(card) && !mmc_can_erase(card)) | ||
1845 | return 0; | ||
1846 | if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE) | ||
1847 | return 1; | ||
1848 | return 0; | ||
1849 | } | ||
1850 | EXPORT_SYMBOL(mmc_can_sanitize); | ||
1851 | |||
1852 | int mmc_can_secure_erase_trim(struct mmc_card *card) | 1760 | int mmc_can_secure_erase_trim(struct mmc_card *card) |
1853 | { | 1761 | { |
1854 | if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) | 1762 | if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) |
@@ -1958,108 +1866,6 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen) | |||
1958 | } | 1866 | } |
1959 | EXPORT_SYMBOL(mmc_set_blocklen); | 1867 | EXPORT_SYMBOL(mmc_set_blocklen); |
1960 | 1868 | ||
1961 | int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount, | ||
1962 | bool is_rel_write) | ||
1963 | { | ||
1964 | struct mmc_command cmd = {0}; | ||
1965 | |||
1966 | cmd.opcode = MMC_SET_BLOCK_COUNT; | ||
1967 | cmd.arg = blockcount & 0x0000FFFF; | ||
1968 | if (is_rel_write) | ||
1969 | cmd.arg |= 1 << 31; | ||
1970 | cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; | ||
1971 | return mmc_wait_for_cmd(card->host, &cmd, 5); | ||
1972 | } | ||
1973 | EXPORT_SYMBOL(mmc_set_blockcount); | ||
1974 | |||
1975 | static void mmc_hw_reset_for_init(struct mmc_host *host) | ||
1976 | { | ||
1977 | if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) | ||
1978 | return; | ||
1979 | mmc_host_clk_hold(host); | ||
1980 | host->ops->hw_reset(host); | ||
1981 | mmc_host_clk_release(host); | ||
1982 | } | ||
1983 | |||
1984 | int mmc_can_reset(struct mmc_card *card) | ||
1985 | { | ||
1986 | u8 rst_n_function; | ||
1987 | |||
1988 | if (!mmc_card_mmc(card)) | ||
1989 | return 0; | ||
1990 | rst_n_function = card->ext_csd.rst_n_function; | ||
1991 | if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED) | ||
1992 | return 0; | ||
1993 | return 1; | ||
1994 | } | ||
1995 | EXPORT_SYMBOL(mmc_can_reset); | ||
1996 | |||
1997 | static int mmc_do_hw_reset(struct mmc_host *host, int check) | ||
1998 | { | ||
1999 | struct mmc_card *card = host->card; | ||
2000 | |||
2001 | if (!host->bus_ops->power_restore) | ||
2002 | return -EOPNOTSUPP; | ||
2003 | |||
2004 | if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) | ||
2005 | return -EOPNOTSUPP; | ||
2006 | |||
2007 | if (!card) | ||
2008 | return -EINVAL; | ||
2009 | |||
2010 | if (!mmc_can_reset(card)) | ||
2011 | return -EOPNOTSUPP; | ||
2012 | |||
2013 | mmc_host_clk_hold(host); | ||
2014 | mmc_set_clock(host, host->f_init); | ||
2015 | |||
2016 | host->ops->hw_reset(host); | ||
2017 | |||
2018 | /* If the reset has happened, then a status command will fail */ | ||
2019 | if (check) { | ||
2020 | struct mmc_command cmd = {0}; | ||
2021 | int err; | ||
2022 | |||
2023 | cmd.opcode = MMC_SEND_STATUS; | ||
2024 | if (!mmc_host_is_spi(card->host)) | ||
2025 | cmd.arg = card->rca << 16; | ||
2026 | cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; | ||
2027 | err = mmc_wait_for_cmd(card->host, &cmd, 0); | ||
2028 | if (!err) { | ||
2029 | mmc_host_clk_release(host); | ||
2030 | return -ENOSYS; | ||
2031 | } | ||
2032 | } | ||
2033 | |||
2034 | host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR); | ||
2035 | if (mmc_host_is_spi(host)) { | ||
2036 | host->ios.chip_select = MMC_CS_HIGH; | ||
2037 | host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; | ||
2038 | } else { | ||
2039 | host->ios.chip_select = MMC_CS_DONTCARE; | ||
2040 | host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; | ||
2041 | } | ||
2042 | host->ios.bus_width = MMC_BUS_WIDTH_1; | ||
2043 | host->ios.timing = MMC_TIMING_LEGACY; | ||
2044 | mmc_set_ios(host); | ||
2045 | |||
2046 | mmc_host_clk_release(host); | ||
2047 | |||
2048 | return host->bus_ops->power_restore(host); | ||
2049 | } | ||
2050 | |||
2051 | int mmc_hw_reset(struct mmc_host *host) | ||
2052 | { | ||
2053 | return mmc_do_hw_reset(host, 0); | ||
2054 | } | ||
2055 | EXPORT_SYMBOL(mmc_hw_reset); | ||
2056 | |||
2057 | int mmc_hw_reset_check(struct mmc_host *host) | ||
2058 | { | ||
2059 | return mmc_do_hw_reset(host, 1); | ||
2060 | } | ||
2061 | EXPORT_SYMBOL(mmc_hw_reset_check); | ||
2062 | |||
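Note: the removed mmc_do_hw_reset(..., check) path verifies a reset by issuing SEND_STATUS afterwards; if the card still answers, the RST_n line evidently did nothing and the caller gets -ENOSYS. A minimal sketch of that check, with send_status() as a hypothetical stand-in for CMD13:

    #include <errno.h>

    /* Returns 0 if the reset appears to have taken effect, -ENOSYS otherwise. */
    int verify_hw_reset(int (*send_status)(void))
    {
        /* After a genuine reset the card has dropped back to its initial
         * state, so a normal status query must fail; a successful reply
         * therefore means no reset actually happened. */
        if (send_status() == 0)
            return -ENOSYS;
        return 0;
    }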
2063 | static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq) | 1869 | static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq) |
2064 | { | 1870 | { |
2065 | host->f_init = freq; | 1871 | host->f_init = freq; |
@@ -2071,12 +1877,6 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq) | |||
2071 | mmc_power_up(host); | 1877 | mmc_power_up(host); |
2072 | 1878 | ||
2073 | /* | 1879 | /* |
2074 | * Some eMMCs (with VCCQ always on) may not be reset after power up, so | ||
2075 | * do a hardware reset if possible. | ||
2076 | */ | ||
2077 | mmc_hw_reset_for_init(host); | ||
2078 | |||
2079 | /* | ||
2080 | * sdio_reset sends CMD52 to reset card. Since we do not know | 1880 | * sdio_reset sends CMD52 to reset card. Since we do not know |
2081 | * if the card is being re-initialized, just send it. CMD52 | 1881 | * if the card is being re-initialized, just send it. CMD52 |
2082 | * should be ignored by SD/eMMC cards. | 1882 | * should be ignored by SD/eMMC cards. |
@@ -2098,75 +1898,17 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq) | |||
2098 | return -EIO; | 1898 | return -EIO; |
2099 | } | 1899 | } |
2100 | 1900 | ||
2101 | int _mmc_detect_card_removed(struct mmc_host *host) | ||
2102 | { | ||
2103 | int ret; | ||
2104 | |||
2105 | if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive) | ||
2106 | return 0; | ||
2107 | |||
2108 | if (!host->card || mmc_card_removed(host->card)) | ||
2109 | return 1; | ||
2110 | |||
2111 | ret = host->bus_ops->alive(host); | ||
2112 | if (ret) { | ||
2113 | mmc_card_set_removed(host->card); | ||
2114 | pr_debug("%s: card remove detected\n", mmc_hostname(host)); | ||
2115 | } | ||
2116 | |||
2117 | return ret; | ||
2118 | } | ||
2119 | |||
2120 | int mmc_detect_card_removed(struct mmc_host *host) | ||
2121 | { | ||
2122 | struct mmc_card *card = host->card; | ||
2123 | int ret; | ||
2124 | |||
2125 | WARN_ON(!host->claimed); | ||
2126 | |||
2127 | if (!card) | ||
2128 | return 1; | ||
2129 | |||
2130 | ret = mmc_card_removed(card); | ||
2131 | /* | ||
2132 | * The card will be considered unchanged unless we have been asked to | ||
2133 | * detect a change or host requires polling to provide card detection. | ||
2134 | */ | ||
2135 | if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL) && | ||
2136 | !(host->caps2 & MMC_CAP2_DETECT_ON_ERR)) | ||
2137 | return ret; | ||
2138 | |||
2139 | host->detect_change = 0; | ||
2140 | if (!ret) { | ||
2141 | ret = _mmc_detect_card_removed(host); | ||
2142 | if (ret && (host->caps2 & MMC_CAP2_DETECT_ON_ERR)) { | ||
2143 | /* | ||
2144 | * Schedule a detect work as soon as possible to let a | ||
2145 | * rescan handle the card removal. | ||
2146 | */ | ||
2147 | cancel_delayed_work(&host->detect); | ||
2148 | mmc_detect_change(host, 0); | ||
2149 | } | ||
2150 | } | ||
2151 | |||
2152 | return ret; | ||
2153 | } | ||
2154 | EXPORT_SYMBOL(mmc_detect_card_removed); | ||
2155 | |||
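Note: the removed mmc_detect_card_removed() only probes the bus_ops->alive() hook under specific conditions: never for non-removable hosts, and otherwise only when a change was signalled, the host needs polling, or detect-on-error is enabled. A simplified predicate capturing that gating; the field names are stand-ins, not the kernel's:

    #include <stdbool.h>

    struct host_state {
        bool nonremovable;    /* MMC_CAP_NONREMOVABLE analogue */
        bool detect_change;
        bool needs_poll;      /* MMC_CAP_NEEDS_POLL analogue */
        bool detect_on_err;   /* MMC_CAP2_DETECT_ON_ERR analogue */
    };

    static bool should_probe_for_removal(const struct host_state *h)
    {
        if (h->nonremovable)
            return false;     /* fixed media is never re-probed */
        return h->detect_change || h->needs_poll || h->detect_on_err;
    }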
2156 | void mmc_rescan(struct work_struct *work) | 1901 | void mmc_rescan(struct work_struct *work) |
2157 | { | 1902 | { |
1903 | static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; | ||
2158 | struct mmc_host *host = | 1904 | struct mmc_host *host = |
2159 | container_of(work, struct mmc_host, detect.work); | 1905 | container_of(work, struct mmc_host, detect.work); |
2160 | int i; | 1906 | int i; |
1907 | bool extend_wakelock = false; | ||
2161 | 1908 | ||
2162 | if (host->rescan_disable) | 1909 | if (host->rescan_disable) |
2163 | return; | 1910 | return; |
2164 | 1911 | ||
2165 | /* If there is a non-removable card registered, only scan once */ | ||
2166 | if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered) | ||
2167 | return; | ||
2168 | host->rescan_entered = 1; | ||
2169 | |||
2170 | mmc_bus_get(host); | 1912 | mmc_bus_get(host); |
2171 | 1913 | ||
2172 | /* | 1914 | /* |
@@ -2177,7 +1919,11 @@ void mmc_rescan(struct work_struct *work) | |||
2177 | && !(host->caps & MMC_CAP_NONREMOVABLE)) | 1919 | && !(host->caps & MMC_CAP_NONREMOVABLE)) |
2178 | host->bus_ops->detect(host); | 1920 | host->bus_ops->detect(host); |
2179 | 1921 | ||
2180 | host->detect_change = 0; | 1922 | /* If the card was removed the bus will be marked |
1923 | * as dead - extend the wakelock so userspace | ||
1924 | * can respond */ | ||
1925 | if (host->bus_dead) | ||
1926 | extend_wakelock = 1; | ||
2181 | 1927 | ||
2182 | /* | 1928 | /* |
2183 | * Let mmc_bus_put() free the bus/bus_ops if we've found that | 1929 | * Let mmc_bus_put() free the bus/bus_ops if we've found that |
@@ -2198,32 +1944,34 @@ void mmc_rescan(struct work_struct *work) | |||
2198 | */ | 1944 | */ |
2199 | mmc_bus_put(host); | 1945 | mmc_bus_put(host); |
2200 | 1946 | ||
2201 | if (host->ops->get_cd && host->ops->get_cd(host) == 0) { | 1947 | if (host->ops->get_cd && host->ops->get_cd(host) == 0) |
2202 | mmc_claim_host(host); | ||
2203 | mmc_power_off(host); | ||
2204 | mmc_release_host(host); | ||
2205 | goto out; | 1948 | goto out; |
2206 | } | ||
2207 | 1949 | ||
2208 | mmc_claim_host(host); | 1950 | mmc_claim_host(host); |
2209 | for (i = 0; i < ARRAY_SIZE(freqs); i++) { | 1951 | for (i = 0; i < ARRAY_SIZE(freqs); i++) { |
2210 | if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) | 1952 | if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) { |
1953 | extend_wakelock = true; | ||
2211 | break; | 1954 | break; |
1955 | } | ||
2212 | if (freqs[i] <= host->f_min) | 1956 | if (freqs[i] <= host->f_min) |
2213 | break; | 1957 | break; |
2214 | } | 1958 | } |
2215 | mmc_release_host(host); | 1959 | mmc_release_host(host); |
2216 | 1960 | ||
2217 | out: | 1961 | out: |
2218 | if (host->caps & MMC_CAP_NEEDS_POLL) | 1962 | if (extend_wakelock) |
1963 | wake_lock_timeout(&host->detect_wake_lock, HZ / 2); | ||
1964 | else | ||
1965 | wake_unlock(&host->detect_wake_lock); | ||
1966 | if (host->caps & MMC_CAP_NEEDS_POLL) { | ||
1967 | wake_lock(&host->detect_wake_lock); | ||
2219 | mmc_schedule_delayed_work(&host->detect, HZ); | 1968 | mmc_schedule_delayed_work(&host->detect, HZ); |
1969 | } | ||
2220 | } | 1970 | } |
2221 | 1971 | ||
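Note: the wake-lock handling added to mmc_rescan() above boils down to a small policy: keep the system awake briefly when the scan changed something (the bus died or a card answered at one of the retry frequencies), release the lock otherwise, and take it again when the host relies on polling so the next scheduled rescan can run. A rough, simplified model of that decision (the real code releases and then re-acquires in the polling case):

    #include <stdbool.h>

    enum wake_action { WAKE_EXTEND, WAKE_RELEASE, WAKE_HOLD_FOR_POLL };

    static enum wake_action rescan_wake_policy(bool something_changed,
                                               bool needs_poll)
    {
        if (needs_poll)
            return WAKE_HOLD_FOR_POLL;   /* re-armed before each rescan */
        return something_changed ? WAKE_EXTEND : WAKE_RELEASE;
    }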
2222 | void mmc_start_host(struct mmc_host *host) | 1972 | void mmc_start_host(struct mmc_host *host) |
2223 | { | 1973 | { |
2224 | host->f_init = max(freqs[0], host->f_min); | 1974 | mmc_power_off(host); |
2225 | host->rescan_disable = 0; | ||
2226 | mmc_power_up(host); | ||
2227 | mmc_detect_change(host, 0); | 1975 | mmc_detect_change(host, 0); |
2228 | } | 1976 | } |
2229 | 1977 | ||
@@ -2236,8 +1984,10 @@ void mmc_stop_host(struct mmc_host *host) | |||
2236 | spin_unlock_irqrestore(&host->lock, flags); | 1984 | spin_unlock_irqrestore(&host->lock, flags); |
2237 | #endif | 1985 | #endif |
2238 | 1986 | ||
2239 | host->rescan_disable = 1; | 1987 | if (host->caps & MMC_CAP_DISABLE) |
2240 | cancel_delayed_work_sync(&host->detect); | 1988 | cancel_delayed_work(&host->disable); |
1989 | if (cancel_delayed_work_sync(&host->detect)) | ||
1990 | wake_unlock(&host->detect_wake_lock); | ||
2241 | mmc_flush_scheduled_work(); | 1991 | mmc_flush_scheduled_work(); |
2242 | 1992 | ||
2243 | /* clear pm flags now and let card drivers set them as needed */ | 1993 | /* clear pm flags now and let card drivers set them as needed */ |
@@ -2245,7 +1995,6 @@ void mmc_stop_host(struct mmc_host *host) | |||
2245 | 1995 | ||
2246 | mmc_bus_get(host); | 1996 | mmc_bus_get(host); |
2247 | if (host->bus_ops && !host->bus_dead) { | 1997 | if (host->bus_ops && !host->bus_dead) { |
2248 | /* Calling bus_ops->remove() with a claimed host can deadlock */ | ||
2249 | if (host->bus_ops->remove) | 1998 | if (host->bus_ops->remove) |
2250 | host->bus_ops->remove(host); | 1999 | host->bus_ops->remove(host); |
2251 | 2000 | ||
@@ -2317,9 +2066,6 @@ int mmc_card_awake(struct mmc_host *host) | |||
2317 | { | 2066 | { |
2318 | int err = -ENOSYS; | 2067 | int err = -ENOSYS; |
2319 | 2068 | ||
2320 | if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD) | ||
2321 | return 0; | ||
2322 | |||
2323 | mmc_bus_get(host); | 2069 | mmc_bus_get(host); |
2324 | 2070 | ||
2325 | if (host->bus_ops && !host->bus_dead && host->bus_ops->awake) | 2071 | if (host->bus_ops && !host->bus_dead && host->bus_ops->awake) |
@@ -2335,12 +2081,9 @@ int mmc_card_sleep(struct mmc_host *host) | |||
2335 | { | 2081 | { |
2336 | int err = -ENOSYS; | 2082 | int err = -ENOSYS; |
2337 | 2083 | ||
2338 | if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD) | ||
2339 | return 0; | ||
2340 | |||
2341 | mmc_bus_get(host); | 2084 | mmc_bus_get(host); |
2342 | 2085 | ||
2343 | if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep) | 2086 | if (host->bus_ops && !host->bus_dead && host->bus_ops->awake) |
2344 | err = host->bus_ops->sleep(host); | 2087 | err = host->bus_ops->sleep(host); |
2345 | 2088 | ||
2346 | mmc_bus_put(host); | 2089 | mmc_bus_put(host); |
@@ -2359,70 +2102,6 @@ int mmc_card_can_sleep(struct mmc_host *host) | |||
2359 | } | 2102 | } |
2360 | EXPORT_SYMBOL(mmc_card_can_sleep); | 2103 | EXPORT_SYMBOL(mmc_card_can_sleep); |
2361 | 2104 | ||
2362 | /* | ||
2363 | * Flush the cache to the non-volatile storage. | ||
2364 | */ | ||
2365 | int mmc_flush_cache(struct mmc_card *card) | ||
2366 | { | ||
2367 | struct mmc_host *host = card->host; | ||
2368 | int err = 0; | ||
2369 | |||
2370 | if (!(host->caps2 & MMC_CAP2_CACHE_CTRL)) | ||
2371 | return err; | ||
2372 | |||
2373 | if (mmc_card_mmc(card) && | ||
2374 | (card->ext_csd.cache_size > 0) && | ||
2375 | (card->ext_csd.cache_ctrl & 1)) { | ||
2376 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | ||
2377 | EXT_CSD_FLUSH_CACHE, 1, 0); | ||
2378 | if (err) | ||
2379 | pr_err("%s: cache flush error %d\n", | ||
2380 | mmc_hostname(card->host), err); | ||
2381 | } | ||
2382 | |||
2383 | return err; | ||
2384 | } | ||
2385 | EXPORT_SYMBOL(mmc_flush_cache); | ||
2386 | |||
2387 | /* | ||
2388 | * Turn the cache ON/OFF. | ||
2389 | * Turning the cache OFF shall trigger flushing of the data | ||
2390 | * to the non-volatile storage. | ||
2391 | */ | ||
2392 | int mmc_cache_ctrl(struct mmc_host *host, u8 enable) | ||
2393 | { | ||
2394 | struct mmc_card *card = host->card; | ||
2395 | unsigned int timeout; | ||
2396 | int err = 0; | ||
2397 | |||
2398 | if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) || | ||
2399 | mmc_card_is_removable(host)) | ||
2400 | return err; | ||
2401 | |||
2402 | mmc_claim_host(host); | ||
2403 | if (card && mmc_card_mmc(card) && | ||
2404 | (card->ext_csd.cache_size > 0)) { | ||
2405 | enable = !!enable; | ||
2406 | |||
2407 | if (card->ext_csd.cache_ctrl ^ enable) { | ||
2408 | timeout = enable ? card->ext_csd.generic_cmd6_time : 0; | ||
2409 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | ||
2410 | EXT_CSD_CACHE_CTRL, enable, timeout); | ||
2411 | if (err) | ||
2412 | pr_err("%s: cache %s error %d\n", | ||
2413 | mmc_hostname(card->host), | ||
2414 | enable ? "on" : "off", | ||
2415 | err); | ||
2416 | else | ||
2417 | card->ext_csd.cache_ctrl = enable; | ||
2418 | } | ||
2419 | } | ||
2420 | mmc_release_host(host); | ||
2421 | |||
2422 | return err; | ||
2423 | } | ||
2424 | EXPORT_SYMBOL(mmc_cache_ctrl); | ||
2425 | |||
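Note: the two removed cache helpers share the same preconditions: the host must advertise MMC_CAP2_CACHE_CTRL and the card must be an eMMC with a non-zero cache, and mmc_cache_ctrl() additionally issues the EXT_CSD switch only when the requested state differs from the current one. A simplified model of those checks; the struct fields are stand-ins for the kernel's:

    #include <stdbool.h>

    struct cache_state {
        bool host_has_cache_ctrl;   /* MMC_CAP2_CACHE_CTRL analogue */
        bool is_mmc;
        unsigned int cache_size;
        unsigned int cache_ctrl;    /* bit 0: cache currently enabled */
    };

    /* Flush only when cache control is supported and the cache is on. */
    static bool needs_cache_flush(const struct cache_state *c)
    {
        return c->host_has_cache_ctrl && c->is_mmc &&
               c->cache_size > 0 && (c->cache_ctrl & 1u);
    }

    /* Switch CACHE_CTRL only when the requested state actually differs. */
    static bool needs_cache_switch(const struct cache_state *c, bool enable)
    {
        return c->is_mmc && c->cache_size > 0 &&
               ((c->cache_ctrl & 1u) != (enable ? 1u : 0u));
    }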
2426 | #ifdef CONFIG_PM | 2105 | #ifdef CONFIG_PM |
2427 | 2106 | ||
2428 | /** | 2107 | /** |
@@ -2433,30 +2112,27 @@ int mmc_suspend_host(struct mmc_host *host) | |||
2433 | { | 2112 | { |
2434 | int err = 0; | 2113 | int err = 0; |
2435 | 2114 | ||
2436 | cancel_delayed_work(&host->detect); | 2115 | if (mmc_bus_needs_resume(host)) |
2437 | mmc_flush_scheduled_work(); | 2116 | return 0; |
2438 | 2117 | ||
2439 | err = mmc_cache_ctrl(host, 0); | 2118 | if (mmc_card_mmc(host->card) && mmc_card_doing_bkops(host->card)) |
2440 | if (err) | 2119 | mmc_interrupt_hpi(host->card); |
2441 | goto out; | 2120 | mmc_card_clr_need_bkops(host->card); |
2121 | |||
2122 | if (host->caps & MMC_CAP_DISABLE) | ||
2123 | cancel_delayed_work(&host->disable); | ||
2124 | if (cancel_delayed_work(&host->detect)) | ||
2125 | wake_unlock(&host->detect_wake_lock); | ||
2126 | mmc_flush_scheduled_work(); | ||
2442 | 2127 | ||
2443 | mmc_bus_get(host); | 2128 | mmc_bus_get(host); |
2444 | if (host->bus_ops && !host->bus_dead) { | 2129 | if (host->bus_ops && !host->bus_dead) { |
2445 | if (host->bus_ops->suspend) { | 2130 | if (host->bus_ops->suspend) |
2446 | if (mmc_card_doing_bkops(host->card)) { | ||
2447 | err = mmc_stop_bkops(host->card); | ||
2448 | if (err) | ||
2449 | goto out; | ||
2450 | } | ||
2451 | err = host->bus_ops->suspend(host); | 2131 | err = host->bus_ops->suspend(host); |
2452 | } | ||
2453 | |||
2454 | if (err == -ENOSYS || !host->bus_ops->resume) { | 2132 | if (err == -ENOSYS || !host->bus_ops->resume) { |
2455 | /* | 2133 | /* |
2456 | * We simply "remove" the card in this case. | 2134 | * We simply "remove" the card in this case. |
2457 | * It will be redetected on resume. (Calling | 2135 | * It will be redetected on resume. |
2458 | * bus_ops->remove() with a claimed host can | ||
2459 | * deadlock.) | ||
2460 | */ | 2136 | */ |
2461 | if (host->bus_ops->remove) | 2137 | if (host->bus_ops->remove) |
2462 | host->bus_ops->remove(host); | 2138 | host->bus_ops->remove(host); |
@@ -2473,7 +2149,6 @@ int mmc_suspend_host(struct mmc_host *host) | |||
2473 | if (!err && !mmc_card_keep_power(host)) | 2149 | if (!err && !mmc_card_keep_power(host)) |
2474 | mmc_power_off(host); | 2150 | mmc_power_off(host); |
2475 | 2151 | ||
2476 | out: | ||
2477 | return err; | 2152 | return err; |
2478 | } | 2153 | } |
2479 | 2154 | ||
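Note: the suspend/resume hunks above wire in deferred ("manual") resume: mmc_suspend_host() bails out early if a resume is already pending, and mmc_resume_host() merely sets MMC_BUSRESUME_NEEDS_RESUME so the block layer can perform the real resume on the first I/O request. A small stand-alone model of that handshake, with simplified names rather than the kernel's:

    #include <stdbool.h>

    struct bus_pm {
        bool manual_resume;   /* deferred-resume mode enabled */
        bool needs_resume;    /* MMC_BUSRESUME_NEEDS_RESUME analogue */
    };

    static void pm_resume(struct bus_pm *pm, void (*full_resume)(void))
    {
        if (pm->manual_resume) {
            pm->needs_resume = true;   /* defer until the first request */
            return;
        }
        full_resume();
    }

    static void on_first_request(struct bus_pm *pm, void (*full_resume)(void))
    {
        if (pm->needs_resume) {
            full_resume();             /* late, on-demand resume */
            pm->needs_resume = false;
        }
    }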
@@ -2488,6 +2163,12 @@ int mmc_resume_host(struct mmc_host *host) | |||
2488 | int err = 0; | 2163 | int err = 0; |
2489 | 2164 | ||
2490 | mmc_bus_get(host); | 2165 | mmc_bus_get(host); |
2166 | if (mmc_bus_manual_resume(host)) { | ||
2167 | host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME; | ||
2168 | mmc_bus_put(host); | ||
2169 | return 0; | ||
2170 | } | ||
2171 | |||
2491 | if (host->bus_ops && !host->bus_dead) { | 2172 | if (host->bus_ops && !host->bus_dead) { |
2492 | if (!mmc_card_keep_power(host)) { | 2173 | if (!mmc_card_keep_power(host)) { |
2493 | mmc_power_up(host); | 2174 | mmc_power_up(host); |
@@ -2508,7 +2189,7 @@ int mmc_resume_host(struct mmc_host *host) | |||
2508 | BUG_ON(!host->bus_ops->resume); | 2189 | BUG_ON(!host->bus_ops->resume); |
2509 | err = host->bus_ops->resume(host); | 2190 | err = host->bus_ops->resume(host); |
2510 | if (err) { | 2191 | if (err) { |
2511 | pr_warning("%s: error %d during resume " | 2192 | printk(KERN_WARNING "%s: error %d during resume " |
2512 | "(card was removed?)\n", | 2193 | "(card was removed?)\n", |
2513 | mmc_hostname(host), err); | 2194 | mmc_hostname(host), err); |
2514 | err = 0; | 2195 | err = 0; |
@@ -2531,35 +2212,30 @@ int mmc_pm_notify(struct notifier_block *notify_block, | |||
2531 | struct mmc_host *host = container_of( | 2212 | struct mmc_host *host = container_of( |
2532 | notify_block, struct mmc_host, pm_notify); | 2213 | notify_block, struct mmc_host, pm_notify); |
2533 | unsigned long flags; | 2214 | unsigned long flags; |
2534 | int err = 0; | 2215 | |
2535 | 2216 | ||
2536 | switch (mode) { | 2217 | switch (mode) { |
2537 | case PM_HIBERNATION_PREPARE: | 2218 | case PM_HIBERNATION_PREPARE: |
2538 | case PM_SUSPEND_PREPARE: | 2219 | case PM_SUSPEND_PREPARE: |
2539 | if (host->card && mmc_card_mmc(host->card) && | ||
2540 | mmc_card_doing_bkops(host->card)) { | ||
2541 | err = mmc_stop_bkops(host->card); | ||
2542 | if (err) { | ||
2543 | pr_err("%s: didn't stop bkops\n", | ||
2544 | mmc_hostname(host)); | ||
2545 | return err; | ||
2546 | } | ||
2547 | mmc_card_clr_doing_bkops(host->card); | ||
2548 | } | ||
2549 | 2220 | ||
2550 | spin_lock_irqsave(&host->lock, flags); | 2221 | spin_lock_irqsave(&host->lock, flags); |
2222 | if (mmc_bus_needs_resume(host)) { | ||
2223 | spin_unlock_irqrestore(&host->lock, flags); | ||
2224 | break; | ||
2225 | } | ||
2551 | host->rescan_disable = 1; | 2226 | host->rescan_disable = 1; |
2552 | spin_unlock_irqrestore(&host->lock, flags); | 2227 | spin_unlock_irqrestore(&host->lock, flags); |
2553 | cancel_delayed_work_sync(&host->detect); | 2228 | if (cancel_delayed_work_sync(&host->detect)) |
2229 | wake_unlock(&host->detect_wake_lock); | ||
2554 | 2230 | ||
2555 | if (!host->bus_ops || host->bus_ops->suspend) | 2231 | if (!host->bus_ops || host->bus_ops->suspend) |
2556 | break; | 2232 | break; |
2557 | 2233 | ||
2558 | /* Calling bus_ops->remove() with a claimed host can deadlock */ | 2234 | mmc_claim_host(host); |
2235 | |||
2559 | if (host->bus_ops->remove) | 2236 | if (host->bus_ops->remove) |
2560 | host->bus_ops->remove(host); | 2237 | host->bus_ops->remove(host); |
2561 | 2238 | ||
2562 | mmc_claim_host(host); | ||
2563 | mmc_detach_bus(host); | 2239 | mmc_detach_bus(host); |
2564 | mmc_power_off(host); | 2240 | mmc_power_off(host); |
2565 | mmc_release_host(host); | 2241 | mmc_release_host(host); |
@@ -2571,6 +2247,10 @@ int mmc_pm_notify(struct notifier_block *notify_block, | |||
2571 | case PM_POST_RESTORE: | 2247 | case PM_POST_RESTORE: |
2572 | 2248 | ||
2573 | spin_lock_irqsave(&host->lock, flags); | 2249 | spin_lock_irqsave(&host->lock, flags); |
2250 | if (mmc_bus_manual_resume(host)) { | ||
2251 | spin_unlock_irqrestore(&host->lock, flags); | ||
2252 | break; | ||
2253 | } | ||
2574 | host->rescan_disable = 0; | 2254 | host->rescan_disable = 0; |
2575 | spin_unlock_irqrestore(&host->lock, flags); | 2255 | spin_unlock_irqrestore(&host->lock, flags); |
2576 | mmc_detect_change(host, 0); | 2256 | mmc_detect_change(host, 0); |
@@ -2581,6 +2261,22 @@ int mmc_pm_notify(struct notifier_block *notify_block, | |||
2581 | } | 2261 | } |
2582 | #endif | 2262 | #endif |
2583 | 2263 | ||
2264 | #ifdef CONFIG_MMC_EMBEDDED_SDIO | ||
2265 | void mmc_set_embedded_sdio_data(struct mmc_host *host, | ||
2266 | struct sdio_cis *cis, | ||
2267 | struct sdio_cccr *cccr, | ||
2268 | struct sdio_embedded_func *funcs, | ||
2269 | int num_funcs) | ||
2270 | { | ||
2271 | host->embedded_sdio_data.cis = cis; | ||
2272 | host->embedded_sdio_data.cccr = cccr; | ||
2273 | host->embedded_sdio_data.funcs = funcs; | ||
2274 | host->embedded_sdio_data.num_funcs = num_funcs; | ||
2275 | } | ||
2276 | |||
2277 | EXPORT_SYMBOL(mmc_set_embedded_sdio_data); | ||
2278 | #endif | ||
2279 | |||
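Note: mmc_set_embedded_sdio_data(), added above, exists for boards with a soldered-down SDIO device (typically a Wi-Fi chip) whose CIS/CCCR tuples and function table are supplied by the platform instead of being read from the device. The sketch below models that pattern in plain C; the struct layouts, values, and names are simplified stand-ins for illustration only, not the kernel types.

    #include <stdio.h>

    /* Simplified stand-ins for sdio_cis / sdio_cccr / sdio_embedded_func. */
    struct cis  { unsigned short vendor, device; };
    struct cccr { unsigned char sdio_vsn; };
    struct func { unsigned char class; unsigned short blksize; };

    struct embedded_sdio_data {
        struct cis  *cis;
        struct cccr *cccr;
        struct func *funcs;
        int          num_funcs;
    };

    /* The host simply caches the platform-provided descriptors for later use. */
    static void set_embedded_sdio_data(struct embedded_sdio_data *host_data,
                                       struct cis *cis, struct cccr *cccr,
                                       struct func *funcs, int num_funcs)
    {
        host_data->cis = cis;
        host_data->cccr = cccr;
        host_data->funcs = funcs;
        host_data->num_funcs = num_funcs;
    }

    int main(void)
    {
        /* A made-up "board file" handing descriptors to the core at init. */
        static struct cis  wifi_cis  = { 0x0271, 0x0301 };
        static struct cccr wifi_cccr = { 0x03 };
        static struct func wifi_funcs[1] = { { 0x00, 512 } };
        struct embedded_sdio_data host_data;

        set_embedded_sdio_data(&host_data, &wifi_cis, &wifi_cccr,
                               wifi_funcs, 1);
        printf("registered %d embedded SDIO function(s)\n",
               host_data.num_funcs);
        return 0;
    }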
2584 | static int __init mmc_init(void) | 2280 | static int __init mmc_init(void) |
2585 | { | 2281 | { |
2586 | int ret; | 2282 | int ret; |
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 3bdafbca354..14664f1fb16 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h | |||
@@ -24,7 +24,6 @@ struct mmc_bus_ops { | |||
24 | int (*resume)(struct mmc_host *); | 24 | int (*resume)(struct mmc_host *); |
25 | int (*power_save)(struct mmc_host *); | 25 | int (*power_save)(struct mmc_host *); |
26 | int (*power_restore)(struct mmc_host *); | 26 | int (*power_restore)(struct mmc_host *); |
27 | int (*alive)(struct mmc_host *); | ||
28 | }; | 27 | }; |
29 | 28 | ||
30 | void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops); | 29 | void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops); |
@@ -60,14 +59,12 @@ void mmc_rescan(struct work_struct *work); | |||
60 | void mmc_start_host(struct mmc_host *host); | 59 | void mmc_start_host(struct mmc_host *host); |
61 | void mmc_stop_host(struct mmc_host *host); | 60 | void mmc_stop_host(struct mmc_host *host); |
62 | 61 | ||
63 | int _mmc_detect_card_removed(struct mmc_host *host); | ||
64 | |||
65 | int mmc_attach_mmc(struct mmc_host *host); | 62 | int mmc_attach_mmc(struct mmc_host *host); |
66 | int mmc_attach_sd(struct mmc_host *host); | 63 | int mmc_attach_sd(struct mmc_host *host); |
67 | int mmc_attach_sdio(struct mmc_host *host); | 64 | int mmc_attach_sdio(struct mmc_host *host); |
68 | 65 | ||
69 | /* Module parameters */ | 66 | /* Module parameters */ |
70 | extern bool use_spi_crc; | 67 | extern int use_spi_crc; |
71 | 68 | ||
72 | /* Debugfs information for hosts and cards */ | 69 | /* Debugfs information for hosts and cards */ |
73 | void mmc_add_host_debugfs(struct mmc_host *host); | 70 | void mmc_add_host_debugfs(struct mmc_host *host); |
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c index 35c2f85b195..998797ed67a 100644 --- a/drivers/mmc/core/debugfs.c +++ b/drivers/mmc/core/debugfs.c | |||
@@ -7,14 +7,11 @@ | |||
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | #include <linux/moduleparam.h> | ||
11 | #include <linux/export.h> | ||
12 | #include <linux/debugfs.h> | 10 | #include <linux/debugfs.h> |
13 | #include <linux/fs.h> | 11 | #include <linux/fs.h> |
14 | #include <linux/seq_file.h> | 12 | #include <linux/seq_file.h> |
15 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
16 | #include <linux/stat.h> | 14 | #include <linux/stat.h> |
17 | #include <linux/fault-inject.h> | ||
18 | 15 | ||
19 | #include <linux/mmc/card.h> | 16 | #include <linux/mmc/card.h> |
20 | #include <linux/mmc/host.h> | 17 | #include <linux/mmc/host.h> |
@@ -22,14 +19,6 @@ | |||
22 | #include "core.h" | 19 | #include "core.h" |
23 | #include "mmc_ops.h" | 20 | #include "mmc_ops.h" |
24 | 21 | ||
25 | #ifdef CONFIG_FAIL_MMC_REQUEST | ||
26 | |||
27 | static DECLARE_FAULT_ATTR(fail_default_attr); | ||
28 | static char *fail_request; | ||
29 | module_param(fail_request, charp, 0); | ||
30 | |||
31 | #endif /* CONFIG_FAIL_MMC_REQUEST */ | ||
32 | |||
33 | /* The debugfs functions are optimized away when CONFIG_DEBUG_FS isn't set. */ | 22 | /* The debugfs functions are optimized away when CONFIG_DEBUG_FS isn't set. */ |
34 | static int mmc_ios_show(struct seq_file *s, void *data) | 23 | static int mmc_ios_show(struct seq_file *s, void *data) |
35 | { | 24 | { |
@@ -57,8 +46,6 @@ static int mmc_ios_show(struct seq_file *s, void *data) | |||
57 | const char *str; | 46 | const char *str; |
58 | 47 | ||
59 | seq_printf(s, "clock:\t\t%u Hz\n", ios->clock); | 48 | seq_printf(s, "clock:\t\t%u Hz\n", ios->clock); |
60 | if (host->actual_clock) | ||
61 | seq_printf(s, "actual clock:\t%u Hz\n", host->actual_clock); | ||
62 | seq_printf(s, "vdd:\t\t%u ", ios->vdd); | 49 | seq_printf(s, "vdd:\t\t%u ", ios->vdd); |
63 | if ((1 << ios->vdd) & MMC_VDD_165_195) | 50 | if ((1 << ios->vdd) & MMC_VDD_165_195) |
64 | seq_printf(s, "(1.65 - 1.95 V)\n"); | 51 | seq_printf(s, "(1.65 - 1.95 V)\n"); |
@@ -126,40 +113,12 @@ static int mmc_ios_show(struct seq_file *s, void *data) | |||
126 | case MMC_TIMING_SD_HS: | 113 | case MMC_TIMING_SD_HS: |
127 | str = "sd high-speed"; | 114 | str = "sd high-speed"; |
128 | break; | 115 | break; |
129 | case MMC_TIMING_UHS_SDR50: | ||
130 | str = "sd uhs SDR50"; | ||
131 | break; | ||
132 | case MMC_TIMING_UHS_SDR104: | ||
133 | str = "sd uhs SDR104"; | ||
134 | break; | ||
135 | case MMC_TIMING_UHS_DDR50: | ||
136 | str = "sd uhs DDR50"; | ||
137 | break; | ||
138 | case MMC_TIMING_MMC_HS200: | ||
139 | str = "mmc high-speed SDR200"; | ||
140 | break; | ||
141 | default: | 116 | default: |
142 | str = "invalid"; | 117 | str = "invalid"; |
143 | break; | 118 | break; |
144 | } | 119 | } |
145 | seq_printf(s, "timing spec:\t%u (%s)\n", ios->timing, str); | 120 | seq_printf(s, "timing spec:\t%u (%s)\n", ios->timing, str); |
146 | 121 | ||
147 | switch (ios->signal_voltage) { | ||
148 | case MMC_SIGNAL_VOLTAGE_330: | ||
149 | str = "3.30 V"; | ||
150 | break; | ||
151 | case MMC_SIGNAL_VOLTAGE_180: | ||
152 | str = "1.80 V"; | ||
153 | break; | ||
154 | case MMC_SIGNAL_VOLTAGE_120: | ||
155 | str = "1.20 V"; | ||
156 | break; | ||
157 | default: | ||
158 | str = "invalid"; | ||
159 | break; | ||
160 | } | ||
161 | seq_printf(s, "signal voltage:\t%u (%s)\n", ios->chip_select, str); | ||
162 | |||
163 | return 0; | 122 | return 0; |
164 | } | 123 | } |
165 | 124 | ||
@@ -229,15 +188,6 @@ void mmc_add_host_debugfs(struct mmc_host *host) | |||
229 | root, &host->clk_delay)) | 188 | root, &host->clk_delay)) |
230 | goto err_node; | 189 | goto err_node; |
231 | #endif | 190 | #endif |
232 | #ifdef CONFIG_FAIL_MMC_REQUEST | ||
233 | if (fail_request) | ||
234 | setup_fault_attr(&fail_default_attr, fail_request); | ||
235 | host->fail_mmc_request = fail_default_attr; | ||
236 | if (IS_ERR(fault_create_debugfs_attr("fail_mmc_request", | ||
237 | root, | ||
238 | &host->fail_mmc_request))) | ||
239 | goto err_node; | ||
240 | #endif | ||
241 | return; | 191 | return; |
242 | 192 | ||
243 | err_node: | 193 | err_node: |
@@ -297,7 +247,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp) | |||
297 | if (err) | 247 | if (err) |
298 | goto out_free; | 248 | goto out_free; |
299 | 249 | ||
300 | for (i = 0; i < 512; i++) | 250 | for (i = 511; i >= 0; i--) |
301 | n += sprintf(buf + n, "%02x", ext_csd[i]); | 251 | n += sprintf(buf + n, "%02x", ext_csd[i]); |
302 | n += sprintf(buf + n, "\n"); | 252 | n += sprintf(buf + n, "\n"); |
303 | BUG_ON(n != EXT_CSD_STR_LEN); | 253 | BUG_ON(n != EXT_CSD_STR_LEN); |
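Note: both sides of the hunk above emit the 512-byte EXT_CSD register as 1024 hex characters plus a newline, hence the BUG_ON against EXT_CSD_STR_LEN (1025); the change only flips the direction of the dump, byte 511 first instead of byte 0 first. A small user-space model of that dump, not the debugfs code itself:

    #include <stdio.h>

    static void dump_ext_csd(const unsigned char csd[512], int high_byte_first)
    {
        char buf[512 * 2 + 2];   /* 1024 hex chars + '\n' + NUL */
        int n = 0;
        int i;

        if (high_byte_first)
            for (i = 511; i >= 0; i--)
                n += sprintf(buf + n, "%02x", csd[i]);
        else
            for (i = 0; i < 512; i++)
                n += sprintf(buf + n, "%02x", csd[i]);
        n += sprintf(buf + n, "\n");

        printf("%d characters\n", n);   /* always 1025 */
    }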
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index ee2e16b1701..e09f0a7eb65 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/idr.h> | 17 | #include <linux/idr.h> |
18 | #include <linux/pagemap.h> | 18 | #include <linux/pagemap.h> |
19 | #include <linux/export.h> | ||
20 | #include <linux/leds.h> | 19 | #include <linux/leds.h> |
21 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
22 | #include <linux/suspend.h> | 21 | #include <linux/suspend.h> |
@@ -32,7 +31,6 @@ | |||
32 | static void mmc_host_classdev_release(struct device *dev) | 31 | static void mmc_host_classdev_release(struct device *dev) |
33 | { | 32 | { |
34 | struct mmc_host *host = cls_dev_to_mmc_host(dev); | 33 | struct mmc_host *host = cls_dev_to_mmc_host(dev); |
35 | mutex_destroy(&host->slot.lock); | ||
36 | kfree(host); | 34 | kfree(host); |
37 | } | 35 | } |
38 | 36 | ||
@@ -55,27 +53,6 @@ static DEFINE_IDR(mmc_host_idr); | |||
55 | static DEFINE_SPINLOCK(mmc_host_lock); | 53 | static DEFINE_SPINLOCK(mmc_host_lock); |
56 | 54 | ||
57 | #ifdef CONFIG_MMC_CLKGATE | 55 | #ifdef CONFIG_MMC_CLKGATE |
58 | static ssize_t clkgate_delay_show(struct device *dev, | ||
59 | struct device_attribute *attr, char *buf) | ||
60 | { | ||
61 | struct mmc_host *host = cls_dev_to_mmc_host(dev); | ||
62 | return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay); | ||
63 | } | ||
64 | |||
65 | static ssize_t clkgate_delay_store(struct device *dev, | ||
66 | struct device_attribute *attr, const char *buf, size_t count) | ||
67 | { | ||
68 | struct mmc_host *host = cls_dev_to_mmc_host(dev); | ||
69 | unsigned long flags, value; | ||
70 | |||
71 | if (kstrtoul(buf, 0, &value)) | ||
72 | return -EINVAL; | ||
73 | |||
74 | spin_lock_irqsave(&host->clk_lock, flags); | ||
75 | host->clkgate_delay = value; | ||
76 | spin_unlock_irqrestore(&host->clk_lock, flags); | ||
77 | return count; | ||
78 | } | ||
79 | 56 | ||
80 | /* | 57 | /* |
81 | * Enabling clock gating will make the core call out to the host | 58 | * Enabling clock gating will make the core call out to the host |
@@ -136,7 +113,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host) | |||
136 | static void mmc_host_clk_gate_work(struct work_struct *work) | 113 | static void mmc_host_clk_gate_work(struct work_struct *work) |
137 | { | 114 | { |
138 | struct mmc_host *host = container_of(work, struct mmc_host, | 115 | struct mmc_host *host = container_of(work, struct mmc_host, |
139 | clk_gate_work.work); | 116 | clk_gate_work); |
140 | 117 | ||
141 | mmc_host_clk_gate_delayed(host); | 118 | mmc_host_clk_gate_delayed(host); |
142 | } | 119 | } |
@@ -153,8 +130,6 @@ void mmc_host_clk_hold(struct mmc_host *host) | |||
153 | { | 130 | { |
154 | unsigned long flags; | 131 | unsigned long flags; |
155 | 132 | ||
156 | /* cancel any clock gating work scheduled by mmc_host_clk_release() */ | ||
157 | cancel_delayed_work_sync(&host->clk_gate_work); | ||
158 | mutex_lock(&host->clk_gate_mutex); | 133 | mutex_lock(&host->clk_gate_mutex); |
159 | spin_lock_irqsave(&host->clk_lock, flags); | 134 | spin_lock_irqsave(&host->clk_lock, flags); |
160 | if (host->clk_gated) { | 135 | if (host->clk_gated) { |
@@ -204,8 +179,7 @@ void mmc_host_clk_release(struct mmc_host *host) | |||
204 | host->clk_requests--; | 179 | host->clk_requests--; |
205 | if (mmc_host_may_gate_card(host->card) && | 180 | if (mmc_host_may_gate_card(host->card) && |
206 | !host->clk_requests) | 181 | !host->clk_requests) |
207 | schedule_delayed_work(&host->clk_gate_work, | 182 | queue_work(system_nrt_wq, &host->clk_gate_work); |
208 | msecs_to_jiffies(host->clkgate_delay)); | ||
209 | spin_unlock_irqrestore(&host->clk_lock, flags); | 183 | spin_unlock_irqrestore(&host->clk_lock, flags); |
210 | } | 184 | } |
211 | 185 | ||
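Note: this hunk swaps clock-gating strategies: the newer core arms a delayed work item after a sysfs-tunable clkgate_delay, while the code this patch restores queues the gate work immediately on system_nrt_wq once the last clock request is released. The sketch below models the release-side decision in plain C; it is not the kernel API, and a delay of 0 corresponds to the immediate-queue variant.

    #include <stdbool.h>

    struct clk_gate_model {
        unsigned int requests;    /* outstanding clock holds */
        unsigned long delay_ms;   /* 0 => gate as soon as possible */
    };

    /* Returns true when gate work should be scheduled, and how long to wait. */
    static bool clk_release(struct clk_gate_model *m, unsigned long *wait_ms)
    {
        if (m->requests == 0)
            return false;         /* unbalanced release, nothing to do */
        if (--m->requests > 0)
            return false;         /* clock still held by someone else */
        *wait_ms = m->delay_ms;
        return true;
    }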
@@ -238,13 +212,8 @@ static inline void mmc_host_clk_init(struct mmc_host *host) | |||
238 | host->clk_requests = 0; | 212 | host->clk_requests = 0; |
239 | /* Hold MCI clock for 8 cycles by default */ | 213 | /* Hold MCI clock for 8 cycles by default */ |
240 | host->clk_delay = 8; | 214 | host->clk_delay = 8; |
241 | /* | ||
242 | * Default clock gating delay is 0ms to avoid wasting power. | ||
243 | * This value can be tuned by writing into sysfs entry. | ||
244 | */ | ||
245 | host->clkgate_delay = 0; | ||
246 | host->clk_gated = false; | 215 | host->clk_gated = false; |
247 | INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work); | 216 | INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work); |
248 | spin_lock_init(&host->clk_lock); | 217 | spin_lock_init(&host->clk_lock); |
249 | mutex_init(&host->clk_gate_mutex); | 218 | mutex_init(&host->clk_gate_mutex); |
250 | } | 219 | } |
@@ -259,7 +228,7 @@ static inline void mmc_host_clk_exit(struct mmc_host *host) | |||
259 | * Wait for any outstanding gate and then make sure we're | 228 | * Wait for any outstanding gate and then make sure we're |
260 | * ungated before exiting. | 229 | * ungated before exiting. |
261 | */ | 230 | */ |
262 | if (cancel_delayed_work_sync(&host->clk_gate_work)) | 231 | if (cancel_work_sync(&host->clk_gate_work)) |
263 | mmc_host_clk_gate_delayed(host); | 232 | mmc_host_clk_gate_delayed(host); |
264 | if (host->clk_gated) | 233 | if (host->clk_gated) |
265 | mmc_host_clk_hold(host); | 234 | mmc_host_clk_hold(host); |
@@ -267,17 +236,6 @@ static inline void mmc_host_clk_exit(struct mmc_host *host) | |||
267 | WARN_ON(host->clk_requests > 1); | 236 | WARN_ON(host->clk_requests > 1); |
268 | } | 237 | } |
269 | 238 | ||
270 | static inline void mmc_host_clk_sysfs_init(struct mmc_host *host) | ||
271 | { | ||
272 | host->clkgate_delay_attr.show = clkgate_delay_show; | ||
273 | host->clkgate_delay_attr.store = clkgate_delay_store; | ||
274 | sysfs_attr_init(&host->clkgate_delay_attr.attr); | ||
275 | host->clkgate_delay_attr.attr.name = "clkgate_delay"; | ||
276 | host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR; | ||
277 | if (device_create_file(&host->class_dev, &host->clkgate_delay_attr)) | ||
278 | pr_err("%s: Failed to create clkgate_delay sysfs entry\n", | ||
279 | mmc_hostname(host)); | ||
280 | } | ||
281 | #else | 239 | #else |
282 | 240 | ||
283 | static inline void mmc_host_clk_init(struct mmc_host *host) | 241 | static inline void mmc_host_clk_init(struct mmc_host *host) |
@@ -288,10 +246,6 @@ static inline void mmc_host_clk_exit(struct mmc_host *host) | |||
288 | { | 246 | { |
289 | } | 247 | } |
290 | 248 | ||
291 | static inline void mmc_host_clk_sysfs_init(struct mmc_host *host) | ||
292 | { | ||
293 | } | ||
294 | |||
295 | #endif | 249 | #endif |
296 | 250 | ||
297 | /** | 251 | /** |
@@ -313,8 +267,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) | |||
313 | if (!host) | 267 | if (!host) |
314 | return NULL; | 268 | return NULL; |
315 | 269 | ||
316 | /* scanning will be enabled when we're ready */ | ||
317 | host->rescan_disable = 1; | ||
318 | spin_lock(&mmc_host_lock); | 270 | spin_lock(&mmc_host_lock); |
319 | err = idr_get_new(&mmc_host_idr, host, &host->index); | 271 | err = idr_get_new(&mmc_host_idr, host, &host->index); |
320 | spin_unlock(&mmc_host_lock); | 272 | spin_unlock(&mmc_host_lock); |
@@ -330,12 +282,12 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) | |||
330 | 282 | ||
331 | mmc_host_clk_init(host); | 283 | mmc_host_clk_init(host); |
332 | 284 | ||
333 | mutex_init(&host->slot.lock); | ||
334 | host->slot.cd_irq = -EINVAL; | ||
335 | |||
336 | spin_lock_init(&host->lock); | 285 | spin_lock_init(&host->lock); |
337 | init_waitqueue_head(&host->wq); | 286 | init_waitqueue_head(&host->wq); |
287 | wake_lock_init(&host->detect_wake_lock, WAKE_LOCK_SUSPEND, | ||
288 | kasprintf(GFP_KERNEL, "%s_detect", mmc_hostname(host))); | ||
338 | INIT_DELAYED_WORK(&host->detect, mmc_rescan); | 289 | INIT_DELAYED_WORK(&host->detect, mmc_rescan); |
290 | INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable); | ||
339 | #ifdef CONFIG_PM | 291 | #ifdef CONFIG_PM |
340 | host->pm_notify.notifier_call = mmc_pm_notify; | 292 | host->pm_notify.notifier_call = mmc_pm_notify; |
341 | #endif | 293 | #endif |
@@ -384,10 +336,10 @@ int mmc_add_host(struct mmc_host *host) | |||
384 | #ifdef CONFIG_DEBUG_FS | 336 | #ifdef CONFIG_DEBUG_FS |
385 | mmc_add_host_debugfs(host); | 337 | mmc_add_host_debugfs(host); |
386 | #endif | 338 | #endif |
387 | mmc_host_clk_sysfs_init(host); | ||
388 | 339 | ||
389 | mmc_start_host(host); | 340 | mmc_start_host(host); |
390 | register_pm_notifier(&host->pm_notify); | 341 | if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY)) |
342 | register_pm_notifier(&host->pm_notify); | ||
391 | 343 | ||
392 | return 0; | 344 | return 0; |
393 | } | 345 | } |
@@ -404,7 +356,9 @@ EXPORT_SYMBOL(mmc_add_host); | |||
404 | */ | 356 | */ |
405 | void mmc_remove_host(struct mmc_host *host) | 357 | void mmc_remove_host(struct mmc_host *host) |
406 | { | 358 | { |
407 | unregister_pm_notifier(&host->pm_notify); | 359 | if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY)) |
360 | unregister_pm_notifier(&host->pm_notify); | ||
361 | |||
408 | mmc_stop_host(host); | 362 | mmc_stop_host(host); |
409 | 363 | ||
410 | #ifdef CONFIG_DEBUG_FS | 364 | #ifdef CONFIG_DEBUG_FS |
@@ -431,6 +385,7 @@ void mmc_free_host(struct mmc_host *host) | |||
431 | spin_lock(&mmc_host_lock); | 385 | spin_lock(&mmc_host_lock); |
432 | idr_remove(&mmc_host_idr, host->index); | 386 | idr_remove(&mmc_host_idr, host->index); |
433 | spin_unlock(&mmc_host_lock); | 387 | spin_unlock(&mmc_host_lock); |
388 | wake_lock_destroy(&host->detect_wake_lock); | ||
434 | 389 | ||
435 | put_device(&host->class_dev); | 390 | put_device(&host->class_dev); |
436 | } | 391 | } |
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h index f2ab9e57812..fb8a5cd2e4a 100644 --- a/drivers/mmc/core/host.h +++ b/drivers/mmc/core/host.h | |||
@@ -15,5 +15,27 @@ | |||
15 | int mmc_register_host_class(void); | 15 | int mmc_register_host_class(void); |
16 | void mmc_unregister_host_class(void); | 16 | void mmc_unregister_host_class(void); |
17 | 17 | ||
18 | #ifdef CONFIG_MMC_CLKGATE | ||
19 | void mmc_host_clk_hold(struct mmc_host *host); | ||
20 | void mmc_host_clk_release(struct mmc_host *host); | ||
21 | unsigned int mmc_host_clk_rate(struct mmc_host *host); | ||
22 | |||
23 | #else | ||
24 | static inline void mmc_host_clk_hold(struct mmc_host *host) | ||
25 | { | ||
26 | } | ||
27 | |||
28 | static inline void mmc_host_clk_release(struct mmc_host *host) | ||
29 | { | ||
30 | } | ||
31 | |||
32 | static inline unsigned int mmc_host_clk_rate(struct mmc_host *host) | ||
33 | { | ||
34 | return host->ios.clock; | ||
35 | } | ||
36 | #endif | ||
37 | |||
38 | void mmc_host_deeper_disable(struct work_struct *work); | ||
39 | |||
18 | #endif | 40 | #endif |
19 | 41 | ||
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index e6e39111e05..69fb2275845 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
@@ -4,6 +4,7 @@ | |||
4 | * Copyright (C) 2003-2004 Russell King, All Rights Reserved. | 4 | * Copyright (C) 2003-2004 Russell King, All Rights Reserved. |
5 | * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved. | 5 | * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved. |
6 | * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. | 6 | * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. |
7 | * Copyright (c) 2012 NVIDIA Corporation, All Rights Reserved. | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
@@ -12,7 +13,6 @@ | |||
12 | 13 | ||
13 | #include <linux/err.h> | 14 | #include <linux/err.h> |
14 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
15 | #include <linux/stat.h> | ||
16 | 16 | ||
17 | #include <linux/mmc/host.h> | 17 | #include <linux/mmc/host.h> |
18 | #include <linux/mmc/card.h> | 18 | #include <linux/mmc/card.h> |
@@ -102,7 +102,7 @@ static int mmc_decode_cid(struct mmc_card *card) | |||
102 | break; | 102 | break; |
103 | 103 | ||
104 | default: | 104 | default: |
105 | pr_err("%s: card has unknown MMCA version %d\n", | 105 | printk(KERN_ERR "%s: card has unknown MMCA version %d\n", |
106 | mmc_hostname(card->host), card->csd.mmca_vsn); | 106 | mmc_hostname(card->host), card->csd.mmca_vsn); |
107 | return -EINVAL; | 107 | return -EINVAL; |
108 | } | 108 | } |
@@ -136,7 +136,7 @@ static int mmc_decode_csd(struct mmc_card *card) | |||
136 | */ | 136 | */ |
137 | csd->structure = UNSTUFF_BITS(resp, 126, 2); | 137 | csd->structure = UNSTUFF_BITS(resp, 126, 2); |
138 | if (csd->structure == 0) { | 138 | if (csd->structure == 0) { |
139 | pr_err("%s: unrecognised CSD structure version %d\n", | 139 | printk(KERN_ERR "%s: unrecognised CSD structure version %d\n", |
140 | mmc_hostname(card->host), csd->structure); | 140 | mmc_hostname(card->host), csd->structure); |
141 | return -EINVAL; | 141 | return -EINVAL; |
142 | } | 142 | } |
@@ -196,7 +196,7 @@ static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd) | |||
196 | */ | 196 | */ |
197 | ext_csd = kmalloc(512, GFP_KERNEL); | 197 | ext_csd = kmalloc(512, GFP_KERNEL); |
198 | if (!ext_csd) { | 198 | if (!ext_csd) { |
199 | pr_err("%s: could not allocate a buffer to " | 199 | printk(KERN_ERR "%s: could not allocate a buffer to " |
200 | "receive the ext_csd.\n", mmc_hostname(card->host)); | 200 | "receive the ext_csd.\n", mmc_hostname(card->host)); |
201 | return -ENOMEM; | 201 | return -ENOMEM; |
202 | } | 202 | } |
@@ -218,12 +218,12 @@ static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd) | |||
218 | * stored in their CSD. | 218 | * stored in their CSD. |
219 | */ | 219 | */ |
220 | if (card->csd.capacity == (4096 * 512)) { | 220 | if (card->csd.capacity == (4096 * 512)) { |
221 | pr_err("%s: unable to read EXT_CSD " | 221 | printk(KERN_ERR "%s: unable to read EXT_CSD " |
222 | "on a possible high capacity card. " | 222 | "on a possible high capacity card. " |
223 | "Card will be ignored.\n", | 223 | "Card will be ignored.\n", |
224 | mmc_hostname(card->host)); | 224 | mmc_hostname(card->host)); |
225 | } else { | 225 | } else { |
226 | pr_warning("%s: unable to read " | 226 | printk(KERN_WARNING "%s: unable to read " |
227 | "EXT_CSD, performance might " | 227 | "EXT_CSD, performance might " |
228 | "suffer.\n", | 228 | "suffer.\n", |
229 | mmc_hostname(card->host)); | 229 | mmc_hostname(card->host)); |
@@ -235,44 +235,12 @@ static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd) | |||
235 | return err; | 235 | return err; |
236 | } | 236 | } |
237 | 237 | ||
238 | static void mmc_select_card_type(struct mmc_card *card) | ||
239 | { | ||
240 | struct mmc_host *host = card->host; | ||
241 | u8 card_type = card->ext_csd.raw_card_type & EXT_CSD_CARD_TYPE_MASK; | ||
242 | u32 caps = host->caps, caps2 = host->caps2; | ||
243 | unsigned int hs_max_dtr = 0; | ||
244 | |||
245 | if (card_type & EXT_CSD_CARD_TYPE_26) | ||
246 | hs_max_dtr = MMC_HIGH_26_MAX_DTR; | ||
247 | |||
248 | if (caps & MMC_CAP_MMC_HIGHSPEED && | ||
249 | card_type & EXT_CSD_CARD_TYPE_52) | ||
250 | hs_max_dtr = MMC_HIGH_52_MAX_DTR; | ||
251 | |||
252 | if ((caps & MMC_CAP_1_8V_DDR && | ||
253 | card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) || | ||
254 | (caps & MMC_CAP_1_2V_DDR && | ||
255 | card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)) | ||
256 | hs_max_dtr = MMC_HIGH_DDR_MAX_DTR; | ||
257 | |||
258 | if ((caps2 & MMC_CAP2_HS200_1_8V_SDR && | ||
259 | card_type & EXT_CSD_CARD_TYPE_SDR_1_8V) || | ||
260 | (caps2 & MMC_CAP2_HS200_1_2V_SDR && | ||
261 | card_type & EXT_CSD_CARD_TYPE_SDR_1_2V)) | ||
262 | hs_max_dtr = MMC_HS200_MAX_DTR; | ||
263 | |||
264 | card->ext_csd.hs_max_dtr = hs_max_dtr; | ||
265 | card->ext_csd.card_type = card_type; | ||
266 | } | ||
267 | |||
268 | /* | 238 | /* |
269 | * Decode extended CSD. | 239 | * Decode extended CSD. |
270 | */ | 240 | */ |
271 | static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) | 241 | static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) |
272 | { | 242 | { |
273 | int err = 0, idx; | 243 | int err = 0; |
274 | unsigned int part_size; | ||
275 | u8 hc_erase_grp_sz = 0, hc_wp_grp_sz = 0; | ||
276 | 244 | ||
277 | BUG_ON(!card); | 245 | BUG_ON(!card); |
278 | 246 | ||
@@ -283,7 +251,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) | |||
283 | card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE]; | 251 | card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE]; |
284 | if (card->csd.structure == 3) { | 252 | if (card->csd.structure == 3) { |
285 | if (card->ext_csd.raw_ext_csd_structure > 2) { | 253 | if (card->ext_csd.raw_ext_csd_structure > 2) { |
286 | pr_err("%s: unrecognised EXT_CSD structure " | 254 | printk(KERN_ERR "%s: unrecognised EXT_CSD structure " |
287 | "version %d\n", mmc_hostname(card->host), | 255 | "version %d\n", mmc_hostname(card->host), |
288 | card->ext_csd.raw_ext_csd_structure); | 256 | card->ext_csd.raw_ext_csd_structure); |
289 | err = -EINVAL; | 257 | err = -EINVAL; |
@@ -293,7 +261,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) | |||
293 | 261 | ||
294 | card->ext_csd.rev = ext_csd[EXT_CSD_REV]; | 262 | card->ext_csd.rev = ext_csd[EXT_CSD_REV]; |
295 | if (card->ext_csd.rev > 6) { | 263 | if (card->ext_csd.rev > 6) { |
296 | pr_err("%s: unrecognised EXT_CSD revision %d\n", | 264 | printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n", |
297 | mmc_hostname(card->host), card->ext_csd.rev); | 265 | mmc_hostname(card->host), card->ext_csd.rev); |
298 | err = -EINVAL; | 266 | err = -EINVAL; |
299 | goto out; | 267 | goto out; |
@@ -314,9 +282,35 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) | |||
314 | if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512) | 282 | if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512) |
315 | mmc_card_set_blockaddr(card); | 283 | mmc_card_set_blockaddr(card); |
316 | } | 284 | } |
317 | |||
318 | card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE]; | 285 | card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE]; |
319 | mmc_select_card_type(card); | 286 | switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) { |
287 | case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 | | ||
288 | EXT_CSD_CARD_TYPE_26: | ||
289 | card->ext_csd.hs_max_dtr = 52000000; | ||
290 | card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_52; | ||
291 | break; | ||
292 | case EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_52 | | ||
293 | EXT_CSD_CARD_TYPE_26: | ||
294 | card->ext_csd.hs_max_dtr = 52000000; | ||
295 | card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_2V; | ||
296 | break; | ||
297 | case EXT_CSD_CARD_TYPE_DDR_1_8V | EXT_CSD_CARD_TYPE_52 | | ||
298 | EXT_CSD_CARD_TYPE_26: | ||
299 | card->ext_csd.hs_max_dtr = 52000000; | ||
300 | card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_8V; | ||
301 | break; | ||
302 | case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26: | ||
303 | card->ext_csd.hs_max_dtr = 52000000; | ||
304 | break; | ||
305 | case EXT_CSD_CARD_TYPE_26: | ||
306 | card->ext_csd.hs_max_dtr = 26000000; | ||
307 | break; | ||
308 | default: | ||
309 | /* MMC v4 spec says this cannot happen */ | ||
310 | printk(KERN_WARNING "%s: card is mmc v4 but doesn't " | ||
311 | "support any high-speed modes.\n", | ||
312 | mmc_hostname(card->host)); | ||
313 | } | ||
320 | 314 | ||
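Note: the switch restored above reduces the EXT_CSD card-type bits to a maximum high-speed data rate: any 52 MHz-capable combination (DDR or not) yields 52 MHz, a 26 MHz-only card gets 26 MHz, and anything else triggers the "cannot happen" warning. A compact model of that reduction; the flag values here are illustrative, not the spec-defined bit positions:

    #include <stdio.h>

    #define TYPE_26   (1u << 0)   /* illustrative flag values only */
    #define TYPE_52   (1u << 1)
    #define TYPE_DDR  (1u << 2)

    static unsigned int hs_max_dtr(unsigned int card_type)
    {
        if (card_type & (TYPE_DDR | TYPE_52))
            return 52000000;      /* every DDR variant is also 52 MHz capable */
        if (card_type & TYPE_26)
            return 26000000;
        return 0;                 /* the v4 spec says this should not happen */
    }

    int main(void)
    {
        printf("%u\n", hs_max_dtr(TYPE_26 | TYPE_52));   /* prints 52000000 */
        return 0;
    }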
321 | card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT]; | 315 | card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT]; |
322 | card->ext_csd.raw_erase_timeout_mult = | 316 | card->ext_csd.raw_erase_timeout_mult = |
@@ -347,19 +341,11 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) | |||
347 | * There are two boot regions of equal size, defined in | 341 | * There are two boot regions of equal size, defined in |
348 | * multiples of 128K. | 342 | * multiples of 128K. |
349 | */ | 343 | */ |
350 | if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) { | 344 | card->ext_csd.boot_size = ext_csd[EXT_CSD_BOOT_MULT] << 17; |
351 | for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) { | ||
352 | part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17; | ||
353 | mmc_part_add(card, part_size, | ||
354 | EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx, | ||
355 | "boot%d", idx, true, | ||
356 | MMC_BLK_DATA_AREA_BOOT); | ||
357 | } | ||
358 | } | ||
359 | } | 345 | } |
360 | 346 | ||
361 | card->ext_csd.raw_hc_erase_gap_size = | 347 | card->ext_csd.raw_hc_erase_gap_size = |
362 | ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; | 348 | ext_csd[EXT_CSD_PARTITION_ATTRIBUTE]; |
363 | card->ext_csd.raw_sec_trim_mult = | 349 | card->ext_csd.raw_sec_trim_mult = |
364 | ext_csd[EXT_CSD_SEC_TRIM_MULT]; | 350 | ext_csd[EXT_CSD_SEC_TRIM_MULT]; |
365 | card->ext_csd.raw_sec_erase_mult = | 351 | card->ext_csd.raw_sec_erase_mult = |
@@ -377,9 +363,9 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) | |||
377 | card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT]; | 363 | card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT]; |
378 | if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) && | 364 | if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) && |
379 | (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) { | 365 | (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) { |
380 | hc_erase_grp_sz = | 366 | u8 hc_erase_grp_sz = |
381 | ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; | 367 | ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; |
382 | hc_wp_grp_sz = | 368 | u8 hc_wp_grp_sz = |
383 | ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; | 369 | ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; |
384 | 370 | ||
385 | card->ext_csd.enhanced_area_en = 1; | 371 | card->ext_csd.enhanced_area_en = 1; |
@@ -408,42 +394,6 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) | |||
408 | card->ext_csd.enhanced_area_offset = -EINVAL; | 394 | card->ext_csd.enhanced_area_offset = -EINVAL; |
409 | card->ext_csd.enhanced_area_size = -EINVAL; | 395 | card->ext_csd.enhanced_area_size = -EINVAL; |
410 | } | 396 | } |
411 | |||
412 | /* | ||
413 | * General purpose partition feature support -- | ||
414 | * If ext_csd has the size of general purpose partitions, | ||
415 | * set size, part_cfg, partition name in mmc_part. | ||
416 | */ | ||
417 | if (ext_csd[EXT_CSD_PARTITION_SUPPORT] & | ||
418 | EXT_CSD_PART_SUPPORT_PART_EN) { | ||
419 | if (card->ext_csd.enhanced_area_en != 1) { | ||
420 | hc_erase_grp_sz = | ||
421 | ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; | ||
422 | hc_wp_grp_sz = | ||
423 | ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; | ||
424 | |||
425 | card->ext_csd.enhanced_area_en = 1; | ||
426 | } | ||
427 | |||
428 | for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) { | ||
429 | if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] && | ||
430 | !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] && | ||
431 | !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]) | ||
432 | continue; | ||
433 | part_size = | ||
434 | (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2] | ||
435 | << 16) + | ||
436 | (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] | ||
437 | << 8) + | ||
438 | ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3]; | ||
439 | part_size *= (size_t)(hc_erase_grp_sz * | ||
440 | hc_wp_grp_sz); | ||
441 | mmc_part_add(card, part_size << 19, | ||
442 | EXT_CSD_PART_CONFIG_ACC_GP0 + idx, | ||
443 | "gp%d", idx, false, | ||
444 | MMC_BLK_DATA_AREA_GP); | ||
445 | } | ||
446 | } | ||
447 | card->ext_csd.sec_trim_mult = | 397 | card->ext_csd.sec_trim_mult = |
448 | ext_csd[EXT_CSD_SEC_TRIM_MULT]; | 398 | ext_csd[EXT_CSD_SEC_TRIM_MULT]; |
449 | card->ext_csd.sec_erase_mult = | 399 | card->ext_csd.sec_erase_mult = |
@@ -452,33 +402,16 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) | |||
452 | ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]; | 402 | ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]; |
453 | card->ext_csd.trim_timeout = 300 * | 403 | card->ext_csd.trim_timeout = 300 * |
454 | ext_csd[EXT_CSD_TRIM_MULT]; | 404 | ext_csd[EXT_CSD_TRIM_MULT]; |
455 | |||
456 | /* | ||
457 | * Note that the call to mmc_part_add above defaults to read | ||
458 | * only. If this default assumption is changed, the call must | ||
459 | * take into account the value of boot_locked below. | ||
460 | */ | ||
461 | card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP]; | ||
462 | card->ext_csd.boot_ro_lockable = true; | ||
463 | } | 405 | } |
464 | 406 | ||
407 | card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT]; | ||
465 | if (card->ext_csd.rev >= 5) { | 408 | if (card->ext_csd.rev >= 5) { |
466 | /* check whether the eMMC card supports BKOPS */ | 409 | card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM]; |
467 | if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) { | ||
468 | card->ext_csd.bkops = 1; | ||
469 | card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN]; | ||
470 | card->ext_csd.raw_bkops_status = | ||
471 | ext_csd[EXT_CSD_BKOPS_STATUS]; | ||
472 | if (!card->ext_csd.bkops_en) | ||
473 | pr_info("%s: BKOPS_EN bit is not set\n", | ||
474 | mmc_hostname(card->host)); | ||
475 | } | ||
476 | |||
477 | /* check whether the eMMC card supports HPI */ | 410 | /* check whether the eMMC card supports HPI */ |
478 | if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) { | 411 | if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) { |
479 | card->ext_csd.hpi = 1; | 412 | card->ext_csd.hpi = 1; |
480 | if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2) | 413 | if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2) |
481 | card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION; | 414 | card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION; |
482 | else | 415 | else |
483 | card->ext_csd.hpi_cmd = MMC_SEND_STATUS; | 416 | card->ext_csd.hpi_cmd = MMC_SEND_STATUS; |
484 | /* | 417 | /* |
@@ -489,59 +422,16 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) | |||
489 | ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10; | 422 | ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10; |
490 | } | 423 | } |
491 | 424 | ||
492 | card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM]; | 425 | /* Check whether the eMMC card supports background ops */ |
493 | card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION]; | 426 | if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) |
494 | 427 | card->ext_csd.bk_ops = 1; | |
495 | /* | ||
496 | * RPMB regions are defined in multiples of 128K. | ||
497 | */ | ||
498 | card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT]; | ||
499 | if (ext_csd[EXT_CSD_RPMB_MULT]) { | ||
500 | mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17, | ||
501 | EXT_CSD_PART_CONFIG_ACC_RPMB, | ||
502 | "rpmb", 0, false, | ||
503 | MMC_BLK_DATA_AREA_RPMB); | ||
504 | } | ||
505 | } | 428 | } |
506 | 429 | ||
507 | card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT]; | ||
508 | if (ext_csd[EXT_CSD_ERASED_MEM_CONT]) | 430 | if (ext_csd[EXT_CSD_ERASED_MEM_CONT]) |
509 | card->erased_byte = 0xFF; | 431 | card->erased_byte = 0xFF; |
510 | else | 432 | else |
511 | card->erased_byte = 0x0; | 433 | card->erased_byte = 0x0; |
512 | 434 | ||
513 | /* eMMC v4.5 or later */ | ||
514 | if (card->ext_csd.rev >= 6) { | ||
515 | card->ext_csd.feature_support |= MMC_DISCARD_FEATURE; | ||
516 | |||
517 | card->ext_csd.generic_cmd6_time = 10 * | ||
518 | ext_csd[EXT_CSD_GENERIC_CMD6_TIME]; | ||
519 | card->ext_csd.power_off_longtime = 10 * | ||
520 | ext_csd[EXT_CSD_POWER_OFF_LONG_TIME]; | ||
521 | |||
522 | card->ext_csd.cache_size = | ||
523 | ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 | | ||
524 | ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 | | ||
525 | ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 | | ||
526 | ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24; | ||
527 | |||
528 | if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1) | ||
529 | card->ext_csd.data_sector_size = 4096; | ||
530 | else | ||
531 | card->ext_csd.data_sector_size = 512; | ||
532 | |||
533 | if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) && | ||
534 | (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) { | ||
535 | card->ext_csd.data_tag_unit_size = | ||
536 | ((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) * | ||
537 | (card->ext_csd.data_sector_size); | ||
538 | } else { | ||
539 | card->ext_csd.data_tag_unit_size = 0; | ||
540 | } | ||
541 | } else { | ||
542 | card->ext_csd.data_sector_size = 512; | ||
543 | } | ||
544 | |||
545 | out: | 435 | out: |
546 | return err; | 436 | return err; |
547 | } | 437 | } |
@@ -563,12 +453,16 @@ static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width) | |||
563 | err = mmc_get_ext_csd(card, &bw_ext_csd); | 453 | err = mmc_get_ext_csd(card, &bw_ext_csd); |
564 | 454 | ||
565 | if (err || bw_ext_csd == NULL) { | 455 | if (err || bw_ext_csd == NULL) { |
566 | err = -EINVAL; | 456 | if (bus_width != MMC_BUS_WIDTH_1) |
457 | err = -EINVAL; | ||
567 | goto out; | 458 | goto out; |
568 | } | 459 | } |
569 | 460 | ||
461 | if (bus_width == MMC_BUS_WIDTH_1) | ||
462 | goto out; | ||
463 | |||
570 | /* only compare read only fields */ | 464 | /* only compare read only fields */ |
571 | err = !((card->ext_csd.raw_partition_support == | 465 | err = (!(card->ext_csd.raw_partition_support == |
572 | bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) && | 466 | bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) && |
573 | (card->ext_csd.raw_erased_mem_count == | 467 | (card->ext_csd.raw_erased_mem_count == |
574 | bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) && | 468 | bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) && |
@@ -626,8 +520,6 @@ MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial); | |||
626 | MMC_DEV_ATTR(enhanced_area_offset, "%llu\n", | 520 | MMC_DEV_ATTR(enhanced_area_offset, "%llu\n", |
627 | card->ext_csd.enhanced_area_offset); | 521 | card->ext_csd.enhanced_area_offset); |
628 | MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size); | 522 | MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size); |
629 | MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult); | ||
630 | MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors); | ||
631 | 523 | ||
632 | static struct attribute *mmc_std_attrs[] = { | 524 | static struct attribute *mmc_std_attrs[] = { |
633 | &dev_attr_cid.attr, | 525 | &dev_attr_cid.attr, |
@@ -643,8 +535,6 @@ static struct attribute *mmc_std_attrs[] = { | |||
643 | &dev_attr_serial.attr, | 535 | &dev_attr_serial.attr, |
644 | &dev_attr_enhanced_area_offset.attr, | 536 | &dev_attr_enhanced_area_offset.attr, |
645 | &dev_attr_enhanced_area_size.attr, | 537 | &dev_attr_enhanced_area_size.attr, |
646 | &dev_attr_raw_rpmb_size_mult.attr, | ||
647 | &dev_attr_rel_sectors.attr, | ||
648 | NULL, | 538 | NULL, |
649 | }; | 539 | }; |
650 | 540 | ||
@@ -662,166 +552,6 @@ static struct device_type mmc_type = { | |||
662 | }; | 552 | }; |
663 | 553 | ||
664 | /* | 554 | /* |
665 | * Select the PowerClass for the current bus width | ||
666 | * If power class is defined for 4/8 bit bus in the | ||
667 | * extended CSD register, select it by executing the | ||
668 | * mmc_switch command. | ||
669 | */ | ||
670 | static int mmc_select_powerclass(struct mmc_card *card, | ||
671 | unsigned int bus_width, u8 *ext_csd) | ||
672 | { | ||
673 | int err = 0; | ||
674 | unsigned int pwrclass_val; | ||
675 | unsigned int index = 0; | ||
676 | struct mmc_host *host; | ||
677 | |||
678 | BUG_ON(!card); | ||
679 | |||
680 | host = card->host; | ||
681 | BUG_ON(!host); | ||
682 | |||
683 | if (ext_csd == NULL) | ||
684 | return 0; | ||
685 | |||
686 | /* Power class selection is supported for versions >= 4.0 */ | ||
687 | if (card->csd.mmca_vsn < CSD_SPEC_VER_4) | ||
688 | return 0; | ||
689 | |||
690 | /* Power class values are defined only for 4/8 bit bus */ | ||
691 | if (bus_width == EXT_CSD_BUS_WIDTH_1) | ||
692 | return 0; | ||
693 | |||
694 | switch (1 << host->ios.vdd) { | ||
695 | case MMC_VDD_165_195: | ||
696 | if (host->ios.clock <= 26000000) | ||
697 | index = EXT_CSD_PWR_CL_26_195; | ||
698 | else if (host->ios.clock <= 52000000) | ||
699 | index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ? | ||
700 | EXT_CSD_PWR_CL_52_195 : | ||
701 | EXT_CSD_PWR_CL_DDR_52_195; | ||
702 | else if (host->ios.clock <= 200000000) | ||
703 | index = EXT_CSD_PWR_CL_200_195; | ||
704 | break; | ||
705 | case MMC_VDD_27_28: | ||
706 | case MMC_VDD_28_29: | ||
707 | case MMC_VDD_29_30: | ||
708 | case MMC_VDD_30_31: | ||
709 | case MMC_VDD_31_32: | ||
710 | case MMC_VDD_32_33: | ||
711 | case MMC_VDD_33_34: | ||
712 | case MMC_VDD_34_35: | ||
713 | case MMC_VDD_35_36: | ||
714 | if (host->ios.clock <= 26000000) | ||
715 | index = EXT_CSD_PWR_CL_26_360; | ||
716 | else if (host->ios.clock <= 52000000) | ||
717 | index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ? | ||
718 | EXT_CSD_PWR_CL_52_360 : | ||
719 | EXT_CSD_PWR_CL_DDR_52_360; | ||
720 | else if (host->ios.clock <= 200000000) | ||
721 | index = EXT_CSD_PWR_CL_200_360; | ||
722 | break; | ||
723 | default: | ||
724 | pr_warning("%s: Voltage range not supported " | ||
725 | "for power class.\n", mmc_hostname(host)); | ||
726 | return -EINVAL; | ||
727 | } | ||
728 | |||
729 | pwrclass_val = ext_csd[index]; | ||
730 | |||
731 | if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8)) | ||
732 | pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >> | ||
733 | EXT_CSD_PWR_CL_8BIT_SHIFT; | ||
734 | else | ||
735 | pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >> | ||
736 | EXT_CSD_PWR_CL_4BIT_SHIFT; | ||
737 | |||
738 | /* If the power class is different from the default value */ | ||
739 | if (pwrclass_val > 0) { | ||
740 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | ||
741 | EXT_CSD_POWER_CLASS, | ||
742 | pwrclass_val, | ||
743 | card->ext_csd.generic_cmd6_time); | ||
744 | } | ||
745 | |||
746 | return err; | ||
747 | } | ||
748 | |||
749 | /* | ||
750 | * Selects the desired buswidth and switch to the HS200 mode | ||
751 | * if bus width set without error | ||
752 | */ | ||
753 | static int mmc_select_hs200(struct mmc_card *card) | ||
754 | { | ||
755 | int idx, err = -EINVAL; | ||
756 | struct mmc_host *host; | ||
757 | static unsigned ext_csd_bits[] = { | ||
758 | EXT_CSD_BUS_WIDTH_4, | ||
759 | EXT_CSD_BUS_WIDTH_8, | ||
760 | }; | ||
761 | static unsigned bus_widths[] = { | ||
762 | MMC_BUS_WIDTH_4, | ||
763 | MMC_BUS_WIDTH_8, | ||
764 | }; | ||
765 | |||
766 | BUG_ON(!card); | ||
767 | |||
768 | host = card->host; | ||
769 | |||
770 | if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V && | ||
771 | host->caps2 & MMC_CAP2_HS200_1_2V_SDR) | ||
772 | err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0); | ||
773 | |||
774 | if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V && | ||
775 | host->caps2 & MMC_CAP2_HS200_1_8V_SDR) | ||
776 | err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, 0); | ||
777 | |||
778 | /* If this fails, try again during the next card power cycle */ | ||
779 | if (err) | ||
780 | goto err; | ||
781 | |||
782 | idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 1 : 0; | ||
783 | |||
784 | /* | ||
785 | * Unlike SD, MMC cards don't have a configuration register to report | ||
786 | * the supported bus width. So the bus test command should be run to | ||
787 | * identify the supported bus width, or the ext csd values at the current | ||
788 | * bus width compared against the ext csd values read earlier in 1 bit mode. | ||
789 | */ | ||
790 | for (; idx >= 0; idx--) { | ||
791 | |||
792 | /* | ||
793 | * If the host is capable of 8bit transfer, switch | ||
794 | * the device to work in 8bit transfer mode. If the | ||
795 | * mmc switch command returns an error, fall back to | ||
796 | * 4bit transfer mode. On success, set the corresponding | ||
797 | * bus width on the host. | ||
798 | */ | ||
799 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | ||
800 | EXT_CSD_BUS_WIDTH, | ||
801 | ext_csd_bits[idx], | ||
802 | card->ext_csd.generic_cmd6_time); | ||
803 | if (err) | ||
804 | continue; | ||
805 | |||
806 | mmc_set_bus_width(card->host, bus_widths[idx]); | ||
807 | |||
808 | if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST)) | ||
809 | err = mmc_compare_ext_csds(card, bus_widths[idx]); | ||
810 | else | ||
811 | err = mmc_bus_test(card, bus_widths[idx]); | ||
812 | if (!err) | ||
813 | break; | ||
814 | } | ||
815 | |||
816 | /* switch to HS200 mode if bus width set successfully */ | ||
817 | if (!err) | ||
818 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | ||
819 | EXT_CSD_HS_TIMING, 2, 0); | ||
820 | err: | ||
821 | return err; | ||
822 | } | ||
823 | |||
824 | /* | ||
825 | * Handle the detection and initialisation of a card. | 555 | * Handle the detection and initialisation of a card. |
826 | * | 556 | * |
827 | * In the case of a resume, "oldcard" will contain the card | 557 | * In the case of a resume, "oldcard" will contain the card |
@@ -840,16 +570,11 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, | |||
840 | BUG_ON(!host); | 570 | BUG_ON(!host); |
841 | WARN_ON(!host->claimed); | 571 | WARN_ON(!host->claimed); |
842 | 572 | ||
843 | /* Set correct bus mode for MMC before attempting init */ | ||
844 | if (!mmc_host_is_spi(host)) | ||
845 | mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN); | ||
846 | |||
847 | /* | 573 | /* |
848 | * Since we're changing the OCR value, we seem to | 574 | * Since we're changing the OCR value, we seem to |
849 | * need to tell some cards to go back to the idle | 575 | * need to tell some cards to go back to the idle |
850 | * state. We wait 1ms to give cards time to | 576 | * state. We wait 1ms to give cards time to |
851 | * respond. | 577 | * respond. |
852 | * mmc_go_idle is needed for eMMC that are asleep | ||
853 | */ | 578 | */ |
854 | mmc_go_idle(host); | 579 | mmc_go_idle(host); |
855 | 580 | ||
@@ -963,11 +688,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, | |||
963 | * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF | 688 | * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF |
964 | * bit. This bit will be lost every time after a reset or power off. | 689 | * bit. This bit will be lost every time after a reset or power off. |
965 | */ | 690 | */ |
966 | if (card->ext_csd.enhanced_area_en || | 691 | if (card->ext_csd.enhanced_area_en) { |
967 | (card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) { | ||
968 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | 692 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
969 | EXT_CSD_ERASE_GROUP_DEF, 1, | 693 | EXT_CSD_ERASE_GROUP_DEF, 1, 0); |
970 | card->ext_csd.generic_cmd6_time); | ||
971 | 694 | ||
972 | if (err && err != -EBADMSG) | 695 | if (err && err != -EBADMSG) |
973 | goto free_card; | 696 | goto free_card; |
@@ -1005,56 +728,56 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, | |||
1005 | } | 728 | } |
1006 | 729 | ||
1007 | /* | 730 | /* |
1008 | * If the host supports the power_off_notify capability then | 731 | * Activate high speed (if supported) |
1009 | * set the notification byte in the ext_csd register of device | ||
1010 | */ | 732 | */ |
1011 | if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) && | 733 | if ((card->ext_csd.hs_max_dtr != 0) && |
1012 | (card->ext_csd.rev >= 6)) { | 734 | (host->caps & MMC_CAP_MMC_HIGHSPEED)) { |
1013 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | 735 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
1014 | EXT_CSD_POWER_OFF_NOTIFICATION, | 736 | EXT_CSD_HS_TIMING, 1, 0); |
1015 | EXT_CSD_POWER_ON, | ||
1016 | card->ext_csd.generic_cmd6_time); | ||
1017 | if (err && err != -EBADMSG) | 737 | if (err && err != -EBADMSG) |
1018 | goto free_card; | 738 | goto free_card; |
1019 | 739 | ||
1020 | /* | 740 | if (err) { |
1021 | * The err can be -EBADMSG or 0, | 741 | printk(KERN_WARNING "%s: switch to highspeed failed\n", |
1022 | * so check for success and update the flag | 742 | mmc_hostname(card->host)); |
1023 | */ | 743 | err = 0; |
1024 | if (!err) | 744 | } else { |
1025 | card->ext_csd.power_off_notification = EXT_CSD_POWER_ON; | 745 | mmc_card_set_highspeed(card); |
746 | mmc_set_timing(card->host, MMC_TIMING_MMC_HS); | ||
747 | } | ||
1026 | } | 748 | } |
1027 | 749 | ||
1028 | /* | 750 | /* |
1029 | * Activate high speed (if supported) | 751 | * Enable HPI feature (if supported) |
1030 | */ | 752 | */ |
1031 | if (card->ext_csd.hs_max_dtr != 0) { | 753 | if (card->ext_csd.hpi && (card->host->caps & MMC_CAP_BKOPS)) { |
1032 | err = 0; | 754 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
1033 | if (card->ext_csd.hs_max_dtr > 52000000 && | 755 | EXT_CSD_HPI_MGMT, 1, 0); |
1034 | host->caps2 & MMC_CAP2_HS200) | ||
1035 | err = mmc_select_hs200(card); | ||
1036 | else if (host->caps & MMC_CAP_MMC_HIGHSPEED) | ||
1037 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | ||
1038 | EXT_CSD_HS_TIMING, 1, | ||
1039 | card->ext_csd.generic_cmd6_time); | ||
1040 | |||
1041 | if (err && err != -EBADMSG) | 756 | if (err && err != -EBADMSG) |
1042 | goto free_card; | 757 | goto free_card; |
758 | if (err) { | ||
759 | pr_warning("%s: Enabling HPI failed\n", | ||
760 | mmc_hostname(card->host)); | ||
761 | err = 0; | ||
762 | } else { | ||
763 | card->ext_csd.hpi_en = 1; | ||
764 | } | ||
765 | } | ||
1043 | 766 | ||
767 | /* | ||
768 | * Enable Background ops feature (if supported) | ||
769 | */ | ||
770 | if (card->ext_csd.bk_ops && (card->host->caps & MMC_CAP_BKOPS)) { | ||
771 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | ||
772 | EXT_CSD_BKOPS_EN, 1, 0); | ||
773 | if (err && err != -EBADMSG) | ||
774 | goto free_card; | ||
1044 | if (err) { | 775 | if (err) { |
1045 | pr_warning("%s: switch to highspeed failed\n", | 776 | pr_warning("%s: Enabling BK ops failed\n", |
1046 | mmc_hostname(card->host)); | 777 | mmc_hostname(card->host)); |
1047 | err = 0; | 778 | err = 0; |
1048 | } else { | 779 | } else { |
1049 | if (card->ext_csd.hs_max_dtr > 52000000 && | 780 | card->ext_csd.bk_ops_en = 1; |
1050 | host->caps2 & MMC_CAP2_HS200) { | ||
1051 | mmc_card_set_hs200(card); | ||
1052 | mmc_set_timing(card->host, | ||
1053 | MMC_TIMING_MMC_HS200); | ||
1054 | } else { | ||
1055 | mmc_card_set_highspeed(card); | ||
1056 | mmc_set_timing(card->host, MMC_TIMING_MMC_HS); | ||
1057 | } | ||
1058 | } | 781 | } |
1059 | } | 782 | } |
1060 | 783 | ||
@@ -1063,11 +786,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, | |||
1063 | */ | 786 | */ |
1064 | max_dtr = (unsigned int)-1; | 787 | max_dtr = (unsigned int)-1; |
1065 | 788 | ||
1066 | if (mmc_card_highspeed(card) || mmc_card_hs200(card)) { | 789 | if (mmc_card_highspeed(card)) { |
1067 | if (max_dtr > card->ext_csd.hs_max_dtr) | 790 | if (max_dtr > card->ext_csd.hs_max_dtr) |
1068 | max_dtr = card->ext_csd.hs_max_dtr; | 791 | max_dtr = card->ext_csd.hs_max_dtr; |
1069 | if (mmc_card_highspeed(card) && (max_dtr > 52000000)) | ||
1070 | max_dtr = 52000000; | ||
1071 | } else if (max_dtr > card->csd.max_dtr) { | 792 | } else if (max_dtr > card->csd.max_dtr) { |
1072 | max_dtr = card->csd.max_dtr; | 793 | max_dtr = card->csd.max_dtr; |
1073 | } | 794 | } |
@@ -1091,50 +812,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, | |||
1091 | } | 812 | } |
1092 | 813 | ||
1093 | /* | 814 | /* |
1094 | * Indicate HS200 SDR mode (if supported). | ||
1095 | */ | ||
1096 | if (mmc_card_hs200(card)) { | ||
1097 | u32 ext_csd_bits; | ||
1098 | u32 bus_width = card->host->ios.bus_width; | ||
1099 | |||
1100 | /* | ||
1101 | * For devices supporting HS200 mode, the bus width has | ||
1102 | * to be set before executing the tuning function. If | ||
1103 | * set before tuning, then device will respond with CRC | ||
1104 | * errors for responses on CMD line. So for HS200 the | ||
1105 | * sequence will be | ||
1106 | * 1. set bus width 4bit / 8 bit (1 bit not supported) | ||
1107 | * 2. switch to HS200 mode | ||
1108 | * 3. set the clock to > 52Mhz <=200MHz and | ||
1109 | * 4. execute tuning for HS200 | ||
1110 | */ | ||
1111 | if ((host->caps2 & MMC_CAP2_HS200) && | ||
1112 | card->host->ops->execute_tuning) { | ||
1113 | mmc_host_clk_hold(card->host); | ||
1114 | err = card->host->ops->execute_tuning(card->host, | ||
1115 | MMC_SEND_TUNING_BLOCK_HS200); | ||
1116 | mmc_host_clk_release(card->host); | ||
1117 | } | ||
1118 | if (err) { | ||
1119 | pr_warning("%s: tuning execution failed\n", | ||
1120 | mmc_hostname(card->host)); | ||
1121 | goto err; | ||
1122 | } | ||
1123 | |||
1124 | ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ? | ||
1125 | EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4; | ||
1126 | err = mmc_select_powerclass(card, ext_csd_bits, ext_csd); | ||
1127 | if (err) | ||
1128 | pr_warning("%s: power class selection to bus width %d" | ||
1129 | " failed\n", mmc_hostname(card->host), | ||
1130 | 1 << bus_width); | ||
1131 | } | ||
1132 | |||
1133 | /* | ||
1134 | * Activate wide bus and DDR (if supported). | 815 | * Activate wide bus and DDR (if supported). |
1135 | */ | 816 | */ |
1136 | if (!mmc_card_hs200(card) && | 817 | if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) && |
1137 | (card->csd.mmca_vsn >= CSD_SPEC_VER_4) && | ||
1138 | (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) { | 818 | (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) { |
1139 | static unsigned ext_csd_bits[][2] = { | 819 | static unsigned ext_csd_bits[][2] = { |
1140 | { EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 }, | 820 | { EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 }, |
@@ -1156,18 +836,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, | |||
1156 | bus_width = bus_widths[idx]; | 836 | bus_width = bus_widths[idx]; |
1157 | if (bus_width == MMC_BUS_WIDTH_1) | 837 | if (bus_width == MMC_BUS_WIDTH_1) |
1158 | ddr = 0; /* no DDR for 1-bit width */ | 838 | ddr = 0; /* no DDR for 1-bit width */ |
1159 | err = mmc_select_powerclass(card, ext_csd_bits[idx][0], | ||
1160 | ext_csd); | ||
1161 | if (err) | ||
1162 | pr_warning("%s: power class selection to " | ||
1163 | "bus width %d failed\n", | ||
1164 | mmc_hostname(card->host), | ||
1165 | 1 << bus_width); | ||
1166 | |||
1167 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | 839 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
1168 | EXT_CSD_BUS_WIDTH, | 840 | EXT_CSD_BUS_WIDTH, |
1169 | ext_csd_bits[idx][0], | 841 | ext_csd_bits[idx][0], |
1170 | card->ext_csd.generic_cmd6_time); | 842 | 0); |
1171 | if (!err) { | 843 | if (!err) { |
1172 | mmc_set_bus_width(card->host, bus_width); | 844 | mmc_set_bus_width(card->host, bus_width); |
1173 | 845 | ||
@@ -1187,21 +859,13 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, | |||
1187 | } | 859 | } |
1188 | 860 | ||
1189 | if (!err && ddr) { | 861 | if (!err && ddr) { |
1190 | err = mmc_select_powerclass(card, ext_csd_bits[idx][1], | ||
1191 | ext_csd); | ||
1192 | if (err) | ||
1193 | pr_warning("%s: power class selection to " | ||
1194 | "bus width %d ddr %d failed\n", | ||
1195 | mmc_hostname(card->host), | ||
1196 | 1 << bus_width, ddr); | ||
1197 | |||
1198 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | 862 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
1199 | EXT_CSD_BUS_WIDTH, | 863 | EXT_CSD_BUS_WIDTH, |
1200 | ext_csd_bits[idx][1], | 864 | ext_csd_bits[idx][1], |
1201 | card->ext_csd.generic_cmd6_time); | 865 | 0); |
1202 | } | 866 | } |
1203 | if (err) { | 867 | if (err) { |
1204 | pr_warning("%s: switch to bus width %d ddr %d " | 868 | printk(KERN_WARNING "%s: switch to bus width %d ddr %d " |
1205 | "failed\n", mmc_hostname(card->host), | 869 | "failed\n", mmc_hostname(card->host), |
1206 | 1 << bus_width, ddr); | 870 | 1 << bus_width, ddr); |
1207 | goto free_card; | 871 | goto free_card; |
@@ -1220,7 +884,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, | |||
1220 | * | 884 | * |
1221 | * WARNING: eMMC rules are NOT the same as SD DDR | 885 | * WARNING: eMMC rules are NOT the same as SD DDR |
1222 | */ | 886 | */ |
1223 | if (ddr == MMC_1_2V_DDR_MODE) { | 887 | if (ddr == EXT_CSD_CARD_TYPE_DDR_1_2V) { |
1224 | err = mmc_set_signal_voltage(host, | 888 | err = mmc_set_signal_voltage(host, |
1225 | MMC_SIGNAL_VOLTAGE_120, 0); | 889 | MMC_SIGNAL_VOLTAGE_120, 0); |
1226 | if (err) | 890 | if (err) |
@@ -1232,49 +896,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, | |||
1232 | } | 896 | } |
1233 | } | 897 | } |
1234 | 898 | ||
1235 | /* | ||
1236 | * Enable HPI feature (if supported) | ||
1237 | */ | ||
1238 | if (card->ext_csd.hpi) { | ||
1239 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | ||
1240 | EXT_CSD_HPI_MGMT, 1, | ||
1241 | card->ext_csd.generic_cmd6_time); | ||
1242 | if (err && err != -EBADMSG) | ||
1243 | goto free_card; | ||
1244 | if (err) { | ||
1245 | pr_warning("%s: Enabling HPI failed\n", | ||
1246 | mmc_hostname(card->host)); | ||
1247 | err = 0; | ||
1248 | } else | ||
1249 | card->ext_csd.hpi_en = 1; | ||
1250 | } | ||
1251 | |||
1252 | /* | ||
1253 | * If cache size is higher than 0, this indicates | ||
1254 | * the existence of cache and it can be turned on. | ||
1255 | */ | ||
1256 | if ((host->caps2 & MMC_CAP2_CACHE_CTRL) && | ||
1257 | card->ext_csd.cache_size > 0) { | ||
1258 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | ||
1259 | EXT_CSD_CACHE_CTRL, 1, | ||
1260 | card->ext_csd.generic_cmd6_time); | ||
1261 | if (err && err != -EBADMSG) | ||
1262 | goto free_card; | ||
1263 | |||
1264 | /* | ||
1265 | * Only if no error, cache is turned on successfully. | ||
1266 | */ | ||
1267 | if (err) { | ||
1268 | pr_warning("%s: Cache is supported, " | ||
1269 | "but failed to turn on (%d)\n", | ||
1270 | mmc_hostname(card->host), err); | ||
1271 | card->ext_csd.cache_ctrl = 0; | ||
1272 | err = 0; | ||
1273 | } else { | ||
1274 | card->ext_csd.cache_ctrl = 1; | ||
1275 | } | ||
1276 | } | ||
1277 | |||
1278 | if (!oldcard) | 899 | if (!oldcard) |
1279 | host->card = card; | 900 | host->card = card; |
1280 | 901 | ||
@@ -1290,35 +911,6 @@ err: | |||
1290 | return err; | 911 | return err; |
1291 | } | 912 | } |
1292 | 913 | ||
1293 | static int mmc_can_poweroff_notify(const struct mmc_card *card) | ||
1294 | { | ||
1295 | return card && | ||
1296 | mmc_card_mmc(card) && | ||
1297 | (card->ext_csd.power_off_notification == EXT_CSD_POWER_ON); | ||
1298 | } | ||
1299 | |||
1300 | static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type) | ||
1301 | { | ||
1302 | unsigned int timeout = card->ext_csd.generic_cmd6_time; | ||
1303 | int err; | ||
1304 | |||
1305 | /* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */ | ||
1306 | if (notify_type == EXT_CSD_POWER_OFF_LONG) | ||
1307 | timeout = card->ext_csd.power_off_longtime; | ||
1308 | |||
1309 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | ||
1310 | EXT_CSD_POWER_OFF_NOTIFICATION, | ||
1311 | notify_type, timeout); | ||
1312 | if (err) | ||
1313 | pr_err("%s: Power Off Notification timed out, %u\n", | ||
1314 | mmc_hostname(card->host), timeout); | ||
1315 | |||
1316 | /* Disable the power off notification after the switch operation. */ | ||
1317 | card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION; | ||
1318 | |||
1319 | return err; | ||
1320 | } | ||
1321 | |||
1322 | /* | 914 | /* |
1323 | * Host is being removed. Free up the current card. | 915 | * Host is being removed. Free up the current card. |
1324 | */ | 916 | */ |
@@ -1332,14 +924,6 @@ static void mmc_remove(struct mmc_host *host) | |||
1332 | } | 924 | } |
1333 | 925 | ||
1334 | /* | 926 | /* |
1335 | * Card detection - card is alive. | ||
1336 | */ | ||
1337 | static int mmc_alive(struct mmc_host *host) | ||
1338 | { | ||
1339 | return mmc_send_status(host->card, NULL); | ||
1340 | } | ||
1341 | |||
1342 | /* | ||
1343 | * Card detection callback from host. | 927 | * Card detection callback from host. |
1344 | */ | 928 | */ |
1345 | static void mmc_detect(struct mmc_host *host) | 929 | static void mmc_detect(struct mmc_host *host) |
@@ -1354,7 +938,7 @@ static void mmc_detect(struct mmc_host *host) | |||
1354 | /* | 938 | /* |
1355 | * Just check if our card has been removed. | 939 | * Just check if our card has been removed. |
1356 | */ | 940 | */ |
1357 | err = _mmc_detect_card_removed(host); | 941 | err = mmc_send_status(host->card, NULL); |
1358 | 942 | ||
1359 | mmc_release_host(host); | 943 | mmc_release_host(host); |
1360 | 944 | ||
@@ -1373,22 +957,16 @@ static void mmc_detect(struct mmc_host *host) | |||
1373 | */ | 957 | */ |
1374 | static int mmc_suspend(struct mmc_host *host) | 958 | static int mmc_suspend(struct mmc_host *host) |
1375 | { | 959 | { |
1376 | int err = 0; | ||
1377 | |||
1378 | BUG_ON(!host); | 960 | BUG_ON(!host); |
1379 | BUG_ON(!host->card); | 961 | BUG_ON(!host->card); |
1380 | 962 | ||
1381 | mmc_claim_host(host); | 963 | mmc_claim_host(host); |
1382 | if (mmc_can_poweroff_notify(host->card)) | 964 | if (!mmc_host_is_spi(host)) |
1383 | err = mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_SHORT); | 965 | mmc_deselect_cards(host); |
1384 | else if (mmc_card_can_sleep(host)) | 966 | host->card->state &= ~MMC_STATE_HIGHSPEED; |
1385 | err = mmc_card_sleep(host); | ||
1386 | else if (!mmc_host_is_spi(host)) | ||
1387 | err = mmc_deselect_cards(host); | ||
1388 | host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); | ||
1389 | mmc_release_host(host); | 967 | mmc_release_host(host); |
1390 | 968 | ||
1391 | return err; | 969 | return 0; |
1392 | } | 970 | } |
1393 | 971 | ||
1394 | /* | 972 | /* |
@@ -1415,7 +993,7 @@ static int mmc_power_restore(struct mmc_host *host) | |||
1415 | { | 993 | { |
1416 | int ret; | 994 | int ret; |
1417 | 995 | ||
1418 | host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); | 996 | host->card->state &= ~MMC_STATE_HIGHSPEED; |
1419 | mmc_claim_host(host); | 997 | mmc_claim_host(host); |
1420 | ret = mmc_init_card(host, host->ocr, host->card); | 998 | ret = mmc_init_card(host, host->ocr, host->card); |
1421 | mmc_release_host(host); | 999 | mmc_release_host(host); |
@@ -1461,7 +1039,6 @@ static const struct mmc_bus_ops mmc_ops = { | |||
1461 | .suspend = NULL, | 1039 | .suspend = NULL, |
1462 | .resume = NULL, | 1040 | .resume = NULL, |
1463 | .power_restore = mmc_power_restore, | 1041 | .power_restore = mmc_power_restore, |
1464 | .alive = mmc_alive, | ||
1465 | }; | 1042 | }; |
1466 | 1043 | ||
1467 | static const struct mmc_bus_ops mmc_ops_unsafe = { | 1044 | static const struct mmc_bus_ops mmc_ops_unsafe = { |
@@ -1472,7 +1049,6 @@ static const struct mmc_bus_ops mmc_ops_unsafe = { | |||
1472 | .suspend = mmc_suspend, | 1049 | .suspend = mmc_suspend, |
1473 | .resume = mmc_resume, | 1050 | .resume = mmc_resume, |
1474 | .power_restore = mmc_power_restore, | 1051 | .power_restore = mmc_power_restore, |
1475 | .alive = mmc_alive, | ||
1476 | }; | 1052 | }; |
1477 | 1053 | ||
1478 | static void mmc_attach_bus_ops(struct mmc_host *host) | 1054 | static void mmc_attach_bus_ops(struct mmc_host *host) |
@@ -1497,10 +1073,6 @@ int mmc_attach_mmc(struct mmc_host *host) | |||
1497 | BUG_ON(!host); | 1073 | BUG_ON(!host); |
1498 | WARN_ON(!host->claimed); | 1074 | WARN_ON(!host->claimed); |
1499 | 1075 | ||
1500 | /* Set correct bus mode for MMC before attempting attach */ | ||
1501 | if (!mmc_host_is_spi(host)) | ||
1502 | mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN); | ||
1503 | |||
1504 | err = mmc_send_op_cond(host, 0, &ocr); | 1076 | err = mmc_send_op_cond(host, 0, &ocr); |
1505 | if (err) | 1077 | if (err) |
1506 | return err; | 1078 | return err; |
@@ -1523,7 +1095,7 @@ int mmc_attach_mmc(struct mmc_host *host) | |||
1523 | * support. | 1095 | * support. |
1524 | */ | 1096 | */ |
1525 | if (ocr & 0x7F) { | 1097 | if (ocr & 0x7F) { |
1526 | pr_warning("%s: card claims to support voltages " | 1098 | printk(KERN_WARNING "%s: card claims to support voltages " |
1527 | "below the defined range. These will be ignored.\n", | 1099 | "below the defined range. These will be ignored.\n", |
1528 | mmc_hostname(host)); | 1100 | mmc_hostname(host)); |
1529 | ocr &= ~0x7F; | 1101 | ocr &= ~0x7F; |
@@ -1562,7 +1134,7 @@ remove_card: | |||
1562 | err: | 1134 | err: |
1563 | mmc_detach_bus(host); | 1135 | mmc_detach_bus(host); |
1564 | 1136 | ||
1565 | pr_err("%s: error %d whilst initialising MMC card\n", | 1137 | printk(KERN_ERR "%s: error %d whilst initialising MMC card\n", |
1566 | mmc_hostname(host), err); | 1138 | mmc_hostname(host), err); |
1567 | 1139 | ||
1568 | return err; | 1140 | return err; |
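Editor's aside: the removed eMMC v4.5 block above assembles multi-byte EXT_CSD fields (CACHE_SIZE here) from consecutive bytes, least-significant byte first. Below is a minimal standalone sketch of that assembly, compilable outside the kernel; the EXT_CSD_CACHE_SIZE offset and the sample bytes are assumptions of mine, not values taken from this patch.

/*
 * Standalone sketch (not part of the patch): little-endian assembly of the
 * 32-bit CACHE_SIZE field from four consecutive EXT_CSD bytes, mirroring
 * the removed v4.5 code above. Offset and sample data are assumed.
 */
#include <stdio.h>
#include <stdint.h>

#define EXT_CSD_CACHE_SIZE 249	/* assumed offset; check include/linux/mmc/mmc.h */

int main(void)
{
	uint8_t ext_csd[512] = { 0 };
	uint32_t cache_size;

	/* pretend the card reported 0x00000200 in CACHE_SIZE[3..0] */
	ext_csd[EXT_CSD_CACHE_SIZE + 1] = 0x02;

	/* same byte-by-byte assembly as the removed code above */
	cache_size = ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
		     ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
		     ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
		     ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;

	printf("CACHE_SIZE = %u (units as defined by the eMMC spec)\n",
	       (unsigned)cache_size);
	return 0;
}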
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 6d8f7012d73..330b968393d 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c | |||
@@ -10,7 +10,6 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
13 | #include <linux/export.h> | ||
14 | #include <linux/types.h> | 13 | #include <linux/types.h> |
15 | #include <linux/scatterlist.h> | 14 | #include <linux/scatterlist.h> |
16 | 15 | ||
@@ -21,8 +20,6 @@ | |||
21 | #include "core.h" | 20 | #include "core.h" |
22 | #include "mmc_ops.h" | 21 | #include "mmc_ops.h" |
23 | 22 | ||
24 | #define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ | ||
25 | |||
26 | static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card) | 23 | static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card) |
27 | { | 24 | { |
28 | int err; | 25 | int err; |
@@ -232,32 +229,22 @@ mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode) | |||
232 | return 0; | 229 | return 0; |
233 | } | 230 | } |
234 | 231 | ||
235 | /* | ||
236 | * NOTE: void *buf, caller for the buf is required to use DMA-capable | ||
237 | * buffer or on-stack buffer (with some overhead in callee). | ||
238 | */ | ||
239 | static int | 232 | static int |
240 | mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, | 233 | mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, |
241 | u32 opcode, void *buf, unsigned len) | 234 | u32 opcode, void *buf, unsigned len) |
242 | { | 235 | { |
243 | struct mmc_request mrq = {NULL}; | 236 | struct mmc_request mrq = {0}; |
244 | struct mmc_command cmd = {0}; | 237 | struct mmc_command cmd = {0}; |
245 | struct mmc_data data = {0}; | 238 | struct mmc_data data = {0}; |
246 | struct scatterlist sg; | 239 | struct scatterlist sg; |
247 | void *data_buf; | 240 | void *data_buf; |
248 | int is_on_stack; | ||
249 | 241 | ||
250 | is_on_stack = object_is_on_stack(buf); | 242 | /* dma onto stack is unsafe/nonportable, but callers to this |
251 | if (is_on_stack) { | 243 | * routine normally provide temporary on-stack buffers ... |
252 | /* | 244 | */ |
253 | * dma onto stack is unsafe/nonportable, but callers to this | 245 | data_buf = kmalloc(len, GFP_KERNEL); |
254 | * routine normally provide temporary on-stack buffers ... | 246 | if (data_buf == NULL) |
255 | */ | 247 | return -ENOMEM; |
256 | data_buf = kmalloc(len, GFP_KERNEL); | ||
257 | if (!data_buf) | ||
258 | return -ENOMEM; | ||
259 | } else | ||
260 | data_buf = buf; | ||
261 | 248 | ||
262 | mrq.cmd = &cmd; | 249 | mrq.cmd = &cmd; |
263 | mrq.data = &data; | 250 | mrq.data = &data; |
@@ -292,10 +279,8 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, | |||
292 | 279 | ||
293 | mmc_wait_for_req(host, &mrq); | 280 | mmc_wait_for_req(host, &mrq); |
294 | 281 | ||
295 | if (is_on_stack) { | 282 | memcpy(buf, data_buf, len); |
296 | memcpy(buf, data_buf, len); | 283 | kfree(data_buf); |
297 | kfree(data_buf); | ||
298 | } | ||
299 | 284 | ||
300 | if (cmd.error) | 285 | if (cmd.error) |
301 | return cmd.error; | 286 | return cmd.error; |
@@ -308,32 +293,24 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, | |||
308 | int mmc_send_csd(struct mmc_card *card, u32 *csd) | 293 | int mmc_send_csd(struct mmc_card *card, u32 *csd) |
309 | { | 294 | { |
310 | int ret, i; | 295 | int ret, i; |
311 | u32 *csd_tmp; | ||
312 | 296 | ||
313 | if (!mmc_host_is_spi(card->host)) | 297 | if (!mmc_host_is_spi(card->host)) |
314 | return mmc_send_cxd_native(card->host, card->rca << 16, | 298 | return mmc_send_cxd_native(card->host, card->rca << 16, |
315 | csd, MMC_SEND_CSD); | 299 | csd, MMC_SEND_CSD); |
316 | 300 | ||
317 | csd_tmp = kmalloc(16, GFP_KERNEL); | 301 | ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd, 16); |
318 | if (!csd_tmp) | ||
319 | return -ENOMEM; | ||
320 | |||
321 | ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16); | ||
322 | if (ret) | 302 | if (ret) |
323 | goto err; | 303 | return ret; |
324 | 304 | ||
325 | for (i = 0;i < 4;i++) | 305 | for (i = 0;i < 4;i++) |
326 | csd[i] = be32_to_cpu(csd_tmp[i]); | 306 | csd[i] = be32_to_cpu(csd[i]); |
327 | 307 | ||
328 | err: | 308 | return 0; |
329 | kfree(csd_tmp); | ||
330 | return ret; | ||
331 | } | 309 | } |
332 | 310 | ||
333 | int mmc_send_cid(struct mmc_host *host, u32 *cid) | 311 | int mmc_send_cid(struct mmc_host *host, u32 *cid) |
334 | { | 312 | { |
335 | int ret, i; | 313 | int ret, i; |
336 | u32 *cid_tmp; | ||
337 | 314 | ||
338 | if (!mmc_host_is_spi(host)) { | 315 | if (!mmc_host_is_spi(host)) { |
339 | if (!host->card) | 316 | if (!host->card) |
@@ -342,20 +319,14 @@ int mmc_send_cid(struct mmc_host *host, u32 *cid) | |||
342 | cid, MMC_SEND_CID); | 319 | cid, MMC_SEND_CID); |
343 | } | 320 | } |
344 | 321 | ||
345 | cid_tmp = kmalloc(16, GFP_KERNEL); | 322 | ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid, 16); |
346 | if (!cid_tmp) | ||
347 | return -ENOMEM; | ||
348 | |||
349 | ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16); | ||
350 | if (ret) | 323 | if (ret) |
351 | goto err; | 324 | return ret; |
352 | 325 | ||
353 | for (i = 0;i < 4;i++) | 326 | for (i = 0;i < 4;i++) |
354 | cid[i] = be32_to_cpu(cid_tmp[i]); | 327 | cid[i] = be32_to_cpu(cid[i]); |
355 | 328 | ||
356 | err: | 329 | return 0; |
357 | kfree(cid_tmp); | ||
358 | return ret; | ||
359 | } | 330 | } |
360 | 331 | ||
361 | int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd) | 332 | int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd) |
@@ -395,23 +366,21 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc) | |||
395 | } | 366 | } |
396 | 367 | ||
397 | /** | 368 | /** |
398 | * __mmc_switch - modify EXT_CSD register | 369 | * mmc_switch - modify EXT_CSD register |
399 | * @card: the MMC card associated with the data transfer | 370 | * @card: the MMC card associated with the data transfer |
400 | * @set: cmd set values | 371 | * @set: cmd set values |
401 | * @index: EXT_CSD register index | 372 | * @index: EXT_CSD register index |
402 | * @value: value to program into EXT_CSD register | 373 | * @value: value to program into EXT_CSD register |
403 | * @timeout_ms: timeout (ms) for operation performed by register write, | 374 | * @timeout_ms: timeout (ms) for operation performed by register write, |
404 | * timeout of zero implies maximum possible timeout | 375 | * timeout of zero implies maximum possible timeout |
405 | * @use_busy_signal: use the busy signal as response type | ||
406 | * | 376 | * |
407 | * Modifies the EXT_CSD register for selected card. | 377 | * Modifies the EXT_CSD register for selected card. |
408 | */ | 378 | */ |
409 | int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, | 379 | int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, |
410 | unsigned int timeout_ms, bool use_busy_signal) | 380 | unsigned int timeout_ms) |
411 | { | 381 | { |
412 | int err; | 382 | int err; |
413 | struct mmc_command cmd = {0}; | 383 | struct mmc_command cmd = {0}; |
414 | unsigned long timeout; | ||
415 | u32 status; | 384 | u32 status; |
416 | 385 | ||
417 | BUG_ON(!card); | 386 | BUG_ON(!card); |
@@ -422,25 +391,14 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, | |||
422 | (index << 16) | | 391 | (index << 16) | |
423 | (value << 8) | | 392 | (value << 8) | |
424 | set; | 393 | set; |
425 | cmd.flags = MMC_CMD_AC; | 394 | cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; |
426 | if (use_busy_signal) | ||
427 | cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B; | ||
428 | else | ||
429 | cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1; | ||
430 | |||
431 | |||
432 | cmd.cmd_timeout_ms = timeout_ms; | 395 | cmd.cmd_timeout_ms = timeout_ms; |
433 | 396 | ||
434 | err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); | 397 | err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); |
435 | if (err) | 398 | if (err) |
436 | return err; | 399 | return err; |
437 | 400 | ||
438 | /* No need to check card status in case of unblocking command */ | ||
439 | if (!use_busy_signal) | ||
440 | return 0; | ||
441 | |||
442 | /* Must check status to be sure of no errors */ | 401 | /* Must check status to be sure of no errors */ |
443 | timeout = jiffies + msecs_to_jiffies(MMC_OPS_TIMEOUT_MS); | ||
444 | do { | 402 | do { |
445 | err = mmc_send_status(card, &status); | 403 | err = mmc_send_status(card, &status); |
446 | if (err) | 404 | if (err) |
@@ -449,13 +407,6 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, | |||
449 | break; | 407 | break; |
450 | if (mmc_host_is_spi(card->host)) | 408 | if (mmc_host_is_spi(card->host)) |
451 | break; | 409 | break; |
452 | |||
453 | /* Timeout if the device never leaves the program state. */ | ||
454 | if (time_after(jiffies, timeout)) { | ||
455 | pr_err("%s: Card stuck in programming state! %s\n", | ||
456 | mmc_hostname(card->host), __func__); | ||
457 | return -ETIMEDOUT; | ||
458 | } | ||
459 | } while (R1_CURRENT_STATE(status) == R1_STATE_PRG); | 410 | } while (R1_CURRENT_STATE(status) == R1_STATE_PRG); |
460 | 411 | ||
461 | if (mmc_host_is_spi(card->host)) { | 412 | if (mmc_host_is_spi(card->host)) { |
@@ -463,7 +414,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, | |||
463 | return -EBADMSG; | 414 | return -EBADMSG; |
464 | } else { | 415 | } else { |
465 | if (status & 0xFDFFA000) | 416 | if (status & 0xFDFFA000) |
466 | pr_warning("%s: unexpected status %#x after " | 417 | printk(KERN_WARNING "%s: unexpected status %#x after " |
467 | "switch", mmc_hostname(card->host), status); | 418 | "switch", mmc_hostname(card->host), status); |
468 | if (status & R1_SWITCH_ERROR) | 419 | if (status & R1_SWITCH_ERROR) |
469 | return -EBADMSG; | 420 | return -EBADMSG; |
@@ -471,13 +422,6 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, | |||
471 | 422 | ||
472 | return 0; | 423 | return 0; |
473 | } | 424 | } |
474 | EXPORT_SYMBOL_GPL(__mmc_switch); | ||
475 | |||
476 | int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, | ||
477 | unsigned int timeout_ms) | ||
478 | { | ||
479 | return __mmc_switch(card, set, index, value, timeout_ms, true); | ||
480 | } | ||
481 | EXPORT_SYMBOL_GPL(mmc_switch); | 425 | EXPORT_SYMBOL_GPL(mmc_switch); |
482 | 426 | ||
483 | int mmc_send_status(struct mmc_card *card, u32 *status) | 427 | int mmc_send_status(struct mmc_card *card, u32 *status) |
@@ -510,7 +454,7 @@ static int | |||
510 | mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode, | 454 | mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode, |
511 | u8 len) | 455 | u8 len) |
512 | { | 456 | { |
513 | struct mmc_request mrq = {NULL}; | 457 | struct mmc_request mrq = {0}; |
514 | struct mmc_command cmd = {0}; | 458 | struct mmc_command cmd = {0}; |
515 | struct mmc_data data = {0}; | 459 | struct mmc_data data = {0}; |
516 | struct scatterlist sg; | 460 | struct scatterlist sg; |
@@ -532,7 +476,7 @@ mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode, | |||
532 | else if (len == 4) | 476 | else if (len == 4) |
533 | test_buf = testdata_4bit; | 477 | test_buf = testdata_4bit; |
534 | else { | 478 | else { |
535 | pr_err("%s: Invalid bus_width %d\n", | 479 | printk(KERN_ERR "%s: Invalid bus_width %d\n", |
536 | mmc_hostname(host), len); | 480 | mmc_hostname(host), len); |
537 | kfree(data_buf); | 481 | kfree(data_buf); |
538 | return -EINVAL; | 482 | return -EINVAL; |
@@ -608,22 +552,15 @@ int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status) | |||
608 | { | 552 | { |
609 | struct mmc_command cmd = {0}; | 553 | struct mmc_command cmd = {0}; |
610 | unsigned int opcode; | 554 | unsigned int opcode; |
555 | unsigned int flags; | ||
611 | int err; | 556 | int err; |
612 | 557 | ||
613 | if (!card->ext_csd.hpi) { | ||
614 | pr_warning("%s: Card didn't support HPI command\n", | ||
615 | mmc_hostname(card->host)); | ||
616 | return -EINVAL; | ||
617 | } | ||
618 | |||
619 | opcode = card->ext_csd.hpi_cmd; | 558 | opcode = card->ext_csd.hpi_cmd; |
620 | if (opcode == MMC_STOP_TRANSMISSION) | 559 | flags = MMC_RSP_R1 | MMC_CMD_AC; |
621 | cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; | ||
622 | else if (opcode == MMC_SEND_STATUS) | ||
623 | cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; | ||
624 | 560 | ||
625 | cmd.opcode = opcode; | 561 | cmd.opcode = opcode; |
626 | cmd.arg = card->rca << 16 | 1; | 562 | cmd.arg = card->rca << 16 | 1; |
563 | cmd.flags = flags; | ||
627 | 564 | ||
628 | err = mmc_wait_for_cmd(card->host, &cmd, 0); | 565 | err = mmc_wait_for_cmd(card->host, &cmd, 0); |
629 | if (err) { | 566 | if (err) { |
@@ -637,3 +574,46 @@ int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status) | |||
637 | 574 | ||
638 | return 0; | 575 | return 0; |
639 | } | 576 | } |
577 | |||
578 | int mmc_send_bk_ops_cmd(struct mmc_card *card, bool is_synchronous) | ||
579 | { | ||
580 | int err; | ||
581 | struct mmc_command cmd; | ||
582 | u32 status; | ||
583 | |||
584 | BUG_ON(!card); | ||
585 | BUG_ON(!card->host); | ||
586 | |||
587 | memset(&cmd, 0, sizeof(struct mmc_command)); | ||
588 | |||
589 | cmd.opcode = MMC_SWITCH; | ||
590 | cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | | ||
591 | (EXT_CSD_BKOPS_EN << 16) | | ||
592 | (1 << 8) | | ||
593 | EXT_CSD_CMD_SET_NORMAL; | ||
594 | if (is_synchronous) | ||
595 | cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; | ||
596 | else | ||
597 | cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; | ||
598 | |||
599 | err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); | ||
600 | if (err) | ||
601 | return err; | ||
602 | |||
603 | /* Must check status to be sure of no errors */ | ||
604 | do { | ||
605 | err = mmc_send_status(card, &status); | ||
606 | if (err) | ||
607 | return err; | ||
608 | if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) | ||
609 | break; | ||
610 | } while (R1_CURRENT_STATE(status) == 7); | ||
611 | |||
612 | if (status & 0xFDFFA000) | ||
613 | printk(KERN_ERR "%s: unexpected status %#x after " | ||
614 | "switch", mmc_hostname(card->host), status); | ||
615 | if (status & R1_SWITCH_ERROR) | ||
616 | return -EBADMSG; | ||
617 | |||
618 | return 0; | ||
619 | } | ||
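Editor's aside: both mmc_switch() and the new mmc_send_bk_ops_cmd() above build the CMD6 argument as (access mode << 24) | (index << 16) | (value << 8) | cmd_set. The user-space sketch below shows that packing for the BKOPS_EN write; the macro values are recalled from include/linux/mmc/mmc.h and should be treated as assumptions, not taken from this patch.

/*
 * Standalone sketch (not part of the patch): CMD6 (MMC_SWITCH) argument
 * packing as used by mmc_switch() and mmc_send_bk_ops_cmd().
 */
#include <stdio.h>
#include <stdint.h>

#define MMC_SWITCH_MODE_WRITE_BYTE 0x03	/* write a single EXT_CSD byte (assumed value) */
#define EXT_CSD_CMD_SET_NORMAL     (1 << 0)
#define EXT_CSD_BKOPS_EN           163	/* assumed EXT_CSD offset */

static uint32_t mmc_switch_arg(uint8_t index, uint8_t value, uint8_t set)
{
	/* same layout as cmd.arg in the hunks above */
	return (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
	       ((uint32_t)index << 16) |
	       ((uint32_t)value << 8) |
	       set;
}

int main(void)
{
	uint32_t arg = mmc_switch_arg(EXT_CSD_BKOPS_EN, 1, EXT_CSD_CMD_SET_NORMAL);

	/* with the assumed values this prints 0x03a30101 */
	printf("CMD6 arg to enable BKOPS: 0x%08x\n", (unsigned)arg);
	return 0;
}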
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h index 3dd8941c298..d8f157dee14 100644 --- a/drivers/mmc/core/mmc_ops.h +++ b/drivers/mmc/core/mmc_ops.h | |||
@@ -2,6 +2,7 @@ | |||
2 | * linux/drivers/mmc/core/mmc_ops.h | 2 | * linux/drivers/mmc/core/mmc_ops.h |
3 | * | 3 | * |
4 | * Copyright 2006-2007 Pierre Ossman | 4 | * Copyright 2006-2007 Pierre Ossman |
5 | * Copyright (c) 2012 NVIDIA Corporation, All Rights Reserved. | ||
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 8 | * it under the terms of the GNU General Public License as published by |
@@ -27,6 +28,7 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc); | |||
27 | int mmc_card_sleepawake(struct mmc_host *host, int sleep); | 28 | int mmc_card_sleepawake(struct mmc_host *host, int sleep); |
28 | int mmc_bus_test(struct mmc_card *card, u8 bus_width); | 29 | int mmc_bus_test(struct mmc_card *card, u8 bus_width); |
29 | int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status); | 30 | int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status); |
31 | int mmc_send_bk_ops_cmd(struct mmc_card *card, bool is_synchronous); | ||
30 | 32 | ||
31 | #endif | 33 | #endif |
32 | 34 | ||
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c index 06ee1aeaace..3a596217029 100644 --- a/drivers/mmc/core/quirks.c +++ b/drivers/mmc/core/quirks.c | |||
@@ -11,7 +11,6 @@ | |||
11 | 11 | ||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/export.h> | ||
15 | #include <linux/mmc/card.h> | 14 | #include <linux/mmc/card.h> |
16 | 15 | ||
17 | #ifndef SDIO_VENDOR_ID_TI | 16 | #ifndef SDIO_VENDOR_ID_TI |
@@ -22,14 +21,6 @@ | |||
22 | #define SDIO_DEVICE_ID_TI_WL1271 0x4076 | 21 | #define SDIO_DEVICE_ID_TI_WL1271 0x4076 |
23 | #endif | 22 | #endif |
24 | 23 | ||
25 | #ifndef SDIO_VENDOR_ID_STE | ||
26 | #define SDIO_VENDOR_ID_STE 0x0020 | ||
27 | #endif | ||
28 | |||
29 | #ifndef SDIO_DEVICE_ID_STE_CW1200 | ||
30 | #define SDIO_DEVICE_ID_STE_CW1200 0x2280 | ||
31 | #endif | ||
32 | |||
33 | /* | 24 | /* |
34 | * This hook just adds a quirk for all sdio devices | 25 | * This hook just adds a quirk for all sdio devices |
35 | */ | 26 | */ |
@@ -55,9 +46,6 @@ static const struct mmc_fixup mmc_fixup_methods[] = { | |||
55 | SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271, | 46 | SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271, |
56 | add_quirk, MMC_QUIRK_DISABLE_CD), | 47 | add_quirk, MMC_QUIRK_DISABLE_CD), |
57 | 48 | ||
58 | SDIO_FIXUP(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200, | ||
59 | add_quirk, MMC_QUIRK_BROKEN_BYTE_MODE_512), | ||
60 | |||
61 | END_FIXUP | 49 | END_FIXUP |
62 | }; | 50 | }; |
63 | 51 | ||
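Editor's aside: the quirks table above pairs SDIO vendor/device IDs with quirk flags (e.g. MMC_QUIRK_DISABLE_CD for the TI WL1271). The sketch below is a simplified, hypothetical model of how such a fixup table can be walked; the struct layout, helper, quirk bit value, and the 0x0097 TI vendor ID are my assumptions and not the kernel's actual struct mmc_fixup API.

/*
 * Standalone sketch (not part of the patch): matching a card against a
 * quirk/fixup table with wildcard IDs. Heavily simplified for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define SDIO_ANY_ID          0xffffu
#define MMC_QUIRK_DISABLE_CD (1u << 0)	/* placeholder bit value */

struct sdio_fixup {
	uint16_t vendor;
	uint16_t device;
	uint32_t quirk;
};

static const struct sdio_fixup fixups[] = {
	{ 0x0097, 0x4076, MMC_QUIRK_DISABLE_CD },	/* assumed TI WL1271 IDs */
	{ SDIO_ANY_ID, SDIO_ANY_ID, 0 },		/* end marker */
};

static uint32_t apply_fixups(uint16_t vendor, uint16_t device)
{
	const struct sdio_fixup *f;
	uint32_t quirks = 0;

	/* stop at the all-wildcard end marker */
	for (f = fixups; !(f->vendor == SDIO_ANY_ID && f->device == SDIO_ANY_ID); f++) {
		if ((f->vendor == vendor || f->vendor == SDIO_ANY_ID) &&
		    (f->device == device || f->device == SDIO_ANY_ID))
			quirks |= f->quirk;
	}
	return quirks;
}

int main(void)
{
	printf("quirks for 0x0097:0x4076 = 0x%x\n",
	       (unsigned)apply_fixups(0x0097, 0x4076));
	return 0;
}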
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 74972c241df..cb2a9d4d451 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c | |||
@@ -12,7 +12,6 @@ | |||
12 | 12 | ||
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/stat.h> | ||
16 | 15 | ||
17 | #include <linux/mmc/host.h> | 16 | #include <linux/mmc/host.h> |
18 | #include <linux/mmc/card.h> | 17 | #include <linux/mmc/card.h> |
@@ -164,7 +163,7 @@ static int mmc_decode_csd(struct mmc_card *card) | |||
164 | csd->erase_size = 1; | 163 | csd->erase_size = 1; |
165 | break; | 164 | break; |
166 | default: | 165 | default: |
167 | pr_err("%s: unrecognised CSD structure version %d\n", | 166 | printk(KERN_ERR "%s: unrecognised CSD structure version %d\n", |
168 | mmc_hostname(card->host), csd_struct); | 167 | mmc_hostname(card->host), csd_struct); |
169 | return -EINVAL; | 168 | return -EINVAL; |
170 | } | 169 | } |
@@ -188,7 +187,7 @@ static int mmc_decode_scr(struct mmc_card *card) | |||
188 | 187 | ||
189 | scr_struct = UNSTUFF_BITS(resp, 60, 4); | 188 | scr_struct = UNSTUFF_BITS(resp, 60, 4); |
190 | if (scr_struct != 0) { | 189 | if (scr_struct != 0) { |
191 | pr_err("%s: unrecognised SCR structure version %d\n", | 190 | printk(KERN_ERR "%s: unrecognised SCR structure version %d\n", |
192 | mmc_hostname(card->host), scr_struct); | 191 | mmc_hostname(card->host), scr_struct); |
193 | return -EINVAL; | 192 | return -EINVAL; |
194 | } | 193 | } |
@@ -219,7 +218,7 @@ static int mmc_read_ssr(struct mmc_card *card) | |||
219 | u32 *ssr; | 218 | u32 *ssr; |
220 | 219 | ||
221 | if (!(card->csd.cmdclass & CCC_APP_SPEC)) { | 220 | if (!(card->csd.cmdclass & CCC_APP_SPEC)) { |
222 | pr_warning("%s: card lacks mandatory SD Status " | 221 | printk(KERN_WARNING "%s: card lacks mandatory SD Status " |
223 | "function.\n", mmc_hostname(card->host)); | 222 | "function.\n", mmc_hostname(card->host)); |
224 | return 0; | 223 | return 0; |
225 | } | 224 | } |
@@ -230,7 +229,7 @@ static int mmc_read_ssr(struct mmc_card *card) | |||
230 | 229 | ||
231 | err = mmc_app_sd_status(card, ssr); | 230 | err = mmc_app_sd_status(card, ssr); |
232 | if (err) { | 231 | if (err) { |
233 | pr_warning("%s: problem reading SD Status " | 232 | printk(KERN_WARNING "%s: problem reading SD Status " |
234 | "register.\n", mmc_hostname(card->host)); | 233 | "register.\n", mmc_hostname(card->host)); |
235 | err = 0; | 234 | err = 0; |
236 | goto out; | 235 | goto out; |
@@ -244,7 +243,7 @@ static int mmc_read_ssr(struct mmc_card *card) | |||
244 | * bitfield positions accordingly. | 243 | * bitfield positions accordingly. |
245 | */ | 244 | */ |
246 | au = UNSTUFF_BITS(ssr, 428 - 384, 4); | 245 | au = UNSTUFF_BITS(ssr, 428 - 384, 4); |
247 | if (au > 0 && au <= 9) { | 246 | if (au > 0 || au <= 9) { |
248 | card->ssr.au = 1 << (au + 4); | 247 | card->ssr.au = 1 << (au + 4); |
249 | es = UNSTUFF_BITS(ssr, 408 - 384, 16); | 248 | es = UNSTUFF_BITS(ssr, 408 - 384, 16); |
250 | et = UNSTUFF_BITS(ssr, 402 - 384, 6); | 249 | et = UNSTUFF_BITS(ssr, 402 - 384, 6); |
@@ -254,7 +253,7 @@ static int mmc_read_ssr(struct mmc_card *card) | |||
254 | card->ssr.erase_offset = eo * 1000; | 253 | card->ssr.erase_offset = eo * 1000; |
255 | } | 254 | } |
256 | } else { | 255 | } else { |
257 | pr_warning("%s: SD Status: Invalid Allocation Unit " | 256 | printk(KERN_WARNING "%s: SD Status: Invalid Allocation Unit " |
258 | "size.\n", mmc_hostname(card->host)); | 257 | "size.\n", mmc_hostname(card->host)); |
259 | } | 258 | } |
260 | out: | 259 | out: |
@@ -274,7 +273,7 @@ static int mmc_read_switch(struct mmc_card *card) | |||
274 | return 0; | 273 | return 0; |
275 | 274 | ||
276 | if (!(card->csd.cmdclass & CCC_SWITCH)) { | 275 | if (!(card->csd.cmdclass & CCC_SWITCH)) { |
277 | pr_warning("%s: card lacks mandatory switch " | 276 | printk(KERN_WARNING "%s: card lacks mandatory switch " |
278 | "function, performance might suffer.\n", | 277 | "function, performance might suffer.\n", |
279 | mmc_hostname(card->host)); | 278 | mmc_hostname(card->host)); |
280 | return 0; | 279 | return 0; |
@@ -284,18 +283,14 @@ static int mmc_read_switch(struct mmc_card *card) | |||
284 | 283 | ||
285 | status = kmalloc(64, GFP_KERNEL); | 284 | status = kmalloc(64, GFP_KERNEL); |
286 | if (!status) { | 285 | if (!status) { |
287 | pr_err("%s: could not allocate a buffer for " | 286 | printk(KERN_ERR "%s: could not allocate a buffer for " |
288 | "switch capabilities.\n", | 287 | "switch capabilities.\n", |
289 | mmc_hostname(card->host)); | 288 | mmc_hostname(card->host)); |
290 | return -ENOMEM; | 289 | return -ENOMEM; |
291 | } | 290 | } |
292 | 291 | ||
293 | /* | 292 | /* Find out the supported Bus Speed Modes. */ |
294 | * Find out the card's support bits with a mode 0 operation. | 293 | err = mmc_sd_switch(card, 0, 0, 1, status); |
295 | * The argument does not matter, as the support bits do not | ||
296 | * change with the arguments. | ||
297 | */ | ||
298 | err = mmc_sd_switch(card, 0, 0, 0, status); | ||
299 | if (err) { | 294 | if (err) { |
300 | /* | 295 | /* |
301 | * If the host or the card can't do the switch, | 296 | * If the host or the card can't do the switch, |
@@ -304,22 +299,60 @@ static int mmc_read_switch(struct mmc_card *card) | |||
304 | if (err != -EINVAL && err != -ENOSYS && err != -EFAULT) | 299 | if (err != -EINVAL && err != -ENOSYS && err != -EFAULT) |
305 | goto out; | 300 | goto out; |
306 | 301 | ||
307 | pr_warning("%s: problem reading Bus Speed modes.\n", | 302 | printk(KERN_WARNING "%s: problem reading Bus Speed modes.\n", |
308 | mmc_hostname(card->host)); | 303 | mmc_hostname(card->host)); |
309 | err = 0; | 304 | err = 0; |
310 | 305 | ||
311 | goto out; | 306 | goto out; |
312 | } | 307 | } |
313 | 308 | ||
314 | if (status[13] & SD_MODE_HIGH_SPEED) | ||
315 | card->sw_caps.hs_max_dtr = HIGH_SPEED_MAX_DTR; | ||
316 | |||
317 | if (card->scr.sda_spec3) { | 309 | if (card->scr.sda_spec3) { |
318 | card->sw_caps.sd3_bus_mode = status[13]; | 310 | card->sw_caps.sd3_bus_mode = status[13]; |
319 | /* Driver Strengths supported by the card */ | 311 | |
312 | /* Find out Driver Strengths supported by the card */ | ||
313 | err = mmc_sd_switch(card, 0, 2, 1, status); | ||
314 | if (err) { | ||
315 | /* | ||
316 | * If the host or the card can't do the switch, | ||
317 | * fail more gracefully. | ||
318 | */ | ||
319 | if (err != -EINVAL && err != -ENOSYS && err != -EFAULT) | ||
320 | goto out; | ||
321 | |||
322 | printk(KERN_WARNING "%s: problem reading " | ||
323 | "Driver Strength.\n", | ||
324 | mmc_hostname(card->host)); | ||
325 | err = 0; | ||
326 | |||
327 | goto out; | ||
328 | } | ||
329 | |||
320 | card->sw_caps.sd3_drv_type = status[9]; | 330 | card->sw_caps.sd3_drv_type = status[9]; |
331 | |||
332 | /* Find out Current Limits supported by the card */ | ||
333 | err = mmc_sd_switch(card, 0, 3, 1, status); | ||
334 | if (err) { | ||
335 | /* | ||
336 | * If the host or the card can't do the switch, | ||
337 | * fail more gracefully. | ||
338 | */ | ||
339 | if (err != -EINVAL && err != -ENOSYS && err != -EFAULT) | ||
340 | goto out; | ||
341 | |||
342 | printk(KERN_WARNING "%s: problem reading " | ||
343 | "Current Limit.\n", | ||
344 | mmc_hostname(card->host)); | ||
345 | err = 0; | ||
346 | |||
347 | goto out; | ||
348 | } | ||
349 | |||
350 | card->sw_caps.sd3_curr_limit = status[7]; | ||
321 | } | 351 | } |
322 | 352 | ||
353 | if (status[13] & 0x02) | ||
354 | card->sw_caps.hs_max_dtr = 50000000; | ||
355 | |||
323 | out: | 356 | out: |
324 | kfree(status); | 357 | kfree(status); |
325 | 358 | ||
@@ -350,7 +383,7 @@ int mmc_sd_switch_hs(struct mmc_card *card) | |||
350 | 383 | ||
351 | status = kmalloc(64, GFP_KERNEL); | 384 | status = kmalloc(64, GFP_KERNEL); |
352 | if (!status) { | 385 | if (!status) { |
353 | pr_err("%s: could not allocate a buffer for " | 386 | printk(KERN_ERR "%s: could not allocate a buffer for " |
354 | "switch capabilities.\n", mmc_hostname(card->host)); | 387 | "switch capabilities.\n", mmc_hostname(card->host)); |
355 | return -ENOMEM; | 388 | return -ENOMEM; |
356 | } | 389 | } |
@@ -360,7 +393,7 @@ int mmc_sd_switch_hs(struct mmc_card *card) | |||
360 | goto out; | 393 | goto out; |
361 | 394 | ||
362 | if ((status[16] & 0xF) != 1) { | 395 | if ((status[16] & 0xF) != 1) { |
363 | pr_warning("%s: Problem switching card " | 396 | printk(KERN_WARNING "%s: Problem switching card " |
364 | "into high-speed mode!\n", | 397 | "into high-speed mode!\n", |
365 | mmc_hostname(card->host)); | 398 | mmc_hostname(card->host)); |
366 | err = 0; | 399 | err = 0; |
@@ -417,18 +450,16 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status) | |||
417 | * information and let the hardware specific code | 450 | * information and let the hardware specific code |
418 | * return what is possible given the options | 451 | * return what is possible given the options |
419 | */ | 452 | */ |
420 | mmc_host_clk_hold(card->host); | ||
421 | drive_strength = card->host->ops->select_drive_strength( | 453 | drive_strength = card->host->ops->select_drive_strength( |
422 | card->sw_caps.uhs_max_dtr, | 454 | card->sw_caps.uhs_max_dtr, |
423 | host_drv_type, card_drv_type); | 455 | host_drv_type, card_drv_type); |
424 | mmc_host_clk_release(card->host); | ||
425 | 456 | ||
426 | err = mmc_sd_switch(card, 1, 2, drive_strength, status); | 457 | err = mmc_sd_switch(card, 1, 2, drive_strength, status); |
427 | if (err) | 458 | if (err) |
428 | return err; | 459 | return err; |
429 | 460 | ||
430 | if ((status[15] & 0xF) != drive_strength) { | 461 | if ((status[15] & 0xF) != drive_strength) { |
431 | pr_warning("%s: Problem setting drive strength!\n", | 462 | printk(KERN_WARNING "%s: Problem setting drive strength!\n", |
432 | mmc_hostname(card->host)); | 463 | mmc_hostname(card->host)); |
433 | return 0; | 464 | return 0; |
434 | } | 465 | } |
@@ -507,90 +538,72 @@ static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status) | |||
507 | return err; | 538 | return err; |
508 | 539 | ||
509 | if ((status[16] & 0xF) != card->sd_bus_speed) | 540 | if ((status[16] & 0xF) != card->sd_bus_speed) |
510 | pr_warning("%s: Problem setting bus speed mode!\n", | 541 | printk(KERN_WARNING "%s: Problem setting bus speed mode!\n", |
511 | mmc_hostname(card->host)); | 542 | mmc_hostname(card->host)); |
512 | else { | 543 | else { |
513 | mmc_set_timing(card->host, timing); | 544 | mmc_set_timing(card->host, timing); |
545 | if (timing == MMC_TIMING_UHS_DDR50) | ||
546 | mmc_card_set_ddr_mode(card); | ||
514 | mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr); | 547 | mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr); |
515 | } | 548 | } |
516 | 549 | ||
517 | return 0; | 550 | return 0; |
518 | } | 551 | } |
519 | 552 | ||
520 | /* Get host's max current setting at its current voltage */ | ||
521 | static u32 sd_get_host_max_current(struct mmc_host *host) | ||
522 | { | ||
523 | u32 voltage, max_current; | ||
524 | |||
525 | voltage = 1 << host->ios.vdd; | ||
526 | switch (voltage) { | ||
527 | case MMC_VDD_165_195: | ||
528 | max_current = host->max_current_180; | ||
529 | break; | ||
530 | case MMC_VDD_29_30: | ||
531 | case MMC_VDD_30_31: | ||
532 | max_current = host->max_current_300; | ||
533 | break; | ||
534 | case MMC_VDD_32_33: | ||
535 | case MMC_VDD_33_34: | ||
536 | max_current = host->max_current_330; | ||
537 | break; | ||
538 | default: | ||
539 | max_current = 0; | ||
540 | } | ||
541 | |||
542 | return max_current; | ||
543 | } | ||
544 | |||
545 | static int sd_set_current_limit(struct mmc_card *card, u8 *status) | 553 | static int sd_set_current_limit(struct mmc_card *card, u8 *status) |
546 | { | 554 | { |
547 | int current_limit = SD_SET_CURRENT_NO_CHANGE; | 555 | int current_limit = 0; |
548 | int err; | 556 | int err; |
549 | u32 max_current; | ||
550 | 557 | ||
551 | /* | 558 | /* |
552 | * Current limit switch is only defined for SDR50, SDR104, and DDR50 | 559 | * Current limit switch is only defined for SDR50, SDR104, and DDR50 |
553 | * bus speed modes. For other bus speed modes, we do not change the | 560 | * bus speed modes. For other bus speed modes, we set the default |
554 | * current limit. | 561 | * current limit of 200mA. |
555 | */ | 562 | */ |
556 | if ((card->sd_bus_speed != UHS_SDR50_BUS_SPEED) && | 563 | if ((card->sd_bus_speed == UHS_SDR50_BUS_SPEED) || |
557 | (card->sd_bus_speed != UHS_SDR104_BUS_SPEED) && | 564 | (card->sd_bus_speed == UHS_SDR104_BUS_SPEED) || |
558 | (card->sd_bus_speed != UHS_DDR50_BUS_SPEED)) | 565 | (card->sd_bus_speed == UHS_DDR50_BUS_SPEED)) { |
559 | return 0; | 566 | if (card->host->caps & MMC_CAP_MAX_CURRENT_800) { |
560 | 567 | if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_800) | |
561 | /* | 568 | current_limit = SD_SET_CURRENT_LIMIT_800; |
562 | * Host has different current capabilities when operating at | 569 | else if (card->sw_caps.sd3_curr_limit & |
563 | * different voltages, so find out its max current first. | 570 | SD_MAX_CURRENT_600) |
564 | */ | 571 | current_limit = SD_SET_CURRENT_LIMIT_600; |
565 | max_current = sd_get_host_max_current(card->host); | 572 | else if (card->sw_caps.sd3_curr_limit & |
566 | 573 | SD_MAX_CURRENT_400) | |
567 | /* | 574 | current_limit = SD_SET_CURRENT_LIMIT_400; |
568 | * We only check host's capability here, if we set a limit that is | 575 | else if (card->sw_caps.sd3_curr_limit & |
569 | * higher than the card's maximum current, the card will be using its | 576 | SD_MAX_CURRENT_200) |
570 | * maximum current, e.g. if the card's maximum current is 300ma, and | 577 | current_limit = SD_SET_CURRENT_LIMIT_200; |
571 | * when we set current limit to 200ma, the card will draw 200ma, and | 578 | } else if (card->host->caps & MMC_CAP_MAX_CURRENT_600) { |
572 | * when we set current limit to 400/600/800ma, the card will draw its | 579 | if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_600) |
573 | * maximum 300ma from the host. | 580 | current_limit = SD_SET_CURRENT_LIMIT_600; |
574 | */ | 581 | else if (card->sw_caps.sd3_curr_limit & |
575 | if (max_current >= 800) | 582 | SD_MAX_CURRENT_400) |
576 | current_limit = SD_SET_CURRENT_LIMIT_800; | 583 | current_limit = SD_SET_CURRENT_LIMIT_400; |
577 | else if (max_current >= 600) | 584 | else if (card->sw_caps.sd3_curr_limit & |
578 | current_limit = SD_SET_CURRENT_LIMIT_600; | 585 | SD_MAX_CURRENT_200) |
579 | else if (max_current >= 400) | 586 | current_limit = SD_SET_CURRENT_LIMIT_200; |
580 | current_limit = SD_SET_CURRENT_LIMIT_400; | 587 | } else if (card->host->caps & MMC_CAP_MAX_CURRENT_400) { |
581 | else if (max_current >= 200) | 588 | if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_400) |
589 | current_limit = SD_SET_CURRENT_LIMIT_400; | ||
590 | else if (card->sw_caps.sd3_curr_limit & | ||
591 | SD_MAX_CURRENT_200) | ||
592 | current_limit = SD_SET_CURRENT_LIMIT_200; | ||
593 | } else if (card->host->caps & MMC_CAP_MAX_CURRENT_200) { | ||
594 | if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_200) | ||
595 | current_limit = SD_SET_CURRENT_LIMIT_200; | ||
596 | } | ||
597 | } else | ||
582 | current_limit = SD_SET_CURRENT_LIMIT_200; | 598 | current_limit = SD_SET_CURRENT_LIMIT_200; |
583 | 599 | ||
584 | if (current_limit != SD_SET_CURRENT_NO_CHANGE) { | 600 | err = mmc_sd_switch(card, 1, 3, current_limit, status); |
585 | err = mmc_sd_switch(card, 1, 3, current_limit, status); | 601 | if (err) |
586 | if (err) | 602 | return err; |
587 | return err; | ||
588 | |||
589 | if (((status[15] >> 4) & 0x0F) != current_limit) | ||
590 | pr_warning("%s: Problem setting current limit!\n", | ||
591 | mmc_hostname(card->host)); | ||
592 | 603 | ||
593 | } | 604 | if (((status[15] >> 4) & 0x0F) != current_limit) |
605 | printk(KERN_WARNING "%s: Problem setting current limit!\n", | ||
606 | mmc_hostname(card->host)); | ||
594 | 607 | ||
595 | return 0; | 608 | return 0; |
596 | } | 609 | } |
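The right-hand version of sd_set_current_limit() above walks the host capability levels from 800 mA down and, once it finds the host's ceiling, picks the highest limit the card's sd3_curr_limit field also advertises at or below it. The following is a standalone sketch of that priority walk only, with bit masks defined locally for illustration; the real MMC_CAP_MAX_CURRENT_* and SD_MAX_CURRENT_* constants live in the mmc headers, and the SD_SET_CURRENT_LIMIT_* values used in the patch are switch-register codes, not milliamp numbers.

#include <stdio.h>

#define HOST_MAX_CURRENT_200  (1u << 0)   /* illustrative host capability bits */
#define HOST_MAX_CURRENT_400  (1u << 1)
#define HOST_MAX_CURRENT_600  (1u << 2)
#define HOST_MAX_CURRENT_800  (1u << 3)

#define CARD_MAX_CURRENT_200  (1u << 0)   /* illustrative card sd3_curr_limit bits */
#define CARD_MAX_CURRENT_400  (1u << 1)
#define CARD_MAX_CURRENT_600  (1u << 2)
#define CARD_MAX_CURRENT_800  (1u << 3)

static int select_current_limit(unsigned int host_caps, unsigned int card_caps)
{
	static const struct { unsigned int host, card; int ma; } levels[] = {
		{ HOST_MAX_CURRENT_800, CARD_MAX_CURRENT_800, 800 },
		{ HOST_MAX_CURRENT_600, CARD_MAX_CURRENT_600, 600 },
		{ HOST_MAX_CURRENT_400, CARD_MAX_CURRENT_400, 400 },
		{ HOST_MAX_CURRENT_200, CARD_MAX_CURRENT_200, 200 },
	};
	unsigned int i;
	int host_ceiling_found = 0;

	for (i = 0; i < sizeof(levels) / sizeof(levels[0]); i++) {
		/* Skip levels above what the host can supply. */
		if (!host_ceiling_found && !(host_caps & levels[i].host))
			continue;
		host_ceiling_found = 1;
		/* Below the host ceiling, take the highest level the card reports. */
		if (card_caps & levels[i].card)
			return levels[i].ma;
	}

	return 200;	/* nothing matched: keep the 200 mA default */
}

int main(void)
{
	/* Host tops out at 600 mA, card advertises 800 and 400 mA: picks 400. */
	printf("%d mA\n", select_current_limit(HOST_MAX_CURRENT_600,
					       CARD_MAX_CURRENT_800 | CARD_MAX_CURRENT_400));
	return 0;
}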
@@ -611,7 +624,7 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card) | |||
611 | 624 | ||
612 | status = kmalloc(64, GFP_KERNEL); | 625 | status = kmalloc(64, GFP_KERNEL); |
613 | if (!status) { | 626 | if (!status) { |
614 | pr_err("%s: could not allocate a buffer for " | 627 | printk(KERN_ERR "%s: could not allocate a buffer for " |
615 | "switch capabilities.\n", mmc_hostname(card->host)); | 628 | "switch capabilities.\n", mmc_hostname(card->host)); |
616 | return -ENOMEM; | 629 | return -ENOMEM; |
617 | } | 630 | } |
@@ -648,12 +661,8 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card) | |||
648 | goto out; | 661 | goto out; |
649 | 662 | ||
650 | /* SPI mode doesn't define CMD19 */ | 663 | /* SPI mode doesn't define CMD19 */ |
651 | if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning) { | 664 | if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning) |
652 | mmc_host_clk_hold(card->host); | 665 | err = card->host->ops->execute_tuning(card->host); |
653 | err = card->host->ops->execute_tuning(card->host, | ||
654 | MMC_SEND_TUNING_BLOCK); | ||
655 | mmc_host_clk_release(card->host); | ||
656 | } | ||
657 | 666 | ||
658 | out: | 667 | out: |
659 | kfree(status); | 668 | kfree(status); |
@@ -712,7 +721,6 @@ struct device_type sd_type = { | |||
712 | int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr) | 721 | int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr) |
713 | { | 722 | { |
714 | int err; | 723 | int err; |
715 | u32 max_current; | ||
716 | 724 | ||
717 | /* | 725 | /* |
718 | * Since we're changing the OCR value, we seem to | 726 | * Since we're changing the OCR value, we seem to |
@@ -740,12 +748,9 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr) | |||
740 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)) | 748 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)) |
741 | ocr |= SD_OCR_S18R; | 749 | ocr |= SD_OCR_S18R; |
742 | 750 | ||
743 | /* | 751 | /* If the host can supply more than 150mA, XPC should be set to 1. */ |
744 | * If the host can supply more than 150mA at current voltage, | 752 | if (host->caps & (MMC_CAP_SET_XPC_330 | MMC_CAP_SET_XPC_300 | |
745 | * XPC should be set to 1. | 753 | MMC_CAP_SET_XPC_180)) |
746 | */ | ||
747 | max_current = sd_get_host_max_current(host); | ||
748 | if (max_current > 150) | ||
749 | ocr |= SD_OCR_XPC; | 754 | ocr |= SD_OCR_XPC; |
750 | 755 | ||
751 | try_again: | 756 | try_again: |
@@ -796,6 +801,9 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card, | |||
796 | bool reinit) | 801 | bool reinit) |
797 | { | 802 | { |
798 | int err; | 803 | int err; |
804 | #ifdef CONFIG_MMC_PARANOID_SD_INIT | ||
805 | int retries; | ||
806 | #endif | ||
799 | 807 | ||
800 | if (!reinit) { | 808 | if (!reinit) { |
801 | /* | 809 | /* |
@@ -822,7 +830,26 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card, | |||
822 | /* | 830 | /* |
823 | * Fetch switch information from card. | 831 | * Fetch switch information from card. |
824 | */ | 832 | */ |
833 | #ifdef CONFIG_MMC_PARANOID_SD_INIT | ||
834 | for (retries = 1; retries <= 3; retries++) { | ||
835 | err = mmc_read_switch(card); | ||
836 | if (!err) { | ||
837 | if (retries > 1) { | ||
838 | printk(KERN_WARNING | ||
839 | "%s: recovered\n", | ||
840 | mmc_hostname(host)); | ||
841 | } | ||
842 | break; | ||
843 | } else { | ||
844 | printk(KERN_WARNING | ||
845 | "%s: read switch failed (attempt %d)\n", | ||
846 | mmc_hostname(host), retries); | ||
847 | } | ||
848 | } | ||
849 | #else | ||
825 | err = mmc_read_switch(card); | 850 | err = mmc_read_switch(card); |
851 | #endif | ||
852 | |||
826 | if (err) | 853 | if (err) |
827 | return err; | 854 | return err; |
828 | } | 855 | } |
@@ -845,14 +872,11 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card, | |||
845 | if (!reinit) { | 872 | if (!reinit) { |
846 | int ro = -1; | 873 | int ro = -1; |
847 | 874 | ||
848 | if (host->ops->get_ro) { | 875 | if (host->ops->get_ro) |
849 | mmc_host_clk_hold(card->host); | ||
850 | ro = host->ops->get_ro(host); | 876 | ro = host->ops->get_ro(host); |
851 | mmc_host_clk_release(card->host); | ||
852 | } | ||
853 | 877 | ||
854 | if (ro < 0) { | 878 | if (ro < 0) { |
855 | pr_warning("%s: host does not " | 879 | printk(KERN_WARNING "%s: host does not " |
856 | "support reading read-only " | 880 | "support reading read-only " |
857 | "switch. assuming write-enable.\n", | 881 | "switch. assuming write-enable.\n", |
858 | mmc_hostname(host)); | 882 | mmc_hostname(host)); |
@@ -929,6 +953,8 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, | |||
929 | err = mmc_send_relative_addr(host, &card->rca); | 953 | err = mmc_send_relative_addr(host, &card->rca); |
930 | if (err) | 954 | if (err) |
931 | return err; | 955 | return err; |
956 | |||
957 | mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL); | ||
932 | } | 958 | } |
933 | 959 | ||
934 | if (!oldcard) { | 960 | if (!oldcard) { |
@@ -959,17 +985,14 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, | |||
959 | goto free_card; | 985 | goto free_card; |
960 | 986 | ||
961 | /* Card is an ultra-high-speed card */ | 987 | /* Card is an ultra-high-speed card */ |
962 | mmc_card_set_uhs(card); | 988 | mmc_sd_card_set_uhs(card); |
963 | 989 | ||
964 | /* | 990 | /* |
965 | * Since initialization is now complete, enable preset | 991 | * Since initialization is now complete, enable preset |
966 | * value registers for UHS-I cards. | 992 | * value registers for UHS-I cards. |
967 | */ | 993 | */ |
968 | if (host->ops->enable_preset_value) { | 994 | if (host->ops->enable_preset_value) |
969 | mmc_host_clk_hold(card->host); | ||
970 | host->ops->enable_preset_value(host, true); | 995 | host->ops->enable_preset_value(host, true); |
971 | mmc_host_clk_release(card->host); | ||
972 | } | ||
973 | } else { | 996 | } else { |
974 | /* | 997 | /* |
975 | * Attempt to change to high-speed (if supported) | 998 | * Attempt to change to high-speed (if supported) |
@@ -1021,30 +1044,40 @@ static void mmc_sd_remove(struct mmc_host *host) | |||
1021 | } | 1044 | } |
1022 | 1045 | ||
1023 | /* | 1046 | /* |
1024 | * Card detection - card is alive. | ||
1025 | */ | ||
1026 | static int mmc_sd_alive(struct mmc_host *host) | ||
1027 | { | ||
1028 | return mmc_send_status(host->card, NULL); | ||
1029 | } | ||
1030 | |||
1031 | /* | ||
1032 | * Card detection callback from host. | 1047 | * Card detection callback from host. |
1033 | */ | 1048 | */ |
1034 | static void mmc_sd_detect(struct mmc_host *host) | 1049 | static void mmc_sd_detect(struct mmc_host *host) |
1035 | { | 1050 | { |
1036 | int err; | 1051 | int err = 0; |
1052 | #ifdef CONFIG_MMC_PARANOID_SD_INIT | ||
1053 | int retries = 5; | ||
1054 | #endif | ||
1037 | 1055 | ||
1038 | BUG_ON(!host); | 1056 | BUG_ON(!host); |
1039 | BUG_ON(!host->card); | 1057 | BUG_ON(!host->card); |
1040 | 1058 | ||
1041 | mmc_claim_host(host); | 1059 | mmc_claim_host(host); |
1042 | 1060 | ||
1043 | /* | 1061 | /* |
1044 | * Just check if our card has been removed. | 1062 | * Just check if our card has been removed. |
1045 | */ | 1063 | */ |
1046 | err = _mmc_detect_card_removed(host); | 1064 | #ifdef CONFIG_MMC_PARANOID_SD_INIT |
1047 | 1065 | while(retries) { | |
1066 | err = mmc_send_status(host->card, NULL); | ||
1067 | if (err) { | ||
1068 | retries--; | ||
1069 | udelay(5); | ||
1070 | continue; | ||
1071 | } | ||
1072 | break; | ||
1073 | } | ||
1074 | if (!retries) { | ||
1075 | printk(KERN_ERR "%s(%s): Unable to re-detect card (%d)\n", | ||
1076 | __func__, mmc_hostname(host), err); | ||
1077 | } | ||
1078 | #else | ||
1079 | err = mmc_send_status(host->card, NULL); | ||
1080 | #endif | ||
1048 | mmc_release_host(host); | 1081 | mmc_release_host(host); |
1049 | 1082 | ||
1050 | if (err) { | 1083 | if (err) { |
@@ -1062,18 +1095,16 @@ static void mmc_sd_detect(struct mmc_host *host) | |||
1062 | */ | 1095 | */ |
1063 | static int mmc_sd_suspend(struct mmc_host *host) | 1096 | static int mmc_sd_suspend(struct mmc_host *host) |
1064 | { | 1097 | { |
1065 | int err = 0; | ||
1066 | |||
1067 | BUG_ON(!host); | 1098 | BUG_ON(!host); |
1068 | BUG_ON(!host->card); | 1099 | BUG_ON(!host->card); |
1069 | 1100 | ||
1070 | mmc_claim_host(host); | 1101 | mmc_claim_host(host); |
1071 | if (!mmc_host_is_spi(host)) | 1102 | if (!mmc_host_is_spi(host)) |
1072 | err = mmc_deselect_cards(host); | 1103 | mmc_deselect_cards(host); |
1073 | host->card->state &= ~MMC_STATE_HIGHSPEED; | 1104 | host->card->state &= ~MMC_STATE_HIGHSPEED; |
1074 | mmc_release_host(host); | 1105 | mmc_release_host(host); |
1075 | 1106 | ||
1076 | return err; | 1107 | return 0; |
1077 | } | 1108 | } |
1078 | 1109 | ||
1079 | /* | 1110 | /* |
@@ -1085,12 +1116,31 @@ static int mmc_sd_suspend(struct mmc_host *host) | |||
1085 | static int mmc_sd_resume(struct mmc_host *host) | 1116 | static int mmc_sd_resume(struct mmc_host *host) |
1086 | { | 1117 | { |
1087 | int err; | 1118 | int err; |
1119 | #ifdef CONFIG_MMC_PARANOID_SD_INIT | ||
1120 | int retries; | ||
1121 | #endif | ||
1088 | 1122 | ||
1089 | BUG_ON(!host); | 1123 | BUG_ON(!host); |
1090 | BUG_ON(!host->card); | 1124 | BUG_ON(!host->card); |
1091 | 1125 | ||
1092 | mmc_claim_host(host); | 1126 | mmc_claim_host(host); |
1127 | #ifdef CONFIG_MMC_PARANOID_SD_INIT | ||
1128 | retries = 5; | ||
1129 | while (retries) { | ||
1130 | err = mmc_sd_init_card(host, host->ocr, host->card); | ||
1131 | |||
1132 | if (err) { | ||
1133 | printk(KERN_ERR "%s: Re-init card rc = %d (retries = %d)\n", | ||
1134 | mmc_hostname(host), err, retries); | ||
1135 | mdelay(5); | ||
1136 | retries--; | ||
1137 | continue; | ||
1138 | } | ||
1139 | break; | ||
1140 | } | ||
1141 | #else | ||
1093 | err = mmc_sd_init_card(host, host->ocr, host->card); | 1142 | err = mmc_sd_init_card(host, host->ocr, host->card); |
1143 | #endif | ||
1094 | mmc_release_host(host); | 1144 | mmc_release_host(host); |
1095 | 1145 | ||
1096 | return err; | 1146 | return err; |
@@ -1114,7 +1164,6 @@ static const struct mmc_bus_ops mmc_sd_ops = { | |||
1114 | .suspend = NULL, | 1164 | .suspend = NULL, |
1115 | .resume = NULL, | 1165 | .resume = NULL, |
1116 | .power_restore = mmc_sd_power_restore, | 1166 | .power_restore = mmc_sd_power_restore, |
1117 | .alive = mmc_sd_alive, | ||
1118 | }; | 1167 | }; |
1119 | 1168 | ||
1120 | static const struct mmc_bus_ops mmc_sd_ops_unsafe = { | 1169 | static const struct mmc_bus_ops mmc_sd_ops_unsafe = { |
@@ -1123,7 +1172,6 @@ static const struct mmc_bus_ops mmc_sd_ops_unsafe = { | |||
1123 | .suspend = mmc_sd_suspend, | 1172 | .suspend = mmc_sd_suspend, |
1124 | .resume = mmc_sd_resume, | 1173 | .resume = mmc_sd_resume, |
1125 | .power_restore = mmc_sd_power_restore, | 1174 | .power_restore = mmc_sd_power_restore, |
1126 | .alive = mmc_sd_alive, | ||
1127 | }; | 1175 | }; |
1128 | 1176 | ||
1129 | static void mmc_sd_attach_bus_ops(struct mmc_host *host) | 1177 | static void mmc_sd_attach_bus_ops(struct mmc_host *host) |
@@ -1144,16 +1192,21 @@ int mmc_attach_sd(struct mmc_host *host) | |||
1144 | { | 1192 | { |
1145 | int err; | 1193 | int err; |
1146 | u32 ocr; | 1194 | u32 ocr; |
1195 | #ifdef CONFIG_MMC_PARANOID_SD_INIT | ||
1196 | int retries; | ||
1197 | #endif | ||
1147 | 1198 | ||
1148 | BUG_ON(!host); | 1199 | BUG_ON(!host); |
1149 | WARN_ON(!host->claimed); | 1200 | WARN_ON(!host->claimed); |
1150 | 1201 | ||
1202 | /* Make sure we are at 3.3V signalling voltage */ | ||
1203 | err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, false); | ||
1204 | if (err) | ||
1205 | return err; | ||
1206 | |||
1151 | /* Disable preset value enable if already set since last time */ | 1207 | /* Disable preset value enable if already set since last time */ |
1152 | if (host->ops->enable_preset_value) { | 1208 | if (host->ops->enable_preset_value) |
1153 | mmc_host_clk_hold(host); | ||
1154 | host->ops->enable_preset_value(host, false); | 1209 | host->ops->enable_preset_value(host, false); |
1155 | mmc_host_clk_release(host); | ||
1156 | } | ||
1157 | 1210 | ||
1158 | err = mmc_send_app_op_cond(host, 0, &ocr); | 1211 | err = mmc_send_app_op_cond(host, 0, &ocr); |
1159 | if (err) | 1212 | if (err) |
@@ -1179,7 +1232,7 @@ int mmc_attach_sd(struct mmc_host *host) | |||
1179 | * support. | 1232 | * support. |
1180 | */ | 1233 | */ |
1181 | if (ocr & 0x7F) { | 1234 | if (ocr & 0x7F) { |
1182 | pr_warning("%s: card claims to support voltages " | 1235 | printk(KERN_WARNING "%s: card claims to support voltages " |
1183 | "below the defined range. These will be ignored.\n", | 1236 | "below the defined range. These will be ignored.\n", |
1184 | mmc_hostname(host)); | 1237 | mmc_hostname(host)); |
1185 | ocr &= ~0x7F; | 1238 | ocr &= ~0x7F; |
@@ -1187,7 +1240,7 @@ int mmc_attach_sd(struct mmc_host *host) | |||
1187 | 1240 | ||
1188 | if ((ocr & MMC_VDD_165_195) && | 1241 | if ((ocr & MMC_VDD_165_195) && |
1189 | !(host->ocr_avail_sd & MMC_VDD_165_195)) { | 1242 | !(host->ocr_avail_sd & MMC_VDD_165_195)) { |
1190 | pr_warning("%s: SD card claims to support the " | 1243 | printk(KERN_WARNING "%s: SD card claims to support the " |
1191 | "incompletely defined 'low voltage range'. This " | 1244 | "incompletely defined 'low voltage range'. This " |
1192 | "will be ignored.\n", mmc_hostname(host)); | 1245 | "will be ignored.\n", mmc_hostname(host)); |
1193 | ocr &= ~MMC_VDD_165_195; | 1246 | ocr &= ~MMC_VDD_165_195; |
@@ -1206,9 +1259,27 @@ int mmc_attach_sd(struct mmc_host *host) | |||
1206 | /* | 1259 | /* |
1207 | * Detect and init the card. | 1260 | * Detect and init the card. |
1208 | */ | 1261 | */ |
1262 | #ifdef CONFIG_MMC_PARANOID_SD_INIT | ||
1263 | retries = 5; | ||
1264 | while (retries) { | ||
1265 | err = mmc_sd_init_card(host, host->ocr, NULL); | ||
1266 | if (err) { | ||
1267 | retries--; | ||
1268 | continue; | ||
1269 | } | ||
1270 | break; | ||
1271 | } | ||
1272 | |||
1273 | if (!retries) { | ||
1274 | printk(KERN_ERR "%s: mmc_sd_init_card() failure (err = %d)\n", | ||
1275 | mmc_hostname(host), err); | ||
1276 | goto err; | ||
1277 | } | ||
1278 | #else | ||
1209 | err = mmc_sd_init_card(host, host->ocr, NULL); | 1279 | err = mmc_sd_init_card(host, host->ocr, NULL); |
1210 | if (err) | 1280 | if (err) |
1211 | goto err; | 1281 | goto err; |
1282 | #endif | ||
1212 | 1283 | ||
1213 | mmc_release_host(host); | 1284 | mmc_release_host(host); |
1214 | err = mmc_add_card(host->card); | 1285 | err = mmc_add_card(host->card); |
@@ -1226,7 +1297,7 @@ remove_card: | |||
1226 | err: | 1297 | err: |
1227 | mmc_detach_bus(host); | 1298 | mmc_detach_bus(host); |
1228 | 1299 | ||
1229 | pr_err("%s: error %d whilst initialising SD card\n", | 1300 | printk(KERN_ERR "%s: error %d whilst initialising SD card\n", |
1230 | mmc_hostname(host), err); | 1301 | mmc_hostname(host), err); |
1231 | 1302 | ||
1232 | return err; | 1303 | return err; |
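Several hunks in sd.c above (mmc_sd_setup_card, mmc_sd_detect, mmc_sd_resume and mmc_attach_sd) add the same shape of loop under CONFIG_MMC_PARANOID_SD_INIT: try a bounded number of times, log every failure, and note when a later attempt recovers. A minimal standalone sketch of that pattern follows; the helper and the flaky callback are hypothetical, nothing with these names exists in the tree.

#include <stdio.h>

typedef int (*init_fn_t)(void *ctx);

/* Call fn(ctx) up to max_tries times, logging each failure, and return the
 * last error (0 on success).  Mirrors the shape of the retry loops the patch
 * adds under CONFIG_MMC_PARANOID_SD_INIT. */
static int retry_init(init_fn_t fn, void *ctx, int max_tries)
{
	int err = 0, attempt;

	for (attempt = 1; attempt <= max_tries; attempt++) {
		err = fn(ctx);
		if (!err) {
			if (attempt > 1)
				fprintf(stderr, "recovered after %d attempts\n", attempt);
			return 0;
		}
		fprintf(stderr, "attempt %d failed (err = %d)\n", attempt, err);
	}

	return err;
}

/* Test callback that fails a configurable number of times before succeeding. */
static int flaky(void *ctx)
{
	int *fails_left = ctx;

	return (*fails_left)-- > 0 ? -5 : 0;
}

int main(void)
{
	int fails = 2;

	return retry_init(flaky, &fails, 5);
}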
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c index 274ef00b446..021fed15380 100644 --- a/drivers/mmc/core/sd_ops.c +++ b/drivers/mmc/core/sd_ops.c | |||
@@ -11,7 +11,6 @@ | |||
11 | 11 | ||
12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include <linux/export.h> | ||
15 | #include <linux/scatterlist.h> | 14 | #include <linux/scatterlist.h> |
16 | 15 | ||
17 | #include <linux/mmc/host.h> | 16 | #include <linux/mmc/host.h> |
@@ -68,7 +67,7 @@ EXPORT_SYMBOL_GPL(mmc_app_cmd); | |||
68 | int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card, | 67 | int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card, |
69 | struct mmc_command *cmd, int retries) | 68 | struct mmc_command *cmd, int retries) |
70 | { | 69 | { |
71 | struct mmc_request mrq = {NULL}; | 70 | struct mmc_request mrq = {0}; |
72 | 71 | ||
73 | int i, err; | 72 | int i, err; |
74 | 73 | ||
@@ -245,7 +244,7 @@ int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca) | |||
245 | int mmc_app_send_scr(struct mmc_card *card, u32 *scr) | 244 | int mmc_app_send_scr(struct mmc_card *card, u32 *scr) |
246 | { | 245 | { |
247 | int err; | 246 | int err; |
248 | struct mmc_request mrq = {NULL}; | 247 | struct mmc_request mrq = {0}; |
249 | struct mmc_command cmd = {0}; | 248 | struct mmc_command cmd = {0}; |
250 | struct mmc_data data = {0}; | 249 | struct mmc_data data = {0}; |
251 | struct scatterlist sg; | 250 | struct scatterlist sg; |
@@ -304,7 +303,7 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr) | |||
304 | int mmc_sd_switch(struct mmc_card *card, int mode, int group, | 303 | int mmc_sd_switch(struct mmc_card *card, int mode, int group, |
305 | u8 value, u8 *resp) | 304 | u8 value, u8 *resp) |
306 | { | 305 | { |
307 | struct mmc_request mrq = {NULL}; | 306 | struct mmc_request mrq = {0}; |
308 | struct mmc_command cmd = {0}; | 307 | struct mmc_command cmd = {0}; |
309 | struct mmc_data data = {0}; | 308 | struct mmc_data data = {0}; |
310 | struct scatterlist sg; | 309 | struct scatterlist sg; |
@@ -349,7 +348,7 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group, | |||
349 | int mmc_app_sd_status(struct mmc_card *card, void *ssr) | 348 | int mmc_app_sd_status(struct mmc_card *card, void *ssr) |
350 | { | 349 | { |
351 | int err; | 350 | int err; |
352 | struct mmc_request mrq = {NULL}; | 351 | struct mmc_request mrq = {0}; |
353 | struct mmc_command cmd = {0}; | 352 | struct mmc_command cmd = {0}; |
354 | struct mmc_data data = {0}; | 353 | struct mmc_data data = {0}; |
355 | struct scatterlist sg; | 354 | struct scatterlist sg; |
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 2273ce6b6c1..3d8a5e41a48 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c | |||
@@ -14,7 +14,6 @@ | |||
14 | 14 | ||
15 | #include <linux/mmc/host.h> | 15 | #include <linux/mmc/host.h> |
16 | #include <linux/mmc/card.h> | 16 | #include <linux/mmc/card.h> |
17 | #include <linux/mmc/mmc.h> | ||
18 | #include <linux/mmc/sdio.h> | 17 | #include <linux/mmc/sdio.h> |
19 | #include <linux/mmc/sdio_func.h> | 18 | #include <linux/mmc/sdio_func.h> |
20 | #include <linux/mmc/sdio_ids.h> | 19 | #include <linux/mmc/sdio_ids.h> |
@@ -28,6 +27,10 @@ | |||
28 | #include "sdio_ops.h" | 27 | #include "sdio_ops.h" |
29 | #include "sdio_cis.h" | 28 | #include "sdio_cis.h" |
30 | 29 | ||
30 | #ifdef CONFIG_MMC_EMBEDDED_SDIO | ||
31 | #include <linux/mmc/sdio_ids.h> | ||
32 | #endif | ||
33 | |||
31 | static int sdio_read_fbr(struct sdio_func *func) | 34 | static int sdio_read_fbr(struct sdio_func *func) |
32 | { | 35 | { |
33 | int ret; | 36 | int ret; |
@@ -98,11 +101,10 @@ fail: | |||
98 | return ret; | 101 | return ret; |
99 | } | 102 | } |
100 | 103 | ||
101 | static int sdio_read_cccr(struct mmc_card *card, u32 ocr) | 104 | static int sdio_read_cccr(struct mmc_card *card) |
102 | { | 105 | { |
103 | int ret; | 106 | int ret; |
104 | int cccr_vsn; | 107 | int cccr_vsn; |
105 | int uhs = ocr & R4_18V_PRESENT; | ||
106 | unsigned char data; | 108 | unsigned char data; |
107 | unsigned char speed; | 109 | unsigned char speed; |
108 | 110 | ||
@@ -150,7 +152,7 @@ static int sdio_read_cccr(struct mmc_card *card, u32 ocr) | |||
150 | card->scr.sda_spec3 = 0; | 152 | card->scr.sda_spec3 = 0; |
151 | card->sw_caps.sd3_bus_mode = 0; | 153 | card->sw_caps.sd3_bus_mode = 0; |
152 | card->sw_caps.sd3_drv_type = 0; | 154 | card->sw_caps.sd3_drv_type = 0; |
153 | if (cccr_vsn >= SDIO_CCCR_REV_3_00 && uhs) { | 155 | if (cccr_vsn >= SDIO_CCCR_REV_3_00) { |
154 | card->scr.sda_spec3 = 1; | 156 | card->scr.sda_spec3 = 1; |
155 | ret = mmc_io_rw_direct(card, 0, 0, | 157 | ret = mmc_io_rw_direct(card, 0, 0, |
156 | SDIO_CCCR_UHS, 0, &data); | 158 | SDIO_CCCR_UHS, 0, &data); |
@@ -218,12 +220,6 @@ static int sdio_enable_wide(struct mmc_card *card) | |||
218 | if (ret) | 220 | if (ret) |
219 | return ret; | 221 | return ret; |
220 | 222 | ||
221 | if ((ctrl & SDIO_BUS_WIDTH_MASK) == SDIO_BUS_WIDTH_RESERVED) | ||
222 | pr_warning("%s: SDIO_CCCR_IF is invalid: 0x%02x\n", | ||
223 | mmc_hostname(card->host), ctrl); | ||
224 | |||
225 | /* set as 4-bit bus width */ | ||
226 | ctrl &= ~SDIO_BUS_WIDTH_MASK; | ||
227 | ctrl |= SDIO_BUS_WIDTH_4BIT; | 223 | ctrl |= SDIO_BUS_WIDTH_4BIT; |
228 | 224 | ||
229 | ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL); | 225 | ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL); |
@@ -564,8 +560,7 @@ static int mmc_sdio_init_uhs_card(struct mmc_card *card) | |||
564 | 560 | ||
565 | /* Initialize and start re-tuning timer */ | 561 | /* Initialize and start re-tuning timer */ |
566 | if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning) | 562 | if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning) |
567 | err = card->host->ops->execute_tuning(card->host, | 563 | err = card->host->ops->execute_tuning(card->host); |
568 | MMC_SEND_TUNING_BLOCK); | ||
569 | 564 | ||
570 | out: | 565 | out: |
571 | 566 | ||
@@ -641,7 +636,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, | |||
641 | /* | 636 | /* |
642 | * If the host and card support UHS-I mode request the card | 637 | * If the host and card support UHS-I mode request the card |
643 | * to switch to 1.8V signaling level. No 1.8v signalling if | 638 | * to switch to 1.8V signaling level. No 1.8v signalling if |
644 | * UHS mode is not enabled to maintain compatibility and some | 639 | * UHS mode is not enabled to maintain compatibilty and some |
645 | * systems that claim 1.8v signalling in fact do not support | 640 | * systems that claim 1.8v signalling in fact do not support |
646 | * it. | 641 | * it. |
647 | */ | 642 | */ |
@@ -650,8 +645,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, | |||
650 | (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | | 645 | (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | |
651 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | | 646 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | |
652 | MMC_CAP_UHS_DDR50))) { | 647 | MMC_CAP_UHS_DDR50))) { |
653 | err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, | 648 | err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, true); |
654 | true); | ||
655 | if (err) { | 649 | if (err) { |
656 | ocr &= ~R4_18V_PRESENT; | 650 | ocr &= ~R4_18V_PRESENT; |
657 | host->ocr &= ~R4_18V_PRESENT; | 651 | host->ocr &= ~R4_18V_PRESENT; |
@@ -677,6 +671,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, | |||
677 | */ | 671 | */ |
678 | if (oldcard) | 672 | if (oldcard) |
679 | oldcard->rca = card->rca; | 673 | oldcard->rca = card->rca; |
674 | |||
675 | mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL); | ||
680 | } | 676 | } |
681 | 677 | ||
682 | /* | 678 | /* |
@@ -716,19 +712,35 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, | |||
716 | goto finish; | 712 | goto finish; |
717 | } | 713 | } |
718 | 714 | ||
719 | /* | 715 | #ifdef CONFIG_MMC_EMBEDDED_SDIO |
720 | * Read the common registers. | 716 | if (host->embedded_sdio_data.cccr) |
721 | */ | 717 | memcpy(&card->cccr, host->embedded_sdio_data.cccr, sizeof(struct sdio_cccr)); |
722 | err = sdio_read_cccr(card, ocr); | 718 | else { |
723 | if (err) | 719 | #endif |
724 | goto remove; | 720 | /* |
721 | * Read the common registers. | ||
722 | */ | ||
723 | err = sdio_read_cccr(card); | ||
724 | if (err) | ||
725 | goto remove; | ||
726 | #ifdef CONFIG_MMC_EMBEDDED_SDIO | ||
727 | } | ||
728 | #endif | ||
725 | 729 | ||
726 | /* | 730 | #ifdef CONFIG_MMC_EMBEDDED_SDIO |
727 | * Read the common CIS tuples. | 731 | if (host->embedded_sdio_data.cis) |
728 | */ | 732 | memcpy(&card->cis, host->embedded_sdio_data.cis, sizeof(struct sdio_cis)); |
729 | err = sdio_read_common_cis(card); | 733 | else { |
730 | if (err) | 734 | #endif |
731 | goto remove; | 735 | /* |
736 | * Read the common CIS tuples. | ||
737 | */ | ||
738 | err = sdio_read_common_cis(card); | ||
739 | if (err) | ||
740 | goto remove; | ||
741 | #ifdef CONFIG_MMC_EMBEDDED_SDIO | ||
742 | } | ||
743 | #endif | ||
732 | 744 | ||
733 | if (oldcard) { | 745 | if (oldcard) { |
734 | int same = (card->cis.vendor == oldcard->cis.vendor && | 746 | int same = (card->cis.vendor == oldcard->cis.vendor && |
@@ -769,7 +781,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, | |||
769 | goto remove; | 781 | goto remove; |
770 | 782 | ||
771 | /* Card is an ultra-high-speed card */ | 783 | /* Card is an ultra-high-speed card */ |
772 | mmc_card_set_uhs(card); | 784 | mmc_sd_card_set_uhs(card); |
773 | } else { | 785 | } else { |
774 | /* | 786 | /* |
775 | * Switch to high-speed (if supported). | 787 | * Switch to high-speed (if supported). |
@@ -829,14 +841,6 @@ static void mmc_sdio_remove(struct mmc_host *host) | |||
829 | } | 841 | } |
830 | 842 | ||
831 | /* | 843 | /* |
832 | * Card detection - card is alive. | ||
833 | */ | ||
834 | static int mmc_sdio_alive(struct mmc_host *host) | ||
835 | { | ||
836 | return mmc_select_card(host->card); | ||
837 | } | ||
838 | |||
839 | /* | ||
840 | * Card detection callback from host. | 844 | * Card detection callback from host. |
841 | */ | 845 | */ |
842 | static void mmc_sdio_detect(struct mmc_host *host) | 846 | static void mmc_sdio_detect(struct mmc_host *host) |
@@ -858,7 +862,7 @@ static void mmc_sdio_detect(struct mmc_host *host) | |||
858 | /* | 862 | /* |
859 | * Just check if our card has been removed. | 863 | * Just check if our card has been removed. |
860 | */ | 864 | */ |
861 | err = _mmc_detect_card_removed(host); | 865 | err = mmc_select_card(host->card); |
862 | 866 | ||
863 | mmc_release_host(host); | 867 | mmc_release_host(host); |
864 | 868 | ||
@@ -950,7 +954,7 @@ static int mmc_sdio_resume(struct mmc_host *host) | |||
950 | } | 954 | } |
951 | 955 | ||
952 | if (!err && host->sdio_irqs) | 956 | if (!err && host->sdio_irqs) |
953 | wake_up_process(host->sdio_irq_thread); | 957 | mmc_signal_sdio_irq(host); |
954 | mmc_release_host(host); | 958 | mmc_release_host(host); |
955 | 959 | ||
956 | /* | 960 | /* |
@@ -1002,7 +1006,6 @@ static int mmc_sdio_power_restore(struct mmc_host *host) | |||
1002 | * With these steps taken, mmc_select_voltage() is also required to | 1006 | * With these steps taken, mmc_select_voltage() is also required to |
1003 | * restore the correct voltage setting of the card. | 1007 | * restore the correct voltage setting of the card. |
1004 | */ | 1008 | */ |
1005 | |||
1006 | sdio_reset(host); | 1009 | sdio_reset(host); |
1007 | mmc_go_idle(host); | 1010 | mmc_go_idle(host); |
1008 | mmc_send_if_cond(host, host->ocr_avail); | 1011 | mmc_send_if_cond(host, host->ocr_avail); |
@@ -1037,7 +1040,6 @@ static const struct mmc_bus_ops mmc_sdio_ops = { | |||
1037 | .suspend = mmc_sdio_suspend, | 1040 | .suspend = mmc_sdio_suspend, |
1038 | .resume = mmc_sdio_resume, | 1041 | .resume = mmc_sdio_resume, |
1039 | .power_restore = mmc_sdio_power_restore, | 1042 | .power_restore = mmc_sdio_power_restore, |
1040 | .alive = mmc_sdio_alive, | ||
1041 | }; | 1043 | }; |
1042 | 1044 | ||
1043 | 1045 | ||
@@ -1066,7 +1068,7 @@ int mmc_attach_sdio(struct mmc_host *host) | |||
1066 | * support. | 1068 | * support. |
1067 | */ | 1069 | */ |
1068 | if (ocr & 0x7F) { | 1070 | if (ocr & 0x7F) { |
1069 | pr_warning("%s: card claims to support voltages " | 1071 | printk(KERN_WARNING "%s: card claims to support voltages " |
1070 | "below the defined range. These will be ignored.\n", | 1072 | "below the defined range. These will be ignored.\n", |
1071 | mmc_hostname(host)); | 1073 | mmc_hostname(host)); |
1072 | ocr &= ~0x7F; | 1074 | ocr &= ~0x7F; |
@@ -1123,14 +1125,36 @@ int mmc_attach_sdio(struct mmc_host *host) | |||
1123 | funcs = (ocr & 0x70000000) >> 28; | 1125 | funcs = (ocr & 0x70000000) >> 28; |
1124 | card->sdio_funcs = 0; | 1126 | card->sdio_funcs = 0; |
1125 | 1127 | ||
1128 | #ifdef CONFIG_MMC_EMBEDDED_SDIO | ||
1129 | if (host->embedded_sdio_data.funcs) | ||
1130 | card->sdio_funcs = funcs = host->embedded_sdio_data.num_funcs; | ||
1131 | #endif | ||
1132 | |||
1126 | /* | 1133 | /* |
1127 | * Initialize (but don't add) all present functions. | 1134 | * Initialize (but don't add) all present functions. |
1128 | */ | 1135 | */ |
1129 | for (i = 0; i < funcs; i++, card->sdio_funcs++) { | 1136 | for (i = 0; i < funcs; i++, card->sdio_funcs++) { |
1130 | err = sdio_init_func(host->card, i + 1); | 1137 | #ifdef CONFIG_MMC_EMBEDDED_SDIO |
1131 | if (err) | 1138 | if (host->embedded_sdio_data.funcs) { |
1132 | goto remove; | 1139 | struct sdio_func *tmp; |
1133 | 1140 | ||
1141 | tmp = sdio_alloc_func(host->card); | ||
1142 | if (IS_ERR(tmp)) | ||
1143 | goto remove; | ||
1144 | tmp->num = (i + 1); | ||
1145 | card->sdio_func[i] = tmp; | ||
1146 | tmp->class = host->embedded_sdio_data.funcs[i].f_class; | ||
1147 | tmp->max_blksize = host->embedded_sdio_data.funcs[i].f_maxblksize; | ||
1148 | tmp->vendor = card->cis.vendor; | ||
1149 | tmp->device = card->cis.device; | ||
1150 | } else { | ||
1151 | #endif | ||
1152 | err = sdio_init_func(host->card, i + 1); | ||
1153 | if (err) | ||
1154 | goto remove; | ||
1155 | #ifdef CONFIG_MMC_EMBEDDED_SDIO | ||
1156 | } | ||
1157 | #endif | ||
1134 | /* | 1158 | /* |
1135 | * Enable Runtime PM for this func (if supported) | 1159 | * Enable Runtime PM for this func (if supported) |
1136 | */ | 1160 | */ |
@@ -1172,9 +1196,83 @@ remove: | |||
1172 | err: | 1196 | err: |
1173 | mmc_detach_bus(host); | 1197 | mmc_detach_bus(host); |
1174 | 1198 | ||
1175 | pr_err("%s: error %d whilst initialising SDIO card\n", | 1199 | printk(KERN_ERR "%s: error %d whilst initialising SDIO card\n", |
1176 | mmc_hostname(host), err); | 1200 | mmc_hostname(host), err); |
1177 | 1201 | ||
1178 | return err; | 1202 | return err; |
1179 | } | 1203 | } |
1180 | 1204 | ||
1205 | int sdio_reset_comm(struct mmc_card *card) | ||
1206 | { | ||
1207 | struct mmc_host *host = card->host; | ||
1208 | u32 ocr; | ||
1209 | int err; | ||
1210 | |||
1211 | printk("%s():\n", __func__); | ||
1212 | mmc_claim_host(host); | ||
1213 | |||
1214 | mmc_go_idle(host); | ||
1215 | |||
1216 | mmc_set_clock(host, host->f_min); | ||
1217 | |||
1218 | err = mmc_send_io_op_cond(host, 0, &ocr); | ||
1219 | if (err) | ||
1220 | goto err; | ||
1221 | |||
1222 | host->ocr = mmc_select_voltage(host, ocr); | ||
1223 | if (!host->ocr) { | ||
1224 | err = -EINVAL; | ||
1225 | goto err; | ||
1226 | } | ||
1227 | |||
1228 | err = mmc_send_io_op_cond(host, host->ocr, &ocr); | ||
1229 | if (err) | ||
1230 | goto err; | ||
1231 | |||
1232 | if (mmc_host_is_spi(host)) { | ||
1233 | err = mmc_spi_set_crc(host, use_spi_crc); | ||
1234 | if (err) | ||
1235 | goto err; | ||
1236 | } | ||
1237 | |||
1238 | if (!mmc_host_is_spi(host)) { | ||
1239 | err = mmc_send_relative_addr(host, &card->rca); | ||
1240 | if (err) | ||
1241 | goto err; | ||
1242 | mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL); | ||
1243 | } | ||
1244 | if (!mmc_host_is_spi(host)) { | ||
1245 | err = mmc_select_card(card); | ||
1246 | if (err) | ||
1247 | goto err; | ||
1248 | } | ||
1249 | |||
1250 | /* | ||
1251 | * Switch to high-speed (if supported). | ||
1252 | */ | ||
1253 | err = sdio_enable_hs(card); | ||
1254 | if (err > 0) | ||
1255 | mmc_sd_go_highspeed(card); | ||
1256 | else if (err) | ||
1257 | goto err; | ||
1258 | |||
1259 | /* | ||
1260 | * Change to the card's maximum speed. | ||
1261 | */ | ||
1262 | mmc_set_clock(host, mmc_sdio_get_max_clock(card)); | ||
1263 | |||
1264 | err = sdio_enable_4bit_bus(card); | ||
1265 | if (err > 0) | ||
1266 | mmc_set_bus_width(host, MMC_BUS_WIDTH_4); | ||
1267 | else if (err) | ||
1268 | goto err; | ||
1269 | |||
1270 | mmc_release_host(host); | ||
1271 | return 0; | ||
1272 | err: | ||
1273 | printk("%s: Error resetting SDIO communications (%d)\n", | ||
1274 | mmc_hostname(host), err); | ||
1275 | mmc_release_host(host); | ||
1276 | return err; | ||
1277 | } | ||
1278 | EXPORT_SYMBOL(sdio_reset_comm); | ||
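The new sdio_reset_comm() exported above re-runs the SDIO bring-up sequence (CMD0, CMD5, CMD3, card select, high speed and 4-bit bus) without a full card re-probe. A sketch of how an SDIO function driver on this tree might use it after power-cycling its chip; the driver function and its name are hypothetical, only sdio_reset_comm() and the standard sdio_func fields come from the patch and the SDIO core.

#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>

extern int sdio_reset_comm(struct mmc_card *card);

static int example_chip_reinit(struct sdio_func *func)
{
	int err;

	/* ... driver-specific hardware reset of the SDIO chip would go here ... */

	/* Bring the SDIO link back up: sdio_reset_comm() redoes CMD0/CMD5/CMD3,
	 * reselects the card and restores high-speed / 4-bit bus settings. */
	err = sdio_reset_comm(func->card);
	if (err)
		return err;

	return 0;
}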
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index 5e57048e2c1..ca58c307a12 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c | |||
@@ -13,7 +13,6 @@ | |||
13 | 13 | ||
14 | #include <linux/device.h> | 14 | #include <linux/device.h> |
15 | #include <linux/err.h> | 15 | #include <linux/err.h> |
16 | #include <linux/export.h> | ||
17 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
18 | #include <linux/pm_runtime.h> | 17 | #include <linux/pm_runtime.h> |
19 | 18 | ||
@@ -24,6 +23,10 @@ | |||
24 | #include "sdio_cis.h" | 23 | #include "sdio_cis.h" |
25 | #include "sdio_bus.h" | 24 | #include "sdio_bus.h" |
26 | 25 | ||
26 | #ifdef CONFIG_MMC_EMBEDDED_SDIO | ||
27 | #include <linux/mmc/host.h> | ||
28 | #endif | ||
29 | |||
27 | /* show configuration fields */ | 30 | /* show configuration fields */ |
28 | #define sdio_config_attr(field, format_string) \ | 31 | #define sdio_config_attr(field, format_string) \ |
29 | static ssize_t \ | 32 | static ssize_t \ |
@@ -174,7 +177,7 @@ static int sdio_bus_remove(struct device *dev) | |||
174 | drv->remove(func); | 177 | drv->remove(func); |
175 | 178 | ||
176 | if (func->irq_handler) { | 179 | if (func->irq_handler) { |
177 | pr_warning("WARNING: driver %s did not remove " | 180 | printk(KERN_WARNING "WARNING: driver %s did not remove " |
178 | "its interrupt handler!\n", drv->name); | 181 | "its interrupt handler!\n", drv->name); |
179 | sdio_claim_host(func); | 182 | sdio_claim_host(func); |
180 | sdio_release_irq(func); | 183 | sdio_release_irq(func); |
@@ -192,22 +195,9 @@ static int sdio_bus_remove(struct device *dev) | |||
192 | return ret; | 195 | return ret; |
193 | } | 196 | } |
194 | 197 | ||
195 | #ifdef CONFIG_PM | 198 | #ifdef CONFIG_PM_RUNTIME |
196 | |||
197 | #ifdef CONFIG_PM_SLEEP | ||
198 | static int pm_no_operation(struct device *dev) | ||
199 | { | ||
200 | /* | ||
201 | * Prevent the PM core from calling SDIO device drivers' suspend | ||
202 | * callback routines, which it is not supposed to do, by using this | ||
203 | * empty function as the bus type suspend callaback for SDIO. | ||
204 | */ | ||
205 | return 0; | ||
206 | } | ||
207 | #endif | ||
208 | 199 | ||
209 | static const struct dev_pm_ops sdio_bus_pm_ops = { | 200 | static const struct dev_pm_ops sdio_bus_pm_ops = { |
210 | SET_SYSTEM_SLEEP_PM_OPS(pm_no_operation, pm_no_operation) | ||
211 | SET_RUNTIME_PM_OPS( | 201 | SET_RUNTIME_PM_OPS( |
212 | pm_generic_runtime_suspend, | 202 | pm_generic_runtime_suspend, |
213 | pm_generic_runtime_resume, | 203 | pm_generic_runtime_resume, |
@@ -217,11 +207,11 @@ static const struct dev_pm_ops sdio_bus_pm_ops = { | |||
217 | 207 | ||
218 | #define SDIO_PM_OPS_PTR (&sdio_bus_pm_ops) | 208 | #define SDIO_PM_OPS_PTR (&sdio_bus_pm_ops) |
219 | 209 | ||
220 | #else /* !CONFIG_PM */ | 210 | #else /* !CONFIG_PM_RUNTIME */ |
221 | 211 | ||
222 | #define SDIO_PM_OPS_PTR NULL | 212 | #define SDIO_PM_OPS_PTR NULL |
223 | 213 | ||
224 | #endif /* !CONFIG_PM */ | 214 | #endif /* !CONFIG_PM_RUNTIME */ |
225 | 215 | ||
226 | static struct bus_type sdio_bus_type = { | 216 | static struct bus_type sdio_bus_type = { |
227 | .name = "sdio", | 217 | .name = "sdio", |
@@ -270,9 +260,17 @@ static void sdio_release_func(struct device *dev) | |||
270 | { | 260 | { |
271 | struct sdio_func *func = dev_to_sdio_func(dev); | 261 | struct sdio_func *func = dev_to_sdio_func(dev); |
272 | 262 | ||
273 | sdio_free_func_cis(func); | 263 | #ifdef CONFIG_MMC_EMBEDDED_SDIO |
264 | /* | ||
265 | * If this device is embedded then we never allocated | ||
266 | * cis tables for this func | ||
267 | */ | ||
268 | if (!func->card->host->embedded_sdio_data.funcs) | ||
269 | #endif | ||
270 | sdio_free_func_cis(func); | ||
274 | 271 | ||
275 | kfree(func->info); | 272 | if (func->info) |
273 | kfree(func->info); | ||
276 | 274 | ||
277 | kfree(func); | 275 | kfree(func); |
278 | } | 276 | } |
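The sdio_bus.c hunk above drops the system-sleep no-op callbacks and keys the pm-ops table off CONFIG_PM_RUNTIME again. For reference, a minimal bus pm-ops table of that shape is sketched below; example_bus_pm_ops and EXAMPLE_PM_OPS_PTR are illustrative names, while the SET_RUNTIME_PM_OPS macro and the pm_generic_runtime_* callbacks are the ones referenced in the diff.

#include <linux/pm.h>
#include <linux/pm_runtime.h>

#ifdef CONFIG_PM_RUNTIME
static const struct dev_pm_ops example_bus_pm_ops = {
	SET_RUNTIME_PM_OPS(
		pm_generic_runtime_suspend,
		pm_generic_runtime_resume,
		pm_generic_runtime_idle
	)
};
#define EXAMPLE_PM_OPS_PTR	(&example_bus_pm_ops)
#else
#define EXAMPLE_PM_OPS_PTR	NULL
#endif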
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c index 8e94e555b78..541bdb89e0c 100644 --- a/drivers/mmc/core/sdio_cis.c +++ b/drivers/mmc/core/sdio_cis.c | |||
@@ -132,7 +132,7 @@ static int cis_tpl_parse(struct mmc_card *card, struct sdio_func *func, | |||
132 | ret = -EINVAL; | 132 | ret = -EINVAL; |
133 | } | 133 | } |
134 | if (ret && ret != -EILSEQ && ret != -ENOENT) { | 134 | if (ret && ret != -EILSEQ && ret != -ENOENT) { |
135 | pr_err("%s: bad %s tuple 0x%02x (%u bytes)\n", | 135 | printk(KERN_ERR "%s: bad %s tuple 0x%02x (%u bytes)\n", |
136 | mmc_hostname(card->host), tpl_descr, code, size); | 136 | mmc_hostname(card->host), tpl_descr, code, size); |
137 | } | 137 | } |
138 | } else { | 138 | } else { |
@@ -313,7 +313,7 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) | |||
313 | 313 | ||
314 | if (ret == -ENOENT) { | 314 | if (ret == -ENOENT) { |
315 | /* warn about unknown tuples */ | 315 | /* warn about unknown tuples */ |
316 | pr_warn_ratelimited("%s: queuing unknown" | 316 | printk(KERN_WARNING "%s: queuing unknown" |
317 | " CIS tuple 0x%02x (%u bytes)\n", | 317 | " CIS tuple 0x%02x (%u bytes)\n", |
318 | mmc_hostname(card->host), | 318 | mmc_hostname(card->host), |
319 | tpl_code, tpl_link); | 319 | tpl_code, tpl_link); |
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c index 78cb4d5d9d5..549a3414464 100644 --- a/drivers/mmc/core/sdio_io.c +++ b/drivers/mmc/core/sdio_io.c | |||
@@ -9,7 +9,6 @@ | |||
9 | * your option) any later version. | 9 | * your option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/export.h> | ||
13 | #include <linux/mmc/host.h> | 12 | #include <linux/mmc/host.h> |
14 | #include <linux/mmc/card.h> | 13 | #include <linux/mmc/card.h> |
15 | #include <linux/mmc/sdio.h> | 14 | #include <linux/mmc/sdio.h> |
@@ -188,16 +187,14 @@ EXPORT_SYMBOL_GPL(sdio_set_block_size); | |||
188 | */ | 187 | */ |
189 | static inline unsigned int sdio_max_byte_size(struct sdio_func *func) | 188 | static inline unsigned int sdio_max_byte_size(struct sdio_func *func) |
190 | { | 189 | { |
191 | unsigned mval = func->card->host->max_blk_size; | 190 | unsigned mval = min(func->card->host->max_seg_size, |
191 | func->card->host->max_blk_size); | ||
192 | 192 | ||
193 | if (mmc_blksz_for_byte_mode(func->card)) | 193 | if (mmc_blksz_for_byte_mode(func->card)) |
194 | mval = min(mval, func->cur_blksize); | 194 | mval = min(mval, func->cur_blksize); |
195 | else | 195 | else |
196 | mval = min(mval, func->max_blksize); | 196 | mval = min(mval, func->max_blksize); |
197 | 197 | ||
198 | if (mmc_card_broken_byte_mode_512(func->card)) | ||
199 | return min(mval, 511u); | ||
200 | |||
201 | return min(mval, 512u); /* maximum size for byte mode */ | 198 | return min(mval, 512u); /* maximum size for byte mode */ |
202 | } | 199 | } |
203 | 200 | ||
@@ -310,10 +307,13 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write, | |||
310 | /* Do the bulk of the transfer using block mode (if supported). */ | 307 | /* Do the bulk of the transfer using block mode (if supported). */ |
311 | if (func->card->cccr.multi_block && (size > sdio_max_byte_size(func))) { | 308 | if (func->card->cccr.multi_block && (size > sdio_max_byte_size(func))) { |
312 | /* Blocks per command is limited by host count, host transfer | 309 | /* Blocks per command is limited by host count, host transfer |
313 | * size and the maximum for IO_RW_EXTENDED of 511 blocks. */ | 310 | * size (we only use a single sg entry) and the maximum for |
314 | max_blocks = min(func->card->host->max_blk_count, 511u); | 311 | * IO_RW_EXTENDED of 511 blocks. */ |
312 | max_blocks = min(func->card->host->max_blk_count, | ||
313 | func->card->host->max_seg_size / func->cur_blksize); | ||
314 | max_blocks = min(max_blocks, 511u); | ||
315 | 315 | ||
316 | while (remainder >= func->cur_blksize) { | 316 | while (remainder > func->cur_blksize) { |
317 | unsigned blocks; | 317 | unsigned blocks; |
318 | 318 | ||
319 | blocks = remainder / func->cur_blksize; | 319 | blocks = remainder / func->cur_blksize; |
@@ -338,9 +338,8 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write, | |||
338 | while (remainder > 0) { | 338 | while (remainder > 0) { |
339 | size = min(remainder, sdio_max_byte_size(func)); | 339 | size = min(remainder, sdio_max_byte_size(func)); |
340 | 340 | ||
341 | /* Indicate byte mode by setting "blocks" = 0 */ | ||
342 | ret = mmc_io_rw_extended(func->card, write, func->num, addr, | 341 | ret = mmc_io_rw_extended(func->card, write, func->num, addr, |
343 | incr_addr, buf, 0, size); | 342 | incr_addr, buf, 1, size); |
344 | if (ret) | 343 | if (ret) |
345 | return ret; | 344 | return ret; |
346 | 345 | ||
@@ -384,6 +383,39 @@ u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret) | |||
384 | EXPORT_SYMBOL_GPL(sdio_readb); | 383 | EXPORT_SYMBOL_GPL(sdio_readb); |
385 | 384 | ||
386 | /** | 385 | /** |
386 | * sdio_readb_ext - read a single byte from a SDIO function | ||
387 | * @func: SDIO function to access | ||
388 | * @addr: address to read | ||
389 | * @err_ret: optional status value from transfer | ||
390 | * @in: value to add to argument | ||
391 | * | ||
392 | * Reads a single byte from the address space of a given SDIO | ||
393 | * function. If there is a problem reading the address, 0xff | ||
394 | * is returned and @err_ret will contain the error code. | ||
395 | */ | ||
396 | unsigned char sdio_readb_ext(struct sdio_func *func, unsigned int addr, | ||
397 | int *err_ret, unsigned in) | ||
398 | { | ||
399 | int ret; | ||
400 | unsigned char val; | ||
401 | |||
402 | BUG_ON(!func); | ||
403 | |||
404 | if (err_ret) | ||
405 | *err_ret = 0; | ||
406 | |||
407 | ret = mmc_io_rw_direct(func->card, 0, func->num, addr, (u8)in, &val); | ||
408 | if (ret) { | ||
409 | if (err_ret) | ||
410 | *err_ret = ret; | ||
411 | return 0xFF; | ||
412 | } | ||
413 | |||
414 | return val; | ||
415 | } | ||
416 | EXPORT_SYMBOL_GPL(sdio_readb_ext); | ||
417 | |||
418 | /** | ||
387 | * sdio_writeb - write a single byte to a SDIO function | 419 | * sdio_writeb - write a single byte to a SDIO function |
388 | * @func: SDIO function to access | 420 | * @func: SDIO function to access |
389 | * @b: byte to write | 421 | * @b: byte to write |
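sdio_io.c above gains sdio_readb_ext(), a CMD52 read that also carries a caller-supplied byte in the command argument. A short usage sketch under stated assumptions: the register offset 0x10, the argument byte 0x01 and example_read_status() are made up for illustration; the claim/read/release pattern follows the existing sdio_readb() conventions.

#include <linux/mmc/sdio_func.h>

extern unsigned char sdio_readb_ext(struct sdio_func *func, unsigned int addr,
				    int *err_ret, unsigned in);

static int example_read_status(struct sdio_func *func, u8 *status)
{
	int err = 0;

	sdio_claim_host(func);
	/* 0x10 is a hypothetical function register; 0x01 is the extra byte
	 * that sdio_readb_ext() forwards as the CMD52 data argument. */
	*status = sdio_readb_ext(func, 0x10, &err, 0x01);
	sdio_release_host(func);

	return err;
}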
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c index 3d8ceb4084d..03ead028d2c 100644 --- a/drivers/mmc/core/sdio_irq.c +++ b/drivers/mmc/core/sdio_irq.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
18 | #include <linux/kthread.h> | 18 | #include <linux/kthread.h> |
19 | #include <linux/export.h> | ||
20 | #include <linux/wait.h> | 19 | #include <linux/wait.h> |
21 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
22 | 21 | ||
@@ -28,27 +27,25 @@ | |||
28 | 27 | ||
29 | #include "sdio_ops.h" | 28 | #include "sdio_ops.h" |
30 | 29 | ||
31 | static int process_sdio_pending_irqs(struct mmc_host *host) | 30 | static int process_sdio_pending_irqs(struct mmc_card *card) |
32 | { | 31 | { |
33 | struct mmc_card *card = host->card; | ||
34 | int i, ret, count; | 32 | int i, ret, count; |
35 | unsigned char pending; | 33 | unsigned char pending; |
36 | struct sdio_func *func; | 34 | struct sdio_func *func; |
37 | 35 | ||
38 | /* | 36 | /* |
39 | * Optimization, if there is only 1 function interrupt registered | 37 | * Optimization, if there is only 1 function interrupt registered |
40 | * and we know an IRQ was signaled then call irq handler directly. | 38 | * call irq handler directly |
41 | * Otherwise do the full probe. | ||
42 | */ | 39 | */ |
43 | func = card->sdio_single_irq; | 40 | func = card->sdio_single_irq; |
44 | if (func && host->sdio_irq_pending) { | 41 | if (func) { |
45 | func->irq_handler(func); | 42 | func->irq_handler(func); |
46 | return 1; | 43 | return 1; |
47 | } | 44 | } |
48 | 45 | ||
49 | ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, &pending); | 46 | ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, &pending); |
50 | if (ret) { | 47 | if (ret) { |
51 | pr_debug("%s: error %d reading SDIO_CCCR_INTx\n", | 48 | printk(KERN_DEBUG "%s: error %d reading SDIO_CCCR_INTx\n", |
52 | mmc_card_id(card), ret); | 49 | mmc_card_id(card), ret); |
53 | return ret; | 50 | return ret; |
54 | } | 51 | } |
@@ -58,7 +55,7 @@ static int process_sdio_pending_irqs(struct mmc_host *host) | |||
58 | if (pending & (1 << i)) { | 55 | if (pending & (1 << i)) { |
59 | func = card->sdio_func[i - 1]; | 56 | func = card->sdio_func[i - 1]; |
60 | if (!func) { | 57 | if (!func) { |
61 | pr_warning("%s: pending IRQ for " | 58 | printk(KERN_WARNING "%s: pending IRQ for " |
62 | "non-existent function\n", | 59 | "non-existent function\n", |
63 | mmc_card_id(card)); | 60 | mmc_card_id(card)); |
64 | ret = -EINVAL; | 61 | ret = -EINVAL; |
@@ -66,7 +63,7 @@ static int process_sdio_pending_irqs(struct mmc_host *host) | |||
66 | func->irq_handler(func); | 63 | func->irq_handler(func); |
67 | count++; | 64 | count++; |
68 | } else { | 65 | } else { |
69 | pr_warning("%s: pending IRQ with no handler\n", | 66 | printk(KERN_WARNING "%s: pending IRQ with no handler\n", |
70 | sdio_func_id(func)); | 67 | sdio_func_id(func)); |
71 | ret = -EINVAL; | 68 | ret = -EINVAL; |
72 | } | 69 | } |
@@ -118,8 +115,7 @@ static int sdio_irq_thread(void *_host) | |||
118 | ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort); | 115 | ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort); |
119 | if (ret) | 116 | if (ret) |
120 | break; | 117 | break; |
121 | ret = process_sdio_pending_irqs(host); | 118 | ret = process_sdio_pending_irqs(host->card); |
122 | host->sdio_irq_pending = false; | ||
123 | mmc_release_host(host); | 119 | mmc_release_host(host); |
124 | 120 | ||
125 | /* | 121 | /* |
@@ -149,21 +145,15 @@ static int sdio_irq_thread(void *_host) | |||
149 | } | 145 | } |
150 | 146 | ||
151 | set_current_state(TASK_INTERRUPTIBLE); | 147 | set_current_state(TASK_INTERRUPTIBLE); |
152 | if (host->caps & MMC_CAP_SDIO_IRQ) { | 148 | if (host->caps & MMC_CAP_SDIO_IRQ) |
153 | mmc_host_clk_hold(host); | ||
154 | host->ops->enable_sdio_irq(host, 1); | 149 | host->ops->enable_sdio_irq(host, 1); |
155 | mmc_host_clk_release(host); | ||
156 | } | ||
157 | if (!kthread_should_stop()) | 150 | if (!kthread_should_stop()) |
158 | schedule_timeout(period); | 151 | schedule_timeout(period); |
159 | set_current_state(TASK_RUNNING); | 152 | set_current_state(TASK_RUNNING); |
160 | } while (!kthread_should_stop()); | 153 | } while (!kthread_should_stop()); |
161 | 154 | ||
162 | if (host->caps & MMC_CAP_SDIO_IRQ) { | 155 | if (host->caps & MMC_CAP_SDIO_IRQ) |
163 | mmc_host_clk_hold(host); | ||
164 | host->ops->enable_sdio_irq(host, 0); | 156 | host->ops->enable_sdio_irq(host, 0); |
165 | mmc_host_clk_release(host); | ||
166 | } | ||
167 | 157 | ||
168 | pr_debug("%s: IRQ thread exiting with code %d\n", | 158 | pr_debug("%s: IRQ thread exiting with code %d\n", |
169 | mmc_hostname(host), ret); | 159 | mmc_hostname(host), ret); |
diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c index 62508b457c4..f087d876c57 100644 --- a/drivers/mmc/core/sdio_ops.c +++ b/drivers/mmc/core/sdio_ops.c | |||
@@ -121,16 +121,15 @@ int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn, | |||
121 | int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn, | 121 | int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn, |
122 | unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz) | 122 | unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz) |
123 | { | 123 | { |
124 | struct mmc_request mrq = {NULL}; | 124 | struct mmc_request mrq = {0}; |
125 | struct mmc_command cmd = {0}; | 125 | struct mmc_command cmd = {0}; |
126 | struct mmc_data data = {0}; | 126 | struct mmc_data data = {0}; |
127 | struct scatterlist sg, *sg_ptr; | 127 | struct scatterlist sg; |
128 | struct sg_table sgtable; | ||
129 | unsigned int nents, left_size, i; | ||
130 | unsigned int seg_size = card->host->max_seg_size; | ||
131 | 128 | ||
132 | BUG_ON(!card); | 129 | BUG_ON(!card); |
133 | BUG_ON(fn > 7); | 130 | BUG_ON(fn > 7); |
131 | BUG_ON(blocks == 1 && blksz > 512); | ||
132 | WARN_ON(blocks == 0); | ||
134 | WARN_ON(blksz == 0); | 133 | WARN_ON(blksz == 0); |
135 | 134 | ||
136 | /* sanity check */ | 135 | /* sanity check */ |
@@ -145,46 +144,24 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn, | |||
145 | cmd.arg |= fn << 28; | 144 | cmd.arg |= fn << 28; |
146 | cmd.arg |= incr_addr ? 0x04000000 : 0x00000000; | 145 | cmd.arg |= incr_addr ? 0x04000000 : 0x00000000; |
147 | cmd.arg |= addr << 9; | 146 | cmd.arg |= addr << 9; |
148 | if (blocks == 0) | 147 | if (blocks == 1 && blksz <= 512) |
149 | cmd.arg |= (blksz == 512) ? 0 : blksz; /* byte mode */ | 148 | cmd.arg |= (blksz == 512) ? 0 : blksz; /* byte mode */ |
150 | else | 149 | else |
151 | cmd.arg |= 0x08000000 | blocks; /* block mode */ | 150 | cmd.arg |= 0x08000000 | blocks; /* block mode */ |
152 | cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC; | 151 | cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC; |
153 | 152 | ||
154 | data.blksz = blksz; | 153 | data.blksz = blksz; |
155 | /* Code in host drivers/fwk assumes that "blocks" always is >=1 */ | 154 | data.blocks = blocks; |
156 | data.blocks = blocks ? blocks : 1; | ||
157 | data.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; | 155 | data.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; |
156 | data.sg = &sg; | ||
157 | data.sg_len = 1; | ||
158 | 158 | ||
159 | left_size = data.blksz * data.blocks; | 159 | sg_init_one(&sg, buf, blksz * blocks); |
160 | nents = (left_size - 1) / seg_size + 1; | ||
161 | if (nents > 1) { | ||
162 | if (sg_alloc_table(&sgtable, nents, GFP_KERNEL)) | ||
163 | return -ENOMEM; | ||
164 | |||
165 | data.sg = sgtable.sgl; | ||
166 | data.sg_len = nents; | ||
167 | |||
168 | for_each_sg(data.sg, sg_ptr, data.sg_len, i) { | ||
169 | sg_set_page(sg_ptr, virt_to_page(buf + (i * seg_size)), | ||
170 | min(seg_size, left_size), | ||
171 | offset_in_page(buf + (i * seg_size))); | ||
172 | left_size = left_size - seg_size; | ||
173 | } | ||
174 | } else { | ||
175 | data.sg = &sg; | ||
176 | data.sg_len = 1; | ||
177 | |||
178 | sg_init_one(&sg, buf, left_size); | ||
179 | } | ||
180 | 160 | ||
181 | mmc_set_data_timeout(&data, card); | 161 | mmc_set_data_timeout(&data, card); |
182 | 162 | ||
183 | mmc_wait_for_req(card->host, &mrq); | 163 | mmc_wait_for_req(card->host, &mrq); |
184 | 164 | ||
185 | if (nents > 1) | ||
186 | sg_free_table(&sgtable); | ||
187 | |||
188 | if (cmd.error) | 165 | if (cmd.error) |
189 | return cmd.error; | 166 | return cmd.error; |
190 | if (data.error) | 167 | if (data.error) |
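The removed (left-hand) side of mmc_io_rw_extended() split large transfers across several scatterlist entries sized by the host's max_seg_size; the right-hand side goes back to a single sg_init_one() entry. The only non-obvious arithmetic in the removed code is the entry count, shown standalone below in plain user-space C with no kernel APIs.

#include <stdio.h>

/* How many scatterlist entries a transfer needs when each segment is
 * limited to seg_size bytes: ceiling division over the total size,
 * matching the removed "(left_size - 1) / seg_size + 1" expression. */
static unsigned int nents_for(unsigned int blksz, unsigned int blocks,
			      unsigned int seg_size)
{
	unsigned int left_size = blksz * blocks;

	return (left_size - 1) / seg_size + 1;
}

int main(void)
{
	/* e.g. 8 blocks of 512 bytes with a 1 KiB segment limit -> 4 entries */
	printf("%u\n", nents_for(512, 8, 1024));
	return 0;
}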
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c deleted file mode 100644 index 16a1c0b6f26..00000000000 --- a/drivers/mmc/core/slot-gpio.c +++ /dev/null | |||
@@ -1,200 +0,0 @@ | |||
1 | /* | ||
2 | * Generic GPIO card-detect helper | ||
3 | * | ||
4 | * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/err.h> | ||
12 | #include <linux/gpio.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/jiffies.h> | ||
15 | #include <linux/mmc/host.h> | ||
16 | #include <linux/mmc/slot-gpio.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/slab.h> | ||
19 | |||
20 | struct mmc_gpio { | ||
21 | int ro_gpio; | ||
22 | int cd_gpio; | ||
23 | char *ro_label; | ||
24 | char cd_label[0]; | ||
25 | }; | ||
26 | |||
27 | static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id) | ||
28 | { | ||
29 | /* Schedule a card detection after a debounce timeout */ | ||
30 | struct mmc_host *host = dev_id; | ||
31 | |||
32 | if (host->ops->card_event) | ||
33 | host->ops->card_event(host); | ||
34 | |||
35 | mmc_detect_change(host, msecs_to_jiffies(200)); | ||
36 | |||
37 | return IRQ_HANDLED; | ||
38 | } | ||
39 | |||
40 | static int mmc_gpio_alloc(struct mmc_host *host) | ||
41 | { | ||
42 | size_t len = strlen(dev_name(host->parent)) + 4; | ||
43 | struct mmc_gpio *ctx; | ||
44 | |||
45 | mutex_lock(&host->slot.lock); | ||
46 | |||
47 | ctx = host->slot.handler_priv; | ||
48 | if (!ctx) { | ||
49 | /* | ||
50 | * devm_kzalloc() can be called after device_initialize(), even | ||
51 | * before device_add(), i.e., between mmc_alloc_host() and | ||
52 | * mmc_add_host() | ||
53 | */ | ||
54 | ctx = devm_kzalloc(&host->class_dev, sizeof(*ctx) + 2 * len, | ||
55 | GFP_KERNEL); | ||
56 | if (ctx) { | ||
57 | ctx->ro_label = ctx->cd_label + len; | ||
58 | snprintf(ctx->cd_label, len, "%s cd", dev_name(host->parent)); | ||
59 | snprintf(ctx->ro_label, len, "%s ro", dev_name(host->parent)); | ||
60 | ctx->cd_gpio = -EINVAL; | ||
61 | ctx->ro_gpio = -EINVAL; | ||
62 | host->slot.handler_priv = ctx; | ||
63 | } | ||
64 | } | ||
65 | |||
66 | mutex_unlock(&host->slot.lock); | ||
67 | |||
68 | return ctx ? 0 : -ENOMEM; | ||
69 | } | ||
70 | |||
71 | int mmc_gpio_get_ro(struct mmc_host *host) | ||
72 | { | ||
73 | struct mmc_gpio *ctx = host->slot.handler_priv; | ||
74 | |||
75 | if (!ctx || !gpio_is_valid(ctx->ro_gpio)) | ||
76 | return -ENOSYS; | ||
77 | |||
78 | return !gpio_get_value_cansleep(ctx->ro_gpio) ^ | ||
79 | !!(host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH); | ||
80 | } | ||
81 | EXPORT_SYMBOL(mmc_gpio_get_ro); | ||
82 | |||
83 | int mmc_gpio_get_cd(struct mmc_host *host) | ||
84 | { | ||
85 | struct mmc_gpio *ctx = host->slot.handler_priv; | ||
86 | |||
87 | if (!ctx || !gpio_is_valid(ctx->cd_gpio)) | ||
88 | return -ENOSYS; | ||
89 | |||
90 | return !gpio_get_value_cansleep(ctx->cd_gpio) ^ | ||
91 | !!(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH); | ||
92 | } | ||
93 | EXPORT_SYMBOL(mmc_gpio_get_cd); | ||
94 | |||
95 | int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio) | ||
96 | { | ||
97 | struct mmc_gpio *ctx; | ||
98 | int ret; | ||
99 | |||
100 | if (!gpio_is_valid(gpio)) | ||
101 | return -EINVAL; | ||
102 | |||
103 | ret = mmc_gpio_alloc(host); | ||
104 | if (ret < 0) | ||
105 | return ret; | ||
106 | |||
107 | ctx = host->slot.handler_priv; | ||
108 | |||
109 | ret = gpio_request_one(gpio, GPIOF_DIR_IN, ctx->ro_label); | ||
110 | if (ret < 0) | ||
111 | return ret; | ||
112 | |||
113 | ctx->ro_gpio = gpio; | ||
114 | |||
115 | return 0; | ||
116 | } | ||
117 | EXPORT_SYMBOL(mmc_gpio_request_ro); | ||
118 | |||
119 | int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio) | ||
120 | { | ||
121 | struct mmc_gpio *ctx; | ||
122 | int irq = gpio_to_irq(gpio); | ||
123 | int ret; | ||
124 | |||
125 | ret = mmc_gpio_alloc(host); | ||
126 | if (ret < 0) | ||
127 | return ret; | ||
128 | |||
129 | ctx = host->slot.handler_priv; | ||
130 | |||
131 | ret = gpio_request_one(gpio, GPIOF_DIR_IN, ctx->cd_label); | ||
132 | if (ret < 0) | ||
133 | /* | ||
134 | * don't bother freeing memory. It might still get used by other | ||
135 | * slot functions, in any case it will be freed, when the device | ||
136 | * is destroyed. | ||
137 | */ | ||
138 | return ret; | ||
139 | |||
140 | /* | ||
141 | * Even if gpio_to_irq() returns a valid IRQ number, the platform might | ||
142 | * still prefer to poll, e.g., because that IRQ number is already used | ||
143 | * by another unit and cannot be shared. | ||
144 | */ | ||
145 | if (irq >= 0 && host->caps & MMC_CAP_NEEDS_POLL) | ||
146 | irq = -EINVAL; | ||
147 | |||
148 | if (irq >= 0) { | ||
149 | ret = request_threaded_irq(irq, NULL, mmc_gpio_cd_irqt, | ||
150 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, | ||
151 | ctx->cd_label, host); | ||
152 | if (ret < 0) | ||
153 | irq = ret; | ||
154 | } | ||
155 | |||
156 | host->slot.cd_irq = irq; | ||
157 | |||
158 | if (irq < 0) | ||
159 | host->caps |= MMC_CAP_NEEDS_POLL; | ||
160 | |||
161 | ctx->cd_gpio = gpio; | ||
162 | |||
163 | return 0; | ||
164 | } | ||
165 | EXPORT_SYMBOL(mmc_gpio_request_cd); | ||
166 | |||
167 | void mmc_gpio_free_ro(struct mmc_host *host) | ||
168 | { | ||
169 | struct mmc_gpio *ctx = host->slot.handler_priv; | ||
170 | int gpio; | ||
171 | |||
172 | if (!ctx || !gpio_is_valid(ctx->ro_gpio)) | ||
173 | return; | ||
174 | |||
175 | gpio = ctx->ro_gpio; | ||
176 | ctx->ro_gpio = -EINVAL; | ||
177 | |||
178 | gpio_free(gpio); | ||
179 | } | ||
180 | EXPORT_SYMBOL(mmc_gpio_free_ro); | ||
181 | |||
182 | void mmc_gpio_free_cd(struct mmc_host *host) | ||
183 | { | ||
184 | struct mmc_gpio *ctx = host->slot.handler_priv; | ||
185 | int gpio; | ||
186 | |||
187 | if (!ctx || !gpio_is_valid(ctx->cd_gpio)) | ||
188 | return; | ||
189 | |||
190 | if (host->slot.cd_irq >= 0) { | ||
191 | free_irq(host->slot.cd_irq, host); | ||
192 | host->slot.cd_irq = -EINVAL; | ||
193 | } | ||
194 | |||
195 | gpio = ctx->cd_gpio; | ||
196 | ctx->cd_gpio = -EINVAL; | ||
197 | |||
198 | gpio_free(gpio); | ||
199 | } | ||
200 | EXPORT_SYMBOL(mmc_gpio_free_cd); | ||
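The deleted slot-gpio.c helper above exported a small card-detect/write-protect API (mmc_gpio_request_cd(), mmc_gpio_get_cd(), mmc_gpio_free_cd() and the _ro variants) that host drivers could call instead of wiring up their own GPIO handling. A minimal sketch of how a driver would have consumed it; the GPIO number and function names are invented for illustration:

```c
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

/* EXAMPLE_CD_GPIO is an assumption for this sketch; a real driver would
 * take the number from platform data or the device tree. */
#define EXAMPLE_CD_GPIO	42

static int example_setup_card_detect(struct mmc_host *mmc)
{
	/* Requests the GPIO and its IRQ, or falls back to polling. */
	return mmc_gpio_request_cd(mmc, EXAMPLE_CD_GPIO);
}

static int example_get_cd(struct mmc_host *mmc)
{
	int present = mmc_gpio_get_cd(mmc);

	/* -ENOSYS means no card-detect GPIO was set up; assume present. */
	return present < 0 ? 1 : present;
}

static void example_teardown_card_detect(struct mmc_host *mmc)
{
	mmc_gpio_free_cd(mmc);	/* releases the IRQ (if any) and the GPIO */
}
```

Note that when the card-detect IRQ cannot be used, the removed helper sets MMC_CAP_NEEDS_POLL itself, so callers did not need a separate polling fallback.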
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 8d13c659452..8c87096531e 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig | |||
@@ -81,18 +81,6 @@ config MMC_RICOH_MMC | |||
81 | 81 | ||
82 | If unsure, say Y. | 82 | If unsure, say Y. |
83 | 83 | ||
84 | config MMC_SDHCI_ACPI | ||
85 | tristate "SDHCI support for ACPI enumerated SDHCI controllers" | ||
86 | depends on MMC_SDHCI && ACPI | ||
87 | help | ||
88 | This selects support for ACPI enumerated SDHCI controllers, | ||
89 | identified by ACPI Compatibility ID PNP0D40 or specific | ||
90 | ACPI Hardware IDs. | ||
91 | |||
92 | If you have a controller with this interface, say Y or M here. | ||
93 | |||
94 | If unsure, say N. | ||
95 | |||
96 | config MMC_SDHCI_PLTFM | 84 | config MMC_SDHCI_PLTFM |
97 | tristate "SDHCI platform and OF driver helper" | 85 | tristate "SDHCI platform and OF driver helper" |
98 | depends on MMC_SDHCI | 86 | depends on MMC_SDHCI |
@@ -142,13 +130,13 @@ config MMC_SDHCI_CNS3XXX | |||
142 | If unsure, say N. | 130 | If unsure, say N. |
143 | 131 | ||
144 | config MMC_SDHCI_ESDHC_IMX | 132 | config MMC_SDHCI_ESDHC_IMX |
145 | tristate "SDHCI support for the Freescale eSDHC/uSDHC i.MX controller" | 133 | tristate "SDHCI platform support for the Freescale eSDHC i.MX controller" |
146 | depends on ARCH_MXC | 134 | depends on ARCH_MX25 || ARCH_MX35 || ARCH_MX5 |
147 | depends on MMC_SDHCI_PLTFM | 135 | depends on MMC_SDHCI_PLTFM |
148 | select MMC_SDHCI_IO_ACCESSORS | 136 | select MMC_SDHCI_IO_ACCESSORS |
149 | help | 137 | help |
150 | This selects the Freescale eSDHC/uSDHC controller support | 138 | This selects the Freescale eSDHC controller support on the platform |
151 | found on i.MX25, i.MX35 i.MX5x and i.MX6x. | 139 | bus, found on platforms like mx35/51. |
152 | 140 | ||
153 | If you have a controller with this interface, say Y or M here. | 141 | If you have a controller with this interface, say Y or M here. |
154 | 142 | ||
@@ -275,15 +263,30 @@ config MMC_WBSD | |||
275 | 263 | ||
276 | config MMC_AU1X | 264 | config MMC_AU1X |
277 | tristate "Alchemy AU1XX0 MMC Card Interface support" | 265 | tristate "Alchemy AU1XX0 MMC Card Interface support" |
278 | depends on MIPS_ALCHEMY | 266 | depends on SOC_AU1200 |
279 | help | 267 | help |
280 | This selects the AMD Alchemy(R) Multimedia card interface. | 268 | This selects the AMD Alchemy(R) Multimedia card interface. |
281 | If you have an Alchemy platform with an MMC slot, say Y or M here. | 269 | If you have an Alchemy platform with an MMC slot, say Y or M here. |
282 | 270 | ||
283 | If unsure, say N. | 271 | If unsure, say N. |
284 | 272 | ||
273 | choice | ||
274 | prompt "Atmel SD/MMC Driver" | ||
275 | depends on AVR32 || ARCH_AT91 | ||
276 | default MMC_ATMELMCI if AVR32 | ||
277 | help | ||
278 | Choose which driver to use for the Atmel MCI Silicon | ||
279 | |||
280 | config MMC_AT91 | ||
281 | tristate "AT91 SD/MMC Card Interface support" | ||
282 | depends on ARCH_AT91 | ||
283 | help | ||
284 | This selects the AT91 MCI controller. | ||
285 | |||
286 | If unsure, say N. | ||
287 | |||
285 | config MMC_ATMELMCI | 288 | config MMC_ATMELMCI |
286 | tristate "Atmel SD/MMC Driver (Multimedia Card Interface)" | 289 | tristate "Atmel Multimedia Card Interface support" |
287 | depends on AVR32 || ARCH_AT91 | 290 | depends on AVR32 || ARCH_AT91 |
288 | help | 291 | help |
289 | This selects the Atmel Multimedia Card Interface driver. If | 292 | This selects the Atmel Multimedia Card Interface driver. If |
@@ -292,6 +295,8 @@ config MMC_ATMELMCI | |||
292 | 295 | ||
293 | If unsure, say N. | 296 | If unsure, say N. |
294 | 297 | ||
298 | endchoice | ||
299 | |||
295 | config MMC_ATMELMCI_DMA | 300 | config MMC_ATMELMCI_DMA |
296 | bool "Atmel MCI DMA support" | 301 | bool "Atmel MCI DMA support" |
297 | depends on MMC_ATMELMCI && (AVR32 || ARCH_AT91SAM9G45) && DMA_ENGINE | 302 | depends on MMC_ATMELMCI && (AVR32 || ARCH_AT91SAM9G45) && DMA_ENGINE |
@@ -302,6 +307,16 @@ config MMC_ATMELMCI_DMA | |||
302 | 307 | ||
303 | If unsure, say N. | 308 | If unsure, say N. |
304 | 309 | ||
310 | config MMC_IMX | ||
311 | tristate "Motorola i.MX Multimedia Card Interface support" | ||
312 | depends on ARCH_MX1 | ||
313 | help | ||
314 | This selects the Motorola i.MX Multimedia card Interface. | ||
315 | If you have a i.MX platform with a Multimedia Card slot, | ||
316 | say Y or M here. | ||
317 | |||
318 | If unsure, say N. | ||
319 | |||
305 | config MMC_MSM | 320 | config MMC_MSM |
306 | tristate "Qualcomm SDCC Controller Support" | 321 | tristate "Qualcomm SDCC Controller Support" |
307 | depends on MMC && ARCH_MSM | 322 | depends on MMC && ARCH_MSM |
@@ -311,11 +326,11 @@ config MMC_MSM | |||
311 | support for SDIO devices. | 326 | support for SDIO devices. |
312 | 327 | ||
313 | config MMC_MXC | 328 | config MMC_MXC |
314 | tristate "Freescale i.MX21/27/31 Multimedia Card Interface support" | 329 | tristate "Freescale i.MX2/3 Multimedia Card Interface support" |
315 | depends on ARCH_MXC | 330 | depends on MACH_MX21 || MACH_MX27 || ARCH_MX31 |
316 | help | 331 | help |
317 | This selects the Freescale i.MX21, i.MX27 and i.MX31 Multimedia card | 332 | This selects the Freescale i.MX2/3 Multimedia card Interface. |
318 | Interface. If you have a i.MX platform with a Multimedia Card slot, | 333 | If you have a i.MX platform with a Multimedia Card slot, |
319 | say Y or M here. | 334 | say Y or M here. |
320 | 335 | ||
321 | If unsure, say N. | 336 | If unsure, say N. |
@@ -380,7 +395,7 @@ config MMC_SPI | |||
380 | 395 | ||
381 | config MMC_S3C | 396 | config MMC_S3C |
382 | tristate "Samsung S3C SD/MMC Card Interface support" | 397 | tristate "Samsung S3C SD/MMC Card Interface support" |
383 | depends on ARCH_S3C24XX | 398 | depends on ARCH_S3C2410 |
384 | help | 399 | help |
385 | This selects a driver for the MCI interface found in | 400 | This selects a driver for the MCI interface found in |
386 | Samsung's S3C2410, S3C2412, S3C2440, S3C2442 CPUs. | 401 | Samsung's S3C2410, S3C2412, S3C2440, S3C2442 CPUs. |
@@ -462,6 +477,7 @@ config MMC_SDHI | |||
462 | config MMC_CB710 | 477 | config MMC_CB710 |
463 | tristate "ENE CB710 MMC/SD Interface support" | 478 | tristate "ENE CB710 MMC/SD Interface support" |
464 | depends on PCI | 479 | depends on PCI |
480 | select MISC_DEVICES | ||
465 | select CB710_CORE | 481 | select CB710_CORE |
466 | help | 482 | help |
467 | This option enables support for MMC/SD part of ENE CB710/720 Flash | 483 | This option enables support for MMC/SD part of ENE CB710/720 Flash |
@@ -518,40 +534,6 @@ config MMC_DW_IDMAC | |||
518 | Designware Mobile Storage IP block. This disables the external DMA | 534 | Designware Mobile Storage IP block. This disables the external DMA |
519 | interface. | 535 | interface. |
520 | 536 | ||
521 | config MMC_DW_PLTFM | ||
522 | tristate "Synopsys Designware MCI Support as platform device" | ||
523 | depends on MMC_DW | ||
524 | default y | ||
525 | help | ||
526 | This selects the common helper functions support for Host Controller | ||
527 | Interface based platform driver. Please select this option if the IP | ||
528 | is present as a platform device. This is the common interface for the | ||
529 | Synopsys Designware IP. | ||
530 | |||
531 | If you have a controller with this interface, say Y or M here. | ||
532 | |||
533 | If unsure, say Y. | ||
534 | |||
535 | config MMC_DW_EXYNOS | ||
536 | tristate "Exynos specific extensions for Synopsys DW Memory Card Interface" | ||
537 | depends on MMC_DW | ||
538 | select MMC_DW_PLTFM | ||
539 | help | ||
540 | This selects support for Samsung Exynos SoC specific extensions to the | ||
541 | Synopsys DesignWare Memory Card Interface driver. Select this option | ||
542 | for platforms based on Exynos4 and Exynos5 SoC's. | ||
543 | |||
544 | config MMC_DW_PCI | ||
545 | tristate "Synopsys Designware MCI support on PCI bus" | ||
546 | depends on MMC_DW && PCI | ||
547 | help | ||
548 | This selects the PCI bus for the Synopsys Designware Mobile Storage IP. | ||
549 | Select this option if the IP is present on PCI platform. | ||
550 | |||
551 | If you have a controller with this interface, say Y or M here. | ||
552 | |||
553 | If unsure, say N. | ||
554 | |||
555 | config MMC_SH_MMCIF | 537 | config MMC_SH_MMCIF |
556 | tristate "SuperH Internal MMCIF support" | 538 | tristate "SuperH Internal MMCIF support" |
557 | depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE) | 539 | depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE) |
@@ -613,21 +595,3 @@ config MMC_USHC | |||
613 | 595 | ||
614 | Note: These controllers only support SDIO cards and do not | 596 | Note: These controllers only support SDIO cards and do not |
615 | support MMC or SD memory cards. | 597 | support MMC or SD memory cards. |
616 | |||
617 | config MMC_WMT | ||
618 | tristate "Wondermedia SD/MMC Host Controller support" | ||
619 | depends on ARCH_VT8500 | ||
620 | default y | ||
621 | help | ||
622 | This selects support for the SD/MMC Host Controller on | ||
623 | Wondermedia WM8505/WM8650 based SoCs. | ||
624 | |||
625 | To compile this driver as a module, choose M here: the | ||
626 | module will be called wmt-sdmmc. | ||
627 | |||
628 | config MMC_REALTEK_PCI | ||
629 | tristate "Realtek PCI-E SD/MMC Card Interface Driver" | ||
630 | depends on MFD_RTSX_PCI | ||
631 | help | ||
632 | Say Y here to include driver code to support SD/MMC card interface | ||
633 | of Realtek PCI-E card reader | ||
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index e4e218c930b..f5ea51bd0ed 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile | |||
@@ -1,15 +1,15 @@ | |||
1 | # | 1 | # |
2 | # Makefile for MMC/SD host controller drivers | 2 | # Makefile for MMC/SD host controller drivers |
3 | # | 3 | # |
4 | GCOV_PROFILE_sdhci-tegra.o := y | ||
4 | 5 | ||
5 | obj-$(CONFIG_MMC_ARMMMCI) += mmci.o | 6 | obj-$(CONFIG_MMC_ARMMMCI) += mmci.o |
6 | obj-$(CONFIG_MMC_PXA) += pxamci.o | 7 | obj-$(CONFIG_MMC_PXA) += pxamci.o |
8 | obj-$(CONFIG_MMC_IMX) += imxmmc.o | ||
7 | obj-$(CONFIG_MMC_MXC) += mxcmmc.o | 9 | obj-$(CONFIG_MMC_MXC) += mxcmmc.o |
8 | obj-$(CONFIG_MMC_MXS) += mxs-mmc.o | 10 | obj-$(CONFIG_MMC_MXS) += mxs-mmc.o |
9 | obj-$(CONFIG_MMC_SDHCI) += sdhci.o | 11 | obj-$(CONFIG_MMC_SDHCI) += sdhci.o |
10 | obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o | 12 | obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o |
11 | obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o | ||
12 | obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o | ||
13 | obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o | 13 | obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o |
14 | obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o | 14 | obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o |
15 | obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o | 15 | obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o |
@@ -18,6 +18,7 @@ obj-$(CONFIG_MMC_WBSD) += wbsd.o | |||
18 | obj-$(CONFIG_MMC_AU1X) += au1xmmc.o | 18 | obj-$(CONFIG_MMC_AU1X) += au1xmmc.o |
19 | obj-$(CONFIG_MMC_OMAP) += omap.o | 19 | obj-$(CONFIG_MMC_OMAP) += omap.o |
20 | obj-$(CONFIG_MMC_OMAP_HS) += omap_hsmmc.o | 20 | obj-$(CONFIG_MMC_OMAP_HS) += omap_hsmmc.o |
21 | obj-$(CONFIG_MMC_AT91) += at91_mci.o | ||
21 | obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o | 22 | obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o |
22 | obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o | 23 | obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o |
23 | obj-$(CONFIG_MMC_MSM) += msm_sdcc.o | 24 | obj-$(CONFIG_MMC_MSM) += msm_sdcc.o |
@@ -38,18 +39,10 @@ obj-$(CONFIG_MMC_CB710) += cb710-mmc.o | |||
38 | obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o | 39 | obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o |
39 | obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o | 40 | obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o |
40 | obj-$(CONFIG_MMC_DW) += dw_mmc.o | 41 | obj-$(CONFIG_MMC_DW) += dw_mmc.o |
41 | obj-$(CONFIG_MMC_DW_PLTFM) += dw_mmc-pltfm.o | ||
42 | obj-$(CONFIG_MMC_DW_EXYNOS) += dw_mmc-exynos.o | ||
43 | obj-$(CONFIG_MMC_DW_PCI) += dw_mmc-pci.o | ||
44 | obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o | 42 | obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o |
45 | obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o | 43 | obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o |
46 | obj-$(CONFIG_MMC_VUB300) += vub300.o | 44 | obj-$(CONFIG_MMC_VUB300) += vub300.o |
47 | obj-$(CONFIG_MMC_USHC) += ushc.o | 45 | obj-$(CONFIG_MMC_USHC) += ushc.o |
48 | obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o | ||
49 | |||
50 | obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o | ||
51 | |||
52 | obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o | ||
53 | 46 | ||
54 | obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o | 47 | obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o |
55 | obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o | 48 | obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o |
diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h index c97001e1522..fc8a0fe7c5c 100644 --- a/drivers/mmc/host/atmel-mci-regs.h +++ b/drivers/mmc/host/atmel-mci-regs.h | |||
@@ -17,148 +17,112 @@ | |||
17 | #define __DRIVERS_MMC_ATMEL_MCI_H__ | 17 | #define __DRIVERS_MMC_ATMEL_MCI_H__ |
18 | 18 | ||
19 | /* MCI Register Definitions */ | 19 | /* MCI Register Definitions */ |
20 | #define ATMCI_CR 0x0000 /* Control */ | 20 | #define MCI_CR 0x0000 /* Control */ |
21 | # define ATMCI_CR_MCIEN ( 1 << 0) /* MCI Enable */ | 21 | # define MCI_CR_MCIEN ( 1 << 0) /* MCI Enable */ |
22 | # define ATMCI_CR_MCIDIS ( 1 << 1) /* MCI Disable */ | 22 | # define MCI_CR_MCIDIS ( 1 << 1) /* MCI Disable */ |
23 | # define ATMCI_CR_PWSEN ( 1 << 2) /* Power Save Enable */ | 23 | # define MCI_CR_PWSEN ( 1 << 2) /* Power Save Enable */ |
24 | # define ATMCI_CR_PWSDIS ( 1 << 3) /* Power Save Disable */ | 24 | # define MCI_CR_PWSDIS ( 1 << 3) /* Power Save Disable */ |
25 | # define ATMCI_CR_SWRST ( 1 << 7) /* Software Reset */ | 25 | # define MCI_CR_SWRST ( 1 << 7) /* Software Reset */ |
26 | #define ATMCI_MR 0x0004 /* Mode */ | 26 | #define MCI_MR 0x0004 /* Mode */ |
27 | # define ATMCI_MR_CLKDIV(x) ((x) << 0) /* Clock Divider */ | 27 | # define MCI_MR_CLKDIV(x) ((x) << 0) /* Clock Divider */ |
28 | # define ATMCI_MR_PWSDIV(x) ((x) << 8) /* Power Saving Divider */ | 28 | # define MCI_MR_PWSDIV(x) ((x) << 8) /* Power Saving Divider */ |
29 | # define ATMCI_MR_RDPROOF ( 1 << 11) /* Read Proof */ | 29 | # define MCI_MR_RDPROOF ( 1 << 11) /* Read Proof */ |
30 | # define ATMCI_MR_WRPROOF ( 1 << 12) /* Write Proof */ | 30 | # define MCI_MR_WRPROOF ( 1 << 12) /* Write Proof */ |
31 | # define ATMCI_MR_PDCFBYTE ( 1 << 13) /* Force Byte Transfer */ | 31 | # define MCI_MR_PDCFBYTE ( 1 << 13) /* Force Byte Transfer */ |
32 | # define ATMCI_MR_PDCPADV ( 1 << 14) /* Padding Value */ | 32 | # define MCI_MR_PDCPADV ( 1 << 14) /* Padding Value */ |
33 | # define ATMCI_MR_PDCMODE ( 1 << 15) /* PDC-oriented Mode */ | 33 | # define MCI_MR_PDCMODE ( 1 << 15) /* PDC-oriented Mode */ |
34 | # define ATMCI_MR_CLKODD(x) ((x) << 16) /* LSB of Clock Divider */ | 34 | #define MCI_DTOR 0x0008 /* Data Timeout */ |
35 | #define ATMCI_DTOR 0x0008 /* Data Timeout */ | 35 | # define MCI_DTOCYC(x) ((x) << 0) /* Data Timeout Cycles */ |
36 | # define ATMCI_DTOCYC(x) ((x) << 0) /* Data Timeout Cycles */ | 36 | # define MCI_DTOMUL(x) ((x) << 4) /* Data Timeout Multiplier */ |
37 | # define ATMCI_DTOMUL(x) ((x) << 4) /* Data Timeout Multiplier */ | 37 | #define MCI_SDCR 0x000c /* SD Card / SDIO */ |
38 | #define ATMCI_SDCR 0x000c /* SD Card / SDIO */ | 38 | # define MCI_SDCSEL_SLOT_A ( 0 << 0) /* Select SD slot A */ |
39 | # define ATMCI_SDCSEL_SLOT_A ( 0 << 0) /* Select SD slot A */ | 39 | # define MCI_SDCSEL_SLOT_B ( 1 << 0) /* Select SD slot A */ |
40 | # define ATMCI_SDCSEL_SLOT_B ( 1 << 0) /* Select SD slot A */ | 40 | # define MCI_SDCSEL_MASK ( 3 << 0) |
41 | # define ATMCI_SDCSEL_MASK ( 3 << 0) | 41 | # define MCI_SDCBUS_1BIT ( 0 << 6) /* 1-bit data bus */ |
42 | # define ATMCI_SDCBUS_1BIT ( 0 << 6) /* 1-bit data bus */ | 42 | # define MCI_SDCBUS_4BIT ( 2 << 6) /* 4-bit data bus */ |
43 | # define ATMCI_SDCBUS_4BIT ( 2 << 6) /* 4-bit data bus */ | 43 | # define MCI_SDCBUS_8BIT ( 3 << 6) /* 8-bit data bus[2] */ |
44 | # define ATMCI_SDCBUS_8BIT ( 3 << 6) /* 8-bit data bus[2] */ | 44 | # define MCI_SDCBUS_MASK ( 3 << 6) |
45 | # define ATMCI_SDCBUS_MASK ( 3 << 6) | 45 | #define MCI_ARGR 0x0010 /* Command Argument */ |
46 | #define ATMCI_ARGR 0x0010 /* Command Argument */ | 46 | #define MCI_CMDR 0x0014 /* Command */ |
47 | #define ATMCI_CMDR 0x0014 /* Command */ | 47 | # define MCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */ |
48 | # define ATMCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */ | 48 | # define MCI_CMDR_RSPTYP_NONE ( 0 << 6) /* No response */ |
49 | # define ATMCI_CMDR_RSPTYP_NONE ( 0 << 6) /* No response */ | 49 | # define MCI_CMDR_RSPTYP_48BIT ( 1 << 6) /* 48-bit response */ |
50 | # define ATMCI_CMDR_RSPTYP_48BIT ( 1 << 6) /* 48-bit response */ | 50 | # define MCI_CMDR_RSPTYP_136BIT ( 2 << 6) /* 136-bit response */ |
51 | # define ATMCI_CMDR_RSPTYP_136BIT ( 2 << 6) /* 136-bit response */ | 51 | # define MCI_CMDR_SPCMD_INIT ( 1 << 8) /* Initialization command */ |
52 | # define ATMCI_CMDR_SPCMD_INIT ( 1 << 8) /* Initialization command */ | 52 | # define MCI_CMDR_SPCMD_SYNC ( 2 << 8) /* Synchronized command */ |
53 | # define ATMCI_CMDR_SPCMD_SYNC ( 2 << 8) /* Synchronized command */ | 53 | # define MCI_CMDR_SPCMD_INT ( 4 << 8) /* Interrupt command */ |
54 | # define ATMCI_CMDR_SPCMD_INT ( 4 << 8) /* Interrupt command */ | 54 | # define MCI_CMDR_SPCMD_INTRESP ( 5 << 8) /* Interrupt response */ |
55 | # define ATMCI_CMDR_SPCMD_INTRESP ( 5 << 8) /* Interrupt response */ | 55 | # define MCI_CMDR_OPDCMD ( 1 << 11) /* Open Drain */ |
56 | # define ATMCI_CMDR_OPDCMD ( 1 << 11) /* Open Drain */ | 56 | # define MCI_CMDR_MAXLAT_5CYC ( 0 << 12) /* Max latency 5 cycles */ |
57 | # define ATMCI_CMDR_MAXLAT_5CYC ( 0 << 12) /* Max latency 5 cycles */ | 57 | # define MCI_CMDR_MAXLAT_64CYC ( 1 << 12) /* Max latency 64 cycles */ |
58 | # define ATMCI_CMDR_MAXLAT_64CYC ( 1 << 12) /* Max latency 64 cycles */ | 58 | # define MCI_CMDR_START_XFER ( 1 << 16) /* Start data transfer */ |
59 | # define ATMCI_CMDR_START_XFER ( 1 << 16) /* Start data transfer */ | 59 | # define MCI_CMDR_STOP_XFER ( 2 << 16) /* Stop data transfer */ |
60 | # define ATMCI_CMDR_STOP_XFER ( 2 << 16) /* Stop data transfer */ | 60 | # define MCI_CMDR_TRDIR_WRITE ( 0 << 18) /* Write data */ |
61 | # define ATMCI_CMDR_TRDIR_WRITE ( 0 << 18) /* Write data */ | 61 | # define MCI_CMDR_TRDIR_READ ( 1 << 18) /* Read data */ |
62 | # define ATMCI_CMDR_TRDIR_READ ( 1 << 18) /* Read data */ | 62 | # define MCI_CMDR_BLOCK ( 0 << 19) /* Single-block transfer */ |
63 | # define ATMCI_CMDR_BLOCK ( 0 << 19) /* Single-block transfer */ | 63 | # define MCI_CMDR_MULTI_BLOCK ( 1 << 19) /* Multi-block transfer */ |
64 | # define ATMCI_CMDR_MULTI_BLOCK ( 1 << 19) /* Multi-block transfer */ | 64 | # define MCI_CMDR_STREAM ( 2 << 19) /* MMC Stream transfer */ |
65 | # define ATMCI_CMDR_STREAM ( 2 << 19) /* MMC Stream transfer */ | 65 | # define MCI_CMDR_SDIO_BYTE ( 4 << 19) /* SDIO Byte transfer */ |
66 | # define ATMCI_CMDR_SDIO_BYTE ( 4 << 19) /* SDIO Byte transfer */ | 66 | # define MCI_CMDR_SDIO_BLOCK ( 5 << 19) /* SDIO Block transfer */ |
67 | # define ATMCI_CMDR_SDIO_BLOCK ( 5 << 19) /* SDIO Block transfer */ | 67 | # define MCI_CMDR_SDIO_SUSPEND ( 1 << 24) /* SDIO Suspend Command */ |
68 | # define ATMCI_CMDR_SDIO_SUSPEND ( 1 << 24) /* SDIO Suspend Command */ | 68 | # define MCI_CMDR_SDIO_RESUME ( 2 << 24) /* SDIO Resume Command */ |
69 | # define ATMCI_CMDR_SDIO_RESUME ( 2 << 24) /* SDIO Resume Command */ | 69 | #define MCI_BLKR 0x0018 /* Block */ |
70 | #define ATMCI_BLKR 0x0018 /* Block */ | 70 | # define MCI_BCNT(x) ((x) << 0) /* Data Block Count */ |
71 | # define ATMCI_BCNT(x) ((x) << 0) /* Data Block Count */ | 71 | # define MCI_BLKLEN(x) ((x) << 16) /* Data Block Length */ |
72 | # define ATMCI_BLKLEN(x) ((x) << 16) /* Data Block Length */ | 72 | #define MCI_CSTOR 0x001c /* Completion Signal Timeout[2] */ |
73 | #define ATMCI_CSTOR 0x001c /* Completion Signal Timeout[2] */ | 73 | # define MCI_CSTOCYC(x) ((x) << 0) /* CST cycles */ |
74 | # define ATMCI_CSTOCYC(x) ((x) << 0) /* CST cycles */ | 74 | # define MCI_CSTOMUL(x) ((x) << 4) /* CST multiplier */ |
75 | # define ATMCI_CSTOMUL(x) ((x) << 4) /* CST multiplier */ | 75 | #define MCI_RSPR 0x0020 /* Response 0 */ |
76 | #define ATMCI_RSPR 0x0020 /* Response 0 */ | 76 | #define MCI_RSPR1 0x0024 /* Response 1 */ |
77 | #define ATMCI_RSPR1 0x0024 /* Response 1 */ | 77 | #define MCI_RSPR2 0x0028 /* Response 2 */ |
78 | #define ATMCI_RSPR2 0x0028 /* Response 2 */ | 78 | #define MCI_RSPR3 0x002c /* Response 3 */ |
79 | #define ATMCI_RSPR3 0x002c /* Response 3 */ | 79 | #define MCI_RDR 0x0030 /* Receive Data */ |
80 | #define ATMCI_RDR 0x0030 /* Receive Data */ | 80 | #define MCI_TDR 0x0034 /* Transmit Data */ |
81 | #define ATMCI_TDR 0x0034 /* Transmit Data */ | 81 | #define MCI_SR 0x0040 /* Status */ |
82 | #define ATMCI_SR 0x0040 /* Status */ | 82 | #define MCI_IER 0x0044 /* Interrupt Enable */ |
83 | #define ATMCI_IER 0x0044 /* Interrupt Enable */ | 83 | #define MCI_IDR 0x0048 /* Interrupt Disable */ |
84 | #define ATMCI_IDR 0x0048 /* Interrupt Disable */ | 84 | #define MCI_IMR 0x004c /* Interrupt Mask */ |
85 | #define ATMCI_IMR 0x004c /* Interrupt Mask */ | 85 | # define MCI_CMDRDY ( 1 << 0) /* Command Ready */ |
86 | # define ATMCI_CMDRDY ( 1 << 0) /* Command Ready */ | 86 | # define MCI_RXRDY ( 1 << 1) /* Receiver Ready */ |
87 | # define ATMCI_RXRDY ( 1 << 1) /* Receiver Ready */ | 87 | # define MCI_TXRDY ( 1 << 2) /* Transmitter Ready */ |
88 | # define ATMCI_TXRDY ( 1 << 2) /* Transmitter Ready */ | 88 | # define MCI_BLKE ( 1 << 3) /* Data Block Ended */ |
89 | # define ATMCI_BLKE ( 1 << 3) /* Data Block Ended */ | 89 | # define MCI_DTIP ( 1 << 4) /* Data Transfer In Progress */ |
90 | # define ATMCI_DTIP ( 1 << 4) /* Data Transfer In Progress */ | 90 | # define MCI_NOTBUSY ( 1 << 5) /* Data Not Busy */ |
91 | # define ATMCI_NOTBUSY ( 1 << 5) /* Data Not Busy */ | 91 | # define MCI_SDIOIRQA ( 1 << 8) /* SDIO IRQ in slot A */ |
92 | # define ATMCI_ENDRX ( 1 << 6) /* End of RX Buffer */ | 92 | # define MCI_SDIOIRQB ( 1 << 9) /* SDIO IRQ in slot B */ |
93 | # define ATMCI_ENDTX ( 1 << 7) /* End of TX Buffer */ | 93 | # define MCI_RINDE ( 1 << 16) /* Response Index Error */ |
94 | # define ATMCI_SDIOIRQA ( 1 << 8) /* SDIO IRQ in slot A */ | 94 | # define MCI_RDIRE ( 1 << 17) /* Response Direction Error */ |
95 | # define ATMCI_SDIOIRQB ( 1 << 9) /* SDIO IRQ in slot B */ | 95 | # define MCI_RCRCE ( 1 << 18) /* Response CRC Error */ |
96 | # define ATMCI_SDIOWAIT ( 1 << 12) /* SDIO Read Wait Operation Status */ | 96 | # define MCI_RENDE ( 1 << 19) /* Response End Bit Error */ |
97 | # define ATMCI_CSRCV ( 1 << 13) /* CE-ATA Completion Signal Received */ | 97 | # define MCI_RTOE ( 1 << 20) /* Response Time-Out Error */ |
98 | # define ATMCI_RXBUFF ( 1 << 14) /* RX Buffer Full */ | 98 | # define MCI_DCRCE ( 1 << 21) /* Data CRC Error */ |
99 | # define ATMCI_TXBUFE ( 1 << 15) /* TX Buffer Empty */ | 99 | # define MCI_DTOE ( 1 << 22) /* Data Time-Out Error */ |
100 | # define ATMCI_RINDE ( 1 << 16) /* Response Index Error */ | 100 | # define MCI_OVRE ( 1 << 30) /* RX Overrun Error */ |
101 | # define ATMCI_RDIRE ( 1 << 17) /* Response Direction Error */ | 101 | # define MCI_UNRE ( 1 << 31) /* TX Underrun Error */ |
102 | # define ATMCI_RCRCE ( 1 << 18) /* Response CRC Error */ | 102 | #define MCI_DMA 0x0050 /* DMA Configuration[2] */ |
103 | # define ATMCI_RENDE ( 1 << 19) /* Response End Bit Error */ | 103 | # define MCI_DMA_OFFSET(x) ((x) << 0) /* DMA Write Buffer Offset */ |
104 | # define ATMCI_RTOE ( 1 << 20) /* Response Time-Out Error */ | 104 | # define MCI_DMA_CHKSIZE(x) ((x) << 4) /* DMA Channel Read and Write Chunk Size */ |
105 | # define ATMCI_DCRCE ( 1 << 21) /* Data CRC Error */ | 105 | # define MCI_DMAEN ( 1 << 8) /* DMA Hardware Handshaking Enable */ |
106 | # define ATMCI_DTOE ( 1 << 22) /* Data Time-Out Error */ | 106 | #define MCI_CFG 0x0054 /* Configuration[2] */ |
107 | # define ATMCI_CSTOE ( 1 << 23) /* Completion Signal Time-out Error */ | 107 | # define MCI_CFG_FIFOMODE_1DATA ( 1 << 0) /* MCI Internal FIFO control mode */ |
108 | # define ATMCI_BLKOVRE ( 1 << 24) /* DMA Block Overrun Error */ | 108 | # define MCI_CFG_FERRCTRL_COR ( 1 << 4) /* Flow Error flag reset control mode */ |
109 | # define ATMCI_DMADONE ( 1 << 25) /* DMA Transfer Done */ | 109 | # define MCI_CFG_HSMODE ( 1 << 8) /* High Speed Mode */ |
110 | # define ATMCI_FIFOEMPTY ( 1 << 26) /* FIFO Empty Flag */ | 110 | # define MCI_CFG_LSYNC ( 1 << 12) /* Synchronize on the last block */ |
111 | # define ATMCI_XFRDONE ( 1 << 27) /* Transfer Done Flag */ | 111 | #define MCI_WPMR 0x00e4 /* Write Protection Mode[2] */ |
112 | # define ATMCI_ACKRCV ( 1 << 28) /* Boot Operation Acknowledge Received */ | 112 | # define MCI_WP_EN ( 1 << 0) /* WP Enable */ |
113 | # define ATMCI_ACKRCVE ( 1 << 29) /* Boot Operation Acknowledge Error */ | 113 | # define MCI_WP_KEY (0x4d4349 << 8) /* WP Key */ |
114 | # define ATMCI_OVRE ( 1 << 30) /* RX Overrun Error */ | 114 | #define MCI_WPSR 0x00e8 /* Write Protection Status[2] */ |
115 | # define ATMCI_UNRE ( 1 << 31) /* TX Underrun Error */ | 115 | # define MCI_GET_WP_VS(x) ((x) & 0x0f) |
116 | #define ATMCI_DMA 0x0050 /* DMA Configuration[2] */ | 116 | # define MCI_GET_WP_VSRC(x) (((x) >> 8) & 0xffff) |
117 | # define ATMCI_DMA_OFFSET(x) ((x) << 0) /* DMA Write Buffer Offset */ | 117 | #define MCI_FIFO_APERTURE 0x0200 /* FIFO Aperture[2] */ |
118 | # define ATMCI_DMA_CHKSIZE(x) ((x) << 4) /* DMA Channel Read and Write Chunk Size */ | ||
119 | # define ATMCI_DMAEN ( 1 << 8) /* DMA Hardware Handshaking Enable */ | ||
120 | #define ATMCI_CFG 0x0054 /* Configuration[2] */ | ||
121 | # define ATMCI_CFG_FIFOMODE_1DATA ( 1 << 0) /* MCI Internal FIFO control mode */ | ||
122 | # define ATMCI_CFG_FERRCTRL_COR ( 1 << 4) /* Flow Error flag reset control mode */ | ||
123 | # define ATMCI_CFG_HSMODE ( 1 << 8) /* High Speed Mode */ | ||
124 | # define ATMCI_CFG_LSYNC ( 1 << 12) /* Synchronize on the last block */ | ||
125 | #define ATMCI_WPMR 0x00e4 /* Write Protection Mode[2] */ | ||
126 | # define ATMCI_WP_EN ( 1 << 0) /* WP Enable */ | ||
127 | # define ATMCI_WP_KEY (0x4d4349 << 8) /* WP Key */ | ||
128 | #define ATMCI_WPSR 0x00e8 /* Write Protection Status[2] */ | ||
129 | # define ATMCI_GET_WP_VS(x) ((x) & 0x0f) | ||
130 | # define ATMCI_GET_WP_VSRC(x) (((x) >> 8) & 0xffff) | ||
131 | #define ATMCI_VERSION 0x00FC /* Version */ | ||
132 | #define ATMCI_FIFO_APERTURE 0x0200 /* FIFO Aperture[2] */ | ||
133 | 118 | ||
134 | /* This is not including the FIFO Aperture on MCI2 */ | 119 | /* This is not including the FIFO Aperture on MCI2 */ |
135 | #define ATMCI_REGS_SIZE 0x100 | 120 | #define MCI_REGS_SIZE 0x100 |
136 | 121 | ||
137 | /* Register access macros */ | 122 | /* Register access macros */ |
138 | #define atmci_readl(port,reg) \ | 123 | #define mci_readl(port,reg) \ |
139 | __raw_readl((port)->regs + reg) | 124 | __raw_readl((port)->regs + MCI_##reg) |
140 | #define atmci_writel(port,reg,value) \ | 125 | #define mci_writel(port,reg,value) \ |
141 | __raw_writel((value), (port)->regs + reg) | 126 | __raw_writel((value), (port)->regs + MCI_##reg) |
142 | |||
143 | /* On AVR chips the Peripheral DMA Controller is not connected to MCI. */ | ||
144 | #ifdef CONFIG_AVR32 | ||
145 | # define ATMCI_PDC_CONNECTED 0 | ||
146 | #else | ||
147 | # define ATMCI_PDC_CONNECTED 1 | ||
148 | #endif | ||
149 | |||
150 | /* | ||
151 | * Fix sconfig's burst size according to atmel MCI. We need to convert them as: | ||
152 | * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. | ||
153 | * | ||
154 | * This can be done by finding most significant bit set. | ||
155 | */ | ||
156 | static inline unsigned int atmci_convert_chksize(unsigned int maxburst) | ||
157 | { | ||
158 | if (maxburst > 1) | ||
159 | return fls(maxburst) - 2; | ||
160 | else | ||
161 | return 0; | ||
162 | } | ||
163 | 127 | ||
164 | #endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */ | 128 | #endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */ |
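The header change above also swaps the register-access convention: the removed (left-column) macros take the complete register constant, while the restored (right-column) macros token-paste an MCI_ prefix onto their second argument. A small call-site comparison, for illustration only; the two conventions never coexist in one tree, and the function names here are invented:

```c
/* Removed convention: the caller spells out the full ATMCI_* register
 * macro, so atmci_writel(host, ATMCI_IER, ...) adds the raw offset.    */
static void example_enable_cmdrdy_new(struct atmel_mci *host)
{
	atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
}

/* Restored convention: "MCI_" is token-pasted onto the short register
 * name, so mci_writel(host, IER, ...) expands to
 * __raw_writel(..., host->regs + MCI_IER).                             */
static void example_enable_cmdrdy_old(struct atmel_mci *host)
{
	mci_writel(host, IER, MCI_CMDRDY);
}
```

The removed atmci_convert_chksize() helper, also dropped here, maps a DMA burst size onto the two-bit chunk-size encoding with fls(): 1 gives 0, 4 gives fls(4) - 2 = 1, 8 gives 2 and 16 gives 3.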
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 722af1de796..fa8cae1d700 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c | |||
@@ -19,97 +19,64 @@ | |||
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <linux/ioport.h> | 20 | #include <linux/ioport.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/of.h> | ||
23 | #include <linux/of_device.h> | ||
24 | #include <linux/of_gpio.h> | ||
25 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
26 | #include <linux/scatterlist.h> | 23 | #include <linux/scatterlist.h> |
27 | #include <linux/seq_file.h> | 24 | #include <linux/seq_file.h> |
28 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
29 | #include <linux/stat.h> | 26 | #include <linux/stat.h> |
30 | #include <linux/types.h> | ||
31 | #include <linux/platform_data/atmel.h> | ||
32 | 27 | ||
33 | #include <linux/mmc/host.h> | 28 | #include <linux/mmc/host.h> |
34 | #include <linux/mmc/sdio.h> | 29 | #include <linux/mmc/sdio.h> |
35 | 30 | ||
36 | #include <mach/atmel-mci.h> | 31 | #include <mach/atmel-mci.h> |
37 | #include <linux/atmel-mci.h> | 32 | #include <linux/atmel-mci.h> |
38 | #include <linux/atmel_pdc.h> | ||
39 | 33 | ||
40 | #include <asm/io.h> | 34 | #include <asm/io.h> |
41 | #include <asm/unaligned.h> | 35 | #include <asm/unaligned.h> |
42 | 36 | ||
43 | #include <mach/cpu.h> | 37 | #include <mach/cpu.h> |
38 | #include <mach/board.h> | ||
44 | 39 | ||
45 | #include "atmel-mci-regs.h" | 40 | #include "atmel-mci-regs.h" |
46 | 41 | ||
47 | #define ATMCI_DATA_ERROR_FLAGS (ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE) | 42 | #define ATMCI_DATA_ERROR_FLAGS (MCI_DCRCE | MCI_DTOE | MCI_OVRE | MCI_UNRE) |
48 | #define ATMCI_DMA_THRESHOLD 16 | 43 | #define ATMCI_DMA_THRESHOLD 16 |
49 | 44 | ||
50 | enum { | 45 | enum { |
51 | EVENT_CMD_RDY = 0, | 46 | EVENT_CMD_COMPLETE = 0, |
52 | EVENT_XFER_COMPLETE, | 47 | EVENT_XFER_COMPLETE, |
53 | EVENT_NOTBUSY, | 48 | EVENT_DATA_COMPLETE, |
54 | EVENT_DATA_ERROR, | 49 | EVENT_DATA_ERROR, |
55 | }; | 50 | }; |
56 | 51 | ||
57 | enum atmel_mci_state { | 52 | enum atmel_mci_state { |
58 | STATE_IDLE = 0, | 53 | STATE_IDLE = 0, |
59 | STATE_SENDING_CMD, | 54 | STATE_SENDING_CMD, |
60 | STATE_DATA_XFER, | 55 | STATE_SENDING_DATA, |
61 | STATE_WAITING_NOTBUSY, | 56 | STATE_DATA_BUSY, |
62 | STATE_SENDING_STOP, | 57 | STATE_SENDING_STOP, |
63 | STATE_END_REQUEST, | 58 | STATE_DATA_ERROR, |
64 | }; | ||
65 | |||
66 | enum atmci_xfer_dir { | ||
67 | XFER_RECEIVE = 0, | ||
68 | XFER_TRANSMIT, | ||
69 | }; | ||
70 | |||
71 | enum atmci_pdc_buf { | ||
72 | PDC_FIRST_BUF = 0, | ||
73 | PDC_SECOND_BUF, | ||
74 | }; | ||
75 | |||
76 | struct atmel_mci_caps { | ||
77 | bool has_dma_conf_reg; | ||
78 | bool has_pdc; | ||
79 | bool has_cfg_reg; | ||
80 | bool has_cstor_reg; | ||
81 | bool has_highspeed; | ||
82 | bool has_rwproof; | ||
83 | bool has_odd_clk_div; | ||
84 | bool has_bad_data_ordering; | ||
85 | bool need_reset_after_xfer; | ||
86 | bool need_blksz_mul_4; | ||
87 | bool need_notbusy_for_read_ops; | ||
88 | }; | 59 | }; |
89 | 60 | ||
90 | struct atmel_mci_dma { | 61 | struct atmel_mci_dma { |
62 | #ifdef CONFIG_MMC_ATMELMCI_DMA | ||
91 | struct dma_chan *chan; | 63 | struct dma_chan *chan; |
92 | struct dma_async_tx_descriptor *data_desc; | 64 | struct dma_async_tx_descriptor *data_desc; |
65 | #endif | ||
93 | }; | 66 | }; |
94 | 67 | ||
95 | /** | 68 | /** |
96 | * struct atmel_mci - MMC controller state shared between all slots | 69 | * struct atmel_mci - MMC controller state shared between all slots |
97 | * @lock: Spinlock protecting the queue and associated data. | 70 | * @lock: Spinlock protecting the queue and associated data. |
98 | * @regs: Pointer to MMIO registers. | 71 | * @regs: Pointer to MMIO registers. |
99 | * @sg: Scatterlist entry currently being processed by PIO or PDC code. | 72 | * @sg: Scatterlist entry currently being processed by PIO code, if any. |
100 | * @pio_offset: Offset into the current scatterlist entry. | 73 | * @pio_offset: Offset into the current scatterlist entry. |
101 | * @buffer: Buffer used if we don't have the r/w proof capability. We | ||
102 | * don't have the time to switch pdc buffers so we have to use only | ||
103 | * one buffer for the full transaction. | ||
104 | * @buf_size: size of the buffer. | ||
105 | * @phys_buf_addr: buffer address needed for pdc. | ||
106 | * @cur_slot: The slot which is currently using the controller. | 74 | * @cur_slot: The slot which is currently using the controller. |
107 | * @mrq: The request currently being processed on @cur_slot, | 75 | * @mrq: The request currently being processed on @cur_slot, |
108 | * or NULL if the controller is idle. | 76 | * or NULL if the controller is idle. |
109 | * @cmd: The command currently being sent to the card, or NULL. | 77 | * @cmd: The command currently being sent to the card, or NULL. |
110 | * @data: The data currently being transferred, or NULL if no data | 78 | * @data: The data currently being transferred, or NULL if no data |
111 | * transfer is in progress. | 79 | * transfer is in progress. |
112 | * @data_size: just data->blocks * data->blksz. | ||
113 | * @dma: DMA client state. | 80 | * @dma: DMA client state. |
114 | * @data_chan: DMA channel being used for the current data transfer. | 81 | * @data_chan: DMA channel being used for the current data transfer. |
115 | * @cmd_status: Snapshot of SR taken upon completion of the current | 82 | * @cmd_status: Snapshot of SR taken upon completion of the current |
@@ -128,7 +95,6 @@ struct atmel_mci_dma { | |||
128 | * @queue: List of slots waiting for access to the controller. | 95 | * @queue: List of slots waiting for access to the controller. |
129 | * @need_clock_update: Update the clock rate before the next request. | 96 | * @need_clock_update: Update the clock rate before the next request. |
130 | * @need_reset: Reset controller before next request. | 97 | * @need_reset: Reset controller before next request. |
131 | * @timer: Timer to balance the data timeout error flag which cannot rise. | ||
132 | * @mode_reg: Value of the MR register. | 98 | * @mode_reg: Value of the MR register. |
133 | * @cfg_reg: Value of the CFG register. | 99 | * @cfg_reg: Value of the CFG register. |
134 | * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus | 100 | * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus |
@@ -137,13 +103,6 @@ struct atmel_mci_dma { | |||
137 | * @mck: The peripheral bus clock hooked up to the MMC controller. | 103 | * @mck: The peripheral bus clock hooked up to the MMC controller. |
138 | * @pdev: Platform device associated with the MMC controller. | 104 | * @pdev: Platform device associated with the MMC controller. |
139 | * @slot: Slots sharing this MMC controller. | 105 | * @slot: Slots sharing this MMC controller. |
140 | * @caps: MCI capabilities depending on MCI version. | ||
141 | * @prepare_data: function to setup MCI before data transfer which | ||
142 | * depends on MCI capabilities. | ||
143 | * @submit_data: function to start data transfer which depends on MCI | ||
144 | * capabilities. | ||
145 | * @stop_transfer: function to stop data transfer which depends on MCI | ||
146 | * capabilities. | ||
147 | * | 106 | * |
148 | * Locking | 107 | * Locking |
149 | * ======= | 108 | * ======= |
@@ -179,19 +138,14 @@ struct atmel_mci { | |||
179 | 138 | ||
180 | struct scatterlist *sg; | 139 | struct scatterlist *sg; |
181 | unsigned int pio_offset; | 140 | unsigned int pio_offset; |
182 | unsigned int *buffer; | ||
183 | unsigned int buf_size; | ||
184 | dma_addr_t buf_phys_addr; | ||
185 | 141 | ||
186 | struct atmel_mci_slot *cur_slot; | 142 | struct atmel_mci_slot *cur_slot; |
187 | struct mmc_request *mrq; | 143 | struct mmc_request *mrq; |
188 | struct mmc_command *cmd; | 144 | struct mmc_command *cmd; |
189 | struct mmc_data *data; | 145 | struct mmc_data *data; |
190 | unsigned int data_size; | ||
191 | 146 | ||
192 | struct atmel_mci_dma dma; | 147 | struct atmel_mci_dma dma; |
193 | struct dma_chan *data_chan; | 148 | struct dma_chan *data_chan; |
194 | struct dma_slave_config dma_conf; | ||
195 | 149 | ||
196 | u32 cmd_status; | 150 | u32 cmd_status; |
197 | u32 data_status; | 151 | u32 data_status; |
@@ -205,7 +159,6 @@ struct atmel_mci { | |||
205 | 159 | ||
206 | bool need_clock_update; | 160 | bool need_clock_update; |
207 | bool need_reset; | 161 | bool need_reset; |
208 | struct timer_list timer; | ||
209 | u32 mode_reg; | 162 | u32 mode_reg; |
210 | u32 cfg_reg; | 163 | u32 cfg_reg; |
211 | unsigned long bus_hz; | 164 | unsigned long bus_hz; |
@@ -213,13 +166,7 @@ struct atmel_mci { | |||
213 | struct clk *mck; | 166 | struct clk *mck; |
214 | struct platform_device *pdev; | 167 | struct platform_device *pdev; |
215 | 168 | ||
216 | struct atmel_mci_slot *slot[ATMCI_MAX_NR_SLOTS]; | 169 | struct atmel_mci_slot *slot[ATMEL_MCI_MAX_NR_SLOTS]; |
217 | |||
218 | struct atmel_mci_caps caps; | ||
219 | |||
220 | u32 (*prepare_data)(struct atmel_mci *host, struct mmc_data *data); | ||
221 | void (*submit_data)(struct atmel_mci *host, struct mmc_data *data); | ||
222 | void (*stop_transfer)(struct atmel_mci *host); | ||
223 | }; | 170 | }; |
224 | 171 | ||
225 | /** | 172 | /** |
@@ -273,6 +220,31 @@ struct atmel_mci_slot { | |||
273 | set_bit(event, &host->pending_events) | 220 | set_bit(event, &host->pending_events) |
274 | 221 | ||
275 | /* | 222 | /* |
223 | * Enable or disable features/registers based on | ||
224 | * whether the processor supports them | ||
225 | */ | ||
226 | static bool mci_has_rwproof(void) | ||
227 | { | ||
228 | if (cpu_is_at91sam9261() || cpu_is_at91rm9200()) | ||
229 | return false; | ||
230 | else | ||
231 | return true; | ||
232 | } | ||
233 | |||
234 | /* | ||
235 | * The new MCI2 module isn't 100% compatible with the old MCI module, | ||
236 | * and it has a few nice features which we want to use... | ||
237 | */ | ||
238 | static inline bool atmci_is_mci2(void) | ||
239 | { | ||
240 | if (cpu_is_at91sam9g45()) | ||
241 | return true; | ||
242 | |||
243 | return false; | ||
244 | } | ||
245 | |||
246 | |||
247 | /* | ||
276 | * The debugfs stuff below is mostly optimized away when | 248 | * The debugfs stuff below is mostly optimized away when |
277 | * CONFIG_DEBUG_FS is not set. | 249 | * CONFIG_DEBUG_FS is not set. |
278 | */ | 250 | */ |
@@ -380,7 +352,7 @@ static int atmci_regs_show(struct seq_file *s, void *v) | |||
380 | struct atmel_mci *host = s->private; | 352 | struct atmel_mci *host = s->private; |
381 | u32 *buf; | 353 | u32 *buf; |
382 | 354 | ||
383 | buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL); | 355 | buf = kmalloc(MCI_REGS_SIZE, GFP_KERNEL); |
384 | if (!buf) | 356 | if (!buf) |
385 | return -ENOMEM; | 357 | return -ENOMEM; |
386 | 358 | ||
@@ -391,56 +363,47 @@ static int atmci_regs_show(struct seq_file *s, void *v) | |||
391 | */ | 363 | */ |
392 | spin_lock_bh(&host->lock); | 364 | spin_lock_bh(&host->lock); |
393 | clk_enable(host->mck); | 365 | clk_enable(host->mck); |
394 | memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE); | 366 | memcpy_fromio(buf, host->regs, MCI_REGS_SIZE); |
395 | clk_disable(host->mck); | 367 | clk_disable(host->mck); |
396 | spin_unlock_bh(&host->lock); | 368 | spin_unlock_bh(&host->lock); |
397 | 369 | ||
398 | seq_printf(s, "MR:\t0x%08x%s%s ", | 370 | seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n", |
399 | buf[ATMCI_MR / 4], | 371 | buf[MCI_MR / 4], |
400 | buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "", | 372 | buf[MCI_MR / 4] & MCI_MR_RDPROOF ? " RDPROOF" : "", |
401 | buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : ""); | 373 | buf[MCI_MR / 4] & MCI_MR_WRPROOF ? " WRPROOF" : "", |
402 | if (host->caps.has_odd_clk_div) | 374 | buf[MCI_MR / 4] & 0xff); |
403 | seq_printf(s, "{CLKDIV,CLKODD}=%u\n", | 375 | seq_printf(s, "DTOR:\t0x%08x\n", buf[MCI_DTOR / 4]); |
404 | ((buf[ATMCI_MR / 4] & 0xff) << 1) | 376 | seq_printf(s, "SDCR:\t0x%08x\n", buf[MCI_SDCR / 4]); |
405 | | ((buf[ATMCI_MR / 4] >> 16) & 1)); | 377 | seq_printf(s, "ARGR:\t0x%08x\n", buf[MCI_ARGR / 4]); |
406 | else | ||
407 | seq_printf(s, "CLKDIV=%u\n", | ||
408 | (buf[ATMCI_MR / 4] & 0xff)); | ||
409 | seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]); | ||
410 | seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]); | ||
411 | seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]); | ||
412 | seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n", | 378 | seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n", |
413 | buf[ATMCI_BLKR / 4], | 379 | buf[MCI_BLKR / 4], |
414 | buf[ATMCI_BLKR / 4] & 0xffff, | 380 | buf[MCI_BLKR / 4] & 0xffff, |
415 | (buf[ATMCI_BLKR / 4] >> 16) & 0xffff); | 381 | (buf[MCI_BLKR / 4] >> 16) & 0xffff); |
416 | if (host->caps.has_cstor_reg) | 382 | if (atmci_is_mci2()) |
417 | seq_printf(s, "CSTOR:\t0x%08x\n", buf[ATMCI_CSTOR / 4]); | 383 | seq_printf(s, "CSTOR:\t0x%08x\n", buf[MCI_CSTOR / 4]); |
418 | 384 | ||
419 | /* Don't read RSPR and RDR; it will consume the data there */ | 385 | /* Don't read RSPR and RDR; it will consume the data there */ |
420 | 386 | ||
421 | atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]); | 387 | atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]); |
422 | atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]); | 388 | atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]); |
423 | 389 | ||
424 | if (host->caps.has_dma_conf_reg) { | 390 | if (atmci_is_mci2()) { |
425 | u32 val; | 391 | u32 val; |
426 | 392 | ||
427 | val = buf[ATMCI_DMA / 4]; | 393 | val = buf[MCI_DMA / 4]; |
428 | seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n", | 394 | seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n", |
429 | val, val & 3, | 395 | val, val & 3, |
430 | ((val >> 4) & 3) ? | 396 | ((val >> 4) & 3) ? |
431 | 1 << (((val >> 4) & 3) + 1) : 1, | 397 | 1 << (((val >> 4) & 3) + 1) : 1, |
432 | val & ATMCI_DMAEN ? " DMAEN" : ""); | 398 | val & MCI_DMAEN ? " DMAEN" : ""); |
433 | } | ||
434 | if (host->caps.has_cfg_reg) { | ||
435 | u32 val; | ||
436 | 399 | ||
437 | val = buf[ATMCI_CFG / 4]; | 400 | val = buf[MCI_CFG / 4]; |
438 | seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n", | 401 | seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n", |
439 | val, | 402 | val, |
440 | val & ATMCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "", | 403 | val & MCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "", |
441 | val & ATMCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "", | 404 | val & MCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "", |
442 | val & ATMCI_CFG_HSMODE ? " HSMODE" : "", | 405 | val & MCI_CFG_HSMODE ? " HSMODE" : "", |
443 | val & ATMCI_CFG_LSYNC ? " LSYNC" : ""); | 406 | val & MCI_CFG_LSYNC ? " LSYNC" : ""); |
444 | } | 407 | } |
445 | 408 | ||
446 | kfree(buf); | 409 | kfree(buf); |
@@ -503,107 +466,10 @@ err: | |||
503 | dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n"); | 466 | dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n"); |
504 | } | 467 | } |
505 | 468 | ||
506 | #if defined(CONFIG_OF) | 469 | static inline unsigned int ns_to_clocks(struct atmel_mci *host, |
507 | static const struct of_device_id atmci_dt_ids[] = { | ||
508 | { .compatible = "atmel,hsmci" }, | ||
509 | { /* sentinel */ } | ||
510 | }; | ||
511 | |||
512 | MODULE_DEVICE_TABLE(of, atmci_dt_ids); | ||
513 | |||
514 | static struct mci_platform_data* | ||
515 | atmci_of_init(struct platform_device *pdev) | ||
516 | { | ||
517 | struct device_node *np = pdev->dev.of_node; | ||
518 | struct device_node *cnp; | ||
519 | struct mci_platform_data *pdata; | ||
520 | u32 slot_id; | ||
521 | |||
522 | if (!np) { | ||
523 | dev_err(&pdev->dev, "device node not found\n"); | ||
524 | return ERR_PTR(-EINVAL); | ||
525 | } | ||
526 | |||
527 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | ||
528 | if (!pdata) { | ||
529 | dev_err(&pdev->dev, "could not allocate memory for pdata\n"); | ||
530 | return ERR_PTR(-ENOMEM); | ||
531 | } | ||
532 | |||
533 | for_each_child_of_node(np, cnp) { | ||
534 | if (of_property_read_u32(cnp, "reg", &slot_id)) { | ||
535 | dev_warn(&pdev->dev, "reg property is missing for %s\n", | ||
536 | cnp->full_name); | ||
537 | continue; | ||
538 | } | ||
539 | |||
540 | if (slot_id >= ATMCI_MAX_NR_SLOTS) { | ||
541 | dev_warn(&pdev->dev, "can't have more than %d slots\n", | ||
542 | ATMCI_MAX_NR_SLOTS); | ||
543 | break; | ||
544 | } | ||
545 | |||
546 | if (of_property_read_u32(cnp, "bus-width", | ||
547 | &pdata->slot[slot_id].bus_width)) | ||
548 | pdata->slot[slot_id].bus_width = 1; | ||
549 | |||
550 | pdata->slot[slot_id].detect_pin = | ||
551 | of_get_named_gpio(cnp, "cd-gpios", 0); | ||
552 | |||
553 | pdata->slot[slot_id].detect_is_active_high = | ||
554 | of_property_read_bool(cnp, "cd-inverted"); | ||
555 | |||
556 | pdata->slot[slot_id].wp_pin = | ||
557 | of_get_named_gpio(cnp, "wp-gpios", 0); | ||
558 | } | ||
559 | |||
560 | return pdata; | ||
561 | } | ||
562 | #else /* CONFIG_OF */ | ||
563 | static inline struct mci_platform_data* | ||
564 | atmci_of_init(struct platform_device *dev) | ||
565 | { | ||
566 | return ERR_PTR(-EINVAL); | ||
567 | } | ||
568 | #endif | ||
569 | |||
570 | static inline unsigned int atmci_get_version(struct atmel_mci *host) | ||
571 | { | ||
572 | return atmci_readl(host, ATMCI_VERSION) & 0x00000fff; | ||
573 | } | ||
574 | |||
575 | static void atmci_timeout_timer(unsigned long data) | ||
576 | { | ||
577 | struct atmel_mci *host; | ||
578 | |||
579 | host = (struct atmel_mci *)data; | ||
580 | |||
581 | dev_dbg(&host->pdev->dev, "software timeout\n"); | ||
582 | |||
583 | if (host->mrq->cmd->data) { | ||
584 | host->mrq->cmd->data->error = -ETIMEDOUT; | ||
585 | host->data = NULL; | ||
586 | } else { | ||
587 | host->mrq->cmd->error = -ETIMEDOUT; | ||
588 | host->cmd = NULL; | ||
589 | } | ||
590 | host->need_reset = 1; | ||
591 | host->state = STATE_END_REQUEST; | ||
592 | smp_wmb(); | ||
593 | tasklet_schedule(&host->tasklet); | ||
594 | } | ||
595 | |||
596 | static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host, | ||
597 | unsigned int ns) | 470 | unsigned int ns) |
598 | { | 471 | { |
599 | /* | 472 | return (ns * (host->bus_hz / 1000000) + 999) / 1000; |
600 | * It is easier here to use us instead of ns for the timeout, | ||
601 | * it prevents from overflows during calculation. | ||
602 | */ | ||
603 | unsigned int us = DIV_ROUND_UP(ns, 1000); | ||
604 | |||
605 | /* Maximum clock frequency is host->bus_hz/2 */ | ||
606 | return us * (DIV_ROUND_UP(host->bus_hz, 2000000)); | ||
607 | } | 473 | } |
608 | 474 | ||
609 | static void atmci_set_timeout(struct atmel_mci *host, | 475 | static void atmci_set_timeout(struct atmel_mci *host, |
@@ -616,8 +482,7 @@ static void atmci_set_timeout(struct atmel_mci *host, | |||
616 | unsigned dtocyc; | 482 | unsigned dtocyc; |
617 | unsigned dtomul; | 483 | unsigned dtomul; |
618 | 484 | ||
619 | timeout = atmci_ns_to_clocks(host, data->timeout_ns) | 485 | timeout = ns_to_clocks(host, data->timeout_ns) + data->timeout_clks; |
620 | + data->timeout_clks; | ||
621 | 486 | ||
622 | for (dtomul = 0; dtomul < 8; dtomul++) { | 487 | for (dtomul = 0; dtomul < 8; dtomul++) { |
623 | unsigned shift = dtomul_to_shift[dtomul]; | 488 | unsigned shift = dtomul_to_shift[dtomul]; |
@@ -633,7 +498,7 @@ static void atmci_set_timeout(struct atmel_mci *host, | |||
633 | 498 | ||
634 | dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n", | 499 | dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n", |
635 | dtocyc << dtomul_to_shift[dtomul]); | 500 | dtocyc << dtomul_to_shift[dtomul]); |
636 | atmci_writel(host, ATMCI_DTOR, (ATMCI_DTOMUL(dtomul) | ATMCI_DTOCYC(dtocyc))); | 501 | mci_writel(host, DTOR, (MCI_DTOMUL(dtomul) | MCI_DTOCYC(dtocyc))); |
637 | } | 502 | } |
638 | 503 | ||
639 | /* | 504 | /* |
@@ -647,13 +512,13 @@ static u32 atmci_prepare_command(struct mmc_host *mmc, | |||
647 | 512 | ||
648 | cmd->error = -EINPROGRESS; | 513 | cmd->error = -EINPROGRESS; |
649 | 514 | ||
650 | cmdr = ATMCI_CMDR_CMDNB(cmd->opcode); | 515 | cmdr = MCI_CMDR_CMDNB(cmd->opcode); |
651 | 516 | ||
652 | if (cmd->flags & MMC_RSP_PRESENT) { | 517 | if (cmd->flags & MMC_RSP_PRESENT) { |
653 | if (cmd->flags & MMC_RSP_136) | 518 | if (cmd->flags & MMC_RSP_136) |
654 | cmdr |= ATMCI_CMDR_RSPTYP_136BIT; | 519 | cmdr |= MCI_CMDR_RSPTYP_136BIT; |
655 | else | 520 | else |
656 | cmdr |= ATMCI_CMDR_RSPTYP_48BIT; | 521 | cmdr |= MCI_CMDR_RSPTYP_48BIT; |
657 | } | 522 | } |
658 | 523 | ||
659 | /* | 524 | /* |
@@ -661,34 +526,34 @@ static u32 atmci_prepare_command(struct mmc_host *mmc, | |||
661 | * it's too difficult to determine whether this is an ACMD or | 526 | * it's too difficult to determine whether this is an ACMD or |
662 | * not. Better make it 64. | 527 | * not. Better make it 64. |
663 | */ | 528 | */ |
664 | cmdr |= ATMCI_CMDR_MAXLAT_64CYC; | 529 | cmdr |= MCI_CMDR_MAXLAT_64CYC; |
665 | 530 | ||
666 | if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN) | 531 | if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN) |
667 | cmdr |= ATMCI_CMDR_OPDCMD; | 532 | cmdr |= MCI_CMDR_OPDCMD; |
668 | 533 | ||
669 | data = cmd->data; | 534 | data = cmd->data; |
670 | if (data) { | 535 | if (data) { |
671 | cmdr |= ATMCI_CMDR_START_XFER; | 536 | cmdr |= MCI_CMDR_START_XFER; |
672 | 537 | ||
673 | if (cmd->opcode == SD_IO_RW_EXTENDED) { | 538 | if (cmd->opcode == SD_IO_RW_EXTENDED) { |
674 | cmdr |= ATMCI_CMDR_SDIO_BLOCK; | 539 | cmdr |= MCI_CMDR_SDIO_BLOCK; |
675 | } else { | 540 | } else { |
676 | if (data->flags & MMC_DATA_STREAM) | 541 | if (data->flags & MMC_DATA_STREAM) |
677 | cmdr |= ATMCI_CMDR_STREAM; | 542 | cmdr |= MCI_CMDR_STREAM; |
678 | else if (data->blocks > 1) | 543 | else if (data->blocks > 1) |
679 | cmdr |= ATMCI_CMDR_MULTI_BLOCK; | 544 | cmdr |= MCI_CMDR_MULTI_BLOCK; |
680 | else | 545 | else |
681 | cmdr |= ATMCI_CMDR_BLOCK; | 546 | cmdr |= MCI_CMDR_BLOCK; |
682 | } | 547 | } |
683 | 548 | ||
684 | if (data->flags & MMC_DATA_READ) | 549 | if (data->flags & MMC_DATA_READ) |
685 | cmdr |= ATMCI_CMDR_TRDIR_READ; | 550 | cmdr |= MCI_CMDR_TRDIR_READ; |
686 | } | 551 | } |
687 | 552 | ||
688 | return cmdr; | 553 | return cmdr; |
689 | } | 554 | } |
690 | 555 | ||
691 | static void atmci_send_command(struct atmel_mci *host, | 556 | static void atmci_start_command(struct atmel_mci *host, |
692 | struct mmc_command *cmd, u32 cmd_flags) | 557 | struct mmc_command *cmd, u32 cmd_flags) |
693 | { | 558 | { |
694 | WARN_ON(host->cmd); | 559 | WARN_ON(host->cmd); |
@@ -698,142 +563,43 @@ static void atmci_send_command(struct atmel_mci *host, | |||
698 | "start command: ARGR=0x%08x CMDR=0x%08x\n", | 563 | "start command: ARGR=0x%08x CMDR=0x%08x\n", |
699 | cmd->arg, cmd_flags); | 564 | cmd->arg, cmd_flags); |
700 | 565 | ||
701 | atmci_writel(host, ATMCI_ARGR, cmd->arg); | 566 | mci_writel(host, ARGR, cmd->arg); |
702 | atmci_writel(host, ATMCI_CMDR, cmd_flags); | 567 | mci_writel(host, CMDR, cmd_flags); |
703 | } | ||
704 | |||
705 | static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data) | ||
706 | { | ||
707 | dev_dbg(&host->pdev->dev, "send stop command\n"); | ||
708 | atmci_send_command(host, data->stop, host->stop_cmdr); | ||
709 | atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY); | ||
710 | } | 568 | } |
711 | 569 | ||
712 | /* | 570 | static void send_stop_cmd(struct atmel_mci *host, struct mmc_data *data) |
713 | * Configure given PDC buffer taking care of alignement issues. | ||
714 | * Update host->data_size and host->sg. | ||
715 | */ | ||
716 | static void atmci_pdc_set_single_buf(struct atmel_mci *host, | ||
717 | enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb) | ||
718 | { | 571 | { |
719 | u32 pointer_reg, counter_reg; | 572 | atmci_start_command(host, data->stop, host->stop_cmdr); |
720 | unsigned int buf_size; | 573 | mci_writel(host, IER, MCI_CMDRDY); |
721 | |||
722 | if (dir == XFER_RECEIVE) { | ||
723 | pointer_reg = ATMEL_PDC_RPR; | ||
724 | counter_reg = ATMEL_PDC_RCR; | ||
725 | } else { | ||
726 | pointer_reg = ATMEL_PDC_TPR; | ||
727 | counter_reg = ATMEL_PDC_TCR; | ||
728 | } | ||
729 | |||
730 | if (buf_nb == PDC_SECOND_BUF) { | ||
731 | pointer_reg += ATMEL_PDC_SCND_BUF_OFF; | ||
732 | counter_reg += ATMEL_PDC_SCND_BUF_OFF; | ||
733 | } | ||
734 | |||
735 | if (!host->caps.has_rwproof) { | ||
736 | buf_size = host->buf_size; | ||
737 | atmci_writel(host, pointer_reg, host->buf_phys_addr); | ||
738 | } else { | ||
739 | buf_size = sg_dma_len(host->sg); | ||
740 | atmci_writel(host, pointer_reg, sg_dma_address(host->sg)); | ||
741 | } | ||
742 | |||
743 | if (host->data_size <= buf_size) { | ||
744 | if (host->data_size & 0x3) { | ||
745 | /* If size is different from modulo 4, transfer bytes */ | ||
746 | atmci_writel(host, counter_reg, host->data_size); | ||
747 | atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE); | ||
748 | } else { | ||
749 | /* Else transfer 32-bits words */ | ||
750 | atmci_writel(host, counter_reg, host->data_size / 4); | ||
751 | } | ||
752 | host->data_size = 0; | ||
753 | } else { | ||
754 | /* We assume the size of a page is 32-bits aligned */ | ||
755 | atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4); | ||
756 | host->data_size -= sg_dma_len(host->sg); | ||
757 | if (host->data_size) | ||
758 | host->sg = sg_next(host->sg); | ||
759 | } | ||
760 | } | 574 | } |
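For reference, the byte-versus-word decision made just above can be boiled down to the following standalone sketch; the helper name and signature are invented for illustration and are not part of this patch:

    #include <stdint.h>

    /* Returns the value to program into the PDC counter register and reports
     * whether the controller must be put in byte mode (PDCFBYTE) because the
     * remaining length is not a multiple of 4. */
    static uint32_t pdc_counter_value(uint32_t data_size, int *use_byte_mode)
    {
            if (data_size & 0x3) {
                    *use_byte_mode = 1;     /* count individual bytes */
                    return data_size;
            }
            *use_byte_mode = 0;             /* count 32-bit words */
            return data_size / 4;
    }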
761 | 575 | ||
762 | /* | 576 | #ifdef CONFIG_MMC_ATMELMCI_DMA |
763 | * Configure the PDC buffers according to the data size, i.e. configuring one or two | 577 | static void atmci_dma_cleanup(struct atmel_mci *host) |
764 | * buffers. Don't use this function if you want to configure only the second | ||
765 | * buffer. In this case, use atmci_pdc_set_single_buf. | ||
766 | */ | ||
767 | static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir) | ||
768 | { | ||
769 | atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF); | ||
770 | if (host->data_size) | ||
771 | atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF); | ||
772 | } | ||
773 | |||
774 | /* | ||
775 | * Unmap sg lists, called when transfer is finished. | ||
776 | */ | ||
777 | static void atmci_pdc_cleanup(struct atmel_mci *host) | ||
778 | { | 578 | { |
779 | struct mmc_data *data = host->data; | 579 | struct mmc_data *data = host->data; |
780 | 580 | ||
781 | if (data) | 581 | if (data) |
782 | dma_unmap_sg(&host->pdev->dev, | 582 | dma_unmap_sg(host->dma.chan->device->dev, |
783 | data->sg, data->sg_len, | 583 | data->sg, data->sg_len, |
784 | ((data->flags & MMC_DATA_WRITE) | 584 | ((data->flags & MMC_DATA_WRITE) |
785 | ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); | 585 | ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); |
786 | } | 586 | } |
787 | 587 | ||
788 | /* | 588 | static void atmci_stop_dma(struct atmel_mci *host) |
789 | * Disable PDC transfers. Update pending flags to EVENT_XFER_COMPLETE after | ||
790 | * having received the ATMCI_TXBUFE or ATMCI_RXBUFF interrupt. Enable the ATMCI_NOTBUSY | ||
791 | * interrupt, which is needed for both transfer directions. | ||
792 | */ | ||
793 | static void atmci_pdc_complete(struct atmel_mci *host) | ||
794 | { | 589 | { |
795 | int transfer_size = host->data->blocks * host->data->blksz; | 590 | struct dma_chan *chan = host->data_chan; |
796 | int i; | ||
797 | |||
798 | atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); | ||
799 | |||
800 | if ((!host->caps.has_rwproof) | ||
801 | && (host->data->flags & MMC_DATA_READ)) { | ||
802 | if (host->caps.has_bad_data_ordering) | ||
803 | for (i = 0; i < transfer_size; i++) | ||
804 | host->buffer[i] = swab32(host->buffer[i]); | ||
805 | sg_copy_from_buffer(host->data->sg, host->data->sg_len, | ||
806 | host->buffer, transfer_size); | ||
807 | } | ||
808 | |||
809 | atmci_pdc_cleanup(host); | ||
810 | 591 | ||
811 | /* | 592 | if (chan) { |
812 | * If the card was removed, data will be NULL. No point trying | 593 | dmaengine_terminate_all(chan); |
813 | * to send the stop command or waiting for NBUSY in this case. | 594 | atmci_dma_cleanup(host); |
814 | */ | 595 | } else { |
815 | if (host->data) { | 596 | /* Data transfer was stopped by the interrupt handler */ |
816 | dev_dbg(&host->pdev->dev, | ||
817 | "(%s) set pending xfer complete\n", __func__); | ||
818 | atmci_set_pending(host, EVENT_XFER_COMPLETE); | 597 | atmci_set_pending(host, EVENT_XFER_COMPLETE); |
819 | tasklet_schedule(&host->tasklet); | 598 | mci_writel(host, IER, MCI_NOTBUSY); |
820 | } | 599 | } |
821 | } | 600 | } |
822 | 601 | ||
823 | static void atmci_dma_cleanup(struct atmel_mci *host) | 602 | /* This function is called by the DMA driver from tasklet context. */ |
824 | { | ||
825 | struct mmc_data *data = host->data; | ||
826 | |||
827 | if (data) | ||
828 | dma_unmap_sg(host->dma.chan->device->dev, | ||
829 | data->sg, data->sg_len, | ||
830 | ((data->flags & MMC_DATA_WRITE) | ||
831 | ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); | ||
832 | } | ||
833 | |||
834 | /* | ||
835 | * This function is called by the DMA driver from tasklet context. | ||
836 | */ | ||
837 | static void atmci_dma_complete(void *arg) | 603 | static void atmci_dma_complete(void *arg) |
838 | { | 604 | { |
839 | struct atmel_mci *host = arg; | 605 | struct atmel_mci *host = arg; |
@@ -841,9 +607,9 @@ static void atmci_dma_complete(void *arg) | |||
841 | 607 | ||
842 | dev_vdbg(&host->pdev->dev, "DMA complete\n"); | 608 | dev_vdbg(&host->pdev->dev, "DMA complete\n"); |
843 | 609 | ||
844 | if (host->caps.has_dma_conf_reg) | 610 | if (atmci_is_mci2()) |
845 | /* Disable DMA hardware handshaking on MCI */ | 611 | /* Disable DMA hardware handshaking on MCI */ |
846 | atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN); | 612 | mci_writel(host, DMA, mci_readl(host, DMA) & ~MCI_DMAEN); |
847 | 613 | ||
848 | atmci_dma_cleanup(host); | 614 | atmci_dma_cleanup(host); |
849 | 615 | ||
@@ -852,8 +618,6 @@ static void atmci_dma_complete(void *arg) | |||
852 | * to send the stop command or waiting for NBUSY in this case. | 618 | * to send the stop command or waiting for NBUSY in this case. |
853 | */ | 619 | */ |
854 | if (data) { | 620 | if (data) { |
855 | dev_dbg(&host->pdev->dev, | ||
856 | "(%s) set pending xfer complete\n", __func__); | ||
857 | atmci_set_pending(host, EVENT_XFER_COMPLETE); | 621 | atmci_set_pending(host, EVENT_XFER_COMPLETE); |
858 | tasklet_schedule(&host->tasklet); | 622 | tasklet_schedule(&host->tasklet); |
859 | 623 | ||
@@ -877,104 +641,11 @@ static void atmci_dma_complete(void *arg) | |||
877 | * completion callback" rule of the dma engine | 641 | * completion callback" rule of the dma engine |
878 | * framework. | 642 | * framework. |
879 | */ | 643 | */ |
880 | atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); | 644 | mci_writel(host, IER, MCI_NOTBUSY); |
881 | } | 645 | } |
882 | } | 646 | } |
883 | 647 | ||
884 | /* | 648 | static int |
885 | * Returns a mask of interrupt flags to be enabled after the whole | ||
886 | * request has been prepared. | ||
887 | */ | ||
888 | static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data) | ||
889 | { | ||
890 | u32 iflags; | ||
891 | |||
892 | data->error = -EINPROGRESS; | ||
893 | |||
894 | host->sg = data->sg; | ||
895 | host->data = data; | ||
896 | host->data_chan = NULL; | ||
897 | |||
898 | iflags = ATMCI_DATA_ERROR_FLAGS; | ||
899 | |||
900 | /* | ||
901 | * Errata: MMC data write operation with less than 12 | ||
902 | * bytes is impossible. | ||
903 | * | ||
904 | * Errata: MCI Transmit Data Register (TDR) FIFO | ||
905 | * corruption when the length is not a multiple of 4. | ||
906 | */ | ||
907 | if (data->blocks * data->blksz < 12 | ||
908 | || (data->blocks * data->blksz) & 3) | ||
909 | host->need_reset = true; | ||
910 | |||
911 | host->pio_offset = 0; | ||
912 | if (data->flags & MMC_DATA_READ) | ||
913 | iflags |= ATMCI_RXRDY; | ||
914 | else | ||
915 | iflags |= ATMCI_TXRDY; | ||
916 | |||
917 | return iflags; | ||
918 | } | ||
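The two errata mentioned above reduce to a single length check; a minimal sketch, with a hypothetical helper name, of the condition that forces a controller reset after the transfer:

    /* True when the transfer triggers either erratum: a total length below
     * 12 bytes, or one that is not a multiple of 4. */
    static int transfer_hits_errata(unsigned int blocks, unsigned int blksz)
    {
            unsigned int len = blocks * blksz;

            return len < 12 || (len & 3);
    }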
919 | |||
920 | /* | ||
921 | * Set interrupt flags and set block length into the MCI mode register even | ||
922 | * if this value is also accessible in the MCI block register. It seems to be | ||
923 | * necessary before the High Speed MCI version. It also maps the sg list and configures the | ||
924 | * PDC registers. | ||
925 | */ | ||
926 | static u32 | ||
927 | atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data) | ||
928 | { | ||
929 | u32 iflags, tmp; | ||
930 | unsigned int sg_len; | ||
931 | enum dma_data_direction dir; | ||
932 | int i; | ||
933 | |||
934 | data->error = -EINPROGRESS; | ||
935 | |||
936 | host->data = data; | ||
937 | host->sg = data->sg; | ||
938 | iflags = ATMCI_DATA_ERROR_FLAGS; | ||
939 | |||
940 | /* Enable pdc mode */ | ||
941 | atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE); | ||
942 | |||
943 | if (data->flags & MMC_DATA_READ) { | ||
944 | dir = DMA_FROM_DEVICE; | ||
945 | iflags |= ATMCI_ENDRX | ATMCI_RXBUFF; | ||
946 | } else { | ||
947 | dir = DMA_TO_DEVICE; | ||
948 | iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE; | ||
949 | } | ||
950 | |||
951 | /* Set BLKLEN */ | ||
952 | tmp = atmci_readl(host, ATMCI_MR); | ||
953 | tmp &= 0x0000ffff; | ||
954 | tmp |= ATMCI_BLKLEN(data->blksz); | ||
955 | atmci_writel(host, ATMCI_MR, tmp); | ||
956 | |||
957 | /* Configure PDC */ | ||
958 | host->data_size = data->blocks * data->blksz; | ||
959 | sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir); | ||
960 | |||
961 | if ((!host->caps.has_rwproof) | ||
962 | && (host->data->flags & MMC_DATA_WRITE)) { | ||
963 | sg_copy_to_buffer(host->data->sg, host->data->sg_len, | ||
964 | host->buffer, host->data_size); | ||
965 | if (host->caps.has_bad_data_ordering) | ||
966 | for (i = 0; i < host->data_size; i++) | ||
967 | host->buffer[i] = swab32(host->buffer[i]); | ||
968 | } | ||
969 | |||
970 | if (host->data_size) | ||
971 | atmci_pdc_set_both_buf(host, | ||
972 | ((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT)); | ||
973 | |||
974 | return iflags; | ||
975 | } | ||
976 | |||
977 | static u32 | ||
978 | atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) | 649 | atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) |
979 | { | 650 | { |
980 | struct dma_chan *chan; | 651 | struct dma_chan *chan; |
@@ -982,18 +653,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) | |||
982 | struct scatterlist *sg; | 653 | struct scatterlist *sg; |
983 | unsigned int i; | 654 | unsigned int i; |
984 | enum dma_data_direction direction; | 655 | enum dma_data_direction direction; |
985 | enum dma_transfer_direction slave_dirn; | ||
986 | unsigned int sglen; | 656 | unsigned int sglen; |
987 | u32 maxburst; | ||
988 | u32 iflags; | ||
989 | |||
990 | data->error = -EINPROGRESS; | ||
991 | |||
992 | WARN_ON(host->data); | ||
993 | host->sg = NULL; | ||
994 | host->data = data; | ||
995 | |||
996 | iflags = ATMCI_DATA_ERROR_FLAGS; | ||
997 | 657 | ||
998 | /* | 658 | /* |
999 | * We don't do DMA on "complex" transfers, i.e. with | 659 | * We don't do DMA on "complex" transfers, i.e. with |
@@ -1001,13 +661,13 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) | |||
1001 | * with all the DMA setup overhead for short transfers. | 661 | * with all the DMA setup overhead for short transfers. |
1002 | */ | 662 | */ |
1003 | if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD) | 663 | if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD) |
1004 | return atmci_prepare_data(host, data); | 664 | return -EINVAL; |
1005 | if (data->blksz & 3) | 665 | if (data->blksz & 3) |
1006 | return atmci_prepare_data(host, data); | 666 | return -EINVAL; |
1007 | 667 | ||
1008 | for_each_sg(data->sg, sg, data->sg_len, i) { | 668 | for_each_sg(data->sg, sg, data->sg_len, i) { |
1009 | if (sg->offset & 3 || sg->length & 3) | 669 | if (sg->offset & 3 || sg->length & 3) |
1010 | return atmci_prepare_data(host, data); | 670 | return -EINVAL; |
1011 | } | 671 | } |
1012 | 672 | ||
1013 | /* If we don't have a channel, we can't do DMA */ | 673 | /* If we don't have a channel, we can't do DMA */ |
@@ -1018,26 +678,19 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) | |||
1018 | if (!chan) | 678 | if (!chan) |
1019 | return -ENODEV; | 679 | return -ENODEV; |
1020 | 680 | ||
1021 | if (data->flags & MMC_DATA_READ) { | 681 | if (atmci_is_mci2()) |
682 | mci_writel(host, DMA, MCI_DMA_CHKSIZE(3) | MCI_DMAEN); | ||
683 | |||
684 | if (data->flags & MMC_DATA_READ) | ||
1022 | direction = DMA_FROM_DEVICE; | 685 | direction = DMA_FROM_DEVICE; |
1023 | host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM; | 686 | else |
1024 | maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst); | ||
1025 | } else { | ||
1026 | direction = DMA_TO_DEVICE; | 687 | direction = DMA_TO_DEVICE; |
1027 | host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV; | ||
1028 | maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst); | ||
1029 | } | ||
1030 | |||
1031 | if (host->caps.has_dma_conf_reg) | ||
1032 | atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) | | ||
1033 | ATMCI_DMAEN); | ||
1034 | 688 | ||
1035 | sglen = dma_map_sg(chan->device->dev, data->sg, | 689 | sglen = dma_map_sg(chan->device->dev, data->sg, |
1036 | data->sg_len, direction); | 690 | data->sg_len, direction); |
1037 | 691 | ||
1038 | dmaengine_slave_config(chan, &host->dma_conf); | 692 | desc = chan->device->device_prep_slave_sg(chan, |
1039 | desc = dmaengine_prep_slave_sg(chan, | 693 | data->sg, sglen, direction, |
1040 | data->sg, sglen, slave_dirn, | ||
1041 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 694 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
1042 | if (!desc) | 695 | if (!desc) |
1043 | goto unmap_exit; | 696 | goto unmap_exit; |
@@ -1046,32 +699,13 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) | |||
1046 | desc->callback = atmci_dma_complete; | 699 | desc->callback = atmci_dma_complete; |
1047 | desc->callback_param = host; | 700 | desc->callback_param = host; |
1048 | 701 | ||
1049 | return iflags; | 702 | return 0; |
1050 | unmap_exit: | 703 | unmap_exit: |
1051 | dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction); | 704 | dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction); |
1052 | return -ENOMEM; | 705 | return -ENOMEM; |
1053 | } | 706 | } |
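The checks at the top of atmci_prepare_data_dma() amount to a simple predicate; the sketch below mirrors them in standalone form (the threshold value and the sg stand-in struct are assumptions, not taken from this patch):

    #include <stdbool.h>
    #include <stddef.h>

    #define DMA_THRESHOLD 16                /* assumed value, for illustration only */

    struct sg_desc { size_t offset; size_t length; }; /* stand-in for struct scatterlist */

    /* Short transfers and buffers that are not 32-bit aligned are handed to
     * the PIO path instead of DMA. */
    static bool transfer_suits_dma(unsigned int blocks, unsigned int blksz,
                                   const struct sg_desc *sg, unsigned int sg_len)
    {
            unsigned int i;

            if (blocks * blksz < DMA_THRESHOLD || (blksz & 3))
                    return false;
            for (i = 0; i < sg_len; i++)
                    if ((sg[i].offset & 3) || (sg[i].length & 3))
                            return false;
            return true;
    }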
1054 | 707 | ||
1055 | static void | 708 | static void atmci_submit_data(struct atmel_mci *host) |
1056 | atmci_submit_data(struct atmel_mci *host, struct mmc_data *data) | ||
1057 | { | ||
1058 | return; | ||
1059 | } | ||
1060 | |||
1061 | /* | ||
1062 | * Start PDC according to transfer direction. | ||
1063 | */ | ||
1064 | static void | ||
1065 | atmci_submit_data_pdc(struct atmel_mci *host, struct mmc_data *data) | ||
1066 | { | ||
1067 | if (data->flags & MMC_DATA_READ) | ||
1068 | atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN); | ||
1069 | else | ||
1070 | atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); | ||
1071 | } | ||
1072 | |||
1073 | static void | ||
1074 | atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) | ||
1075 | { | 709 | { |
1076 | struct dma_chan *chan = host->data_chan; | 710 | struct dma_chan *chan = host->data_chan; |
1077 | struct dma_async_tx_descriptor *desc = host->dma.data_desc; | 711 | struct dma_async_tx_descriptor *desc = host->dma.data_desc; |
@@ -1082,42 +716,64 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) | |||
1082 | } | 716 | } |
1083 | } | 717 | } |
1084 | 718 | ||
1085 | static void atmci_stop_transfer(struct atmel_mci *host) | 719 | #else /* CONFIG_MMC_ATMELMCI_DMA */ |
720 | |||
721 | static int atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) | ||
722 | { | ||
723 | return -ENOSYS; | ||
724 | } | ||
725 | |||
726 | static void atmci_submit_data(struct atmel_mci *host) {} | ||
727 | |||
728 | static void atmci_stop_dma(struct atmel_mci *host) | ||
1086 | { | 729 | { |
1087 | dev_dbg(&host->pdev->dev, | 730 | /* Data transfer was stopped by the interrupt handler */ |
1088 | "(%s) set pending xfer complete\n", __func__); | ||
1089 | atmci_set_pending(host, EVENT_XFER_COMPLETE); | 731 | atmci_set_pending(host, EVENT_XFER_COMPLETE); |
1090 | atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); | 732 | mci_writel(host, IER, MCI_NOTBUSY); |
1091 | } | 733 | } |
1092 | 734 | ||
735 | #endif /* CONFIG_MMC_ATMELMCI_DMA */ | ||
736 | |||
1093 | /* | 737 | /* |
1094 | * Stop data transfer because error(s) occurred. | 738 | * Returns a mask of interrupt flags to be enabled after the whole |
739 | * request has been prepared. | ||
1095 | */ | 740 | */ |
1096 | static void atmci_stop_transfer_pdc(struct atmel_mci *host) | 741 | static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data) |
1097 | { | 742 | { |
1098 | atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); | 743 | u32 iflags; |
1099 | } | ||
1100 | 744 | ||
1101 | static void atmci_stop_transfer_dma(struct atmel_mci *host) | 745 | data->error = -EINPROGRESS; |
1102 | { | ||
1103 | struct dma_chan *chan = host->data_chan; | ||
1104 | 746 | ||
1105 | if (chan) { | 747 | WARN_ON(host->data); |
1106 | dmaengine_terminate_all(chan); | 748 | host->sg = NULL; |
1107 | atmci_dma_cleanup(host); | 749 | host->data = data; |
1108 | } else { | 750 | |
1109 | /* Data transfer was stopped by the interrupt handler */ | 751 | iflags = ATMCI_DATA_ERROR_FLAGS; |
1110 | dev_dbg(&host->pdev->dev, | 752 | if (atmci_prepare_data_dma(host, data)) { |
1111 | "(%s) set pending xfer complete\n", __func__); | 753 | host->data_chan = NULL; |
1112 | atmci_set_pending(host, EVENT_XFER_COMPLETE); | 754 | |
1113 | atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); | 755 | /* |
756 | * Errata: MMC data write operation with less than 12 | ||
757 | * bytes is impossible. | ||
758 | * | ||
759 | * Errata: MCI Transmit Data Register (TDR) FIFO | ||
760 | * corruption when the length is not a multiple of 4. | ||
761 | */ | ||
762 | if (data->blocks * data->blksz < 12 | ||
763 | || (data->blocks * data->blksz) & 3) | ||
764 | host->need_reset = true; | ||
765 | |||
766 | host->sg = data->sg; | ||
767 | host->pio_offset = 0; | ||
768 | if (data->flags & MMC_DATA_READ) | ||
769 | iflags |= MCI_RXRDY; | ||
770 | else | ||
771 | iflags |= MCI_TXRDY; | ||
1114 | } | 772 | } |
773 | |||
774 | return iflags; | ||
1115 | } | 775 | } |
1116 | 776 | ||
1117 | /* | ||
1118 | * Start a request: prepare data if needed, prepare the command and activate | ||
1119 | * interrupts. | ||
1120 | */ | ||
1121 | static void atmci_start_request(struct atmel_mci *host, | 777 | static void atmci_start_request(struct atmel_mci *host, |
1122 | struct atmel_mci_slot *slot) | 778 | struct atmel_mci_slot *slot) |
1123 | { | 779 | { |
@@ -1133,33 +789,27 @@ static void atmci_start_request(struct atmel_mci *host, | |||
1133 | 789 | ||
1134 | host->pending_events = 0; | 790 | host->pending_events = 0; |
1135 | host->completed_events = 0; | 791 | host->completed_events = 0; |
1136 | host->cmd_status = 0; | ||
1137 | host->data_status = 0; | 792 | host->data_status = 0; |
1138 | 793 | ||
1139 | dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode); | 794 | if (host->need_reset) { |
1140 | 795 | mci_writel(host, CR, MCI_CR_SWRST); | |
1141 | if (host->need_reset || host->caps.need_reset_after_xfer) { | 796 | mci_writel(host, CR, MCI_CR_MCIEN); |
1142 | iflags = atmci_readl(host, ATMCI_IMR); | 797 | mci_writel(host, MR, host->mode_reg); |
1143 | iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB); | 798 | if (atmci_is_mci2()) |
1144 | atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); | 799 | mci_writel(host, CFG, host->cfg_reg); |
1145 | atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN); | ||
1146 | atmci_writel(host, ATMCI_MR, host->mode_reg); | ||
1147 | if (host->caps.has_cfg_reg) | ||
1148 | atmci_writel(host, ATMCI_CFG, host->cfg_reg); | ||
1149 | atmci_writel(host, ATMCI_IER, iflags); | ||
1150 | host->need_reset = false; | 800 | host->need_reset = false; |
1151 | } | 801 | } |
1152 | atmci_writel(host, ATMCI_SDCR, slot->sdc_reg); | 802 | mci_writel(host, SDCR, slot->sdc_reg); |
1153 | 803 | ||
1154 | iflags = atmci_readl(host, ATMCI_IMR); | 804 | iflags = mci_readl(host, IMR); |
1155 | if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB)) | 805 | if (iflags & ~(MCI_SDIOIRQA | MCI_SDIOIRQB)) |
1156 | dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n", | 806 | dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n", |
1157 | iflags); | 807 | iflags); |
1158 | 808 | ||
1159 | if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) { | 809 | if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) { |
1160 | /* Send init sequence (74 clock cycles) */ | 810 | /* Send init sequence (74 clock cycles) */ |
1161 | atmci_writel(host, ATMCI_CMDR, ATMCI_CMDR_SPCMD_INIT); | 811 | mci_writel(host, CMDR, MCI_CMDR_SPCMD_INIT); |
1162 | while (!(atmci_readl(host, ATMCI_SR) & ATMCI_CMDRDY)) | 812 | while (!(mci_readl(host, SR) & MCI_CMDRDY)) |
1163 | cpu_relax(); | 813 | cpu_relax(); |
1164 | } | 814 | } |
1165 | iflags = 0; | 815 | iflags = 0; |
@@ -1168,31 +818,31 @@ static void atmci_start_request(struct atmel_mci *host, | |||
1168 | atmci_set_timeout(host, slot, data); | 818 | atmci_set_timeout(host, slot, data); |
1169 | 819 | ||
1170 | /* Must set block count/size before sending command */ | 820 | /* Must set block count/size before sending command */ |
1171 | atmci_writel(host, ATMCI_BLKR, ATMCI_BCNT(data->blocks) | 821 | mci_writel(host, BLKR, MCI_BCNT(data->blocks) |
1172 | | ATMCI_BLKLEN(data->blksz)); | 822 | | MCI_BLKLEN(data->blksz)); |
1173 | dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n", | 823 | dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n", |
1174 | ATMCI_BCNT(data->blocks) | ATMCI_BLKLEN(data->blksz)); | 824 | MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz)); |
1175 | 825 | ||
1176 | iflags |= host->prepare_data(host, data); | 826 | iflags |= atmci_prepare_data(host, data); |
1177 | } | 827 | } |
1178 | 828 | ||
1179 | iflags |= ATMCI_CMDRDY; | 829 | iflags |= MCI_CMDRDY; |
1180 | cmd = mrq->cmd; | 830 | cmd = mrq->cmd; |
1181 | cmdflags = atmci_prepare_command(slot->mmc, cmd); | 831 | cmdflags = atmci_prepare_command(slot->mmc, cmd); |
1182 | atmci_send_command(host, cmd, cmdflags); | 832 | atmci_start_command(host, cmd, cmdflags); |
1183 | 833 | ||
1184 | if (data) | 834 | if (data) |
1185 | host->submit_data(host, data); | 835 | atmci_submit_data(host); |
1186 | 836 | ||
1187 | if (mrq->stop) { | 837 | if (mrq->stop) { |
1188 | host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop); | 838 | host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop); |
1189 | host->stop_cmdr |= ATMCI_CMDR_STOP_XFER; | 839 | host->stop_cmdr |= MCI_CMDR_STOP_XFER; |
1190 | if (!(data->flags & MMC_DATA_WRITE)) | 840 | if (!(data->flags & MMC_DATA_WRITE)) |
1191 | host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ; | 841 | host->stop_cmdr |= MCI_CMDR_TRDIR_READ; |
1192 | if (data->flags & MMC_DATA_STREAM) | 842 | if (data->flags & MMC_DATA_STREAM) |
1193 | host->stop_cmdr |= ATMCI_CMDR_STREAM; | 843 | host->stop_cmdr |= MCI_CMDR_STREAM; |
1194 | else | 844 | else |
1195 | host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK; | 845 | host->stop_cmdr |= MCI_CMDR_MULTI_BLOCK; |
1196 | } | 846 | } |
1197 | 847 | ||
1198 | /* | 848 | /* |
@@ -1201,9 +851,7 @@ static void atmci_start_request(struct atmel_mci *host, | |||
1201 | * conditions (e.g. command and data complete, but stop not | 851 | * conditions (e.g. command and data complete, but stop not |
1202 | * prepared yet.) | 852 | * prepared yet.) |
1203 | */ | 853 | */ |
1204 | atmci_writel(host, ATMCI_IER, iflags); | 854 | mci_writel(host, IER, iflags); |
1205 | |||
1206 | mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000)); | ||
1207 | } | 855 | } |
1208 | 856 | ||
1209 | static void atmci_queue_request(struct atmel_mci *host, | 857 | static void atmci_queue_request(struct atmel_mci *host, |
@@ -1218,7 +866,6 @@ static void atmci_queue_request(struct atmel_mci *host, | |||
1218 | host->state = STATE_SENDING_CMD; | 866 | host->state = STATE_SENDING_CMD; |
1219 | atmci_start_request(host, slot); | 867 | atmci_start_request(host, slot); |
1220 | } else { | 868 | } else { |
1221 | dev_dbg(&host->pdev->dev, "queue request\n"); | ||
1222 | list_add_tail(&slot->queue_node, &host->queue); | 869 | list_add_tail(&slot->queue_node, &host->queue); |
1223 | } | 870 | } |
1224 | spin_unlock_bh(&host->lock); | 871 | spin_unlock_bh(&host->lock); |
@@ -1231,7 +878,6 @@ static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
1231 | struct mmc_data *data; | 878 | struct mmc_data *data; |
1232 | 879 | ||
1233 | WARN_ON(slot->mrq); | 880 | WARN_ON(slot->mrq); |
1234 | dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode); | ||
1235 | 881 | ||
1236 | /* | 882 | /* |
1237 | * We may "know" the card is gone even though there's still an | 883 | * We may "know" the card is gone even though there's still an |
@@ -1263,13 +909,13 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
1263 | struct atmel_mci *host = slot->host; | 909 | struct atmel_mci *host = slot->host; |
1264 | unsigned int i; | 910 | unsigned int i; |
1265 | 911 | ||
1266 | slot->sdc_reg &= ~ATMCI_SDCBUS_MASK; | 912 | slot->sdc_reg &= ~MCI_SDCBUS_MASK; |
1267 | switch (ios->bus_width) { | 913 | switch (ios->bus_width) { |
1268 | case MMC_BUS_WIDTH_1: | 914 | case MMC_BUS_WIDTH_1: |
1269 | slot->sdc_reg |= ATMCI_SDCBUS_1BIT; | 915 | slot->sdc_reg |= MCI_SDCBUS_1BIT; |
1270 | break; | 916 | break; |
1271 | case MMC_BUS_WIDTH_4: | 917 | case MMC_BUS_WIDTH_4: |
1272 | slot->sdc_reg |= ATMCI_SDCBUS_4BIT; | 918 | slot->sdc_reg |= MCI_SDCBUS_4BIT; |
1273 | break; | 919 | break; |
1274 | } | 920 | } |
1275 | 921 | ||
@@ -1280,10 +926,10 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
1280 | spin_lock_bh(&host->lock); | 926 | spin_lock_bh(&host->lock); |
1281 | if (!host->mode_reg) { | 927 | if (!host->mode_reg) { |
1282 | clk_enable(host->mck); | 928 | clk_enable(host->mck); |
1283 | atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); | 929 | mci_writel(host, CR, MCI_CR_SWRST); |
1284 | atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN); | 930 | mci_writel(host, CR, MCI_CR_MCIEN); |
1285 | if (host->caps.has_cfg_reg) | 931 | if (atmci_is_mci2()) |
1286 | atmci_writel(host, ATMCI_CFG, host->cfg_reg); | 932 | mci_writel(host, CFG, host->cfg_reg); |
1287 | } | 933 | } |
1288 | 934 | ||
1289 | /* | 935 | /* |
@@ -1291,54 +937,43 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
1291 | * core ios update when finding the minimum. | 937 | * core ios update when finding the minimum. |
1292 | */ | 938 | */ |
1293 | slot->clock = ios->clock; | 939 | slot->clock = ios->clock; |
1294 | for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { | 940 | for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { |
1295 | if (host->slot[i] && host->slot[i]->clock | 941 | if (host->slot[i] && host->slot[i]->clock |
1296 | && host->slot[i]->clock < clock_min) | 942 | && host->slot[i]->clock < clock_min) |
1297 | clock_min = host->slot[i]->clock; | 943 | clock_min = host->slot[i]->clock; |
1298 | } | 944 | } |
1299 | 945 | ||
1300 | /* Calculate clock divider */ | 946 | /* Calculate clock divider */ |
1301 | if (host->caps.has_odd_clk_div) { | 947 | clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1; |
1302 | clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2; | 948 | if (clkdiv > 255) { |
1303 | if (clkdiv > 511) { | 949 | dev_warn(&mmc->class_dev, |
1304 | dev_warn(&mmc->class_dev, | 950 | "clock %u too slow; using %lu\n", |
1305 | "clock %u too slow; using %lu\n", | 951 | clock_min, host->bus_hz / (2 * 256)); |
1306 | clock_min, host->bus_hz / (511 + 2)); | 952 | clkdiv = 255; |
1307 | clkdiv = 511; | ||
1308 | } | ||
1309 | host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1) | ||
1310 | | ATMCI_MR_CLKODD(clkdiv & 1); | ||
1311 | } else { | ||
1312 | clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1; | ||
1313 | if (clkdiv > 255) { | ||
1314 | dev_warn(&mmc->class_dev, | ||
1315 | "clock %u too slow; using %lu\n", | ||
1316 | clock_min, host->bus_hz / (2 * 256)); | ||
1317 | clkdiv = 255; | ||
1318 | } | ||
1319 | host->mode_reg = ATMCI_MR_CLKDIV(clkdiv); | ||
1320 | } | 953 | } |
1321 | 954 | ||
955 | host->mode_reg = MCI_MR_CLKDIV(clkdiv); | ||
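As a worked example of the divider formula above (numbers chosen for illustration only): with bus_hz = 100 MHz and clock_min = 25 MHz, clkdiv = DIV_ROUND_UP(100 MHz, 2 * 25 MHz) - 1 = 1, giving an actual MCI clock of 100 MHz / (2 * (1 + 1)) = 25 MHz; asking for 400 kHz gives clkdiv = 124 and exactly 400 kHz. A one-line helper expressing the resulting frequency:

    /* Actual MCI clock produced by a given divider (sketch, not driver code). */
    static unsigned long mci_clock_hz(unsigned long bus_hz, unsigned int clkdiv)
    {
            return bus_hz / (2 * (clkdiv + 1));
    }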
956 | |||
1322 | /* | 957 | /* |
1323 | * WRPROOF and RDPROOF prevent overruns/underruns by | 958 | * WRPROOF and RDPROOF prevent overruns/underruns by |
1324 | * stopping the clock when the FIFO is full/empty. | 959 | * stopping the clock when the FIFO is full/empty. |
1325 | * This state is not expected to last for long. | 960 | * This state is not expected to last for long. |
1326 | */ | 961 | */ |
1327 | if (host->caps.has_rwproof) | 962 | if (mci_has_rwproof()) |
1328 | host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF); | 963 | host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF); |
1329 | 964 | ||
1330 | if (host->caps.has_cfg_reg) { | 965 | if (atmci_is_mci2()) { |
1331 | /* setup High Speed mode in relation with card capacity */ | 966 | /* setup High Speed mode in relation with card capacity */ |
1332 | if (ios->timing == MMC_TIMING_SD_HS) | 967 | if (ios->timing == MMC_TIMING_SD_HS) |
1333 | host->cfg_reg |= ATMCI_CFG_HSMODE; | 968 | host->cfg_reg |= MCI_CFG_HSMODE; |
1334 | else | 969 | else |
1335 | host->cfg_reg &= ~ATMCI_CFG_HSMODE; | 970 | host->cfg_reg &= ~MCI_CFG_HSMODE; |
1336 | } | 971 | } |
1337 | 972 | ||
1338 | if (list_empty(&host->queue)) { | 973 | if (list_empty(&host->queue)) { |
1339 | atmci_writel(host, ATMCI_MR, host->mode_reg); | 974 | mci_writel(host, MR, host->mode_reg); |
1340 | if (host->caps.has_cfg_reg) | 975 | if (atmci_is_mci2()) |
1341 | atmci_writel(host, ATMCI_CFG, host->cfg_reg); | 976 | mci_writel(host, CFG, host->cfg_reg); |
1342 | } else { | 977 | } else { |
1343 | host->need_clock_update = true; | 978 | host->need_clock_update = true; |
1344 | } | 979 | } |
@@ -1349,16 +984,16 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
1349 | 984 | ||
1350 | spin_lock_bh(&host->lock); | 985 | spin_lock_bh(&host->lock); |
1351 | slot->clock = 0; | 986 | slot->clock = 0; |
1352 | for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { | 987 | for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { |
1353 | if (host->slot[i] && host->slot[i]->clock) { | 988 | if (host->slot[i] && host->slot[i]->clock) { |
1354 | any_slot_active = true; | 989 | any_slot_active = true; |
1355 | break; | 990 | break; |
1356 | } | 991 | } |
1357 | } | 992 | } |
1358 | if (!any_slot_active) { | 993 | if (!any_slot_active) { |
1359 | atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS); | 994 | mci_writel(host, CR, MCI_CR_MCIDIS); |
1360 | if (host->mode_reg) { | 995 | if (host->mode_reg) { |
1361 | atmci_readl(host, ATMCI_MR); | 996 | mci_readl(host, MR); |
1362 | clk_disable(host->mck); | 997 | clk_disable(host->mck); |
1363 | } | 998 | } |
1364 | host->mode_reg = 0; | 999 | host->mode_reg = 0; |
@@ -1422,9 +1057,9 @@ static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable) | |||
1422 | struct atmel_mci *host = slot->host; | 1057 | struct atmel_mci *host = slot->host; |
1423 | 1058 | ||
1424 | if (enable) | 1059 | if (enable) |
1425 | atmci_writel(host, ATMCI_IER, slot->sdio_irq); | 1060 | mci_writel(host, IER, slot->sdio_irq); |
1426 | else | 1061 | else |
1427 | atmci_writel(host, ATMCI_IDR, slot->sdio_irq); | 1062 | mci_writel(host, IDR, slot->sdio_irq); |
1428 | } | 1063 | } |
1429 | 1064 | ||
1430 | static const struct mmc_host_ops atmci_ops = { | 1065 | static const struct mmc_host_ops atmci_ops = { |
@@ -1451,9 +1086,9 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq) | |||
1451 | * busy transferring data. | 1086 | * busy transferring data. |
1452 | */ | 1087 | */ |
1453 | if (host->need_clock_update) { | 1088 | if (host->need_clock_update) { |
1454 | atmci_writel(host, ATMCI_MR, host->mode_reg); | 1089 | mci_writel(host, MR, host->mode_reg); |
1455 | if (host->caps.has_cfg_reg) | 1090 | if (atmci_is_mci2()) |
1456 | atmci_writel(host, ATMCI_CFG, host->cfg_reg); | 1091 | mci_writel(host, CFG, host->cfg_reg); |
1457 | } | 1092 | } |
1458 | 1093 | ||
1459 | host->cur_slot->mrq = NULL; | 1094 | host->cur_slot->mrq = NULL; |
@@ -1471,8 +1106,6 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq) | |||
1471 | host->state = STATE_IDLE; | 1106 | host->state = STATE_IDLE; |
1472 | } | 1107 | } |
1473 | 1108 | ||
1474 | del_timer(&host->timer); | ||
1475 | |||
1476 | spin_unlock(&host->lock); | 1109 | spin_unlock(&host->lock); |
1477 | mmc_request_done(prev_mmc, mrq); | 1110 | mmc_request_done(prev_mmc, mrq); |
1478 | spin_lock(&host->lock); | 1111 | spin_lock(&host->lock); |
@@ -1484,24 +1117,32 @@ static void atmci_command_complete(struct atmel_mci *host, | |||
1484 | u32 status = host->cmd_status; | 1117 | u32 status = host->cmd_status; |
1485 | 1118 | ||
1486 | /* Read the response from the card (up to 16 bytes) */ | 1119 | /* Read the response from the card (up to 16 bytes) */ |
1487 | cmd->resp[0] = atmci_readl(host, ATMCI_RSPR); | 1120 | cmd->resp[0] = mci_readl(host, RSPR); |
1488 | cmd->resp[1] = atmci_readl(host, ATMCI_RSPR); | 1121 | cmd->resp[1] = mci_readl(host, RSPR); |
1489 | cmd->resp[2] = atmci_readl(host, ATMCI_RSPR); | 1122 | cmd->resp[2] = mci_readl(host, RSPR); |
1490 | cmd->resp[3] = atmci_readl(host, ATMCI_RSPR); | 1123 | cmd->resp[3] = mci_readl(host, RSPR); |
1491 | 1124 | ||
1492 | if (status & ATMCI_RTOE) | 1125 | if (status & MCI_RTOE) |
1493 | cmd->error = -ETIMEDOUT; | 1126 | cmd->error = -ETIMEDOUT; |
1494 | else if ((cmd->flags & MMC_RSP_CRC) && (status & ATMCI_RCRCE)) | 1127 | else if ((cmd->flags & MMC_RSP_CRC) && (status & MCI_RCRCE)) |
1495 | cmd->error = -EILSEQ; | 1128 | cmd->error = -EILSEQ; |
1496 | else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE)) | 1129 | else if (status & (MCI_RINDE | MCI_RDIRE | MCI_RENDE)) |
1497 | cmd->error = -EIO; | 1130 | cmd->error = -EIO; |
1498 | else if (host->mrq->data && (host->mrq->data->blksz & 3)) { | 1131 | else |
1499 | if (host->caps.need_blksz_mul_4) { | ||
1500 | cmd->error = -EINVAL; | ||
1501 | host->need_reset = 1; | ||
1502 | } | ||
1503 | } else | ||
1504 | cmd->error = 0; | 1132 | cmd->error = 0; |
1133 | |||
1134 | if (cmd->error) { | ||
1135 | dev_dbg(&host->pdev->dev, | ||
1136 | "command error: status=0x%08x\n", status); | ||
1137 | |||
1138 | if (cmd->data) { | ||
1139 | atmci_stop_dma(host); | ||
1140 | host->data = NULL; | ||
1141 | mci_writel(host, IDR, MCI_NOTBUSY | ||
1142 | | MCI_TXRDY | MCI_RXRDY | ||
1143 | | ATMCI_DATA_ERROR_FLAGS); | ||
1144 | } | ||
1145 | } | ||
1505 | } | 1146 | } |
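The response-error handling above follows a fixed priority; here is a standalone sketch of that mapping, with placeholder bit values since the real flag definitions live in the driver header:

    #include <errno.h>
    #include <stdint.h>

    #define F_RTOE  (1u << 0)       /* response timeout (placeholder bit) */
    #define F_RCRCE (1u << 1)       /* response CRC error (placeholder bit) */
    #define F_RINDE (1u << 2)       /* response index error (placeholder bit) */
    #define F_RDIRE (1u << 3)       /* response direction error (placeholder bit) */
    #define F_RENDE (1u << 4)       /* response end-bit error (placeholder bit) */

    static int cmd_status_to_errno(uint32_t status, int resp_has_crc)
    {
            if (status & F_RTOE)
                    return -ETIMEDOUT;      /* timeout wins over everything else */
            if (resp_has_crc && (status & F_RCRCE))
                    return -EILSEQ;         /* CRC error only if the response carries a CRC */
            if (status & (F_RINDE | F_RDIRE | F_RENDE))
                    return -EIO;
            return 0;
    }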
1506 | 1147 | ||
1507 | static void atmci_detect_change(unsigned long data) | 1148 | static void atmci_detect_change(unsigned long data) |
@@ -1550,11 +1191,11 @@ static void atmci_detect_change(unsigned long data) | |||
1550 | * Reset controller to terminate any ongoing | 1191 | * Reset controller to terminate any ongoing |
1551 | * commands or data transfers. | 1192 | * commands or data transfers. |
1552 | */ | 1193 | */ |
1553 | atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); | 1194 | mci_writel(host, CR, MCI_CR_SWRST); |
1554 | atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN); | 1195 | mci_writel(host, CR, MCI_CR_MCIEN); |
1555 | atmci_writel(host, ATMCI_MR, host->mode_reg); | 1196 | mci_writel(host, MR, host->mode_reg); |
1556 | if (host->caps.has_cfg_reg) | 1197 | if (atmci_is_mci2()) |
1557 | atmci_writel(host, ATMCI_CFG, host->cfg_reg); | 1198 | mci_writel(host, CFG, host->cfg_reg); |
1558 | 1199 | ||
1559 | host->data = NULL; | 1200 | host->data = NULL; |
1560 | host->cmd = NULL; | 1201 | host->cmd = NULL; |
@@ -1564,21 +1205,23 @@ static void atmci_detect_change(unsigned long data) | |||
1564 | break; | 1205 | break; |
1565 | case STATE_SENDING_CMD: | 1206 | case STATE_SENDING_CMD: |
1566 | mrq->cmd->error = -ENOMEDIUM; | 1207 | mrq->cmd->error = -ENOMEDIUM; |
1567 | if (mrq->data) | 1208 | if (!mrq->data) |
1568 | host->stop_transfer(host); | 1209 | break; |
1569 | break; | 1210 | /* fall through */ |
1570 | case STATE_DATA_XFER: | 1211 | case STATE_SENDING_DATA: |
1571 | mrq->data->error = -ENOMEDIUM; | ||
1572 | host->stop_transfer(host); | ||
1573 | break; | ||
1574 | case STATE_WAITING_NOTBUSY: | ||
1575 | mrq->data->error = -ENOMEDIUM; | 1212 | mrq->data->error = -ENOMEDIUM; |
1213 | atmci_stop_dma(host); | ||
1576 | break; | 1214 | break; |
1215 | case STATE_DATA_BUSY: | ||
1216 | case STATE_DATA_ERROR: | ||
1217 | if (mrq->data->error == -EINPROGRESS) | ||
1218 | mrq->data->error = -ENOMEDIUM; | ||
1219 | if (!mrq->stop) | ||
1220 | break; | ||
1221 | /* fall through */ | ||
1577 | case STATE_SENDING_STOP: | 1222 | case STATE_SENDING_STOP: |
1578 | mrq->stop->error = -ENOMEDIUM; | 1223 | mrq->stop->error = -ENOMEDIUM; |
1579 | break; | 1224 | break; |
1580 | case STATE_END_REQUEST: | ||
1581 | break; | ||
1582 | } | 1225 | } |
1583 | 1226 | ||
1584 | atmci_request_end(host, mrq); | 1227 | atmci_request_end(host, mrq); |
@@ -1606,6 +1249,7 @@ static void atmci_tasklet_func(unsigned long priv) | |||
1606 | struct atmel_mci *host = (struct atmel_mci *)priv; | 1249 | struct atmel_mci *host = (struct atmel_mci *)priv; |
1607 | struct mmc_request *mrq = host->mrq; | 1250 | struct mmc_request *mrq = host->mrq; |
1608 | struct mmc_data *data = host->data; | 1251 | struct mmc_data *data = host->data; |
1252 | struct mmc_command *cmd = host->cmd; | ||
1609 | enum atmel_mci_state state = host->state; | 1253 | enum atmel_mci_state state = host->state; |
1610 | enum atmel_mci_state prev_state; | 1254 | enum atmel_mci_state prev_state; |
1611 | u32 status; | 1255 | u32 status; |
@@ -1617,191 +1261,111 @@ static void atmci_tasklet_func(unsigned long priv) | |||
1617 | dev_vdbg(&host->pdev->dev, | 1261 | dev_vdbg(&host->pdev->dev, |
1618 | "tasklet: state %u pending/completed/mask %lx/%lx/%x\n", | 1262 | "tasklet: state %u pending/completed/mask %lx/%lx/%x\n", |
1619 | state, host->pending_events, host->completed_events, | 1263 | state, host->pending_events, host->completed_events, |
1620 | atmci_readl(host, ATMCI_IMR)); | 1264 | mci_readl(host, IMR)); |
1621 | 1265 | ||
1622 | do { | 1266 | do { |
1623 | prev_state = state; | 1267 | prev_state = state; |
1624 | dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state); | ||
1625 | 1268 | ||
1626 | switch (state) { | 1269 | switch (state) { |
1627 | case STATE_IDLE: | 1270 | case STATE_IDLE: |
1628 | break; | 1271 | break; |
1629 | 1272 | ||
1630 | case STATE_SENDING_CMD: | 1273 | case STATE_SENDING_CMD: |
1631 | /* | ||
1632 | * The command has been sent; we are waiting for command | ||
1633 | * ready. Three next states are then possible: | ||
1634 | * END_REQUEST by default, WAITING_NOTBUSY if it's a | ||
1635 | * command that needs it, or DATA_XFER if there is data. | ||
1636 | */ | ||
1637 | dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n"); | ||
1638 | if (!atmci_test_and_clear_pending(host, | 1274 | if (!atmci_test_and_clear_pending(host, |
1639 | EVENT_CMD_RDY)) | 1275 | EVENT_CMD_COMPLETE)) |
1640 | break; | 1276 | break; |
1641 | 1277 | ||
1642 | dev_dbg(&host->pdev->dev, "set completed cmd ready\n"); | ||
1643 | host->cmd = NULL; | 1278 | host->cmd = NULL; |
1644 | atmci_set_completed(host, EVENT_CMD_RDY); | 1279 | atmci_set_completed(host, EVENT_CMD_COMPLETE); |
1645 | atmci_command_complete(host, mrq->cmd); | 1280 | atmci_command_complete(host, mrq->cmd); |
1646 | if (mrq->data) { | 1281 | if (!mrq->data || cmd->error) { |
1647 | dev_dbg(&host->pdev->dev, | 1282 | atmci_request_end(host, host->mrq); |
1648 | "command with data transfer"); | 1283 | goto unlock; |
1649 | /* | 1284 | } |
1650 | * If there is a command error don't start | ||
1651 | * data transfer. | ||
1652 | */ | ||
1653 | if (mrq->cmd->error) { | ||
1654 | host->stop_transfer(host); | ||
1655 | host->data = NULL; | ||
1656 | atmci_writel(host, ATMCI_IDR, | ||
1657 | ATMCI_TXRDY | ATMCI_RXRDY | ||
1658 | | ATMCI_DATA_ERROR_FLAGS); | ||
1659 | state = STATE_END_REQUEST; | ||
1660 | } else | ||
1661 | state = STATE_DATA_XFER; | ||
1662 | } else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) { | ||
1663 | dev_dbg(&host->pdev->dev, | ||
1664 | "command response need waiting notbusy"); | ||
1665 | atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); | ||
1666 | state = STATE_WAITING_NOTBUSY; | ||
1667 | } else | ||
1668 | state = STATE_END_REQUEST; | ||
1669 | 1285 | ||
1670 | break; | 1286 | prev_state = state = STATE_SENDING_DATA; |
1287 | /* fall through */ | ||
1671 | 1288 | ||
1672 | case STATE_DATA_XFER: | 1289 | case STATE_SENDING_DATA: |
1673 | if (atmci_test_and_clear_pending(host, | 1290 | if (atmci_test_and_clear_pending(host, |
1674 | EVENT_DATA_ERROR)) { | 1291 | EVENT_DATA_ERROR)) { |
1675 | dev_dbg(&host->pdev->dev, "set completed data error\n"); | 1292 | atmci_stop_dma(host); |
1676 | atmci_set_completed(host, EVENT_DATA_ERROR); | 1293 | if (data->stop) |
1677 | state = STATE_END_REQUEST; | 1294 | send_stop_cmd(host, data); |
1295 | state = STATE_DATA_ERROR; | ||
1678 | break; | 1296 | break; |
1679 | } | 1297 | } |
1680 | 1298 | ||
1681 | /* | ||
1682 | * A data transfer is in progress. The event expected | ||
1683 | * to move to the next state depends on the data transfer | ||
1684 | * type (PDC or DMA). Once the transfer is done we can move | ||
1685 | * to the next step, which is WAITING_NOTBUSY in the write | ||
1686 | * case and directly SENDING_STOP in the read case. | ||
1687 | */ | ||
1688 | dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n"); | ||
1689 | if (!atmci_test_and_clear_pending(host, | 1299 | if (!atmci_test_and_clear_pending(host, |
1690 | EVENT_XFER_COMPLETE)) | 1300 | EVENT_XFER_COMPLETE)) |
1691 | break; | 1301 | break; |
1692 | 1302 | ||
1693 | dev_dbg(&host->pdev->dev, | ||
1694 | "(%s) set completed xfer complete\n", | ||
1695 | __func__); | ||
1696 | atmci_set_completed(host, EVENT_XFER_COMPLETE); | 1303 | atmci_set_completed(host, EVENT_XFER_COMPLETE); |
1304 | prev_state = state = STATE_DATA_BUSY; | ||
1305 | /* fall through */ | ||
1697 | 1306 | ||
1698 | if (host->caps.need_notbusy_for_read_ops || | 1307 | case STATE_DATA_BUSY: |
1699 | (host->data->flags & MMC_DATA_WRITE)) { | 1308 | if (!atmci_test_and_clear_pending(host, |
1700 | atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); | 1309 | EVENT_DATA_COMPLETE)) |
1701 | state = STATE_WAITING_NOTBUSY; | 1310 | break; |
1702 | } else if (host->mrq->stop) { | 1311 | |
1703 | atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY); | 1312 | host->data = NULL; |
1704 | atmci_send_stop_cmd(host, data); | 1313 | atmci_set_completed(host, EVENT_DATA_COMPLETE); |
1705 | state = STATE_SENDING_STOP; | 1314 | status = host->data_status; |
1315 | if (unlikely(status & ATMCI_DATA_ERROR_FLAGS)) { | ||
1316 | if (status & MCI_DTOE) { | ||
1317 | dev_dbg(&host->pdev->dev, | ||
1318 | "data timeout error\n"); | ||
1319 | data->error = -ETIMEDOUT; | ||
1320 | } else if (status & MCI_DCRCE) { | ||
1321 | dev_dbg(&host->pdev->dev, | ||
1322 | "data CRC error\n"); | ||
1323 | data->error = -EILSEQ; | ||
1324 | } else { | ||
1325 | dev_dbg(&host->pdev->dev, | ||
1326 | "data FIFO error (status=%08x)\n", | ||
1327 | status); | ||
1328 | data->error = -EIO; | ||
1329 | } | ||
1706 | } else { | 1330 | } else { |
1707 | host->data = NULL; | ||
1708 | data->bytes_xfered = data->blocks * data->blksz; | 1331 | data->bytes_xfered = data->blocks * data->blksz; |
1709 | data->error = 0; | 1332 | data->error = 0; |
1710 | state = STATE_END_REQUEST; | 1333 | mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS); |
1711 | } | 1334 | } |
1712 | break; | ||
1713 | 1335 | ||
1714 | case STATE_WAITING_NOTBUSY: | 1336 | if (!data->stop) { |
1715 | /* | 1337 | atmci_request_end(host, host->mrq); |
1716 | * We can be in this state for two reasons: a command | ||
1717 | * that requires waiting for the not-busy signal (stop | ||
1718 | * command included) or a write operation. In the latter | ||
1719 | * case, we need to send a stop command. | ||
1720 | */ | ||
1721 | dev_dbg(&host->pdev->dev, "FSM: not busy?\n"); | ||
1722 | if (!atmci_test_and_clear_pending(host, | ||
1723 | EVENT_NOTBUSY)) | ||
1724 | break; | ||
1725 | 1340 | ||
1726 | dev_dbg(&host->pdev->dev, "set completed not busy\n"); | 1341 | prev_state = state = STATE_SENDING_STOP; |
1727 | atmci_set_completed(host, EVENT_NOTBUSY); | 1342 | if (!data->error) |
1728 | 1343 | send_stop_cmd(host, data); | |
1729 | if (host->data) { | 1344 | /* fall through */ |
1730 | /* | ||
1731 | * For some commands such as CMD53, even if | ||
1732 | * there is data transfer, there is no stop | ||
1733 | * command to send. | ||
1734 | */ | ||
1735 | if (host->mrq->stop) { | ||
1736 | atmci_writel(host, ATMCI_IER, | ||
1737 | ATMCI_CMDRDY); | ||
1738 | atmci_send_stop_cmd(host, data); | ||
1739 | state = STATE_SENDING_STOP; | ||
1740 | } else { | ||
1741 | host->data = NULL; | ||
1742 | data->bytes_xfered = data->blocks | ||
1743 | * data->blksz; | ||
1744 | data->error = 0; | ||
1745 | state = STATE_END_REQUEST; | ||
1746 | } | ||
1747 | } else | ||
1748 | state = STATE_END_REQUEST; | ||
1749 | break; | ||
1750 | 1345 | ||
1751 | case STATE_SENDING_STOP: | 1346 | case STATE_SENDING_STOP: |
1752 | /* | ||
1753 | * In this state, it is important to set host->data to | ||
1754 | * NULL (which is tested in the waiting notbusy state) | ||
1755 | * in order to go to the end request state instead of | ||
1756 | * sending stop again. | ||
1757 | */ | ||
1758 | dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n"); | ||
1759 | if (!atmci_test_and_clear_pending(host, | 1347 | if (!atmci_test_and_clear_pending(host, |
1760 | EVENT_CMD_RDY)) | 1348 | EVENT_CMD_COMPLETE)) |
1761 | break; | 1349 | break; |
1762 | 1350 | ||
1763 | dev_dbg(&host->pdev->dev, "FSM: cmd ready\n"); | ||
1764 | host->cmd = NULL; | 1351 | host->cmd = NULL; |
1765 | data->bytes_xfered = data->blocks * data->blksz; | ||
1766 | data->error = 0; | ||
1767 | atmci_command_complete(host, mrq->stop); | 1352 | atmci_command_complete(host, mrq->stop); |
1768 | if (mrq->stop->error) { | 1353 | atmci_request_end(host, host->mrq); |
1769 | host->stop_transfer(host); | 1354 | goto unlock; |
1770 | atmci_writel(host, ATMCI_IDR, | ||
1771 | ATMCI_TXRDY | ATMCI_RXRDY | ||
1772 | | ATMCI_DATA_ERROR_FLAGS); | ||
1773 | state = STATE_END_REQUEST; | ||
1774 | } else { | ||
1775 | atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); | ||
1776 | state = STATE_WAITING_NOTBUSY; | ||
1777 | } | ||
1778 | host->data = NULL; | ||
1779 | break; | ||
1780 | 1355 | ||
1781 | case STATE_END_REQUEST: | 1356 | case STATE_DATA_ERROR: |
1782 | atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY | 1357 | if (!atmci_test_and_clear_pending(host, |
1783 | | ATMCI_DATA_ERROR_FLAGS); | 1358 | EVENT_XFER_COMPLETE)) |
1784 | status = host->data_status; | 1359 | break; |
1785 | if (unlikely(status)) { | ||
1786 | host->stop_transfer(host); | ||
1787 | host->data = NULL; | ||
1788 | if (status & ATMCI_DTOE) { | ||
1789 | data->error = -ETIMEDOUT; | ||
1790 | } else if (status & ATMCI_DCRCE) { | ||
1791 | data->error = -EILSEQ; | ||
1792 | } else { | ||
1793 | data->error = -EIO; | ||
1794 | } | ||
1795 | } | ||
1796 | 1360 | ||
1797 | atmci_request_end(host, host->mrq); | 1361 | state = STATE_DATA_BUSY; |
1798 | state = STATE_IDLE; | ||
1799 | break; | 1362 | break; |
1800 | } | 1363 | } |
1801 | } while (state != prev_state); | 1364 | } while (state != prev_state); |
1802 | 1365 | ||
1803 | host->state = state; | 1366 | host->state = state; |
1804 | 1367 | ||
1368 | unlock: | ||
1805 | spin_unlock(&host->lock); | 1369 | spin_unlock(&host->lock); |
1806 | } | 1370 | } |
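To make the tasklet easier to follow, the patched code (the right-hand column of this hunk) walks the transitions sketched below; the enum is an illustrative reconstruction using the state names seen above, with the ordering assumed rather than copied from the driver:

    /*
     *   IDLE -> SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP -> IDLE
     *                              \
     *                               `-> DATA_ERROR -> DATA_BUSY (once XFER_COMPLETE arrives)
     */
    enum atmci_state_sketch {
            STATE_IDLE = 0,
            STATE_SENDING_CMD,
            STATE_SENDING_DATA,
            STATE_DATA_BUSY,
            STATE_SENDING_STOP,
            STATE_DATA_ERROR,
    };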
1807 | 1371 | ||
@@ -1816,7 +1380,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) | |||
1816 | unsigned int nbytes = 0; | 1380 | unsigned int nbytes = 0; |
1817 | 1381 | ||
1818 | do { | 1382 | do { |
1819 | value = atmci_readl(host, ATMCI_RDR); | 1383 | value = mci_readl(host, RDR); |
1820 | if (likely(offset + 4 <= sg->length)) { | 1384 | if (likely(offset + 4 <= sg->length)) { |
1821 | put_unaligned(value, (u32 *)(buf + offset)); | 1385 | put_unaligned(value, (u32 *)(buf + offset)); |
1822 | 1386 | ||
@@ -1848,15 +1412,18 @@ static void atmci_read_data_pio(struct atmel_mci *host) | |||
1848 | nbytes += offset; | 1412 | nbytes += offset; |
1849 | } | 1413 | } |
1850 | 1414 | ||
1851 | status = atmci_readl(host, ATMCI_SR); | 1415 | status = mci_readl(host, SR); |
1852 | if (status & ATMCI_DATA_ERROR_FLAGS) { | 1416 | if (status & ATMCI_DATA_ERROR_FLAGS) { |
1853 | atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_RXRDY | 1417 | mci_writel(host, IDR, (MCI_NOTBUSY | MCI_RXRDY |
1854 | | ATMCI_DATA_ERROR_FLAGS)); | 1418 | | ATMCI_DATA_ERROR_FLAGS)); |
1855 | host->data_status = status; | 1419 | host->data_status = status; |
1856 | data->bytes_xfered += nbytes; | 1420 | data->bytes_xfered += nbytes; |
1421 | smp_wmb(); | ||
1422 | atmci_set_pending(host, EVENT_DATA_ERROR); | ||
1423 | tasklet_schedule(&host->tasklet); | ||
1857 | return; | 1424 | return; |
1858 | } | 1425 | } |
1859 | } while (status & ATMCI_RXRDY); | 1426 | } while (status & MCI_RXRDY); |
1860 | 1427 | ||
1861 | host->pio_offset = offset; | 1428 | host->pio_offset = offset; |
1862 | data->bytes_xfered += nbytes; | 1429 | data->bytes_xfered += nbytes; |
@@ -1864,8 +1431,8 @@ static void atmci_read_data_pio(struct atmel_mci *host) | |||
1864 | return; | 1431 | return; |
1865 | 1432 | ||
1866 | done: | 1433 | done: |
1867 | atmci_writel(host, ATMCI_IDR, ATMCI_RXRDY); | 1434 | mci_writel(host, IDR, MCI_RXRDY); |
1868 | atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); | 1435 | mci_writel(host, IER, MCI_NOTBUSY); |
1869 | data->bytes_xfered += nbytes; | 1436 | data->bytes_xfered += nbytes; |
1870 | smp_wmb(); | 1437 | smp_wmb(); |
1871 | atmci_set_pending(host, EVENT_XFER_COMPLETE); | 1438 | atmci_set_pending(host, EVENT_XFER_COMPLETE); |
@@ -1884,7 +1451,7 @@ static void atmci_write_data_pio(struct atmel_mci *host) | |||
1884 | do { | 1451 | do { |
1885 | if (likely(offset + 4 <= sg->length)) { | 1452 | if (likely(offset + 4 <= sg->length)) { |
1886 | value = get_unaligned((u32 *)(buf + offset)); | 1453 | value = get_unaligned((u32 *)(buf + offset)); |
1887 | atmci_writel(host, ATMCI_TDR, value); | 1454 | mci_writel(host, TDR, value); |
1888 | 1455 | ||
1889 | offset += 4; | 1456 | offset += 4; |
1890 | nbytes += 4; | 1457 | nbytes += 4; |
@@ -1905,26 +1472,29 @@ static void atmci_write_data_pio(struct atmel_mci *host) | |||
1905 | 1472 | ||
1906 | host->sg = sg = sg_next(sg); | 1473 | host->sg = sg = sg_next(sg); |
1907 | if (!sg) { | 1474 | if (!sg) { |
1908 | atmci_writel(host, ATMCI_TDR, value); | 1475 | mci_writel(host, TDR, value); |
1909 | goto done; | 1476 | goto done; |
1910 | } | 1477 | } |
1911 | 1478 | ||
1912 | offset = 4 - remaining; | 1479 | offset = 4 - remaining; |
1913 | buf = sg_virt(sg); | 1480 | buf = sg_virt(sg); |
1914 | memcpy((u8 *)&value + remaining, buf, offset); | 1481 | memcpy((u8 *)&value + remaining, buf, offset); |
1915 | atmci_writel(host, ATMCI_TDR, value); | 1482 | mci_writel(host, TDR, value); |
1916 | nbytes += offset; | 1483 | nbytes += offset; |
1917 | } | 1484 | } |
1918 | 1485 | ||
1919 | status = atmci_readl(host, ATMCI_SR); | 1486 | status = mci_readl(host, SR); |
1920 | if (status & ATMCI_DATA_ERROR_FLAGS) { | 1487 | if (status & ATMCI_DATA_ERROR_FLAGS) { |
1921 | atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_TXRDY | 1488 | mci_writel(host, IDR, (MCI_NOTBUSY | MCI_TXRDY |
1922 | | ATMCI_DATA_ERROR_FLAGS)); | 1489 | | ATMCI_DATA_ERROR_FLAGS)); |
1923 | host->data_status = status; | 1490 | host->data_status = status; |
1924 | data->bytes_xfered += nbytes; | 1491 | data->bytes_xfered += nbytes; |
1492 | smp_wmb(); | ||
1493 | atmci_set_pending(host, EVENT_DATA_ERROR); | ||
1494 | tasklet_schedule(&host->tasklet); | ||
1925 | return; | 1495 | return; |
1926 | } | 1496 | } |
1927 | } while (status & ATMCI_TXRDY); | 1497 | } while (status & MCI_TXRDY); |
1928 | 1498 | ||
1929 | host->pio_offset = offset; | 1499 | host->pio_offset = offset; |
1930 | data->bytes_xfered += nbytes; | 1500 | data->bytes_xfered += nbytes; |
@@ -1932,18 +1502,28 @@ static void atmci_write_data_pio(struct atmel_mci *host) | |||
1932 | return; | 1502 | return; |
1933 | 1503 | ||
1934 | done: | 1504 | done: |
1935 | atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY); | 1505 | mci_writel(host, IDR, MCI_TXRDY); |
1936 | atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); | 1506 | mci_writel(host, IER, MCI_NOTBUSY); |
1937 | data->bytes_xfered += nbytes; | 1507 | data->bytes_xfered += nbytes; |
1938 | smp_wmb(); | 1508 | smp_wmb(); |
1939 | atmci_set_pending(host, EVENT_XFER_COMPLETE); | 1509 | atmci_set_pending(host, EVENT_XFER_COMPLETE); |
1940 | } | 1510 | } |
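Both PIO paths above have to stitch a 32-bit FIFO word together when a scatterlist entry ends in the middle of a word; a standalone sketch of that assembly (hypothetical helper, not part of the patch):

    #include <stdint.h>
    #include <string.h>

    /* `tail` points at the last `remaining` bytes of the current buffer,
     * `next` at the start of the following scatterlist entry. */
    static uint32_t assemble_fifo_word(const uint8_t *tail, unsigned int remaining,
                                       const uint8_t *next)
    {
            uint32_t value = 0;

            memcpy(&value, tail, remaining);                            /* 1..3 tail bytes */
            memcpy((uint8_t *)&value + remaining, next, 4 - remaining); /* rest from the next entry */
            return value;
    }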
1941 | 1511 | ||
1512 | static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status) | ||
1513 | { | ||
1514 | mci_writel(host, IDR, MCI_CMDRDY); | ||
1515 | |||
1516 | host->cmd_status = status; | ||
1517 | smp_wmb(); | ||
1518 | atmci_set_pending(host, EVENT_CMD_COMPLETE); | ||
1519 | tasklet_schedule(&host->tasklet); | ||
1520 | } | ||
1521 | |||
1942 | static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status) | 1522 | static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status) |
1943 | { | 1523 | { |
1944 | int i; | 1524 | int i; |
1945 | 1525 | ||
1946 | for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { | 1526 | for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { |
1947 | struct atmel_mci_slot *slot = host->slot[i]; | 1527 | struct atmel_mci_slot *slot = host->slot[i]; |
1948 | if (slot && (status & slot->sdio_irq)) { | 1528 | if (slot && (status & slot->sdio_irq)) { |
1949 | mmc_signal_sdio_irq(slot->mmc); | 1529 | mmc_signal_sdio_irq(slot->mmc); |
@@ -1959,120 +1539,40 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id) | |||
1959 | unsigned int pass_count = 0; | 1539 | unsigned int pass_count = 0; |
1960 | 1540 | ||
1961 | do { | 1541 | do { |
1962 | status = atmci_readl(host, ATMCI_SR); | 1542 | status = mci_readl(host, SR); |
1963 | mask = atmci_readl(host, ATMCI_IMR); | 1543 | mask = mci_readl(host, IMR); |
1964 | pending = status & mask; | 1544 | pending = status & mask; |
1965 | if (!pending) | 1545 | if (!pending) |
1966 | break; | 1546 | break; |
1967 | 1547 | ||
1968 | if (pending & ATMCI_DATA_ERROR_FLAGS) { | 1548 | if (pending & ATMCI_DATA_ERROR_FLAGS) { |
1969 | dev_dbg(&host->pdev->dev, "IRQ: data error\n"); | 1549 | mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS |
1970 | atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS | 1550 | | MCI_RXRDY | MCI_TXRDY); |
1971 | | ATMCI_RXRDY | ATMCI_TXRDY | 1551 | pending &= mci_readl(host, IMR); |
1972 | | ATMCI_ENDRX | ATMCI_ENDTX | ||
1973 | | ATMCI_RXBUFF | ATMCI_TXBUFE); | ||
1974 | 1552 | ||
1975 | host->data_status = status; | 1553 | host->data_status = status; |
1976 | dev_dbg(&host->pdev->dev, "set pending data error\n"); | ||
1977 | smp_wmb(); | 1554 | smp_wmb(); |
1978 | atmci_set_pending(host, EVENT_DATA_ERROR); | 1555 | atmci_set_pending(host, EVENT_DATA_ERROR); |
1979 | tasklet_schedule(&host->tasklet); | 1556 | tasklet_schedule(&host->tasklet); |
1980 | } | 1557 | } |
1981 | 1558 | if (pending & MCI_NOTBUSY) { | |
1982 | if (pending & ATMCI_TXBUFE) { | 1559 | mci_writel(host, IDR, |
1983 | dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n"); | 1560 | ATMCI_DATA_ERROR_FLAGS | MCI_NOTBUSY); |
1984 | atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE); | 1561 | if (!host->data_status) |
1985 | atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX); | 1562 | host->data_status = status; |
1986 | /* | ||
1987 | * We can receive this interrupt before the second PDC buffer | ||
1988 | * has been configured, so we need to reconfigure the first and | ||
1989 | * second buffers again. | ||
1990 | */ | ||
1991 | if (host->data_size) { | ||
1992 | atmci_pdc_set_both_buf(host, XFER_TRANSMIT); | ||
1993 | atmci_writel(host, ATMCI_IER, ATMCI_ENDTX); | ||
1994 | atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE); | ||
1995 | } else { | ||
1996 | atmci_pdc_complete(host); | ||
1997 | } | ||
1998 | } else if (pending & ATMCI_ENDTX) { | ||
1999 | dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n"); | ||
2000 | atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX); | ||
2001 | |||
2002 | if (host->data_size) { | ||
2003 | atmci_pdc_set_single_buf(host, | ||
2004 | XFER_TRANSMIT, PDC_SECOND_BUF); | ||
2005 | atmci_writel(host, ATMCI_IER, ATMCI_ENDTX); | ||
2006 | } | ||
2007 | } | ||
2008 | |||
2009 | if (pending & ATMCI_RXBUFF) { | ||
2010 | dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n"); | ||
2011 | atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF); | ||
2012 | atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX); | ||
2013 | /* | ||
2014 | * We can receive this interruption before having configured | ||
2015 | * the second pdc buffer, so we need to reconfigure first and | ||
2016 | * second buffers again | ||
2017 | */ | ||
2018 | if (host->data_size) { | ||
2019 | atmci_pdc_set_both_buf(host, XFER_RECEIVE); | ||
2020 | atmci_writel(host, ATMCI_IER, ATMCI_ENDRX); | ||
2021 | atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF); | ||
2022 | } else { | ||
2023 | atmci_pdc_complete(host); | ||
2024 | } | ||
2025 | } else if (pending & ATMCI_ENDRX) { | ||
2026 | dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n"); | ||
2027 | atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX); | ||
2028 | |||
2029 | if (host->data_size) { | ||
2030 | atmci_pdc_set_single_buf(host, | ||
2031 | XFER_RECEIVE, PDC_SECOND_BUF); | ||
2032 | atmci_writel(host, ATMCI_IER, ATMCI_ENDRX); | ||
2033 | } | ||
2034 | } | ||
2035 | |||
2036 | /* | ||
2037 | * Early MCI IP revisions, mainly the ones with a PDC, have | ||
2038 | * issues with the not-busy signal: it is not asserted after a | ||
2039 | * data transmission unless a stop command has been sent. | ||
2040 | * The appropriate workaround is to use the BLKE signal. | ||
2041 | */ | ||
2042 | if (pending & ATMCI_BLKE) { | ||
2043 | dev_dbg(&host->pdev->dev, "IRQ: blke\n"); | ||
2044 | atmci_writel(host, ATMCI_IDR, ATMCI_BLKE); | ||
2045 | smp_wmb(); | 1563 | smp_wmb(); |
2046 | dev_dbg(&host->pdev->dev, "set pending notbusy\n"); | 1564 | atmci_set_pending(host, EVENT_DATA_COMPLETE); |
2047 | atmci_set_pending(host, EVENT_NOTBUSY); | ||
2048 | tasklet_schedule(&host->tasklet); | 1565 | tasklet_schedule(&host->tasklet); |
2049 | } | 1566 | } |
2050 | 1567 | if (pending & MCI_RXRDY) | |
2051 | if (pending & ATMCI_NOTBUSY) { | ||
2052 | dev_dbg(&host->pdev->dev, "IRQ: not_busy\n"); | ||
2053 | atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY); | ||
2054 | smp_wmb(); | ||
2055 | dev_dbg(&host->pdev->dev, "set pending notbusy\n"); | ||
2056 | atmci_set_pending(host, EVENT_NOTBUSY); | ||
2057 | tasklet_schedule(&host->tasklet); | ||
2058 | } | ||
2059 | |||
2060 | if (pending & ATMCI_RXRDY) | ||
2061 | atmci_read_data_pio(host); | 1568 | atmci_read_data_pio(host); |
2062 | if (pending & ATMCI_TXRDY) | 1569 | if (pending & MCI_TXRDY) |
2063 | atmci_write_data_pio(host); | 1570 | atmci_write_data_pio(host); |
2064 | 1571 | ||
2065 | if (pending & ATMCI_CMDRDY) { | 1572 | if (pending & MCI_CMDRDY) |
2066 | dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n"); | 1573 | atmci_cmd_interrupt(host, status); |
2067 | atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY); | ||
2068 | host->cmd_status = status; | ||
2069 | smp_wmb(); | ||
2070 | dev_dbg(&host->pdev->dev, "set pending cmd rdy\n"); | ||
2071 | atmci_set_pending(host, EVENT_CMD_RDY); | ||
2072 | tasklet_schedule(&host->tasklet); | ||
2073 | } | ||
2074 | 1574 | ||
2075 | if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB)) | 1575 | if (pending & (MCI_SDIOIRQA | MCI_SDIOIRQB)) |
2076 | atmci_sdio_interrupt(host, status); | 1576 | atmci_sdio_interrupt(host, status); |
2077 | 1577 | ||
2078 | } while (pass_count++ < 5); | 1578 | } while (pass_count++ < 5); |
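Both versions of the interrupt handler above follow the same bounded re-poll pattern: read the raw status, mask it with the currently enabled sources, service what is pending, and stop after a bounded number of passes so the handler cannot spin forever. A minimal standalone sketch of that pattern (read_status(), read_mask() and dispatch() are hypothetical stand-ins for the driver's register accessors, not real atmel-mci helpers):

    #include <stdint.h>

    /* Hypothetical accessors standing in for the driver's own. */
    extern uint32_t read_status(void);
    extern uint32_t read_mask(void);
    extern void dispatch(uint32_t pending, uint32_t status);

    static void irq_repoll(void)
    {
            unsigned int pass_count = 0;

            do {
                    uint32_t status  = read_status();        /* raw status bits */
                    uint32_t pending = status & read_mask(); /* enabled sources */

                    if (!pending)
                            break;
                    dispatch(pending, status); /* ack + defer work to a tasklet */
            } while (pass_count++ < 5);
    }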
@@ -2115,41 +1615,21 @@ static int __init atmci_init_slot(struct atmel_mci *host, | |||
2115 | slot->sdc_reg = sdc_reg; | 1615 | slot->sdc_reg = sdc_reg; |
2116 | slot->sdio_irq = sdio_irq; | 1616 | slot->sdio_irq = sdio_irq; |
2117 | 1617 | ||
2118 | dev_dbg(&mmc->class_dev, | ||
2119 | "slot[%u]: bus_width=%u, detect_pin=%d, " | ||
2120 | "detect_is_active_high=%s, wp_pin=%d\n", | ||
2121 | id, slot_data->bus_width, slot_data->detect_pin, | ||
2122 | slot_data->detect_is_active_high ? "true" : "false", | ||
2123 | slot_data->wp_pin); | ||
2124 | |||
2125 | mmc->ops = &atmci_ops; | 1618 | mmc->ops = &atmci_ops; |
2126 | mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512); | 1619 | mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512); |
2127 | mmc->f_max = host->bus_hz / 2; | 1620 | mmc->f_max = host->bus_hz / 2; |
2128 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; | 1621 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; |
2129 | if (sdio_irq) | 1622 | if (sdio_irq) |
2130 | mmc->caps |= MMC_CAP_SDIO_IRQ; | 1623 | mmc->caps |= MMC_CAP_SDIO_IRQ; |
2131 | if (host->caps.has_highspeed) | 1624 | if (atmci_is_mci2()) |
2132 | mmc->caps |= MMC_CAP_SD_HIGHSPEED; | 1625 | mmc->caps |= MMC_CAP_SD_HIGHSPEED; |
2133 | /* | 1626 | if (slot_data->bus_width >= 4) |
2134 | * Without the read/write proof capability, it is strongly recommended | ||
2135 | * to use only a 1-bit data bus, to prevent FIFO underruns and | ||
2136 | * overruns that would corrupt data. | ||
2137 | */ | ||
2138 | if ((slot_data->bus_width >= 4) && host->caps.has_rwproof) | ||
2139 | mmc->caps |= MMC_CAP_4_BIT_DATA; | 1627 | mmc->caps |= MMC_CAP_4_BIT_DATA; |
2140 | 1628 | ||
2141 | if (atmci_get_version(host) < 0x200) { | 1629 | mmc->max_segs = 64; |
2142 | mmc->max_segs = 256; | 1630 | mmc->max_req_size = 32768 * 512; |
2143 | mmc->max_blk_size = 4095; | 1631 | mmc->max_blk_size = 32768; |
2144 | mmc->max_blk_count = 256; | 1632 | mmc->max_blk_count = 512; |
2145 | mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; | ||
2146 | mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs; | ||
2147 | } else { | ||
2148 | mmc->max_segs = 64; | ||
2149 | mmc->max_req_size = 32768 * 512; | ||
2150 | mmc->max_blk_size = 32768; | ||
2151 | mmc->max_blk_count = 512; | ||
2152 | } | ||
2153 | 1633 | ||
2154 | /* Assume card is present initially */ | 1634 | /* Assume card is present initially */ |
2155 | set_bit(ATMCI_CARD_PRESENT, &slot->flags); | 1635 | set_bit(ATMCI_CARD_PRESENT, &slot->flags); |
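In both columns atmci_init_slot() derives the slot's clock limits from the peripheral clock: f_min = DIV_ROUND_UP(bus_hz, 512) and f_max = bus_hz / 2. As a worked example (50 MHz is an illustrative bus_hz, not a value taken from this patch), f_min = ceil(50,000,000 / 512) = 97,657 Hz and f_max = 25 MHz, so the MMC core will only ask this slot for clocks inside that window.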
@@ -2224,7 +1704,8 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot, | |||
2224 | mmc_free_host(slot->mmc); | 1704 | mmc_free_host(slot->mmc); |
2225 | } | 1705 | } |
2226 | 1706 | ||
2227 | static bool atmci_filter(struct dma_chan *chan, void *slave) | 1707 | #ifdef CONFIG_MMC_ATMELMCI_DMA |
1708 | static bool filter(struct dma_chan *chan, void *slave) | ||
2228 | { | 1709 | { |
2229 | struct mci_dma_data *sl = slave; | 1710 | struct mci_dma_data *sl = slave; |
2230 | 1711 | ||
@@ -2236,98 +1717,38 @@ static bool atmci_filter(struct dma_chan *chan, void *slave) | |||
2236 | } | 1717 | } |
2237 | } | 1718 | } |
2238 | 1719 | ||
2239 | static bool atmci_configure_dma(struct atmel_mci *host) | 1720 | static void atmci_configure_dma(struct atmel_mci *host) |
2240 | { | 1721 | { |
2241 | struct mci_platform_data *pdata; | 1722 | struct mci_platform_data *pdata; |
2242 | 1723 | ||
2243 | if (host == NULL) | 1724 | if (host == NULL) |
2244 | return false; | 1725 | return; |
2245 | 1726 | ||
2246 | pdata = host->pdev->dev.platform_data; | 1727 | pdata = host->pdev->dev.platform_data; |
2247 | 1728 | ||
2248 | if (!pdata) | 1729 | if (pdata && find_slave_dev(pdata->dma_slave)) { |
2249 | return false; | ||
2250 | |||
2251 | if (pdata->dma_slave && find_slave_dev(pdata->dma_slave)) { | ||
2252 | dma_cap_mask_t mask; | 1730 | dma_cap_mask_t mask; |
2253 | 1731 | ||
1732 | setup_dma_addr(pdata->dma_slave, | ||
1733 | host->mapbase + MCI_TDR, | ||
1734 | host->mapbase + MCI_RDR); | ||
1735 | |||
2254 | /* Try to grab a DMA channel */ | 1736 | /* Try to grab a DMA channel */ |
2255 | dma_cap_zero(mask); | 1737 | dma_cap_zero(mask); |
2256 | dma_cap_set(DMA_SLAVE, mask); | 1738 | dma_cap_set(DMA_SLAVE, mask); |
2257 | host->dma.chan = | 1739 | host->dma.chan = |
2258 | dma_request_channel(mask, atmci_filter, pdata->dma_slave); | 1740 | dma_request_channel(mask, filter, pdata->dma_slave); |
2259 | } | 1741 | } |
2260 | if (!host->dma.chan) { | 1742 | if (!host->dma.chan) |
2261 | dev_warn(&host->pdev->dev, "no DMA channel available\n"); | 1743 | dev_notice(&host->pdev->dev, "DMA not available, using PIO\n"); |
2262 | return false; | 1744 | else |
2263 | } else { | ||
2264 | dev_info(&host->pdev->dev, | 1745 | dev_info(&host->pdev->dev, |
2265 | "using %s for DMA transfers\n", | 1746 | "Using %s for DMA transfers\n", |
2266 | dma_chan_name(host->dma.chan)); | 1747 | dma_chan_name(host->dma.chan)); |
2267 | |||
2268 | host->dma_conf.src_addr = host->mapbase + ATMCI_RDR; | ||
2269 | host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
2270 | host->dma_conf.src_maxburst = 1; | ||
2271 | host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR; | ||
2272 | host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
2273 | host->dma_conf.dst_maxburst = 1; | ||
2274 | host->dma_conf.device_fc = false; | ||
2275 | return true; | ||
2276 | } | ||
2277 | } | ||
2278 | |||
2279 | /* | ||
2280 | * The HSMCI (High Speed MCI) module is not fully compatible with the MCI | ||
2281 | * module: HSMCI adds DMA support and a new config register but no longer | ||
2282 | * supports the PDC. | ||
2283 | */ | ||
2284 | static void __init atmci_get_cap(struct atmel_mci *host) | ||
2285 | { | ||
2286 | unsigned int version; | ||
2287 | |||
2288 | version = atmci_get_version(host); | ||
2289 | dev_info(&host->pdev->dev, | ||
2290 | "version: 0x%x\n", version); | ||
2291 | |||
2292 | host->caps.has_dma_conf_reg = 0; | ||
2293 | host->caps.has_pdc = ATMCI_PDC_CONNECTED; | ||
2294 | host->caps.has_cfg_reg = 0; | ||
2295 | host->caps.has_cstor_reg = 0; | ||
2296 | host->caps.has_highspeed = 0; | ||
2297 | host->caps.has_rwproof = 0; | ||
2298 | host->caps.has_odd_clk_div = 0; | ||
2299 | host->caps.has_bad_data_ordering = 1; | ||
2300 | host->caps.need_reset_after_xfer = 1; | ||
2301 | host->caps.need_blksz_mul_4 = 1; | ||
2302 | host->caps.need_notbusy_for_read_ops = 0; | ||
2303 | |||
2304 | /* keep only major version number */ | ||
2305 | switch (version & 0xf00) { | ||
2306 | case 0x500: | ||
2307 | host->caps.has_odd_clk_div = 1; | ||
2308 | case 0x400: | ||
2309 | case 0x300: | ||
2310 | host->caps.has_dma_conf_reg = 1; | ||
2311 | host->caps.has_pdc = 0; | ||
2312 | host->caps.has_cfg_reg = 1; | ||
2313 | host->caps.has_cstor_reg = 1; | ||
2314 | host->caps.has_highspeed = 1; | ||
2315 | case 0x200: | ||
2316 | host->caps.has_rwproof = 1; | ||
2317 | host->caps.need_blksz_mul_4 = 0; | ||
2318 | host->caps.need_notbusy_for_read_ops = 1; | ||
2319 | case 0x100: | ||
2320 | host->caps.has_bad_data_ordering = 0; | ||
2321 | host->caps.need_reset_after_xfer = 0; | ||
2322 | case 0x0: | ||
2323 | break; | ||
2324 | default: | ||
2325 | host->caps.has_pdc = 0; | ||
2326 | dev_warn(&host->pdev->dev, | ||
2327 | "Unmanaged mci version, set minimum capabilities\n"); | ||
2328 | break; | ||
2329 | } | ||
2330 | } | 1748 | } |
1749 | #else | ||
1750 | static void atmci_configure_dma(struct atmel_mci *host) {} | ||
1751 | #endif | ||
2331 | 1752 | ||
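The atmci_get_cap() helper removed on the left keys every capability off the IP's major version and relies on deliberate switch fall-through: each case sets that revision's extras and then falls into every older case, so features accumulate downwards. A small standalone sketch of the idiom (the field names and version cases here are illustrative, not the driver's full table):

    #include <stdio.h>

    struct caps { int odd_clk_div, dma_conf, rwproof; };

    static struct caps caps_for(unsigned int version)
    {
            struct caps c = { 0, 0, 0 };

            switch (version & 0xf00) {
            case 0x500:
                    c.odd_clk_div = 1;      /* fall through */
            case 0x300:
                    c.dma_conf = 1;         /* fall through */
            case 0x200:
                    c.rwproof = 1;
                    break;
            }
            return c;
    }

    int main(void)
    {
            struct caps c = caps_for(0x505);        /* a v5xx core ...   */
            printf("%d %d %d\n", c.odd_clk_div, c.dma_conf, c.rwproof);
            return 0;                               /* ... prints 1 1 1  */
    }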
2332 | static int __init atmci_probe(struct platform_device *pdev) | 1753 | static int __init atmci_probe(struct platform_device *pdev) |
2333 | { | 1754 | { |
@@ -2342,14 +1763,8 @@ static int __init atmci_probe(struct platform_device *pdev) | |||
2342 | if (!regs) | 1763 | if (!regs) |
2343 | return -ENXIO; | 1764 | return -ENXIO; |
2344 | pdata = pdev->dev.platform_data; | 1765 | pdata = pdev->dev.platform_data; |
2345 | if (!pdata) { | 1766 | if (!pdata) |
2346 | pdata = atmci_of_init(pdev); | 1767 | return -ENXIO; |
2347 | if (IS_ERR(pdata)) { | ||
2348 | dev_err(&pdev->dev, "platform data not available\n"); | ||
2349 | return PTR_ERR(pdata); | ||
2350 | } | ||
2351 | } | ||
2352 | |||
2353 | irq = platform_get_irq(pdev, 0); | 1768 | irq = platform_get_irq(pdev, 0); |
2354 | if (irq < 0) | 1769 | if (irq < 0) |
2355 | return irq; | 1770 | return irq; |
@@ -2374,7 +1789,7 @@ static int __init atmci_probe(struct platform_device *pdev) | |||
2374 | goto err_ioremap; | 1789 | goto err_ioremap; |
2375 | 1790 | ||
2376 | clk_enable(host->mck); | 1791 | clk_enable(host->mck); |
2377 | atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); | 1792 | mci_writel(host, CR, MCI_CR_SWRST); |
2378 | host->bus_hz = clk_get_rate(host->mck); | 1793 | host->bus_hz = clk_get_rate(host->mck); |
2379 | clk_disable(host->mck); | 1794 | clk_disable(host->mck); |
2380 | 1795 | ||
@@ -2386,48 +1801,24 @@ static int __init atmci_probe(struct platform_device *pdev) | |||
2386 | if (ret) | 1801 | if (ret) |
2387 | goto err_request_irq; | 1802 | goto err_request_irq; |
2388 | 1803 | ||
2389 | /* Get MCI capabilities and set operations according to it */ | 1804 | atmci_configure_dma(host); |
2390 | atmci_get_cap(host); | ||
2391 | if (atmci_configure_dma(host)) { | ||
2392 | host->prepare_data = &atmci_prepare_data_dma; | ||
2393 | host->submit_data = &atmci_submit_data_dma; | ||
2394 | host->stop_transfer = &atmci_stop_transfer_dma; | ||
2395 | } else if (host->caps.has_pdc) { | ||
2396 | dev_info(&pdev->dev, "using PDC\n"); | ||
2397 | host->prepare_data = &atmci_prepare_data_pdc; | ||
2398 | host->submit_data = &atmci_submit_data_pdc; | ||
2399 | host->stop_transfer = &atmci_stop_transfer_pdc; | ||
2400 | } else { | ||
2401 | dev_info(&pdev->dev, "using PIO\n"); | ||
2402 | host->prepare_data = &atmci_prepare_data; | ||
2403 | host->submit_data = &atmci_submit_data; | ||
2404 | host->stop_transfer = &atmci_stop_transfer; | ||
2405 | } | ||
2406 | 1805 | ||
2407 | platform_set_drvdata(pdev, host); | 1806 | platform_set_drvdata(pdev, host); |
2408 | 1807 | ||
2409 | setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host); | ||
2410 | |||
2411 | /* We need at least one slot to succeed */ | 1808 | /* We need at least one slot to succeed */ |
2412 | nr_slots = 0; | 1809 | nr_slots = 0; |
2413 | ret = -ENODEV; | 1810 | ret = -ENODEV; |
2414 | if (pdata->slot[0].bus_width) { | 1811 | if (pdata->slot[0].bus_width) { |
2415 | ret = atmci_init_slot(host, &pdata->slot[0], | 1812 | ret = atmci_init_slot(host, &pdata->slot[0], |
2416 | 0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA); | 1813 | 0, MCI_SDCSEL_SLOT_A, MCI_SDIOIRQA); |
2417 | if (!ret) { | 1814 | if (!ret) |
2418 | nr_slots++; | 1815 | nr_slots++; |
2419 | host->buf_size = host->slot[0]->mmc->max_req_size; | ||
2420 | } | ||
2421 | } | 1816 | } |
2422 | if (pdata->slot[1].bus_width) { | 1817 | if (pdata->slot[1].bus_width) { |
2423 | ret = atmci_init_slot(host, &pdata->slot[1], | 1818 | ret = atmci_init_slot(host, &pdata->slot[1], |
2424 | 1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB); | 1819 | 1, MCI_SDCSEL_SLOT_B, MCI_SDIOIRQB); |
2425 | if (!ret) { | 1820 | if (!ret) |
2426 | nr_slots++; | 1821 | nr_slots++; |
2427 | if (host->slot[1]->mmc->max_req_size > host->buf_size) | ||
2428 | host->buf_size = | ||
2429 | host->slot[1]->mmc->max_req_size; | ||
2430 | } | ||
2431 | } | 1822 | } |
2432 | 1823 | ||
2433 | if (!nr_slots) { | 1824 | if (!nr_slots) { |
@@ -2435,17 +1826,6 @@ static int __init atmci_probe(struct platform_device *pdev) | |||
2435 | goto err_init_slot; | 1826 | goto err_init_slot; |
2436 | } | 1827 | } |
2437 | 1828 | ||
2438 | if (!host->caps.has_rwproof) { | ||
2439 | host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size, | ||
2440 | &host->buf_phys_addr, | ||
2441 | GFP_KERNEL); | ||
2442 | if (!host->buffer) { | ||
2443 | ret = -ENOMEM; | ||
2444 | dev_err(&pdev->dev, "buffer allocation failed\n"); | ||
2445 | goto err_init_slot; | ||
2446 | } | ||
2447 | } | ||
2448 | |||
2449 | dev_info(&pdev->dev, | 1829 | dev_info(&pdev->dev, |
2450 | "Atmel MCI controller at 0x%08lx irq %d, %u slots\n", | 1830 | "Atmel MCI controller at 0x%08lx irq %d, %u slots\n", |
2451 | host->mapbase, irq, nr_slots); | 1831 | host->mapbase, irq, nr_slots); |
@@ -2453,8 +1833,10 @@ static int __init atmci_probe(struct platform_device *pdev) | |||
2453 | return 0; | 1833 | return 0; |
2454 | 1834 | ||
2455 | err_init_slot: | 1835 | err_init_slot: |
1836 | #ifdef CONFIG_MMC_ATMELMCI_DMA | ||
2456 | if (host->dma.chan) | 1837 | if (host->dma.chan) |
2457 | dma_release_channel(host->dma.chan); | 1838 | dma_release_channel(host->dma.chan); |
1839 | #endif | ||
2458 | free_irq(irq, host); | 1840 | free_irq(irq, host); |
2459 | err_request_irq: | 1841 | err_request_irq: |
2460 | iounmap(host->regs); | 1842 | iounmap(host->regs); |
@@ -2472,19 +1854,15 @@ static int __exit atmci_remove(struct platform_device *pdev) | |||
2472 | 1854 | ||
2473 | platform_set_drvdata(pdev, NULL); | 1855 | platform_set_drvdata(pdev, NULL); |
2474 | 1856 | ||
2475 | if (host->buffer) | 1857 | for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { |
2476 | dma_free_coherent(&pdev->dev, host->buf_size, | ||
2477 | host->buffer, host->buf_phys_addr); | ||
2478 | |||
2479 | for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { | ||
2480 | if (host->slot[i]) | 1858 | if (host->slot[i]) |
2481 | atmci_cleanup_slot(host->slot[i], i); | 1859 | atmci_cleanup_slot(host->slot[i], i); |
2482 | } | 1860 | } |
2483 | 1861 | ||
2484 | clk_enable(host->mck); | 1862 | clk_enable(host->mck); |
2485 | atmci_writel(host, ATMCI_IDR, ~0UL); | 1863 | mci_writel(host, IDR, ~0UL); |
2486 | atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS); | 1864 | mci_writel(host, CR, MCI_CR_MCIDIS); |
2487 | atmci_readl(host, ATMCI_SR); | 1865 | mci_readl(host, SR); |
2488 | clk_disable(host->mck); | 1866 | clk_disable(host->mck); |
2489 | 1867 | ||
2490 | #ifdef CONFIG_MMC_ATMELMCI_DMA | 1868 | #ifdef CONFIG_MMC_ATMELMCI_DMA |
@@ -2507,7 +1885,7 @@ static int atmci_suspend(struct device *dev) | |||
2507 | struct atmel_mci *host = dev_get_drvdata(dev); | 1885 | struct atmel_mci *host = dev_get_drvdata(dev); |
2508 | int i; | 1886 | int i; |
2509 | 1887 | ||
2510 | for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { | 1888 | for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { |
2511 | struct atmel_mci_slot *slot = host->slot[i]; | 1889 | struct atmel_mci_slot *slot = host->slot[i]; |
2512 | int ret; | 1890 | int ret; |
2513 | 1891 | ||
@@ -2538,7 +1916,7 @@ static int atmci_resume(struct device *dev) | |||
2538 | int i; | 1916 | int i; |
2539 | int ret = 0; | 1917 | int ret = 0; |
2540 | 1918 | ||
2541 | for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { | 1919 | for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { |
2542 | struct atmel_mci_slot *slot = host->slot[i]; | 1920 | struct atmel_mci_slot *slot = host->slot[i]; |
2543 | int err; | 1921 | int err; |
2544 | 1922 | ||
@@ -2567,7 +1945,6 @@ static struct platform_driver atmci_driver = { | |||
2567 | .driver = { | 1945 | .driver = { |
2568 | .name = "atmel_mci", | 1946 | .name = "atmel_mci", |
2569 | .pm = ATMCI_PM_OPS, | 1947 | .pm = ATMCI_PM_OPS, |
2570 | .of_match_table = of_match_ptr(atmci_dt_ids), | ||
2571 | }, | 1948 | }, |
2572 | }; | 1949 | }; |
2573 | 1950 | ||
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c index 127a8fade4d..ef72e874ca3 100644 --- a/drivers/mmc/host/au1xmmc.c +++ b/drivers/mmc/host/au1xmmc.c | |||
@@ -55,7 +55,7 @@ | |||
55 | 55 | ||
56 | #ifdef DEBUG | 56 | #ifdef DEBUG |
57 | #define DBG(fmt, idx, args...) \ | 57 | #define DBG(fmt, idx, args...) \ |
58 | pr_debug("au1xmmc(%d): DEBUG: " fmt, idx, ##args) | 58 | printk(KERN_DEBUG "au1xmmc(%d): DEBUG: " fmt, idx, ##args) |
59 | #else | 59 | #else |
60 | #define DBG(fmt, idx, args...) do {} while (0) | 60 | #define DBG(fmt, idx, args...) do {} while (0) |
61 | #endif | 61 | #endif |
@@ -64,8 +64,11 @@ | |||
64 | #define AU1XMMC_DESCRIPTOR_COUNT 1 | 64 | #define AU1XMMC_DESCRIPTOR_COUNT 1 |
65 | 65 | ||
66 | /* max DMA seg size: 64KB on Au1100, 4MB on Au1200 */ | 66 | /* max DMA seg size: 64KB on Au1100, 4MB on Au1200 */ |
67 | #define AU1100_MMC_DESCRIPTOR_SIZE 0x0000ffff | 67 | #ifdef CONFIG_SOC_AU1100 |
68 | #define AU1200_MMC_DESCRIPTOR_SIZE 0x003fffff | 68 | #define AU1XMMC_DESCRIPTOR_SIZE 0x0000ffff |
69 | #else /* Au1200 */ | ||
70 | #define AU1XMMC_DESCRIPTOR_SIZE 0x003fffff | ||
71 | #endif | ||
69 | 72 | ||
70 | #define AU1XMMC_OCR (MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \ | 73 | #define AU1XMMC_OCR (MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \ |
71 | MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \ | 74 | MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \ |
@@ -124,7 +127,6 @@ struct au1xmmc_host { | |||
124 | #define HOST_F_XMIT 0x0001 | 127 | #define HOST_F_XMIT 0x0001 |
125 | #define HOST_F_RECV 0x0002 | 128 | #define HOST_F_RECV 0x0002 |
126 | #define HOST_F_DMA 0x0010 | 129 | #define HOST_F_DMA 0x0010 |
127 | #define HOST_F_DBDMA 0x0020 | ||
128 | #define HOST_F_ACTIVE 0x0100 | 130 | #define HOST_F_ACTIVE 0x0100 |
129 | #define HOST_F_STOP 0x1000 | 131 | #define HOST_F_STOP 0x1000 |
130 | 132 | ||
@@ -149,17 +151,6 @@ struct au1xmmc_host { | |||
149 | #define DMA_CHANNEL(h) \ | 151 | #define DMA_CHANNEL(h) \ |
150 | (((h)->flags & HOST_F_XMIT) ? (h)->tx_chan : (h)->rx_chan) | 152 | (((h)->flags & HOST_F_XMIT) ? (h)->tx_chan : (h)->rx_chan) |
151 | 153 | ||
152 | static inline int has_dbdma(void) | ||
153 | { | ||
154 | switch (alchemy_get_cputype()) { | ||
155 | case ALCHEMY_CPU_AU1200: | ||
156 | case ALCHEMY_CPU_AU1300: | ||
157 | return 1; | ||
158 | default: | ||
159 | return 0; | ||
160 | } | ||
161 | } | ||
162 | |||
163 | static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask) | 154 | static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask) |
164 | { | 155 | { |
165 | u32 val = au_readl(HOST_CONFIG(host)); | 156 | u32 val = au_readl(HOST_CONFIG(host)); |
@@ -277,7 +268,7 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait, | |||
277 | mmccmd |= SD_CMD_RT_3; | 268 | mmccmd |= SD_CMD_RT_3; |
278 | break; | 269 | break; |
279 | default: | 270 | default: |
280 | pr_info("au1xmmc: unhandled response type %02x\n", | 271 | printk(KERN_INFO "au1xmmc: unhandled response type %02x\n", |
281 | mmc_resp_type(cmd)); | 272 | mmc_resp_type(cmd)); |
282 | return -EINVAL; | 273 | return -EINVAL; |
283 | } | 274 | } |
@@ -362,12 +353,14 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status) | |||
362 | data->bytes_xfered = 0; | 353 | data->bytes_xfered = 0; |
363 | 354 | ||
364 | if (!data->error) { | 355 | if (!data->error) { |
365 | if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) { | 356 | if (host->flags & HOST_F_DMA) { |
357 | #ifdef CONFIG_SOC_AU1200 /* DBDMA */ | ||
366 | u32 chan = DMA_CHANNEL(host); | 358 | u32 chan = DMA_CHANNEL(host); |
367 | 359 | ||
368 | chan_tab_t *c = *((chan_tab_t **)chan); | 360 | chan_tab_t *c = *((chan_tab_t **)chan); |
369 | au1x_dma_chan_t *cp = c->chan_ptr; | 361 | au1x_dma_chan_t *cp = c->chan_ptr; |
370 | data->bytes_xfered = cp->ddma_bytecnt; | 362 | data->bytes_xfered = cp->ddma_bytecnt; |
363 | #endif | ||
371 | } else | 364 | } else |
372 | data->bytes_xfered = | 365 | data->bytes_xfered = |
373 | (data->blocks * data->blksz) - host->pio.len; | 366 | (data->blocks * data->blksz) - host->pio.len; |
@@ -577,10 +570,11 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) | |||
577 | 570 | ||
578 | host->status = HOST_S_DATA; | 571 | host->status = HOST_S_DATA; |
579 | 572 | ||
580 | if ((host->flags & (HOST_F_DMA | HOST_F_DBDMA))) { | 573 | if (host->flags & HOST_F_DMA) { |
574 | #ifdef CONFIG_SOC_AU1200 /* DBDMA */ | ||
581 | u32 channel = DMA_CHANNEL(host); | 575 | u32 channel = DMA_CHANNEL(host); |
582 | 576 | ||
583 | /* Start the DBDMA as soon as the buffer gets something in it */ | 577 | /* Start the DMA as soon as the buffer gets something in it */ |
584 | 578 | ||
585 | if (host->flags & HOST_F_RECV) { | 579 | if (host->flags & HOST_F_RECV) { |
586 | u32 mask = SD_STATUS_DB | SD_STATUS_NE; | 580 | u32 mask = SD_STATUS_DB | SD_STATUS_NE; |
@@ -590,6 +584,7 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) | |||
590 | } | 584 | } |
591 | 585 | ||
592 | au1xxx_dbdma_start(channel); | 586 | au1xxx_dbdma_start(channel); |
587 | #endif | ||
593 | } | 588 | } |
594 | } | 589 | } |
595 | 590 | ||
@@ -638,7 +633,8 @@ static int au1xmmc_prepare_data(struct au1xmmc_host *host, | |||
638 | 633 | ||
639 | au_writel(data->blksz - 1, HOST_BLKSIZE(host)); | 634 | au_writel(data->blksz - 1, HOST_BLKSIZE(host)); |
640 | 635 | ||
641 | if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) { | 636 | if (host->flags & HOST_F_DMA) { |
637 | #ifdef CONFIG_SOC_AU1200 /* DBDMA */ | ||
642 | int i; | 638 | int i; |
643 | u32 channel = DMA_CHANNEL(host); | 639 | u32 channel = DMA_CHANNEL(host); |
644 | 640 | ||
@@ -667,6 +663,7 @@ static int au1xmmc_prepare_data(struct au1xmmc_host *host, | |||
667 | 663 | ||
668 | datalen -= len; | 664 | datalen -= len; |
669 | } | 665 | } |
666 | #endif | ||
670 | } else { | 667 | } else { |
671 | host->pio.index = 0; | 668 | host->pio.index = 0; |
672 | host->pio.offset = 0; | 669 | host->pio.offset = 0; |
@@ -769,15 +766,11 @@ static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
769 | 766 | ||
770 | config2 = au_readl(HOST_CONFIG2(host)); | 767 | config2 = au_readl(HOST_CONFIG2(host)); |
771 | switch (ios->bus_width) { | 768 | switch (ios->bus_width) { |
772 | case MMC_BUS_WIDTH_8: | ||
773 | config2 |= SD_CONFIG2_BB; | ||
774 | break; | ||
775 | case MMC_BUS_WIDTH_4: | 769 | case MMC_BUS_WIDTH_4: |
776 | config2 &= ~SD_CONFIG2_BB; | ||
777 | config2 |= SD_CONFIG2_WB; | 770 | config2 |= SD_CONFIG2_WB; |
778 | break; | 771 | break; |
779 | case MMC_BUS_WIDTH_1: | 772 | case MMC_BUS_WIDTH_1: |
780 | config2 &= ~(SD_CONFIG2_WB | SD_CONFIG2_BB); | 773 | config2 &= ~SD_CONFIG2_WB; |
781 | break; | 774 | break; |
782 | } | 775 | } |
783 | au_writel(config2, HOST_CONFIG2(host)); | 776 | au_writel(config2, HOST_CONFIG2(host)); |
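Both variants of au1xmmc_set_ios() program the bus width with a plain read-modify-write of the CONFIG2 register; the left-hand version additionally distinguishes an 8-bit mode via SD_CONFIG2_BB. A sketch of that pattern with purely illustrative bit positions (not the real Au1x register layout):

    #include <stdint.h>

    #define CFG2_WB (1u << 8)       /* illustrative: 4-bit wide-bus flag */
    #define CFG2_BB (1u << 9)       /* illustrative: 8-bit bus flag      */

    /* Start from 1-bit (both flags clear), then set the one that matches. */
    static uint32_t apply_bus_width(uint32_t cfg2, int width_bits)
    {
            cfg2 &= ~(CFG2_WB | CFG2_BB);
            if (width_bits == 4)
                    cfg2 |= CFG2_WB;
            else if (width_bits == 8)
                    cfg2 |= CFG2_BB;
            return cfg2;
    }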
@@ -845,6 +838,7 @@ static irqreturn_t au1xmmc_irq(int irq, void *dev_id) | |||
845 | return IRQ_HANDLED; | 838 | return IRQ_HANDLED; |
846 | } | 839 | } |
847 | 840 | ||
841 | #ifdef CONFIG_SOC_AU1200 | ||
848 | /* 8bit memory DMA device */ | 842 | /* 8bit memory DMA device */ |
849 | static dbdev_tab_t au1xmmc_mem_dbdev = { | 843 | static dbdev_tab_t au1xmmc_mem_dbdev = { |
850 | .dev_id = DSCR_CMD0_ALWAYS, | 844 | .dev_id = DSCR_CMD0_ALWAYS, |
@@ -911,7 +905,7 @@ static int au1xmmc_dbdma_init(struct au1xmmc_host *host) | |||
911 | au1xxx_dbdma_ring_alloc(host->rx_chan, AU1XMMC_DESCRIPTOR_COUNT); | 905 | au1xxx_dbdma_ring_alloc(host->rx_chan, AU1XMMC_DESCRIPTOR_COUNT); |
912 | 906 | ||
913 | /* DBDMA is good to go */ | 907 | /* DBDMA is good to go */ |
914 | host->flags |= HOST_F_DMA | HOST_F_DBDMA; | 908 | host->flags |= HOST_F_DMA; |
915 | 909 | ||
916 | return 0; | 910 | return 0; |
917 | } | 911 | } |
@@ -924,6 +918,7 @@ static void au1xmmc_dbdma_shutdown(struct au1xmmc_host *host) | |||
924 | au1xxx_dbdma_chan_free(host->rx_chan); | 918 | au1xxx_dbdma_chan_free(host->rx_chan); |
925 | } | 919 | } |
926 | } | 920 | } |
921 | #endif | ||
927 | 922 | ||
928 | static void au1xmmc_enable_sdio_irq(struct mmc_host *mmc, int en) | 923 | static void au1xmmc_enable_sdio_irq(struct mmc_host *mmc, int en) |
929 | { | 924 | { |
@@ -943,12 +938,12 @@ static const struct mmc_host_ops au1xmmc_ops = { | |||
943 | .enable_sdio_irq = au1xmmc_enable_sdio_irq, | 938 | .enable_sdio_irq = au1xmmc_enable_sdio_irq, |
944 | }; | 939 | }; |
945 | 940 | ||
946 | static int au1xmmc_probe(struct platform_device *pdev) | 941 | static int __devinit au1xmmc_probe(struct platform_device *pdev) |
947 | { | 942 | { |
948 | struct mmc_host *mmc; | 943 | struct mmc_host *mmc; |
949 | struct au1xmmc_host *host; | 944 | struct au1xmmc_host *host; |
950 | struct resource *r; | 945 | struct resource *r; |
951 | int ret, iflag; | 946 | int ret; |
952 | 947 | ||
953 | mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev); | 948 | mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev); |
954 | if (!mmc) { | 949 | if (!mmc) { |
@@ -987,43 +982,29 @@ static int au1xmmc_probe(struct platform_device *pdev) | |||
987 | dev_err(&pdev->dev, "no IRQ defined\n"); | 982 | dev_err(&pdev->dev, "no IRQ defined\n"); |
988 | goto out3; | 983 | goto out3; |
989 | } | 984 | } |
985 | |||
990 | host->irq = r->start; | 986 | host->irq = r->start; |
987 | /* IRQ is shared among both SD controllers */ | ||
988 | ret = request_irq(host->irq, au1xmmc_irq, IRQF_SHARED, | ||
989 | DRIVER_NAME, host); | ||
990 | if (ret) { | ||
991 | dev_err(&pdev->dev, "cannot grab IRQ\n"); | ||
992 | goto out3; | ||
993 | } | ||
991 | 994 | ||
992 | mmc->ops = &au1xmmc_ops; | 995 | mmc->ops = &au1xmmc_ops; |
993 | 996 | ||
994 | mmc->f_min = 450000; | 997 | mmc->f_min = 450000; |
995 | mmc->f_max = 24000000; | 998 | mmc->f_max = 24000000; |
996 | 999 | ||
1000 | mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE; | ||
1001 | mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT; | ||
1002 | |||
997 | mmc->max_blk_size = 2048; | 1003 | mmc->max_blk_size = 2048; |
998 | mmc->max_blk_count = 512; | 1004 | mmc->max_blk_count = 512; |
999 | 1005 | ||
1000 | mmc->ocr_avail = AU1XMMC_OCR; | 1006 | mmc->ocr_avail = AU1XMMC_OCR; |
1001 | mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; | 1007 | mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; |
1002 | mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT; | ||
1003 | |||
1004 | iflag = IRQF_SHARED; /* Au1100/Au1200: one int for both ctrls */ | ||
1005 | |||
1006 | switch (alchemy_get_cputype()) { | ||
1007 | case ALCHEMY_CPU_AU1100: | ||
1008 | mmc->max_seg_size = AU1100_MMC_DESCRIPTOR_SIZE; | ||
1009 | break; | ||
1010 | case ALCHEMY_CPU_AU1200: | ||
1011 | mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE; | ||
1012 | break; | ||
1013 | case ALCHEMY_CPU_AU1300: | ||
1014 | iflag = 0; /* nothing is shared */ | ||
1015 | mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE; | ||
1016 | mmc->f_max = 52000000; | ||
1017 | if (host->ioarea->start == AU1100_SD0_PHYS_ADDR) | ||
1018 | mmc->caps |= MMC_CAP_8_BIT_DATA; | ||
1019 | break; | ||
1020 | } | ||
1021 | |||
1022 | ret = request_irq(host->irq, au1xmmc_irq, iflag, DRIVER_NAME, host); | ||
1023 | if (ret) { | ||
1024 | dev_err(&pdev->dev, "cannot grab IRQ\n"); | ||
1025 | goto out3; | ||
1026 | } | ||
1027 | 1008 | ||
1028 | host->status = HOST_S_IDLE; | 1009 | host->status = HOST_S_IDLE; |
1029 | 1010 | ||
@@ -1047,11 +1028,11 @@ static int au1xmmc_probe(struct platform_device *pdev) | |||
1047 | tasklet_init(&host->finish_task, au1xmmc_tasklet_finish, | 1028 | tasklet_init(&host->finish_task, au1xmmc_tasklet_finish, |
1048 | (unsigned long)host); | 1029 | (unsigned long)host); |
1049 | 1030 | ||
1050 | if (has_dbdma()) { | 1031 | #ifdef CONFIG_SOC_AU1200 |
1051 | ret = au1xmmc_dbdma_init(host); | 1032 | ret = au1xmmc_dbdma_init(host); |
1052 | if (ret) | 1033 | if (ret) |
1053 | pr_info(DRIVER_NAME ": DBDMA init failed; using PIO\n"); | 1034 | printk(KERN_INFO DRIVER_NAME ": DBDMA init failed; using PIO\n"); |
1054 | } | 1035 | #endif |
1055 | 1036 | ||
1056 | #ifdef CONFIG_LEDS_CLASS | 1037 | #ifdef CONFIG_LEDS_CLASS |
1057 | if (host->platdata && host->platdata->led) { | 1038 | if (host->platdata && host->platdata->led) { |
@@ -1075,7 +1056,7 @@ static int au1xmmc_probe(struct platform_device *pdev) | |||
1075 | 1056 | ||
1076 | platform_set_drvdata(pdev, host); | 1057 | platform_set_drvdata(pdev, host); |
1077 | 1058 | ||
1078 | pr_info(DRIVER_NAME ": MMC Controller %d set up at %8.8X" | 1059 | printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X" |
1079 | " (mode=%s)\n", pdev->id, host->iobase, | 1060 | " (mode=%s)\n", pdev->id, host->iobase, |
1080 | host->flags & HOST_F_DMA ? "dma" : "pio"); | 1061 | host->flags & HOST_F_DMA ? "dma" : "pio"); |
1081 | 1062 | ||
@@ -1092,8 +1073,9 @@ out5: | |||
1092 | au_writel(0, HOST_CONFIG2(host)); | 1073 | au_writel(0, HOST_CONFIG2(host)); |
1093 | au_sync(); | 1074 | au_sync(); |
1094 | 1075 | ||
1095 | if (host->flags & HOST_F_DBDMA) | 1076 | #ifdef CONFIG_SOC_AU1200 |
1096 | au1xmmc_dbdma_shutdown(host); | 1077 | au1xmmc_dbdma_shutdown(host); |
1078 | #endif | ||
1097 | 1079 | ||
1098 | tasklet_kill(&host->data_task); | 1080 | tasklet_kill(&host->data_task); |
1099 | tasklet_kill(&host->finish_task); | 1081 | tasklet_kill(&host->finish_task); |
@@ -1114,7 +1096,7 @@ out0: | |||
1114 | return ret; | 1096 | return ret; |
1115 | } | 1097 | } |
1116 | 1098 | ||
1117 | static int au1xmmc_remove(struct platform_device *pdev) | 1099 | static int __devexit au1xmmc_remove(struct platform_device *pdev) |
1118 | { | 1100 | { |
1119 | struct au1xmmc_host *host = platform_get_drvdata(pdev); | 1101 | struct au1xmmc_host *host = platform_get_drvdata(pdev); |
1120 | 1102 | ||
@@ -1138,9 +1120,9 @@ static int au1xmmc_remove(struct platform_device *pdev) | |||
1138 | tasklet_kill(&host->data_task); | 1120 | tasklet_kill(&host->data_task); |
1139 | tasklet_kill(&host->finish_task); | 1121 | tasklet_kill(&host->finish_task); |
1140 | 1122 | ||
1141 | if (host->flags & HOST_F_DBDMA) | 1123 | #ifdef CONFIG_SOC_AU1200 |
1142 | au1xmmc_dbdma_shutdown(host); | 1124 | au1xmmc_dbdma_shutdown(host); |
1143 | 1125 | #endif | |
1144 | au1xmmc_set_power(host, 0); | 1126 | au1xmmc_set_power(host, 0); |
1145 | 1127 | ||
1146 | free_irq(host->irq, host); | 1128 | free_irq(host->irq, host); |
@@ -1199,23 +1181,24 @@ static struct platform_driver au1xmmc_driver = { | |||
1199 | 1181 | ||
1200 | static int __init au1xmmc_init(void) | 1182 | static int __init au1xmmc_init(void) |
1201 | { | 1183 | { |
1202 | if (has_dbdma()) { | 1184 | #ifdef CONFIG_SOC_AU1200 |
1203 | /* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride | 1185 | /* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride |
1204 | * of 8 bits. And since devices are shared, we need to create | 1186 | * of 8 bits. And since devices are shared, we need to create |
1205 | * our own to avoid freaking out other devices. | 1187 | * our own to avoid freaking out other devices. |
1206 | */ | 1188 | */ |
1207 | memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev); | 1189 | memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev); |
1208 | if (!memid) | 1190 | if (!memid) |
1209 | pr_err("au1xmmc: cannot add memory dbdma\n"); | 1191 | printk(KERN_ERR "au1xmmc: cannot add memory dbdma dev\n"); |
1210 | } | 1192 | #endif |
1211 | return platform_driver_register(&au1xmmc_driver); | 1193 | return platform_driver_register(&au1xmmc_driver); |
1212 | } | 1194 | } |
1213 | 1195 | ||
1214 | static void __exit au1xmmc_exit(void) | 1196 | static void __exit au1xmmc_exit(void) |
1215 | { | 1197 | { |
1216 | if (has_dbdma() && memid) | 1198 | #ifdef CONFIG_SOC_AU1200 |
1199 | if (memid) | ||
1217 | au1xxx_ddma_del_device(memid); | 1200 | au1xxx_ddma_del_device(memid); |
1218 | 1201 | #endif | |
1219 | platform_driver_unregister(&au1xmmc_driver); | 1202 | platform_driver_unregister(&au1xmmc_driver); |
1220 | } | 1203 | } |
1221 | 1204 | ||
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c index fb4348c5b6a..0371bf50224 100644 --- a/drivers/mmc/host/bfin_sdh.c +++ b/drivers/mmc/host/bfin_sdh.c | |||
@@ -24,7 +24,9 @@ | |||
24 | #include <asm/portmux.h> | 24 | #include <asm/portmux.h> |
25 | #include <asm/bfin_sdh.h> | 25 | #include <asm/bfin_sdh.h> |
26 | 26 | ||
27 | #if defined(CONFIG_BF51x) || defined(__ADSPBF60x__) | 27 | #if defined(CONFIG_BF51x) |
28 | #define bfin_read_SDH_PWR_CTL bfin_read_RSI_PWR_CTL | ||
29 | #define bfin_write_SDH_PWR_CTL bfin_write_RSI_PWR_CTL | ||
28 | #define bfin_read_SDH_CLK_CTL bfin_read_RSI_CLK_CTL | 30 | #define bfin_read_SDH_CLK_CTL bfin_read_RSI_CLK_CTL |
29 | #define bfin_write_SDH_CLK_CTL bfin_write_RSI_CLK_CTL | 31 | #define bfin_write_SDH_CLK_CTL bfin_write_RSI_CLK_CTL |
30 | #define bfin_write_SDH_ARGUMENT bfin_write_RSI_ARGUMENT | 32 | #define bfin_write_SDH_ARGUMENT bfin_write_RSI_ARGUMENT |
@@ -43,18 +45,17 @@ | |||
43 | #define bfin_write_SDH_E_STATUS bfin_write_RSI_E_STATUS | 45 | #define bfin_write_SDH_E_STATUS bfin_write_RSI_E_STATUS |
44 | #define bfin_read_SDH_STATUS bfin_read_RSI_STATUS | 46 | #define bfin_read_SDH_STATUS bfin_read_RSI_STATUS |
45 | #define bfin_write_SDH_MASK0 bfin_write_RSI_MASK0 | 47 | #define bfin_write_SDH_MASK0 bfin_write_RSI_MASK0 |
46 | #define bfin_write_SDH_E_MASK bfin_write_RSI_E_MASK | ||
47 | #define bfin_read_SDH_CFG bfin_read_RSI_CFG | 48 | #define bfin_read_SDH_CFG bfin_read_RSI_CFG |
48 | #define bfin_write_SDH_CFG bfin_write_RSI_CFG | 49 | #define bfin_write_SDH_CFG bfin_write_RSI_CFG |
49 | # if defined(__ADSPBF60x__) | ||
50 | # define bfin_read_SDH_BLK_SIZE bfin_read_RSI_BLKSZ | ||
51 | # define bfin_write_SDH_BLK_SIZE bfin_write_RSI_BLKSZ | ||
52 | # else | ||
53 | # define bfin_read_SDH_PWR_CTL bfin_read_RSI_PWR_CTL | ||
54 | # define bfin_write_SDH_PWR_CTL bfin_write_RSI_PWR_CTL | ||
55 | # endif | ||
56 | #endif | 50 | #endif |
57 | 51 | ||
52 | struct dma_desc_array { | ||
53 | unsigned long start_addr; | ||
54 | unsigned short cfg; | ||
55 | unsigned short x_count; | ||
56 | short x_modify; | ||
57 | } __packed; | ||
58 | |||
58 | struct sdh_host { | 59 | struct sdh_host { |
59 | struct mmc_host *mmc; | 60 | struct mmc_host *mmc; |
60 | spinlock_t lock; | 61 | spinlock_t lock; |
@@ -68,7 +69,6 @@ struct sdh_host { | |||
68 | dma_addr_t sg_dma; | 69 | dma_addr_t sg_dma; |
69 | int dma_len; | 70 | int dma_len; |
70 | 71 | ||
71 | unsigned long sclk; | ||
72 | unsigned int imask; | 72 | unsigned int imask; |
73 | unsigned int power_mode; | 73 | unsigned int power_mode; |
74 | unsigned int clk_div; | 74 | unsigned int clk_div; |
@@ -134,15 +134,11 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data) | |||
134 | /* Only supports power-of-2 block size */ | 134 | /* Only supports power-of-2 block size */ |
135 | if (data->blksz & (data->blksz - 1)) | 135 | if (data->blksz & (data->blksz - 1)) |
136 | return -EINVAL; | 136 | return -EINVAL; |
137 | #ifndef RSI_BLKSZ | ||
138 | data_ctl |= ((ffs(data->blksz) - 1) << 4); | 137 | data_ctl |= ((ffs(data->blksz) - 1) << 4); |
139 | #else | ||
140 | bfin_write_SDH_BLK_SIZE(data->blksz); | ||
141 | #endif | ||
142 | 138 | ||
143 | bfin_write_SDH_DATA_CTL(data_ctl); | 139 | bfin_write_SDH_DATA_CTL(data_ctl); |
144 | /* the time of a host clock period in ns */ | 140 | /* the time of a host clock period in ns */ |
145 | cycle_ns = 1000000000 / (host->sclk / (2 * (host->clk_div + 1))); | 141 | cycle_ns = 1000000000 / (get_sclk() / (2 * (host->clk_div + 1))); |
146 | timeout = data->timeout_ns / cycle_ns; | 142 | timeout = data->timeout_ns / cycle_ns; |
147 | timeout += data->timeout_clks; | 143 | timeout += data->timeout_clks; |
148 | bfin_write_SDH_DATA_TIMER(timeout); | 144 | bfin_write_SDH_DATA_TIMER(timeout); |
@@ -156,13 +152,8 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data) | |||
156 | 152 | ||
157 | sdh_enable_stat_irq(host, (DAT_CRC_FAIL | DAT_TIME_OUT | DAT_END)); | 153 | sdh_enable_stat_irq(host, (DAT_CRC_FAIL | DAT_TIME_OUT | DAT_END)); |
158 | host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir); | 154 | host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir); |
159 | #if defined(CONFIG_BF54x) || defined(CONFIG_BF60x) | 155 | #if defined(CONFIG_BF54x) |
160 | dma_cfg |= DMAFLOW_ARRAY | RESTART | WDSIZE_32 | DMAEN; | 156 | dma_cfg |= DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_32 | DMAEN; |
161 | # ifdef RSI_BLKSZ | ||
162 | dma_cfg |= PSIZE_32 | NDSIZE_3; | ||
163 | # else | ||
164 | dma_cfg |= NDSIZE_5; | ||
165 | # endif | ||
166 | { | 157 | { |
167 | struct scatterlist *sg; | 158 | struct scatterlist *sg; |
168 | int i; | 159 | int i; |
@@ -172,7 +163,7 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data) | |||
172 | host->sg_cpu[i].x_count = sg_dma_len(sg) / 4; | 163 | host->sg_cpu[i].x_count = sg_dma_len(sg) / 4; |
173 | host->sg_cpu[i].x_modify = 4; | 164 | host->sg_cpu[i].x_modify = 4; |
174 | dev_dbg(mmc_dev(host->mmc), "%d: start_addr:0x%lx, " | 165 | dev_dbg(mmc_dev(host->mmc), "%d: start_addr:0x%lx, " |
175 | "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n", | 166 | "cfg:0x%x, x_count:0x%x, x_modify:0x%x\n", |
176 | i, host->sg_cpu[i].start_addr, | 167 | i, host->sg_cpu[i].start_addr, |
177 | host->sg_cpu[i].cfg, host->sg_cpu[i].x_count, | 168 | host->sg_cpu[i].cfg, host->sg_cpu[i].x_count, |
178 | host->sg_cpu[i].x_modify); | 169 | host->sg_cpu[i].x_modify); |
@@ -188,7 +179,6 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data) | |||
188 | set_dma_curr_desc_addr(host->dma_ch, (unsigned long *)host->sg_dma); | 179 | set_dma_curr_desc_addr(host->dma_ch, (unsigned long *)host->sg_dma); |
189 | set_dma_x_count(host->dma_ch, 0); | 180 | set_dma_x_count(host->dma_ch, 0); |
190 | set_dma_x_modify(host->dma_ch, 0); | 181 | set_dma_x_modify(host->dma_ch, 0); |
191 | SSYNC(); | ||
192 | set_dma_config(host->dma_ch, dma_cfg); | 182 | set_dma_config(host->dma_ch, dma_cfg); |
193 | #elif defined(CONFIG_BF51x) | 183 | #elif defined(CONFIG_BF51x) |
194 | /* RSI DMA doesn't work in array mode */ | 184 | /* RSI DMA doesn't work in array mode */ |
@@ -196,7 +186,6 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data) | |||
196 | set_dma_start_addr(host->dma_ch, sg_dma_address(&data->sg[0])); | 186 | set_dma_start_addr(host->dma_ch, sg_dma_address(&data->sg[0])); |
197 | set_dma_x_count(host->dma_ch, length / 4); | 187 | set_dma_x_count(host->dma_ch, length / 4); |
198 | set_dma_x_modify(host->dma_ch, 4); | 188 | set_dma_x_modify(host->dma_ch, 4); |
199 | SSYNC(); | ||
200 | set_dma_config(host->dma_ch, dma_cfg); | 189 | set_dma_config(host->dma_ch, dma_cfg); |
201 | #endif | 190 | #endif |
202 | bfin_write_SDH_DATA_CTL(bfin_read_SDH_DATA_CTL() | DTX_DMA_E | DTX_E); | 191 | bfin_write_SDH_DATA_CTL(bfin_read_SDH_DATA_CTL() | DTX_DMA_E | DTX_E); |
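In the BF54x branch of sdh_setup_data() (present in both columns) one descriptor-array entry is built per scatterlist segment, with transfers counted in 32-bit words (x_count = length / 4, x_modify = 4). A self-contained sketch of that fill loop, with a plain (address, length) array standing in for the DMA-mapped scatterlist and packing attributes omitted:

    #include <stddef.h>

    /* Same shape as the dma_desc_array struct shown earlier in this diff. */
    struct dma_desc_array {
            unsigned long start_addr;
            unsigned short cfg;
            unsigned short x_count;
            short x_modify;
    };

    static void fill_descriptors(struct dma_desc_array *desc,
                                 const unsigned long *addr, const size_t *len,
                                 int n, unsigned short cfg)
    {
            for (int i = 0; i < n; i++) {
                    desc[i].start_addr = addr[i];
                    desc[i].cfg        = cfg;
                    desc[i].x_count    = (unsigned short)(len[i] / 4);
                    desc[i].x_modify   = 4;   /* advance 4 bytes per word */
            }
    }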
@@ -314,6 +303,7 @@ static int sdh_data_done(struct sdh_host *host, unsigned int stat) | |||
314 | else | 303 | else |
315 | data->bytes_xfered = 0; | 304 | data->bytes_xfered = 0; |
316 | 305 | ||
306 | sdh_disable_stat_irq(host, DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN); | ||
317 | bfin_write_SDH_STATUS_CLR(DAT_END_STAT | DAT_TIMEOUT_STAT | \ | 307 | bfin_write_SDH_STATUS_CLR(DAT_END_STAT | DAT_TIMEOUT_STAT | \ |
318 | DAT_CRC_FAIL_STAT | DAT_BLK_END_STAT | RX_OVERRUN | TX_UNDERRUN); | 308 | DAT_CRC_FAIL_STAT | DAT_BLK_END_STAT | RX_OVERRUN | TX_UNDERRUN); |
319 | bfin_write_SDH_DATA_CTL(0); | 309 | bfin_write_SDH_DATA_CTL(0); |
@@ -338,115 +328,74 @@ static void sdh_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
338 | dev_dbg(mmc_dev(host->mmc), "%s enter, mrq:%p, cmd:%p\n", __func__, mrq, mrq->cmd); | 328 | dev_dbg(mmc_dev(host->mmc), "%s enter, mrq:%p, cmd:%p\n", __func__, mrq, mrq->cmd); |
339 | WARN_ON(host->mrq != NULL); | 329 | WARN_ON(host->mrq != NULL); |
340 | 330 | ||
341 | spin_lock(&host->lock); | ||
342 | host->mrq = mrq; | 331 | host->mrq = mrq; |
343 | host->data = mrq->data; | 332 | host->data = mrq->data; |
344 | 333 | ||
345 | if (mrq->data && mrq->data->flags & MMC_DATA_READ) { | 334 | if (mrq->data && mrq->data->flags & MMC_DATA_READ) { |
346 | ret = sdh_setup_data(host, mrq->data); | 335 | ret = sdh_setup_data(host, mrq->data); |
347 | if (ret) | 336 | if (ret) |
348 | goto data_err; | 337 | return; |
349 | } | 338 | } |
350 | 339 | ||
351 | sdh_start_cmd(host, mrq->cmd); | 340 | sdh_start_cmd(host, mrq->cmd); |
352 | data_err: | ||
353 | spin_unlock(&host->lock); | ||
354 | } | 341 | } |
355 | 342 | ||
356 | static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | 343 | static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) |
357 | { | 344 | { |
358 | struct sdh_host *host; | 345 | struct sdh_host *host; |
346 | unsigned long flags; | ||
359 | u16 clk_ctl = 0; | 347 | u16 clk_ctl = 0; |
360 | #ifndef RSI_BLKSZ | ||
361 | u16 pwr_ctl = 0; | 348 | u16 pwr_ctl = 0; |
362 | #endif | ||
363 | u16 cfg; | 349 | u16 cfg; |
364 | host = mmc_priv(mmc); | 350 | host = mmc_priv(mmc); |
365 | 351 | ||
366 | spin_lock(&host->lock); | 352 | spin_lock_irqsave(&host->lock, flags); |
353 | if (ios->clock) { | ||
354 | unsigned long sys_clk, ios_clk; | ||
355 | unsigned char clk_div; | ||
356 | ios_clk = 2 * ios->clock; | ||
357 | sys_clk = get_sclk(); | ||
358 | clk_div = sys_clk / ios_clk; | ||
359 | if (sys_clk % ios_clk == 0) | ||
360 | clk_div -= 1; | ||
361 | clk_div = min_t(unsigned char, clk_div, 0xFF); | ||
362 | clk_ctl |= clk_div; | ||
363 | clk_ctl |= CLK_E; | ||
364 | host->clk_div = clk_div; | ||
365 | } else | ||
366 | sdh_stop_clock(host); | ||
367 | 367 | ||
368 | cfg = bfin_read_SDH_CFG(); | 368 | if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) |
369 | cfg |= MWE; | 369 | #ifdef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND |
370 | switch (ios->bus_width) { | 370 | pwr_ctl |= ROD_CTL; |
371 | case MMC_BUS_WIDTH_4: | 371 | #else |
372 | #ifndef RSI_BLKSZ | 372 | pwr_ctl |= SD_CMD_OD | ROD_CTL; |
373 | cfg &= ~PD_SDDAT3; | ||
374 | #endif | 373 | #endif |
375 | cfg |= PUP_SDDAT3; | 374 | |
376 | /* Enable 4 bit SDIO */ | 375 | if (ios->bus_width == MMC_BUS_WIDTH_4) { |
377 | cfg |= SD4E; | 376 | cfg = bfin_read_SDH_CFG(); |
378 | clk_ctl |= WIDE_BUS_4; | ||
379 | break; | ||
380 | case MMC_BUS_WIDTH_8: | ||
381 | #ifndef RSI_BLKSZ | ||
382 | cfg &= ~PD_SDDAT3; | 377 | cfg &= ~PD_SDDAT3; |
383 | #endif | ||
384 | cfg |= PUP_SDDAT3; | 378 | cfg |= PUP_SDDAT3; |
385 | /* Disable 4 bit SDIO */ | 379 | /* Enable 4 bit SDIO */ |
386 | cfg &= ~SD4E; | 380 | cfg |= (SD4E | MWE); |
387 | clk_ctl |= BYTE_BUS_8; | 381 | bfin_write_SDH_CFG(cfg); |
388 | break; | 382 | clk_ctl |= WIDE_BUS; |
389 | default: | 383 | } else { |
390 | cfg &= ~PUP_SDDAT3; | 384 | cfg = bfin_read_SDH_CFG(); |
391 | /* Disable 4 bit SDIO */ | 385 | cfg |= MWE; |
392 | cfg &= ~SD4E; | 386 | bfin_write_SDH_CFG(cfg); |
393 | } | 387 | } |
394 | 388 | ||
395 | host->power_mode = ios->power_mode; | 389 | bfin_write_SDH_CLK_CTL(clk_ctl); |
396 | #ifndef RSI_BLKSZ | ||
397 | if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { | ||
398 | pwr_ctl |= ROD_CTL; | ||
399 | # ifndef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND | ||
400 | pwr_ctl |= SD_CMD_OD; | ||
401 | # endif | ||
402 | } | ||
403 | 390 | ||
404 | if (ios->power_mode != MMC_POWER_OFF) | 391 | host->power_mode = ios->power_mode; |
392 | if (ios->power_mode == MMC_POWER_ON) | ||
405 | pwr_ctl |= PWR_ON; | 393 | pwr_ctl |= PWR_ON; |
406 | else | ||
407 | pwr_ctl &= ~PWR_ON; | ||
408 | 394 | ||
409 | bfin_write_SDH_PWR_CTL(pwr_ctl); | 395 | bfin_write_SDH_PWR_CTL(pwr_ctl); |
410 | #else | ||
411 | # ifndef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND | ||
412 | if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) | ||
413 | cfg |= SD_CMD_OD; | ||
414 | else | ||
415 | cfg &= ~SD_CMD_OD; | ||
416 | # endif | ||
417 | |||
418 | |||
419 | if (ios->power_mode != MMC_POWER_OFF) | ||
420 | cfg |= PWR_ON; | ||
421 | else | ||
422 | cfg &= ~PWR_ON; | ||
423 | |||
424 | bfin_write_SDH_CFG(cfg); | ||
425 | #endif | ||
426 | SSYNC(); | 396 | SSYNC(); |
427 | 397 | ||
428 | if (ios->power_mode == MMC_POWER_ON && ios->clock) { | 398 | spin_unlock_irqrestore(&host->lock, flags); |
429 | unsigned char clk_div; | ||
430 | clk_div = (get_sclk() / ios->clock - 1) / 2; | ||
431 | clk_div = min_t(unsigned char, clk_div, 0xFF); | ||
432 | clk_ctl |= clk_div; | ||
433 | clk_ctl |= CLK_E; | ||
434 | host->clk_div = clk_div; | ||
435 | bfin_write_SDH_CLK_CTL(clk_ctl); | ||
436 | |||
437 | } else | ||
438 | sdh_stop_clock(host); | ||
439 | |||
440 | /* set up sdh interrupt mask*/ | ||
441 | if (ios->power_mode == MMC_POWER_ON) | ||
442 | bfin_write_SDH_MASK0(DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | | ||
443 | RX_OVERRUN | TX_UNDERRUN | CMD_SENT | CMD_RESP_END | | ||
444 | CMD_TIME_OUT | CMD_CRC_FAIL); | ||
445 | else | ||
446 | bfin_write_SDH_MASK0(0); | ||
447 | SSYNC(); | ||
448 | |||
449 | spin_unlock(&host->lock); | ||
450 | 399 | ||
451 | dev_dbg(mmc_dev(host->mmc), "SDH: clk_div = 0x%x actual clock:%ld expected clock:%d\n", | 400 | dev_dbg(mmc_dev(host->mmc), "SDH: clk_div = 0x%x actual clock:%ld expected clock:%d\n", |
452 | host->clk_div, | 401 | host->clk_div, |
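Both versions of sdh_set_ios() derive the divider from the same relation the cycle_ns computation in sdh_setup_data() encodes: f_card = SCLK / (2 * (clk_div + 1)). Worked examples (the SCLK values are illustrative): with SCLK = 100 MHz and a 25 MHz request, either formula yields clk_div = 1 and an exact 25 MHz card clock; with SCLK = 133 MHz, both yield clk_div = 2 and roughly 22.2 MHz.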
@@ -463,7 +412,7 @@ static irqreturn_t sdh_dma_irq(int irq, void *devid) | |||
463 | { | 412 | { |
464 | struct sdh_host *host = devid; | 413 | struct sdh_host *host = devid; |
465 | 414 | ||
466 | dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04lx\n", __func__, | 415 | dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04x\n", __func__, |
467 | get_dma_curr_irqstat(host->dma_ch)); | 416 | get_dma_curr_irqstat(host->dma_ch)); |
468 | clear_dma_irqstat(host->dma_ch); | 417 | clear_dma_irqstat(host->dma_ch); |
469 | SSYNC(); | 418 | SSYNC(); |
@@ -478,9 +427,6 @@ static irqreturn_t sdh_stat_irq(int irq, void *devid) | |||
478 | int handled = 0; | 427 | int handled = 0; |
479 | 428 | ||
480 | dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__); | 429 | dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__); |
481 | |||
482 | spin_lock(&host->lock); | ||
483 | |||
484 | status = bfin_read_SDH_E_STATUS(); | 430 | status = bfin_read_SDH_E_STATUS(); |
485 | if (status & SD_CARD_DET) { | 431 | if (status & SD_CARD_DET) { |
486 | mmc_detect_change(host->mmc, 0); | 432 | mmc_detect_change(host->mmc, 0); |
@@ -498,31 +444,12 @@ static irqreturn_t sdh_stat_irq(int irq, void *devid) | |||
498 | if (status & (DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN)) | 444 | if (status & (DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN)) |
499 | handled |= sdh_data_done(host, status); | 445 | handled |= sdh_data_done(host, status); |
500 | 446 | ||
501 | spin_unlock(&host->lock); | ||
502 | |||
503 | dev_dbg(mmc_dev(host->mmc), "%s exit\n\n", __func__); | 447 | dev_dbg(mmc_dev(host->mmc), "%s exit\n\n", __func__); |
504 | 448 | ||
505 | return IRQ_RETVAL(handled); | 449 | return IRQ_RETVAL(handled); |
506 | } | 450 | } |
507 | 451 | ||
508 | static void sdh_reset(void) | 452 | static int __devinit sdh_probe(struct platform_device *pdev) |
509 | { | ||
510 | #if defined(CONFIG_BF54x) | ||
511 | /* Secure Digital Host shares DMA with Nand controller */ | ||
512 | bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1); | ||
513 | #endif | ||
514 | |||
515 | bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN); | ||
516 | SSYNC(); | ||
517 | |||
518 | /* Disable card inserting detection pin. set MMC_CAP_NEEDS_POLL, and | ||
519 | * mmc stack will do the detection. | ||
520 | */ | ||
521 | bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3)); | ||
522 | SSYNC(); | ||
523 | } | ||
524 | |||
525 | static int sdh_probe(struct platform_device *pdev) | ||
526 | { | 453 | { |
527 | struct mmc_host *mmc; | 454 | struct mmc_host *mmc; |
528 | struct sdh_host *host; | 455 | struct sdh_host *host; |
@@ -542,16 +469,8 @@ static int sdh_probe(struct platform_device *pdev) | |||
542 | } | 469 | } |
543 | 470 | ||
544 | mmc->ops = &sdh_ops; | 471 | mmc->ops = &sdh_ops; |
545 | #if defined(CONFIG_BF51x) | 472 | mmc->max_segs = 32; |
546 | mmc->max_segs = 1; | ||
547 | #else | ||
548 | mmc->max_segs = PAGE_SIZE / sizeof(struct dma_desc_array); | ||
549 | #endif | ||
550 | #ifdef RSI_BLKSZ | ||
551 | mmc->max_seg_size = -1; | ||
552 | #else | ||
553 | mmc->max_seg_size = 1 << 16; | 473 | mmc->max_seg_size = 1 << 16; |
554 | #endif | ||
555 | mmc->max_blk_size = 1 << 11; | 474 | mmc->max_blk_size = 1 << 11; |
556 | mmc->max_blk_count = 1 << 11; | 475 | mmc->max_blk_count = 1 << 11; |
557 | mmc->max_req_size = PAGE_SIZE; | 476 | mmc->max_req_size = PAGE_SIZE; |
@@ -561,7 +480,6 @@ static int sdh_probe(struct platform_device *pdev) | |||
561 | mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_NEEDS_POLL; | 480 | mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_NEEDS_POLL; |
562 | host = mmc_priv(mmc); | 481 | host = mmc_priv(mmc); |
563 | host->mmc = mmc; | 482 | host->mmc = mmc; |
564 | host->sclk = get_sclk(); | ||
565 | 483 | ||
566 | spin_lock_init(&host->lock); | 484 | spin_lock_init(&host->lock); |
567 | host->irq = drv_data->irq_int0; | 485 | host->irq = drv_data->irq_int0; |
@@ -586,6 +504,7 @@ static int sdh_probe(struct platform_device *pdev) | |||
586 | } | 504 | } |
587 | 505 | ||
588 | platform_set_drvdata(pdev, mmc); | 506 | platform_set_drvdata(pdev, mmc); |
507 | mmc_add_host(mmc); | ||
589 | 508 | ||
590 | ret = request_irq(host->irq, sdh_stat_irq, 0, "SDH Status IRQ", host); | 509 | ret = request_irq(host->irq, sdh_stat_irq, 0, "SDH Status IRQ", host); |
591 | if (ret) { | 510 | if (ret) { |
@@ -598,10 +517,20 @@ static int sdh_probe(struct platform_device *pdev) | |||
598 | dev_err(&pdev->dev, "unable to request peripheral pins\n"); | 517 | dev_err(&pdev->dev, "unable to request peripheral pins\n"); |
599 | goto out4; | 518 | goto out4; |
600 | } | 519 | } |
520 | #if defined(CONFIG_BF54x) | ||
521 | /* Secure Digital Host shares DMA with Nand controller */ | ||
522 | bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1); | ||
523 | #endif | ||
601 | 524 | ||
602 | sdh_reset(); | 525 | bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN); |
526 | SSYNC(); | ||
527 | |||
528 | /* Disable card inserting detection pin. set MMC_CAP_NEEDS_POLL, and | ||
529 | * mmc stack will do the detection. | ||
530 | */ | ||
531 | bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3)); | ||
532 | SSYNC(); | ||
603 | 533 | ||
604 | mmc_add_host(mmc); | ||
605 | return 0; | 534 | return 0; |
606 | 535 | ||
607 | out4: | 536 | out4: |
@@ -617,7 +546,7 @@ out1: | |||
617 | return ret; | 546 | return ret; |
618 | } | 547 | } |
619 | 548 | ||
620 | static int sdh_remove(struct platform_device *pdev) | 549 | static int __devexit sdh_remove(struct platform_device *pdev) |
621 | { | 550 | { |
622 | struct mmc_host *mmc = platform_get_drvdata(pdev); | 551 | struct mmc_host *mmc = platform_get_drvdata(pdev); |
623 | 552 | ||
@@ -649,6 +578,7 @@ static int sdh_suspend(struct platform_device *dev, pm_message_t state) | |||
649 | if (mmc) | 578 | if (mmc) |
650 | ret = mmc_suspend_host(mmc); | 579 | ret = mmc_suspend_host(mmc); |
651 | 580 | ||
581 | bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON); | ||
652 | peripheral_free_list(drv_data->pin_req); | 582 | peripheral_free_list(drv_data->pin_req); |
653 | 583 | ||
654 | return ret; | 584 | return ret; |
@@ -666,7 +596,16 @@ static int sdh_resume(struct platform_device *dev) | |||
666 | return ret; | 596 | return ret; |
667 | } | 597 | } |
668 | 598 | ||
669 | sdh_reset(); | 599 | bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() | PWR_ON); |
600 | #if defined(CONFIG_BF54x) | ||
601 | /* Secure Digital Host shares DMA with Nand controller */ | ||
602 | bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1); | ||
603 | #endif | ||
604 | bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN); | ||
605 | SSYNC(); | ||
606 | |||
607 | bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3)); | ||
608 | SSYNC(); | ||
670 | 609 | ||
671 | if (mmc) | 610 | if (mmc) |
672 | ret = mmc_resume_host(mmc); | 611 | ret = mmc_resume_host(mmc); |
@@ -680,7 +619,7 @@ static int sdh_resume(struct platform_device *dev) | |||
680 | 619 | ||
681 | static struct platform_driver sdh_driver = { | 620 | static struct platform_driver sdh_driver = { |
682 | .probe = sdh_probe, | 621 | .probe = sdh_probe, |
683 | .remove = sdh_remove, | 622 | .remove = __devexit_p(sdh_remove), |
684 | .suspend = sdh_suspend, | 623 | .suspend = sdh_suspend, |
685 | .resume = sdh_resume, | 624 | .resume = sdh_resume, |
686 | .driver = { | 625 | .driver = { |
@@ -688,7 +627,17 @@ static struct platform_driver sdh_driver = { | |||
688 | }, | 627 | }, |
689 | }; | 628 | }; |
690 | 629 | ||
691 | module_platform_driver(sdh_driver); | 630 | static int __init sdh_init(void) |
631 | { | ||
632 | return platform_driver_register(&sdh_driver); | ||
633 | } | ||
634 | module_init(sdh_init); | ||
635 | |||
636 | static void __exit sdh_exit(void) | ||
637 | { | ||
638 | platform_driver_unregister(&sdh_driver); | ||
639 | } | ||
640 | module_exit(sdh_exit); | ||
692 | 641 | ||
693 | MODULE_DESCRIPTION("Blackfin Secure Digital Host Driver"); | 642 | MODULE_DESCRIPTION("Blackfin Secure Digital Host Driver"); |
694 | MODULE_AUTHOR("Cliff Cai, Roy Huang"); | 643 | MODULE_AUTHOR("Cliff Cai, Roy Huang"); |
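The module_platform_driver(sdh_driver) one-liner on the left and the explicit init/exit pair on the right register exactly the same platform driver; the macro only generates the boilerplate. Roughly (simplified, via the kernel's module_driver() helper) it expands to:

    static int __init sdh_driver_init(void)
    {
            return platform_driver_register(&sdh_driver);
    }
    module_init(sdh_driver_init);

    static void __exit sdh_driver_exit(void)
    {
            platform_driver_unregister(&sdh_driver);
    }
    module_exit(sdh_driver_exit);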
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c index 777ca2046b2..ce2a47b71dd 100644 --- a/drivers/mmc/host/cb710-mmc.c +++ b/drivers/mmc/host/cb710-mmc.c | |||
@@ -690,7 +690,7 @@ static int cb710_mmc_resume(struct platform_device *pdev) | |||
690 | 690 | ||
691 | #endif /* CONFIG_PM */ | 691 | #endif /* CONFIG_PM */ |
692 | 692 | ||
693 | static int cb710_mmc_init(struct platform_device *pdev) | 693 | static int __devinit cb710_mmc_init(struct platform_device *pdev) |
694 | { | 694 | { |
695 | struct cb710_slot *slot = cb710_pdev_to_slot(pdev); | 695 | struct cb710_slot *slot = cb710_pdev_to_slot(pdev); |
696 | struct cb710_chip *chip = cb710_slot_to_chip(slot); | 696 | struct cb710_chip *chip = cb710_slot_to_chip(slot); |
@@ -746,7 +746,7 @@ err_free_mmc: | |||
746 | return err; | 746 | return err; |
747 | } | 747 | } |
748 | 748 | ||
749 | static int cb710_mmc_exit(struct platform_device *pdev) | 749 | static int __devexit cb710_mmc_exit(struct platform_device *pdev) |
750 | { | 750 | { |
751 | struct cb710_slot *slot = cb710_pdev_to_slot(pdev); | 751 | struct cb710_slot *slot = cb710_pdev_to_slot(pdev); |
752 | struct mmc_host *mmc = cb710_slot_to_mmc(slot); | 752 | struct mmc_host *mmc = cb710_slot_to_mmc(slot); |
@@ -773,14 +773,25 @@ static int cb710_mmc_exit(struct platform_device *pdev) | |||
773 | static struct platform_driver cb710_mmc_driver = { | 773 | static struct platform_driver cb710_mmc_driver = { |
774 | .driver.name = "cb710-mmc", | 774 | .driver.name = "cb710-mmc", |
775 | .probe = cb710_mmc_init, | 775 | .probe = cb710_mmc_init, |
776 | .remove = cb710_mmc_exit, | 776 | .remove = __devexit_p(cb710_mmc_exit), |
777 | #ifdef CONFIG_PM | 777 | #ifdef CONFIG_PM |
778 | .suspend = cb710_mmc_suspend, | 778 | .suspend = cb710_mmc_suspend, |
779 | .resume = cb710_mmc_resume, | 779 | .resume = cb710_mmc_resume, |
780 | #endif | 780 | #endif |
781 | }; | 781 | }; |
782 | 782 | ||
783 | module_platform_driver(cb710_mmc_driver); | 783 | static int __init cb710_mmc_init_module(void) |
784 | { | ||
785 | return platform_driver_register(&cb710_mmc_driver); | ||
786 | } | ||
787 | |||
788 | static void __exit cb710_mmc_cleanup_module(void) | ||
789 | { | ||
790 | platform_driver_unregister(&cb710_mmc_driver); | ||
791 | } | ||
792 | |||
793 | module_init(cb710_mmc_init_module); | ||
794 | module_exit(cb710_mmc_cleanup_module); | ||
784 | 795 | ||
785 | MODULE_AUTHOR("Michał Mirosław <mirq-linux@rere.qmqm.pl>"); | 796 | MODULE_AUTHOR("Michał Mirosław <mirq-linux@rere.qmqm.pl>"); |
786 | MODULE_DESCRIPTION("ENE CB710 memory card reader driver - MMC/SD part"); | 797 | MODULE_DESCRIPTION("ENE CB710 memory card reader driver - MMC/SD part"); |
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index 20636772c09..0076c7448fe 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c | |||
@@ -30,12 +30,11 @@ | |||
30 | #include <linux/io.h> | 30 | #include <linux/io.h> |
31 | #include <linux/irq.h> | 31 | #include <linux/irq.h> |
32 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
33 | #include <linux/dmaengine.h> | ||
34 | #include <linux/dma-mapping.h> | 33 | #include <linux/dma-mapping.h> |
35 | #include <linux/edma.h> | ||
36 | #include <linux/mmc/mmc.h> | 34 | #include <linux/mmc/mmc.h> |
37 | 35 | ||
38 | #include <linux/platform_data/mmc-davinci.h> | 36 | #include <mach/mmc.h> |
37 | #include <mach/edma.h> | ||
39 | 38 | ||
40 | /* | 39 | /* |
41 | * Register Definitions | 40 | * Register Definitions |
@@ -161,16 +160,6 @@ module_param(rw_threshold, uint, S_IRUGO); | |||
161 | MODULE_PARM_DESC(rw_threshold, | 160 | MODULE_PARM_DESC(rw_threshold, |
162 | "Read/Write threshold. Default = 32"); | 161 | "Read/Write threshold. Default = 32"); |
163 | 162 | ||
164 | static unsigned poll_threshold = 128; | ||
165 | module_param(poll_threshold, uint, S_IRUGO); | ||
166 | MODULE_PARM_DESC(poll_threshold, | ||
167 | "Polling transaction size threshold. Default = 128"); | ||
168 | |||
169 | static unsigned poll_loopcount = 32; | ||
170 | module_param(poll_loopcount, uint, S_IRUGO); | ||
171 | MODULE_PARM_DESC(poll_loopcount, | ||
172 | "Maximum polling loop count. Default = 32"); | ||
173 | |||
174 | static unsigned __initdata use_dma = 1; | 163 | static unsigned __initdata use_dma = 1; |
175 | module_param(use_dma, uint, 0); | 164 | module_param(use_dma, uint, 0); |
176 | MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1"); | 165 | MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1"); |
@@ -201,12 +190,19 @@ struct mmc_davinci_host { | |||
201 | u32 bytes_left; | 190 | u32 bytes_left; |
202 | 191 | ||
203 | u32 rxdma, txdma; | 192 | u32 rxdma, txdma; |
204 | struct dma_chan *dma_tx; | ||
205 | struct dma_chan *dma_rx; | ||
206 | bool use_dma; | 193 | bool use_dma; |
207 | bool do_dma; | 194 | bool do_dma; |
208 | bool sdio_int; | 195 | bool sdio_int; |
209 | bool active_request; | 196 | |
197 | /* Scatterlist DMA uses one or more parameter RAM entries: | ||
198 | * the main one (associated with rxdma or txdma) plus zero or | ||
199 | * more links. The entries for a given transfer differ only | ||
200 | * by memory buffer (address, length) and link field. | ||
201 | */ | ||
202 | struct edmacc_param tx_template; | ||
203 | struct edmacc_param rx_template; | ||
204 | unsigned n_link; | ||
205 | u32 links[MAX_NR_SG - 1]; | ||
210 | 206 | ||
211 | /* For PIO we walk scatterlists one segment at a time. */ | 207 | /* For PIO we walk scatterlists one segment at a time. */ |
212 | unsigned int sg_len; | 208 | unsigned int sg_len; |
@@ -223,7 +219,6 @@ struct mmc_davinci_host { | |||
223 | #endif | 219 | #endif |
224 | }; | 220 | }; |
225 | 221 | ||
226 | static irqreturn_t mmc_davinci_irq(int irq, void *dev_id); | ||
227 | 222 | ||
228 | /* PIO only */ | 223 | /* PIO only */ |
229 | static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host) | 224 | static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host) |
@@ -381,20 +376,7 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host, | |||
381 | 376 | ||
382 | writel(cmd->arg, host->base + DAVINCI_MMCARGHL); | 377 | writel(cmd->arg, host->base + DAVINCI_MMCARGHL); |
383 | writel(cmd_reg, host->base + DAVINCI_MMCCMD); | 378 | writel(cmd_reg, host->base + DAVINCI_MMCCMD); |
384 | 379 | writel(im_val, host->base + DAVINCI_MMCIM); | |
385 | host->active_request = true; | ||
386 | |||
387 | if (!host->do_dma && host->bytes_left <= poll_threshold) { | ||
388 | u32 count = poll_loopcount; | ||
389 | |||
390 | while (host->active_request && count--) { | ||
391 | mmc_davinci_irq(0, host); | ||
392 | cpu_relax(); | ||
393 | } | ||
394 | } | ||
395 | |||
396 | if (host->active_request) | ||
397 | writel(im_val, host->base + DAVINCI_MMCIM); | ||
398 | } | 380 | } |
399 | 381 | ||
400 | /*----------------------------------------------------------------------*/ | 382 | /*----------------------------------------------------------------------*/ |
@@ -403,74 +385,153 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host, | |||
403 | 385 | ||
404 | static void davinci_abort_dma(struct mmc_davinci_host *host) | 386 | static void davinci_abort_dma(struct mmc_davinci_host *host) |
405 | { | 387 | { |
406 | struct dma_chan *sync_dev; | 388 | int sync_dev; |
407 | 389 | ||
408 | if (host->data_dir == DAVINCI_MMC_DATADIR_READ) | 390 | if (host->data_dir == DAVINCI_MMC_DATADIR_READ) |
409 | sync_dev = host->dma_rx; | 391 | sync_dev = host->rxdma; |
410 | else | 392 | else |
411 | sync_dev = host->dma_tx; | 393 | sync_dev = host->txdma; |
394 | |||
395 | edma_stop(sync_dev); | ||
396 | edma_clean_channel(sync_dev); | ||
397 | } | ||
398 | |||
399 | static void | ||
400 | mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data); | ||
401 | |||
402 | static void mmc_davinci_dma_cb(unsigned channel, u16 ch_status, void *data) | ||
403 | { | ||
404 | if (DMA_COMPLETE != ch_status) { | ||
405 | struct mmc_davinci_host *host = data; | ||
406 | |||
407 | /* Currently means: DMA Event Missed, or "null" transfer | ||
408 | * request was seen. In the future, TC errors (like bad | ||
409 | * addresses) might be presented too. | ||
410 | */ | ||
411 | dev_warn(mmc_dev(host->mmc), "DMA %s error\n", | ||
412 | (host->data->flags & MMC_DATA_WRITE) | ||
413 | ? "write" : "read"); | ||
414 | host->data->error = -EIO; | ||
415 | mmc_davinci_xfer_done(host, host->data); | ||
416 | } | ||
417 | } | ||
418 | |||
419 | /* Set up tx or rx template, to be modified and updated later */ | ||
420 | static void __init mmc_davinci_dma_setup(struct mmc_davinci_host *host, | ||
421 | bool tx, struct edmacc_param *template) | ||
422 | { | ||
423 | unsigned sync_dev; | ||
424 | const u16 acnt = 4; | ||
425 | const u16 bcnt = rw_threshold >> 2; | ||
426 | const u16 ccnt = 0; | ||
427 | u32 src_port = 0; | ||
428 | u32 dst_port = 0; | ||
429 | s16 src_bidx, dst_bidx; | ||
430 | s16 src_cidx, dst_cidx; | ||
431 | |||
432 | /* | ||
433 | * A-B Sync transfer: each DMA request is for one "frame" of | ||
434 | * rw_threshold bytes, broken into "acnt"-size chunks repeated | ||
435 | * "bcnt" times. Each segment needs "ccnt" such frames; since | ||
436 | * we tell the block layer our mmc->max_seg_size limit, we can | ||
437 | * trust (later) that it's within bounds. | ||
438 | * | ||
439 | * The FIFOs are read/written in 4-byte chunks (acnt == 4) and | ||
440 | * EDMA will optimize memory operations to use larger bursts. | ||
441 | */ | ||
442 | if (tx) { | ||
443 | sync_dev = host->txdma; | ||
444 | |||
445 | /* src_prt, ccnt, and link to be set up later */ | ||
446 | src_bidx = acnt; | ||
447 | src_cidx = acnt * bcnt; | ||
448 | |||
449 | dst_port = host->mem_res->start + DAVINCI_MMCDXR; | ||
450 | dst_bidx = 0; | ||
451 | dst_cidx = 0; | ||
452 | } else { | ||
453 | sync_dev = host->rxdma; | ||
412 | 454 | ||
413 | dmaengine_terminate_all(sync_dev); | 455 | src_port = host->mem_res->start + DAVINCI_MMCDRR; |
456 | src_bidx = 0; | ||
457 | src_cidx = 0; | ||
458 | |||
459 | /* dst_prt, ccnt, and link to be set up later */ | ||
460 | dst_bidx = acnt; | ||
461 | dst_cidx = acnt * bcnt; | ||
462 | } | ||
463 | |||
464 | /* | ||
465 | * We can't use FIFO mode for the FIFOs because MMC FIFO addresses | ||
466 | * are not 256-bit (32-byte) aligned. So we use INCR, and the W8BIT | ||
467 | * parameter is ignored. | ||
468 | */ | ||
469 | edma_set_src(sync_dev, src_port, INCR, W8BIT); | ||
470 | edma_set_dest(sync_dev, dst_port, INCR, W8BIT); | ||
471 | |||
472 | edma_set_src_index(sync_dev, src_bidx, src_cidx); | ||
473 | edma_set_dest_index(sync_dev, dst_bidx, dst_cidx); | ||
474 | |||
475 | edma_set_transfer_params(sync_dev, acnt, bcnt, ccnt, 8, ABSYNC); | ||
476 | |||
477 | edma_read_slot(sync_dev, template); | ||
478 | |||
479 | /* don't bother with irqs or chaining */ | ||
480 | template->opt |= EDMA_CHAN_SLOT(sync_dev) << 12; | ||
414 | } | 481 | } |
415 | 482 | ||
416 | static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host, | 483 | static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host, |
417 | struct mmc_data *data) | 484 | struct mmc_data *data) |
418 | { | 485 | { |
419 | struct dma_chan *chan; | 486 | struct edmacc_param *template; |
420 | struct dma_async_tx_descriptor *desc; | 487 | int channel, slot; |
421 | int ret = 0; | 488 | unsigned link; |
489 | struct scatterlist *sg; | ||
490 | unsigned sg_len; | ||
491 | unsigned bytes_left = host->bytes_left; | ||
492 | const unsigned shift = ffs(rw_threshold) - 1; | ||
422 | 493 | ||
423 | if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { | 494 | if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { |
424 | struct dma_slave_config dma_tx_conf = { | 495 | template = &host->tx_template; |
425 | .direction = DMA_MEM_TO_DEV, | 496 | channel = host->txdma; |
426 | .dst_addr = host->mem_res->start + DAVINCI_MMCDXR, | ||
427 | .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, | ||
428 | .dst_maxburst = | ||
429 | rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES, | ||
430 | }; | ||
431 | chan = host->dma_tx; | ||
432 | dmaengine_slave_config(host->dma_tx, &dma_tx_conf); | ||
433 | |||
434 | desc = dmaengine_prep_slave_sg(host->dma_tx, | ||
435 | data->sg, | ||
436 | host->sg_len, | ||
437 | DMA_MEM_TO_DEV, | ||
438 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
439 | if (!desc) { | ||
440 | dev_dbg(mmc_dev(host->mmc), | ||
441 | "failed to allocate DMA TX descriptor"); | ||
442 | ret = -1; | ||
443 | goto out; | ||
444 | } | ||
445 | } else { | 497 | } else { |
446 | struct dma_slave_config dma_rx_conf = { | 498 | template = &host->rx_template; |
447 | .direction = DMA_DEV_TO_MEM, | 499 | channel = host->rxdma; |
448 | .src_addr = host->mem_res->start + DAVINCI_MMCDRR, | ||
449 | .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, | ||
450 | .src_maxburst = | ||
451 | rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES, | ||
452 | }; | ||
453 | chan = host->dma_rx; | ||
454 | dmaengine_slave_config(host->dma_rx, &dma_rx_conf); | ||
455 | |||
456 | desc = dmaengine_prep_slave_sg(host->dma_rx, | ||
457 | data->sg, | ||
458 | host->sg_len, | ||
459 | DMA_DEV_TO_MEM, | ||
460 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
461 | if (!desc) { | ||
462 | dev_dbg(mmc_dev(host->mmc), | ||
463 | "failed to allocate DMA RX descriptor"); | ||
464 | ret = -1; | ||
465 | goto out; | ||
466 | } | ||
467 | } | 500 | } |
468 | 501 | ||
469 | dmaengine_submit(desc); | 502 | /* We know sg_len and ccnt will never be out of range because |
470 | dma_async_issue_pending(chan); | 503 | * we told the mmc layer which in turn tells the block layer |
504 | * to ensure that it only hands us one scatterlist segment | ||
505 | * per EDMA PARAM entry. Update the PARAM | ||
506 | * entries needed for each segment of this scatterlist. | ||
507 | */ | ||
508 | for (slot = channel, link = 0, sg = data->sg, sg_len = host->sg_len; | ||
509 | sg_len-- != 0 && bytes_left; | ||
510 | sg = sg_next(sg), slot = host->links[link++]) { | ||
511 | u32 buf = sg_dma_address(sg); | ||
512 | unsigned count = sg_dma_len(sg); | ||
513 | |||
514 | template->link_bcntrld = sg_len | ||
515 | ? (EDMA_CHAN_SLOT(host->links[link]) << 5) | ||
516 | : 0xffff; | ||
517 | |||
518 | if (count > bytes_left) | ||
519 | count = bytes_left; | ||
520 | bytes_left -= count; | ||
521 | |||
522 | if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) | ||
523 | template->src = buf; | ||
524 | else | ||
525 | template->dst = buf; | ||
526 | template->ccnt = count >> shift; | ||
527 | |||
528 | edma_write_slot(slot, template); | ||
529 | } | ||
471 | 530 | ||
472 | out: | 531 | if (host->version == MMC_CTLR_VERSION_2) |
473 | return ret; | 532 | edma_clear_event(channel); |
533 | |||
534 | edma_start(channel); | ||
474 | } | 535 | } |
475 | 536 | ||
476 | static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, | 537 | static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, |
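Note: the right-hand EDMA comment above describes A-B synced frames of rw_threshold bytes, and "count >> shift" turns a segment length into a frame count. A standalone check of that arithmetic (plain userspace C; the segment size is an illustrative value):

#include <stdio.h>

/* With rw_threshold = 32: one frame = acnt (4 bytes) * bcnt (8) = 32 bytes,
 * so a scatterlist segment of N bytes needs ccnt = N >> 5 frames, which is
 * exactly what "count >> shift" computes in the right-hand column.
 */
int main(void)
{
	unsigned rw_threshold = 32;
	unsigned acnt = 4;                                /* FIFO access width */
	unsigned bcnt = rw_threshold >> 2;                /* accesses per frame */
	unsigned shift = __builtin_ffs(rw_threshold) - 1; /* log2(32) = 5 */
	unsigned seg_bytes = 4096;                        /* example segment */

	printf("frame = %u bytes, ccnt = %u\n",
	       acnt * bcnt, seg_bytes >> shift);          /* 32 bytes, 128 */
	return 0;
}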
@@ -478,7 +539,6 @@ static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, | |||
478 | { | 539 | { |
479 | int i; | 540 | int i; |
480 | int mask = rw_threshold - 1; | 541 | int mask = rw_threshold - 1; |
481 | int ret = 0; | ||
482 | 542 | ||
483 | host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, | 543 | host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, |
484 | ((data->flags & MMC_DATA_WRITE) | 544 | ((data->flags & MMC_DATA_WRITE) |
@@ -498,48 +558,70 @@ static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, | |||
498 | } | 558 | } |
499 | 559 | ||
500 | host->do_dma = 1; | 560 | host->do_dma = 1; |
501 | ret = mmc_davinci_send_dma_request(host, data); | 561 | mmc_davinci_send_dma_request(host, data); |
502 | 562 | ||
503 | return ret; | 563 | return 0; |
504 | } | 564 | } |
505 | 565 | ||
506 | static void __init_or_module | 566 | static void __init_or_module |
507 | davinci_release_dma_channels(struct mmc_davinci_host *host) | 567 | davinci_release_dma_channels(struct mmc_davinci_host *host) |
508 | { | 568 | { |
569 | unsigned i; | ||
570 | |||
509 | if (!host->use_dma) | 571 | if (!host->use_dma) |
510 | return; | 572 | return; |
511 | 573 | ||
512 | dma_release_channel(host->dma_tx); | 574 | for (i = 0; i < host->n_link; i++) |
513 | dma_release_channel(host->dma_rx); | 575 | edma_free_slot(host->links[i]); |
576 | |||
577 | edma_free_channel(host->txdma); | ||
578 | edma_free_channel(host->rxdma); | ||
514 | } | 579 | } |
515 | 580 | ||
516 | static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) | 581 | static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) |
517 | { | 582 | { |
518 | int r; | 583 | u32 link_size; |
519 | dma_cap_mask_t mask; | 584 | int r, i; |
520 | 585 | ||
521 | dma_cap_zero(mask); | 586 | /* Acquire master DMA write channel */ |
522 | dma_cap_set(DMA_SLAVE, mask); | 587 | r = edma_alloc_channel(host->txdma, mmc_davinci_dma_cb, host, |
523 | 588 | EVENTQ_DEFAULT); | |
524 | host->dma_tx = | 589 | if (r < 0) { |
525 | dma_request_channel(mask, edma_filter_fn, &host->txdma); | 590 | dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n", |
526 | if (!host->dma_tx) { | 591 | "tx", r); |
527 | dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n"); | 592 | return r; |
528 | return -ENODEV; | ||
529 | } | 593 | } |
530 | 594 | mmc_davinci_dma_setup(host, true, &host->tx_template); | |
531 | host->dma_rx = | 595 | |
532 | dma_request_channel(mask, edma_filter_fn, &host->rxdma); | 596 | /* Acquire master DMA read channel */ |
533 | if (!host->dma_rx) { | 597 | r = edma_alloc_channel(host->rxdma, mmc_davinci_dma_cb, host, |
534 | dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n"); | 598 | EVENTQ_DEFAULT); |
535 | r = -ENODEV; | 599 | if (r < 0) { |
600 | dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n", | ||
601 | "rx", r); | ||
536 | goto free_master_write; | 602 | goto free_master_write; |
537 | } | 603 | } |
604 | mmc_davinci_dma_setup(host, false, &host->rx_template); | ||
605 | |||
606 | /* Allocate parameter RAM slots, which will later be bound to a | ||
607 | * channel as needed to handle a scatterlist. | ||
608 | */ | ||
609 | link_size = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links)); | ||
610 | for (i = 0; i < link_size; i++) { | ||
611 | r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY); | ||
612 | if (r < 0) { | ||
613 | dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n", | ||
614 | r); | ||
615 | break; | ||
616 | } | ||
617 | host->links[i] = r; | ||
618 | } | ||
619 | host->n_link = i; | ||
538 | 620 | ||
539 | return 0; | 621 | return 0; |
540 | 622 | ||
541 | free_master_write: | 623 | free_master_write: |
542 | dma_release_channel(host->dma_tx); | 624 | edma_free_channel(host->txdma); |
543 | 625 | ||
544 | return r; | 626 | return r; |
545 | } | 627 | } |
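Note: the left-hand code acquires its channels through the generic dmaengine API rather than the EDMA-private calls on the right. A hedged sketch of the full request/configure/submit flow that API expects (illustrative wrapper name, kernel context assumed; not a drop-in replacement for the driver code):

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Sketch only: request a slave channel via a filter callback, point it at a
 * device FIFO, then hand it a mapped scatterlist.
 */
static int example_dma_tx(bool (*filter)(struct dma_chan *, void *),
			  void *filter_param, dma_addr_t fifo_addr,
			  struct scatterlist *sg, unsigned int sg_len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,	/* rw_threshold / 4 in this driver */
	};

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, filter, filter_param);
	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);
	desc = dmaengine_prep_slave_sg(chan, sg, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_release_channel(chan);
		return -EIO;
	}

	dmaengine_submit(desc);		/* queue the descriptor */
	dma_async_issue_pending(chan);	/* start the transfer */
	return 0;
}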
@@ -725,25 +807,12 @@ static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios) | |||
725 | static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | 807 | static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) |
726 | { | 808 | { |
727 | struct mmc_davinci_host *host = mmc_priv(mmc); | 809 | struct mmc_davinci_host *host = mmc_priv(mmc); |
728 | struct platform_device *pdev = to_platform_device(mmc->parent); | ||
729 | struct davinci_mmc_config *config = pdev->dev.platform_data; | ||
730 | 810 | ||
731 | dev_dbg(mmc_dev(host->mmc), | 811 | dev_dbg(mmc_dev(host->mmc), |
732 | "clock %dHz busmode %d powermode %d Vdd %04x\n", | 812 | "clock %dHz busmode %d powermode %d Vdd %04x\n", |
733 | ios->clock, ios->bus_mode, ios->power_mode, | 813 | ios->clock, ios->bus_mode, ios->power_mode, |
734 | ios->vdd); | 814 | ios->vdd); |
735 | 815 | ||
736 | switch (ios->power_mode) { | ||
737 | case MMC_POWER_OFF: | ||
738 | if (config && config->set_power) | ||
739 | config->set_power(pdev->id, false); | ||
740 | break; | ||
741 | case MMC_POWER_UP: | ||
742 | if (config && config->set_power) | ||
743 | config->set_power(pdev->id, true); | ||
744 | break; | ||
745 | } | ||
746 | |||
747 | switch (ios->bus_width) { | 816 | switch (ios->bus_width) { |
748 | case MMC_BUS_WIDTH_8: | 817 | case MMC_BUS_WIDTH_8: |
749 | dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n"); | 818 | dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n"); |
@@ -833,7 +902,6 @@ mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data) | |||
833 | if (!data->stop || (host->cmd && host->cmd->error)) { | 902 | if (!data->stop || (host->cmd && host->cmd->error)) { |
834 | mmc_request_done(host->mmc, data->mrq); | 903 | mmc_request_done(host->mmc, data->mrq); |
835 | writel(0, host->base + DAVINCI_MMCIM); | 904 | writel(0, host->base + DAVINCI_MMCIM); |
836 | host->active_request = false; | ||
837 | } else | 905 | } else |
838 | mmc_davinci_start_command(host, data->stop); | 906 | mmc_davinci_start_command(host, data->stop); |
839 | } | 907 | } |
@@ -861,7 +929,6 @@ static void mmc_davinci_cmd_done(struct mmc_davinci_host *host, | |||
861 | cmd->mrq->cmd->retries = 0; | 929 | cmd->mrq->cmd->retries = 0; |
862 | mmc_request_done(host->mmc, cmd->mrq); | 930 | mmc_request_done(host->mmc, cmd->mrq); |
863 | writel(0, host->base + DAVINCI_MMCIM); | 931 | writel(0, host->base + DAVINCI_MMCIM); |
864 | host->active_request = false; | ||
865 | } | 932 | } |
866 | } | 933 | } |
867 | 934 | ||
@@ -929,33 +996,12 @@ static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) | |||
929 | * by read. So, it is not an unbounded loop even in the case of | 996 | * by read. So, it is not an unbounded loop even in the case of |
930 | * non-dma. | 997 | * non-dma. |
931 | */ | 998 | */ |
932 | if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) { | 999 | while (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) { |
933 | unsigned long im_val; | 1000 | davinci_fifo_data_trans(host, rw_threshold); |
934 | 1001 | status = readl(host->base + DAVINCI_MMCST0); | |
935 | /* | 1002 | if (!status) |
936 | * If interrupts fire during the following loop, they will be | 1003 | break; |
937 | * handled by the handler, but the PIC will still buffer these. | 1004 | qstatus |= status; |
938 | * As a result, the handler will be called again to serve these | ||
939 | * needlessly. In order to avoid these spurious interrupts, | ||
940 | * keep interrupts masked during the loop. | ||
941 | */ | ||
942 | im_val = readl(host->base + DAVINCI_MMCIM); | ||
943 | writel(0, host->base + DAVINCI_MMCIM); | ||
944 | |||
945 | do { | ||
946 | davinci_fifo_data_trans(host, rw_threshold); | ||
947 | status = readl(host->base + DAVINCI_MMCST0); | ||
948 | qstatus |= status; | ||
949 | } while (host->bytes_left && | ||
950 | (status & (MMCST0_DXRDY | MMCST0_DRRDY))); | ||
951 | |||
952 | /* | ||
953 | * If an interrupt is pending, it is assumed it will fire when | ||
954 | * it is unmasked. This assumption is also taken when the MMCIM | ||
955 | * is first set. Otherwise, writing to MMCIM after reading the | ||
956 | * status is race-prone. | ||
957 | */ | ||
958 | writel(im_val, host->base + DAVINCI_MMCIM); | ||
959 | } | 1005 | } |
960 | 1006 | ||
961 | if (qstatus & MMCST0_DATDNE) { | 1007 | if (qstatus & MMCST0_DATDNE) { |
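Note: the comment removed in the left column explains why MMCIM is masked around the PIO drain loop (interrupts raised while draining would be buffered and re-enter the handler needlessly). Assembled from those removed left-hand lines into one readable sketch (not a drop-in patch; the real loop also ORs each status read into qstatus):

static void davinci_pio_drain_masked(struct mmc_davinci_host *host)
{
	u32 im_val, status;

	im_val = readl(host->base + DAVINCI_MMCIM);	/* save interrupt mask */
	writel(0, host->base + DAVINCI_MMCIM);		/* mask while polling */

	do {
		davinci_fifo_data_trans(host, rw_threshold);
		status = readl(host->base + DAVINCI_MMCST0);
	} while (host->bytes_left &&
		 (status & (MMCST0_DXRDY | MMCST0_DRRDY)));

	writel(im_val, host->base + DAVINCI_MMCIM);	/* re-arm interrupts */
}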
@@ -1252,7 +1298,7 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev) | |||
1252 | * Each hw_seg uses one EDMA parameter RAM slot, always one | 1298 | * Each hw_seg uses one EDMA parameter RAM slot, always one |
1253 | * channel and then usually some linked slots. | 1299 | * channel and then usually some linked slots. |
1254 | */ | 1300 | */ |
1255 | mmc->max_segs = MAX_NR_SG; | 1301 | mmc->max_segs = 1 + host->n_link; |
1256 | 1302 | ||
1257 | /* EDMA limit per hw segment (one or two MBytes) */ | 1303 | /* EDMA limit per hw segment (one or two MBytes) */ |
1258 | mmc->max_seg_size = MAX_CCNT * rw_threshold; | 1304 | mmc->max_seg_size = MAX_CCNT * rw_threshold; |
@@ -1359,14 +1405,17 @@ static int davinci_mmcsd_suspend(struct device *dev) | |||
1359 | struct mmc_davinci_host *host = platform_get_drvdata(pdev); | 1405 | struct mmc_davinci_host *host = platform_get_drvdata(pdev); |
1360 | int ret; | 1406 | int ret; |
1361 | 1407 | ||
1408 | mmc_host_enable(host->mmc); | ||
1362 | ret = mmc_suspend_host(host->mmc); | 1409 | ret = mmc_suspend_host(host->mmc); |
1363 | if (!ret) { | 1410 | if (!ret) { |
1364 | writel(0, host->base + DAVINCI_MMCIM); | 1411 | writel(0, host->base + DAVINCI_MMCIM); |
1365 | mmc_davinci_reset_ctrl(host, 1); | 1412 | mmc_davinci_reset_ctrl(host, 1); |
1413 | mmc_host_disable(host->mmc); | ||
1366 | clk_disable(host->clk); | 1414 | clk_disable(host->clk); |
1367 | host->suspended = 1; | 1415 | host->suspended = 1; |
1368 | } else { | 1416 | } else { |
1369 | host->suspended = 0; | 1417 | host->suspended = 0; |
1418 | mmc_host_disable(host->mmc); | ||
1370 | } | 1419 | } |
1371 | 1420 | ||
1372 | return ret; | 1421 | return ret; |
@@ -1382,6 +1431,7 @@ static int davinci_mmcsd_resume(struct device *dev) | |||
1382 | return 0; | 1431 | return 0; |
1383 | 1432 | ||
1384 | clk_enable(host->clk); | 1433 | clk_enable(host->clk); |
1434 | mmc_host_enable(host->mmc); | ||
1385 | 1435 | ||
1386 | mmc_davinci_reset_ctrl(host, 0); | 1436 | mmc_davinci_reset_ctrl(host, 0); |
1387 | ret = mmc_resume_host(host->mmc); | 1437 | ret = mmc_resume_host(host->mmc); |
@@ -1426,5 +1476,4 @@ module_exit(davinci_mmcsd_exit); | |||
1426 | MODULE_AUTHOR("Texas Instruments India"); | 1476 | MODULE_AUTHOR("Texas Instruments India"); |
1427 | MODULE_LICENSE("GPL"); | 1477 | MODULE_LICENSE("GPL"); |
1428 | MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller"); | 1478 | MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller"); |
1429 | MODULE_ALIAS("platform:davinci_mmc"); | ||
1430 | 1479 | ||
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c deleted file mode 100644 index 4d50da61816..00000000000 --- a/drivers/mmc/host/dw_mmc-exynos.c +++ /dev/null | |||
@@ -1,253 +0,0 @@ | |||
1 | /* | ||
2 | * Exynos Specific Extensions for Synopsys DW Multimedia Card Interface driver | ||
3 | * | ||
4 | * Copyright (C) 2012, Samsung Electronics Co., Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/platform_device.h> | ||
14 | #include <linux/clk.h> | ||
15 | #include <linux/mmc/host.h> | ||
16 | #include <linux/mmc/dw_mmc.h> | ||
17 | #include <linux/of.h> | ||
18 | #include <linux/of_gpio.h> | ||
19 | |||
20 | #include "dw_mmc.h" | ||
21 | #include "dw_mmc-pltfm.h" | ||
22 | |||
23 | #define NUM_PINS(x) (x + 2) | ||
24 | |||
25 | #define SDMMC_CLKSEL 0x09C | ||
26 | #define SDMMC_CLKSEL_CCLK_SAMPLE(x) (((x) & 7) << 0) | ||
27 | #define SDMMC_CLKSEL_CCLK_DRIVE(x) (((x) & 7) << 16) | ||
28 | #define SDMMC_CLKSEL_CCLK_DIVIDER(x) (((x) & 7) << 24) | ||
29 | #define SDMMC_CLKSEL_GET_DRV_WD3(x) (((x) >> 16) & 0x7) | ||
30 | #define SDMMC_CLKSEL_TIMING(x, y, z) (SDMMC_CLKSEL_CCLK_SAMPLE(x) | \ | ||
31 | SDMMC_CLKSEL_CCLK_DRIVE(y) | \ | ||
32 | SDMMC_CLKSEL_CCLK_DIVIDER(z)) | ||
33 | |||
34 | #define SDMMC_CMD_USE_HOLD_REG BIT(29) | ||
35 | |||
36 | #define EXYNOS4210_FIXED_CIU_CLK_DIV 2 | ||
37 | #define EXYNOS4412_FIXED_CIU_CLK_DIV 4 | ||
38 | |||
39 | /* Variations in Exynos specific dw-mshc controller */ | ||
40 | enum dw_mci_exynos_type { | ||
41 | DW_MCI_TYPE_EXYNOS4210, | ||
42 | DW_MCI_TYPE_EXYNOS4412, | ||
43 | DW_MCI_TYPE_EXYNOS5250, | ||
44 | }; | ||
45 | |||
46 | /* Exynos implementation specific driver private data */ | ||
47 | struct dw_mci_exynos_priv_data { | ||
48 | enum dw_mci_exynos_type ctrl_type; | ||
49 | u8 ciu_div; | ||
50 | u32 sdr_timing; | ||
51 | u32 ddr_timing; | ||
52 | }; | ||
53 | |||
54 | static struct dw_mci_exynos_compatible { | ||
55 | char *compatible; | ||
56 | enum dw_mci_exynos_type ctrl_type; | ||
57 | } exynos_compat[] = { | ||
58 | { | ||
59 | .compatible = "samsung,exynos4210-dw-mshc", | ||
60 | .ctrl_type = DW_MCI_TYPE_EXYNOS4210, | ||
61 | }, { | ||
62 | .compatible = "samsung,exynos4412-dw-mshc", | ||
63 | .ctrl_type = DW_MCI_TYPE_EXYNOS4412, | ||
64 | }, { | ||
65 | .compatible = "samsung,exynos5250-dw-mshc", | ||
66 | .ctrl_type = DW_MCI_TYPE_EXYNOS5250, | ||
67 | }, | ||
68 | }; | ||
69 | |||
70 | static int dw_mci_exynos_priv_init(struct dw_mci *host) | ||
71 | { | ||
72 | struct dw_mci_exynos_priv_data *priv; | ||
73 | int idx; | ||
74 | |||
75 | priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL); | ||
76 | if (!priv) { | ||
77 | dev_err(host->dev, "mem alloc failed for private data\n"); | ||
78 | return -ENOMEM; | ||
79 | } | ||
80 | |||
81 | for (idx = 0; idx < ARRAY_SIZE(exynos_compat); idx++) { | ||
82 | if (of_device_is_compatible(host->dev->of_node, | ||
83 | exynos_compat[idx].compatible)) | ||
84 | priv->ctrl_type = exynos_compat[idx].ctrl_type; | ||
85 | } | ||
86 | |||
87 | host->priv = priv; | ||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | static int dw_mci_exynos_setup_clock(struct dw_mci *host) | ||
92 | { | ||
93 | struct dw_mci_exynos_priv_data *priv = host->priv; | ||
94 | |||
95 | if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5250) | ||
96 | host->bus_hz /= (priv->ciu_div + 1); | ||
97 | else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412) | ||
98 | host->bus_hz /= EXYNOS4412_FIXED_CIU_CLK_DIV; | ||
99 | else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210) | ||
100 | host->bus_hz /= EXYNOS4210_FIXED_CIU_CLK_DIV; | ||
101 | |||
102 | return 0; | ||
103 | } | ||
104 | |||
105 | static void dw_mci_exynos_prepare_command(struct dw_mci *host, u32 *cmdr) | ||
106 | { | ||
107 | /* | ||
108 | * Exynos4412 and Exynos5250 extend the use of the CMD register with the | ||
109 | * use of bit 29 (which is reserved on standard MSHC controllers) for | ||
110 | * optionally bypassing the HOLD register for command and data. The | ||
111 | * HOLD register should be bypassed in case there is no phase shift | ||
112 | * applied on CMD/DATA that is sent to the card. | ||
113 | */ | ||
114 | if (SDMMC_CLKSEL_GET_DRV_WD3(mci_readl(host, CLKSEL))) | ||
115 | *cmdr |= SDMMC_CMD_USE_HOLD_REG; | ||
116 | } | ||
117 | |||
118 | static void dw_mci_exynos_set_ios(struct dw_mci *host, struct mmc_ios *ios) | ||
119 | { | ||
120 | struct dw_mci_exynos_priv_data *priv = host->priv; | ||
121 | |||
122 | if (ios->timing == MMC_TIMING_UHS_DDR50) | ||
123 | mci_writel(host, CLKSEL, priv->ddr_timing); | ||
124 | else | ||
125 | mci_writel(host, CLKSEL, priv->sdr_timing); | ||
126 | } | ||
127 | |||
128 | static int dw_mci_exynos_parse_dt(struct dw_mci *host) | ||
129 | { | ||
130 | struct dw_mci_exynos_priv_data *priv = host->priv; | ||
131 | struct device_node *np = host->dev->of_node; | ||
132 | u32 timing[2]; | ||
133 | u32 div = 0; | ||
134 | int ret; | ||
135 | |||
136 | of_property_read_u32(np, "samsung,dw-mshc-ciu-div", &div); | ||
137 | priv->ciu_div = div; | ||
138 | |||
139 | ret = of_property_read_u32_array(np, | ||
140 | "samsung,dw-mshc-sdr-timing", timing, 2); | ||
141 | if (ret) | ||
142 | return ret; | ||
143 | |||
144 | priv->sdr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div); | ||
145 | |||
146 | ret = of_property_read_u32_array(np, | ||
147 | "samsung,dw-mshc-ddr-timing", timing, 2); | ||
148 | if (ret) | ||
149 | return ret; | ||
150 | |||
151 | priv->ddr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div); | ||
152 | return 0; | ||
153 | } | ||
154 | |||
155 | static int dw_mci_exynos_setup_bus(struct dw_mci *host, | ||
156 | struct device_node *slot_np, u8 bus_width) | ||
157 | { | ||
158 | int idx, gpio, ret; | ||
159 | |||
160 | if (!slot_np) | ||
161 | return -EINVAL; | ||
162 | |||
163 | /* cmd + clock + bus-width pins */ | ||
164 | for (idx = 0; idx < NUM_PINS(bus_width); idx++) { | ||
165 | gpio = of_get_gpio(slot_np, idx); | ||
166 | if (!gpio_is_valid(gpio)) { | ||
167 | dev_err(host->dev, "invalid gpio: %d\n", gpio); | ||
168 | return -EINVAL; | ||
169 | } | ||
170 | |||
171 | ret = devm_gpio_request(host->dev, gpio, "dw-mci-bus"); | ||
172 | if (ret) { | ||
173 | dev_err(host->dev, "gpio [%d] request failed\n", gpio); | ||
174 | return -EBUSY; | ||
175 | } | ||
176 | } | ||
177 | |||
178 | gpio = of_get_named_gpio(slot_np, "wp-gpios", 0); | ||
179 | if (gpio_is_valid(gpio)) { | ||
180 | if (devm_gpio_request(host->dev, gpio, "dw-mci-wp")) | ||
181 | dev_info(host->dev, "gpio [%d] request failed\n", | ||
182 | gpio); | ||
183 | } else { | ||
184 | dev_info(host->dev, "wp gpio not available"); | ||
185 | host->pdata->quirks |= DW_MCI_QUIRK_NO_WRITE_PROTECT; | ||
186 | } | ||
187 | |||
188 | if (host->pdata->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) | ||
189 | return 0; | ||
190 | |||
191 | gpio = of_get_named_gpio(slot_np, "samsung,cd-pinmux-gpio", 0); | ||
192 | if (gpio_is_valid(gpio)) { | ||
193 | if (devm_gpio_request(host->dev, gpio, "dw-mci-cd")) | ||
194 | dev_err(host->dev, "gpio [%d] request failed\n", gpio); | ||
195 | } else { | ||
196 | dev_info(host->dev, "cd gpio not available"); | ||
197 | } | ||
198 | |||
199 | return 0; | ||
200 | } | ||
201 | |||
202 | /* Exynos5250 controller specific capabilities */ | ||
203 | static unsigned long exynos5250_dwmmc_caps[4] = { | ||
204 | MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR | | ||
205 | MMC_CAP_8_BIT_DATA | MMC_CAP_CMD23, | ||
206 | MMC_CAP_CMD23, | ||
207 | MMC_CAP_CMD23, | ||
208 | MMC_CAP_CMD23, | ||
209 | }; | ||
210 | |||
211 | static const struct dw_mci_drv_data exynos5250_drv_data = { | ||
212 | .caps = exynos5250_dwmmc_caps, | ||
213 | .init = dw_mci_exynos_priv_init, | ||
214 | .setup_clock = dw_mci_exynos_setup_clock, | ||
215 | .prepare_command = dw_mci_exynos_prepare_command, | ||
216 | .set_ios = dw_mci_exynos_set_ios, | ||
217 | .parse_dt = dw_mci_exynos_parse_dt, | ||
218 | .setup_bus = dw_mci_exynos_setup_bus, | ||
219 | }; | ||
220 | |||
221 | static const struct of_device_id dw_mci_exynos_match[] = { | ||
222 | { .compatible = "samsung,exynos5250-dw-mshc", | ||
223 | .data = &exynos5250_drv_data, }, | ||
224 | {}, | ||
225 | }; | ||
226 | MODULE_DEVICE_TABLE(of, dw_mci_exynos_match); | ||
227 | |||
228 | int dw_mci_exynos_probe(struct platform_device *pdev) | ||
229 | { | ||
230 | const struct dw_mci_drv_data *drv_data; | ||
231 | const struct of_device_id *match; | ||
232 | |||
233 | match = of_match_node(dw_mci_exynos_match, pdev->dev.of_node); | ||
234 | drv_data = match->data; | ||
235 | return dw_mci_pltfm_register(pdev, drv_data); | ||
236 | } | ||
237 | |||
238 | static struct platform_driver dw_mci_exynos_pltfm_driver = { | ||
239 | .probe = dw_mci_exynos_probe, | ||
240 | .remove = __exit_p(dw_mci_pltfm_remove), | ||
241 | .driver = { | ||
242 | .name = "dwmmc_exynos", | ||
243 | .of_match_table = of_match_ptr(dw_mci_exynos_match), | ||
244 | .pm = &dw_mci_pltfm_pmops, | ||
245 | }, | ||
246 | }; | ||
247 | |||
248 | module_platform_driver(dw_mci_exynos_pltfm_driver); | ||
249 | |||
250 | MODULE_DESCRIPTION("Samsung Specific DW-MSHC Driver Extension"); | ||
251 | MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com"); | ||
252 | MODULE_LICENSE("GPL v2"); | ||
253 | MODULE_ALIAS("platform:dwmmc-exynos"); | ||
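Note: the deleted Exynos glue packs the DT-provided sample/drive phases and divider into CLKSEL via SDMMC_CLKSEL_TIMING(). A standalone check of that bit packing (the DT values below are hypothetical, chosen only to show the layout):

#include <stdio.h>
#include <stdint.h>

/* Bit layout copied from the deleted file: sample phase in bits [2:0],
 * drive phase in [18:16], divider in [26:24].
 */
#define SDMMC_CLKSEL_CCLK_SAMPLE(x)	(((x) & 7) << 0)
#define SDMMC_CLKSEL_CCLK_DRIVE(x)	(((x) & 7) << 16)
#define SDMMC_CLKSEL_CCLK_DIVIDER(x)	(((x) & 7) << 24)
#define SDMMC_CLKSEL_TIMING(x, y, z)	(SDMMC_CLKSEL_CCLK_SAMPLE(x) | \
					 SDMMC_CLKSEL_CCLK_DRIVE(y) | \
					 SDMMC_CLKSEL_CCLK_DIVIDER(z))

int main(void)
{
	/* e.g. a hypothetical samsung,dw-mshc-sdr-timing of <2 3> with ciu div 3 */
	uint32_t clksel = SDMMC_CLKSEL_TIMING(2, 3, 3);

	printf("CLKSEL = 0x%08x\n", clksel);	/* prints 0x03030002 */
	return 0;
}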
diff --git a/drivers/mmc/host/dw_mmc-pci.c b/drivers/mmc/host/dw_mmc-pci.c deleted file mode 100644 index 083fcd29c9c..00000000000 --- a/drivers/mmc/host/dw_mmc-pci.c +++ /dev/null | |||
@@ -1,147 +0,0 @@ | |||
1 | /* | ||
2 | * Synopsys DesignWare Multimedia Card PCI Interface driver | ||
3 | * | ||
4 | * Copyright (C) 2012 Vayavya Labs Pvt. Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/io.h> | ||
15 | #include <linux/irq.h> | ||
16 | #include <linux/pci.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/mmc/host.h> | ||
19 | #include <linux/mmc/mmc.h> | ||
20 | #include <linux/mmc/dw_mmc.h> | ||
21 | #include "dw_mmc.h" | ||
22 | |||
23 | #define PCI_BAR_NO 2 | ||
24 | #define COMPLETE_BAR 0 | ||
25 | #define SYNOPSYS_DW_MCI_VENDOR_ID 0x700 | ||
26 | #define SYNOPSYS_DW_MCI_DEVICE_ID 0x1107 | ||
27 | /* Defining the Capabilities */ | ||
28 | #define DW_MCI_CAPABILITIES (MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED |\ | ||
29 | MMC_CAP_SD_HIGHSPEED | MMC_CAP_8_BIT_DATA |\ | ||
30 | MMC_CAP_SDIO_IRQ) | ||
31 | |||
32 | static struct dw_mci_board pci_board_data = { | ||
33 | .num_slots = 1, | ||
34 | .caps = DW_MCI_CAPABILITIES, | ||
35 | .bus_hz = 33 * 1000 * 1000, | ||
36 | .detect_delay_ms = 200, | ||
37 | .fifo_depth = 32, | ||
38 | }; | ||
39 | |||
40 | static int dw_mci_pci_probe(struct pci_dev *pdev, | ||
41 | const struct pci_device_id *entries) | ||
42 | { | ||
43 | struct dw_mci *host; | ||
44 | int ret; | ||
45 | |||
46 | ret = pci_enable_device(pdev); | ||
47 | if (ret) | ||
48 | return ret; | ||
49 | if (pci_request_regions(pdev, "dw_mmc_pci")) { | ||
50 | ret = -ENODEV; | ||
51 | goto err_disable_dev; | ||
52 | } | ||
53 | |||
54 | host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL); | ||
55 | if (!host) { | ||
56 | ret = -ENOMEM; | ||
57 | goto err_release; | ||
58 | } | ||
59 | |||
60 | host->irq = pdev->irq; | ||
61 | host->irq_flags = IRQF_SHARED; | ||
62 | host->dev = &pdev->dev; | ||
63 | host->pdata = &pci_board_data; | ||
64 | |||
65 | host->regs = pci_iomap(pdev, PCI_BAR_NO, COMPLETE_BAR); | ||
66 | if (!host->regs) { | ||
67 | ret = -EIO; | ||
68 | goto err_unmap; | ||
69 | } | ||
70 | |||
71 | pci_set_drvdata(pdev, host); | ||
72 | ret = dw_mci_probe(host); | ||
73 | if (ret) | ||
74 | goto err_probe_failed; | ||
75 | return ret; | ||
76 | |||
77 | err_probe_failed: | ||
78 | pci_iounmap(pdev, host->regs); | ||
79 | err_unmap: | ||
80 | kfree(host); | ||
81 | err_release: | ||
82 | pci_release_regions(pdev); | ||
83 | err_disable_dev: | ||
84 | pci_disable_device(pdev); | ||
85 | return ret; | ||
86 | } | ||
87 | |||
88 | static void dw_mci_pci_remove(struct pci_dev *pdev) | ||
89 | { | ||
90 | struct dw_mci *host = pci_get_drvdata(pdev); | ||
91 | |||
92 | dw_mci_remove(host); | ||
93 | pci_set_drvdata(pdev, NULL); | ||
94 | pci_release_regions(pdev); | ||
95 | pci_iounmap(pdev, host->regs); | ||
96 | kfree(host); | ||
97 | pci_disable_device(pdev); | ||
98 | } | ||
99 | |||
100 | #ifdef CONFIG_PM_SLEEP | ||
101 | static int dw_mci_pci_suspend(struct device *dev) | ||
102 | { | ||
103 | int ret; | ||
104 | struct pci_dev *pdev = to_pci_dev(dev); | ||
105 | struct dw_mci *host = pci_get_drvdata(pdev); | ||
106 | |||
107 | ret = dw_mci_suspend(host); | ||
108 | return ret; | ||
109 | } | ||
110 | |||
111 | static int dw_mci_pci_resume(struct device *dev) | ||
112 | { | ||
113 | int ret; | ||
114 | struct pci_dev *pdev = to_pci_dev(dev); | ||
115 | struct dw_mci *host = pci_get_drvdata(pdev); | ||
116 | |||
117 | ret = dw_mci_resume(host); | ||
118 | return ret; | ||
119 | } | ||
120 | #else | ||
121 | #define dw_mci_pci_suspend NULL | ||
122 | #define dw_mci_pci_resume NULL | ||
123 | #endif /* CONFIG_PM_SLEEP */ | ||
124 | |||
125 | static SIMPLE_DEV_PM_OPS(dw_mci_pci_pmops, dw_mci_pci_suspend, dw_mci_pci_resume); | ||
126 | |||
127 | static DEFINE_PCI_DEVICE_TABLE(dw_mci_pci_id) = { | ||
128 | { PCI_DEVICE(SYNOPSYS_DW_MCI_VENDOR_ID, SYNOPSYS_DW_MCI_DEVICE_ID) }, | ||
129 | {} | ||
130 | }; | ||
131 | MODULE_DEVICE_TABLE(pci, dw_mci_pci_id); | ||
132 | |||
133 | static struct pci_driver dw_mci_pci_driver = { | ||
134 | .name = "dw_mmc_pci", | ||
135 | .id_table = dw_mci_pci_id, | ||
136 | .probe = dw_mci_pci_probe, | ||
137 | .remove = dw_mci_pci_remove, | ||
138 | .driver = { | ||
139 | .pm = &dw_mci_pci_pmops | ||
140 | }, | ||
141 | }; | ||
142 | |||
143 | module_pci_driver(dw_mci_pci_driver); | ||
144 | |||
145 | MODULE_DESCRIPTION("DW Multimedia Card PCI Interface driver"); | ||
146 | MODULE_AUTHOR("Shashidhar Hiremath <shashidharh@vayavyalabs.com>"); | ||
147 | MODULE_LICENSE("GPL v2"); | ||
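Note: the deleted PCI glue follows the usual acquire-in-order, unwind-in-reverse probe shape. A trimmed sketch of that error-handling ladder (hypothetical function name, kernel context assumed):

#include <linux/pci.h>

/* Sketch of the goto-unwind pattern in the deleted probe: every later failure
 * jumps to a label that releases everything acquired before it.
 */
static int example_pci_probe(struct pci_dev *pdev)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	ret = pci_request_regions(pdev, "example");
	if (ret)
		goto err_disable_dev;

	/* ... allocate state, map a BAR, register with the MMC core ... */
	return 0;

err_disable_dev:
	pci_disable_device(pdev);
	return ret;
}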
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c deleted file mode 100644 index 5e1fb1d2c42..00000000000 --- a/drivers/mmc/host/dw_mmc-pltfm.c +++ /dev/null | |||
@@ -1,136 +0,0 @@ | |||
1 | /* | ||
2 | * Synopsys DesignWare Multimedia Card Interface driver | ||
3 | * | ||
4 | * Copyright (C) 2009 NXP Semiconductors | ||
5 | * Copyright (C) 2009, 2010 Imagination Technologies Ltd. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/io.h> | ||
16 | #include <linux/irq.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/mmc/host.h> | ||
20 | #include <linux/mmc/mmc.h> | ||
21 | #include <linux/mmc/dw_mmc.h> | ||
22 | #include <linux/of.h> | ||
23 | |||
24 | #include "dw_mmc.h" | ||
25 | |||
26 | int dw_mci_pltfm_register(struct platform_device *pdev, | ||
27 | const struct dw_mci_drv_data *drv_data) | ||
28 | { | ||
29 | struct dw_mci *host; | ||
30 | struct resource *regs; | ||
31 | int ret; | ||
32 | |||
33 | host = devm_kzalloc(&pdev->dev, sizeof(struct dw_mci), GFP_KERNEL); | ||
34 | if (!host) | ||
35 | return -ENOMEM; | ||
36 | |||
37 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
38 | if (!regs) | ||
39 | return -ENXIO; | ||
40 | |||
41 | host->irq = platform_get_irq(pdev, 0); | ||
42 | if (host->irq < 0) | ||
43 | return host->irq; | ||
44 | |||
45 | host->drv_data = drv_data; | ||
46 | host->dev = &pdev->dev; | ||
47 | host->irq_flags = 0; | ||
48 | host->pdata = pdev->dev.platform_data; | ||
49 | host->regs = devm_request_and_ioremap(&pdev->dev, regs); | ||
50 | if (!host->regs) | ||
51 | return -ENOMEM; | ||
52 | |||
53 | if (drv_data && drv_data->init) { | ||
54 | ret = drv_data->init(host); | ||
55 | if (ret) | ||
56 | return ret; | ||
57 | } | ||
58 | |||
59 | platform_set_drvdata(pdev, host); | ||
60 | ret = dw_mci_probe(host); | ||
61 | return ret; | ||
62 | } | ||
63 | EXPORT_SYMBOL_GPL(dw_mci_pltfm_register); | ||
64 | |||
65 | static int dw_mci_pltfm_probe(struct platform_device *pdev) | ||
66 | { | ||
67 | return dw_mci_pltfm_register(pdev, NULL); | ||
68 | } | ||
69 | |||
70 | static int dw_mci_pltfm_remove(struct platform_device *pdev) | ||
71 | { | ||
72 | struct dw_mci *host = platform_get_drvdata(pdev); | ||
73 | |||
74 | platform_set_drvdata(pdev, NULL); | ||
75 | dw_mci_remove(host); | ||
76 | return 0; | ||
77 | } | ||
78 | EXPORT_SYMBOL_GPL(dw_mci_pltfm_remove); | ||
79 | |||
80 | #ifdef CONFIG_PM_SLEEP | ||
81 | /* | ||
82 | * TODO: we should probably disable the clock to the card in the suspend path. | ||
83 | */ | ||
84 | static int dw_mci_pltfm_suspend(struct device *dev) | ||
85 | { | ||
86 | int ret; | ||
87 | struct dw_mci *host = dev_get_drvdata(dev); | ||
88 | |||
89 | ret = dw_mci_suspend(host); | ||
90 | if (ret) | ||
91 | return ret; | ||
92 | |||
93 | return 0; | ||
94 | } | ||
95 | |||
96 | static int dw_mci_pltfm_resume(struct device *dev) | ||
97 | { | ||
98 | int ret; | ||
99 | struct dw_mci *host = dev_get_drvdata(dev); | ||
100 | |||
101 | ret = dw_mci_resume(host); | ||
102 | if (ret) | ||
103 | return ret; | ||
104 | |||
105 | return 0; | ||
106 | } | ||
107 | #else | ||
108 | #define dw_mci_pltfm_suspend NULL | ||
109 | #define dw_mci_pltfm_resume NULL | ||
110 | #endif /* CONFIG_PM_SLEEP */ | ||
111 | |||
112 | SIMPLE_DEV_PM_OPS(dw_mci_pltfm_pmops, dw_mci_pltfm_suspend, dw_mci_pltfm_resume); | ||
113 | EXPORT_SYMBOL_GPL(dw_mci_pltfm_pmops); | ||
114 | |||
115 | static const struct of_device_id dw_mci_pltfm_match[] = { | ||
116 | { .compatible = "snps,dw-mshc", }, | ||
117 | {}, | ||
118 | }; | ||
119 | MODULE_DEVICE_TABLE(of, dw_mci_pltfm_match); | ||
120 | |||
121 | static struct platform_driver dw_mci_pltfm_driver = { | ||
122 | .probe = dw_mci_pltfm_probe, | ||
123 | .remove = dw_mci_pltfm_remove, | ||
124 | .driver = { | ||
125 | .name = "dw_mmc", | ||
126 | .of_match_table = of_match_ptr(dw_mci_pltfm_match), | ||
127 | .pm = &dw_mci_pltfm_pmops, | ||
128 | }, | ||
129 | }; | ||
130 | |||
131 | module_platform_driver(dw_mci_pltfm_driver); | ||
132 | |||
133 | MODULE_DESCRIPTION("DW Multimedia Card Interface driver"); | ||
134 | MODULE_AUTHOR("NXP Semiconductor VietNam"); | ||
135 | MODULE_AUTHOR("Imagination Technologies Ltd"); | ||
136 | MODULE_LICENSE("GPL v2"); | ||
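Note: dw_mci_pltfm_register() above is a thin wrapper around the standard platform MMIO lookup. A minimal sketch of that resource-to-ioremap step, assuming the same 2012-era devm helper the deleted code uses (illustrative function name):

#include <linux/platform_device.h>
#include <linux/io.h>

/* Sketch: fetch the first MMIO resource and map it with managed lifetime,
 * as the deleted dw_mci_pltfm_register() does. Returns NULL on failure.
 */
static void __iomem *example_map_regs(struct platform_device *pdev)
{
	struct resource *regs;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs)
		return NULL;

	return devm_request_and_ioremap(&pdev->dev, regs);
}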
diff --git a/drivers/mmc/host/dw_mmc-pltfm.h b/drivers/mmc/host/dw_mmc-pltfm.h deleted file mode 100644 index 68e7fd2f614..00000000000 --- a/drivers/mmc/host/dw_mmc-pltfm.h +++ /dev/null | |||
@@ -1,20 +0,0 @@ | |||
1 | /* | ||
2 | * Synopsys DesignWare Multimedia Card Interface Platform driver | ||
3 | * | ||
4 | * Copyright (C) 2012, Samsung Electronics Co., Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _DW_MMC_PLTFM_H_ | ||
13 | #define _DW_MMC_PLTFM_H_ | ||
14 | |||
15 | extern int dw_mci_pltfm_register(struct platform_device *pdev, | ||
16 | const struct dw_mci_drv_data *drv_data); | ||
17 | extern int dw_mci_pltfm_remove(struct platform_device *pdev); | ||
18 | extern const struct dev_pm_ops dw_mci_pltfm_pmops; | ||
19 | |||
20 | #endif /* _DW_MMC_PLTFM_H_ */ | ||
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 323c5022c2c..ff0f714b012 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/ioport.h> | 22 | #include <linux/ioport.h> |
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | #include <linux/scatterlist.h> | ||
25 | #include <linux/seq_file.h> | 26 | #include <linux/seq_file.h> |
26 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
27 | #include <linux/stat.h> | 28 | #include <linux/stat.h> |
@@ -33,7 +34,6 @@ | |||
33 | #include <linux/bitops.h> | 34 | #include <linux/bitops.h> |
34 | #include <linux/regulator/consumer.h> | 35 | #include <linux/regulator/consumer.h> |
35 | #include <linux/workqueue.h> | 36 | #include <linux/workqueue.h> |
36 | #include <linux/of.h> | ||
37 | 37 | ||
38 | #include "dw_mmc.h" | 38 | #include "dw_mmc.h" |
39 | 39 | ||
@@ -101,6 +101,8 @@ struct dw_mci_slot { | |||
101 | int last_detect_state; | 101 | int last_detect_state; |
102 | }; | 102 | }; |
103 | 103 | ||
104 | static struct workqueue_struct *dw_mci_card_workqueue; | ||
105 | |||
104 | #if defined(CONFIG_DEBUG_FS) | 106 | #if defined(CONFIG_DEBUG_FS) |
105 | static int dw_mci_req_show(struct seq_file *s, void *v) | 107 | static int dw_mci_req_show(struct seq_file *s, void *v) |
106 | { | 108 | { |
@@ -231,8 +233,6 @@ static void dw_mci_set_timeout(struct dw_mci *host) | |||
231 | static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) | 233 | static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) |
232 | { | 234 | { |
233 | struct mmc_data *data; | 235 | struct mmc_data *data; |
234 | struct dw_mci_slot *slot = mmc_priv(mmc); | ||
235 | const struct dw_mci_drv_data *drv_data = slot->host->drv_data; | ||
236 | u32 cmdr; | 236 | u32 cmdr; |
237 | cmd->error = -EINPROGRESS; | 237 | cmd->error = -EINPROGRESS; |
238 | 238 | ||
@@ -262,9 +262,6 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) | |||
262 | cmdr |= SDMMC_CMD_DAT_WR; | 262 | cmdr |= SDMMC_CMD_DAT_WR; |
263 | } | 263 | } |
264 | 264 | ||
265 | if (drv_data && drv_data->prepare_command) | ||
266 | drv_data->prepare_command(slot->host, &cmdr); | ||
267 | |||
268 | return cmdr; | 265 | return cmdr; |
269 | } | 266 | } |
270 | 267 | ||
@@ -272,7 +269,7 @@ static void dw_mci_start_command(struct dw_mci *host, | |||
272 | struct mmc_command *cmd, u32 cmd_flags) | 269 | struct mmc_command *cmd, u32 cmd_flags) |
273 | { | 270 | { |
274 | host->cmd = cmd; | 271 | host->cmd = cmd; |
275 | dev_vdbg(host->dev, | 272 | dev_vdbg(&host->pdev->dev, |
276 | "start command: ARGR=0x%08x CMDR=0x%08x\n", | 273 | "start command: ARGR=0x%08x CMDR=0x%08x\n", |
277 | cmd->arg, cmd_flags); | 274 | cmd->arg, cmd_flags); |
278 | 275 | ||
@@ -299,25 +296,15 @@ static void dw_mci_stop_dma(struct dw_mci *host) | |||
299 | } | 296 | } |
300 | } | 297 | } |
301 | 298 | ||
302 | static int dw_mci_get_dma_dir(struct mmc_data *data) | ||
303 | { | ||
304 | if (data->flags & MMC_DATA_WRITE) | ||
305 | return DMA_TO_DEVICE; | ||
306 | else | ||
307 | return DMA_FROM_DEVICE; | ||
308 | } | ||
309 | |||
310 | #ifdef CONFIG_MMC_DW_IDMAC | 299 | #ifdef CONFIG_MMC_DW_IDMAC |
311 | static void dw_mci_dma_cleanup(struct dw_mci *host) | 300 | static void dw_mci_dma_cleanup(struct dw_mci *host) |
312 | { | 301 | { |
313 | struct mmc_data *data = host->data; | 302 | struct mmc_data *data = host->data; |
314 | 303 | ||
315 | if (data) | 304 | if (data) |
316 | if (!data->host_cookie) | 305 | dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len, |
317 | dma_unmap_sg(host->dev, | 306 | ((data->flags & MMC_DATA_WRITE) |
318 | data->sg, | 307 | ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); |
319 | data->sg_len, | ||
320 | dw_mci_get_dma_dir(data)); | ||
321 | } | 308 | } |
322 | 309 | ||
323 | static void dw_mci_idmac_stop_dma(struct dw_mci *host) | 310 | static void dw_mci_idmac_stop_dma(struct dw_mci *host) |
@@ -340,7 +327,7 @@ static void dw_mci_idmac_complete_dma(struct dw_mci *host) | |||
340 | { | 327 | { |
341 | struct mmc_data *data = host->data; | 328 | struct mmc_data *data = host->data; |
342 | 329 | ||
343 | dev_vdbg(host->dev, "DMA complete\n"); | 330 | dev_vdbg(&host->pdev->dev, "DMA complete\n"); |
344 | 331 | ||
345 | host->dma_ops->cleanup(host); | 332 | host->dma_ops->cleanup(host); |
346 | 333 | ||
@@ -424,8 +411,6 @@ static int dw_mci_idmac_init(struct dw_mci *host) | |||
424 | p->des3 = host->sg_dma; | 411 | p->des3 = host->sg_dma; |
425 | p->des0 = IDMAC_DES0_ER; | 412 | p->des0 = IDMAC_DES0_ER; |
426 | 413 | ||
427 | mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET); | ||
428 | |||
429 | /* Mask out interrupts - get Tx & Rx complete only */ | 414 | /* Mask out interrupts - get Tx & Rx complete only */ |
430 | mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI | | 415 | mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI | |
431 | SDMMC_IDMAC_INT_TI); | 416 | SDMMC_IDMAC_INT_TI); |
@@ -435,7 +420,7 @@ static int dw_mci_idmac_init(struct dw_mci *host) | |||
435 | return 0; | 420 | return 0; |
436 | } | 421 | } |
437 | 422 | ||
438 | static const struct dw_mci_dma_ops dw_mci_idmac_ops = { | 423 | static struct dw_mci_dma_ops dw_mci_idmac_ops = { |
439 | .init = dw_mci_idmac_init, | 424 | .init = dw_mci_idmac_init, |
440 | .start = dw_mci_idmac_start_dma, | 425 | .start = dw_mci_idmac_start_dma, |
441 | .stop = dw_mci_idmac_stop_dma, | 426 | .stop = dw_mci_idmac_stop_dma, |
@@ -444,15 +429,17 @@ static const struct dw_mci_dma_ops dw_mci_idmac_ops = { | |||
444 | }; | 429 | }; |
445 | #endif /* CONFIG_MMC_DW_IDMAC */ | 430 | #endif /* CONFIG_MMC_DW_IDMAC */ |
446 | 431 | ||
447 | static int dw_mci_pre_dma_transfer(struct dw_mci *host, | 432 | static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) |
448 | struct mmc_data *data, | ||
449 | bool next) | ||
450 | { | 433 | { |
451 | struct scatterlist *sg; | 434 | struct scatterlist *sg; |
452 | unsigned int i, sg_len; | 435 | unsigned int i, direction, sg_len; |
436 | u32 temp; | ||
453 | 437 | ||
454 | if (!next && data->host_cookie) | 438 | host->using_dma = 0; |
455 | return data->host_cookie; | 439 | |
440 | /* If we don't have a channel, we can't do DMA */ | ||
441 | if (!host->use_dma) | ||
442 | return -ENODEV; | ||
456 | 443 | ||
457 | /* | 444 | /* |
458 | * We don't do DMA on "complex" transfers, i.e. with | 445 | * We don't do DMA on "complex" transfers, i.e. with |
@@ -461,7 +448,6 @@ static int dw_mci_pre_dma_transfer(struct dw_mci *host, | |||
461 | */ | 448 | */ |
462 | if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD) | 449 | if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD) |
463 | return -EINVAL; | 450 | return -EINVAL; |
464 | |||
465 | if (data->blksz & 3) | 451 | if (data->blksz & 3) |
466 | return -EINVAL; | 452 | return -EINVAL; |
467 | 453 | ||
@@ -470,76 +456,17 @@ static int dw_mci_pre_dma_transfer(struct dw_mci *host, | |||
470 | return -EINVAL; | 456 | return -EINVAL; |
471 | } | 457 | } |
472 | 458 | ||
473 | sg_len = dma_map_sg(host->dev, | 459 | host->using_dma = 1; |
474 | data->sg, | ||
475 | data->sg_len, | ||
476 | dw_mci_get_dma_dir(data)); | ||
477 | if (sg_len == 0) | ||
478 | return -EINVAL; | ||
479 | |||
480 | if (next) | ||
481 | data->host_cookie = sg_len; | ||
482 | |||
483 | return sg_len; | ||
484 | } | ||
485 | |||
486 | static void dw_mci_pre_req(struct mmc_host *mmc, | ||
487 | struct mmc_request *mrq, | ||
488 | bool is_first_req) | ||
489 | { | ||
490 | struct dw_mci_slot *slot = mmc_priv(mmc); | ||
491 | struct mmc_data *data = mrq->data; | ||
492 | |||
493 | if (!slot->host->use_dma || !data) | ||
494 | return; | ||
495 | |||
496 | if (data->host_cookie) { | ||
497 | data->host_cookie = 0; | ||
498 | return; | ||
499 | } | ||
500 | |||
501 | if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0) | ||
502 | data->host_cookie = 0; | ||
503 | } | ||
504 | |||
505 | static void dw_mci_post_req(struct mmc_host *mmc, | ||
506 | struct mmc_request *mrq, | ||
507 | int err) | ||
508 | { | ||
509 | struct dw_mci_slot *slot = mmc_priv(mmc); | ||
510 | struct mmc_data *data = mrq->data; | ||
511 | |||
512 | if (!slot->host->use_dma || !data) | ||
513 | return; | ||
514 | |||
515 | if (data->host_cookie) | ||
516 | dma_unmap_sg(slot->host->dev, | ||
517 | data->sg, | ||
518 | data->sg_len, | ||
519 | dw_mci_get_dma_dir(data)); | ||
520 | data->host_cookie = 0; | ||
521 | } | ||
522 | |||
523 | static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) | ||
524 | { | ||
525 | int sg_len; | ||
526 | u32 temp; | ||
527 | |||
528 | host->using_dma = 0; | ||
529 | |||
530 | /* If we don't have a channel, we can't do DMA */ | ||
531 | if (!host->use_dma) | ||
532 | return -ENODEV; | ||
533 | 460 | ||
534 | sg_len = dw_mci_pre_dma_transfer(host, data, 0); | 461 | if (data->flags & MMC_DATA_READ) |
535 | if (sg_len < 0) { | 462 | direction = DMA_FROM_DEVICE; |
536 | host->dma_ops->stop(host); | 463 | else |
537 | return sg_len; | 464 | direction = DMA_TO_DEVICE; |
538 | } | ||
539 | 465 | ||
540 | host->using_dma = 1; | 466 | sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, |
467 | direction); | ||
541 | 468 | ||
542 | dev_vdbg(host->dev, | 469 | dev_vdbg(&host->pdev->dev, |
543 | "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", | 470 | "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", |
544 | (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma, | 471 | (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma, |
545 | sg_len); | 472 | sg_len); |
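Note: the hunk above also removes the pre_req/post_req fast path (left column), which maps the next request's scatterlist ahead of time and records the mapping in data->host_cookie so it can overlap with the transfer already in flight. A hedged sketch of that hook pair with illustrative names, not the driver's exact code:

#include <linux/mmc/host.h>
#include <linux/mmc/core.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Map early, remember the result via host_cookie, unmap after completion. */
static void example_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
			    bool is_first_req)
{
	struct mmc_data *data = mrq->data;

	if (!data || data->host_cookie)
		return;

	data->host_cookie = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
				       (data->flags & MMC_DATA_WRITE) ?
				       DMA_TO_DEVICE : DMA_FROM_DEVICE);
}

static void example_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
			     int err)
{
	struct mmc_data *data = mrq->data;

	if (!data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_WRITE) ?
			     DMA_TO_DEVICE : DMA_FROM_DEVICE);
	data->host_cookie = 0;
}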
@@ -575,14 +502,8 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) | |||
575 | host->dir_status = DW_MCI_SEND_STATUS; | 502 | host->dir_status = DW_MCI_SEND_STATUS; |
576 | 503 | ||
577 | if (dw_mci_submit_data_dma(host, data)) { | 504 | if (dw_mci_submit_data_dma(host, data)) { |
578 | int flags = SG_MITER_ATOMIC; | ||
579 | if (host->data->flags & MMC_DATA_READ) | ||
580 | flags |= SG_MITER_TO_SG; | ||
581 | else | ||
582 | flags |= SG_MITER_FROM_SG; | ||
583 | |||
584 | sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); | ||
585 | host->sg = data->sg; | 505 | host->sg = data->sg; |
506 | host->pio_offset = 0; | ||
586 | host->part_buf_start = 0; | 507 | host->part_buf_start = 0; |
587 | host->part_buf_count = 0; | 508 | host->part_buf_count = 0; |
588 | 509 | ||
@@ -617,22 +538,20 @@ static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg) | |||
617 | cmd, arg, cmd_status); | 538 | cmd, arg, cmd_status); |
618 | } | 539 | } |
619 | 540 | ||
620 | static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) | 541 | static void dw_mci_setup_bus(struct dw_mci_slot *slot) |
621 | { | 542 | { |
622 | struct dw_mci *host = slot->host; | 543 | struct dw_mci *host = slot->host; |
623 | u32 div; | 544 | u32 div; |
624 | u32 clk_en_a; | ||
625 | 545 | ||
626 | if (slot->clock != host->current_speed || force_clkinit) { | 546 | if (slot->clock != host->current_speed) { |
627 | div = host->bus_hz / slot->clock; | 547 | if (host->bus_hz % slot->clock) |
628 | if (host->bus_hz % slot->clock && host->bus_hz > slot->clock) | ||
629 | /* | 548 | /* |
630 | * move the + 1 after the divide to prevent | 549 | * move the + 1 after the divide to prevent |
631 | * over-clocking the card. | 550 | * over-clocking the card. |
632 | */ | 551 | */ |
633 | div += 1; | 552 | div = ((host->bus_hz / slot->clock) >> 1) + 1; |
634 | 553 | else | |
635 | div = (host->bus_hz != slot->clock) ? DIV_ROUND_UP(div, 2) : 0; | 554 | div = (host->bus_hz / slot->clock) >> 1; |
636 | 555 | ||
637 | dev_info(&slot->mmc->class_dev, | 556 | dev_info(&slot->mmc->class_dev, |
638 | "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ" | 557 | "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ" |
@@ -654,11 +573,9 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) | |||
654 | mci_send_cmd(slot, | 573 | mci_send_cmd(slot, |
655 | SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); | 574 | SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); |
656 | 575 | ||
657 | /* enable clock; only low power if no SDIO */ | 576 | /* enable clock */ |
658 | clk_en_a = SDMMC_CLKEN_ENABLE << slot->id; | 577 | mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE | |
659 | if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id))) | 578 | SDMMC_CLKEN_LOW_PWR); |
660 | clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id; | ||
661 | mci_writel(host, CLKENA, clk_en_a); | ||
662 | 579 | ||
663 | /* inform CIU */ | 580 | /* inform CIU */ |
664 | mci_send_cmd(slot, | 581 | mci_send_cmd(slot, |
@@ -671,11 +588,11 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) | |||
671 | mci_writel(host, CTYPE, (slot->ctype << slot->id)); | 588 | mci_writel(host, CTYPE, (slot->ctype << slot->id)); |
672 | } | 589 | } |
673 | 590 | ||
674 | static void __dw_mci_start_request(struct dw_mci *host, | 591 | static void dw_mci_start_request(struct dw_mci *host, |
675 | struct dw_mci_slot *slot, | 592 | struct dw_mci_slot *slot) |
676 | struct mmc_command *cmd) | ||
677 | { | 593 | { |
678 | struct mmc_request *mrq; | 594 | struct mmc_request *mrq; |
595 | struct mmc_command *cmd; | ||
679 | struct mmc_data *data; | 596 | struct mmc_data *data; |
680 | u32 cmdflags; | 597 | u32 cmdflags; |
681 | 598 | ||
@@ -683,6 +600,9 @@ static void __dw_mci_start_request(struct dw_mci *host, | |||
683 | if (host->pdata->select_slot) | 600 | if (host->pdata->select_slot) |
684 | host->pdata->select_slot(slot->id); | 601 | host->pdata->select_slot(slot->id); |
685 | 602 | ||
603 | /* Slot specific timing and width adjustment */ | ||
604 | dw_mci_setup_bus(slot); | ||
605 | |||
686 | host->cur_slot = slot; | 606 | host->cur_slot = slot; |
687 | host->mrq = mrq; | 607 | host->mrq = mrq; |
688 | 608 | ||
@@ -690,13 +610,14 @@ static void __dw_mci_start_request(struct dw_mci *host, | |||
690 | host->completed_events = 0; | 610 | host->completed_events = 0; |
691 | host->data_status = 0; | 611 | host->data_status = 0; |
692 | 612 | ||
693 | data = cmd->data; | 613 | data = mrq->data; |
694 | if (data) { | 614 | if (data) { |
695 | dw_mci_set_timeout(host); | 615 | dw_mci_set_timeout(host); |
696 | mci_writel(host, BYTCNT, data->blksz*data->blocks); | 616 | mci_writel(host, BYTCNT, data->blksz*data->blocks); |
697 | mci_writel(host, BLKSIZ, data->blksz); | 617 | mci_writel(host, BLKSIZ, data->blksz); |
698 | } | 618 | } |
699 | 619 | ||
620 | cmd = mrq->cmd; | ||
700 | cmdflags = dw_mci_prepare_command(slot->mmc, cmd); | 621 | cmdflags = dw_mci_prepare_command(slot->mmc, cmd); |
701 | 622 | ||
702 | /* this is the first command, send the initialization clock */ | 623 | /* this is the first command, send the initialization clock */ |
@@ -714,16 +635,6 @@ static void __dw_mci_start_request(struct dw_mci *host, | |||
714 | host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop); | 635 | host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop); |
715 | } | 636 | } |
716 | 637 | ||
717 | static void dw_mci_start_request(struct dw_mci *host, | ||
718 | struct dw_mci_slot *slot) | ||
719 | { | ||
720 | struct mmc_request *mrq = slot->mrq; | ||
721 | struct mmc_command *cmd; | ||
722 | |||
723 | cmd = mrq->sbc ? mrq->sbc : mrq->cmd; | ||
724 | __dw_mci_start_request(host, slot, cmd); | ||
725 | } | ||
726 | |||
727 | /* must be called with host->lock held */ | 638 | /* must be called with host->lock held */ |
728 | static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot, | 639 | static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot, |
729 | struct mmc_request *mrq) | 640 | struct mmc_request *mrq) |
@@ -770,30 +681,29 @@ static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
770 | static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | 681 | static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) |
771 | { | 682 | { |
772 | struct dw_mci_slot *slot = mmc_priv(mmc); | 683 | struct dw_mci_slot *slot = mmc_priv(mmc); |
773 | const struct dw_mci_drv_data *drv_data = slot->host->drv_data; | ||
774 | u32 regs; | 684 | u32 regs; |
775 | 685 | ||
686 | /* set default 1 bit mode */ | ||
687 | slot->ctype = SDMMC_CTYPE_1BIT; | ||
688 | |||
776 | switch (ios->bus_width) { | 689 | switch (ios->bus_width) { |
690 | case MMC_BUS_WIDTH_1: | ||
691 | slot->ctype = SDMMC_CTYPE_1BIT; | ||
692 | break; | ||
777 | case MMC_BUS_WIDTH_4: | 693 | case MMC_BUS_WIDTH_4: |
778 | slot->ctype = SDMMC_CTYPE_4BIT; | 694 | slot->ctype = SDMMC_CTYPE_4BIT; |
779 | break; | 695 | break; |
780 | case MMC_BUS_WIDTH_8: | 696 | case MMC_BUS_WIDTH_8: |
781 | slot->ctype = SDMMC_CTYPE_8BIT; | 697 | slot->ctype = SDMMC_CTYPE_8BIT; |
782 | break; | 698 | break; |
783 | default: | ||
784 | /* set default 1 bit mode */ | ||
785 | slot->ctype = SDMMC_CTYPE_1BIT; | ||
786 | } | 699 | } |
787 | 700 | ||
788 | regs = mci_readl(slot->host, UHS_REG); | ||
789 | |||
790 | /* DDR mode set */ | 701 | /* DDR mode set */ |
791 | if (ios->timing == MMC_TIMING_UHS_DDR50) | 702 | if (ios->timing == MMC_TIMING_UHS_DDR50) { |
703 | regs = mci_readl(slot->host, UHS_REG); | ||
792 | regs |= (0x1 << slot->id) << 16; | 704 | regs |= (0x1 << slot->id) << 16; |
793 | else | 705 | mci_writel(slot->host, UHS_REG, regs); |
794 | regs &= ~(0x1 << slot->id) << 16; | 706 | } |
795 | |||
796 | mci_writel(slot->host, UHS_REG, regs); | ||
797 | 707 | ||
798 | if (ios->clock) { | 708 | if (ios->clock) { |
799 | /* | 709 | /* |
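One detail worth noting in the set_ios hunk above: in the pre-patch clear path, regs &= ~(0x1 << slot->id) << 16, unary ~ binds before <<, so the mask actually ANDed in is (~(0x1 << id)) << 16, which zeroes the low 16 bits of UHS_REG along with the DDR flag. The sketch below shows what the symmetric set/clear presumably intends, with the per-slot DDR flag at bit 16 + id; it is an illustration, not a correction taken from the patch.

static unsigned int uhs_reg_ddr(unsigned int regs, unsigned int slot_id, int ddr50)
{
	unsigned int ddr_bit = (0x1u << slot_id) << 16;	/* DDR flag for this slot */

	if (ddr50)
		regs |= ddr_bit;
	else
		regs &= ~ddr_bit;	/* clear only this slot's flag */
	return regs;
}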
@@ -803,12 +713,6 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
803 | slot->clock = ios->clock; | 713 | slot->clock = ios->clock; |
804 | } | 714 | } |
805 | 715 | ||
806 | if (drv_data && drv_data->set_ios) | ||
807 | drv_data->set_ios(slot->host, ios); | ||
808 | |||
809 | /* Slot specific timing and width adjustment */ | ||
810 | dw_mci_setup_bus(slot, false); | ||
811 | |||
812 | switch (ios->power_mode) { | 716 | switch (ios->power_mode) { |
813 | case MMC_POWER_UP: | 717 | case MMC_POWER_UP: |
814 | set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags); | 718 | set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags); |
@@ -825,9 +729,7 @@ static int dw_mci_get_ro(struct mmc_host *mmc) | |||
825 | struct dw_mci_board *brd = slot->host->pdata; | 729 | struct dw_mci_board *brd = slot->host->pdata; |
826 | 730 | ||
827 | /* Use platform get_ro function, else try on board write protect */ | 731 | /* Use platform get_ro function, else try on board write protect */ |
828 | if (brd->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT) | 732 | if (brd->get_ro) |
829 | read_only = 0; | ||
830 | else if (brd->get_ro) | ||
831 | read_only = brd->get_ro(slot->id); | 733 | read_only = brd->get_ro(slot->id); |
832 | else | 734 | else |
833 | read_only = | 735 | read_only = |
@@ -862,63 +764,11 @@ static int dw_mci_get_cd(struct mmc_host *mmc) | |||
862 | return present; | 764 | return present; |
863 | } | 765 | } |
864 | 766 | ||
865 | /* | ||
866 | * Disable lower power mode. | ||
867 | * | ||
868 | * Low power mode will stop the card clock when idle. According to the | ||
869 | * description of the CLKENA register we should disable low power mode | ||
870 | * for SDIO cards if we need SDIO interrupts to work. | ||
871 | * | ||
872 | * This function is fast if low power mode is already disabled. | ||
873 | */ | ||
874 | static void dw_mci_disable_low_power(struct dw_mci_slot *slot) | ||
875 | { | ||
876 | struct dw_mci *host = slot->host; | ||
877 | u32 clk_en_a; | ||
878 | const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id; | ||
879 | |||
880 | clk_en_a = mci_readl(host, CLKENA); | ||
881 | |||
882 | if (clk_en_a & clken_low_pwr) { | ||
883 | mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr); | ||
884 | mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | | ||
885 | SDMMC_CMD_PRV_DAT_WAIT, 0); | ||
886 | } | ||
887 | } | ||
888 | |||
889 | static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) | ||
890 | { | ||
891 | struct dw_mci_slot *slot = mmc_priv(mmc); | ||
892 | struct dw_mci *host = slot->host; | ||
893 | u32 int_mask; | ||
894 | |||
895 | /* Enable/disable Slot Specific SDIO interrupt */ | ||
896 | int_mask = mci_readl(host, INTMASK); | ||
897 | if (enb) { | ||
898 | /* | ||
899 | * Turn off low power mode if it was enabled. This is a bit of | ||
900 | * a heavy operation and we disable / enable IRQs a lot, so | ||
901 | * we'll leave low power mode disabled and it will get | ||
902 | * re-enabled again in dw_mci_setup_bus(). | ||
903 | */ | ||
904 | dw_mci_disable_low_power(slot); | ||
905 | |||
906 | mci_writel(host, INTMASK, | ||
907 | (int_mask | SDMMC_INT_SDIO(slot->id))); | ||
908 | } else { | ||
909 | mci_writel(host, INTMASK, | ||
910 | (int_mask & ~SDMMC_INT_SDIO(slot->id))); | ||
911 | } | ||
912 | } | ||
913 | |||
914 | static const struct mmc_host_ops dw_mci_ops = { | 767 | static const struct mmc_host_ops dw_mci_ops = { |
915 | .request = dw_mci_request, | 768 | .request = dw_mci_request, |
916 | .pre_req = dw_mci_pre_req, | 769 | .set_ios = dw_mci_set_ios, |
917 | .post_req = dw_mci_post_req, | 770 | .get_ro = dw_mci_get_ro, |
918 | .set_ios = dw_mci_set_ios, | 771 | .get_cd = dw_mci_get_cd, |
919 | .get_ro = dw_mci_get_ro, | ||
920 | .get_cd = dw_mci_get_cd, | ||
921 | .enable_sdio_irq = dw_mci_enable_sdio_irq, | ||
922 | }; | 772 | }; |
923 | 773 | ||
924 | static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) | 774 | static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) |
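The removed dw_mci_enable_sdio_irq()/dw_mci_disable_low_power() pair above boils down to toggling one per-slot bit in INTMASK, plus dropping low-power clock gating while the interrupt is in use (see the CLKENA note earlier in this patch). A minimal sketch of the mask update; the bit position behind SDMMC_INT_SDIO() is an assumption for the example.

#define SDMMC_INT_SDIO(n)	(1u << (16 + (n)))	/* assumed: one bit per slot */

static unsigned int intmask_sdio(unsigned int int_mask, unsigned int slot_id, int enable)
{
	unsigned int bit = SDMMC_INT_SDIO(slot_id);

	return enable ? (int_mask | bit) : (int_mask & ~bit);
}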
@@ -936,12 +786,12 @@ static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) | |||
936 | slot = list_entry(host->queue.next, | 786 | slot = list_entry(host->queue.next, |
937 | struct dw_mci_slot, queue_node); | 787 | struct dw_mci_slot, queue_node); |
938 | list_del(&slot->queue_node); | 788 | list_del(&slot->queue_node); |
939 | dev_vdbg(host->dev, "list not empty: %s is next\n", | 789 | dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n", |
940 | mmc_hostname(slot->mmc)); | 790 | mmc_hostname(slot->mmc)); |
941 | host->state = STATE_SENDING_CMD; | 791 | host->state = STATE_SENDING_CMD; |
942 | dw_mci_start_request(host, slot); | 792 | dw_mci_start_request(host, slot); |
943 | } else { | 793 | } else { |
944 | dev_vdbg(host->dev, "list empty\n"); | 794 | dev_vdbg(&host->pdev->dev, "list empty\n"); |
945 | host->state = STATE_IDLE; | 795 | host->state = STATE_IDLE; |
946 | } | 796 | } |
947 | 797 | ||
@@ -986,8 +836,8 @@ static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd | |||
986 | mdelay(20); | 836 | mdelay(20); |
987 | 837 | ||
988 | if (cmd->data) { | 838 | if (cmd->data) { |
989 | dw_mci_stop_dma(host); | ||
990 | host->data = NULL; | 839 | host->data = NULL; |
840 | dw_mci_stop_dma(host); | ||
991 | } | 841 | } |
992 | } | 842 | } |
993 | } | 843 | } |
@@ -1021,14 +871,7 @@ static void dw_mci_tasklet_func(unsigned long priv) | |||
1021 | cmd = host->cmd; | 871 | cmd = host->cmd; |
1022 | host->cmd = NULL; | 872 | host->cmd = NULL; |
1023 | set_bit(EVENT_CMD_COMPLETE, &host->completed_events); | 873 | set_bit(EVENT_CMD_COMPLETE, &host->completed_events); |
1024 | dw_mci_command_complete(host, cmd); | 874 | dw_mci_command_complete(host, host->mrq->cmd); |
1025 | if (cmd == host->mrq->sbc && !cmd->error) { | ||
1026 | prev_state = state = STATE_SENDING_CMD; | ||
1027 | __dw_mci_start_request(host, host->cur_slot, | ||
1028 | host->mrq->cmd); | ||
1029 | goto unlock; | ||
1030 | } | ||
1031 | |||
1032 | if (!host->mrq->data || cmd->error) { | 875 | if (!host->mrq->data || cmd->error) { |
1033 | dw_mci_request_end(host, host->mrq); | 876 | dw_mci_request_end(host, host->mrq); |
1034 | goto unlock; | 877 | goto unlock; |
@@ -1080,7 +923,7 @@ static void dw_mci_tasklet_func(unsigned long priv) | |||
1080 | data->bytes_xfered = 0; | 923 | data->bytes_xfered = 0; |
1081 | data->error = -ETIMEDOUT; | 924 | data->error = -ETIMEDOUT; |
1082 | } else { | 925 | } else { |
1083 | dev_err(host->dev, | 926 | dev_err(&host->pdev->dev, |
1084 | "data FIFO error " | 927 | "data FIFO error " |
1085 | "(status=%08x)\n", | 928 | "(status=%08x)\n", |
1086 | status); | 929 | status); |
@@ -1092,7 +935,6 @@ static void dw_mci_tasklet_func(unsigned long priv) | |||
1092 | * generates a block interrupt, hence setting | 935 | * generates a block interrupt, hence setting |
1093 | * the scatter-gather pointer to NULL. | 936 | * the scatter-gather pointer to NULL. |
1094 | */ | 937 | */ |
1095 | sg_miter_stop(&host->sg_miter); | ||
1096 | host->sg = NULL; | 938 | host->sg = NULL; |
1097 | ctrl = mci_readl(host, CTRL); | 939 | ctrl = mci_readl(host, CTRL); |
1098 | ctrl |= SDMMC_CTRL_FIFO_RESET; | 940 | ctrl |= SDMMC_CTRL_FIFO_RESET; |
@@ -1107,12 +949,6 @@ static void dw_mci_tasklet_func(unsigned long priv) | |||
1107 | goto unlock; | 949 | goto unlock; |
1108 | } | 950 | } |
1109 | 951 | ||
1110 | if (host->mrq->sbc && !data->error) { | ||
1111 | data->stop->error = 0; | ||
1112 | dw_mci_request_end(host, host->mrq); | ||
1113 | goto unlock; | ||
1114 | } | ||
1115 | |||
1116 | prev_state = state = STATE_SENDING_STOP; | 952 | prev_state = state = STATE_SENDING_STOP; |
1117 | if (!data->error) | 953 | if (!data->error) |
1118 | send_stop_cmd(host, data); | 954 | send_stop_cmd(host, data); |
@@ -1189,8 +1025,7 @@ static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) | |||
1189 | buf += len; | 1025 | buf += len; |
1190 | cnt -= len; | 1026 | cnt -= len; |
1191 | if (!sg_next(host->sg) || host->part_buf_count == 2) { | 1027 | if (!sg_next(host->sg) || host->part_buf_count == 2) { |
1192 | mci_writew(host, DATA(host->data_offset), | 1028 | mci_writew(host, DATA, host->part_buf16); |
1193 | host->part_buf16); | ||
1194 | host->part_buf_count = 0; | 1029 | host->part_buf_count = 0; |
1195 | } | 1030 | } |
1196 | } | 1031 | } |
@@ -1207,23 +1042,21 @@ static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) | |||
1207 | cnt -= len; | 1042 | cnt -= len; |
1208 | /* push data from aligned buffer into fifo */ | 1043 | /* push data from aligned buffer into fifo */ |
1209 | for (i = 0; i < items; ++i) | 1044 | for (i = 0; i < items; ++i) |
1210 | mci_writew(host, DATA(host->data_offset), | 1045 | mci_writew(host, DATA, aligned_buf[i]); |
1211 | aligned_buf[i]); | ||
1212 | } | 1046 | } |
1213 | } else | 1047 | } else |
1214 | #endif | 1048 | #endif |
1215 | { | 1049 | { |
1216 | u16 *pdata = buf; | 1050 | u16 *pdata = buf; |
1217 | for (; cnt >= 2; cnt -= 2) | 1051 | for (; cnt >= 2; cnt -= 2) |
1218 | mci_writew(host, DATA(host->data_offset), *pdata++); | 1052 | mci_writew(host, DATA, *pdata++); |
1219 | buf = pdata; | 1053 | buf = pdata; |
1220 | } | 1054 | } |
1221 | /* put anything remaining in the part_buf */ | 1055 | /* put anything remaining in the part_buf */ |
1222 | if (cnt) { | 1056 | if (cnt) { |
1223 | dw_mci_set_part_bytes(host, buf, cnt); | 1057 | dw_mci_set_part_bytes(host, buf, cnt); |
1224 | if (!sg_next(host->sg)) | 1058 | if (!sg_next(host->sg)) |
1225 | mci_writew(host, DATA(host->data_offset), | 1059 | mci_writew(host, DATA, host->part_buf16); |
1226 | host->part_buf16); | ||
1227 | } | 1060 | } |
1228 | } | 1061 | } |
1229 | 1062 | ||
@@ -1238,8 +1071,7 @@ static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) | |||
1238 | int items = len >> 1; | 1071 | int items = len >> 1; |
1239 | int i; | 1072 | int i; |
1240 | for (i = 0; i < items; ++i) | 1073 | for (i = 0; i < items; ++i) |
1241 | aligned_buf[i] = mci_readw(host, | 1074 | aligned_buf[i] = mci_readw(host, DATA); |
1242 | DATA(host->data_offset)); | ||
1243 | /* memcpy from aligned buffer into output buffer */ | 1075 | /* memcpy from aligned buffer into output buffer */ |
1244 | memcpy(buf, aligned_buf, len); | 1076 | memcpy(buf, aligned_buf, len); |
1245 | buf += len; | 1077 | buf += len; |
@@ -1250,11 +1082,11 @@ static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) | |||
1250 | { | 1082 | { |
1251 | u16 *pdata = buf; | 1083 | u16 *pdata = buf; |
1252 | for (; cnt >= 2; cnt -= 2) | 1084 | for (; cnt >= 2; cnt -= 2) |
1253 | *pdata++ = mci_readw(host, DATA(host->data_offset)); | 1085 | *pdata++ = mci_readw(host, DATA); |
1254 | buf = pdata; | 1086 | buf = pdata; |
1255 | } | 1087 | } |
1256 | if (cnt) { | 1088 | if (cnt) { |
1257 | host->part_buf16 = mci_readw(host, DATA(host->data_offset)); | 1089 | host->part_buf16 = mci_readw(host, DATA); |
1258 | dw_mci_pull_final_bytes(host, buf, cnt); | 1090 | dw_mci_pull_final_bytes(host, buf, cnt); |
1259 | } | 1091 | } |
1260 | } | 1092 | } |
@@ -1267,8 +1099,7 @@ static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) | |||
1267 | buf += len; | 1099 | buf += len; |
1268 | cnt -= len; | 1100 | cnt -= len; |
1269 | if (!sg_next(host->sg) || host->part_buf_count == 4) { | 1101 | if (!sg_next(host->sg) || host->part_buf_count == 4) { |
1270 | mci_writel(host, DATA(host->data_offset), | 1102 | mci_writel(host, DATA, host->part_buf32); |
1271 | host->part_buf32); | ||
1272 | host->part_buf_count = 0; | 1103 | host->part_buf_count = 0; |
1273 | } | 1104 | } |
1274 | } | 1105 | } |
@@ -1285,23 +1116,21 @@ static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) | |||
1285 | cnt -= len; | 1116 | cnt -= len; |
1286 | /* push data from aligned buffer into fifo */ | 1117 | /* push data from aligned buffer into fifo */ |
1287 | for (i = 0; i < items; ++i) | 1118 | for (i = 0; i < items; ++i) |
1288 | mci_writel(host, DATA(host->data_offset), | 1119 | mci_writel(host, DATA, aligned_buf[i]); |
1289 | aligned_buf[i]); | ||
1290 | } | 1120 | } |
1291 | } else | 1121 | } else |
1292 | #endif | 1122 | #endif |
1293 | { | 1123 | { |
1294 | u32 *pdata = buf; | 1124 | u32 *pdata = buf; |
1295 | for (; cnt >= 4; cnt -= 4) | 1125 | for (; cnt >= 4; cnt -= 4) |
1296 | mci_writel(host, DATA(host->data_offset), *pdata++); | 1126 | mci_writel(host, DATA, *pdata++); |
1297 | buf = pdata; | 1127 | buf = pdata; |
1298 | } | 1128 | } |
1299 | /* put anything remaining in the part_buf */ | 1129 | /* put anything remaining in the part_buf */ |
1300 | if (cnt) { | 1130 | if (cnt) { |
1301 | dw_mci_set_part_bytes(host, buf, cnt); | 1131 | dw_mci_set_part_bytes(host, buf, cnt); |
1302 | if (!sg_next(host->sg)) | 1132 | if (!sg_next(host->sg)) |
1303 | mci_writel(host, DATA(host->data_offset), | 1133 | mci_writel(host, DATA, host->part_buf32); |
1304 | host->part_buf32); | ||
1305 | } | 1134 | } |
1306 | } | 1135 | } |
1307 | 1136 | ||
@@ -1316,8 +1145,7 @@ static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt) | |||
1316 | int items = len >> 2; | 1145 | int items = len >> 2; |
1317 | int i; | 1146 | int i; |
1318 | for (i = 0; i < items; ++i) | 1147 | for (i = 0; i < items; ++i) |
1319 | aligned_buf[i] = mci_readl(host, | 1148 | aligned_buf[i] = mci_readl(host, DATA); |
1320 | DATA(host->data_offset)); | ||
1321 | /* memcpy from aligned buffer into output buffer */ | 1149 | /* memcpy from aligned buffer into output buffer */ |
1322 | memcpy(buf, aligned_buf, len); | 1150 | memcpy(buf, aligned_buf, len); |
1323 | buf += len; | 1151 | buf += len; |
@@ -1328,11 +1156,11 @@ static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt) | |||
1328 | { | 1156 | { |
1329 | u32 *pdata = buf; | 1157 | u32 *pdata = buf; |
1330 | for (; cnt >= 4; cnt -= 4) | 1158 | for (; cnt >= 4; cnt -= 4) |
1331 | *pdata++ = mci_readl(host, DATA(host->data_offset)); | 1159 | *pdata++ = mci_readl(host, DATA); |
1332 | buf = pdata; | 1160 | buf = pdata; |
1333 | } | 1161 | } |
1334 | if (cnt) { | 1162 | if (cnt) { |
1335 | host->part_buf32 = mci_readl(host, DATA(host->data_offset)); | 1163 | host->part_buf32 = mci_readl(host, DATA); |
1336 | dw_mci_pull_final_bytes(host, buf, cnt); | 1164 | dw_mci_pull_final_bytes(host, buf, cnt); |
1337 | } | 1165 | } |
1338 | } | 1166 | } |
@@ -1345,8 +1173,7 @@ static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) | |||
1345 | buf += len; | 1173 | buf += len; |
1346 | cnt -= len; | 1174 | cnt -= len; |
1347 | if (!sg_next(host->sg) || host->part_buf_count == 8) { | 1175 | if (!sg_next(host->sg) || host->part_buf_count == 8) { |
1348 | mci_writew(host, DATA(host->data_offset), | 1176 | mci_writew(host, DATA, host->part_buf); |
1349 | host->part_buf); | ||
1350 | host->part_buf_count = 0; | 1177 | host->part_buf_count = 0; |
1351 | } | 1178 | } |
1352 | } | 1179 | } |
@@ -1363,23 +1190,21 @@ static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) | |||
1363 | cnt -= len; | 1190 | cnt -= len; |
1364 | /* push data from aligned buffer into fifo */ | 1191 | /* push data from aligned buffer into fifo */ |
1365 | for (i = 0; i < items; ++i) | 1192 | for (i = 0; i < items; ++i) |
1366 | mci_writeq(host, DATA(host->data_offset), | 1193 | mci_writeq(host, DATA, aligned_buf[i]); |
1367 | aligned_buf[i]); | ||
1368 | } | 1194 | } |
1369 | } else | 1195 | } else |
1370 | #endif | 1196 | #endif |
1371 | { | 1197 | { |
1372 | u64 *pdata = buf; | 1198 | u64 *pdata = buf; |
1373 | for (; cnt >= 8; cnt -= 8) | 1199 | for (; cnt >= 8; cnt -= 8) |
1374 | mci_writeq(host, DATA(host->data_offset), *pdata++); | 1200 | mci_writeq(host, DATA, *pdata++); |
1375 | buf = pdata; | 1201 | buf = pdata; |
1376 | } | 1202 | } |
1377 | /* put anything remaining in the part_buf */ | 1203 | /* put anything remaining in the part_buf */ |
1378 | if (cnt) { | 1204 | if (cnt) { |
1379 | dw_mci_set_part_bytes(host, buf, cnt); | 1205 | dw_mci_set_part_bytes(host, buf, cnt); |
1380 | if (!sg_next(host->sg)) | 1206 | if (!sg_next(host->sg)) |
1381 | mci_writeq(host, DATA(host->data_offset), | 1207 | mci_writeq(host, DATA, host->part_buf); |
1382 | host->part_buf); | ||
1383 | } | 1208 | } |
1384 | } | 1209 | } |
1385 | 1210 | ||
@@ -1394,8 +1219,7 @@ static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) | |||
1394 | int items = len >> 3; | 1219 | int items = len >> 3; |
1395 | int i; | 1220 | int i; |
1396 | for (i = 0; i < items; ++i) | 1221 | for (i = 0; i < items; ++i) |
1397 | aligned_buf[i] = mci_readq(host, | 1222 | aligned_buf[i] = mci_readq(host, DATA); |
1398 | DATA(host->data_offset)); | ||
1399 | /* memcpy from aligned buffer into output buffer */ | 1223 | /* memcpy from aligned buffer into output buffer */ |
1400 | memcpy(buf, aligned_buf, len); | 1224 | memcpy(buf, aligned_buf, len); |
1401 | buf += len; | 1225 | buf += len; |
@@ -1406,11 +1230,11 @@ static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) | |||
1406 | { | 1230 | { |
1407 | u64 *pdata = buf; | 1231 | u64 *pdata = buf; |
1408 | for (; cnt >= 8; cnt -= 8) | 1232 | for (; cnt >= 8; cnt -= 8) |
1409 | *pdata++ = mci_readq(host, DATA(host->data_offset)); | 1233 | *pdata++ = mci_readq(host, DATA); |
1410 | buf = pdata; | 1234 | buf = pdata; |
1411 | } | 1235 | } |
1412 | if (cnt) { | 1236 | if (cnt) { |
1413 | host->part_buf = mci_readq(host, DATA(host->data_offset)); | 1237 | host->part_buf = mci_readq(host, DATA); |
1414 | dw_mci_pull_final_bytes(host, buf, cnt); | 1238 | dw_mci_pull_final_bytes(host, buf, cnt); |
1415 | } | 1239 | } |
1416 | } | 1240 | } |
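All of the DATA(host->data_offset) -> DATA rewrites in the FIFO push/pull helpers above come from a single difference: the pre-patch driver also handles IP versions from 2.40a onward, where the FIFO data register moved, so it selects the register offset once at probe time from the version ID (see the verid handling near the end of this patch). A sketch of that selection; the 0x100/0x200 offsets are stated from memory and should be treated as assumptions.

#define DW_MMC_240A		0x240a	/* version ID where the layout changed */
#define DATA_OFFSET		0x100	/* FIFO data register, pre-2.40a (assumed) */
#define DATA_240A_OFFSET	0x200	/* FIFO data register, 2.40a and later (assumed) */

static unsigned int dw_mci_data_offset(unsigned int verid)
{
	return (verid < DW_MMC_240A) ? DATA_OFFSET : DATA_240A_OFFSET;
}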
@@ -1432,110 +1256,136 @@ static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) | |||
1432 | 1256 | ||
1433 | static void dw_mci_read_data_pio(struct dw_mci *host) | 1257 | static void dw_mci_read_data_pio(struct dw_mci *host) |
1434 | { | 1258 | { |
1435 | struct sg_mapping_iter *sg_miter = &host->sg_miter; | 1259 | struct scatterlist *sg = host->sg; |
1436 | void *buf; | 1260 | void *buf = sg_virt(sg); |
1437 | unsigned int offset; | 1261 | unsigned int offset = host->pio_offset; |
1438 | struct mmc_data *data = host->data; | 1262 | struct mmc_data *data = host->data; |
1439 | int shift = host->data_shift; | 1263 | int shift = host->data_shift; |
1440 | u32 status; | 1264 | u32 status; |
1441 | unsigned int nbytes = 0, len; | 1265 | unsigned int nbytes = 0, len; |
1442 | unsigned int remain, fcnt; | ||
1443 | 1266 | ||
1444 | do { | 1267 | do { |
1445 | if (!sg_miter_next(sg_miter)) | 1268 | len = host->part_buf_count + |
1446 | goto done; | 1269 | (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift); |
1447 | 1270 | if (offset + len <= sg->length) { | |
1448 | host->sg = sg_miter->__sg; | ||
1449 | buf = sg_miter->addr; | ||
1450 | remain = sg_miter->length; | ||
1451 | offset = 0; | ||
1452 | |||
1453 | do { | ||
1454 | fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS)) | ||
1455 | << shift) + host->part_buf_count; | ||
1456 | len = min(remain, fcnt); | ||
1457 | if (!len) | ||
1458 | break; | ||
1459 | dw_mci_pull_data(host, (void *)(buf + offset), len); | 1271 | dw_mci_pull_data(host, (void *)(buf + offset), len); |
1272 | |||
1460 | offset += len; | 1273 | offset += len; |
1461 | nbytes += len; | 1274 | nbytes += len; |
1462 | remain -= len; | ||
1463 | } while (remain); | ||
1464 | 1275 | ||
1465 | sg_miter->consumed = offset; | 1276 | if (offset == sg->length) { |
1277 | flush_dcache_page(sg_page(sg)); | ||
1278 | host->sg = sg = sg_next(sg); | ||
1279 | if (!sg) | ||
1280 | goto done; | ||
1281 | |||
1282 | offset = 0; | ||
1283 | buf = sg_virt(sg); | ||
1284 | } | ||
1285 | } else { | ||
1286 | unsigned int remaining = sg->length - offset; | ||
1287 | dw_mci_pull_data(host, (void *)(buf + offset), | ||
1288 | remaining); | ||
1289 | nbytes += remaining; | ||
1290 | |||
1291 | flush_dcache_page(sg_page(sg)); | ||
1292 | host->sg = sg = sg_next(sg); | ||
1293 | if (!sg) | ||
1294 | goto done; | ||
1295 | |||
1296 | offset = len - remaining; | ||
1297 | buf = sg_virt(sg); | ||
1298 | dw_mci_pull_data(host, buf, offset); | ||
1299 | nbytes += offset; | ||
1300 | } | ||
1301 | |||
1466 | status = mci_readl(host, MINTSTS); | 1302 | status = mci_readl(host, MINTSTS); |
1467 | mci_writel(host, RINTSTS, SDMMC_INT_RXDR); | 1303 | mci_writel(host, RINTSTS, SDMMC_INT_RXDR); |
1304 | if (status & DW_MCI_DATA_ERROR_FLAGS) { | ||
1305 | host->data_status = status; | ||
1306 | data->bytes_xfered += nbytes; | ||
1307 | smp_wmb(); | ||
1308 | |||
1309 | set_bit(EVENT_DATA_ERROR, &host->pending_events); | ||
1310 | |||
1311 | tasklet_schedule(&host->tasklet); | ||
1312 | return; | ||
1313 | } | ||
1468 | } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/ | 1314 | } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/ |
1315 | host->pio_offset = offset; | ||
1469 | data->bytes_xfered += nbytes; | 1316 | data->bytes_xfered += nbytes; |
1470 | |||
1471 | if (!remain) { | ||
1472 | if (!sg_miter_next(sg_miter)) | ||
1473 | goto done; | ||
1474 | sg_miter->consumed = 0; | ||
1475 | } | ||
1476 | sg_miter_stop(sg_miter); | ||
1477 | return; | 1317 | return; |
1478 | 1318 | ||
1479 | done: | 1319 | done: |
1480 | data->bytes_xfered += nbytes; | 1320 | data->bytes_xfered += nbytes; |
1481 | sg_miter_stop(sg_miter); | ||
1482 | host->sg = NULL; | ||
1483 | smp_wmb(); | 1321 | smp_wmb(); |
1484 | set_bit(EVENT_XFER_COMPLETE, &host->pending_events); | 1322 | set_bit(EVENT_XFER_COMPLETE, &host->pending_events); |
1485 | } | 1323 | } |
1486 | 1324 | ||
1487 | static void dw_mci_write_data_pio(struct dw_mci *host) | 1325 | static void dw_mci_write_data_pio(struct dw_mci *host) |
1488 | { | 1326 | { |
1489 | struct sg_mapping_iter *sg_miter = &host->sg_miter; | 1327 | struct scatterlist *sg = host->sg; |
1490 | void *buf; | 1328 | void *buf = sg_virt(sg); |
1491 | unsigned int offset; | 1329 | unsigned int offset = host->pio_offset; |
1492 | struct mmc_data *data = host->data; | 1330 | struct mmc_data *data = host->data; |
1493 | int shift = host->data_shift; | 1331 | int shift = host->data_shift; |
1494 | u32 status; | 1332 | u32 status; |
1495 | unsigned int nbytes = 0, len; | 1333 | unsigned int nbytes = 0, len; |
1496 | unsigned int fifo_depth = host->fifo_depth; | ||
1497 | unsigned int remain, fcnt; | ||
1498 | 1334 | ||
1499 | do { | 1335 | do { |
1500 | if (!sg_miter_next(sg_miter)) | 1336 | len = ((host->fifo_depth - |
1501 | goto done; | 1337 | SDMMC_GET_FCNT(mci_readl(host, STATUS))) << shift) |
1502 | 1338 | - host->part_buf_count; | |
1503 | host->sg = sg_miter->__sg; | 1339 | if (offset + len <= sg->length) { |
1504 | buf = sg_miter->addr; | ||
1505 | remain = sg_miter->length; | ||
1506 | offset = 0; | ||
1507 | |||
1508 | do { | ||
1509 | fcnt = ((fifo_depth - | ||
1510 | SDMMC_GET_FCNT(mci_readl(host, STATUS))) | ||
1511 | << shift) - host->part_buf_count; | ||
1512 | len = min(remain, fcnt); | ||
1513 | if (!len) | ||
1514 | break; | ||
1515 | host->push_data(host, (void *)(buf + offset), len); | 1340 | host->push_data(host, (void *)(buf + offset), len); |
1341 | |||
1516 | offset += len; | 1342 | offset += len; |
1517 | nbytes += len; | 1343 | nbytes += len; |
1518 | remain -= len; | 1344 | if (offset == sg->length) { |
1519 | } while (remain); | 1345 | host->sg = sg = sg_next(sg); |
1346 | if (!sg) | ||
1347 | goto done; | ||
1348 | |||
1349 | offset = 0; | ||
1350 | buf = sg_virt(sg); | ||
1351 | } | ||
1352 | } else { | ||
1353 | unsigned int remaining = sg->length - offset; | ||
1354 | |||
1355 | host->push_data(host, (void *)(buf + offset), | ||
1356 | remaining); | ||
1357 | nbytes += remaining; | ||
1358 | |||
1359 | host->sg = sg = sg_next(sg); | ||
1360 | if (!sg) | ||
1361 | goto done; | ||
1362 | |||
1363 | offset = len - remaining; | ||
1364 | buf = sg_virt(sg); | ||
1365 | host->push_data(host, (void *)buf, offset); | ||
1366 | nbytes += offset; | ||
1367 | } | ||
1520 | 1368 | ||
1521 | sg_miter->consumed = offset; | ||
1522 | status = mci_readl(host, MINTSTS); | 1369 | status = mci_readl(host, MINTSTS); |
1523 | mci_writel(host, RINTSTS, SDMMC_INT_TXDR); | 1370 | mci_writel(host, RINTSTS, SDMMC_INT_TXDR); |
1371 | if (status & DW_MCI_DATA_ERROR_FLAGS) { | ||
1372 | host->data_status = status; | ||
1373 | data->bytes_xfered += nbytes; | ||
1374 | |||
1375 | smp_wmb(); | ||
1376 | |||
1377 | set_bit(EVENT_DATA_ERROR, &host->pending_events); | ||
1378 | |||
1379 | tasklet_schedule(&host->tasklet); | ||
1380 | return; | ||
1381 | } | ||
1524 | } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ | 1382 | } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ |
1383 | host->pio_offset = offset; | ||
1525 | data->bytes_xfered += nbytes; | 1384 | data->bytes_xfered += nbytes; |
1526 | |||
1527 | if (!remain) { | ||
1528 | if (!sg_miter_next(sg_miter)) | ||
1529 | goto done; | ||
1530 | sg_miter->consumed = 0; | ||
1531 | } | ||
1532 | sg_miter_stop(sg_miter); | ||
1533 | return; | 1385 | return; |
1534 | 1386 | ||
1535 | done: | 1387 | done: |
1536 | data->bytes_xfered += nbytes; | 1388 | data->bytes_xfered += nbytes; |
1537 | sg_miter_stop(sg_miter); | ||
1538 | host->sg = NULL; | ||
1539 | smp_wmb(); | 1389 | smp_wmb(); |
1540 | set_bit(EVENT_XFER_COMPLETE, &host->pending_events); | 1390 | set_bit(EVENT_XFER_COMPLETE, &host->pending_events); |
1541 | } | 1391 | } |
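The large PIO hunk above swaps two bookkeeping schemes: the pre-patch code walks the request with an sg_mapping_iter (sg_miter_next()/sg_miter_stop(), one mapped segment at a time), while the patched code keeps a raw scatterlist pointer plus host->pio_offset that must survive between interrupts. Below is a compilable user-space model (not driver code) of that resumable offset bookkeeping, with the FIFO reduced to a plain byte source.

#include <stddef.h>
#include <string.h>

struct seg {
	unsigned char *buf;
	size_t len;
};

/*
 * Copy 'avail' bytes from 'fifo' into a segment list, resuming at
 * (*seg_idx, *offset) and leaving both updated so the next burst can
 * continue where this one stopped - the role host->sg/host->pio_offset
 * play in dw_mci_read_data_pio() above.
 */
static void pio_read_model(struct seg *segs, size_t nsegs,
			   size_t *seg_idx, size_t *offset,
			   const unsigned char *fifo, size_t avail)
{
	while (avail && *seg_idx < nsegs) {
		struct seg *sg = &segs[*seg_idx];
		size_t room = sg->len - *offset;
		size_t chunk = avail < room ? avail : room;

		memcpy(sg->buf + *offset, fifo, chunk);
		fifo += chunk;
		avail -= chunk;
		*offset += chunk;

		if (*offset == sg->len) {	/* segment filled: move on */
			(*seg_idx)++;
			*offset = 0;
		}
	}
}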
@@ -1554,11 +1404,11 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) | |||
1554 | static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) | 1404 | static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) |
1555 | { | 1405 | { |
1556 | struct dw_mci *host = dev_id; | 1406 | struct dw_mci *host = dev_id; |
1557 | u32 pending; | 1407 | u32 status, pending; |
1558 | unsigned int pass_count = 0; | 1408 | unsigned int pass_count = 0; |
1559 | int i; | ||
1560 | 1409 | ||
1561 | do { | 1410 | do { |
1411 | status = mci_readl(host, RINTSTS); | ||
1562 | pending = mci_readl(host, MINTSTS); /* read-only mask reg */ | 1412 | pending = mci_readl(host, MINTSTS); /* read-only mask reg */ |
1563 | 1413 | ||
1564 | /* | 1414 | /* |
@@ -1576,7 +1426,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) | |||
1576 | 1426 | ||
1577 | if (pending & DW_MCI_CMD_ERROR_FLAGS) { | 1427 | if (pending & DW_MCI_CMD_ERROR_FLAGS) { |
1578 | mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); | 1428 | mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); |
1579 | host->cmd_status = pending; | 1429 | host->cmd_status = status; |
1580 | smp_wmb(); | 1430 | smp_wmb(); |
1581 | set_bit(EVENT_CMD_COMPLETE, &host->pending_events); | 1431 | set_bit(EVENT_CMD_COMPLETE, &host->pending_events); |
1582 | } | 1432 | } |
@@ -1584,16 +1434,18 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) | |||
1584 | if (pending & DW_MCI_DATA_ERROR_FLAGS) { | 1434 | if (pending & DW_MCI_DATA_ERROR_FLAGS) { |
1585 | /* if there is an error report DATA_ERROR */ | 1435 | /* if there is an error report DATA_ERROR */ |
1586 | mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); | 1436 | mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); |
1587 | host->data_status = pending; | 1437 | host->data_status = status; |
1588 | smp_wmb(); | 1438 | smp_wmb(); |
1589 | set_bit(EVENT_DATA_ERROR, &host->pending_events); | 1439 | set_bit(EVENT_DATA_ERROR, &host->pending_events); |
1590 | tasklet_schedule(&host->tasklet); | 1440 | if (!(pending & (SDMMC_INT_DTO | SDMMC_INT_DCRC | |
1441 | SDMMC_INT_SBE | SDMMC_INT_EBE))) | ||
1442 | tasklet_schedule(&host->tasklet); | ||
1591 | } | 1443 | } |
1592 | 1444 | ||
1593 | if (pending & SDMMC_INT_DATA_OVER) { | 1445 | if (pending & SDMMC_INT_DATA_OVER) { |
1594 | mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); | 1446 | mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); |
1595 | if (!host->data_status) | 1447 | if (!host->data_status) |
1596 | host->data_status = pending; | 1448 | host->data_status = status; |
1597 | smp_wmb(); | 1449 | smp_wmb(); |
1598 | if (host->dir_status == DW_MCI_RECV_STATUS) { | 1450 | if (host->dir_status == DW_MCI_RECV_STATUS) { |
1599 | if (host->sg != NULL) | 1451 | if (host->sg != NULL) |
@@ -1617,21 +1469,12 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) | |||
1617 | 1469 | ||
1618 | if (pending & SDMMC_INT_CMD_DONE) { | 1470 | if (pending & SDMMC_INT_CMD_DONE) { |
1619 | mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); | 1471 | mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); |
1620 | dw_mci_cmd_interrupt(host, pending); | 1472 | dw_mci_cmd_interrupt(host, status); |
1621 | } | 1473 | } |
1622 | 1474 | ||
1623 | if (pending & SDMMC_INT_CD) { | 1475 | if (pending & SDMMC_INT_CD) { |
1624 | mci_writel(host, RINTSTS, SDMMC_INT_CD); | 1476 | mci_writel(host, RINTSTS, SDMMC_INT_CD); |
1625 | queue_work(host->card_workqueue, &host->card_work); | 1477 | queue_work(dw_mci_card_workqueue, &host->card_work); |
1626 | } | ||
1627 | |||
1628 | /* Handle SDIO Interrupts */ | ||
1629 | for (i = 0; i < host->num_slots; i++) { | ||
1630 | struct dw_mci_slot *slot = host->slot[i]; | ||
1631 | if (pending & SDMMC_INT_SDIO(i)) { | ||
1632 | mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i)); | ||
1633 | mmc_signal_sdio_irq(slot->mmc); | ||
1634 | } | ||
1635 | } | 1478 | } |
1636 | 1479 | ||
1637 | } while (pass_count++ < 5); | 1480 | } while (pass_count++ < 5); |
@@ -1642,6 +1485,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) | |||
1642 | if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { | 1485 | if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { |
1643 | mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI); | 1486 | mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI); |
1644 | mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); | 1487 | mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); |
1488 | set_bit(EVENT_DATA_COMPLETE, &host->pending_events); | ||
1645 | host->dma_ops->complete(host); | 1489 | host->dma_ops->complete(host); |
1646 | } | 1490 | } |
1647 | #endif | 1491 | #endif |
@@ -1734,7 +1578,6 @@ static void dw_mci_work_routine_card(struct work_struct *work) | |||
1734 | * block interrupt, hence setting the | 1578 | * block interrupt, hence setting the |
1735 | * scatter-gather pointer to NULL. | 1579 | * scatter-gather pointer to NULL. |
1736 | */ | 1580 | */ |
1737 | sg_miter_stop(&host->sg_miter); | ||
1738 | host->sg = NULL; | 1581 | host->sg = NULL; |
1739 | 1582 | ||
1740 | ctrl = mci_readl(host, CTRL); | 1583 | ctrl = mci_readl(host, CTRL); |
@@ -1743,8 +1586,7 @@ static void dw_mci_work_routine_card(struct work_struct *work) | |||
1743 | 1586 | ||
1744 | #ifdef CONFIG_MMC_DW_IDMAC | 1587 | #ifdef CONFIG_MMC_DW_IDMAC |
1745 | ctrl = mci_readl(host, BMOD); | 1588 | ctrl = mci_readl(host, BMOD); |
1746 | /* Software reset of DMA */ | 1589 | ctrl |= 0x01; /* Software reset of DMA */ |
1747 | ctrl |= SDMMC_IDMAC_SWRESET; | ||
1748 | mci_writel(host, BMOD, ctrl); | 1590 | mci_writel(host, BMOD, ctrl); |
1749 | #endif | 1591 | #endif |
1750 | 1592 | ||
@@ -1764,61 +1606,12 @@ static void dw_mci_work_routine_card(struct work_struct *work) | |||
1764 | } | 1606 | } |
1765 | } | 1607 | } |
1766 | 1608 | ||
1767 | #ifdef CONFIG_OF | 1609 | static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id) |
1768 | /* given a slot id, find out the device node representing that slot */ | ||
1769 | static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot) | ||
1770 | { | ||
1771 | struct device_node *np; | ||
1772 | const __be32 *addr; | ||
1773 | int len; | ||
1774 | |||
1775 | if (!dev || !dev->of_node) | ||
1776 | return NULL; | ||
1777 | |||
1778 | for_each_child_of_node(dev->of_node, np) { | ||
1779 | addr = of_get_property(np, "reg", &len); | ||
1780 | if (!addr || (len < sizeof(int))) | ||
1781 | continue; | ||
1782 | if (be32_to_cpup(addr) == slot) | ||
1783 | return np; | ||
1784 | } | ||
1785 | return NULL; | ||
1786 | } | ||
1787 | |||
1788 | /* find out bus-width for a given slot */ | ||
1789 | static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot) | ||
1790 | { | ||
1791 | struct device_node *np = dw_mci_of_find_slot_node(dev, slot); | ||
1792 | u32 bus_wd = 1; | ||
1793 | |||
1794 | if (!np) | ||
1795 | return 1; | ||
1796 | |||
1797 | if (of_property_read_u32(np, "bus-width", &bus_wd)) | ||
1798 | dev_err(dev, "bus-width property not found, assuming width" | ||
1799 | " as 1\n"); | ||
1800 | return bus_wd; | ||
1801 | } | ||
1802 | #else /* CONFIG_OF */ | ||
1803 | static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot) | ||
1804 | { | ||
1805 | return 1; | ||
1806 | } | ||
1807 | static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot) | ||
1808 | { | ||
1809 | return NULL; | ||
1810 | } | ||
1811 | #endif /* CONFIG_OF */ | ||
1812 | |||
1813 | static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) | ||
1814 | { | 1610 | { |
1815 | struct mmc_host *mmc; | 1611 | struct mmc_host *mmc; |
1816 | struct dw_mci_slot *slot; | 1612 | struct dw_mci_slot *slot; |
1817 | const struct dw_mci_drv_data *drv_data = host->drv_data; | ||
1818 | int ctrl_id, ret; | ||
1819 | u8 bus_width; | ||
1820 | 1613 | ||
1821 | mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); | 1614 | mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->pdev->dev); |
1822 | if (!mmc) | 1615 | if (!mmc) |
1823 | return -ENOMEM; | 1616 | return -ENOMEM; |
1824 | 1617 | ||
@@ -1826,7 +1619,6 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) | |||
1826 | slot->id = id; | 1619 | slot->id = id; |
1827 | slot->mmc = mmc; | 1620 | slot->mmc = mmc; |
1828 | slot->host = host; | 1621 | slot->host = host; |
1829 | host->slot[id] = slot; | ||
1830 | 1622 | ||
1831 | mmc->ops = &dw_mci_ops; | 1623 | mmc->ops = &dw_mci_ops; |
1832 | mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510); | 1624 | mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510); |
@@ -1846,48 +1638,23 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) | |||
1846 | 1638 | ||
1847 | if (host->pdata->caps) | 1639 | if (host->pdata->caps) |
1848 | mmc->caps = host->pdata->caps; | 1640 | mmc->caps = host->pdata->caps; |
1849 | |||
1850 | if (host->pdata->pm_caps) | ||
1851 | mmc->pm_caps = host->pdata->pm_caps; | ||
1852 | |||
1853 | if (host->dev->of_node) { | ||
1854 | ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); | ||
1855 | if (ctrl_id < 0) | ||
1856 | ctrl_id = 0; | ||
1857 | } else { | ||
1858 | ctrl_id = to_platform_device(host->dev)->id; | ||
1859 | } | ||
1860 | if (drv_data && drv_data->caps) | ||
1861 | mmc->caps |= drv_data->caps[ctrl_id]; | ||
1862 | |||
1863 | if (host->pdata->caps2) | ||
1864 | mmc->caps2 = host->pdata->caps2; | ||
1865 | |||
1866 | if (host->pdata->get_bus_wd) | ||
1867 | bus_width = host->pdata->get_bus_wd(slot->id); | ||
1868 | else if (host->dev->of_node) | ||
1869 | bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id); | ||
1870 | else | 1641 | else |
1871 | bus_width = 1; | 1642 | mmc->caps = 0; |
1872 | |||
1873 | if (drv_data && drv_data->setup_bus) { | ||
1874 | struct device_node *slot_np; | ||
1875 | slot_np = dw_mci_of_find_slot_node(host->dev, slot->id); | ||
1876 | ret = drv_data->setup_bus(host, slot_np, bus_width); | ||
1877 | if (ret) | ||
1878 | goto err_setup_bus; | ||
1879 | } | ||
1880 | 1643 | ||
1881 | switch (bus_width) { | 1644 | if (host->pdata->get_bus_wd) |
1882 | case 8: | 1645 | if (host->pdata->get_bus_wd(slot->id) >= 4) |
1883 | mmc->caps |= MMC_CAP_8_BIT_DATA; | 1646 | mmc->caps |= MMC_CAP_4_BIT_DATA; |
1884 | case 4: | ||
1885 | mmc->caps |= MMC_CAP_4_BIT_DATA; | ||
1886 | } | ||
1887 | 1647 | ||
1888 | if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) | 1648 | if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) |
1889 | mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; | 1649 | mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; |
1890 | 1650 | ||
1651 | #ifdef CONFIG_MMC_DW_IDMAC | ||
1652 | mmc->max_segs = host->ring_size; | ||
1653 | mmc->max_blk_size = 65536; | ||
1654 | mmc->max_blk_count = host->ring_size; | ||
1655 | mmc->max_seg_size = 0x1000; | ||
1656 | mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count; | ||
1657 | #else | ||
1891 | if (host->pdata->blk_settings) { | 1658 | if (host->pdata->blk_settings) { |
1892 | mmc->max_segs = host->pdata->blk_settings->max_segs; | 1659 | mmc->max_segs = host->pdata->blk_settings->max_segs; |
1893 | mmc->max_blk_size = host->pdata->blk_settings->max_blk_size; | 1660 | mmc->max_blk_size = host->pdata->blk_settings->max_blk_size; |
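In the slot-capability hunk above, the pre-patch switch on bus_width falls through on purpose: a slot wired for 8 data lines is also advertised as 4-bit capable, whereas the patched code only ever sets MMC_CAP_4_BIT_DATA. Isolated as a helper for clarity (the MMC_CAP_* flags come from the mmc core; the function name is mine):

#include <linux/mmc/host.h>

static unsigned int caps_for_bus_width(unsigned int bus_width)
{
	unsigned int caps = 0;

	switch (bus_width) {
	case 8:
		caps |= MMC_CAP_8_BIT_DATA;
		/* fall through: an 8-bit slot can also run 4-bit transfers */
	case 4:
		caps |= MMC_CAP_4_BIT_DATA;
	}
	return caps;
}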
@@ -1896,24 +1663,17 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) | |||
1896 | mmc->max_seg_size = host->pdata->blk_settings->max_seg_size; | 1663 | mmc->max_seg_size = host->pdata->blk_settings->max_seg_size; |
1897 | } else { | 1664 | } else { |
1898 | /* Useful defaults if platform data is unset. */ | 1665 | /* Useful defaults if platform data is unset. */ |
1899 | #ifdef CONFIG_MMC_DW_IDMAC | ||
1900 | mmc->max_segs = host->ring_size; | ||
1901 | mmc->max_blk_size = 65536; | ||
1902 | mmc->max_blk_count = host->ring_size; | ||
1903 | mmc->max_seg_size = 0x1000; | ||
1904 | mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count; | ||
1905 | #else | ||
1906 | mmc->max_segs = 64; | 1666 | mmc->max_segs = 64; |
1907 | mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */ | 1667 | mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */ |
1908 | mmc->max_blk_count = 512; | 1668 | mmc->max_blk_count = 512; |
1909 | mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; | 1669 | mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; |
1910 | mmc->max_seg_size = mmc->max_req_size; | 1670 | mmc->max_seg_size = mmc->max_req_size; |
1911 | #endif /* CONFIG_MMC_DW_IDMAC */ | ||
1912 | } | 1671 | } |
1672 | #endif /* CONFIG_MMC_DW_IDMAC */ | ||
1913 | 1673 | ||
1914 | host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc"); | 1674 | host->vmmc = regulator_get(mmc_dev(mmc), "vmmc"); |
1915 | if (IS_ERR(host->vmmc)) { | 1675 | if (IS_ERR(host->vmmc)) { |
1916 | pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc)); | 1676 | printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc)); |
1917 | host->vmmc = NULL; | 1677 | host->vmmc = NULL; |
1918 | } else | 1678 | } else |
1919 | regulator_enable(host->vmmc); | 1679 | regulator_enable(host->vmmc); |
@@ -1923,6 +1683,7 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) | |||
1923 | else | 1683 | else |
1924 | clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); | 1684 | clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); |
1925 | 1685 | ||
1686 | host->slot[id] = slot; | ||
1926 | mmc_add_host(mmc); | 1687 | mmc_add_host(mmc); |
1927 | 1688 | ||
1928 | #if defined(CONFIG_DEBUG_FS) | 1689 | #if defined(CONFIG_DEBUG_FS) |
@@ -1936,13 +1697,9 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) | |||
1936 | * Card may have been plugged in prior to boot so we | 1697 | * Card may have been plugged in prior to boot so we |
1937 | * need to run the detect tasklet | 1698 | * need to run the detect tasklet |
1938 | */ | 1699 | */ |
1939 | queue_work(host->card_workqueue, &host->card_work); | 1700 | queue_work(dw_mci_card_workqueue, &host->card_work); |
1940 | 1701 | ||
1941 | return 0; | 1702 | return 0; |
1942 | |||
1943 | err_setup_bus: | ||
1944 | mmc_free_host(mmc); | ||
1945 | return -EINVAL; | ||
1946 | } | 1703 | } |
1947 | 1704 | ||
1948 | static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id) | 1705 | static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id) |
@@ -1960,10 +1717,10 @@ static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id) | |||
1960 | static void dw_mci_init_dma(struct dw_mci *host) | 1717 | static void dw_mci_init_dma(struct dw_mci *host) |
1961 | { | 1718 | { |
1962 | /* Alloc memory for sg translation */ | 1719 | /* Alloc memory for sg translation */ |
1963 | host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE, | 1720 | host->sg_cpu = dma_alloc_coherent(&host->pdev->dev, PAGE_SIZE, |
1964 | &host->sg_dma, GFP_KERNEL); | 1721 | &host->sg_dma, GFP_KERNEL); |
1965 | if (!host->sg_cpu) { | 1722 | if (!host->sg_cpu) { |
1966 | dev_err(host->dev, "%s: could not alloc DMA memory\n", | 1723 | dev_err(&host->pdev->dev, "%s: could not alloc DMA memory\n", |
1967 | __func__); | 1724 | __func__); |
1968 | goto no_dma; | 1725 | goto no_dma; |
1969 | } | 1726 | } |
@@ -1971,21 +1728,20 @@ static void dw_mci_init_dma(struct dw_mci *host) | |||
1971 | /* Determine which DMA interface to use */ | 1728 | /* Determine which DMA interface to use */ |
1972 | #ifdef CONFIG_MMC_DW_IDMAC | 1729 | #ifdef CONFIG_MMC_DW_IDMAC |
1973 | host->dma_ops = &dw_mci_idmac_ops; | 1730 | host->dma_ops = &dw_mci_idmac_ops; |
1974 | dev_info(host->dev, "Using internal DMA controller.\n"); | 1731 | dev_info(&host->pdev->dev, "Using internal DMA controller.\n"); |
1975 | #endif | 1732 | #endif |
1976 | 1733 | ||
1977 | if (!host->dma_ops) | 1734 | if (!host->dma_ops) |
1978 | goto no_dma; | 1735 | goto no_dma; |
1979 | 1736 | ||
1980 | if (host->dma_ops->init && host->dma_ops->start && | 1737 | if (host->dma_ops->init) { |
1981 | host->dma_ops->stop && host->dma_ops->cleanup) { | ||
1982 | if (host->dma_ops->init(host)) { | 1738 | if (host->dma_ops->init(host)) { |
1983 | dev_err(host->dev, "%s: Unable to initialize " | 1739 | dev_err(&host->pdev->dev, "%s: Unable to initialize " |
1984 | "DMA Controller.\n", __func__); | 1740 | "DMA Controller.\n", __func__); |
1985 | goto no_dma; | 1741 | goto no_dma; |
1986 | } | 1742 | } |
1987 | } else { | 1743 | } else { |
1988 | dev_err(host->dev, "DMA initialization not found.\n"); | 1744 | dev_err(&host->pdev->dev, "DMA initialization not found.\n"); |
1989 | goto no_dma; | 1745 | goto no_dma; |
1990 | } | 1746 | } |
1991 | 1747 | ||
@@ -1993,7 +1749,7 @@ static void dw_mci_init_dma(struct dw_mci *host) | |||
1993 | return; | 1749 | return; |
1994 | 1750 | ||
1995 | no_dma: | 1751 | no_dma: |
1996 | dev_info(host->dev, "Using PIO mode.\n"); | 1752 | dev_info(&host->pdev->dev, "Using PIO mode.\n"); |
1997 | host->use_dma = 0; | 1753 | host->use_dma = 0; |
1998 | return; | 1754 | return; |
1999 | } | 1755 | } |
@@ -2019,144 +1775,63 @@ static bool mci_wait_reset(struct device *dev, struct dw_mci *host) | |||
2019 | return false; | 1775 | return false; |
2020 | } | 1776 | } |
2021 | 1777 | ||
2022 | #ifdef CONFIG_OF | 1778 | static int dw_mci_probe(struct platform_device *pdev) |
2023 | static struct dw_mci_of_quirks { | ||
2024 | char *quirk; | ||
2025 | int id; | ||
2026 | } of_quirks[] = { | ||
2027 | { | ||
2028 | .quirk = "supports-highspeed", | ||
2029 | .id = DW_MCI_QUIRK_HIGHSPEED, | ||
2030 | }, { | ||
2031 | .quirk = "broken-cd", | ||
2032 | .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION, | ||
2033 | }, | ||
2034 | }; | ||
2035 | |||
2036 | static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) | ||
2037 | { | 1779 | { |
1780 | struct dw_mci *host; | ||
1781 | struct resource *regs; | ||
2038 | struct dw_mci_board *pdata; | 1782 | struct dw_mci_board *pdata; |
2039 | struct device *dev = host->dev; | 1783 | int irq, ret, i, width; |
2040 | struct device_node *np = dev->of_node; | ||
2041 | const struct dw_mci_drv_data *drv_data = host->drv_data; | ||
2042 | int idx, ret; | ||
2043 | |||
2044 | pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); | ||
2045 | if (!pdata) { | ||
2046 | dev_err(dev, "could not allocate memory for pdata\n"); | ||
2047 | return ERR_PTR(-ENOMEM); | ||
2048 | } | ||
2049 | |||
2050 | /* find out number of slots supported */ | ||
2051 | if (of_property_read_u32(dev->of_node, "num-slots", | ||
2052 | &pdata->num_slots)) { | ||
2053 | dev_info(dev, "num-slots property not found, " | ||
2054 | "assuming 1 slot is available\n"); | ||
2055 | pdata->num_slots = 1; | ||
2056 | } | ||
2057 | |||
2058 | /* get quirks */ | ||
2059 | for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++) | ||
2060 | if (of_get_property(np, of_quirks[idx].quirk, NULL)) | ||
2061 | pdata->quirks |= of_quirks[idx].id; | ||
2062 | |||
2063 | if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth)) | ||
2064 | dev_info(dev, "fifo-depth property not found, using " | ||
2065 | "value of FIFOTH register as default\n"); | ||
2066 | |||
2067 | of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms); | ||
2068 | |||
2069 | if (drv_data && drv_data->parse_dt) { | ||
2070 | ret = drv_data->parse_dt(host); | ||
2071 | if (ret) | ||
2072 | return ERR_PTR(ret); | ||
2073 | } | ||
2074 | |||
2075 | if (of_find_property(np, "keep-power-in-suspend", NULL)) | ||
2076 | pdata->pm_caps |= MMC_PM_KEEP_POWER; | ||
2077 | |||
2078 | if (of_find_property(np, "enable-sdio-wakeup", NULL)) | ||
2079 | pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ; | ||
2080 | |||
2081 | return pdata; | ||
2082 | } | ||
2083 | |||
2084 | #else /* CONFIG_OF */ | ||
2085 | static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) | ||
2086 | { | ||
2087 | return ERR_PTR(-EINVAL); | ||
2088 | } | ||
2089 | #endif /* CONFIG_OF */ | ||
2090 | |||
2091 | int dw_mci_probe(struct dw_mci *host) | ||
2092 | { | ||
2093 | const struct dw_mci_drv_data *drv_data = host->drv_data; | ||
2094 | int width, i, ret = 0; | ||
2095 | u32 fifo_size; | 1784 | u32 fifo_size; |
2096 | int init_slots = 0; | ||
2097 | 1785 | ||
2098 | if (!host->pdata) { | 1786 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
2099 | host->pdata = dw_mci_parse_dt(host); | 1787 | if (!regs) |
2100 | if (IS_ERR(host->pdata)) { | 1788 | return -ENXIO; |
2101 | dev_err(host->dev, "platform data not available\n"); | ||
2102 | return -EINVAL; | ||
2103 | } | ||
2104 | } | ||
2105 | 1789 | ||
2106 | if (!host->pdata->select_slot && host->pdata->num_slots > 1) { | 1790 | irq = platform_get_irq(pdev, 0); |
2107 | dev_err(host->dev, | 1791 | if (irq < 0) |
2108 | "Platform data must supply select_slot function\n"); | 1792 | return irq; |
2109 | return -ENODEV; | ||
2110 | } | ||
2111 | 1793 | ||
2112 | host->biu_clk = devm_clk_get(host->dev, "biu"); | 1794 | host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL); |
2113 | if (IS_ERR(host->biu_clk)) { | 1795 | if (!host) |
2114 | dev_dbg(host->dev, "biu clock not available\n"); | 1796 | return -ENOMEM; |
2115 | } else { | ||
2116 | ret = clk_prepare_enable(host->biu_clk); | ||
2117 | if (ret) { | ||
2118 | dev_err(host->dev, "failed to enable biu clock\n"); | ||
2119 | return ret; | ||
2120 | } | ||
2121 | } | ||
2122 | 1797 | ||
2123 | host->ciu_clk = devm_clk_get(host->dev, "ciu"); | 1798 | host->pdev = pdev; |
2124 | if (IS_ERR(host->ciu_clk)) { | 1799 | host->pdata = pdata = pdev->dev.platform_data; |
2125 | dev_dbg(host->dev, "ciu clock not available\n"); | 1800 | if (!pdata || !pdata->init) { |
2126 | } else { | 1801 | dev_err(&pdev->dev, |
2127 | ret = clk_prepare_enable(host->ciu_clk); | 1802 | "Platform data must supply init function\n"); |
2128 | if (ret) { | 1803 | ret = -ENODEV; |
2129 | dev_err(host->dev, "failed to enable ciu clock\n"); | 1804 | goto err_freehost; |
2130 | goto err_clk_biu; | ||
2131 | } | ||
2132 | } | 1805 | } |
2133 | 1806 | ||
2134 | if (IS_ERR(host->ciu_clk)) | 1807 | if (!pdata->select_slot && pdata->num_slots > 1) { |
2135 | host->bus_hz = host->pdata->bus_hz; | 1808 | dev_err(&pdev->dev, |
2136 | else | 1809 | "Platform data must supply select_slot function\n"); |
2137 | host->bus_hz = clk_get_rate(host->ciu_clk); | 1810 | ret = -ENODEV; |
2138 | 1811 | goto err_freehost; | |
2139 | if (drv_data && drv_data->setup_clock) { | ||
2140 | ret = drv_data->setup_clock(host); | ||
2141 | if (ret) { | ||
2142 | dev_err(host->dev, | ||
2143 | "implementation specific clock setup failed\n"); | ||
2144 | goto err_clk_ciu; | ||
2145 | } | ||
2146 | } | 1812 | } |
2147 | 1813 | ||
2148 | if (!host->bus_hz) { | 1814 | if (!pdata->bus_hz) { |
2149 | dev_err(host->dev, | 1815 | dev_err(&pdev->dev, |
2150 | "Platform data must supply bus speed\n"); | 1816 | "Platform data must supply bus speed\n"); |
2151 | ret = -ENODEV; | 1817 | ret = -ENODEV; |
2152 | goto err_clk_ciu; | 1818 | goto err_freehost; |
2153 | } | 1819 | } |
2154 | 1820 | ||
2155 | host->quirks = host->pdata->quirks; | 1821 | host->bus_hz = pdata->bus_hz; |
1822 | host->quirks = pdata->quirks; | ||
2156 | 1823 | ||
2157 | spin_lock_init(&host->lock); | 1824 | spin_lock_init(&host->lock); |
2158 | INIT_LIST_HEAD(&host->queue); | 1825 | INIT_LIST_HEAD(&host->queue); |
2159 | 1826 | ||
1827 | ret = -ENOMEM; | ||
1828 | host->regs = ioremap(regs->start, resource_size(regs)); | ||
1829 | if (!host->regs) | ||
1830 | goto err_freehost; | ||
1831 | |||
1832 | host->dma_ops = pdata->dma_ops; | ||
1833 | dw_mci_init_dma(host); | ||
1834 | |||
2160 | /* | 1835 | /* |
2161 | * Get the host data width - this assumes that HCON has been set with | 1836 | * Get the host data width - this assumes that HCON has been set with |
2162 | * the correct values. | 1837 | * the correct values. |
@@ -2184,11 +1859,10 @@ int dw_mci_probe(struct dw_mci *host) | |||
2184 | } | 1859 | } |
2185 | 1860 | ||
2186 | /* Reset all blocks */ | 1861 | /* Reset all blocks */ |
2187 | if (!mci_wait_reset(host->dev, host)) | 1862 | if (!mci_wait_reset(&pdev->dev, host)) { |
2188 | return -ENODEV; | 1863 | ret = -ENODEV; |
2189 | 1864 | goto err_dmaunmap; | |
2190 | host->dma_ops = host->pdata->dma_ops; | 1865 | } |
2191 | dw_mci_init_dma(host); | ||
2192 | 1866 | ||
2193 | /* Clear the interrupts for the host controller */ | 1867 | /* Clear the interrupts for the host controller */ |
2194 | mci_writel(host, RINTSTS, 0xFFFFFFFF); | 1868 | mci_writel(host, RINTSTS, 0xFFFFFFFF); |
@@ -2209,7 +1883,7 @@ int dw_mci_probe(struct dw_mci *host) | |||
2209 | * should put it in the platform data. | 1883 | * should put it in the platform data. |
2210 | */ | 1884 | */ |
2211 | fifo_size = mci_readl(host, FIFOTH); | 1885 | fifo_size = mci_readl(host, FIFOTH); |
2212 | fifo_size = 1 + ((fifo_size >> 16) & 0xfff); | 1886 | fifo_size = 1 + ((fifo_size >> 16) & 0x7ff); |
2213 | } else { | 1887 | } else { |
2214 | fifo_size = host->pdata->fifo_depth; | 1888 | fifo_size = host->pdata->fifo_depth; |
2215 | } | 1889 | } |
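The FIFOTH hunk above infers the FIFO depth from the power-on value of the RX watermark field when the platform does not specify one; the two columns only disagree on the width of that field (0xfff vs 0x7ff). The decode itself is shown below with the mask left as a parameter rather than asserted from the databook.

static unsigned int fifo_depth_from_fifoth(unsigned int fifoth, unsigned int rx_wmark_mask)
{
	/* RX_WMark starts at bit 16 of FIFOTH; depth is watermark + 1 */
	return 1 + ((fifoth >> 16) & rx_wmark_mask);
}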
@@ -2223,21 +1897,32 @@ int dw_mci_probe(struct dw_mci *host) | |||
2223 | mci_writel(host, CLKSRC, 0); | 1897 | mci_writel(host, CLKSRC, 0); |
2224 | 1898 | ||
2225 | tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); | 1899 | tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); |
2226 | host->card_workqueue = alloc_workqueue("dw-mci-card", | 1900 | dw_mci_card_workqueue = alloc_workqueue("dw-mci-card", |
2227 | WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1); | 1901 | WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1); |
2228 | if (!host->card_workqueue) | 1902 | if (!dw_mci_card_workqueue) |
2229 | goto err_dmaunmap; | 1903 | goto err_dmaunmap; |
2230 | INIT_WORK(&host->card_work, dw_mci_work_routine_card); | 1904 | INIT_WORK(&host->card_work, dw_mci_work_routine_card); |
2231 | ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt, | 1905 | |
2232 | host->irq_flags, "dw-mci", host); | 1906 | ret = request_irq(irq, dw_mci_interrupt, 0, "dw-mci", host); |
2233 | if (ret) | 1907 | if (ret) |
2234 | goto err_workqueue; | 1908 | goto err_workqueue; |
2235 | 1909 | ||
1910 | platform_set_drvdata(pdev, host); | ||
1911 | |||
2236 | if (host->pdata->num_slots) | 1912 | if (host->pdata->num_slots) |
2237 | host->num_slots = host->pdata->num_slots; | 1913 | host->num_slots = host->pdata->num_slots; |
2238 | else | 1914 | else |
2239 | host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1; | 1915 | host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1; |
2240 | 1916 | ||
1917 | /* We need at least one slot to succeed */ | ||
1918 | for (i = 0; i < host->num_slots; i++) { | ||
1919 | ret = dw_mci_init_slot(host, i); | ||
1920 | if (ret) { | ||
1921 | ret = -ENODEV; | ||
1922 | goto err_init_slot; | ||
1923 | } | ||
1924 | } | ||
1925 | |||
2241 | /* | 1926 | /* |
2242 | * Enable interrupts for command done, data over, data empty, card det, | 1927 | * Enable interrupts for command done, data over, data empty, card det, |
2243 | * receive ready and error such as transmit, receive timeout, crc error | 1928 | * receive ready and error such as transmit, receive timeout, crc error |
@@ -2248,76 +1933,57 @@ int dw_mci_probe(struct dw_mci *host) | |||
2248 | DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); | 1933 | DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); |
2249 | mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ | 1934 | mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ |
2250 | 1935 | ||
2251 | dev_info(host->dev, "DW MMC controller at irq %d, " | 1936 | dev_info(&pdev->dev, "DW MMC controller at irq %d, " |
2252 | "%d bit host data width, " | 1937 | "%d bit host data width, " |
2253 | "%u deep fifo\n", | 1938 | "%u deep fifo\n", |
2254 | host->irq, width, fifo_size); | 1939 | irq, width, fifo_size); |
2255 | |||
2256 | /* We need at least one slot to succeed */ | ||
2257 | for (i = 0; i < host->num_slots; i++) { | ||
2258 | ret = dw_mci_init_slot(host, i); | ||
2259 | if (ret) | ||
2260 | dev_dbg(host->dev, "slot %d init failed\n", i); | ||
2261 | else | ||
2262 | init_slots++; | ||
2263 | } | ||
2264 | |||
2265 | if (init_slots) { | ||
2266 | dev_info(host->dev, "%d slots initialized\n", init_slots); | ||
2267 | } else { | ||
2268 | dev_dbg(host->dev, "attempted to initialize %d slots, " | ||
2269 | "but failed on all\n", host->num_slots); | ||
2270 | goto err_workqueue; | ||
2271 | } | ||
2272 | |||
2273 | /* | ||
2274 | * In 2.40a spec, Data offset is changed. | ||
2275 | * Need to check the version-id and set data-offset for DATA register. | ||
2276 | */ | ||
2277 | host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); | ||
2278 | dev_info(host->dev, "Version ID is %04x\n", host->verid); | ||
2279 | |||
2280 | if (host->verid < DW_MMC_240A) | ||
2281 | host->data_offset = DATA_OFFSET; | ||
2282 | else | ||
2283 | host->data_offset = DATA_240A_OFFSET; | ||
2284 | |||
2285 | if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) | 1940 | if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) |
2286 | dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n"); | 1941 | dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n"); |
2287 | 1942 | ||
2288 | return 0; | 1943 | return 0; |
2289 | 1944 | ||
1945 | err_init_slot: | ||
1946 | /* De-init any initialized slots */ | ||
1947 | while (i > 0) { | ||
1948 | if (host->slot[i]) | ||
1949 | dw_mci_cleanup_slot(host->slot[i], i); | ||
1950 | i--; | ||
1951 | } | ||
1952 | free_irq(irq, host); | ||
1953 | |||
2290 | err_workqueue: | 1954 | err_workqueue: |
2291 | destroy_workqueue(host->card_workqueue); | 1955 | destroy_workqueue(dw_mci_card_workqueue); |
2292 | 1956 | ||
2293 | err_dmaunmap: | 1957 | err_dmaunmap: |
2294 | if (host->use_dma && host->dma_ops->exit) | 1958 | if (host->use_dma && host->dma_ops->exit) |
2295 | host->dma_ops->exit(host); | 1959 | host->dma_ops->exit(host); |
1960 | dma_free_coherent(&host->pdev->dev, PAGE_SIZE, | ||
1961 | host->sg_cpu, host->sg_dma); | ||
1962 | iounmap(host->regs); | ||
2296 | 1963 | ||
2297 | if (host->vmmc) | 1964 | if (host->vmmc) { |
2298 | regulator_disable(host->vmmc); | 1965 | regulator_disable(host->vmmc); |
1966 | regulator_put(host->vmmc); | ||
1967 | } | ||
2299 | 1968 | ||
2300 | err_clk_ciu: | ||
2301 | if (!IS_ERR(host->ciu_clk)) | ||
2302 | clk_disable_unprepare(host->ciu_clk); | ||
2303 | |||
2304 | err_clk_biu: | ||
2305 | if (!IS_ERR(host->biu_clk)) | ||
2306 | clk_disable_unprepare(host->biu_clk); | ||
2307 | 1969 | ||
1970 | err_freehost: | ||
1971 | kfree(host); | ||
2308 | return ret; | 1972 | return ret; |
2309 | } | 1973 | } |
2310 | EXPORT_SYMBOL(dw_mci_probe); | ||
2311 | 1974 | ||
2312 | void dw_mci_remove(struct dw_mci *host) | 1975 | static int __exit dw_mci_remove(struct platform_device *pdev) |
2313 | { | 1976 | { |
1977 | struct dw_mci *host = platform_get_drvdata(pdev); | ||
2314 | int i; | 1978 | int i; |
2315 | 1979 | ||
2316 | mci_writel(host, RINTSTS, 0xFFFFFFFF); | 1980 | mci_writel(host, RINTSTS, 0xFFFFFFFF); |
2317 | mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ | 1981 | mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ |
2318 | 1982 | ||
1983 | platform_set_drvdata(pdev, NULL); | ||
1984 | |||
2319 | for (i = 0; i < host->num_slots; i++) { | 1985 | for (i = 0; i < host->num_slots; i++) { |
2320 | dev_dbg(host->dev, "remove slot %d\n", i); | 1986 | dev_dbg(&pdev->dev, "remove slot %d\n", i); |
2321 | if (host->slot[i]) | 1987 | if (host->slot[i]) |
2322 | dw_mci_cleanup_slot(host->slot[i], i); | 1988 | dw_mci_cleanup_slot(host->slot[i], i); |
2323 | } | 1989 | } |
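The two sides bring up slots differently: the left counts successful dw_mci_init_slot() calls and only fails when every slot fails, while the right aborts on the first failure and unwinds already-initialized slots at err_init_slot. A compact sketch of the abort-and-unwind form, as a hypothetical helper built on the same dw_mci_init_slot()/dw_mci_cleanup_slot() calls (this variant walks the unwind all the way back to slot 0):

        static int dw_mci_init_all_slots(struct dw_mci *host)
        {
                int i, ret;

                for (i = 0; i < host->num_slots; i++) {
                        ret = dw_mci_init_slot(host, i);
                        if (ret)
                                goto err;               /* abort on the first failing slot */
                }
                return 0;

        err:
                while (--i >= 0)                        /* unwind slots 0 .. i-1 in reverse */
                        if (host->slot[i])
                                dw_mci_cleanup_slot(host->slot[i], i);
                return -ENODEV;
        }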
@@ -2326,31 +1992,32 @@ void dw_mci_remove(struct dw_mci *host) | |||
2326 | mci_writel(host, CLKENA, 0); | 1992 | mci_writel(host, CLKENA, 0); |
2327 | mci_writel(host, CLKSRC, 0); | 1993 | mci_writel(host, CLKSRC, 0); |
2328 | 1994 | ||
2329 | destroy_workqueue(host->card_workqueue); | 1995 | free_irq(platform_get_irq(pdev, 0), host); |
1996 | destroy_workqueue(dw_mci_card_workqueue); | ||
1997 | dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); | ||
2330 | 1998 | ||
2331 | if (host->use_dma && host->dma_ops->exit) | 1999 | if (host->use_dma && host->dma_ops->exit) |
2332 | host->dma_ops->exit(host); | 2000 | host->dma_ops->exit(host); |
2333 | 2001 | ||
2334 | if (host->vmmc) | 2002 | if (host->vmmc) { |
2335 | regulator_disable(host->vmmc); | 2003 | regulator_disable(host->vmmc); |
2004 | regulator_put(host->vmmc); | ||
2005 | } | ||
2336 | 2006 | ||
2337 | if (!IS_ERR(host->ciu_clk)) | 2007 | iounmap(host->regs); |
2338 | clk_disable_unprepare(host->ciu_clk); | ||
2339 | 2008 | ||
2340 | if (!IS_ERR(host->biu_clk)) | 2009 | kfree(host); |
2341 | clk_disable_unprepare(host->biu_clk); | 2010 | return 0; |
2342 | } | 2011 | } |
2343 | EXPORT_SYMBOL(dw_mci_remove); | ||
2344 | |||
2345 | 2012 | ||
2346 | 2013 | #ifdef CONFIG_PM | |
2347 | #ifdef CONFIG_PM_SLEEP | ||
2348 | /* | 2014 | /* |
2349 | * TODO: we should probably disable the clock to the card in the suspend path. | 2015 | * TODO: we should probably disable the clock to the card in the suspend path. |
2350 | */ | 2016 | */ |
2351 | int dw_mci_suspend(struct dw_mci *host) | 2017 | static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg) |
2352 | { | 2018 | { |
2353 | int i, ret = 0; | 2019 | int i, ret; |
2020 | struct dw_mci *host = platform_get_drvdata(pdev); | ||
2354 | 2021 | ||
2355 | for (i = 0; i < host->num_slots; i++) { | 2022 | for (i = 0; i < host->num_slots; i++) { |
2356 | struct dw_mci_slot *slot = host->slot[i]; | 2023 | struct dw_mci_slot *slot = host->slot[i]; |
@@ -2372,23 +2039,23 @@ int dw_mci_suspend(struct dw_mci *host) | |||
2372 | 2039 | ||
2373 | return 0; | 2040 | return 0; |
2374 | } | 2041 | } |
2375 | EXPORT_SYMBOL(dw_mci_suspend); | ||
2376 | 2042 | ||
2377 | int dw_mci_resume(struct dw_mci *host) | 2043 | static int dw_mci_resume(struct platform_device *pdev) |
2378 | { | 2044 | { |
2379 | int i, ret; | 2045 | int i, ret; |
2046 | struct dw_mci *host = platform_get_drvdata(pdev); | ||
2380 | 2047 | ||
2381 | if (host->vmmc) | 2048 | if (host->vmmc) |
2382 | regulator_enable(host->vmmc); | 2049 | regulator_enable(host->vmmc); |
2383 | 2050 | ||
2384 | if (!mci_wait_reset(host->dev, host)) { | 2051 | if (host->dma_ops->init) |
2052 | host->dma_ops->init(host); | ||
2053 | |||
2054 | if (!mci_wait_reset(&pdev->dev, host)) { | ||
2385 | ret = -ENODEV; | 2055 | ret = -ENODEV; |
2386 | return ret; | 2056 | return ret; |
2387 | } | 2057 | } |
2388 | 2058 | ||
2389 | if (host->use_dma && host->dma_ops->init) | ||
2390 | host->dma_ops->init(host); | ||
2391 | |||
2392 | /* Restore the old value at FIFOTH register */ | 2059 | /* Restore the old value at FIFOTH register */ |
2393 | mci_writel(host, FIFOTH, host->fifoth_val); | 2060 | mci_writel(host, FIFOTH, host->fifoth_val); |
2394 | 2061 | ||
@@ -2402,28 +2069,35 @@ int dw_mci_resume(struct dw_mci *host) | |||
2402 | struct dw_mci_slot *slot = host->slot[i]; | 2069 | struct dw_mci_slot *slot = host->slot[i]; |
2403 | if (!slot) | 2070 | if (!slot) |
2404 | continue; | 2071 | continue; |
2405 | if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) { | ||
2406 | dw_mci_set_ios(slot->mmc, &slot->mmc->ios); | ||
2407 | dw_mci_setup_bus(slot, true); | ||
2408 | } | ||
2409 | |||
2410 | ret = mmc_resume_host(host->slot[i]->mmc); | 2072 | ret = mmc_resume_host(host->slot[i]->mmc); |
2411 | if (ret < 0) | 2073 | if (ret < 0) |
2412 | return ret; | 2074 | return ret; |
2413 | } | 2075 | } |
2076 | |||
2414 | return 0; | 2077 | return 0; |
2415 | } | 2078 | } |
2416 | EXPORT_SYMBOL(dw_mci_resume); | 2079 | #else |
2417 | #endif /* CONFIG_PM_SLEEP */ | 2080 | #define dw_mci_suspend NULL |
2081 | #define dw_mci_resume NULL | ||
2082 | #endif /* CONFIG_PM */ | ||
2083 | |||
2084 | static struct platform_driver dw_mci_driver = { | ||
2085 | .remove = __exit_p(dw_mci_remove), | ||
2086 | .suspend = dw_mci_suspend, | ||
2087 | .resume = dw_mci_resume, | ||
2088 | .driver = { | ||
2089 | .name = "dw_mmc", | ||
2090 | }, | ||
2091 | }; | ||
2418 | 2092 | ||
2419 | static int __init dw_mci_init(void) | 2093 | static int __init dw_mci_init(void) |
2420 | { | 2094 | { |
2421 | printk(KERN_INFO "Synopsys Designware Multimedia Card Interface Driver"); | 2095 | return platform_driver_probe(&dw_mci_driver, dw_mci_probe); |
2422 | return 0; | ||
2423 | } | 2096 | } |
2424 | 2097 | ||
2425 | static void __exit dw_mci_exit(void) | 2098 | static void __exit dw_mci_exit(void) |
2426 | { | 2099 | { |
2100 | platform_driver_unregister(&dw_mci_driver); | ||
2427 | } | 2101 | } |
2428 | 2102 | ||
2429 | module_init(dw_mci_init); | 2103 | module_init(dw_mci_init); |
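On the left-hand side dw_mci_probe() and dw_mci_remove() are exported library entry points, so the platform specifics (resources, IRQ, drvdata) live in a separate glue driver rather than in dw_mmc.c itself. A hypothetical minimal glue probe under that split, a sketch only, using struct dw_mci fields that appear elsewhere in this diff (dev, regs, irq, irq_flags, pdata):

        static int dw_mci_pltfm_probe(struct platform_device *pdev)
        {
                struct dw_mci *host;
                struct resource *regs;

                host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
                if (!host)
                        return -ENOMEM;

                regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
                if (!regs)
                        return -ENXIO;

                host->irq = platform_get_irq(pdev, 0);
                if (host->irq < 0)
                        return host->irq;

                host->dev = &pdev->dev;
                host->irq_flags = 0;
                host->pdata = pdev->dev.platform_data;
                host->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
                if (!host->regs)
                        return -ENOMEM;

                platform_set_drvdata(pdev, host);
                return dw_mci_probe(host);              /* exported library entry point */
        }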
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index 53b8fd987e4..027d3773539 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h | |||
@@ -14,8 +14,6 @@ | |||
14 | #ifndef _DW_MMC_H_ | 14 | #ifndef _DW_MMC_H_ |
15 | #define _DW_MMC_H_ | 15 | #define _DW_MMC_H_ |
16 | 16 | ||
17 | #define DW_MMC_240A 0x240a | ||
18 | |||
19 | #define SDMMC_CTRL 0x000 | 17 | #define SDMMC_CTRL 0x000 |
20 | #define SDMMC_PWREN 0x004 | 18 | #define SDMMC_PWREN 0x004 |
21 | #define SDMMC_CLKDIV 0x008 | 19 | #define SDMMC_CLKDIV 0x008 |
@@ -53,14 +51,7 @@ | |||
53 | #define SDMMC_IDINTEN 0x090 | 51 | #define SDMMC_IDINTEN 0x090 |
54 | #define SDMMC_DSCADDR 0x094 | 52 | #define SDMMC_DSCADDR 0x094 |
55 | #define SDMMC_BUFADDR 0x098 | 53 | #define SDMMC_BUFADDR 0x098 |
56 | #define SDMMC_DATA(x) (x) | 54 | #define SDMMC_DATA 0x100 |
57 | |||
58 | /* | ||
59 | * Data offset is difference according to Version | ||
60 | * Lower than 2.40a : data register offest is 0x100 | ||
61 | */ | ||
62 | #define DATA_OFFSET 0x100 | ||
63 | #define DATA_240A_OFFSET 0x200 | ||
64 | 55 | ||
65 | /* shift bit field */ | 56 | /* shift bit field */ |
66 | #define _SBF(f, v) ((v) << (f)) | 57 | #define _SBF(f, v) ((v) << (f)) |
@@ -91,7 +82,7 @@ | |||
91 | #define SDMMC_CTYPE_4BIT BIT(0) | 82 | #define SDMMC_CTYPE_4BIT BIT(0) |
92 | #define SDMMC_CTYPE_1BIT 0 | 83 | #define SDMMC_CTYPE_1BIT 0 |
93 | /* Interrupt status & mask register defines */ | 84 | /* Interrupt status & mask register defines */ |
94 | #define SDMMC_INT_SDIO(n) BIT(16 + (n)) | 85 | #define SDMMC_INT_SDIO BIT(16) |
95 | #define SDMMC_INT_EBE BIT(15) | 86 | #define SDMMC_INT_EBE BIT(15) |
96 | #define SDMMC_INT_ACD BIT(14) | 87 | #define SDMMC_INT_ACD BIT(14) |
97 | #define SDMMC_INT_SBE BIT(13) | 88 | #define SDMMC_INT_SBE BIT(13) |
@@ -126,7 +117,7 @@ | |||
126 | #define SDMMC_CMD_RESP_EXP BIT(6) | 117 | #define SDMMC_CMD_RESP_EXP BIT(6) |
127 | #define SDMMC_CMD_INDX(n) ((n) & 0x1F) | 118 | #define SDMMC_CMD_INDX(n) ((n) & 0x1F) |
128 | /* Status register defines */ | 119 | /* Status register defines */ |
129 | #define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FFF) | 120 | #define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FF) |
130 | /* Internal DMAC interrupt defines */ | 121 | /* Internal DMAC interrupt defines */ |
131 | #define SDMMC_IDMAC_INT_AI BIT(9) | 122 | #define SDMMC_IDMAC_INT_AI BIT(9) |
132 | #define SDMMC_IDMAC_INT_NI BIT(8) | 123 | #define SDMMC_IDMAC_INT_NI BIT(8) |
@@ -139,8 +130,6 @@ | |||
139 | #define SDMMC_IDMAC_ENABLE BIT(7) | 130 | #define SDMMC_IDMAC_ENABLE BIT(7) |
140 | #define SDMMC_IDMAC_FB BIT(1) | 131 | #define SDMMC_IDMAC_FB BIT(1) |
141 | #define SDMMC_IDMAC_SWRESET BIT(0) | 132 | #define SDMMC_IDMAC_SWRESET BIT(0) |
142 | /* Version ID register define */ | ||
143 | #define SDMMC_GET_VERID(x) ((x) & 0xFFFF) | ||
144 | 133 | ||
145 | /* Register access macros */ | 134 | /* Register access macros */ |
146 | #define mci_readl(dev, reg) \ | 135 | #define mci_readl(dev, reg) \ |
@@ -175,35 +164,4 @@ | |||
175 | (*(volatile u64 __force *)((dev)->regs + SDMMC_##reg) = (value)) | 164 | (*(volatile u64 __force *)((dev)->regs + SDMMC_##reg) = (value)) |
176 | #endif | 165 | #endif |
177 | 166 | ||
178 | extern int dw_mci_probe(struct dw_mci *host); | ||
179 | extern void dw_mci_remove(struct dw_mci *host); | ||
180 | #ifdef CONFIG_PM | ||
181 | extern int dw_mci_suspend(struct dw_mci *host); | ||
182 | extern int dw_mci_resume(struct dw_mci *host); | ||
183 | #endif | ||
184 | |||
185 | /** | ||
186 | * dw_mci driver data - dw-mshc implementation specific driver data. | ||
187 | * @caps: mmc subsystem specified capabilities of the controller(s). | ||
188 | * @init: early implementation specific initialization. | ||
189 | * @setup_clock: implementation specific clock configuration. | ||
190 | * @prepare_command: handle CMD register extensions. | ||
191 | * @set_ios: handle bus specific extensions. | ||
192 | * @parse_dt: parse implementation specific device tree properties. | ||
193 | * @setup_bus: initialize io-interface | ||
194 | * | ||
195 | * Provide controller implementation specific extensions. The usage of this | ||
196 | * data structure is fully optional and usage of each member in this structure | ||
197 | * is optional as well. | ||
198 | */ | ||
199 | struct dw_mci_drv_data { | ||
200 | unsigned long *caps; | ||
201 | int (*init)(struct dw_mci *host); | ||
202 | int (*setup_clock)(struct dw_mci *host); | ||
203 | void (*prepare_command)(struct dw_mci *host, u32 *cmdr); | ||
204 | void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios); | ||
205 | int (*parse_dt)(struct dw_mci *host); | ||
206 | int (*setup_bus)(struct dw_mci *host, | ||
207 | struct device_node *slot_np, u8 bus_width); | ||
208 | }; | ||
209 | #endif /* _DW_MMC_H_ */ | 167 | #endif /* _DW_MMC_H_ */ |
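The macros dropped from this header (SDMMC_DATA(x), DATA_OFFSET, DATA_240A_OFFSET, DW_MMC_240A, SDMMC_GET_VERID) exist because the data register moved from 0x100 to 0x200 in the 2.40a revision of the IP; together with the mci_readl()/mci_writel() token-pasting accessors kept above, they let one FIFO access path serve both layouts. A sketch of how the pieces fit, with dw_mci_set_data_offset() and dw_mci_pull_word() as hypothetical wrappers around what dw_mmc.c does inline:

        /* Probe time: latch the IP version and pick the matching data offset. */
        static void dw_mci_set_data_offset(struct dw_mci *host)
        {
                host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
                host->data_offset = (host->verid < DW_MMC_240A)
                                        ? DATA_OFFSET           /* pre-2.40a: 0x100 */
                                        : DATA_240A_OFFSET;     /* 2.40a and later: 0x200 */
        }

        /* FIFO access: mci_readl(host, DATA(host->data_offset)) pastes to
         * SDMMC_DATA(host->data_offset), which the parameterized macro reduces
         * to the offset itself, so the same code serves both register layouts. */
        static u32 dw_mci_pull_word(struct dw_mci *host)
        {
                return mci_readl(host, DATA(host->data_offset));
        }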
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index 2391c6b7a4b..74218ad677e 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c | |||
@@ -702,7 +702,7 @@ static const struct jz_gpio_bulk_request jz4740_mmc_pins[] = { | |||
702 | JZ_GPIO_BULK_PIN(MSC_DATA3), | 702 | JZ_GPIO_BULK_PIN(MSC_DATA3), |
703 | }; | 703 | }; |
704 | 704 | ||
705 | static int jz4740_mmc_request_gpio(struct device *dev, int gpio, | 705 | static int __devinit jz4740_mmc_request_gpio(struct device *dev, int gpio, |
706 | const char *name, bool output, int value) | 706 | const char *name, bool output, int value) |
707 | { | 707 | { |
708 | int ret; | 708 | int ret; |
@@ -724,7 +724,7 @@ static int jz4740_mmc_request_gpio(struct device *dev, int gpio, | |||
724 | return 0; | 724 | return 0; |
725 | } | 725 | } |
726 | 726 | ||
727 | static int jz4740_mmc_request_gpios(struct platform_device *pdev) | 727 | static int __devinit jz4740_mmc_request_gpios(struct platform_device *pdev) |
728 | { | 728 | { |
729 | int ret; | 729 | int ret; |
730 | struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data; | 730 | struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data; |
@@ -759,7 +759,7 @@ err: | |||
759 | return ret; | 759 | return ret; |
760 | } | 760 | } |
761 | 761 | ||
762 | static int jz4740_mmc_request_cd_irq(struct platform_device *pdev, | 762 | static int __devinit jz4740_mmc_request_cd_irq(struct platform_device *pdev, |
763 | struct jz4740_mmc_host *host) | 763 | struct jz4740_mmc_host *host) |
764 | { | 764 | { |
765 | struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data; | 765 | struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data; |
@@ -802,7 +802,7 @@ static inline size_t jz4740_mmc_num_pins(struct jz4740_mmc_host *host) | |||
802 | return num_pins; | 802 | return num_pins; |
803 | } | 803 | } |
804 | 804 | ||
805 | static int jz4740_mmc_probe(struct platform_device* pdev) | 805 | static int __devinit jz4740_mmc_probe(struct platform_device* pdev) |
806 | { | 806 | { |
807 | int ret; | 807 | int ret; |
808 | struct mmc_host *mmc; | 808 | struct mmc_host *mmc; |
@@ -938,7 +938,7 @@ err_free_host: | |||
938 | return ret; | 938 | return ret; |
939 | } | 939 | } |
940 | 940 | ||
941 | static int jz4740_mmc_remove(struct platform_device *pdev) | 941 | static int __devexit jz4740_mmc_remove(struct platform_device *pdev) |
942 | { | 942 | { |
943 | struct jz4740_mmc_host *host = platform_get_drvdata(pdev); | 943 | struct jz4740_mmc_host *host = platform_get_drvdata(pdev); |
944 | 944 | ||
@@ -1004,7 +1004,7 @@ const struct dev_pm_ops jz4740_mmc_pm_ops = { | |||
1004 | 1004 | ||
1005 | static struct platform_driver jz4740_mmc_driver = { | 1005 | static struct platform_driver jz4740_mmc_driver = { |
1006 | .probe = jz4740_mmc_probe, | 1006 | .probe = jz4740_mmc_probe, |
1007 | .remove = jz4740_mmc_remove, | 1007 | .remove = __devexit_p(jz4740_mmc_remove), |
1008 | .driver = { | 1008 | .driver = { |
1009 | .name = "jz4740-mmc", | 1009 | .name = "jz4740-mmc", |
1010 | .owner = THIS_MODULE, | 1010 | .owner = THIS_MODULE, |
@@ -1012,7 +1012,17 @@ static struct platform_driver jz4740_mmc_driver = { | |||
1012 | }, | 1012 | }, |
1013 | }; | 1013 | }; |
1014 | 1014 | ||
1015 | module_platform_driver(jz4740_mmc_driver); | 1015 | static int __init jz4740_mmc_init(void) |
1016 | { | ||
1017 | return platform_driver_register(&jz4740_mmc_driver); | ||
1018 | } | ||
1019 | module_init(jz4740_mmc_init); | ||
1020 | |||
1021 | static void __exit jz4740_mmc_exit(void) | ||
1022 | { | ||
1023 | platform_driver_unregister(&jz4740_mmc_driver); | ||
1024 | } | ||
1025 | module_exit(jz4740_mmc_exit); | ||
1016 | 1026 | ||
1017 | MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver"); | 1027 | MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver"); |
1018 | MODULE_LICENSE("GPL"); | 1028 | MODULE_LICENSE("GPL"); |
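module_platform_driver(jz4740_mmc_driver), dropped on the right in favour of hand-rolled init/exit functions, is only shorthand: through module_driver() it generates essentially the same register/unregister pair. Roughly, the simplified expansion is:

        static int __init jz4740_mmc_driver_init(void)
        {
                return platform_driver_register(&jz4740_mmc_driver);
        }
        module_init(jz4740_mmc_driver_init);

        static void __exit jz4740_mmc_driver_exit(void)
        {
                platform_driver_unregister(&jz4740_mmc_driver);
        }
        module_exit(jz4740_mmc_driver_exit);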
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 74145d1d51f..7c1e16aaf17 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/sched.h> | 27 | #include <linux/sched.h> |
28 | #include <linux/delay.h> | 28 | #include <linux/delay.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/module.h> | ||
31 | #include <linux/bio.h> | 30 | #include <linux/bio.h> |
32 | #include <linux/dma-mapping.h> | 31 | #include <linux/dma-mapping.h> |
33 | #include <linux/crc7.h> | 32 | #include <linux/crc7.h> |
@@ -1485,7 +1484,7 @@ nomem: | |||
1485 | } | 1484 | } |
1486 | 1485 | ||
1487 | 1486 | ||
1488 | static int mmc_spi_remove(struct spi_device *spi) | 1487 | static int __devexit mmc_spi_remove(struct spi_device *spi) |
1489 | { | 1488 | { |
1490 | struct mmc_host *mmc = dev_get_drvdata(&spi->dev); | 1489 | struct mmc_host *mmc = dev_get_drvdata(&spi->dev); |
1491 | struct mmc_spi_host *host; | 1490 | struct mmc_spi_host *host; |
@@ -1517,7 +1516,7 @@ static int mmc_spi_remove(struct spi_device *spi) | |||
1517 | return 0; | 1516 | return 0; |
1518 | } | 1517 | } |
1519 | 1518 | ||
1520 | static struct of_device_id mmc_spi_of_match_table[] = { | 1519 | static struct of_device_id mmc_spi_of_match_table[] __devinitdata = { |
1521 | { .compatible = "mmc-spi-slot", }, | 1520 | { .compatible = "mmc-spi-slot", }, |
1522 | {}, | 1521 | {}, |
1523 | }; | 1522 | }; |
@@ -1525,14 +1524,28 @@ static struct of_device_id mmc_spi_of_match_table[] = { | |||
1525 | static struct spi_driver mmc_spi_driver = { | 1524 | static struct spi_driver mmc_spi_driver = { |
1526 | .driver = { | 1525 | .driver = { |
1527 | .name = "mmc_spi", | 1526 | .name = "mmc_spi", |
1527 | .bus = &spi_bus_type, | ||
1528 | .owner = THIS_MODULE, | 1528 | .owner = THIS_MODULE, |
1529 | .of_match_table = mmc_spi_of_match_table, | 1529 | .of_match_table = mmc_spi_of_match_table, |
1530 | }, | 1530 | }, |
1531 | .probe = mmc_spi_probe, | 1531 | .probe = mmc_spi_probe, |
1532 | .remove = mmc_spi_remove, | 1532 | .remove = __devexit_p(mmc_spi_remove), |
1533 | }; | 1533 | }; |
1534 | 1534 | ||
1535 | module_spi_driver(mmc_spi_driver); | 1535 | |
1536 | static int __init mmc_spi_init(void) | ||
1537 | { | ||
1538 | return spi_register_driver(&mmc_spi_driver); | ||
1539 | } | ||
1540 | module_init(mmc_spi_init); | ||
1541 | |||
1542 | |||
1543 | static void __exit mmc_spi_exit(void) | ||
1544 | { | ||
1545 | spi_unregister_driver(&mmc_spi_driver); | ||
1546 | } | ||
1547 | module_exit(mmc_spi_exit); | ||
1548 | |||
1536 | 1549 | ||
1537 | MODULE_AUTHOR("Mike Lavender, David Brownell, " | 1550 | MODULE_AUTHOR("Mike Lavender, David Brownell, " |
1538 | "Hans-Peter Nilsson, Jan Nikitenko"); | 1551 | "Hans-Peter Nilsson, Jan Nikitenko"); |
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 150772395cc..d8eac248ee4 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/device.h> | 15 | #include <linux/device.h> |
16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/slab.h> | ||
19 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
20 | #include <linux/err.h> | 19 | #include <linux/err.h> |
21 | #include <linux/highmem.h> | 20 | #include <linux/highmem.h> |
@@ -26,14 +25,10 @@ | |||
26 | #include <linux/clk.h> | 25 | #include <linux/clk.h> |
27 | #include <linux/scatterlist.h> | 26 | #include <linux/scatterlist.h> |
28 | #include <linux/gpio.h> | 27 | #include <linux/gpio.h> |
29 | #include <linux/of_gpio.h> | ||
30 | #include <linux/regulator/consumer.h> | 28 | #include <linux/regulator/consumer.h> |
31 | #include <linux/dmaengine.h> | 29 | #include <linux/dmaengine.h> |
32 | #include <linux/dma-mapping.h> | 30 | #include <linux/dma-mapping.h> |
33 | #include <linux/amba/mmci.h> | 31 | #include <linux/amba/mmci.h> |
34 | #include <linux/pm_runtime.h> | ||
35 | #include <linux/types.h> | ||
36 | #include <linux/pinctrl/consumer.h> | ||
37 | 32 | ||
38 | #include <asm/div64.h> | 33 | #include <asm/div64.h> |
39 | #include <asm/io.h> | 34 | #include <asm/io.h> |
@@ -57,8 +52,6 @@ static unsigned int fmax = 515633; | |||
57 | * @sdio: variant supports SDIO | 52 | * @sdio: variant supports SDIO |
58 | * @st_clkdiv: true if using a ST-specific clock divider algorithm | 53 | * @st_clkdiv: true if using a ST-specific clock divider algorithm |
59 | * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register | 54 | * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register |
60 | * @pwrreg_powerup: power up value for MMCIPOWER register | ||
61 | * @signal_direction: input/out direction of bus signals can be indicated | ||
62 | */ | 55 | */ |
63 | struct variant_data { | 56 | struct variant_data { |
64 | unsigned int clkreg; | 57 | unsigned int clkreg; |
@@ -69,22 +62,18 @@ struct variant_data { | |||
69 | bool sdio; | 62 | bool sdio; |
70 | bool st_clkdiv; | 63 | bool st_clkdiv; |
71 | bool blksz_datactrl16; | 64 | bool blksz_datactrl16; |
72 | u32 pwrreg_powerup; | ||
73 | bool signal_direction; | ||
74 | }; | 65 | }; |
75 | 66 | ||
76 | static struct variant_data variant_arm = { | 67 | static struct variant_data variant_arm = { |
77 | .fifosize = 16 * 4, | 68 | .fifosize = 16 * 4, |
78 | .fifohalfsize = 8 * 4, | 69 | .fifohalfsize = 8 * 4, |
79 | .datalength_bits = 16, | 70 | .datalength_bits = 16, |
80 | .pwrreg_powerup = MCI_PWR_UP, | ||
81 | }; | 71 | }; |
82 | 72 | ||
83 | static struct variant_data variant_arm_extended_fifo = { | 73 | static struct variant_data variant_arm_extended_fifo = { |
84 | .fifosize = 128 * 4, | 74 | .fifosize = 128 * 4, |
85 | .fifohalfsize = 64 * 4, | 75 | .fifohalfsize = 64 * 4, |
86 | .datalength_bits = 16, | 76 | .datalength_bits = 16, |
87 | .pwrreg_powerup = MCI_PWR_UP, | ||
88 | }; | 77 | }; |
89 | 78 | ||
90 | static struct variant_data variant_u300 = { | 79 | static struct variant_data variant_u300 = { |
@@ -93,19 +82,6 @@ static struct variant_data variant_u300 = { | |||
93 | .clkreg_enable = MCI_ST_U300_HWFCEN, | 82 | .clkreg_enable = MCI_ST_U300_HWFCEN, |
94 | .datalength_bits = 16, | 83 | .datalength_bits = 16, |
95 | .sdio = true, | 84 | .sdio = true, |
96 | .pwrreg_powerup = MCI_PWR_ON, | ||
97 | .signal_direction = true, | ||
98 | }; | ||
99 | |||
100 | static struct variant_data variant_nomadik = { | ||
101 | .fifosize = 16 * 4, | ||
102 | .fifohalfsize = 8 * 4, | ||
103 | .clkreg = MCI_CLK_ENABLE, | ||
104 | .datalength_bits = 24, | ||
105 | .sdio = true, | ||
106 | .st_clkdiv = true, | ||
107 | .pwrreg_powerup = MCI_PWR_ON, | ||
108 | .signal_direction = true, | ||
109 | }; | 85 | }; |
110 | 86 | ||
111 | static struct variant_data variant_ux500 = { | 87 | static struct variant_data variant_ux500 = { |
@@ -116,8 +92,6 @@ static struct variant_data variant_ux500 = { | |||
116 | .datalength_bits = 24, | 92 | .datalength_bits = 24, |
117 | .sdio = true, | 93 | .sdio = true, |
118 | .st_clkdiv = true, | 94 | .st_clkdiv = true, |
119 | .pwrreg_powerup = MCI_PWR_ON, | ||
120 | .signal_direction = true, | ||
121 | }; | 95 | }; |
122 | 96 | ||
123 | static struct variant_data variant_ux500v2 = { | 97 | static struct variant_data variant_ux500v2 = { |
@@ -129,35 +103,11 @@ static struct variant_data variant_ux500v2 = { | |||
129 | .sdio = true, | 103 | .sdio = true, |
130 | .st_clkdiv = true, | 104 | .st_clkdiv = true, |
131 | .blksz_datactrl16 = true, | 105 | .blksz_datactrl16 = true, |
132 | .pwrreg_powerup = MCI_PWR_ON, | ||
133 | .signal_direction = true, | ||
134 | }; | 106 | }; |
135 | 107 | ||
136 | /* | 108 | /* |
137 | * This must be called with host->lock held | 109 | * This must be called with host->lock held |
138 | */ | 110 | */ |
139 | static void mmci_write_clkreg(struct mmci_host *host, u32 clk) | ||
140 | { | ||
141 | if (host->clk_reg != clk) { | ||
142 | host->clk_reg = clk; | ||
143 | writel(clk, host->base + MMCICLOCK); | ||
144 | } | ||
145 | } | ||
146 | |||
147 | /* | ||
148 | * This must be called with host->lock held | ||
149 | */ | ||
150 | static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr) | ||
151 | { | ||
152 | if (host->pwr_reg != pwr) { | ||
153 | host->pwr_reg = pwr; | ||
154 | writel(pwr, host->base + MMCIPOWER); | ||
155 | } | ||
156 | } | ||
157 | |||
158 | /* | ||
159 | * This must be called with host->lock held | ||
160 | */ | ||
161 | static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired) | 111 | static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired) |
162 | { | 112 | { |
163 | struct variant_data *variant = host->variant; | 113 | struct variant_data *variant = host->variant; |
@@ -202,7 +152,7 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired) | |||
202 | if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) | 152 | if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) |
203 | clk |= MCI_ST_8BIT_BUS; | 153 | clk |= MCI_ST_8BIT_BUS; |
204 | 154 | ||
205 | mmci_write_clkreg(host, clk); | 155 | writel(clk, host->base + MMCICLOCK); |
206 | } | 156 | } |
207 | 157 | ||
208 | static void | 158 | static void |
@@ -215,10 +165,13 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq) | |||
215 | host->mrq = NULL; | 165 | host->mrq = NULL; |
216 | host->cmd = NULL; | 166 | host->cmd = NULL; |
217 | 167 | ||
168 | /* | ||
169 | * Need to drop the host lock here; mmc_request_done may call | ||
170 | * back into the driver... | ||
171 | */ | ||
172 | spin_unlock(&host->lock); | ||
218 | mmc_request_done(host->mmc, mrq); | 173 | mmc_request_done(host->mmc, mrq); |
219 | 174 | spin_lock(&host->lock); | |
220 | pm_runtime_mark_last_busy(mmc_dev(host->mmc)); | ||
221 | pm_runtime_put_autosuspend(mmc_dev(host->mmc)); | ||
222 | } | 175 | } |
223 | 176 | ||
224 | static void mmci_set_mask1(struct mmci_host *host, unsigned int mask) | 177 | static void mmci_set_mask1(struct mmci_host *host, unsigned int mask) |
@@ -262,7 +215,7 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data) | |||
262 | * no custom DMA interfaces are supported. | 215 | * no custom DMA interfaces are supported. |
263 | */ | 216 | */ |
264 | #ifdef CONFIG_DMA_ENGINE | 217 | #ifdef CONFIG_DMA_ENGINE |
265 | static void mmci_dma_setup(struct mmci_host *host) | 218 | static void __devinit mmci_dma_setup(struct mmci_host *host) |
266 | { | 219 | { |
267 | struct mmci_platform_data *plat = host->plat; | 220 | struct mmci_platform_data *plat = host->plat; |
268 | const char *rxname, *txname; | 221 | const char *rxname, *txname; |
@@ -338,7 +291,7 @@ static void mmci_dma_setup(struct mmci_host *host) | |||
338 | } | 291 | } |
339 | 292 | ||
340 | /* | 293 | /* |
341 | * This is used in or so inline it | 294 | * This is used in __devinit or __devexit so inline it |
342 | * so it can be discarded. | 295 | * so it can be discarded. |
343 | */ | 296 | */ |
344 | static inline void mmci_dma_release(struct mmci_host *host) | 297 | static inline void mmci_dma_release(struct mmci_host *host) |
@@ -415,12 +368,10 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, | |||
415 | .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, | 368 | .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, |
416 | .src_maxburst = variant->fifohalfsize >> 2, /* # of words */ | 369 | .src_maxburst = variant->fifohalfsize >> 2, /* # of words */ |
417 | .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */ | 370 | .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */ |
418 | .device_fc = false, | ||
419 | }; | 371 | }; |
420 | struct dma_chan *chan; | 372 | struct dma_chan *chan; |
421 | struct dma_device *device; | 373 | struct dma_device *device; |
422 | struct dma_async_tx_descriptor *desc; | 374 | struct dma_async_tx_descriptor *desc; |
423 | enum dma_data_direction buffer_dirn; | ||
424 | int nr_sg; | 375 | int nr_sg; |
425 | 376 | ||
426 | /* Check if next job is already prepared */ | 377 | /* Check if next job is already prepared */ |
@@ -434,12 +385,10 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, | |||
434 | } | 385 | } |
435 | 386 | ||
436 | if (data->flags & MMC_DATA_READ) { | 387 | if (data->flags & MMC_DATA_READ) { |
437 | conf.direction = DMA_DEV_TO_MEM; | 388 | conf.direction = DMA_FROM_DEVICE; |
438 | buffer_dirn = DMA_FROM_DEVICE; | ||
439 | chan = host->dma_rx_channel; | 389 | chan = host->dma_rx_channel; |
440 | } else { | 390 | } else { |
441 | conf.direction = DMA_MEM_TO_DEV; | 391 | conf.direction = DMA_TO_DEVICE; |
442 | buffer_dirn = DMA_TO_DEVICE; | ||
443 | chan = host->dma_tx_channel; | 392 | chan = host->dma_tx_channel; |
444 | } | 393 | } |
445 | 394 | ||
@@ -452,12 +401,12 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, | |||
452 | return -EINVAL; | 401 | return -EINVAL; |
453 | 402 | ||
454 | device = chan->device; | 403 | device = chan->device; |
455 | nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn); | 404 | nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction); |
456 | if (nr_sg == 0) | 405 | if (nr_sg == 0) |
457 | return -EINVAL; | 406 | return -EINVAL; |
458 | 407 | ||
459 | dmaengine_slave_config(chan, &conf); | 408 | dmaengine_slave_config(chan, &conf); |
460 | desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg, | 409 | desc = device->device_prep_slave_sg(chan, data->sg, nr_sg, |
461 | conf.direction, DMA_CTRL_ACK); | 410 | conf.direction, DMA_CTRL_ACK); |
462 | if (!desc) | 411 | if (!desc) |
463 | goto unmap_exit; | 412 | goto unmap_exit; |
@@ -475,7 +424,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, | |||
475 | unmap_exit: | 424 | unmap_exit: |
476 | if (!next) | 425 | if (!next) |
477 | dmaengine_terminate_all(chan); | 426 | dmaengine_terminate_all(chan); |
478 | dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn); | 427 | dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction); |
479 | return -ENOMEM; | 428 | return -ENOMEM; |
480 | } | 429 | } |
481 | 430 | ||
@@ -515,7 +464,7 @@ static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) | |||
515 | struct mmci_host_next *next = &host->next_data; | 464 | struct mmci_host_next *next = &host->next_data; |
516 | 465 | ||
517 | if (data->host_cookie && data->host_cookie != next->cookie) { | 466 | if (data->host_cookie && data->host_cookie != next->cookie) { |
518 | pr_warning("[%s] invalid cookie: data->host_cookie %d" | 467 | printk(KERN_WARNING "[%s] invalid cookie: data->host_cookie %d" |
519 | " host->next_data.cookie %d\n", | 468 | " host->next_data.cookie %d\n", |
520 | __func__, data->host_cookie, host->next_data.cookie); | 469 | __func__, data->host_cookie, host->next_data.cookie); |
521 | data->host_cookie = 0; | 470 | data->host_cookie = 0; |
@@ -580,7 +529,7 @@ static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq, | |||
580 | if (chan) { | 529 | if (chan) { |
581 | if (err) | 530 | if (err) |
582 | dmaengine_terminate_all(chan); | 531 | dmaengine_terminate_all(chan); |
583 | if (data->host_cookie) | 532 | if (err || data->host_cookie) |
584 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, | 533 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, |
585 | data->sg_len, dir); | 534 | data->sg_len, dir); |
586 | mrq->data->host_cookie = 0; | 535 | mrq->data->host_cookie = 0; |
@@ -653,33 +602,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) | |||
653 | if (data->flags & MMC_DATA_READ) | 602 | if (data->flags & MMC_DATA_READ) |
654 | datactrl |= MCI_DPSM_DIRECTION; | 603 | datactrl |= MCI_DPSM_DIRECTION; |
655 | 604 | ||
656 | /* The ST Micro variants has a special bit to enable SDIO */ | ||
657 | if (variant->sdio && host->mmc->card) | ||
658 | if (mmc_card_sdio(host->mmc->card)) { | ||
659 | /* | ||
660 | * The ST Micro variants has a special bit | ||
661 | * to enable SDIO. | ||
662 | */ | ||
663 | u32 clk; | ||
664 | |||
665 | datactrl |= MCI_ST_DPSM_SDIOEN; | ||
666 | |||
667 | /* | ||
668 | * The ST Micro variant for SDIO small write transfers | ||
669 | * needs to have clock H/W flow control disabled, | ||
670 | * otherwise the transfer will not start. The threshold | ||
671 | * depends on the rate of MCLK. | ||
672 | */ | ||
673 | if (data->flags & MMC_DATA_WRITE && | ||
674 | (host->size < 8 || | ||
675 | (host->size <= 8 && host->mclk > 50000000))) | ||
676 | clk = host->clk_reg & ~variant->clkreg_enable; | ||
677 | else | ||
678 | clk = host->clk_reg | variant->clkreg_enable; | ||
679 | |||
680 | mmci_write_clkreg(host, clk); | ||
681 | } | ||
682 | |||
683 | /* | 605 | /* |
684 | * Attempt to use DMA operation mode, if this | 606 | * Attempt to use DMA operation mode, if this |
685 | * should fail, fall back to PIO mode | 607 | * should fail, fall back to PIO mode |
@@ -708,6 +630,11 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) | |||
708 | irqmask = MCI_TXFIFOHALFEMPTYMASK; | 630 | irqmask = MCI_TXFIFOHALFEMPTYMASK; |
709 | } | 631 | } |
710 | 632 | ||
633 | /* The ST Micro variants has a special bit to enable SDIO */ | ||
634 | if (variant->sdio && host->mmc->card) | ||
635 | if (mmc_card_sdio(host->mmc->card)) | ||
636 | datactrl |= MCI_ST_DPSM_SDIOEN; | ||
637 | |||
711 | writel(datactrl, base + MMCIDATACTRL); | 638 | writel(datactrl, base + MMCIDATACTRL); |
712 | writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0); | 639 | writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0); |
713 | mmci_set_mask1(host, irqmask); | 640 | mmci_set_mask1(host, irqmask); |
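Both hunks above deal with the ST Micro SDIO support: the right simply sets MCI_ST_DPSM_SDIOEN, while the removed left-hand block additionally toggles clock H/W flow control, which must be off for small SDIO writes or the transfer never starts. The removed logic repackaged as a hypothetical helper, with field names and thresholds exactly as in the hunk; mmci_write_clkreg() is the cached write helper removed earlier in this diff:

        static void mmci_sdio_clk_flowctrl(struct mmci_host *host, struct mmc_data *data)
        {
                struct variant_data *variant = host->variant;
                u32 clk;

                if (!variant->sdio || !host->mmc->card || !mmc_card_sdio(host->mmc->card))
                        return;

                if ((data->flags & MMC_DATA_WRITE) &&
                    (host->size < 8 || (host->size <= 8 && host->mclk > 50000000)))
                        clk = host->clk_reg & ~variant->clkreg_enable;  /* disable H/W flow control */
                else
                        clk = host->clk_reg | variant->clkreg_enable;

                mmci_write_clkreg(host, clk);   /* only touches MMCICLOCK if the value changed */
        }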
@@ -854,24 +781,7 @@ static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int rema | |||
854 | if (count <= 0) | 781 | if (count <= 0) |
855 | break; | 782 | break; |
856 | 783 | ||
857 | /* | 784 | readsl(base + MMCIFIFO, ptr, count >> 2); |
858 | * SDIO especially may want to send something that is | ||
859 | * not divisible by 4 (as opposed to card sectors | ||
860 | * etc). Therefore make sure to always read the last bytes | ||
861 | * while only doing full 32-bit reads towards the FIFO. | ||
862 | */ | ||
863 | if (unlikely(count & 0x3)) { | ||
864 | if (count < 4) { | ||
865 | unsigned char buf[4]; | ||
866 | ioread32_rep(base + MMCIFIFO, buf, 1); | ||
867 | memcpy(ptr, buf, count); | ||
868 | } else { | ||
869 | ioread32_rep(base + MMCIFIFO, ptr, count >> 2); | ||
870 | count &= ~0x3; | ||
871 | } | ||
872 | } else { | ||
873 | ioread32_rep(base + MMCIFIFO, ptr, count >> 2); | ||
874 | } | ||
875 | 785 | ||
876 | ptr += count; | 786 | ptr += count; |
877 | remain -= count; | 787 | remain -= count; |
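The branch removed above exists because the FIFO only supports full 32-bit reads while SDIO may transfer lengths that are not a multiple of 4: whole words are streamed with ioread32_rep(), and a final 1-3 byte tail goes through a 4-byte bounce buffer. The same idea in isolation, as a hypothetical helper; the real code updates ptr and remain in the surrounding loop, so a sub-word tail is picked up on the next pass:

        static void mmci_fifo_read(void __iomem *fifo, char *ptr, unsigned int count)
        {
                if (count & 0x3) {
                        if (count < 4) {
                                unsigned char buf[4];

                                ioread32_rep(fifo, buf, 1);     /* pull one full word */
                                memcpy(ptr, buf, count);        /* keep only the valid bytes */
                                return;
                        }
                        count &= ~0x3;          /* leave the sub-word tail for the next pass */
                }
                ioread32_rep(fifo, ptr, count >> 2);
        }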
@@ -900,6 +810,23 @@ static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int rem | |||
900 | count = min(remain, maxcnt); | 810 | count = min(remain, maxcnt); |
901 | 811 | ||
902 | /* | 812 | /* |
813 | * The ST Micro variant for SDIO transfer sizes | ||
814 | * less then 8 bytes should have clock H/W flow | ||
815 | * control disabled. | ||
816 | */ | ||
817 | if (variant->sdio && | ||
818 | mmc_card_sdio(host->mmc->card)) { | ||
819 | if (count < 8) | ||
820 | writel(readl(host->base + MMCICLOCK) & | ||
821 | ~variant->clkreg_enable, | ||
822 | host->base + MMCICLOCK); | ||
823 | else | ||
824 | writel(readl(host->base + MMCICLOCK) | | ||
825 | variant->clkreg_enable, | ||
826 | host->base + MMCICLOCK); | ||
827 | } | ||
828 | |||
829 | /* | ||
903 | * SDIO especially may want to send something that is | 830 | * SDIO especially may want to send something that is |
904 | * not divisible by 4 (as opposed to card sectors | 831 | * not divisible by 4 (as opposed to card sectors |
905 | * etc), and the FIFO only accept full 32-bit writes. | 832 | * etc), and the FIFO only accept full 32-bit writes. |
@@ -907,7 +834,7 @@ static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int rem | |||
907 | * byte become a 32bit write, 7 bytes will be two | 834 | * byte become a 32bit write, 7 bytes will be two |
908 | * 32bit writes etc. | 835 | * 32bit writes etc. |
909 | */ | 836 | */ |
910 | iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2); | 837 | writesl(base + MMCIFIFO, ptr, (count + 3) >> 2); |
911 | 838 | ||
912 | ptr += count; | 839 | ptr += count; |
913 | remain -= count; | 840 | remain -= count; |
@@ -1063,8 +990,6 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
1063 | return; | 990 | return; |
1064 | } | 991 | } |
1065 | 992 | ||
1066 | pm_runtime_get_sync(mmc_dev(mmc)); | ||
1067 | |||
1068 | spin_lock_irqsave(&host->lock, flags); | 993 | spin_lock_irqsave(&host->lock, flags); |
1069 | 994 | ||
1070 | host->mrq = mrq; | 995 | host->mrq = mrq; |
@@ -1083,17 +1008,10 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
1083 | static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | 1008 | static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) |
1084 | { | 1009 | { |
1085 | struct mmci_host *host = mmc_priv(mmc); | 1010 | struct mmci_host *host = mmc_priv(mmc); |
1086 | struct variant_data *variant = host->variant; | ||
1087 | u32 pwr = 0; | 1011 | u32 pwr = 0; |
1088 | unsigned long flags; | 1012 | unsigned long flags; |
1089 | int ret; | 1013 | int ret; |
1090 | 1014 | ||
1091 | pm_runtime_get_sync(mmc_dev(mmc)); | ||
1092 | |||
1093 | if (host->plat->ios_handler && | ||
1094 | host->plat->ios_handler(mmc_dev(mmc), ios)) | ||
1095 | dev_err(mmc_dev(mmc), "platform ios_handler failed\n"); | ||
1096 | |||
1097 | switch (ios->power_mode) { | 1015 | switch (ios->power_mode) { |
1098 | case MMC_POWER_OFF: | 1016 | case MMC_POWER_OFF: |
1099 | if (host->vcc) | 1017 | if (host->vcc) |
@@ -1110,38 +1028,22 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
1110 | * power should be rare so we print an error | 1028 | * power should be rare so we print an error |
1111 | * and return here. | 1029 | * and return here. |
1112 | */ | 1030 | */ |
1113 | goto out; | 1031 | return; |
1114 | } | 1032 | } |
1115 | } | 1033 | } |
1116 | /* | 1034 | if (host->plat->vdd_handler) |
1117 | * The ST Micro variant doesn't have the PL180s MCI_PWR_UP | 1035 | pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd, |
1118 | * and instead uses MCI_PWR_ON so apply whatever value is | 1036 | ios->power_mode); |
1119 | * configured in the variant data. | 1037 | /* The ST version does not have this, fall through to POWER_ON */ |
1120 | */ | 1038 | if (host->hw_designer != AMBA_VENDOR_ST) { |
1121 | pwr |= variant->pwrreg_powerup; | 1039 | pwr |= MCI_PWR_UP; |
1122 | 1040 | break; | |
1123 | break; | 1041 | } |
1124 | case MMC_POWER_ON: | 1042 | case MMC_POWER_ON: |
1125 | pwr |= MCI_PWR_ON; | 1043 | pwr |= MCI_PWR_ON; |
1126 | break; | 1044 | break; |
1127 | } | 1045 | } |
1128 | 1046 | ||
1129 | if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) { | ||
1130 | /* | ||
1131 | * The ST Micro variant has some additional bits | ||
1132 | * indicating signal direction for the signals in | ||
1133 | * the SD/MMC bus and feedback-clock usage. | ||
1134 | */ | ||
1135 | pwr |= host->plat->sigdir; | ||
1136 | |||
1137 | if (ios->bus_width == MMC_BUS_WIDTH_4) | ||
1138 | pwr &= ~MCI_ST_DATA74DIREN; | ||
1139 | else if (ios->bus_width == MMC_BUS_WIDTH_1) | ||
1140 | pwr &= (~MCI_ST_DATA74DIREN & | ||
1141 | ~MCI_ST_DATA31DIREN & | ||
1142 | ~MCI_ST_DATA2DIREN); | ||
1143 | } | ||
1144 | |||
1145 | if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { | 1047 | if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { |
1146 | if (host->hw_designer != AMBA_VENDOR_ST) | 1048 | if (host->hw_designer != AMBA_VENDOR_ST) |
1147 | pwr |= MCI_ROD; | 1049 | pwr |= MCI_ROD; |
@@ -1157,13 +1059,13 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
1157 | spin_lock_irqsave(&host->lock, flags); | 1059 | spin_lock_irqsave(&host->lock, flags); |
1158 | 1060 | ||
1159 | mmci_set_clkreg(host, ios->clock); | 1061 | mmci_set_clkreg(host, ios->clock); |
1160 | mmci_write_pwrreg(host, pwr); | ||
1161 | 1062 | ||
1162 | spin_unlock_irqrestore(&host->lock, flags); | 1063 | if (host->pwr != pwr) { |
1064 | host->pwr = pwr; | ||
1065 | writel(pwr, host->base + MMCIPOWER); | ||
1066 | } | ||
1163 | 1067 | ||
1164 | out: | 1068 | spin_unlock_irqrestore(&host->lock, flags); |
1165 | pm_runtime_mark_last_busy(mmc_dev(mmc)); | ||
1166 | pm_runtime_put_autosuspend(mmc_dev(mmc)); | ||
1167 | } | 1069 | } |
1168 | 1070 | ||
1169 | static int mmci_get_ro(struct mmc_host *mmc) | 1071 | static int mmci_get_ro(struct mmc_host *mmc) |
@@ -1216,77 +1118,21 @@ static const struct mmc_host_ops mmci_ops = { | |||
1216 | .get_cd = mmci_get_cd, | 1118 | .get_cd = mmci_get_cd, |
1217 | }; | 1119 | }; |
1218 | 1120 | ||
1219 | #ifdef CONFIG_OF | 1121 | static int __devinit mmci_probe(struct amba_device *dev, |
1220 | static void mmci_dt_populate_generic_pdata(struct device_node *np, | ||
1221 | struct mmci_platform_data *pdata) | ||
1222 | { | ||
1223 | int bus_width = 0; | ||
1224 | |||
1225 | pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0); | ||
1226 | pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0); | ||
1227 | |||
1228 | if (of_get_property(np, "cd-inverted", NULL)) | ||
1229 | pdata->cd_invert = true; | ||
1230 | else | ||
1231 | pdata->cd_invert = false; | ||
1232 | |||
1233 | of_property_read_u32(np, "max-frequency", &pdata->f_max); | ||
1234 | if (!pdata->f_max) | ||
1235 | pr_warn("%s has no 'max-frequency' property\n", np->full_name); | ||
1236 | |||
1237 | if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL)) | ||
1238 | pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED; | ||
1239 | if (of_get_property(np, "mmc-cap-sd-highspeed", NULL)) | ||
1240 | pdata->capabilities |= MMC_CAP_SD_HIGHSPEED; | ||
1241 | |||
1242 | of_property_read_u32(np, "bus-width", &bus_width); | ||
1243 | switch (bus_width) { | ||
1244 | case 0 : | ||
1245 | /* No bus-width supplied. */ | ||
1246 | break; | ||
1247 | case 4 : | ||
1248 | pdata->capabilities |= MMC_CAP_4_BIT_DATA; | ||
1249 | break; | ||
1250 | case 8 : | ||
1251 | pdata->capabilities |= MMC_CAP_8_BIT_DATA; | ||
1252 | break; | ||
1253 | default : | ||
1254 | pr_warn("%s: Unsupported bus width\n", np->full_name); | ||
1255 | } | ||
1256 | } | ||
1257 | #else | ||
1258 | static void mmci_dt_populate_generic_pdata(struct device_node *np, | ||
1259 | struct mmci_platform_data *pdata) | ||
1260 | { | ||
1261 | return; | ||
1262 | } | ||
1263 | #endif | ||
1264 | |||
1265 | static int mmci_probe(struct amba_device *dev, | ||
1266 | const struct amba_id *id) | 1122 | const struct amba_id *id) |
1267 | { | 1123 | { |
1268 | struct mmci_platform_data *plat = dev->dev.platform_data; | 1124 | struct mmci_platform_data *plat = dev->dev.platform_data; |
1269 | struct device_node *np = dev->dev.of_node; | ||
1270 | struct variant_data *variant = id->data; | 1125 | struct variant_data *variant = id->data; |
1271 | struct mmci_host *host; | 1126 | struct mmci_host *host; |
1272 | struct mmc_host *mmc; | 1127 | struct mmc_host *mmc; |
1273 | int ret; | 1128 | int ret; |
1274 | 1129 | ||
1275 | /* Must have platform data or Device Tree. */ | 1130 | /* must have platform data */ |
1276 | if (!plat && !np) { | ||
1277 | dev_err(&dev->dev, "No plat data or DT found\n"); | ||
1278 | return -EINVAL; | ||
1279 | } | ||
1280 | |||
1281 | if (!plat) { | 1131 | if (!plat) { |
1282 | plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL); | 1132 | ret = -EINVAL; |
1283 | if (!plat) | 1133 | goto out; |
1284 | return -ENOMEM; | ||
1285 | } | 1134 | } |
1286 | 1135 | ||
1287 | if (np) | ||
1288 | mmci_dt_populate_generic_pdata(np, plat); | ||
1289 | |||
1290 | ret = amba_request_regions(dev, DRIVER_NAME); | 1136 | ret = amba_request_regions(dev, DRIVER_NAME); |
1291 | if (ret) | 1137 | if (ret) |
1292 | goto out; | 1138 | goto out; |
@@ -1316,7 +1162,7 @@ static int mmci_probe(struct amba_device *dev, | |||
1316 | goto host_free; | 1162 | goto host_free; |
1317 | } | 1163 | } |
1318 | 1164 | ||
1319 | ret = clk_prepare_enable(host->clk); | 1165 | ret = clk_enable(host->clk); |
1320 | if (ret) | 1166 | if (ret) |
1321 | goto clk_free; | 1167 | goto clk_free; |
1322 | 1168 | ||
@@ -1367,23 +1213,6 @@ static int mmci_probe(struct amba_device *dev, | |||
1367 | mmc->f_max = min(host->mclk, fmax); | 1213 | mmc->f_max = min(host->mclk, fmax); |
1368 | dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max); | 1214 | dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max); |
1369 | 1215 | ||
1370 | host->pinctrl = devm_pinctrl_get(&dev->dev); | ||
1371 | if (IS_ERR(host->pinctrl)) { | ||
1372 | ret = PTR_ERR(host->pinctrl); | ||
1373 | goto clk_disable; | ||
1374 | } | ||
1375 | |||
1376 | host->pins_default = pinctrl_lookup_state(host->pinctrl, | ||
1377 | PINCTRL_STATE_DEFAULT); | ||
1378 | |||
1379 | /* enable pins to be muxed in and configured */ | ||
1380 | if (!IS_ERR(host->pins_default)) { | ||
1381 | ret = pinctrl_select_state(host->pinctrl, host->pins_default); | ||
1382 | if (ret) | ||
1383 | dev_warn(&dev->dev, "could not set default pins\n"); | ||
1384 | } else | ||
1385 | dev_warn(&dev->dev, "could not get default pinstate\n"); | ||
1386 | |||
1387 | #ifdef CONFIG_REGULATOR | 1216 | #ifdef CONFIG_REGULATOR |
1388 | /* If we're using the regulator framework, try to fetch a regulator */ | 1217 | /* If we're using the regulator framework, try to fetch a regulator */ |
1389 | host->vcc = regulator_get(&dev->dev, "vmmc"); | 1218 | host->vcc = regulator_get(&dev->dev, "vmmc"); |
@@ -1408,7 +1237,6 @@ static int mmci_probe(struct amba_device *dev, | |||
1408 | if (host->vcc == NULL) | 1237 | if (host->vcc == NULL) |
1409 | mmc->ocr_avail = plat->ocr_mask; | 1238 | mmc->ocr_avail = plat->ocr_mask; |
1410 | mmc->caps = plat->capabilities; | 1239 | mmc->caps = plat->capabilities; |
1411 | mmc->caps2 = plat->capabilities2; | ||
1412 | 1240 | ||
1413 | /* | 1241 | /* |
1414 | * We can do SGIO | 1242 | * We can do SGIO |
@@ -1431,13 +1259,12 @@ static int mmci_probe(struct amba_device *dev, | |||
1431 | /* | 1259 | /* |
1432 | * Block size can be up to 2048 bytes, but must be a power of two. | 1260 | * Block size can be up to 2048 bytes, but must be a power of two. |
1433 | */ | 1261 | */ |
1434 | mmc->max_blk_size = 1 << 11; | 1262 | mmc->max_blk_size = 2048; |
1435 | 1263 | ||
1436 | /* | 1264 | /* |
1437 | * Limit the number of blocks transferred so that we don't overflow | 1265 | * No limit on the number of blocks transferred. |
1438 | * the maximum request size. | ||
1439 | */ | 1266 | */ |
1440 | mmc->max_blk_count = mmc->max_req_size >> 11; | 1267 | mmc->max_blk_count = mmc->max_req_size; |
1441 | 1268 | ||
1442 | spin_lock_init(&host->lock); | 1269 | spin_lock_init(&host->lock); |
1443 | 1270 | ||
@@ -1445,10 +1272,6 @@ static int mmci_probe(struct amba_device *dev, | |||
1445 | writel(0, host->base + MMCIMASK1); | 1272 | writel(0, host->base + MMCIMASK1); |
1446 | writel(0xfff, host->base + MMCICLEAR); | 1273 | writel(0xfff, host->base + MMCICLEAR); |
1447 | 1274 | ||
1448 | if (plat->gpio_cd == -EPROBE_DEFER) { | ||
1449 | ret = -EPROBE_DEFER; | ||
1450 | goto err_gpio_cd; | ||
1451 | } | ||
1452 | if (gpio_is_valid(plat->gpio_cd)) { | 1275 | if (gpio_is_valid(plat->gpio_cd)) { |
1453 | ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)"); | 1276 | ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)"); |
1454 | if (ret == 0) | 1277 | if (ret == 0) |
@@ -1472,10 +1295,6 @@ static int mmci_probe(struct amba_device *dev, | |||
1472 | if (ret >= 0) | 1295 | if (ret >= 0) |
1473 | host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd); | 1296 | host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd); |
1474 | } | 1297 | } |
1475 | if (plat->gpio_wp == -EPROBE_DEFER) { | ||
1476 | ret = -EPROBE_DEFER; | ||
1477 | goto err_gpio_wp; | ||
1478 | } | ||
1479 | if (gpio_is_valid(plat->gpio_wp)) { | 1298 | if (gpio_is_valid(plat->gpio_wp)) { |
1480 | ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)"); | 1299 | ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)"); |
1481 | if (ret == 0) | 1300 | if (ret == 0) |
@@ -1494,7 +1313,7 @@ static int mmci_probe(struct amba_device *dev, | |||
1494 | if (ret) | 1313 | if (ret) |
1495 | goto unmap; | 1314 | goto unmap; |
1496 | 1315 | ||
1497 | if (!dev->irq[1]) | 1316 | if (dev->irq[1] == NO_IRQ) |
1498 | host->singleirq = true; | 1317 | host->singleirq = true; |
1499 | else { | 1318 | else { |
1500 | ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, | 1319 | ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, |
@@ -1514,10 +1333,6 @@ static int mmci_probe(struct amba_device *dev, | |||
1514 | 1333 | ||
1515 | mmci_dma_setup(host); | 1334 | mmci_dma_setup(host); |
1516 | 1335 | ||
1517 | pm_runtime_set_autosuspend_delay(&dev->dev, 50); | ||
1518 | pm_runtime_use_autosuspend(&dev->dev); | ||
1519 | pm_runtime_put(&dev->dev); | ||
1520 | |||
1521 | mmc_add_host(mmc); | 1336 | mmc_add_host(mmc); |
1522 | 1337 | ||
1523 | return 0; | 1338 | return 0; |
@@ -1535,7 +1350,7 @@ static int mmci_probe(struct amba_device *dev, | |||
1535 | err_gpio_cd: | 1350 | err_gpio_cd: |
1536 | iounmap(host->base); | 1351 | iounmap(host->base); |
1537 | clk_disable: | 1352 | clk_disable: |
1538 | clk_disable_unprepare(host->clk); | 1353 | clk_disable(host->clk); |
1539 | clk_free: | 1354 | clk_free: |
1540 | clk_put(host->clk); | 1355 | clk_put(host->clk); |
1541 | host_free: | 1356 | host_free: |
@@ -1546,7 +1361,7 @@ static int mmci_probe(struct amba_device *dev, | |||
1546 | return ret; | 1361 | return ret; |
1547 | } | 1362 | } |
1548 | 1363 | ||
1549 | static int mmci_remove(struct amba_device *dev) | 1364 | static int __devexit mmci_remove(struct amba_device *dev) |
1550 | { | 1365 | { |
1551 | struct mmc_host *mmc = amba_get_drvdata(dev); | 1366 | struct mmc_host *mmc = amba_get_drvdata(dev); |
1552 | 1367 | ||
@@ -1555,12 +1370,6 @@ static int mmci_remove(struct amba_device *dev) | |||
1555 | if (mmc) { | 1370 | if (mmc) { |
1556 | struct mmci_host *host = mmc_priv(mmc); | 1371 | struct mmci_host *host = mmc_priv(mmc); |
1557 | 1372 | ||
1558 | /* | ||
1559 | * Undo pm_runtime_put() in probe. We use the _sync | ||
1560 | * version here so that we can access the primecell. | ||
1561 | */ | ||
1562 | pm_runtime_get_sync(&dev->dev); | ||
1563 | |||
1564 | mmc_remove_host(mmc); | 1373 | mmc_remove_host(mmc); |
1565 | 1374 | ||
1566 | writel(0, host->base + MMCIMASK0); | 1375 | writel(0, host->base + MMCIMASK0); |
@@ -1582,7 +1391,7 @@ static int mmci_remove(struct amba_device *dev) | |||
1582 | gpio_free(host->gpio_cd); | 1391 | gpio_free(host->gpio_cd); |
1583 | 1392 | ||
1584 | iounmap(host->base); | 1393 | iounmap(host->base); |
1585 | clk_disable_unprepare(host->clk); | 1394 | clk_disable(host->clk); |
1586 | clk_put(host->clk); | 1395 | clk_put(host->clk); |
1587 | 1396 | ||
1588 | if (host->vcc) | 1397 | if (host->vcc) |
@@ -1597,49 +1406,43 @@ static int mmci_remove(struct amba_device *dev) | |||
1597 | return 0; | 1406 | return 0; |
1598 | } | 1407 | } |
1599 | 1408 | ||
1600 | #ifdef CONFIG_SUSPEND | 1409 | #ifdef CONFIG_PM |
1601 | static int mmci_suspend(struct device *dev) | 1410 | static int mmci_suspend(struct amba_device *dev, pm_message_t state) |
1602 | { | 1411 | { |
1603 | struct amba_device *adev = to_amba_device(dev); | 1412 | struct mmc_host *mmc = amba_get_drvdata(dev); |
1604 | struct mmc_host *mmc = amba_get_drvdata(adev); | ||
1605 | int ret = 0; | 1413 | int ret = 0; |
1606 | 1414 | ||
1607 | if (mmc) { | 1415 | if (mmc) { |
1608 | struct mmci_host *host = mmc_priv(mmc); | 1416 | struct mmci_host *host = mmc_priv(mmc); |
1609 | 1417 | ||
1610 | ret = mmc_suspend_host(mmc); | 1418 | ret = mmc_suspend_host(mmc); |
1611 | if (ret == 0) { | 1419 | if (ret == 0) |
1612 | pm_runtime_get_sync(dev); | ||
1613 | writel(0, host->base + MMCIMASK0); | 1420 | writel(0, host->base + MMCIMASK0); |
1614 | } | ||
1615 | } | 1421 | } |
1616 | 1422 | ||
1617 | return ret; | 1423 | return ret; |
1618 | } | 1424 | } |
1619 | 1425 | ||
1620 | static int mmci_resume(struct device *dev) | 1426 | static int mmci_resume(struct amba_device *dev) |
1621 | { | 1427 | { |
1622 | struct amba_device *adev = to_amba_device(dev); | 1428 | struct mmc_host *mmc = amba_get_drvdata(dev); |
1623 | struct mmc_host *mmc = amba_get_drvdata(adev); | ||
1624 | int ret = 0; | 1429 | int ret = 0; |
1625 | 1430 | ||
1626 | if (mmc) { | 1431 | if (mmc) { |
1627 | struct mmci_host *host = mmc_priv(mmc); | 1432 | struct mmci_host *host = mmc_priv(mmc); |
1628 | 1433 | ||
1629 | writel(MCI_IRQENABLE, host->base + MMCIMASK0); | 1434 | writel(MCI_IRQENABLE, host->base + MMCIMASK0); |
1630 | pm_runtime_put(dev); | ||
1631 | 1435 | ||
1632 | ret = mmc_resume_host(mmc); | 1436 | ret = mmc_resume_host(mmc); |
1633 | } | 1437 | } |
1634 | 1438 | ||
1635 | return ret; | 1439 | return ret; |
1636 | } | 1440 | } |
1441 | #else | ||
1442 | #define mmci_suspend NULL | ||
1443 | #define mmci_resume NULL | ||
1637 | #endif | 1444 | #endif |
1638 | 1445 | ||
1639 | static const struct dev_pm_ops mmci_dev_pm_ops = { | ||
1640 | SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume) | ||
1641 | }; | ||
1642 | |||
1643 | static struct amba_id mmci_ids[] = { | 1446 | static struct amba_id mmci_ids[] = { |
1644 | { | 1447 | { |
1645 | .id = 0x00041180, | 1448 | .id = 0x00041180, |
@@ -1663,11 +1466,6 @@ static struct amba_id mmci_ids[] = { | |||
1663 | .data = &variant_u300, | 1466 | .data = &variant_u300, |
1664 | }, | 1467 | }, |
1665 | { | 1468 | { |
1666 | .id = 0x10180180, | ||
1667 | .mask = 0xf0ffffff, | ||
1668 | .data = &variant_nomadik, | ||
1669 | }, | ||
1670 | { | ||
1671 | .id = 0x00280180, | 1469 | .id = 0x00280180, |
1672 | .mask = 0x00ffffff, | 1470 | .mask = 0x00ffffff, |
1673 | .data = &variant_u300, | 1471 | .data = &variant_u300, |
@@ -1685,20 +1483,29 @@ static struct amba_id mmci_ids[] = { | |||
1685 | { 0, 0 }, | 1483 | { 0, 0 }, |
1686 | }; | 1484 | }; |
1687 | 1485 | ||
1688 | MODULE_DEVICE_TABLE(amba, mmci_ids); | ||
1689 | |||
1690 | static struct amba_driver mmci_driver = { | 1486 | static struct amba_driver mmci_driver = { |
1691 | .drv = { | 1487 | .drv = { |
1692 | .name = DRIVER_NAME, | 1488 | .name = DRIVER_NAME, |
1693 | .pm = &mmci_dev_pm_ops, | ||
1694 | }, | 1489 | }, |
1695 | .probe = mmci_probe, | 1490 | .probe = mmci_probe, |
1696 | .remove = mmci_remove, | 1491 | .remove = __devexit_p(mmci_remove), |
1492 | .suspend = mmci_suspend, | ||
1493 | .resume = mmci_resume, | ||
1697 | .id_table = mmci_ids, | 1494 | .id_table = mmci_ids, |
1698 | }; | 1495 | }; |
1699 | 1496 | ||
1700 | module_amba_driver(mmci_driver); | 1497 | static int __init mmci_init(void) |
1498 | { | ||
1499 | return amba_driver_register(&mmci_driver); | ||
1500 | } | ||
1501 | |||
1502 | static void __exit mmci_exit(void) | ||
1503 | { | ||
1504 | amba_driver_unregister(&mmci_driver); | ||
1505 | } | ||
1701 | 1506 | ||
1507 | module_init(mmci_init); | ||
1508 | module_exit(mmci_exit); | ||
1702 | module_param(fmax, uint, 0444); | 1509 | module_param(fmax, uint, 0444); |
1703 | 1510 | ||
1704 | MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver"); | 1511 | MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver"); |
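The mmci.c hunks above swap the upstream dev_pm_ops / module_amba_driver() registration for legacy amba-bus suspend/resume hooks and an explicit init/exit pair. For reference, a minimal sketch of the upstream style follows; apart from the PM macros, the amba/mmc helpers and the driver-struct layout, every name here is an illustrative placeholder, not code from this patch.

#include <linux/amba/bus.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/pm.h>

/* Sleep callbacks take a struct device *; the AMBA device and its
 * driver data are recovered through the bus helpers. */
static int foo_suspend(struct device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(to_amba_device(dev));

	return mmc ? mmc_suspend_host(mmc) : 0;
}

static int foo_resume(struct device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(to_amba_device(dev));

	return mmc ? mmc_resume_host(mmc) : 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};

static struct amba_driver foo_driver = {
	.drv = {
		.name	= "foo",
		.pm	= &foo_pm_ops,	/* replaces bus-level .suspend/.resume */
	},
	/* .probe, .remove and .id_table as in mmci.c above */
};

/* module_amba_driver() generates the module_init()/module_exit()
 * registration boilerplate spelled out on the right-hand side above. */
module_amba_driver(foo_driver);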
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index d34d8c0add8..79e4143ab9d 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h | |||
@@ -13,6 +13,16 @@ | |||
13 | #define MCI_PWR_ON 0x03 | 13 | #define MCI_PWR_ON 0x03 |
14 | #define MCI_OD (1 << 6) | 14 | #define MCI_OD (1 << 6) |
15 | #define MCI_ROD (1 << 7) | 15 | #define MCI_ROD (1 << 7) |
16 | /* | ||
17 | * The ST Micro version does not have ROD and reuses the voltage registers | ||
18 | * for direction settings | ||
19 | */ | ||
20 | #define MCI_ST_DATA2DIREN (1 << 2) | ||
21 | #define MCI_ST_CMDDIREN (1 << 3) | ||
22 | #define MCI_ST_DATA0DIREN (1 << 4) | ||
23 | #define MCI_ST_DATA31DIREN (1 << 5) | ||
24 | #define MCI_ST_FBCLKEN (1 << 7) | ||
25 | #define MCI_ST_DATA74DIREN (1 << 8) | ||
16 | 26 | ||
17 | #define MMCICLOCK 0x004 | 27 | #define MMCICLOCK 0x004 |
18 | #define MCI_CLK_ENABLE (1 << 8) | 28 | #define MCI_CLK_ENABLE (1 << 8) |
@@ -150,7 +160,7 @@ | |||
150 | (MCI_RXFIFOHALFFULLMASK | MCI_RXDATAAVLBLMASK | \ | 160 | (MCI_RXFIFOHALFFULLMASK | MCI_RXDATAAVLBLMASK | \ |
151 | MCI_TXFIFOHALFEMPTYMASK) | 161 | MCI_TXFIFOHALFEMPTYMASK) |
152 | 162 | ||
153 | #define NR_SG 128 | 163 | #define NR_SG 16 |
154 | 164 | ||
155 | struct clk; | 165 | struct clk; |
156 | struct variant_data; | 166 | struct variant_data; |
@@ -179,8 +189,7 @@ struct mmci_host { | |||
179 | 189 | ||
180 | unsigned int mclk; | 190 | unsigned int mclk; |
181 | unsigned int cclk; | 191 | unsigned int cclk; |
182 | u32 pwr_reg; | 192 | u32 pwr; |
183 | u32 clk_reg; | ||
184 | struct mmci_platform_data *plat; | 193 | struct mmci_platform_data *plat; |
185 | struct variant_data *variant; | 194 | struct variant_data *variant; |
186 | 195 | ||
@@ -195,10 +204,6 @@ struct mmci_host { | |||
195 | unsigned int size; | 204 | unsigned int size; |
196 | struct regulator *vcc; | 205 | struct regulator *vcc; |
197 | 206 | ||
198 | /* pinctrl handles */ | ||
199 | struct pinctrl *pinctrl; | ||
200 | struct pinctrl_state *pins_default; | ||
201 | |||
202 | #ifdef CONFIG_DMA_ENGINE | 207 | #ifdef CONFIG_DMA_ENGINE |
203 | /* DMA stuff */ | 208 | /* DMA stuff */ |
204 | struct dma_chan *dma_current; | 209 | struct dma_chan *dma_current; |
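The MCI_ST_* definitions added above are direction-enable bits for the ST Micro variant; as the new comment notes, they live in the voltage (power) register rather than behind a dedicated ROD bit. A hedged sketch of how such bits end up in the MMCIPOWER value (the function name is illustrative; the register and bit names come from mmci.h):

#include <linux/io.h>

#include "mmci.h"

/* Sketch only (ST variants): fold the direction-enable bits above into
 * the value written to MMCIPOWER; 'host' fields are as declared in mmci.h. */
static void foo_enable_st_sigdir(struct mmci_host *host)
{
	u32 pwr = MCI_PWR_ON
		| MCI_ST_CMDDIREN | MCI_ST_DATA0DIREN
		| MCI_ST_DATA2DIREN | MCI_ST_DATA31DIREN;

	writel(pwr, host->base + MMCIPOWER);
}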
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c index 7c0af0e8004..a4c865a5286 100644 --- a/drivers/mmc/host/msm_sdcc.c +++ b/drivers/mmc/host/msm_sdcc.c | |||
@@ -42,7 +42,7 @@ | |||
42 | #include <asm/div64.h> | 42 | #include <asm/div64.h> |
43 | #include <asm/sizes.h> | 43 | #include <asm/sizes.h> |
44 | 44 | ||
45 | #include <linux/platform_data/mmc-msm_sdcc.h> | 45 | #include <mach/mmc.h> |
46 | #include <mach/msm_iomap.h> | 46 | #include <mach/msm_iomap.h> |
47 | #include <mach/dma.h> | 47 | #include <mach/dma.h> |
48 | #include <mach/clk.h> | 48 | #include <mach/clk.h> |
@@ -213,8 +213,7 @@ msmsdcc_dma_exec_func(struct msm_dmov_cmd *cmd) | |||
213 | msmsdcc_writel(host, host->cmd_timeout, MMCIDATATIMER); | 213 | msmsdcc_writel(host, host->cmd_timeout, MMCIDATATIMER); |
214 | msmsdcc_writel(host, (unsigned int)host->curr.xfer_size, | 214 | msmsdcc_writel(host, (unsigned int)host->curr.xfer_size, |
215 | MMCIDATALENGTH); | 215 | MMCIDATALENGTH); |
216 | msmsdcc_writel(host, (msmsdcc_readl(host, MMCIMASK0) & | 216 | msmsdcc_writel(host, host->cmd_pio_irqmask, MMCIMASK1); |
217 | (~MCI_IRQ_PIO)) | host->cmd_pio_irqmask, MMCIMASK0); | ||
218 | msmsdcc_writel(host, host->cmd_datactrl, MMCIDATACTRL); | 217 | msmsdcc_writel(host, host->cmd_datactrl, MMCIDATACTRL); |
219 | 218 | ||
220 | if (host->cmd_cmd) { | 219 | if (host->cmd_cmd) { |
@@ -389,7 +388,7 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data) | |||
389 | n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg, | 388 | n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg, |
390 | host->dma.num_ents, host->dma.dir); | 389 | host->dma.num_ents, host->dma.dir); |
391 | if (n == 0) { | 390 | if (n == 0) { |
392 | pr_err("%s: Unable to map in all sg elements\n", | 391 | printk(KERN_ERR "%s: Unable to map in all sg elements\n", |
393 | mmc_hostname(host->mmc)); | 392 | mmc_hostname(host->mmc)); |
394 | host->dma.sg = NULL; | 393 | host->dma.sg = NULL; |
395 | host->dma.num_ents = 0; | 394 | host->dma.num_ents = 0; |
@@ -475,7 +474,7 @@ msmsdcc_start_command_deferred(struct msmsdcc_host *host, | |||
475 | *c |= MCI_CSPM_MCIABORT; | 474 | *c |= MCI_CSPM_MCIABORT; |
476 | 475 | ||
477 | if (host->curr.cmd != NULL) { | 476 | if (host->curr.cmd != NULL) { |
478 | pr_err("%s: Overlapping command requests\n", | 477 | printk(KERN_ERR "%s: Overlapping command requests\n", |
479 | mmc_hostname(host->mmc)); | 478 | mmc_hostname(host->mmc)); |
480 | } | 479 | } |
481 | host->curr.cmd = cmd; | 480 | host->curr.cmd = cmd; |
@@ -544,9 +543,7 @@ msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data, | |||
544 | 543 | ||
545 | msmsdcc_writel(host, host->curr.xfer_size, MMCIDATALENGTH); | 544 | msmsdcc_writel(host, host->curr.xfer_size, MMCIDATALENGTH); |
546 | 545 | ||
547 | msmsdcc_writel(host, (msmsdcc_readl(host, MMCIMASK0) & | 546 | msmsdcc_writel(host, pio_irqmask, MMCIMASK1); |
548 | (~MCI_IRQ_PIO)) | pio_irqmask, MMCIMASK0); | ||
549 | |||
550 | msmsdcc_writel(host, datactrl, MMCIDATACTRL); | 547 | msmsdcc_writel(host, datactrl, MMCIDATACTRL); |
551 | 548 | ||
552 | if (cmd) { | 549 | if (cmd) { |
@@ -662,13 +659,8 @@ msmsdcc_pio_irq(int irq, void *dev_id) | |||
662 | { | 659 | { |
663 | struct msmsdcc_host *host = dev_id; | 660 | struct msmsdcc_host *host = dev_id; |
664 | uint32_t status; | 661 | uint32_t status; |
665 | u32 mci_mask0; | ||
666 | 662 | ||
667 | status = msmsdcc_readl(host, MMCISTATUS); | 663 | status = msmsdcc_readl(host, MMCISTATUS); |
668 | mci_mask0 = msmsdcc_readl(host, MMCIMASK0); | ||
669 | |||
670 | if (((mci_mask0 & status) & MCI_IRQ_PIO) == 0) | ||
671 | return IRQ_NONE; | ||
672 | 664 | ||
673 | do { | 665 | do { |
674 | unsigned long flags; | 666 | unsigned long flags; |
@@ -689,8 +681,8 @@ msmsdcc_pio_irq(int irq, void *dev_id) | |||
689 | 681 | ||
690 | /* Map the current scatter buffer */ | 682 | /* Map the current scatter buffer */ |
691 | local_irq_save(flags); | 683 | local_irq_save(flags); |
692 | buffer = kmap_atomic(sg_page(host->pio.sg)) | 684 | buffer = kmap_atomic(sg_page(host->pio.sg), |
693 | + host->pio.sg->offset; | 685 | KM_BIO_SRC_IRQ) + host->pio.sg->offset; |
694 | buffer += host->pio.sg_off; | 686 | buffer += host->pio.sg_off; |
695 | remain = host->pio.sg->length - host->pio.sg_off; | 687 | remain = host->pio.sg->length - host->pio.sg_off; |
696 | len = 0; | 688 | len = 0; |
@@ -700,7 +692,7 @@ msmsdcc_pio_irq(int irq, void *dev_id) | |||
700 | len = msmsdcc_pio_write(host, buffer, remain, status); | 692 | len = msmsdcc_pio_write(host, buffer, remain, status); |
701 | 693 | ||
702 | /* Unmap the buffer */ | 694 | /* Unmap the buffer */ |
703 | kunmap_atomic(buffer); | 695 | kunmap_atomic(buffer, KM_BIO_SRC_IRQ); |
704 | local_irq_restore(flags); | 696 | local_irq_restore(flags); |
705 | 697 | ||
706 | host->pio.sg_off += len; | 698 | host->pio.sg_off += len; |
@@ -727,12 +719,10 @@ msmsdcc_pio_irq(int irq, void *dev_id) | |||
727 | } while (1); | 719 | } while (1); |
728 | 720 | ||
729 | if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE) | 721 | if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE) |
730 | msmsdcc_writel(host, (mci_mask0 & (~MCI_IRQ_PIO)) | | 722 | msmsdcc_writel(host, MCI_RXDATAAVLBLMASK, MMCIMASK1); |
731 | MCI_RXDATAAVLBLMASK, MMCIMASK0); | ||
732 | 723 | ||
733 | if (!host->curr.xfer_remain) | 724 | if (!host->curr.xfer_remain) |
734 | msmsdcc_writel(host, (mci_mask0 & (~MCI_IRQ_PIO)) | 0, | 725 | msmsdcc_writel(host, 0, MMCIMASK1); |
735 | MMCIMASK0); | ||
736 | 726 | ||
737 | return IRQ_HANDLED; | 727 | return IRQ_HANDLED; |
738 | } | 728 | } |
@@ -864,8 +854,6 @@ msmsdcc_irq(int irq, void *dev_id) | |||
864 | do { | 854 | do { |
865 | status = msmsdcc_readl(host, MMCISTATUS); | 855 | status = msmsdcc_readl(host, MMCISTATUS); |
866 | status &= msmsdcc_readl(host, MMCIMASK0); | 856 | status &= msmsdcc_readl(host, MMCIMASK0); |
867 | if ((status & (~MCI_IRQ_PIO)) == 0) | ||
868 | break; | ||
869 | msmsdcc_writel(host, status, MMCICLEAR); | 857 | msmsdcc_writel(host, status, MMCICLEAR); |
870 | 858 | ||
871 | if (status & MCI_SDIOINTR) | 859 | if (status & MCI_SDIOINTR) |
@@ -951,7 +939,7 @@ static void msmsdcc_setup_gpio(struct msmsdcc_host *host, bool enable) | |||
951 | struct msm_mmc_gpio_data *curr; | 939 | struct msm_mmc_gpio_data *curr; |
952 | int i, rc = 0; | 940 | int i, rc = 0; |
953 | 941 | ||
954 | if (!host->plat->gpio_data || host->gpio_config_status == enable) | 942 | if (!host->plat->gpio_data && host->gpio_config_status == enable) |
955 | return; | 943 | return; |
956 | 944 | ||
957 | curr = host->plat->gpio_data; | 945 | curr = host->plat->gpio_data; |
@@ -1064,19 +1052,10 @@ static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable) | |||
1064 | spin_unlock_irqrestore(&host->lock, flags); | 1052 | spin_unlock_irqrestore(&host->lock, flags); |
1065 | } | 1053 | } |
1066 | 1054 | ||
1067 | static void msmsdcc_init_card(struct mmc_host *mmc, struct mmc_card *card) | ||
1068 | { | ||
1069 | struct msmsdcc_host *host = mmc_priv(mmc); | ||
1070 | |||
1071 | if (host->plat->init_card) | ||
1072 | host->plat->init_card(card); | ||
1073 | } | ||
1074 | |||
1075 | static const struct mmc_host_ops msmsdcc_ops = { | 1055 | static const struct mmc_host_ops msmsdcc_ops = { |
1076 | .request = msmsdcc_request, | 1056 | .request = msmsdcc_request, |
1077 | .set_ios = msmsdcc_set_ios, | 1057 | .set_ios = msmsdcc_set_ios, |
1078 | .enable_sdio_irq = msmsdcc_enable_sdio_irq, | 1058 | .enable_sdio_irq = msmsdcc_enable_sdio_irq, |
1079 | .init_card = msmsdcc_init_card, | ||
1080 | }; | 1059 | }; |
1081 | 1060 | ||
1082 | static void | 1061 | static void |
@@ -1113,7 +1092,7 @@ msmsdcc_platform_status_irq(int irq, void *dev_id) | |||
1113 | { | 1092 | { |
1114 | struct msmsdcc_host *host = dev_id; | 1093 | struct msmsdcc_host *host = dev_id; |
1115 | 1094 | ||
1116 | pr_debug("%s: %d\n", __func__, irq); | 1095 | printk(KERN_DEBUG "%s: %d\n", __func__, irq); |
1117 | msmsdcc_check_status((unsigned long) host); | 1096 | msmsdcc_check_status((unsigned long) host); |
1118 | return IRQ_HANDLED; | 1097 | return IRQ_HANDLED; |
1119 | } | 1098 | } |
@@ -1123,7 +1102,7 @@ msmsdcc_status_notify_cb(int card_present, void *dev_id) | |||
1123 | { | 1102 | { |
1124 | struct msmsdcc_host *host = dev_id; | 1103 | struct msmsdcc_host *host = dev_id; |
1125 | 1104 | ||
1126 | pr_debug("%s: card_present %d\n", mmc_hostname(host->mmc), | 1105 | printk(KERN_DEBUG "%s: card_present %d\n", mmc_hostname(host->mmc), |
1127 | card_present); | 1106 | card_present); |
1128 | msmsdcc_check_status((unsigned long) host); | 1107 | msmsdcc_check_status((unsigned long) host); |
1129 | } | 1108 | } |
@@ -1171,6 +1150,7 @@ msmsdcc_probe(struct platform_device *pdev) | |||
1171 | struct msmsdcc_host *host; | 1150 | struct msmsdcc_host *host; |
1172 | struct mmc_host *mmc; | 1151 | struct mmc_host *mmc; |
1173 | struct resource *cmd_irqres = NULL; | 1152 | struct resource *cmd_irqres = NULL; |
1153 | struct resource *pio_irqres = NULL; | ||
1174 | struct resource *stat_irqres = NULL; | 1154 | struct resource *stat_irqres = NULL; |
1175 | struct resource *memres = NULL; | 1155 | struct resource *memres = NULL; |
1176 | struct resource *dmares = NULL; | 1156 | struct resource *dmares = NULL; |
@@ -1195,10 +1175,12 @@ msmsdcc_probe(struct platform_device *pdev) | |||
1195 | dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0); | 1175 | dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0); |
1196 | cmd_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, | 1176 | cmd_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, |
1197 | "cmd_irq"); | 1177 | "cmd_irq"); |
1178 | pio_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, | ||
1179 | "pio_irq"); | ||
1198 | stat_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, | 1180 | stat_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, |
1199 | "status_irq"); | 1181 | "status_irq"); |
1200 | 1182 | ||
1201 | if (!cmd_irqres || !memres) { | 1183 | if (!cmd_irqres || !pio_irqres || !memres) { |
1202 | pr_err("%s: Invalid resource\n", __func__); | 1184 | pr_err("%s: Invalid resource\n", __func__); |
1203 | return -ENXIO; | 1185 | return -ENXIO; |
1204 | } | 1186 | } |
@@ -1218,20 +1200,17 @@ msmsdcc_probe(struct platform_device *pdev) | |||
1218 | host->plat = plat; | 1200 | host->plat = plat; |
1219 | host->mmc = mmc; | 1201 | host->mmc = mmc; |
1220 | host->curr.cmd = NULL; | 1202 | host->curr.cmd = NULL; |
1221 | init_timer(&host->busclk_timer); | ||
1222 | host->busclk_timer.data = (unsigned long) host; | ||
1223 | host->busclk_timer.function = msmsdcc_busclk_expired; | ||
1224 | |||
1225 | 1203 | ||
1226 | host->cmdpoll = 1; | 1204 | host->cmdpoll = 1; |
1227 | 1205 | ||
1228 | host->base = ioremap(memres->start, PAGE_SIZE); | 1206 | host->base = ioremap(memres->start, PAGE_SIZE); |
1229 | if (!host->base) { | 1207 | if (!host->base) { |
1230 | ret = -ENOMEM; | 1208 | ret = -ENOMEM; |
1231 | goto host_free; | 1209 | goto out; |
1232 | } | 1210 | } |
1233 | 1211 | ||
1234 | host->cmd_irqres = cmd_irqres; | 1212 | host->cmd_irqres = cmd_irqres; |
1213 | host->pio_irqres = pio_irqres; | ||
1235 | host->memres = memres; | 1214 | host->memres = memres; |
1236 | host->dmares = dmares; | 1215 | host->dmares = dmares; |
1237 | spin_lock_init(&host->lock); | 1216 | spin_lock_init(&host->lock); |
@@ -1242,19 +1221,13 @@ msmsdcc_probe(struct platform_device *pdev) | |||
1242 | /* | 1221 | /* |
1243 | * Setup DMA | 1222 | * Setup DMA |
1244 | */ | 1223 | */ |
1245 | if (host->dmares) { | 1224 | msmsdcc_init_dma(host); |
1246 | ret = msmsdcc_init_dma(host); | ||
1247 | if (ret) | ||
1248 | goto ioremap_free; | ||
1249 | } else { | ||
1250 | host->dma.channel = -1; | ||
1251 | } | ||
1252 | 1225 | ||
1253 | /* Get our clocks */ | 1226 | /* Get our clocks */ |
1254 | host->pclk = clk_get(&pdev->dev, "sdc_pclk"); | 1227 | host->pclk = clk_get(&pdev->dev, "sdc_pclk"); |
1255 | if (IS_ERR(host->pclk)) { | 1228 | if (IS_ERR(host->pclk)) { |
1256 | ret = PTR_ERR(host->pclk); | 1229 | ret = PTR_ERR(host->pclk); |
1257 | goto dma_free; | 1230 | goto host_free; |
1258 | } | 1231 | } |
1259 | 1232 | ||
1260 | host->clk = clk_get(&pdev->dev, "sdc_clk"); | 1233 | host->clk = clk_get(&pdev->dev, "sdc_clk"); |
@@ -1263,17 +1236,17 @@ msmsdcc_probe(struct platform_device *pdev) | |||
1263 | goto pclk_put; | 1236 | goto pclk_put; |
1264 | } | 1237 | } |
1265 | 1238 | ||
1266 | ret = clk_set_rate(host->clk, msmsdcc_fmin); | ||
1267 | if (ret) { | ||
1268 | pr_err("%s: Clock rate set failed (%d)\n", __func__, ret); | ||
1269 | goto clk_put; | ||
1270 | } | ||
1271 | |||
1272 | /* Enable clocks */ | 1239 | /* Enable clocks */ |
1273 | ret = msmsdcc_enable_clocks(host); | 1240 | ret = msmsdcc_enable_clocks(host); |
1274 | if (ret) | 1241 | if (ret) |
1275 | goto clk_put; | 1242 | goto clk_put; |
1276 | 1243 | ||
1244 | ret = clk_set_rate(host->clk, msmsdcc_fmin); | ||
1245 | if (ret) { | ||
1246 | pr_err("%s: Clock rate set failed (%d)\n", __func__, ret); | ||
1247 | goto clk_disable; | ||
1248 | } | ||
1249 | |||
1277 | host->pclk_rate = clk_get_rate(host->pclk); | 1250 | host->pclk_rate = clk_get_rate(host->pclk); |
1278 | host->clk_rate = clk_get_rate(host->clk); | 1251 | host->clk_rate = clk_get_rate(host->clk); |
1279 | 1252 | ||
@@ -1343,12 +1316,16 @@ msmsdcc_probe(struct platform_device *pdev) | |||
1343 | host->eject = !host->oldstat; | 1316 | host->eject = !host->oldstat; |
1344 | } | 1317 | } |
1345 | 1318 | ||
1319 | init_timer(&host->busclk_timer); | ||
1320 | host->busclk_timer.data = (unsigned long) host; | ||
1321 | host->busclk_timer.function = msmsdcc_busclk_expired; | ||
1322 | |||
1346 | ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED, | 1323 | ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED, |
1347 | DRIVER_NAME " (cmd)", host); | 1324 | DRIVER_NAME " (cmd)", host); |
1348 | if (ret) | 1325 | if (ret) |
1349 | goto stat_irq_free; | 1326 | goto stat_irq_free; |
1350 | 1327 | ||
1351 | ret = request_irq(cmd_irqres->start, msmsdcc_pio_irq, IRQF_SHARED, | 1328 | ret = request_irq(pio_irqres->start, msmsdcc_pio_irq, IRQF_SHARED, |
1352 | DRIVER_NAME " (pio)", host); | 1329 | DRIVER_NAME " (pio)", host); |
1353 | if (ret) | 1330 | if (ret) |
1354 | goto cmd_irq_free; | 1331 | goto cmd_irq_free; |
@@ -1391,13 +1368,6 @@ msmsdcc_probe(struct platform_device *pdev) | |||
1391 | clk_put(host->clk); | 1368 | clk_put(host->clk); |
1392 | pclk_put: | 1369 | pclk_put: |
1393 | clk_put(host->pclk); | 1370 | clk_put(host->pclk); |
1394 | dma_free: | ||
1395 | if (host->dmares) | ||
1396 | dma_free_coherent(NULL, sizeof(struct msmsdcc_nc_dmadata), | ||
1397 | host->dma.nc, host->dma.nc_busaddr); | ||
1398 | ioremap_free: | ||
1399 | tasklet_kill(&host->dma_tlet); | ||
1400 | iounmap(host->base); | ||
1401 | host_free: | 1371 | host_free: |
1402 | mmc_free_host(mmc); | 1372 | mmc_free_host(mmc); |
1403 | out: | 1373 | out: |
@@ -1480,7 +1450,18 @@ static struct platform_driver msmsdcc_driver = { | |||
1480 | }, | 1450 | }, |
1481 | }; | 1451 | }; |
1482 | 1452 | ||
1483 | module_platform_driver(msmsdcc_driver); | 1453 | static int __init msmsdcc_init(void) |
1454 | { | ||
1455 | return platform_driver_register(&msmsdcc_driver); | ||
1456 | } | ||
1457 | |||
1458 | static void __exit msmsdcc_exit(void) | ||
1459 | { | ||
1460 | platform_driver_unregister(&msmsdcc_driver); | ||
1461 | } | ||
1462 | |||
1463 | module_init(msmsdcc_init); | ||
1464 | module_exit(msmsdcc_exit); | ||
1484 | 1465 | ||
1485 | MODULE_DESCRIPTION("Qualcomm MSM 7X00A Multimedia Card Interface driver"); | 1466 | MODULE_DESCRIPTION("Qualcomm MSM 7X00A Multimedia Card Interface driver"); |
1486 | MODULE_LICENSE("GPL"); | 1467 | MODULE_LICENSE("GPL"); |
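Much of the msm_sdcc.c churn above is mechanical: printk(KERN_ERR/KERN_DEBUG ...) versus the pr_err()/pr_debug() wrappers, and the two-argument kmap_atomic(page, KM_BIO_SRC_IRQ) form versus the newer single-argument call. A minimal sketch of the PIO copy pattern with the single-argument API (the helper name and the memcpy direction are illustrative):

#include <linux/highmem.h>
#include <linux/irqflags.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Copy 'len' bytes out of a scatterlist entry under an atomic mapping,
 * mirroring the msmsdcc_pio_irq() pattern on the left-hand side. */
static void foo_pio_copy_from_sg(struct scatterlist *sg, unsigned int sg_off,
				 void *dst, unsigned int len)
{
	unsigned long flags;
	char *buf;

	local_irq_save(flags);
	buf = kmap_atomic(sg_page(sg)) + sg->offset + sg_off;
	memcpy(dst, buf, len);		/* the driver feeds the FIFO here */
	kunmap_atomic(buf);
	local_irq_restore(flags);
}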
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h index 402028d16b8..42d7bbc977c 100644 --- a/drivers/mmc/host/msm_sdcc.h +++ b/drivers/mmc/host/msm_sdcc.h | |||
@@ -140,11 +140,6 @@ | |||
140 | MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \ | 140 | MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \ |
141 | MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATAENDMASK|MCI_PROGDONEMASK) | 141 | MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATAENDMASK|MCI_PROGDONEMASK) |
142 | 142 | ||
143 | #define MCI_IRQ_PIO \ | ||
144 | (MCI_RXDATAAVLBLMASK | MCI_TXDATAAVLBLMASK | MCI_RXFIFOEMPTYMASK | \ | ||
145 | MCI_TXFIFOEMPTYMASK | MCI_RXFIFOFULLMASK | MCI_TXFIFOFULLMASK | \ | ||
146 | MCI_RXFIFOHALFFULLMASK | MCI_TXFIFOHALFEMPTYMASK | \ | ||
147 | MCI_RXACTIVEMASK | MCI_TXACTIVEMASK) | ||
148 | /* | 143 | /* |
149 | * The size of the FIFO in bytes. | 144 | * The size of the FIFO in bytes. |
150 | */ | 145 | */ |
@@ -207,6 +202,7 @@ struct msmsdcc_stats { | |||
207 | 202 | ||
208 | struct msmsdcc_host { | 203 | struct msmsdcc_host { |
209 | struct resource *cmd_irqres; | 204 | struct resource *cmd_irqres; |
205 | struct resource *pio_irqres; | ||
210 | struct resource *memres; | 206 | struct resource *memres; |
211 | struct resource *dmares; | 207 | struct resource *dmares; |
212 | void __iomem *base; | 208 | void __iomem *base; |
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c index de4c20b3936..a5bf60e01af 100644 --- a/drivers/mmc/host/mvsdio.c +++ b/drivers/mmc/host/mvsdio.c | |||
@@ -19,13 +19,12 @@ | |||
19 | #include <linux/dma-mapping.h> | 19 | #include <linux/dma-mapping.h> |
20 | #include <linux/scatterlist.h> | 20 | #include <linux/scatterlist.h> |
21 | #include <linux/irq.h> | 21 | #include <linux/irq.h> |
22 | #include <linux/clk.h> | ||
23 | #include <linux/gpio.h> | 22 | #include <linux/gpio.h> |
24 | #include <linux/mmc/host.h> | 23 | #include <linux/mmc/host.h> |
25 | 24 | ||
26 | #include <asm/sizes.h> | 25 | #include <asm/sizes.h> |
27 | #include <asm/unaligned.h> | 26 | #include <asm/unaligned.h> |
28 | #include <linux/platform_data/mmc-mvsdio.h> | 27 | #include <plat/mvsdio.h> |
29 | 28 | ||
30 | #include "mvsdio.h" | 29 | #include "mvsdio.h" |
31 | 30 | ||
@@ -52,7 +51,6 @@ struct mvsd_host { | |||
52 | struct device *dev; | 51 | struct device *dev; |
53 | struct resource *res; | 52 | struct resource *res; |
54 | int irq; | 53 | int irq; |
55 | struct clk *clk; | ||
56 | int gpio_card_detect; | 54 | int gpio_card_detect; |
57 | int gpio_write_protect; | 55 | int gpio_write_protect; |
58 | }; | 56 | }; |
@@ -119,7 +117,7 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data) | |||
119 | host->pio_size = data->blocks * data->blksz; | 117 | host->pio_size = data->blocks * data->blksz; |
120 | host->pio_ptr = sg_virt(data->sg); | 118 | host->pio_ptr = sg_virt(data->sg); |
121 | if (!nodma) | 119 | if (!nodma) |
122 | pr_debug("%s: fallback to PIO for data " | 120 | printk(KERN_DEBUG "%s: fallback to PIO for data " |
123 | "at 0x%p size %d\n", | 121 | "at 0x%p size %d\n", |
124 | mmc_hostname(host->mmc), | 122 | mmc_hostname(host->mmc), |
125 | host->pio_ptr, host->pio_size); | 123 | host->pio_ptr, host->pio_size); |
@@ -473,7 +471,7 @@ static irqreturn_t mvsd_irq(int irq, void *dev) | |||
473 | if (mrq->data) | 471 | if (mrq->data) |
474 | err_status = mvsd_finish_data(host, mrq->data, err_status); | 472 | err_status = mvsd_finish_data(host, mrq->data, err_status); |
475 | if (err_status) { | 473 | if (err_status) { |
476 | pr_err("%s: unhandled error status %#04x\n", | 474 | printk(KERN_ERR "%s: unhandled error status %#04x\n", |
477 | mmc_hostname(host->mmc), err_status); | 475 | mmc_hostname(host->mmc), err_status); |
478 | cmd->error = -ENOMSG; | 476 | cmd->error = -ENOMSG; |
479 | } | 477 | } |
@@ -491,7 +489,7 @@ static irqreturn_t mvsd_irq(int irq, void *dev) | |||
491 | if (irq_handled) | 489 | if (irq_handled) |
492 | return IRQ_HANDLED; | 490 | return IRQ_HANDLED; |
493 | 491 | ||
494 | pr_err("%s: unhandled interrupt status=0x%04x en=0x%04x " | 492 | printk(KERN_ERR "%s: unhandled interrupt status=0x%04x en=0x%04x " |
495 | "pio=%d\n", mmc_hostname(host->mmc), intr_status, | 493 | "pio=%d\n", mmc_hostname(host->mmc), intr_status, |
496 | host->intr_en, host->pio_size); | 494 | host->intr_en, host->pio_size); |
497 | return IRQ_NONE; | 495 | return IRQ_NONE; |
@@ -507,9 +505,9 @@ static void mvsd_timeout_timer(unsigned long data) | |||
507 | spin_lock_irqsave(&host->lock, flags); | 505 | spin_lock_irqsave(&host->lock, flags); |
508 | mrq = host->mrq; | 506 | mrq = host->mrq; |
509 | if (mrq) { | 507 | if (mrq) { |
510 | pr_err("%s: Timeout waiting for hardware interrupt.\n", | 508 | printk(KERN_ERR "%s: Timeout waiting for hardware interrupt.\n", |
511 | mmc_hostname(host->mmc)); | 509 | mmc_hostname(host->mmc)); |
512 | pr_err("%s: hw_state=0x%04x, intr_status=0x%04x " | 510 | printk(KERN_ERR "%s: hw_state=0x%04x, intr_status=0x%04x " |
513 | "intr_en=0x%04x\n", mmc_hostname(host->mmc), | 511 | "intr_en=0x%04x\n", mmc_hostname(host->mmc), |
514 | mvsd_read(MVSD_HW_STATE), | 512 | mvsd_read(MVSD_HW_STATE), |
515 | mvsd_read(MVSD_NOR_INTR_STATUS), | 513 | mvsd_read(MVSD_NOR_INTR_STATUS), |
@@ -681,9 +679,8 @@ static const struct mmc_host_ops mvsd_ops = { | |||
681 | .enable_sdio_irq = mvsd_enable_sdio_irq, | 679 | .enable_sdio_irq = mvsd_enable_sdio_irq, |
682 | }; | 680 | }; |
683 | 681 | ||
684 | static void __init | 682 | static void __init mv_conf_mbus_windows(struct mvsd_host *host, |
685 | mv_conf_mbus_windows(struct mvsd_host *host, | 683 | struct mbus_dram_target_info *dram) |
686 | const struct mbus_dram_target_info *dram) | ||
687 | { | 684 | { |
688 | void __iomem *iobase = host->base; | 685 | void __iomem *iobase = host->base; |
689 | int i; | 686 | int i; |
@@ -694,7 +691,7 @@ mv_conf_mbus_windows(struct mvsd_host *host, | |||
694 | } | 691 | } |
695 | 692 | ||
696 | for (i = 0; i < dram->num_cs; i++) { | 693 | for (i = 0; i < dram->num_cs; i++) { |
697 | const struct mbus_dram_window *cs = dram->cs + i; | 694 | struct mbus_dram_window *cs = dram->cs + i; |
698 | writel(((cs->size - 1) & 0xffff0000) | | 695 | writel(((cs->size - 1) & 0xffff0000) | |
699 | (cs->mbus_attr << 8) | | 696 | (cs->mbus_attr << 8) | |
700 | (dram->mbus_dram_target_id << 4) | 1, | 697 | (dram->mbus_dram_target_id << 4) | 1, |
@@ -708,7 +705,6 @@ static int __init mvsd_probe(struct platform_device *pdev) | |||
708 | struct mmc_host *mmc = NULL; | 705 | struct mmc_host *mmc = NULL; |
709 | struct mvsd_host *host = NULL; | 706 | struct mvsd_host *host = NULL; |
710 | const struct mvsdio_platform_data *mvsd_data; | 707 | const struct mvsdio_platform_data *mvsd_data; |
711 | const struct mbus_dram_target_info *dram; | ||
712 | struct resource *r; | 708 | struct resource *r; |
713 | int ret, irq; | 709 | int ret, irq; |
714 | 710 | ||
@@ -759,26 +755,18 @@ static int __init mvsd_probe(struct platform_device *pdev) | |||
759 | } | 755 | } |
760 | 756 | ||
761 | /* (Re-)program MBUS remapping windows if we are asked to. */ | 757 | /* (Re-)program MBUS remapping windows if we are asked to. */ |
762 | dram = mv_mbus_dram_info(); | 758 | if (mvsd_data->dram != NULL) |
763 | if (dram) | 759 | mv_conf_mbus_windows(host, mvsd_data->dram); |
764 | mv_conf_mbus_windows(host, dram); | ||
765 | 760 | ||
766 | mvsd_power_down(host); | 761 | mvsd_power_down(host); |
767 | 762 | ||
768 | ret = request_irq(irq, mvsd_irq, 0, DRIVER_NAME, host); | 763 | ret = request_irq(irq, mvsd_irq, 0, DRIVER_NAME, host); |
769 | if (ret) { | 764 | if (ret) { |
770 | pr_err("%s: cannot assign irq %d\n", DRIVER_NAME, irq); | 765 | printk(KERN_ERR "%s: cannot assign irq %d\n", DRIVER_NAME, irq); |
771 | goto out; | 766 | goto out; |
772 | } else | 767 | } else |
773 | host->irq = irq; | 768 | host->irq = irq; |
774 | 769 | ||
775 | /* Not all platforms can gate the clock, so it is not | ||
776 | an error if the clock does not exist. */ | ||
777 | host->clk = clk_get(&pdev->dev, NULL); | ||
778 | if (!IS_ERR(host->clk)) { | ||
779 | clk_prepare_enable(host->clk); | ||
780 | } | ||
781 | |||
782 | if (mvsd_data->gpio_card_detect) { | 770 | if (mvsd_data->gpio_card_detect) { |
783 | ret = gpio_request(mvsd_data->gpio_card_detect, | 771 | ret = gpio_request(mvsd_data->gpio_card_detect, |
784 | DRIVER_NAME " cd"); | 772 | DRIVER_NAME " cd"); |
@@ -814,7 +802,7 @@ static int __init mvsd_probe(struct platform_device *pdev) | |||
814 | if (ret) | 802 | if (ret) |
815 | goto out; | 803 | goto out; |
816 | 804 | ||
817 | pr_notice("%s: %s driver initialized, ", | 805 | printk(KERN_NOTICE "%s: %s driver initialized, ", |
818 | mmc_hostname(mmc), DRIVER_NAME); | 806 | mmc_hostname(mmc), DRIVER_NAME); |
819 | if (host->gpio_card_detect) | 807 | if (host->gpio_card_detect) |
820 | printk("using GPIO %d for card detection\n", | 808 | printk("using GPIO %d for card detection\n", |
@@ -839,10 +827,6 @@ out: | |||
839 | if (r) | 827 | if (r) |
840 | release_resource(r); | 828 | release_resource(r); |
841 | if (mmc) | 829 | if (mmc) |
842 | if (!IS_ERR_OR_NULL(host->clk)) { | ||
843 | clk_disable_unprepare(host->clk); | ||
844 | clk_put(host->clk); | ||
845 | } | ||
846 | mmc_free_host(mmc); | 830 | mmc_free_host(mmc); |
847 | 831 | ||
848 | return ret; | 832 | return ret; |
@@ -867,11 +851,6 @@ static int __exit mvsd_remove(struct platform_device *pdev) | |||
867 | mvsd_power_down(host); | 851 | mvsd_power_down(host); |
868 | iounmap(host->base); | 852 | iounmap(host->base); |
869 | release_resource(host->res); | 853 | release_resource(host->res); |
870 | |||
871 | if (!IS_ERR(host->clk)) { | ||
872 | clk_disable_unprepare(host->clk); | ||
873 | clk_put(host->clk); | ||
874 | } | ||
875 | mmc_free_host(mmc); | 854 | mmc_free_host(mmc); |
876 | } | 855 | } |
877 | platform_set_drvdata(pdev, NULL); | 856 | platform_set_drvdata(pdev, NULL); |
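The clock handling removed from mvsdio.c above deliberately treats the clock as optional: a failing clk_get() is not an error, because not every platform can gate this clock. A short sketch of that pattern, assuming the same struct mvsd_host 'clk' field as in the left-hand column (the helper names are illustrative):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Probe side: take the clock if it exists, otherwise carry on without it. */
static void foo_get_optional_clk(struct mvsd_host *host,
				 struct platform_device *pdev)
{
	host->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(host->clk))
		clk_prepare_enable(host->clk);
}

/* Teardown side: only undo what was actually acquired. */
static void foo_put_optional_clk(struct mvsd_host *host)
{
	if (!IS_ERR_OR_NULL(host->clk)) {
		clk_disable_unprepare(host->clk);
		clk_put(host->clk);
	}
}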
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index a72936eea6f..b87143d0aeb 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c | |||
@@ -33,17 +33,15 @@ | |||
33 | #include <linux/gpio.h> | 33 | #include <linux/gpio.h> |
34 | #include <linux/regulator/consumer.h> | 34 | #include <linux/regulator/consumer.h> |
35 | #include <linux/dmaengine.h> | 35 | #include <linux/dmaengine.h> |
36 | #include <linux/types.h> | ||
37 | 36 | ||
38 | #include <asm/dma.h> | 37 | #include <asm/dma.h> |
39 | #include <asm/irq.h> | 38 | #include <asm/irq.h> |
40 | #include <asm/sizes.h> | 39 | #include <asm/sizes.h> |
41 | #include <linux/platform_data/mmc-mxcmmc.h> | 40 | #include <mach/mmc.h> |
42 | 41 | ||
43 | #include <linux/platform_data/dma-imx.h> | 42 | #include <mach/dma.h> |
44 | 43 | ||
45 | #define DRIVER_NAME "mxc-mmc" | 44 | #define DRIVER_NAME "mxc-mmc" |
46 | #define MXCMCI_TIMEOUT_MS 10000 | ||
47 | 45 | ||
48 | #define MMC_REG_STR_STP_CLK 0x00 | 46 | #define MMC_REG_STR_STP_CLK 0x00 |
49 | #define MMC_REG_STATUS 0x04 | 47 | #define MMC_REG_STATUS 0x04 |
@@ -112,11 +110,6 @@ | |||
112 | #define INT_WRITE_OP_DONE_EN (1 << 1) | 110 | #define INT_WRITE_OP_DONE_EN (1 << 1) |
113 | #define INT_READ_OP_EN (1 << 0) | 111 | #define INT_READ_OP_EN (1 << 0) |
114 | 112 | ||
115 | enum mxcmci_type { | ||
116 | IMX21_MMC, | ||
117 | IMX31_MMC, | ||
118 | }; | ||
119 | |||
120 | struct mxcmci_host { | 113 | struct mxcmci_host { |
121 | struct mmc_host *mmc; | 114 | struct mmc_host *mmc; |
122 | struct resource *res; | 115 | struct resource *res; |
@@ -141,8 +134,7 @@ struct mxcmci_host { | |||
141 | u16 rev_no; | 134 | u16 rev_no; |
142 | unsigned int cmdat; | 135 | unsigned int cmdat; |
143 | 136 | ||
144 | struct clk *clk_ipg; | 137 | struct clk *clk; |
145 | struct clk *clk_per; | ||
146 | 138 | ||
147 | int clock; | 139 | int clock; |
148 | 140 | ||
@@ -155,28 +147,7 @@ struct mxcmci_host { | |||
155 | int dmareq; | 147 | int dmareq; |
156 | struct dma_slave_config dma_slave_config; | 148 | struct dma_slave_config dma_slave_config; |
157 | struct imx_dma_data dma_data; | 149 | struct imx_dma_data dma_data; |
158 | |||
159 | struct timer_list watchdog; | ||
160 | enum mxcmci_type devtype; | ||
161 | }; | ||
162 | |||
163 | static struct platform_device_id mxcmci_devtype[] = { | ||
164 | { | ||
165 | .name = "imx21-mmc", | ||
166 | .driver_data = IMX21_MMC, | ||
167 | }, { | ||
168 | .name = "imx31-mmc", | ||
169 | .driver_data = IMX31_MMC, | ||
170 | }, { | ||
171 | /* sentinel */ | ||
172 | } | ||
173 | }; | 150 | }; |
174 | MODULE_DEVICE_TABLE(platform, mxcmci_devtype); | ||
175 | |||
176 | static inline int is_imx31_mmc(struct mxcmci_host *host) | ||
177 | { | ||
178 | return host->devtype == IMX31_MMC; | ||
179 | } | ||
180 | 151 | ||
181 | static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios); | 152 | static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios); |
182 | 153 | ||
@@ -246,7 +217,6 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) | |||
246 | unsigned int blksz = data->blksz; | 217 | unsigned int blksz = data->blksz; |
247 | unsigned int datasize = nob * blksz; | 218 | unsigned int datasize = nob * blksz; |
248 | struct scatterlist *sg; | 219 | struct scatterlist *sg; |
249 | enum dma_transfer_direction slave_dirn; | ||
250 | int i, nents; | 220 | int i, nents; |
251 | 221 | ||
252 | if (data->flags & MMC_DATA_STREAM) | 222 | if (data->flags & MMC_DATA_STREAM) |
@@ -263,27 +233,24 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) | |||
263 | return 0; | 233 | return 0; |
264 | 234 | ||
265 | for_each_sg(data->sg, sg, data->sg_len, i) { | 235 | for_each_sg(data->sg, sg, data->sg_len, i) { |
266 | if (sg->offset & 3 || sg->length & 3 || sg->length < 512) { | 236 | if (sg->offset & 3 || sg->length & 3) { |
267 | host->do_dma = 0; | 237 | host->do_dma = 0; |
268 | return 0; | 238 | return 0; |
269 | } | 239 | } |
270 | } | 240 | } |
271 | 241 | ||
272 | if (data->flags & MMC_DATA_READ) { | 242 | if (data->flags & MMC_DATA_READ) |
273 | host->dma_dir = DMA_FROM_DEVICE; | 243 | host->dma_dir = DMA_FROM_DEVICE; |
274 | slave_dirn = DMA_DEV_TO_MEM; | 244 | else |
275 | } else { | ||
276 | host->dma_dir = DMA_TO_DEVICE; | 245 | host->dma_dir = DMA_TO_DEVICE; |
277 | slave_dirn = DMA_MEM_TO_DEV; | ||
278 | } | ||
279 | 246 | ||
280 | nents = dma_map_sg(host->dma->device->dev, data->sg, | 247 | nents = dma_map_sg(host->dma->device->dev, data->sg, |
281 | data->sg_len, host->dma_dir); | 248 | data->sg_len, host->dma_dir); |
282 | if (nents != data->sg_len) | 249 | if (nents != data->sg_len) |
283 | return -EINVAL; | 250 | return -EINVAL; |
284 | 251 | ||
285 | host->desc = dmaengine_prep_slave_sg(host->dma, | 252 | host->desc = host->dma->device->device_prep_slave_sg(host->dma, |
286 | data->sg, data->sg_len, slave_dirn, | 253 | data->sg, data->sg_len, host->dma_dir, |
287 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 254 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
288 | 255 | ||
289 | if (!host->desc) { | 256 | if (!host->desc) { |
@@ -295,34 +262,10 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) | |||
295 | wmb(); | 262 | wmb(); |
296 | 263 | ||
297 | dmaengine_submit(host->desc); | 264 | dmaengine_submit(host->desc); |
298 | dma_async_issue_pending(host->dma); | ||
299 | |||
300 | mod_timer(&host->watchdog, jiffies + msecs_to_jiffies(MXCMCI_TIMEOUT_MS)); | ||
301 | 265 | ||
302 | return 0; | 266 | return 0; |
303 | } | 267 | } |
304 | 268 | ||
305 | static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat); | ||
306 | static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat); | ||
307 | |||
308 | static void mxcmci_dma_callback(void *data) | ||
309 | { | ||
310 | struct mxcmci_host *host = data; | ||
311 | u32 stat; | ||
312 | |||
313 | del_timer(&host->watchdog); | ||
314 | |||
315 | stat = readl(host->base + MMC_REG_STATUS); | ||
316 | writel(stat & ~STATUS_DATA_TRANS_DONE, host->base + MMC_REG_STATUS); | ||
317 | |||
318 | dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat); | ||
319 | |||
320 | if (stat & STATUS_READ_OP_DONE) | ||
321 | writel(STATUS_READ_OP_DONE, host->base + MMC_REG_STATUS); | ||
322 | |||
323 | mxcmci_data_done(host, stat); | ||
324 | } | ||
325 | |||
326 | static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd, | 269 | static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd, |
327 | unsigned int cmdat) | 270 | unsigned int cmdat) |
328 | { | 271 | { |
@@ -354,14 +297,8 @@ static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd, | |||
354 | 297 | ||
355 | int_cntr = INT_END_CMD_RES_EN; | 298 | int_cntr = INT_END_CMD_RES_EN; |
356 | 299 | ||
357 | if (mxcmci_use_dma(host)) { | 300 | if (mxcmci_use_dma(host)) |
358 | if (host->dma_dir == DMA_FROM_DEVICE) { | 301 | int_cntr |= INT_READ_OP_EN | INT_WRITE_OP_DONE_EN; |
359 | host->desc->callback = mxcmci_dma_callback; | ||
360 | host->desc->callback_param = host; | ||
361 | } else { | ||
362 | int_cntr |= INT_WRITE_OP_DONE_EN; | ||
363 | } | ||
364 | } | ||
365 | 302 | ||
366 | spin_lock_irqsave(&host->lock, flags); | 303 | spin_lock_irqsave(&host->lock, flags); |
367 | if (host->use_sdio) | 304 | if (host->use_sdio) |
@@ -400,9 +337,11 @@ static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat) | |||
400 | struct mmc_data *data = host->data; | 337 | struct mmc_data *data = host->data; |
401 | int data_error; | 338 | int data_error; |
402 | 339 | ||
403 | if (mxcmci_use_dma(host)) | 340 | if (mxcmci_use_dma(host)) { |
341 | dmaengine_terminate_all(host->dma); | ||
404 | dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len, | 342 | dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len, |
405 | host->dma_dir); | 343 | host->dma_dir); |
344 | } | ||
406 | 345 | ||
407 | if (stat & STATUS_ERR_MASK) { | 346 | if (stat & STATUS_ERR_MASK) { |
408 | dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", | 347 | dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", |
@@ -677,10 +616,8 @@ static irqreturn_t mxcmci_irq(int irq, void *devid) | |||
677 | mxcmci_cmd_done(host, stat); | 616 | mxcmci_cmd_done(host, stat); |
678 | 617 | ||
679 | if (mxcmci_use_dma(host) && | 618 | if (mxcmci_use_dma(host) && |
680 | (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE))) { | 619 | (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE))) |
681 | del_timer(&host->watchdog); | ||
682 | mxcmci_data_done(host, stat); | 620 | mxcmci_data_done(host, stat); |
683 | } | ||
684 | 621 | ||
685 | if (host->default_irq_mask && | 622 | if (host->default_irq_mask && |
686 | (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL))) | 623 | (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL))) |
@@ -728,7 +665,7 @@ static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios) | |||
728 | { | 665 | { |
729 | unsigned int divider; | 666 | unsigned int divider; |
730 | int prescaler = 0; | 667 | int prescaler = 0; |
731 | unsigned int clk_in = clk_get_rate(host->clk_per); | 668 | unsigned int clk_in = clk_get_rate(host->clk); |
732 | 669 | ||
733 | while (prescaler <= 0x800) { | 670 | while (prescaler <= 0x800) { |
734 | for (divider = 1; divider <= 0xF; divider++) { | 671 | for (divider = 1; divider <= 0xF; divider++) { |
@@ -768,7 +705,6 @@ static int mxcmci_setup_dma(struct mmc_host *mmc) | |||
768 | config->src_addr_width = 4; | 705 | config->src_addr_width = 4; |
769 | config->dst_maxburst = host->burstlen; | 706 | config->dst_maxburst = host->burstlen; |
770 | config->src_maxburst = host->burstlen; | 707 | config->src_maxburst = host->burstlen; |
771 | config->device_fc = false; | ||
772 | 708 | ||
773 | return dmaengine_slave_config(host->dma, config); | 709 | return dmaengine_slave_config(host->dma, config); |
774 | } | 710 | } |
@@ -866,8 +802,6 @@ static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable) | |||
866 | 802 | ||
867 | static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card) | 803 | static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card) |
868 | { | 804 | { |
869 | struct mxcmci_host *mxcmci = mmc_priv(host); | ||
870 | |||
871 | /* | 805 | /* |
872 | * MX3 SoCs have a silicon bug which corrupts CRC calculation of | 806 | * MX3 SoCs have a silicon bug which corrupts CRC calculation of |
873 | * multi-block transfers when a connected SDIO peripheral doesn't | 807 | * multi-block transfers when a connected SDIO peripheral doesn't |
@@ -875,7 +809,7 @@ static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card) | |||
875 | * One way to prevent this is to only allow 1-bit transfers. | 809 | * One way to prevent this is to only allow 1-bit transfers. |
876 | */ | 810 | */ |
877 | 811 | ||
878 | if (is_imx31_mmc(mxcmci) && card->type == MMC_TYPE_SDIO) | 812 | if (cpu_is_mx3() && card->type == MMC_TYPE_SDIO) |
879 | host->caps &= ~MMC_CAP_4_BIT_DATA; | 813 | host->caps &= ~MMC_CAP_4_BIT_DATA; |
880 | else | 814 | else |
881 | host->caps |= MMC_CAP_4_BIT_DATA; | 815 | host->caps |= MMC_CAP_4_BIT_DATA; |
@@ -893,34 +827,6 @@ static bool filter(struct dma_chan *chan, void *param) | |||
893 | return true; | 827 | return true; |
894 | } | 828 | } |
895 | 829 | ||
896 | static void mxcmci_watchdog(unsigned long data) | ||
897 | { | ||
898 | struct mmc_host *mmc = (struct mmc_host *)data; | ||
899 | struct mxcmci_host *host = mmc_priv(mmc); | ||
900 | struct mmc_request *req = host->req; | ||
901 | unsigned int stat = readl(host->base + MMC_REG_STATUS); | ||
902 | |||
903 | if (host->dma_dir == DMA_FROM_DEVICE) { | ||
904 | dmaengine_terminate_all(host->dma); | ||
905 | dev_err(mmc_dev(host->mmc), | ||
906 | "%s: read time out (status = 0x%08x)\n", | ||
907 | __func__, stat); | ||
908 | } else { | ||
909 | dev_err(mmc_dev(host->mmc), | ||
910 | "%s: write time out (status = 0x%08x)\n", | ||
911 | __func__, stat); | ||
912 | mxcmci_softreset(host); | ||
913 | } | ||
914 | |||
915 | /* Mark transfer as erroneous and inform the upper layers */ | ||
916 | |||
917 | host->data->error = -ETIMEDOUT; | ||
918 | host->req = NULL; | ||
919 | host->cmd = NULL; | ||
920 | host->data = NULL; | ||
921 | mmc_request_done(host->mmc, req); | ||
922 | } | ||
923 | |||
924 | static const struct mmc_host_ops mxcmci_ops = { | 830 | static const struct mmc_host_ops mxcmci_ops = { |
925 | .request = mxcmci_request, | 831 | .request = mxcmci_request, |
926 | .set_ios = mxcmci_set_ios, | 832 | .set_ios = mxcmci_set_ios, |
@@ -937,7 +843,7 @@ static int mxcmci_probe(struct platform_device *pdev) | |||
937 | int ret = 0, irq; | 843 | int ret = 0, irq; |
938 | dma_cap_mask_t mask; | 844 | dma_cap_mask_t mask; |
939 | 845 | ||
940 | pr_info("i.MX SDHC driver\n"); | 846 | printk(KERN_INFO "i.MX SDHC driver\n"); |
941 | 847 | ||
942 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 848 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
943 | irq = platform_get_irq(pdev, 0); | 849 | irq = platform_get_irq(pdev, 0); |
@@ -973,7 +879,6 @@ static int mxcmci_probe(struct platform_device *pdev) | |||
973 | 879 | ||
974 | host->mmc = mmc; | 880 | host->mmc = mmc; |
975 | host->pdata = pdev->dev.platform_data; | 881 | host->pdata = pdev->dev.platform_data; |
976 | host->devtype = pdev->id_entry->driver_data; | ||
977 | spin_lock_init(&host->lock); | 882 | spin_lock_init(&host->lock); |
978 | 883 | ||
979 | mxcmci_init_ocr(host); | 884 | mxcmci_init_ocr(host); |
@@ -987,20 +892,12 @@ static int mxcmci_probe(struct platform_device *pdev) | |||
987 | host->res = r; | 892 | host->res = r; |
988 | host->irq = irq; | 893 | host->irq = irq; |
989 | 894 | ||
990 | host->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); | 895 | host->clk = clk_get(&pdev->dev, NULL); |
991 | if (IS_ERR(host->clk_ipg)) { | 896 | if (IS_ERR(host->clk)) { |
992 | ret = PTR_ERR(host->clk_ipg); | 897 | ret = PTR_ERR(host->clk); |
993 | goto out_iounmap; | 898 | goto out_iounmap; |
994 | } | 899 | } |
995 | 900 | clk_enable(host->clk); | |
996 | host->clk_per = devm_clk_get(&pdev->dev, "per"); | ||
997 | if (IS_ERR(host->clk_per)) { | ||
998 | ret = PTR_ERR(host->clk_per); | ||
999 | goto out_iounmap; | ||
1000 | } | ||
1001 | |||
1002 | clk_prepare_enable(host->clk_per); | ||
1003 | clk_prepare_enable(host->clk_ipg); | ||
1004 | 901 | ||
1005 | mxcmci_softreset(host); | 902 | mxcmci_softreset(host); |
1006 | 903 | ||
@@ -1012,8 +909,8 @@ static int mxcmci_probe(struct platform_device *pdev) | |||
1012 | goto out_clk_put; | 909 | goto out_clk_put; |
1013 | } | 910 | } |
1014 | 911 | ||
1015 | mmc->f_min = clk_get_rate(host->clk_per) >> 16; | 912 | mmc->f_min = clk_get_rate(host->clk) >> 16; |
1016 | mmc->f_max = clk_get_rate(host->clk_per) >> 1; | 913 | mmc->f_max = clk_get_rate(host->clk) >> 1; |
1017 | 914 | ||
1018 | /* recommended in data sheet */ | 915 | /* recommended in data sheet */ |
1019 | writew(0x2db4, host->base + MMC_REG_READ_TO); | 916 | writew(0x2db4, host->base + MMC_REG_READ_TO); |
@@ -1054,10 +951,6 @@ static int mxcmci_probe(struct platform_device *pdev) | |||
1054 | 951 | ||
1055 | mmc_add_host(mmc); | 952 | mmc_add_host(mmc); |
1056 | 953 | ||
1057 | init_timer(&host->watchdog); | ||
1058 | host->watchdog.function = &mxcmci_watchdog; | ||
1059 | host->watchdog.data = (unsigned long)mmc; | ||
1060 | |||
1061 | return 0; | 954 | return 0; |
1062 | 955 | ||
1063 | out_free_irq: | 956 | out_free_irq: |
@@ -1066,8 +959,8 @@ out_free_dma: | |||
1066 | if (host->dma) | 959 | if (host->dma) |
1067 | dma_release_channel(host->dma); | 960 | dma_release_channel(host->dma); |
1068 | out_clk_put: | 961 | out_clk_put: |
1069 | clk_disable_unprepare(host->clk_per); | 962 | clk_disable(host->clk); |
1070 | clk_disable_unprepare(host->clk_ipg); | 963 | clk_put(host->clk); |
1071 | out_iounmap: | 964 | out_iounmap: |
1072 | iounmap(host->base); | 965 | iounmap(host->base); |
1073 | out_free: | 966 | out_free: |
@@ -1098,8 +991,8 @@ static int mxcmci_remove(struct platform_device *pdev) | |||
1098 | if (host->dma) | 991 | if (host->dma) |
1099 | dma_release_channel(host->dma); | 992 | dma_release_channel(host->dma); |
1100 | 993 | ||
1101 | clk_disable_unprepare(host->clk_per); | 994 | clk_disable(host->clk); |
1102 | clk_disable_unprepare(host->clk_ipg); | 995 | clk_put(host->clk); |
1103 | 996 | ||
1104 | release_mem_region(host->res->start, resource_size(host->res)); | 997 | release_mem_region(host->res->start, resource_size(host->res)); |
1105 | 998 | ||
@@ -1117,8 +1010,7 @@ static int mxcmci_suspend(struct device *dev) | |||
1117 | 1010 | ||
1118 | if (mmc) | 1011 | if (mmc) |
1119 | ret = mmc_suspend_host(mmc); | 1012 | ret = mmc_suspend_host(mmc); |
1120 | clk_disable_unprepare(host->clk_per); | 1013 | clk_disable(host->clk); |
1121 | clk_disable_unprepare(host->clk_ipg); | ||
1122 | 1014 | ||
1123 | return ret; | 1015 | return ret; |
1124 | } | 1016 | } |
@@ -1129,8 +1021,7 @@ static int mxcmci_resume(struct device *dev) | |||
1129 | struct mxcmci_host *host = mmc_priv(mmc); | 1021 | struct mxcmci_host *host = mmc_priv(mmc); |
1130 | int ret = 0; | 1022 | int ret = 0; |
1131 | 1023 | ||
1132 | clk_prepare_enable(host->clk_per); | 1024 | clk_enable(host->clk); |
1133 | clk_prepare_enable(host->clk_ipg); | ||
1134 | if (mmc) | 1025 | if (mmc) |
1135 | ret = mmc_resume_host(mmc); | 1026 | ret = mmc_resume_host(mmc); |
1136 | 1027 | ||
@@ -1146,7 +1037,6 @@ static const struct dev_pm_ops mxcmci_pm_ops = { | |||
1146 | static struct platform_driver mxcmci_driver = { | 1037 | static struct platform_driver mxcmci_driver = { |
1147 | .probe = mxcmci_probe, | 1038 | .probe = mxcmci_probe, |
1148 | .remove = mxcmci_remove, | 1039 | .remove = mxcmci_remove, |
1149 | .id_table = mxcmci_devtype, | ||
1150 | .driver = { | 1040 | .driver = { |
1151 | .name = DRIVER_NAME, | 1041 | .name = DRIVER_NAME, |
1152 | .owner = THIS_MODULE, | 1042 | .owner = THIS_MODULE, |
@@ -1156,9 +1046,20 @@ static struct platform_driver mxcmci_driver = { | |||
1156 | } | 1046 | } |
1157 | }; | 1047 | }; |
1158 | 1048 | ||
1159 | module_platform_driver(mxcmci_driver); | 1049 | static int __init mxcmci_init(void) |
1050 | { | ||
1051 | return platform_driver_register(&mxcmci_driver); | ||
1052 | } | ||
1053 | |||
1054 | static void __exit mxcmci_exit(void) | ||
1055 | { | ||
1056 | platform_driver_unregister(&mxcmci_driver); | ||
1057 | } | ||
1058 | |||
1059 | module_init(mxcmci_init); | ||
1060 | module_exit(mxcmci_exit); | ||
1160 | 1061 | ||
1161 | MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver"); | 1062 | MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver"); |
1162 | MODULE_AUTHOR("Sascha Hauer, Pengutronix"); | 1063 | MODULE_AUTHOR("Sascha Hauer, Pengutronix"); |
1163 | MODULE_LICENSE("GPL"); | 1064 | MODULE_LICENSE("GPL"); |
1164 | MODULE_ALIAS("platform:mxc-mmc"); | 1065 | MODULE_ALIAS("platform:imx-mmc"); |
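On the mainline side of the mxcmmc.c hunks, DMA is driven through the generic dmaengine helpers — dmaengine_prep_slave_sg(), dmaengine_submit() and dma_async_issue_pending() — with a completion callback and a watchdog timer, whereas the patched tree calls device_prep_slave_sg() directly and relies on the SDHC interrupt alone. A hedged sketch of that submission flow (the function name, error code and read direction are illustrative):

#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Prepare and kick off a slave-DMA read of a scatterlist; 'done' is the
 * completion callback (e.g. mxcmci_dma_callback on the left-hand side). */
static int foo_start_slave_dma(struct dma_chan *chan, struct scatterlist *sg,
			       unsigned int sg_len,
			       dma_async_tx_callback done, void *arg)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_sg(chan, sg, sg_len, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	desc->callback = done;
	desc->callback_param = arg;

	dmaengine_submit(desc);		/* queue the descriptor ...    */
	dma_async_issue_pending(chan);	/* ... and start the engine    */
	return 0;
}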
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index 206fe499ded..d513d47364d 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c | |||
@@ -23,9 +23,6 @@ | |||
23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/ioport.h> | 25 | #include <linux/ioport.h> |
26 | #include <linux/of.h> | ||
27 | #include <linux/of_device.h> | ||
28 | #include <linux/of_gpio.h> | ||
29 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
30 | #include <linux/delay.h> | 27 | #include <linux/delay.h> |
31 | #include <linux/interrupt.h> | 28 | #include <linux/interrupt.h> |
@@ -40,13 +37,95 @@ | |||
40 | #include <linux/mmc/sdio.h> | 37 | #include <linux/mmc/sdio.h> |
41 | #include <linux/gpio.h> | 38 | #include <linux/gpio.h> |
42 | #include <linux/regulator/consumer.h> | 39 | #include <linux/regulator/consumer.h> |
43 | #include <linux/module.h> | 40 | |
44 | #include <linux/pinctrl/consumer.h> | 41 | #include <mach/mxs.h> |
45 | #include <linux/stmp_device.h> | 42 | #include <mach/common.h> |
46 | #include <linux/spi/mxs-spi.h> | 43 | #include <mach/dma.h> |
44 | #include <mach/mmc.h> | ||
47 | 45 | ||
48 | #define DRIVER_NAME "mxs-mmc" | 46 | #define DRIVER_NAME "mxs-mmc" |
49 | 47 | ||
48 | /* card detect polling timeout */ | ||
49 | #define MXS_MMC_DETECT_TIMEOUT (HZ/2) | ||
50 | |||
51 | #define SSP_VERSION_LATEST 4 | ||
52 | #define ssp_is_old() (host->version < SSP_VERSION_LATEST) | ||
53 | |||
54 | /* SSP registers */ | ||
55 | #define HW_SSP_CTRL0 0x000 | ||
56 | #define BM_SSP_CTRL0_RUN (1 << 29) | ||
57 | #define BM_SSP_CTRL0_SDIO_IRQ_CHECK (1 << 28) | ||
58 | #define BM_SSP_CTRL0_IGNORE_CRC (1 << 26) | ||
59 | #define BM_SSP_CTRL0_READ (1 << 25) | ||
60 | #define BM_SSP_CTRL0_DATA_XFER (1 << 24) | ||
61 | #define BP_SSP_CTRL0_BUS_WIDTH (22) | ||
62 | #define BM_SSP_CTRL0_BUS_WIDTH (0x3 << 22) | ||
63 | #define BM_SSP_CTRL0_WAIT_FOR_IRQ (1 << 21) | ||
64 | #define BM_SSP_CTRL0_LONG_RESP (1 << 19) | ||
65 | #define BM_SSP_CTRL0_GET_RESP (1 << 17) | ||
66 | #define BM_SSP_CTRL0_ENABLE (1 << 16) | ||
67 | #define BP_SSP_CTRL0_XFER_COUNT (0) | ||
68 | #define BM_SSP_CTRL0_XFER_COUNT (0xffff) | ||
69 | #define HW_SSP_CMD0 0x010 | ||
70 | #define BM_SSP_CMD0_DBL_DATA_RATE_EN (1 << 25) | ||
71 | #define BM_SSP_CMD0_SLOW_CLKING_EN (1 << 22) | ||
72 | #define BM_SSP_CMD0_CONT_CLKING_EN (1 << 21) | ||
73 | #define BM_SSP_CMD0_APPEND_8CYC (1 << 20) | ||
74 | #define BP_SSP_CMD0_BLOCK_SIZE (16) | ||
75 | #define BM_SSP_CMD0_BLOCK_SIZE (0xf << 16) | ||
76 | #define BP_SSP_CMD0_BLOCK_COUNT (8) | ||
77 | #define BM_SSP_CMD0_BLOCK_COUNT (0xff << 8) | ||
78 | #define BP_SSP_CMD0_CMD (0) | ||
79 | #define BM_SSP_CMD0_CMD (0xff) | ||
80 | #define HW_SSP_CMD1 0x020 | ||
81 | #define HW_SSP_XFER_SIZE 0x030 | ||
82 | #define HW_SSP_BLOCK_SIZE 0x040 | ||
83 | #define BP_SSP_BLOCK_SIZE_BLOCK_COUNT (4) | ||
84 | #define BM_SSP_BLOCK_SIZE_BLOCK_COUNT (0xffffff << 4) | ||
85 | #define BP_SSP_BLOCK_SIZE_BLOCK_SIZE (0) | ||
86 | #define BM_SSP_BLOCK_SIZE_BLOCK_SIZE (0xf) | ||
87 | #define HW_SSP_TIMING (ssp_is_old() ? 0x050 : 0x070) | ||
88 | #define BP_SSP_TIMING_TIMEOUT (16) | ||
89 | #define BM_SSP_TIMING_TIMEOUT (0xffff << 16) | ||
90 | #define BP_SSP_TIMING_CLOCK_DIVIDE (8) | ||
91 | #define BM_SSP_TIMING_CLOCK_DIVIDE (0xff << 8) | ||
92 | #define BP_SSP_TIMING_CLOCK_RATE (0) | ||
93 | #define BM_SSP_TIMING_CLOCK_RATE (0xff) | ||
94 | #define HW_SSP_CTRL1 (ssp_is_old() ? 0x060 : 0x080) | ||
95 | #define BM_SSP_CTRL1_SDIO_IRQ (1 << 31) | ||
96 | #define BM_SSP_CTRL1_SDIO_IRQ_EN (1 << 30) | ||
97 | #define BM_SSP_CTRL1_RESP_ERR_IRQ (1 << 29) | ||
98 | #define BM_SSP_CTRL1_RESP_ERR_IRQ_EN (1 << 28) | ||
99 | #define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ (1 << 27) | ||
100 | #define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN (1 << 26) | ||
101 | #define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ (1 << 25) | ||
102 | #define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN (1 << 24) | ||
103 | #define BM_SSP_CTRL1_DATA_CRC_IRQ (1 << 23) | ||
104 | #define BM_SSP_CTRL1_DATA_CRC_IRQ_EN (1 << 22) | ||
105 | #define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ (1 << 21) | ||
106 | #define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ_EN (1 << 20) | ||
107 | #define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ (1 << 17) | ||
108 | #define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN (1 << 16) | ||
109 | #define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ (1 << 15) | ||
110 | #define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ_EN (1 << 14) | ||
111 | #define BM_SSP_CTRL1_DMA_ENABLE (1 << 13) | ||
112 | #define BM_SSP_CTRL1_POLARITY (1 << 9) | ||
113 | #define BP_SSP_CTRL1_WORD_LENGTH (4) | ||
114 | #define BM_SSP_CTRL1_WORD_LENGTH (0xf << 4) | ||
115 | #define BP_SSP_CTRL1_SSP_MODE (0) | ||
116 | #define BM_SSP_CTRL1_SSP_MODE (0xf) | ||
117 | #define HW_SSP_SDRESP0 (ssp_is_old() ? 0x080 : 0x0a0) | ||
118 | #define HW_SSP_SDRESP1 (ssp_is_old() ? 0x090 : 0x0b0) | ||
119 | #define HW_SSP_SDRESP2 (ssp_is_old() ? 0x0a0 : 0x0c0) | ||
120 | #define HW_SSP_SDRESP3 (ssp_is_old() ? 0x0b0 : 0x0d0) | ||
121 | #define HW_SSP_STATUS (ssp_is_old() ? 0x0c0 : 0x100) | ||
122 | #define BM_SSP_STATUS_CARD_DETECT (1 << 28) | ||
123 | #define BM_SSP_STATUS_SDIO_IRQ (1 << 17) | ||
124 | #define HW_SSP_VERSION (cpu_is_mx23() ? 0x110 : 0x130) | ||
125 | #define BP_SSP_VERSION_MAJOR (24) | ||
126 | |||
127 | #define BF_SSP(value, field) (((value) << BP_SSP_##field) & BM_SSP_##field) | ||
128 | |||
50 | #define MXS_MMC_IRQ_BITS (BM_SSP_CTRL1_SDIO_IRQ | \ | 129 | #define MXS_MMC_IRQ_BITS (BM_SSP_CTRL1_SDIO_IRQ | \ |
51 | BM_SSP_CTRL1_RESP_ERR_IRQ | \ | 130 | BM_SSP_CTRL1_RESP_ERR_IRQ | \ |
52 | BM_SSP_CTRL1_RESP_TIMEOUT_IRQ | \ | 131 | BM_SSP_CTRL1_RESP_TIMEOUT_IRQ | \ |
@@ -56,55 +135,60 @@ | |||
56 | BM_SSP_CTRL1_RECV_TIMEOUT_IRQ | \ | 135 | BM_SSP_CTRL1_RECV_TIMEOUT_IRQ | \ |
57 | BM_SSP_CTRL1_FIFO_OVERRUN_IRQ) | 136 | BM_SSP_CTRL1_FIFO_OVERRUN_IRQ) |
58 | 137 | ||
59 | /* card detect polling timeout */ | 138 | #define SSP_PIO_NUM 3 |
60 | #define MXS_MMC_DETECT_TIMEOUT (HZ/2) | ||
61 | 139 | ||
62 | struct mxs_mmc_host { | 140 | struct mxs_mmc_host { |
63 | struct mxs_ssp ssp; | ||
64 | |||
65 | struct mmc_host *mmc; | 141 | struct mmc_host *mmc; |
66 | struct mmc_request *mrq; | 142 | struct mmc_request *mrq; |
67 | struct mmc_command *cmd; | 143 | struct mmc_command *cmd; |
68 | struct mmc_data *data; | 144 | struct mmc_data *data; |
69 | 145 | ||
146 | void __iomem *base; | ||
147 | int irq; | ||
148 | struct resource *res; | ||
149 | struct resource *dma_res; | ||
150 | struct clk *clk; | ||
151 | unsigned int clk_rate; | ||
152 | |||
153 | struct dma_chan *dmach; | ||
154 | struct mxs_dma_data dma_data; | ||
155 | unsigned int dma_dir; | ||
156 | u32 ssp_pio_words[SSP_PIO_NUM]; | ||
157 | |||
158 | unsigned int version; | ||
70 | unsigned char bus_width; | 159 | unsigned char bus_width; |
71 | spinlock_t lock; | 160 | spinlock_t lock; |
72 | int sdio_irq_en; | 161 | int sdio_irq_en; |
73 | int wp_gpio; | ||
74 | bool wp_inverted; | ||
75 | }; | 162 | }; |
76 | 163 | ||
77 | static int mxs_mmc_get_ro(struct mmc_host *mmc) | 164 | static int mxs_mmc_get_ro(struct mmc_host *mmc) |
78 | { | 165 | { |
79 | struct mxs_mmc_host *host = mmc_priv(mmc); | 166 | struct mxs_mmc_host *host = mmc_priv(mmc); |
80 | int ret; | 167 | struct mxs_mmc_platform_data *pdata = |
81 | 168 | mmc_dev(host->mmc)->platform_data; | |
82 | if (!gpio_is_valid(host->wp_gpio)) | ||
83 | return -EINVAL; | ||
84 | 169 | ||
85 | ret = gpio_get_value(host->wp_gpio); | 170 | if (!pdata) |
171 | return -EFAULT; | ||
86 | 172 | ||
87 | if (host->wp_inverted) | 173 | if (!gpio_is_valid(pdata->wp_gpio)) |
88 | ret = !ret; | 174 | return -EINVAL; |
89 | 175 | ||
90 | return ret; | 176 | return gpio_get_value(pdata->wp_gpio); |
91 | } | 177 | } |
92 | 178 | ||
93 | static int mxs_mmc_get_cd(struct mmc_host *mmc) | 179 | static int mxs_mmc_get_cd(struct mmc_host *mmc) |
94 | { | 180 | { |
95 | struct mxs_mmc_host *host = mmc_priv(mmc); | 181 | struct mxs_mmc_host *host = mmc_priv(mmc); |
96 | struct mxs_ssp *ssp = &host->ssp; | ||
97 | 182 | ||
98 | return !(readl(ssp->base + HW_SSP_STATUS(ssp)) & | 183 | return !(readl(host->base + HW_SSP_STATUS) & |
99 | BM_SSP_STATUS_CARD_DETECT); | 184 | BM_SSP_STATUS_CARD_DETECT); |
100 | } | 185 | } |
101 | 186 | ||
102 | static void mxs_mmc_reset(struct mxs_mmc_host *host) | 187 | static void mxs_mmc_reset(struct mxs_mmc_host *host) |
103 | { | 188 | { |
104 | struct mxs_ssp *ssp = &host->ssp; | ||
105 | u32 ctrl0, ctrl1; | 189 | u32 ctrl0, ctrl1; |
106 | 190 | ||
107 | stmp_reset_block(ssp->base); | 191 | mxs_reset_block(host->base); |
108 | 192 | ||
109 | ctrl0 = BM_SSP_CTRL0_IGNORE_CRC; | 193 | ctrl0 = BM_SSP_CTRL0_IGNORE_CRC; |
110 | ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) | | 194 | ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) | |
@@ -120,15 +204,15 @@ static void mxs_mmc_reset(struct mxs_mmc_host *host) | |||
120 | writel(BF_SSP(0xffff, TIMING_TIMEOUT) | | 204 | writel(BF_SSP(0xffff, TIMING_TIMEOUT) | |
121 | BF_SSP(2, TIMING_CLOCK_DIVIDE) | | 205 | BF_SSP(2, TIMING_CLOCK_DIVIDE) | |
122 | BF_SSP(0, TIMING_CLOCK_RATE), | 206 | BF_SSP(0, TIMING_CLOCK_RATE), |
123 | ssp->base + HW_SSP_TIMING(ssp)); | 207 | host->base + HW_SSP_TIMING); |
124 | 208 | ||
125 | if (host->sdio_irq_en) { | 209 | if (host->sdio_irq_en) { |
126 | ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; | 210 | ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; |
127 | ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN; | 211 | ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN; |
128 | } | 212 | } |
129 | 213 | ||
130 | writel(ctrl0, ssp->base + HW_SSP_CTRL0); | 214 | writel(ctrl0, host->base + HW_SSP_CTRL0); |
131 | writel(ctrl1, ssp->base + HW_SSP_CTRL1(ssp)); | 215 | writel(ctrl1, host->base + HW_SSP_CTRL1); |
132 | } | 216 | } |
133 | 217 | ||
134 | static void mxs_mmc_start_cmd(struct mxs_mmc_host *host, | 218 | static void mxs_mmc_start_cmd(struct mxs_mmc_host *host, |
@@ -139,22 +223,21 @@ static void mxs_mmc_request_done(struct mxs_mmc_host *host) | |||
139 | struct mmc_command *cmd = host->cmd; | 223 | struct mmc_command *cmd = host->cmd; |
140 | struct mmc_data *data = host->data; | 224 | struct mmc_data *data = host->data; |
141 | struct mmc_request *mrq = host->mrq; | 225 | struct mmc_request *mrq = host->mrq; |
142 | struct mxs_ssp *ssp = &host->ssp; | ||
143 | 226 | ||
144 | if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) { | 227 | if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) { |
145 | if (mmc_resp_type(cmd) & MMC_RSP_136) { | 228 | if (mmc_resp_type(cmd) & MMC_RSP_136) { |
146 | cmd->resp[3] = readl(ssp->base + HW_SSP_SDRESP0(ssp)); | 229 | cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0); |
147 | cmd->resp[2] = readl(ssp->base + HW_SSP_SDRESP1(ssp)); | 230 | cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1); |
148 | cmd->resp[1] = readl(ssp->base + HW_SSP_SDRESP2(ssp)); | 231 | cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2); |
149 | cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP3(ssp)); | 232 | cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3); |
150 | } else { | 233 | } else { |
151 | cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP0(ssp)); | 234 | cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0); |
152 | } | 235 | } |
153 | } | 236 | } |
154 | 237 | ||
155 | if (data) { | 238 | if (data) { |
156 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, | 239 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, |
157 | data->sg_len, ssp->dma_dir); | 240 | data->sg_len, host->dma_dir); |
158 | /* | 241 | /* |
159 | * If there was an error on any block, we mark all | 242 | * If there was an error on any block, we mark all |
160 | * data blocks as being in error. | 243 | * data blocks as being in error. |
@@ -187,20 +270,19 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id) | |||
187 | struct mxs_mmc_host *host = dev_id; | 270 | struct mxs_mmc_host *host = dev_id; |
188 | struct mmc_command *cmd = host->cmd; | 271 | struct mmc_command *cmd = host->cmd; |
189 | struct mmc_data *data = host->data; | 272 | struct mmc_data *data = host->data; |
190 | struct mxs_ssp *ssp = &host->ssp; | ||
191 | u32 stat; | 273 | u32 stat; |
192 | 274 | ||
193 | spin_lock(&host->lock); | 275 | spin_lock(&host->lock); |
194 | 276 | ||
195 | stat = readl(ssp->base + HW_SSP_CTRL1(ssp)); | 277 | stat = readl(host->base + HW_SSP_CTRL1); |
196 | writel(stat & MXS_MMC_IRQ_BITS, | 278 | writel(stat & MXS_MMC_IRQ_BITS, |
197 | ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR); | 279 | host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR); |
198 | |||
199 | spin_unlock(&host->lock); | ||
200 | 280 | ||
201 | if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN)) | 281 | if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN)) |
202 | mmc_signal_sdio_irq(host->mmc); | 282 | mmc_signal_sdio_irq(host->mmc); |
203 | 283 | ||
284 | spin_unlock(&host->lock); | ||
285 | |||
204 | if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ) | 286 | if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ) |
205 | cmd->error = -ETIMEDOUT; | 287 | cmd->error = -ETIMEDOUT; |
206 | else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ) | 288 | else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ) |
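The writes to HW_SSP_CTRL1 + MXS_CLR_ADDR here (and to + STMP_OFFSET_REG_CLR on the other side of the diff) rely on the MXS/STMP register convention: every control register has shadow addresses at +0x4 (set), +0x8 (clear) and +0xC (toggle), so individual bits can be changed without a read-modify-write. A minimal stand-alone sketch of that convention; the register contents and bit position below are illustrative only, not taken from this driver.

#include <stdint.h>
#include <stdio.h>

#define REG_SET 0x4	/* write 1s here to set bits   */
#define REG_CLR 0x8	/* write 1s here to clear bits */

static uint32_t ctrl1;	/* stands in for one memory-mapped register */

/* Simulated writel(): offset 0 writes the value, +0x4 sets, +0x8 clears. */
static void writel_sim(uint32_t val, unsigned int offset)
{
	if (offset == REG_SET)
		ctrl1 |= val;
	else if (offset == REG_CLR)
		ctrl1 &= ~val;
	else
		ctrl1 = val;
}

int main(void)
{
	uint32_t sdio_irq_en = 1u << 31;	/* illustrative bit position */

	writel_sim(sdio_irq_en, REG_SET);
	printf("after SET: %08x\n", (unsigned int)ctrl1);	/* 80000000 */
	writel_sim(sdio_irq_en, REG_CLR);
	printf("after CLR: %08x\n", (unsigned int)ctrl1);	/* 00000000 */
	return 0;
}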
@@ -221,9 +303,8 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id) | |||
221 | } | 303 | } |
222 | 304 | ||
223 | static struct dma_async_tx_descriptor *mxs_mmc_prep_dma( | 305 | static struct dma_async_tx_descriptor *mxs_mmc_prep_dma( |
224 | struct mxs_mmc_host *host, unsigned long flags) | 306 | struct mxs_mmc_host *host, unsigned int append) |
225 | { | 307 | { |
226 | struct mxs_ssp *ssp = &host->ssp; | ||
227 | struct dma_async_tx_descriptor *desc; | 308 | struct dma_async_tx_descriptor *desc; |
228 | struct mmc_data *data = host->data; | 309 | struct mmc_data *data = host->data; |
229 | struct scatterlist * sgl; | 310 | struct scatterlist * sgl; |
@@ -232,24 +313,24 @@ static struct dma_async_tx_descriptor *mxs_mmc_prep_dma( | |||
232 | if (data) { | 313 | if (data) { |
233 | /* data */ | 314 | /* data */ |
234 | dma_map_sg(mmc_dev(host->mmc), data->sg, | 315 | dma_map_sg(mmc_dev(host->mmc), data->sg, |
235 | data->sg_len, ssp->dma_dir); | 316 | data->sg_len, host->dma_dir); |
236 | sgl = data->sg; | 317 | sgl = data->sg; |
237 | sg_len = data->sg_len; | 318 | sg_len = data->sg_len; |
238 | } else { | 319 | } else { |
239 | /* pio */ | 320 | /* pio */ |
240 | sgl = (struct scatterlist *) ssp->ssp_pio_words; | 321 | sgl = (struct scatterlist *) host->ssp_pio_words; |
241 | sg_len = SSP_PIO_NUM; | 322 | sg_len = SSP_PIO_NUM; |
242 | } | 323 | } |
243 | 324 | ||
244 | desc = dmaengine_prep_slave_sg(ssp->dmach, | 325 | desc = host->dmach->device->device_prep_slave_sg(host->dmach, |
245 | sgl, sg_len, ssp->slave_dirn, flags); | 326 | sgl, sg_len, host->dma_dir, append); |
246 | if (desc) { | 327 | if (desc) { |
247 | desc->callback = mxs_mmc_dma_irq_callback; | 328 | desc->callback = mxs_mmc_dma_irq_callback; |
248 | desc->callback_param = host; | 329 | desc->callback_param = host; |
249 | } else { | 330 | } else { |
250 | if (data) | 331 | if (data) |
251 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, | 332 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, |
252 | data->sg_len, ssp->dma_dir); | 333 | data->sg_len, host->dma_dir); |
253 | } | 334 | } |
254 | 335 | ||
255 | return desc; | 336 | return desc; |
@@ -257,7 +338,6 @@ static struct dma_async_tx_descriptor *mxs_mmc_prep_dma( | |||
257 | 338 | ||
258 | static void mxs_mmc_bc(struct mxs_mmc_host *host) | 339 | static void mxs_mmc_bc(struct mxs_mmc_host *host) |
259 | { | 340 | { |
260 | struct mxs_ssp *ssp = &host->ssp; | ||
261 | struct mmc_command *cmd = host->cmd; | 341 | struct mmc_command *cmd = host->cmd; |
262 | struct dma_async_tx_descriptor *desc; | 342 | struct dma_async_tx_descriptor *desc; |
263 | u32 ctrl0, cmd0, cmd1; | 343 | u32 ctrl0, cmd0, cmd1; |
@@ -271,17 +351,15 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host) | |||
271 | cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; | 351 | cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; |
272 | } | 352 | } |
273 | 353 | ||
274 | ssp->ssp_pio_words[0] = ctrl0; | 354 | host->ssp_pio_words[0] = ctrl0; |
275 | ssp->ssp_pio_words[1] = cmd0; | 355 | host->ssp_pio_words[1] = cmd0; |
276 | ssp->ssp_pio_words[2] = cmd1; | 356 | host->ssp_pio_words[2] = cmd1; |
277 | ssp->dma_dir = DMA_NONE; | 357 | host->dma_dir = DMA_NONE; |
278 | ssp->slave_dirn = DMA_TRANS_NONE; | 358 | desc = mxs_mmc_prep_dma(host, 0); |
279 | desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK); | ||
280 | if (!desc) | 359 | if (!desc) |
281 | goto out; | 360 | goto out; |
282 | 361 | ||
283 | dmaengine_submit(desc); | 362 | dmaengine_submit(desc); |
284 | dma_async_issue_pending(ssp->dmach); | ||
285 | return; | 363 | return; |
286 | 364 | ||
287 | out: | 365 | out: |
@@ -291,7 +369,6 @@ out: | |||
291 | 369 | ||
292 | static void mxs_mmc_ac(struct mxs_mmc_host *host) | 370 | static void mxs_mmc_ac(struct mxs_mmc_host *host) |
293 | { | 371 | { |
294 | struct mxs_ssp *ssp = &host->ssp; | ||
295 | struct mmc_command *cmd = host->cmd; | 372 | struct mmc_command *cmd = host->cmd; |
296 | struct dma_async_tx_descriptor *desc; | 373 | struct dma_async_tx_descriptor *desc; |
297 | u32 ignore_crc, get_resp, long_resp; | 374 | u32 ignore_crc, get_resp, long_resp; |
@@ -313,17 +390,15 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host) | |||
313 | cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; | 390 | cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; |
314 | } | 391 | } |
315 | 392 | ||
316 | ssp->ssp_pio_words[0] = ctrl0; | 393 | host->ssp_pio_words[0] = ctrl0; |
317 | ssp->ssp_pio_words[1] = cmd0; | 394 | host->ssp_pio_words[1] = cmd0; |
318 | ssp->ssp_pio_words[2] = cmd1; | 395 | host->ssp_pio_words[2] = cmd1; |
319 | ssp->dma_dir = DMA_NONE; | 396 | host->dma_dir = DMA_NONE; |
320 | ssp->slave_dirn = DMA_TRANS_NONE; | 397 | desc = mxs_mmc_prep_dma(host, 0); |
321 | desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK); | ||
322 | if (!desc) | 398 | if (!desc) |
323 | goto out; | 399 | goto out; |
324 | 400 | ||
325 | dmaengine_submit(desc); | 401 | dmaengine_submit(desc); |
326 | dma_async_issue_pending(ssp->dmach); | ||
327 | return; | 402 | return; |
328 | 403 | ||
329 | out: | 404 | out: |
@@ -357,12 +432,9 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) | |||
357 | int i; | 432 | int i; |
358 | 433 | ||
359 | unsigned short dma_data_dir, timeout; | 434 | unsigned short dma_data_dir, timeout; |
360 | enum dma_transfer_direction slave_dirn; | ||
361 | unsigned int data_size = 0, log2_blksz; | 435 | unsigned int data_size = 0, log2_blksz; |
362 | unsigned int blocks = data->blocks; | 436 | unsigned int blocks = data->blocks; |
363 | 437 | ||
364 | struct mxs_ssp *ssp = &host->ssp; | ||
365 | |||
366 | u32 ignore_crc, get_resp, long_resp, read; | 438 | u32 ignore_crc, get_resp, long_resp, read; |
367 | u32 ctrl0, cmd0, cmd1, val; | 439 | u32 ctrl0, cmd0, cmd1, val; |
368 | 440 | ||
@@ -375,11 +447,9 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) | |||
375 | 447 | ||
376 | if (data->flags & MMC_DATA_WRITE) { | 448 | if (data->flags & MMC_DATA_WRITE) { |
377 | dma_data_dir = DMA_TO_DEVICE; | 449 | dma_data_dir = DMA_TO_DEVICE; |
378 | slave_dirn = DMA_MEM_TO_DEV; | ||
379 | read = 0; | 450 | read = 0; |
380 | } else { | 451 | } else { |
381 | dma_data_dir = DMA_FROM_DEVICE; | 452 | dma_data_dir = DMA_FROM_DEVICE; |
382 | slave_dirn = DMA_DEV_TO_MEM; | ||
383 | read = BM_SSP_CTRL0_READ; | 453 | read = BM_SSP_CTRL0_READ; |
384 | } | 454 | } |
385 | 455 | ||
@@ -405,15 +475,15 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) | |||
405 | blocks = 1; | 475 | blocks = 1; |
406 | 476 | ||
407 | /* xfer count, block size and count need to be set differently */ | 477 | /* xfer count, block size and count need to be set differently */ |
408 | if (ssp_is_old(ssp)) { | 478 | if (ssp_is_old()) { |
409 | ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT); | 479 | ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT); |
410 | cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) | | 480 | cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) | |
411 | BF_SSP(blocks - 1, CMD0_BLOCK_COUNT); | 481 | BF_SSP(blocks - 1, CMD0_BLOCK_COUNT); |
412 | } else { | 482 | } else { |
413 | writel(data_size, ssp->base + HW_SSP_XFER_SIZE); | 483 | writel(data_size, host->base + HW_SSP_XFER_SIZE); |
414 | writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) | | 484 | writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) | |
415 | BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT), | 485 | BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT), |
416 | ssp->base + HW_SSP_BLOCK_SIZE); | 486 | host->base + HW_SSP_BLOCK_SIZE); |
417 | } | 487 | } |
418 | 488 | ||
419 | if ((cmd->opcode == MMC_STOP_TRANSMISSION) || | 489 | if ((cmd->opcode == MMC_STOP_TRANSMISSION) || |
@@ -428,18 +498,17 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) | |||
428 | } | 498 | } |
429 | 499 | ||
430 | /* set the timeout count */ | 500 | /* set the timeout count */ |
431 | timeout = mxs_ns_to_ssp_ticks(ssp->clk_rate, data->timeout_ns); | 501 | timeout = mxs_ns_to_ssp_ticks(host->clk_rate, data->timeout_ns); |
432 | val = readl(ssp->base + HW_SSP_TIMING(ssp)); | 502 | val = readl(host->base + HW_SSP_TIMING); |
433 | val &= ~(BM_SSP_TIMING_TIMEOUT); | 503 | val &= ~(BM_SSP_TIMING_TIMEOUT); |
434 | val |= BF_SSP(timeout, TIMING_TIMEOUT); | 504 | val |= BF_SSP(timeout, TIMING_TIMEOUT); |
435 | writel(val, ssp->base + HW_SSP_TIMING(ssp)); | 505 | writel(val, host->base + HW_SSP_TIMING); |
436 | 506 | ||
437 | /* pio */ | 507 | /* pio */ |
438 | ssp->ssp_pio_words[0] = ctrl0; | 508 | host->ssp_pio_words[0] = ctrl0; |
439 | ssp->ssp_pio_words[1] = cmd0; | 509 | host->ssp_pio_words[1] = cmd0; |
440 | ssp->ssp_pio_words[2] = cmd1; | 510 | host->ssp_pio_words[2] = cmd1; |
441 | ssp->dma_dir = DMA_NONE; | 511 | host->dma_dir = DMA_NONE; |
442 | ssp->slave_dirn = DMA_TRANS_NONE; | ||
443 | desc = mxs_mmc_prep_dma(host, 0); | 512 | desc = mxs_mmc_prep_dma(host, 0); |
444 | if (!desc) | 513 | if (!desc) |
445 | goto out; | 514 | goto out; |
@@ -447,14 +516,12 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) | |||
447 | /* append data sg */ | 516 | /* append data sg */ |
448 | WARN_ON(host->data != NULL); | 517 | WARN_ON(host->data != NULL); |
449 | host->data = data; | 518 | host->data = data; |
450 | ssp->dma_dir = dma_data_dir; | 519 | host->dma_dir = dma_data_dir; |
451 | ssp->slave_dirn = slave_dirn; | 520 | desc = mxs_mmc_prep_dma(host, 1); |
452 | desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
453 | if (!desc) | 521 | if (!desc) |
454 | goto out; | 522 | goto out; |
455 | 523 | ||
456 | dmaengine_submit(desc); | 524 | dmaengine_submit(desc); |
457 | dma_async_issue_pending(ssp->dmach); | ||
458 | return; | 525 | return; |
459 | out: | 526 | out: |
460 | dev_warn(mmc_dev(host->mmc), | 527 | dev_warn(mmc_dev(host->mmc), |
@@ -495,6 +562,42 @@ static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
495 | mxs_mmc_start_cmd(host, mrq->cmd); | 562 | mxs_mmc_start_cmd(host, mrq->cmd); |
496 | } | 563 | } |
497 | 564 | ||
565 | static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate) | ||
566 | { | ||
567 | unsigned int ssp_clk, ssp_sck; | ||
568 | u32 clock_divide, clock_rate; | ||
569 | u32 val; | ||
570 | |||
571 | ssp_clk = clk_get_rate(host->clk); | ||
572 | |||
573 | for (clock_divide = 2; clock_divide <= 254; clock_divide += 2) { | ||
574 | clock_rate = DIV_ROUND_UP(ssp_clk, rate * clock_divide); | ||
575 | clock_rate = (clock_rate > 0) ? clock_rate - 1 : 0; | ||
576 | if (clock_rate <= 255) | ||
577 | break; | ||
578 | } | ||
579 | |||
580 | if (clock_divide > 254) { | ||
581 | dev_err(mmc_dev(host->mmc), | ||
582 | "%s: cannot set clock to %d\n", __func__, rate); | ||
583 | return; | ||
584 | } | ||
585 | |||
586 | ssp_sck = ssp_clk / clock_divide / (1 + clock_rate); | ||
587 | |||
588 | val = readl(host->base + HW_SSP_TIMING); | ||
589 | val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE); | ||
590 | val |= BF_SSP(clock_divide, TIMING_CLOCK_DIVIDE); | ||
591 | val |= BF_SSP(clock_rate, TIMING_CLOCK_RATE); | ||
592 | writel(val, host->base + HW_SSP_TIMING); | ||
593 | |||
594 | host->clk_rate = ssp_sck; | ||
595 | |||
596 | dev_dbg(mmc_dev(host->mmc), | ||
597 | "%s: clock_divide %d, clock_rate %d, ssp_clk %d, rate_actual %d, rate_requested %d\n", | ||
598 | __func__, clock_divide, clock_rate, ssp_clk, ssp_sck, rate); | ||
599 | } | ||
600 | |||
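The mxs_mmc_set_clk_rate() helper added above searches for an even DIVIDE (2..254) and an 8-bit RATE so that SSPCLK / DIVIDE / (RATE + 1) lands at or below the requested frequency. A standalone sketch of the same search, assuming an illustrative 28.8 MHz SSP input clock and a 400 kHz request (neither value comes from this patch); it is pure arithmetic and touches no hardware.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int ssp_clk = 28800000;	/* assumed SSP input clock (Hz) */
	unsigned int rate = 400000;		/* requested SD clock (Hz) */
	unsigned int clock_divide, clock_rate = 0, ssp_sck;

	/* DIVIDE must be even and <= 254; RATE must fit in 8 bits. */
	for (clock_divide = 2; clock_divide <= 254; clock_divide += 2) {
		clock_rate = DIV_ROUND_UP(ssp_clk, rate * clock_divide);
		clock_rate = (clock_rate > 0) ? clock_rate - 1 : 0;
		if (clock_rate <= 255)
			break;
	}

	if (clock_divide > 254) {
		printf("cannot reach %u Hz\n", rate);
		return 1;
	}

	/* Actual SCK = SSPCLK / DIVIDE / (RATE + 1) */
	ssp_sck = ssp_clk / clock_divide / (1 + clock_rate);
	printf("DIVIDE=%u RATE=%u -> SCK=%u Hz (requested %u Hz)\n",
	       clock_divide, clock_rate, ssp_sck, rate);
	/* Expected: DIVIDE=2 RATE=35 -> SCK=400000 Hz */
	return 0;
}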
498 | static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | 601 | static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) |
499 | { | 602 | { |
500 | struct mxs_mmc_host *host = mmc_priv(mmc); | 603 | struct mxs_mmc_host *host = mmc_priv(mmc); |
@@ -507,13 +610,12 @@ static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
507 | host->bus_width = 0; | 610 | host->bus_width = 0; |
508 | 611 | ||
509 | if (ios->clock) | 612 | if (ios->clock) |
510 | mxs_ssp_set_clk_rate(&host->ssp, ios->clock); | 613 | mxs_mmc_set_clk_rate(host, ios->clock); |
511 | } | 614 | } |
512 | 615 | ||
513 | static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) | 616 | static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) |
514 | { | 617 | { |
515 | struct mxs_mmc_host *host = mmc_priv(mmc); | 618 | struct mxs_mmc_host *host = mmc_priv(mmc); |
516 | struct mxs_ssp *ssp = &host->ssp; | ||
517 | unsigned long flags; | 619 | unsigned long flags; |
518 | 620 | ||
519 | spin_lock_irqsave(&host->lock, flags); | 621 | spin_lock_irqsave(&host->lock, flags); |
@@ -522,22 +624,21 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) | |||
522 | 624 | ||
523 | if (enable) { | 625 | if (enable) { |
524 | writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, | 626 | writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, |
525 | ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); | 627 | host->base + HW_SSP_CTRL0 + MXS_SET_ADDR); |
526 | writel(BM_SSP_CTRL1_SDIO_IRQ_EN, | 628 | writel(BM_SSP_CTRL1_SDIO_IRQ_EN, |
527 | ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_SET); | 629 | host->base + HW_SSP_CTRL1 + MXS_SET_ADDR); |
630 | |||
631 | if (readl(host->base + HW_SSP_STATUS) & BM_SSP_STATUS_SDIO_IRQ) | ||
632 | mmc_signal_sdio_irq(host->mmc); | ||
633 | |||
528 | } else { | 634 | } else { |
529 | writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, | 635 | writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, |
530 | ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); | 636 | host->base + HW_SSP_CTRL0 + MXS_CLR_ADDR); |
531 | writel(BM_SSP_CTRL1_SDIO_IRQ_EN, | 637 | writel(BM_SSP_CTRL1_SDIO_IRQ_EN, |
532 | ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR); | 638 | host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR); |
533 | } | 639 | } |
534 | 640 | ||
535 | spin_unlock_irqrestore(&host->lock, flags); | 641 | spin_unlock_irqrestore(&host->lock, flags); |
536 | |||
537 | if (enable && readl(ssp->base + HW_SSP_STATUS(ssp)) & | ||
538 | BM_SSP_STATUS_SDIO_IRQ) | ||
539 | mmc_signal_sdio_irq(host->mmc); | ||
540 | |||
541 | } | 642 | } |
542 | 643 | ||
543 | static const struct mmc_host_ops mxs_mmc_ops = { | 644 | static const struct mmc_host_ops mxs_mmc_ops = { |
@@ -551,126 +652,75 @@ static const struct mmc_host_ops mxs_mmc_ops = { | |||
551 | static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param) | 652 | static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param) |
552 | { | 653 | { |
553 | struct mxs_mmc_host *host = param; | 654 | struct mxs_mmc_host *host = param; |
554 | struct mxs_ssp *ssp = &host->ssp; | ||
555 | 655 | ||
556 | if (!mxs_dma_is_apbh(chan)) | 656 | if (!mxs_dma_is_apbh(chan)) |
557 | return false; | 657 | return false; |
558 | 658 | ||
559 | if (chan->chan_id != ssp->dma_channel) | 659 | if (chan->chan_id != host->dma_res->start) |
560 | return false; | 660 | return false; |
561 | 661 | ||
562 | chan->private = &ssp->dma_data; | 662 | chan->private = &host->dma_data; |
563 | 663 | ||
564 | return true; | 664 | return true; |
565 | } | 665 | } |
566 | 666 | ||
567 | static struct platform_device_id mxs_ssp_ids[] = { | ||
568 | { | ||
569 | .name = "imx23-mmc", | ||
570 | .driver_data = IMX23_SSP, | ||
571 | }, { | ||
572 | .name = "imx28-mmc", | ||
573 | .driver_data = IMX28_SSP, | ||
574 | }, { | ||
575 | /* sentinel */ | ||
576 | } | ||
577 | }; | ||
578 | MODULE_DEVICE_TABLE(platform, mxs_ssp_ids); | ||
579 | |||
580 | static const struct of_device_id mxs_mmc_dt_ids[] = { | ||
581 | { .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_SSP, }, | ||
582 | { .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_SSP, }, | ||
583 | { /* sentinel */ } | ||
584 | }; | ||
585 | MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids); | ||
586 | |||
587 | static int mxs_mmc_probe(struct platform_device *pdev) | 667 | static int mxs_mmc_probe(struct platform_device *pdev) |
588 | { | 668 | { |
589 | const struct of_device_id *of_id = | ||
590 | of_match_device(mxs_mmc_dt_ids, &pdev->dev); | ||
591 | struct device_node *np = pdev->dev.of_node; | ||
592 | struct mxs_mmc_host *host; | 669 | struct mxs_mmc_host *host; |
593 | struct mmc_host *mmc; | 670 | struct mmc_host *mmc; |
594 | struct resource *iores, *dmares; | 671 | struct resource *iores, *dmares, *r; |
595 | struct pinctrl *pinctrl; | 672 | struct mxs_mmc_platform_data *pdata; |
596 | int ret = 0, irq_err, irq_dma; | 673 | int ret = 0, irq_err, irq_dma; |
597 | dma_cap_mask_t mask; | 674 | dma_cap_mask_t mask; |
598 | struct regulator *reg_vmmc; | ||
599 | enum of_gpio_flags flags; | ||
600 | struct mxs_ssp *ssp; | ||
601 | u32 bus_width = 0; | ||
602 | 675 | ||
603 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 676 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
604 | dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0); | 677 | dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0); |
605 | irq_err = platform_get_irq(pdev, 0); | 678 | irq_err = platform_get_irq(pdev, 0); |
606 | irq_dma = platform_get_irq(pdev, 1); | 679 | irq_dma = platform_get_irq(pdev, 1); |
607 | if (!iores || irq_err < 0 || irq_dma < 0) | 680 | if (!iores || !dmares || irq_err < 0 || irq_dma < 0) |
608 | return -EINVAL; | 681 | return -EINVAL; |
609 | 682 | ||
683 | r = request_mem_region(iores->start, resource_size(iores), pdev->name); | ||
684 | if (!r) | ||
685 | return -EBUSY; | ||
686 | |||
610 | mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev); | 687 | mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev); |
611 | if (!mmc) | 688 | if (!mmc) { |
612 | return -ENOMEM; | 689 | ret = -ENOMEM; |
690 | goto out_release_mem; | ||
691 | } | ||
613 | 692 | ||
614 | host = mmc_priv(mmc); | 693 | host = mmc_priv(mmc); |
615 | ssp = &host->ssp; | 694 | host->base = ioremap(r->start, resource_size(r)); |
616 | ssp->dev = &pdev->dev; | 695 | if (!host->base) { |
617 | ssp->base = devm_request_and_ioremap(&pdev->dev, iores); | 696 | ret = -ENOMEM; |
618 | if (!ssp->base) { | ||
619 | ret = -EADDRNOTAVAIL; | ||
620 | goto out_mmc_free; | 697 | goto out_mmc_free; |
621 | } | 698 | } |
622 | 699 | ||
623 | if (np) { | 700 | /* only the major version matters */
624 | ssp->devid = (enum mxs_ssp_id) of_id->data; | 701 | host->version = readl(host->base + HW_SSP_VERSION) >> |
625 | /* | 702 | BP_SSP_VERSION_MAJOR; |
626 | * TODO: This is a temporary solution and should be changed | ||
627 | * to use generic DMA binding later when the helpers get in. | ||
628 | */ | ||
629 | ret = of_property_read_u32(np, "fsl,ssp-dma-channel", | ||
630 | &ssp->dma_channel); | ||
631 | if (ret) { | ||
632 | dev_err(mmc_dev(host->mmc), | ||
633 | "failed to get dma channel\n"); | ||
634 | goto out_mmc_free; | ||
635 | } | ||
636 | } else { | ||
637 | ssp->devid = pdev->id_entry->driver_data; | ||
638 | ssp->dma_channel = dmares->start; | ||
639 | } | ||
640 | 703 | ||
641 | host->mmc = mmc; | 704 | host->mmc = mmc; |
705 | host->res = r; | ||
706 | host->dma_res = dmares; | ||
707 | host->irq = irq_err; | ||
642 | host->sdio_irq_en = 0; | 708 | host->sdio_irq_en = 0; |
643 | 709 | ||
644 | reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc"); | 710 | host->clk = clk_get(&pdev->dev, NULL); |
645 | if (!IS_ERR(reg_vmmc)) { | 711 | if (IS_ERR(host->clk)) { |
646 | ret = regulator_enable(reg_vmmc); | 712 | ret = PTR_ERR(host->clk); |
647 | if (ret) { | 713 | goto out_iounmap; |
648 | dev_err(&pdev->dev, | ||
649 | "Failed to enable vmmc regulator: %d\n", ret); | ||
650 | goto out_mmc_free; | ||
651 | } | ||
652 | } | 714 | } |
653 | 715 | clk_enable(host->clk); | |
654 | pinctrl = devm_pinctrl_get_select_default(&pdev->dev); | ||
655 | if (IS_ERR(pinctrl)) { | ||
656 | ret = PTR_ERR(pinctrl); | ||
657 | goto out_mmc_free; | ||
658 | } | ||
659 | |||
660 | ssp->clk = clk_get(&pdev->dev, NULL); | ||
661 | if (IS_ERR(ssp->clk)) { | ||
662 | ret = PTR_ERR(ssp->clk); | ||
663 | goto out_mmc_free; | ||
664 | } | ||
665 | clk_prepare_enable(ssp->clk); | ||
666 | 716 | ||
667 | mxs_mmc_reset(host); | 717 | mxs_mmc_reset(host); |
668 | 718 | ||
669 | dma_cap_zero(mask); | 719 | dma_cap_zero(mask); |
670 | dma_cap_set(DMA_SLAVE, mask); | 720 | dma_cap_set(DMA_SLAVE, mask); |
671 | ssp->dma_data.chan_irq = irq_dma; | 721 | host->dma_data.chan_irq = irq_dma; |
672 | ssp->dmach = dma_request_channel(mask, mxs_mmc_dma_filter, host); | 722 | host->dmach = dma_request_channel(mask, mxs_mmc_dma_filter, host); |
673 | if (!ssp->dmach) { | 723 | if (!host->dmach) { |
674 | dev_err(mmc_dev(host->mmc), | 724 | dev_err(mmc_dev(host->mmc), |
675 | "%s: failed to request dma\n", __func__); | 725 | "%s: failed to request dma\n", __func__); |
676 | goto out_clk_put; | 726 | goto out_clk_put; |
@@ -681,15 +731,13 @@ static int mxs_mmc_probe(struct platform_device *pdev) | |||
681 | mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED | | 731 | mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED | |
682 | MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL; | 732 | MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL; |
683 | 733 | ||
684 | of_property_read_u32(np, "bus-width", &bus_width); | 734 | pdata = mmc_dev(host->mmc)->platform_data; |
685 | if (bus_width == 4) | 735 | if (pdata) { |
686 | mmc->caps |= MMC_CAP_4_BIT_DATA; | 736 | if (pdata->flags & SLOTF_8_BIT_CAPABLE) |
687 | else if (bus_width == 8) | 737 | mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA; |
688 | mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA; | 738 | if (pdata->flags & SLOTF_4_BIT_CAPABLE) |
689 | host->wp_gpio = of_get_named_gpio_flags(np, "wp-gpios", 0, &flags); | 739 | mmc->caps |= MMC_CAP_4_BIT_DATA; |
690 | 740 | } | |
691 | if (flags & OF_GPIO_ACTIVE_LOW) | ||
692 | host->wp_inverted = 1; | ||
693 | 741 | ||
694 | mmc->f_min = 400000; | 742 | mmc->f_min = 400000; |
695 | mmc->f_max = 288000000; | 743 | mmc->f_max = 288000000; |
@@ -697,14 +745,13 @@ static int mxs_mmc_probe(struct platform_device *pdev) | |||
697 | 745 | ||
698 | mmc->max_segs = 52; | 746 | mmc->max_segs = 52; |
699 | mmc->max_blk_size = 1 << 0xf; | 747 | mmc->max_blk_size = 1 << 0xf; |
700 | mmc->max_blk_count = (ssp_is_old(ssp)) ? 0xff : 0xffffff; | 748 | mmc->max_blk_count = (ssp_is_old()) ? 0xff : 0xffffff; |
701 | mmc->max_req_size = (ssp_is_old(ssp)) ? 0xffff : 0xffffffff; | 749 | mmc->max_req_size = (ssp_is_old()) ? 0xffff : 0xffffffff; |
702 | mmc->max_seg_size = dma_get_max_seg_size(ssp->dmach->device->dev); | 750 | mmc->max_seg_size = dma_get_max_seg_size(host->dmach->device->dev); |
703 | 751 | ||
704 | platform_set_drvdata(pdev, mmc); | 752 | platform_set_drvdata(pdev, mmc); |
705 | 753 | ||
706 | ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0, | 754 | ret = request_irq(host->irq, mxs_mmc_irq_handler, 0, DRIVER_NAME, host); |
707 | DRIVER_NAME, host); | ||
708 | if (ret) | 755 | if (ret) |
709 | goto out_free_dma; | 756 | goto out_free_dma; |
710 | 757 | ||
@@ -712,20 +759,26 @@ static int mxs_mmc_probe(struct platform_device *pdev) | |||
712 | 759 | ||
713 | ret = mmc_add_host(mmc); | 760 | ret = mmc_add_host(mmc); |
714 | if (ret) | 761 | if (ret) |
715 | goto out_free_dma; | 762 | goto out_free_irq; |
716 | 763 | ||
717 | dev_info(mmc_dev(host->mmc), "initialized\n"); | 764 | dev_info(mmc_dev(host->mmc), "initialized\n"); |
718 | 765 | ||
719 | return 0; | 766 | return 0; |
720 | 767 | ||
768 | out_free_irq: | ||
769 | free_irq(host->irq, host); | ||
721 | out_free_dma: | 770 | out_free_dma: |
722 | if (ssp->dmach) | 771 | if (host->dmach) |
723 | dma_release_channel(ssp->dmach); | 772 | dma_release_channel(host->dmach); |
724 | out_clk_put: | 773 | out_clk_put: |
725 | clk_disable_unprepare(ssp->clk); | 774 | clk_disable(host->clk); |
726 | clk_put(ssp->clk); | 775 | clk_put(host->clk); |
776 | out_iounmap: | ||
777 | iounmap(host->base); | ||
727 | out_mmc_free: | 778 | out_mmc_free: |
728 | mmc_free_host(mmc); | 779 | mmc_free_host(mmc); |
780 | out_release_mem: | ||
781 | release_mem_region(iores->start, resource_size(iores)); | ||
729 | return ret; | 782 | return ret; |
730 | } | 783 | } |
731 | 784 | ||
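The probe error path above follows the usual goto-unwind ladder: each out_* label undoes exactly one acquisition, in reverse order, and a failure jumps to the label matching the last step that succeeded. A generic, runnable sketch of the pattern; the resources below are placeholders, not the driver's real ones.

#include <stdio.h>
#include <stdlib.h>

static void *grab(const char *what, int fail)
{
	if (fail) {
		printf("failed to grab %s\n", what);
		return NULL;
	}
	printf("grabbed %s\n", what);
	return malloc(1);
}

static void drop(const char *what, void *p)
{
	printf("released %s\n", what);
	free(p);
}

int main(void)
{
	void *mem, *clk, *irq;
	int ret = -1;

	mem = grab("mem region", 0);
	if (!mem)
		goto out;
	clk = grab("clock", 0);
	if (!clk)
		goto out_mem;
	irq = grab("irq", 1);	/* flip to 0 to see the success path */
	if (!irq)
		goto out_clk;

	/* success: return without unwinding, resources stay held */
	printf("probe succeeded\n");
	return 0;

out_clk:
	drop("clock", clk);
out_mem:
	drop("mem region", mem);
out:
	return ret;
}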
@@ -733,20 +786,26 @@ static int mxs_mmc_remove(struct platform_device *pdev) | |||
733 | { | 786 | { |
734 | struct mmc_host *mmc = platform_get_drvdata(pdev); | 787 | struct mmc_host *mmc = platform_get_drvdata(pdev); |
735 | struct mxs_mmc_host *host = mmc_priv(mmc); | 788 | struct mxs_mmc_host *host = mmc_priv(mmc); |
736 | struct mxs_ssp *ssp = &host->ssp; | 789 | struct resource *res = host->res; |
737 | 790 | ||
738 | mmc_remove_host(mmc); | 791 | mmc_remove_host(mmc); |
739 | 792 | ||
793 | free_irq(host->irq, host); | ||
794 | |||
740 | platform_set_drvdata(pdev, NULL); | 795 | platform_set_drvdata(pdev, NULL); |
741 | 796 | ||
742 | if (ssp->dmach) | 797 | if (host->dmach) |
743 | dma_release_channel(ssp->dmach); | 798 | dma_release_channel(host->dmach); |
744 | 799 | ||
745 | clk_disable_unprepare(ssp->clk); | 800 | clk_disable(host->clk); |
746 | clk_put(ssp->clk); | 801 | clk_put(host->clk); |
802 | |||
803 | iounmap(host->base); | ||
747 | 804 | ||
748 | mmc_free_host(mmc); | 805 | mmc_free_host(mmc); |
749 | 806 | ||
807 | release_mem_region(res->start, resource_size(res)); | ||
808 | |||
750 | return 0; | 809 | return 0; |
751 | } | 810 | } |
752 | 811 | ||
@@ -755,12 +814,11 @@ static int mxs_mmc_suspend(struct device *dev) | |||
755 | { | 814 | { |
756 | struct mmc_host *mmc = dev_get_drvdata(dev); | 815 | struct mmc_host *mmc = dev_get_drvdata(dev); |
757 | struct mxs_mmc_host *host = mmc_priv(mmc); | 816 | struct mxs_mmc_host *host = mmc_priv(mmc); |
758 | struct mxs_ssp *ssp = &host->ssp; | ||
759 | int ret = 0; | 817 | int ret = 0; |
760 | 818 | ||
761 | ret = mmc_suspend_host(mmc); | 819 | ret = mmc_suspend_host(mmc); |
762 | 820 | ||
763 | clk_disable_unprepare(ssp->clk); | 821 | clk_disable(host->clk); |
764 | 822 | ||
765 | return ret; | 823 | return ret; |
766 | } | 824 | } |
@@ -769,10 +827,9 @@ static int mxs_mmc_resume(struct device *dev) | |||
769 | { | 827 | { |
770 | struct mmc_host *mmc = dev_get_drvdata(dev); | 828 | struct mmc_host *mmc = dev_get_drvdata(dev); |
771 | struct mxs_mmc_host *host = mmc_priv(mmc); | 829 | struct mxs_mmc_host *host = mmc_priv(mmc); |
772 | struct mxs_ssp *ssp = &host->ssp; | ||
773 | int ret = 0; | 830 | int ret = 0; |
774 | 831 | ||
775 | clk_prepare_enable(ssp->clk); | 832 | clk_enable(host->clk); |
776 | 833 | ||
777 | ret = mmc_resume_host(mmc); | 834 | ret = mmc_resume_host(mmc); |
778 | 835 | ||
@@ -788,18 +845,27 @@ static const struct dev_pm_ops mxs_mmc_pm_ops = { | |||
788 | static struct platform_driver mxs_mmc_driver = { | 845 | static struct platform_driver mxs_mmc_driver = { |
789 | .probe = mxs_mmc_probe, | 846 | .probe = mxs_mmc_probe, |
790 | .remove = mxs_mmc_remove, | 847 | .remove = mxs_mmc_remove, |
791 | .id_table = mxs_ssp_ids, | ||
792 | .driver = { | 848 | .driver = { |
793 | .name = DRIVER_NAME, | 849 | .name = DRIVER_NAME, |
794 | .owner = THIS_MODULE, | 850 | .owner = THIS_MODULE, |
795 | #ifdef CONFIG_PM | 851 | #ifdef CONFIG_PM |
796 | .pm = &mxs_mmc_pm_ops, | 852 | .pm = &mxs_mmc_pm_ops, |
797 | #endif | 853 | #endif |
798 | .of_match_table = mxs_mmc_dt_ids, | ||
799 | }, | 854 | }, |
800 | }; | 855 | }; |
801 | 856 | ||
802 | module_platform_driver(mxs_mmc_driver); | 857 | static int __init mxs_mmc_init(void) |
858 | { | ||
859 | return platform_driver_register(&mxs_mmc_driver); | ||
860 | } | ||
861 | |||
862 | static void __exit mxs_mmc_exit(void) | ||
863 | { | ||
864 | platform_driver_unregister(&mxs_mmc_driver); | ||
865 | } | ||
866 | |||
867 | module_init(mxs_mmc_init); | ||
868 | module_exit(mxs_mmc_exit); | ||
803 | 869 | ||
804 | MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral"); | 870 | MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral"); |
805 | MODULE_AUTHOR("Freescale Semiconductor"); | 871 | MODULE_AUTHOR("Freescale Semiconductor"); |
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c index 1534b582c41..ab66f2454dc 100644 --- a/drivers/mmc/host/of_mmc_spi.c +++ b/drivers/mmc/host/of_mmc_spi.c | |||
@@ -113,8 +113,8 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi) | |||
113 | const int j = i * 2; | 113 | const int j = i * 2; |
114 | u32 mask; | 114 | u32 mask; |
115 | 115 | ||
116 | mask = mmc_vddrange_to_ocrmask(be32_to_cpu(voltage_ranges[j]), | 116 | mask = mmc_vddrange_to_ocrmask(voltage_ranges[j], |
117 | be32_to_cpu(voltage_ranges[j + 1])); | 117 | voltage_ranges[j + 1]); |
118 | if (!mask) { | 118 | if (!mask) { |
119 | ret = -EINVAL; | 119 | ret = -EINVAL; |
120 | dev_err(dev, "OF: voltage-range #%d is invalid\n", i); | 120 | dev_err(dev, "OF: voltage-range #%d is invalid\n", i); |
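For context on the be32_to_cpu() calls changed in this hunk: device-tree property cells are stored big-endian in the blob, so on a little-endian CPU each u32 read straight out of a property needs a byte swap before use. A plain-C illustration with a sample cell value of 3300; the value is only an example and no OF APIs are involved.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Decode a big-endian 32-bit word into host order, byte by byte. */
static uint32_t be32_to_cpu_sim(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	/* 3300 encoded big-endian, as it would sit in a DT blob */
	uint8_t cell[4] = { 0x00, 0x00, 0x0c, 0xe4 };
	uint32_t raw;

	memcpy(&raw, cell, sizeof(raw));
	printf("raw word as loaded: 0x%08x\n", (unsigned int)raw);	/* 0xe40c0000 on LE */
	printf("after byte swap:    %u\n", (unsigned int)be32_to_cpu_sim(cell));	/* 3300 */
	return 0;
}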
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index 4254975f931..a6c32904014 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * linux/drivers/mmc/host/omap.c | 2 | * linux/drivers/mmc/host/omap.c |
3 | * | 3 | * |
4 | * Copyright (C) 2004 Nokia Corporation | 4 | * Copyright (C) 2004 Nokia Corporation |
5 | * Written by Tuukka Tikkanen and Juha Yrjölä<juha.yrjola@nokia.com> | 5 | * Written by Tuukka Tikkanen and Juha Yrjölä<juha.yrjola@nokia.com> |
6 | * Misc hacks here and there by Tony Lindgren <tony@atomide.com> | 6 | * Misc hacks here and there by Tony Lindgren <tony@atomide.com> |
7 | * Other hacks (DMA, SD, etc) by David Brownell | 7 | * Other hacks (DMA, SD, etc) by David Brownell |
8 | * | 8 | * |
@@ -17,19 +17,26 @@ | |||
17 | #include <linux/ioport.h> | 17 | #include <linux/ioport.h> |
18 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <linux/dmaengine.h> | ||
21 | #include <linux/dma-mapping.h> | 20 | #include <linux/dma-mapping.h> |
22 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
23 | #include <linux/spinlock.h> | 22 | #include <linux/spinlock.h> |
24 | #include <linux/timer.h> | 23 | #include <linux/timer.h> |
25 | #include <linux/omap-dma.h> | ||
26 | #include <linux/mmc/host.h> | 24 | #include <linux/mmc/host.h> |
27 | #include <linux/mmc/card.h> | 25 | #include <linux/mmc/card.h> |
28 | #include <linux/clk.h> | 26 | #include <linux/clk.h> |
29 | #include <linux/scatterlist.h> | 27 | #include <linux/scatterlist.h> |
28 | #include <linux/i2c/tps65010.h> | ||
30 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
31 | #include <linux/platform_data/mmc-omap.h> | ||
32 | 30 | ||
31 | #include <asm/io.h> | ||
32 | #include <asm/irq.h> | ||
33 | |||
34 | #include <plat/board.h> | ||
35 | #include <plat/mmc.h> | ||
36 | #include <mach/gpio.h> | ||
37 | #include <plat/dma.h> | ||
38 | #include <plat/mux.h> | ||
39 | #include <plat/fpga.h> | ||
33 | 40 | ||
34 | #define OMAP_MMC_REG_CMD 0x00 | 41 | #define OMAP_MMC_REG_CMD 0x00 |
35 | #define OMAP_MMC_REG_ARGL 0x01 | 42 | #define OMAP_MMC_REG_ARGL 0x01 |
@@ -71,13 +78,6 @@ | |||
71 | #define OMAP_MMC_STAT_CARD_BUSY (1 << 2) | 78 | #define OMAP_MMC_STAT_CARD_BUSY (1 << 2) |
72 | #define OMAP_MMC_STAT_END_OF_CMD (1 << 0) | 79 | #define OMAP_MMC_STAT_END_OF_CMD (1 << 0) |
73 | 80 | ||
74 | #define mmc_omap7xx() (host->features & MMC_OMAP7XX) | ||
75 | #define mmc_omap15xx() (host->features & MMC_OMAP15XX) | ||
76 | #define mmc_omap16xx() (host->features & MMC_OMAP16XX) | ||
77 | #define MMC_OMAP1_MASK (MMC_OMAP7XX | MMC_OMAP15XX | MMC_OMAP16XX) | ||
78 | #define mmc_omap1() (host->features & MMC_OMAP1_MASK) | ||
79 | #define mmc_omap2() (!mmc_omap1()) | ||
80 | |||
81 | #define OMAP_MMC_REG(host, reg) (OMAP_MMC_REG_##reg << (host)->reg_shift) | 81 | #define OMAP_MMC_REG(host, reg) (OMAP_MMC_REG_##reg << (host)->reg_shift) |
82 | #define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg)) | 82 | #define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg)) |
83 | #define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg)) | 83 | #define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg)) |
@@ -90,16 +90,6 @@ | |||
90 | #define OMAP_MMC_CMDTYPE_AC 2 | 90 | #define OMAP_MMC_CMDTYPE_AC 2 |
91 | #define OMAP_MMC_CMDTYPE_ADTC 3 | 91 | #define OMAP_MMC_CMDTYPE_ADTC 3 |
92 | 92 | ||
93 | #define OMAP_DMA_MMC_TX 21 | ||
94 | #define OMAP_DMA_MMC_RX 22 | ||
95 | #define OMAP_DMA_MMC2_TX 54 | ||
96 | #define OMAP_DMA_MMC2_RX 55 | ||
97 | |||
98 | #define OMAP24XX_DMA_MMC2_TX 47 | ||
99 | #define OMAP24XX_DMA_MMC2_RX 48 | ||
100 | #define OMAP24XX_DMA_MMC1_TX 61 | ||
101 | #define OMAP24XX_DMA_MMC1_RX 62 | ||
102 | |||
103 | 93 | ||
104 | #define DRIVER_NAME "mmci-omap" | 94 | #define DRIVER_NAME "mmci-omap" |
105 | 95 | ||
@@ -115,6 +105,7 @@ struct mmc_omap_slot { | |||
115 | u16 saved_con; | 105 | u16 saved_con; |
116 | u16 bus_mode; | 106 | u16 bus_mode; |
117 | unsigned int fclk_freq; | 107 | unsigned int fclk_freq; |
108 | unsigned powered:1; | ||
118 | 109 | ||
119 | struct tasklet_struct cover_tasklet; | 110 | struct tasklet_struct cover_tasklet; |
120 | struct timer_list cover_timer; | 111 | struct timer_list cover_timer; |
@@ -137,15 +128,12 @@ struct mmc_omap_host { | |||
137 | unsigned char id; /* 16xx chips have 2 MMC blocks */ | 128 | unsigned char id; /* 16xx chips have 2 MMC blocks */ |
138 | struct clk * iclk; | 129 | struct clk * iclk; |
139 | struct clk * fclk; | 130 | struct clk * fclk; |
140 | struct dma_chan *dma_rx; | ||
141 | u32 dma_rx_burst; | ||
142 | struct dma_chan *dma_tx; | ||
143 | u32 dma_tx_burst; | ||
144 | struct resource *mem_res; | 131 | struct resource *mem_res; |
145 | void __iomem *virt_base; | 132 | void __iomem *virt_base; |
146 | unsigned int phys_base; | 133 | unsigned int phys_base; |
147 | int irq; | 134 | int irq; |
148 | unsigned char bus_mode; | 135 | unsigned char bus_mode; |
136 | unsigned char hw_bus_mode; | ||
149 | unsigned int reg_shift; | 137 | unsigned int reg_shift; |
150 | 138 | ||
151 | struct work_struct cmd_abort_work; | 139 | struct work_struct cmd_abort_work; |
@@ -163,11 +151,14 @@ struct mmc_omap_host { | |||
163 | u32 buffer_bytes_left; | 151 | u32 buffer_bytes_left; |
164 | u32 total_bytes_left; | 152 | u32 total_bytes_left; |
165 | 153 | ||
166 | unsigned features; | ||
167 | unsigned use_dma:1; | 154 | unsigned use_dma:1; |
168 | unsigned brs_received:1, dma_done:1; | 155 | unsigned brs_received:1, dma_done:1; |
156 | unsigned dma_is_read:1; | ||
169 | unsigned dma_in_use:1; | 157 | unsigned dma_in_use:1; |
158 | int dma_ch; | ||
170 | spinlock_t dma_lock; | 159 | spinlock_t dma_lock; |
160 | struct timer_list dma_timer; | ||
161 | unsigned dma_len; | ||
171 | 162 | ||
172 | struct mmc_omap_slot *slots[OMAP_MMC_MAX_SLOTS]; | 163 | struct mmc_omap_slot *slots[OMAP_MMC_MAX_SLOTS]; |
173 | struct mmc_omap_slot *current_slot; | 164 | struct mmc_omap_slot *current_slot; |
@@ -178,11 +169,11 @@ struct mmc_omap_host { | |||
178 | struct timer_list clk_timer; | 169 | struct timer_list clk_timer; |
179 | spinlock_t clk_lock; /* for changing enabled state */ | 170 | spinlock_t clk_lock; /* for changing enabled state */ |
180 | unsigned int fclk_enabled:1; | 171 | unsigned int fclk_enabled:1; |
181 | struct workqueue_struct *mmc_omap_wq; | ||
182 | 172 | ||
183 | struct omap_mmc_platform_data *pdata; | 173 | struct omap_mmc_platform_data *pdata; |
184 | }; | 174 | }; |
185 | 175 | ||
176 | static struct workqueue_struct *mmc_omap_wq; | ||
186 | 177 | ||
187 | static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot) | 178 | static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot) |
188 | { | 179 | { |
@@ -300,7 +291,7 @@ static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled) | |||
300 | host->next_slot = new_slot; | 291 | host->next_slot = new_slot; |
301 | host->mmc = new_slot->mmc; | 292 | host->mmc = new_slot->mmc; |
302 | spin_unlock_irqrestore(&host->slot_lock, flags); | 293 | spin_unlock_irqrestore(&host->slot_lock, flags); |
303 | queue_work(host->mmc_omap_wq, &host->slot_release_work); | 294 | queue_work(mmc_omap_wq, &host->slot_release_work); |
304 | return; | 295 | return; |
305 | } | 296 | } |
306 | 297 | ||
@@ -415,25 +406,18 @@ mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data, | |||
415 | int abort) | 406 | int abort) |
416 | { | 407 | { |
417 | enum dma_data_direction dma_data_dir; | 408 | enum dma_data_direction dma_data_dir; |
418 | struct device *dev = mmc_dev(host->mmc); | ||
419 | struct dma_chan *c; | ||
420 | 409 | ||
421 | if (data->flags & MMC_DATA_WRITE) { | 410 | BUG_ON(host->dma_ch < 0); |
411 | if (data->error) | ||
412 | omap_stop_dma(host->dma_ch); | ||
413 | /* Release DMA channel lazily */ | ||
414 | mod_timer(&host->dma_timer, jiffies + HZ); | ||
415 | if (data->flags & MMC_DATA_WRITE) | ||
422 | dma_data_dir = DMA_TO_DEVICE; | 416 | dma_data_dir = DMA_TO_DEVICE; |
423 | c = host->dma_tx; | 417 | else |
424 | } else { | ||
425 | dma_data_dir = DMA_FROM_DEVICE; | 418 | dma_data_dir = DMA_FROM_DEVICE; |
426 | c = host->dma_rx; | 419 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len, |
427 | } | 420 | dma_data_dir); |
428 | if (c) { | ||
429 | if (data->error) { | ||
430 | dmaengine_terminate_all(c); | ||
431 | /* Claim nothing transferred on error... */ | ||
432 | data->bytes_xfered = 0; | ||
433 | } | ||
434 | dev = c->device->dev; | ||
435 | } | ||
436 | dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir); | ||
437 | } | 421 | } |
438 | 422 | ||
439 | static void mmc_omap_send_stop_work(struct work_struct *work) | 423 | static void mmc_omap_send_stop_work(struct work_struct *work) |
@@ -475,7 +459,7 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data) | |||
475 | } | 459 | } |
476 | 460 | ||
477 | host->stop_data = data; | 461 | host->stop_data = data; |
478 | queue_work(host->mmc_omap_wq, &host->send_stop_work); | 462 | queue_work(mmc_omap_wq, &host->send_stop_work); |
479 | } | 463 | } |
480 | 464 | ||
481 | static void | 465 | static void |
@@ -541,6 +525,16 @@ mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data) | |||
541 | } | 525 | } |
542 | 526 | ||
543 | static void | 527 | static void |
528 | mmc_omap_dma_timer(unsigned long data) | ||
529 | { | ||
530 | struct mmc_omap_host *host = (struct mmc_omap_host *) data; | ||
531 | |||
532 | BUG_ON(host->dma_ch < 0); | ||
533 | omap_free_dma(host->dma_ch); | ||
534 | host->dma_ch = -1; | ||
535 | } | ||
536 | |||
537 | static void | ||
544 | mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data) | 538 | mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data) |
545 | { | 539 | { |
546 | unsigned long flags; | 540 | unsigned long flags; |
@@ -645,7 +639,7 @@ mmc_omap_cmd_timer(unsigned long data) | |||
645 | OMAP_MMC_WRITE(host, IE, 0); | 639 | OMAP_MMC_WRITE(host, IE, 0); |
646 | disable_irq(host->irq); | 640 | disable_irq(host->irq); |
647 | host->abort = 1; | 641 | host->abort = 1; |
648 | queue_work(host->mmc_omap_wq, &host->cmd_abort_work); | 642 | queue_work(mmc_omap_wq, &host->cmd_abort_work); |
649 | } | 643 | } |
650 | spin_unlock_irqrestore(&host->slot_lock, flags); | 644 | spin_unlock_irqrestore(&host->slot_lock, flags); |
651 | } | 645 | } |
@@ -675,7 +669,7 @@ mmc_omap_clk_timer(unsigned long data) | |||
675 | static void | 669 | static void |
676 | mmc_omap_xfer_data(struct mmc_omap_host *host, int write) | 670 | mmc_omap_xfer_data(struct mmc_omap_host *host, int write) |
677 | { | 671 | { |
678 | int n, nwords; | 672 | int n; |
679 | 673 | ||
680 | if (host->buffer_bytes_left == 0) { | 674 | if (host->buffer_bytes_left == 0) { |
681 | host->sg_idx++; | 675 | host->sg_idx++; |
@@ -685,48 +679,33 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write) | |||
685 | n = 64; | 679 | n = 64; |
686 | if (n > host->buffer_bytes_left) | 680 | if (n > host->buffer_bytes_left) |
687 | n = host->buffer_bytes_left; | 681 | n = host->buffer_bytes_left; |
688 | |||
689 | nwords = n / 2; | ||
690 | nwords += n & 1; /* handle odd number of bytes to transfer */ | ||
691 | |||
692 | host->buffer_bytes_left -= n; | 682 | host->buffer_bytes_left -= n; |
693 | host->total_bytes_left -= n; | 683 | host->total_bytes_left -= n; |
694 | host->data->bytes_xfered += n; | 684 | host->data->bytes_xfered += n; |
695 | 685 | ||
696 | if (write) { | 686 | if (write) { |
697 | __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), | 687 | __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); |
698 | host->buffer, nwords); | ||
699 | } else { | 688 | } else { |
700 | __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), | 689 | __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); |
701 | host->buffer, nwords); | ||
702 | } | 690 | } |
703 | |||
704 | host->buffer += nwords; | ||
705 | } | 691 | } |
706 | 692 | ||
707 | #ifdef CONFIG_MMC_DEBUG | 693 | static inline void mmc_omap_report_irq(u16 status) |
708 | static void mmc_omap_report_irq(struct mmc_omap_host *host, u16 status) | ||
709 | { | 694 | { |
710 | static const char *mmc_omap_status_bits[] = { | 695 | static const char *mmc_omap_status_bits[] = { |
711 | "EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO", | 696 | "EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO", |
712 | "CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR" | 697 | "CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR" |
713 | }; | 698 | }; |
714 | int i; | 699 | int i, c = 0; |
715 | char res[64], *buf = res; | ||
716 | |||
717 | buf += sprintf(buf, "MMC IRQ 0x%x:", status); | ||
718 | 700 | ||
719 | for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++) | 701 | for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++) |
720 | if (status & (1 << i)) | 702 | if (status & (1 << i)) { |
721 | buf += sprintf(buf, " %s", mmc_omap_status_bits[i]); | 703 | if (c) |
722 | dev_vdbg(mmc_dev(host->mmc), "%s\n", res); | 704 | printk(" "); |
723 | } | 705 | printk("%s", mmc_omap_status_bits[i]); |
724 | #else | 706 | c++; |
725 | static void mmc_omap_report_irq(struct mmc_omap_host *host, u16 status) | 707 | } |
726 | { | ||
727 | } | 708 | } |
728 | #endif | ||
729 | |||
730 | 709 | ||
731 | static irqreturn_t mmc_omap_irq(int irq, void *dev_id) | 710 | static irqreturn_t mmc_omap_irq(int irq, void *dev_id) |
732 | { | 711 | { |
@@ -760,10 +739,12 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id) | |||
760 | cmd = host->cmd->opcode; | 739 | cmd = host->cmd->opcode; |
761 | else | 740 | else |
762 | cmd = -1; | 741 | cmd = -1; |
742 | #ifdef CONFIG_MMC_DEBUG | ||
763 | dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ", | 743 | dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ", |
764 | status, cmd); | 744 | status, cmd); |
765 | mmc_omap_report_irq(host, status); | 745 | mmc_omap_report_irq(status); |
766 | 746 | printk("\n"); | |
747 | #endif | ||
767 | if (host->total_bytes_left) { | 748 | if (host->total_bytes_left) { |
768 | if ((status & OMAP_MMC_STAT_A_FULL) || | 749 | if ((status & OMAP_MMC_STAT_A_FULL) || |
769 | (status & OMAP_MMC_STAT_END_OF_DATA)) | 750 | (status & OMAP_MMC_STAT_END_OF_DATA)) |
@@ -847,7 +828,7 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id) | |||
847 | host->abort = 1; | 828 | host->abort = 1; |
848 | OMAP_MMC_WRITE(host, IE, 0); | 829 | OMAP_MMC_WRITE(host, IE, 0); |
849 | disable_irq_nosync(host->irq); | 830 | disable_irq_nosync(host->irq); |
850 | queue_work(host->mmc_omap_wq, &host->cmd_abort_work); | 831 | queue_work(mmc_omap_wq, &host->cmd_abort_work); |
851 | return IRQ_HANDLED; | 832 | return IRQ_HANDLED; |
852 | } | 833 | } |
853 | 834 | ||
@@ -910,15 +891,159 @@ static void mmc_omap_cover_handler(unsigned long param) | |||
910 | jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY)); | 891 | jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY)); |
911 | } | 892 | } |
912 | 893 | ||
913 | static void mmc_omap_dma_callback(void *priv) | 894 | /* Prepare to transfer the next segment of a scatterlist */ |
895 | static void | ||
896 | mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data) | ||
914 | { | 897 | { |
915 | struct mmc_omap_host *host = priv; | 898 | int dma_ch = host->dma_ch; |
916 | struct mmc_data *data = host->data; | 899 | unsigned long data_addr; |
900 | u16 buf, frame; | ||
901 | u32 count; | ||
902 | struct scatterlist *sg = &data->sg[host->sg_idx]; | ||
903 | int src_port = 0; | ||
904 | int dst_port = 0; | ||
905 | int sync_dev = 0; | ||
906 | |||
907 | data_addr = host->phys_base + OMAP_MMC_REG(host, DATA); | ||
908 | frame = data->blksz; | ||
909 | count = sg_dma_len(sg); | ||
910 | |||
911 | if ((data->blocks == 1) && (count > data->blksz)) | ||
912 | count = frame; | ||
913 | |||
914 | host->dma_len = count; | ||
915 | |||
916 | /* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx. | ||
917 | * Use 16 or 32 word frames when the blocksize is at least that large. | ||
918 | * Blocksize is usually 512 bytes; but not for some SD reads. | ||
919 | */ | ||
920 | if (cpu_is_omap15xx() && frame > 32) | ||
921 | frame = 32; | ||
922 | else if (frame > 64) | ||
923 | frame = 64; | ||
924 | count /= frame; | ||
925 | frame >>= 1; | ||
926 | |||
927 | if (!(data->flags & MMC_DATA_WRITE)) { | ||
928 | buf = 0x800f | ((frame - 1) << 8); | ||
929 | |||
930 | if (cpu_class_is_omap1()) { | ||
931 | src_port = OMAP_DMA_PORT_TIPB; | ||
932 | dst_port = OMAP_DMA_PORT_EMIFF; | ||
933 | } | ||
934 | if (cpu_is_omap24xx()) | ||
935 | sync_dev = OMAP24XX_DMA_MMC1_RX; | ||
936 | |||
937 | omap_set_dma_src_params(dma_ch, src_port, | ||
938 | OMAP_DMA_AMODE_CONSTANT, | ||
939 | data_addr, 0, 0); | ||
940 | omap_set_dma_dest_params(dma_ch, dst_port, | ||
941 | OMAP_DMA_AMODE_POST_INC, | ||
942 | sg_dma_address(sg), 0, 0); | ||
943 | omap_set_dma_dest_data_pack(dma_ch, 1); | ||
944 | omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4); | ||
945 | } else { | ||
946 | buf = 0x0f80 | ((frame - 1) << 0); | ||
947 | |||
948 | if (cpu_class_is_omap1()) { | ||
949 | src_port = OMAP_DMA_PORT_EMIFF; | ||
950 | dst_port = OMAP_DMA_PORT_TIPB; | ||
951 | } | ||
952 | if (cpu_is_omap24xx()) | ||
953 | sync_dev = OMAP24XX_DMA_MMC1_TX; | ||
954 | |||
955 | omap_set_dma_dest_params(dma_ch, dst_port, | ||
956 | OMAP_DMA_AMODE_CONSTANT, | ||
957 | data_addr, 0, 0); | ||
958 | omap_set_dma_src_params(dma_ch, src_port, | ||
959 | OMAP_DMA_AMODE_POST_INC, | ||
960 | sg_dma_address(sg), 0, 0); | ||
961 | omap_set_dma_src_data_pack(dma_ch, 1); | ||
962 | omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4); | ||
963 | } | ||
917 | 964 | ||
918 | /* If we got to the end of DMA, assume everything went well */ | 965 | /* Max limit for DMA frame count is 0xffff */ |
919 | data->bytes_xfered += data->blocks * data->blksz; | 966 | BUG_ON(count > 0xffff); |
967 | |||
968 | OMAP_MMC_WRITE(host, BUF, buf); | ||
969 | omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16, | ||
970 | frame, count, OMAP_DMA_SYNC_FRAME, | ||
971 | sync_dev, 0); | ||
972 | } | ||
973 | |||
974 | /* A scatterlist segment completed */ | ||
975 | static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data) | ||
976 | { | ||
977 | struct mmc_omap_host *host = (struct mmc_omap_host *) data; | ||
978 | struct mmc_data *mmcdat = host->data; | ||
979 | |||
980 | if (unlikely(host->dma_ch < 0)) { | ||
981 | dev_err(mmc_dev(host->mmc), | ||
982 | "DMA callback while DMA not enabled\n"); | ||
983 | return; | ||
984 | } | ||
985 | /* FIXME: We really should do something to _handle_ the errors */ | ||
986 | if (ch_status & OMAP1_DMA_TOUT_IRQ) { | ||
987 | dev_err(mmc_dev(host->mmc),"DMA timeout\n"); | ||
988 | return; | ||
989 | } | ||
990 | if (ch_status & OMAP_DMA_DROP_IRQ) { | ||
991 | dev_err(mmc_dev(host->mmc), "DMA sync error\n"); | ||
992 | return; | ||
993 | } | ||
994 | if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) { | ||
995 | return; | ||
996 | } | ||
997 | mmcdat->bytes_xfered += host->dma_len; | ||
998 | host->sg_idx++; | ||
999 | if (host->sg_idx < host->sg_len) { | ||
1000 | mmc_omap_prepare_dma(host, host->data); | ||
1001 | omap_start_dma(host->dma_ch); | ||
1002 | } else | ||
1003 | mmc_omap_dma_done(host, host->data); | ||
1004 | } | ||
1005 | |||
1006 | static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data) | ||
1007 | { | ||
1008 | const char *dma_dev_name; | ||
1009 | int sync_dev, dma_ch, is_read, r; | ||
1010 | |||
1011 | is_read = !(data->flags & MMC_DATA_WRITE); | ||
1012 | del_timer_sync(&host->dma_timer); | ||
1013 | if (host->dma_ch >= 0) { | ||
1014 | if (is_read == host->dma_is_read) | ||
1015 | return 0; | ||
1016 | omap_free_dma(host->dma_ch); | ||
1017 | host->dma_ch = -1; | ||
1018 | } | ||
1019 | |||
1020 | if (is_read) { | ||
1021 | if (host->id == 0) { | ||
1022 | sync_dev = OMAP_DMA_MMC_RX; | ||
1023 | dma_dev_name = "MMC1 read"; | ||
1024 | } else { | ||
1025 | sync_dev = OMAP_DMA_MMC2_RX; | ||
1026 | dma_dev_name = "MMC2 read"; | ||
1027 | } | ||
1028 | } else { | ||
1029 | if (host->id == 0) { | ||
1030 | sync_dev = OMAP_DMA_MMC_TX; | ||
1031 | dma_dev_name = "MMC1 write"; | ||
1032 | } else { | ||
1033 | sync_dev = OMAP_DMA_MMC2_TX; | ||
1034 | dma_dev_name = "MMC2 write"; | ||
1035 | } | ||
1036 | } | ||
1037 | r = omap_request_dma(sync_dev, dma_dev_name, mmc_omap_dma_cb, | ||
1038 | host, &dma_ch); | ||
1039 | if (r != 0) { | ||
1040 | dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r); | ||
1041 | return r; | ||
1042 | } | ||
1043 | host->dma_ch = dma_ch; | ||
1044 | host->dma_is_read = is_read; | ||
920 | 1045 | ||
921 | mmc_omap_dma_done(host, data); | 1046 | return 0; |
922 | } | 1047 | } |
923 | 1048 | ||
924 | static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req) | 1049 | static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req) |
@@ -993,85 +1118,33 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req) | |||
993 | 1118 | ||
994 | host->sg_idx = 0; | 1119 | host->sg_idx = 0; |
995 | if (use_dma) { | 1120 | if (use_dma) { |
996 | enum dma_data_direction dma_data_dir; | 1121 | if (mmc_omap_get_dma_channel(host, data) == 0) { |
997 | struct dma_async_tx_descriptor *tx; | 1122 | enum dma_data_direction dma_data_dir; |
998 | struct dma_chan *c; | 1123 | |
999 | u32 burst, *bp; | 1124 | if (data->flags & MMC_DATA_WRITE) |
1000 | u16 buf; | 1125 | dma_data_dir = DMA_TO_DEVICE; |
1001 | 1126 | else | |
1002 | /* | 1127 | dma_data_dir = DMA_FROM_DEVICE; |
1003 | * FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx | 1128 | |
1004 | * and 24xx. Use 16 or 32 word frames when the | 1129 | host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, |
1005 | * blocksize is at least that large. Blocksize is | 1130 | sg_len, dma_data_dir); |
1006 | * usually 512 bytes; but not for some SD reads. | 1131 | host->total_bytes_left = 0; |
1007 | */ | 1132 | mmc_omap_prepare_dma(host, req->data); |
1008 | burst = mmc_omap15xx() ? 32 : 64; | 1133 | host->brs_received = 0; |
1009 | if (burst > data->blksz) | 1134 | host->dma_done = 0; |
1010 | burst = data->blksz; | 1135 | host->dma_in_use = 1; |
1011 | 1136 | } else | |
1012 | burst >>= 1; | 1137 | use_dma = 0; |
1013 | |||
1014 | if (data->flags & MMC_DATA_WRITE) { | ||
1015 | c = host->dma_tx; | ||
1016 | bp = &host->dma_tx_burst; | ||
1017 | buf = 0x0f80 | (burst - 1) << 0; | ||
1018 | dma_data_dir = DMA_TO_DEVICE; | ||
1019 | } else { | ||
1020 | c = host->dma_rx; | ||
1021 | bp = &host->dma_rx_burst; | ||
1022 | buf = 0x800f | (burst - 1) << 8; | ||
1023 | dma_data_dir = DMA_FROM_DEVICE; | ||
1024 | } | ||
1025 | |||
1026 | if (!c) | ||
1027 | goto use_pio; | ||
1028 | |||
1029 | /* Only reconfigure if we have a different burst size */ | ||
1030 | if (*bp != burst) { | ||
1031 | struct dma_slave_config cfg; | ||
1032 | |||
1033 | cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA); | ||
1034 | cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA); | ||
1035 | cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
1036 | cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
1037 | cfg.src_maxburst = burst; | ||
1038 | cfg.dst_maxburst = burst; | ||
1039 | |||
1040 | if (dmaengine_slave_config(c, &cfg)) | ||
1041 | goto use_pio; | ||
1042 | |||
1043 | *bp = burst; | ||
1044 | } | ||
1045 | |||
1046 | host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len, | ||
1047 | dma_data_dir); | ||
1048 | if (host->sg_len == 0) | ||
1049 | goto use_pio; | ||
1050 | |||
1051 | tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len, | ||
1052 | data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, | ||
1053 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
1054 | if (!tx) | ||
1055 | goto use_pio; | ||
1056 | |||
1057 | OMAP_MMC_WRITE(host, BUF, buf); | ||
1058 | |||
1059 | tx->callback = mmc_omap_dma_callback; | ||
1060 | tx->callback_param = host; | ||
1061 | dmaengine_submit(tx); | ||
1062 | host->brs_received = 0; | ||
1063 | host->dma_done = 0; | ||
1064 | host->dma_in_use = 1; | ||
1065 | return; | ||
1066 | } | 1138 | } |
1067 | use_pio: | ||
1068 | 1139 | ||
1069 | /* Revert to PIO? */ | 1140 | /* Revert to PIO? */ |
1070 | OMAP_MMC_WRITE(host, BUF, 0x1f1f); | 1141 | if (!use_dma) { |
1071 | host->total_bytes_left = data->blocks * block_size; | 1142 | OMAP_MMC_WRITE(host, BUF, 0x1f1f); |
1072 | host->sg_len = sg_len; | 1143 | host->total_bytes_left = data->blocks * block_size; |
1073 | mmc_omap_sg_to_buf(host); | 1144 | host->sg_len = sg_len; |
1074 | host->dma_in_use = 0; | 1145 | mmc_omap_sg_to_buf(host); |
1146 | host->dma_in_use = 0; | ||
1147 | } | ||
1075 | } | 1148 | } |
1076 | 1149 | ||
1077 | static void mmc_omap_start_request(struct mmc_omap_host *host, | 1150 | static void mmc_omap_start_request(struct mmc_omap_host *host, |
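The rewritten mmc_omap_prepare_data() in the hunk above only programs DMA when a raw OMAP DMA channel can actually be obtained; otherwise it clears use_dma and drops into the PIO path, which reprograms the BUF register and counts the bytes the CPU must feed to the FIFO. A condensed, self-contained sketch of that decide-then-fall-back shape is shown below; struct xfer and try_setup_dma() are illustrative stand-ins, not helpers from the patch.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver's transfer state and DMA setup. */
struct xfer {
	size_t blocks;
	size_t block_size;
	bool   dma_in_use;
	size_t total_bytes_left;
};

static bool try_setup_dma(struct xfer *x)
{
	/* In the driver this requests a channel and maps the scatterlist;
	 * returning false means "no channel available, use PIO". */
	(void)x;
	return false;
}

static void prepare_data(struct xfer *x, bool want_dma)
{
	bool use_dma = want_dma && try_setup_dma(x);

	if (use_dma) {
		x->dma_in_use = true;		/* completion comes from the DMA callback */
		x->total_bytes_left = 0;	/* the DMA engine, not the CPU, moves data */
	} else {
		x->dma_in_use = false;		/* revert to PIO */
		x->total_bytes_left = x->blocks * x->block_size;
	}
}

int main(void)
{
	struct xfer x = { .blocks = 8, .block_size = 512 };

	prepare_data(&x, true);
	printf("dma_in_use=%d bytes_left=%zu\n", x.dma_in_use, x.total_bytes_left);
	return 0;
}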
@@ -1084,12 +1157,8 @@ static void mmc_omap_start_request(struct mmc_omap_host *host, | |||
1084 | /* only touch fifo AFTER the controller readies it */ | 1157 | /* only touch fifo AFTER the controller readies it */ |
1085 | mmc_omap_prepare_data(host, req); | 1158 | mmc_omap_prepare_data(host, req); |
1086 | mmc_omap_start_command(host, req->cmd); | 1159 | mmc_omap_start_command(host, req->cmd); |
1087 | if (host->dma_in_use) { | 1160 | if (host->dma_in_use) |
1088 | struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ? | 1161 | omap_start_dma(host->dma_ch); |
1089 | host->dma_tx : host->dma_rx; | ||
1090 | |||
1091 | dma_async_issue_pending(c); | ||
1092 | } | ||
1093 | } | 1162 | } |
1094 | 1163 | ||
1095 | static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req) | 1164 | static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req) |
@@ -1121,7 +1190,8 @@ static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on, | |||
1121 | if (slot->pdata->set_power != NULL) | 1190 | if (slot->pdata->set_power != NULL) |
1122 | slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on, | 1191 | slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on, |
1123 | vdd); | 1192 | vdd); |
1124 | if (mmc_omap2()) { | 1193 | |
1194 | if (cpu_is_omap24xx()) { | ||
1125 | u16 w; | 1195 | u16 w; |
1126 | 1196 | ||
1127 | if (power_on) { | 1197 | if (power_on) { |
@@ -1230,7 +1300,7 @@ static const struct mmc_host_ops mmc_omap_ops = { | |||
1230 | .set_ios = mmc_omap_set_ios, | 1300 | .set_ios = mmc_omap_set_ios, |
1231 | }; | 1301 | }; |
1232 | 1302 | ||
1233 | static int mmc_omap_new_slot(struct mmc_omap_host *host, int id) | 1303 | static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id) |
1234 | { | 1304 | { |
1235 | struct mmc_omap_slot *slot = NULL; | 1305 | struct mmc_omap_slot *slot = NULL; |
1236 | struct mmc_host *mmc; | 1306 | struct mmc_host *mmc; |
@@ -1255,7 +1325,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id) | |||
1255 | mmc->ops = &mmc_omap_ops; | 1325 | mmc->ops = &mmc_omap_ops; |
1256 | mmc->f_min = 400000; | 1326 | mmc->f_min = 400000; |
1257 | 1327 | ||
1258 | if (mmc_omap2()) | 1328 | if (cpu_class_is_omap2()) |
1259 | mmc->f_max = 48000000; | 1329 | mmc->f_max = 48000000; |
1260 | else | 1330 | else |
1261 | mmc->f_max = 24000000; | 1331 | mmc->f_max = 24000000; |
@@ -1319,19 +1389,17 @@ static void mmc_omap_remove_slot(struct mmc_omap_slot *slot) | |||
1319 | 1389 | ||
1320 | tasklet_kill(&slot->cover_tasklet); | 1390 | tasklet_kill(&slot->cover_tasklet); |
1321 | del_timer_sync(&slot->cover_timer); | 1391 | del_timer_sync(&slot->cover_timer); |
1322 | flush_workqueue(slot->host->mmc_omap_wq); | 1392 | flush_workqueue(mmc_omap_wq); |
1323 | 1393 | ||
1324 | mmc_remove_host(mmc); | 1394 | mmc_remove_host(mmc); |
1325 | mmc_free_host(mmc); | 1395 | mmc_free_host(mmc); |
1326 | } | 1396 | } |
1327 | 1397 | ||
1328 | static int mmc_omap_probe(struct platform_device *pdev) | 1398 | static int __init mmc_omap_probe(struct platform_device *pdev) |
1329 | { | 1399 | { |
1330 | struct omap_mmc_platform_data *pdata = pdev->dev.platform_data; | 1400 | struct omap_mmc_platform_data *pdata = pdev->dev.platform_data; |
1331 | struct mmc_omap_host *host = NULL; | 1401 | struct mmc_omap_host *host = NULL; |
1332 | struct resource *res; | 1402 | struct resource *res; |
1333 | dma_cap_mask_t mask; | ||
1334 | unsigned sig; | ||
1335 | int i, ret = 0; | 1403 | int i, ret = 0; |
1336 | int irq; | 1404 | int irq; |
1337 | 1405 | ||
@@ -1371,18 +1439,22 @@ static int mmc_omap_probe(struct platform_device *pdev) | |||
1371 | setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host); | 1439 | setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host); |
1372 | 1440 | ||
1373 | spin_lock_init(&host->dma_lock); | 1441 | spin_lock_init(&host->dma_lock); |
1442 | setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host); | ||
1374 | spin_lock_init(&host->slot_lock); | 1443 | spin_lock_init(&host->slot_lock); |
1375 | init_waitqueue_head(&host->slot_wq); | 1444 | init_waitqueue_head(&host->slot_wq); |
1376 | 1445 | ||
1377 | host->pdata = pdata; | 1446 | host->pdata = pdata; |
1378 | host->features = host->pdata->slots[0].features; | ||
1379 | host->dev = &pdev->dev; | 1447 | host->dev = &pdev->dev; |
1380 | platform_set_drvdata(pdev, host); | 1448 | platform_set_drvdata(pdev, host); |
1381 | 1449 | ||
1382 | host->id = pdev->id; | 1450 | host->id = pdev->id; |
1383 | host->mem_res = res; | 1451 | host->mem_res = res; |
1384 | host->irq = irq; | 1452 | host->irq = irq; |
1453 | |||
1385 | host->use_dma = 1; | 1454 | host->use_dma = 1; |
1455 | host->dev->dma_mask = &pdata->dma_mask; | ||
1456 | host->dma_ch = -1; | ||
1457 | |||
1386 | host->irq = irq; | 1458 | host->irq = irq; |
1387 | host->phys_base = host->mem_res->start; | 1459 | host->phys_base = host->mem_res->start; |
1388 | host->virt_base = ioremap(res->start, resource_size(res)); | 1460 | host->virt_base = ioremap(res->start, resource_size(res)); |
@@ -1402,48 +1474,9 @@ static int mmc_omap_probe(struct platform_device *pdev) | |||
1402 | goto err_free_iclk; | 1474 | goto err_free_iclk; |
1403 | } | 1475 | } |
1404 | 1476 | ||
1405 | dma_cap_zero(mask); | ||
1406 | dma_cap_set(DMA_SLAVE, mask); | ||
1407 | |||
1408 | host->dma_tx_burst = -1; | ||
1409 | host->dma_rx_burst = -1; | ||
1410 | |||
1411 | if (mmc_omap2()) | ||
1412 | sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX; | ||
1413 | else | ||
1414 | sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX; | ||
1415 | host->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig); | ||
1416 | #if 0 | ||
1417 | if (!host->dma_tx) { | ||
1418 | dev_err(host->dev, "unable to obtain TX DMA engine channel %u\n", | ||
1419 | sig); | ||
1420 | goto err_dma; | ||
1421 | } | ||
1422 | #else | ||
1423 | if (!host->dma_tx) | ||
1424 | dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n", | ||
1425 | sig); | ||
1426 | #endif | ||
1427 | if (mmc_omap2()) | ||
1428 | sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX; | ||
1429 | else | ||
1430 | sig = host->id == 0 ? OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX; | ||
1431 | host->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig); | ||
1432 | #if 0 | ||
1433 | if (!host->dma_rx) { | ||
1434 | dev_err(host->dev, "unable to obtain RX DMA engine channel %u\n", | ||
1435 | sig); | ||
1436 | goto err_dma; | ||
1437 | } | ||
1438 | #else | ||
1439 | if (!host->dma_rx) | ||
1440 | dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n", | ||
1441 | sig); | ||
1442 | #endif | ||
1443 | |||
1444 | ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host); | 1477 | ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host); |
1445 | if (ret) | 1478 | if (ret) |
1446 | goto err_free_dma; | 1479 | goto err_free_fclk; |
1447 | 1480 | ||
1448 | if (pdata->init != NULL) { | 1481 | if (pdata->init != NULL) { |
1449 | ret = pdata->init(&pdev->dev); | 1482 | ret = pdata->init(&pdev->dev); |
@@ -1452,36 +1485,26 @@ static int mmc_omap_probe(struct platform_device *pdev) | |||
1452 | } | 1485 | } |
1453 | 1486 | ||
1454 | host->nr_slots = pdata->nr_slots; | 1487 | host->nr_slots = pdata->nr_slots; |
1455 | host->reg_shift = (mmc_omap7xx() ? 1 : 2); | ||
1456 | |||
1457 | host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0); | ||
1458 | if (!host->mmc_omap_wq) | ||
1459 | goto err_plat_cleanup; | ||
1460 | |||
1461 | for (i = 0; i < pdata->nr_slots; i++) { | 1488 | for (i = 0; i < pdata->nr_slots; i++) { |
1462 | ret = mmc_omap_new_slot(host, i); | 1489 | ret = mmc_omap_new_slot(host, i); |
1463 | if (ret < 0) { | 1490 | if (ret < 0) { |
1464 | while (--i >= 0) | 1491 | while (--i >= 0) |
1465 | mmc_omap_remove_slot(host->slots[i]); | 1492 | mmc_omap_remove_slot(host->slots[i]); |
1466 | 1493 | ||
1467 | goto err_destroy_wq; | 1494 | goto err_plat_cleanup; |
1468 | } | 1495 | } |
1469 | } | 1496 | } |
1470 | 1497 | ||
1498 | host->reg_shift = (cpu_is_omap7xx() ? 1 : 2); | ||
1499 | |||
1471 | return 0; | 1500 | return 0; |
1472 | 1501 | ||
1473 | err_destroy_wq: | ||
1474 | destroy_workqueue(host->mmc_omap_wq); | ||
1475 | err_plat_cleanup: | 1502 | err_plat_cleanup: |
1476 | if (pdata->cleanup) | 1503 | if (pdata->cleanup) |
1477 | pdata->cleanup(&pdev->dev); | 1504 | pdata->cleanup(&pdev->dev); |
1478 | err_free_irq: | 1505 | err_free_irq: |
1479 | free_irq(host->irq, host); | 1506 | free_irq(host->irq, host); |
1480 | err_free_dma: | 1507 | err_free_fclk: |
1481 | if (host->dma_tx) | ||
1482 | dma_release_channel(host->dma_tx); | ||
1483 | if (host->dma_rx) | ||
1484 | dma_release_channel(host->dma_rx); | ||
1485 | clk_put(host->fclk); | 1508 | clk_put(host->fclk); |
1486 | err_free_iclk: | 1509 | err_free_iclk: |
1487 | clk_disable(host->iclk); | 1510 | clk_disable(host->iclk); |
@@ -1516,15 +1539,9 @@ static int mmc_omap_remove(struct platform_device *pdev) | |||
1516 | clk_disable(host->iclk); | 1539 | clk_disable(host->iclk); |
1517 | clk_put(host->iclk); | 1540 | clk_put(host->iclk); |
1518 | 1541 | ||
1519 | if (host->dma_tx) | ||
1520 | dma_release_channel(host->dma_tx); | ||
1521 | if (host->dma_rx) | ||
1522 | dma_release_channel(host->dma_rx); | ||
1523 | |||
1524 | iounmap(host->virt_base); | 1542 | iounmap(host->virt_base); |
1525 | release_mem_region(pdev->resource[0].start, | 1543 | release_mem_region(pdev->resource[0].start, |
1526 | pdev->resource[0].end - pdev->resource[0].start + 1); | 1544 | pdev->resource[0].end - pdev->resource[0].start + 1); |
1527 | destroy_workqueue(host->mmc_omap_wq); | ||
1528 | 1545 | ||
1529 | kfree(host); | 1546 | kfree(host); |
1530 | 1547 | ||
@@ -1582,7 +1599,6 @@ static int mmc_omap_resume(struct platform_device *pdev) | |||
1582 | #endif | 1599 | #endif |
1583 | 1600 | ||
1584 | static struct platform_driver mmc_omap_driver = { | 1601 | static struct platform_driver mmc_omap_driver = { |
1585 | .probe = mmc_omap_probe, | ||
1586 | .remove = mmc_omap_remove, | 1602 | .remove = mmc_omap_remove, |
1587 | .suspend = mmc_omap_suspend, | 1603 | .suspend = mmc_omap_suspend, |
1588 | .resume = mmc_omap_resume, | 1604 | .resume = mmc_omap_resume, |
@@ -1592,8 +1608,30 @@ static struct platform_driver mmc_omap_driver = { | |||
1592 | }, | 1608 | }, |
1593 | }; | 1609 | }; |
1594 | 1610 | ||
1595 | module_platform_driver(mmc_omap_driver); | 1611 | static int __init mmc_omap_init(void) |
1612 | { | ||
1613 | int ret; | ||
1614 | |||
1615 | mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0); | ||
1616 | if (!mmc_omap_wq) | ||
1617 | return -ENOMEM; | ||
1618 | |||
1619 | ret = platform_driver_probe(&mmc_omap_driver, mmc_omap_probe); | ||
1620 | if (ret) | ||
1621 | destroy_workqueue(mmc_omap_wq); | ||
1622 | return ret; | ||
1623 | } | ||
1624 | |||
1625 | static void __exit mmc_omap_exit(void) | ||
1626 | { | ||
1627 | platform_driver_unregister(&mmc_omap_driver); | ||
1628 | destroy_workqueue(mmc_omap_wq); | ||
1629 | } | ||
1630 | |||
1631 | module_init(mmc_omap_init); | ||
1632 | module_exit(mmc_omap_exit); | ||
1633 | |||
1596 | MODULE_DESCRIPTION("OMAP Multimedia Card driver"); | 1634 | MODULE_DESCRIPTION("OMAP Multimedia Card driver"); |
1597 | MODULE_LICENSE("GPL"); | 1635 | MODULE_LICENSE("GPL"); |
1598 | MODULE_ALIAS("platform:" DRIVER_NAME); | 1636 | MODULE_ALIAS("platform:" DRIVER_NAME); |
1599 | MODULE_AUTHOR("Juha Yrjölä"); | 1637 | MODULE_AUTHOR("Juha Yrjölä"); |
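The omap.c changes above also replace module_platform_driver() with an explicit init/exit pair, so the driver-wide workqueue (mmc_omap_wq) exists before any slot is probed and is destroyed only after the driver is unregistered, and they switch to platform_driver_probe() so the probe routine can stay __init. A minimal, generic sketch of that registration pattern follows; the "foo" names and the empty probe/remove bodies are placeholders, not code from the patch.

/* Sketch: one shared workqueue created before probing, torn down after unregister. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

static struct workqueue_struct *foo_wq;

static int __init foo_probe(struct platform_device *pdev)
{
	/* per-device setup would queue its deferred work on foo_wq */
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_driver = {
	/* no .probe here: platform_driver_probe() supplies it below */
	.remove	= foo_remove,
	.driver	= {
		.name = "foo",
	},
};

static int __init foo_init(void)
{
	int ret;

	foo_wq = alloc_workqueue("foo", 0, 0);
	if (!foo_wq)
		return -ENOMEM;

	ret = platform_driver_probe(&foo_driver, foo_probe);
	if (ret)
		destroy_workqueue(foo_wq);	/* undo on registration failure */
	return ret;
}

static void __exit foo_exit(void)
{
	platform_driver_unregister(&foo_driver);
	destroy_workqueue(foo_wq);		/* all users are gone by now */
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");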
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index bc5807873b2..21e4a799df4 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c | |||
@@ -19,29 +19,30 @@ | |||
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/debugfs.h> | 21 | #include <linux/debugfs.h> |
22 | #include <linux/dmaengine.h> | ||
23 | #include <linux/seq_file.h> | 22 | #include <linux/seq_file.h> |
24 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
25 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
26 | #include <linux/dma-mapping.h> | 25 | #include <linux/dma-mapping.h> |
27 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
27 | #include <linux/workqueue.h> | ||
28 | #include <linux/timer.h> | 28 | #include <linux/timer.h> |
29 | #include <linux/clk.h> | 29 | #include <linux/clk.h> |
30 | #include <linux/of.h> | ||
31 | #include <linux/of_gpio.h> | ||
32 | #include <linux/of_device.h> | ||
33 | #include <linux/omap-dma.h> | ||
34 | #include <linux/mmc/host.h> | 30 | #include <linux/mmc/host.h> |
35 | #include <linux/mmc/core.h> | 31 | #include <linux/mmc/core.h> |
36 | #include <linux/mmc/mmc.h> | 32 | #include <linux/mmc/mmc.h> |
37 | #include <linux/io.h> | 33 | #include <linux/io.h> |
34 | #include <linux/semaphore.h> | ||
38 | #include <linux/gpio.h> | 35 | #include <linux/gpio.h> |
39 | #include <linux/regulator/consumer.h> | 36 | #include <linux/regulator/consumer.h> |
40 | #include <linux/pinctrl/consumer.h> | ||
41 | #include <linux/pm_runtime.h> | 37 | #include <linux/pm_runtime.h> |
42 | #include <linux/platform_data/mmc-omap.h> | 38 | #include <plat/dma.h> |
39 | #include <mach/hardware.h> | ||
40 | #include <plat/board.h> | ||
41 | #include <plat/mmc.h> | ||
42 | #include <plat/cpu.h> | ||
43 | 43 | ||
44 | /* OMAP HSMMC Host Controller Registers */ | 44 | /* OMAP HSMMC Host Controller Registers */ |
45 | #define OMAP_HSMMC_SYSCONFIG 0x0010 | ||
45 | #define OMAP_HSMMC_SYSSTATUS 0x0014 | 46 | #define OMAP_HSMMC_SYSSTATUS 0x0014 |
46 | #define OMAP_HSMMC_CON 0x002C | 47 | #define OMAP_HSMMC_CON 0x002C |
47 | #define OMAP_HSMMC_BLK 0x0104 | 48 | #define OMAP_HSMMC_BLK 0x0104 |
@@ -61,7 +62,6 @@ | |||
61 | 62 | ||
62 | #define VS18 (1 << 26) | 63 | #define VS18 (1 << 26) |
63 | #define VS30 (1 << 25) | 64 | #define VS30 (1 << 25) |
64 | #define HSS (1 << 21) | ||
65 | #define SDVS18 (0x5 << 9) | 65 | #define SDVS18 (0x5 << 9) |
66 | #define SDVS30 (0x6 << 9) | 66 | #define SDVS30 (0x6 << 9) |
67 | #define SDVS33 (0x7 << 9) | 67 | #define SDVS33 (0x7 << 9) |
@@ -78,17 +78,27 @@ | |||
78 | #define CLKD_SHIFT 6 | 78 | #define CLKD_SHIFT 6 |
79 | #define DTO_MASK 0x000F0000 | 79 | #define DTO_MASK 0x000F0000 |
80 | #define DTO_SHIFT 16 | 80 | #define DTO_SHIFT 16 |
81 | #define INT_EN_MASK 0x307F0033 | ||
82 | #define BWR_ENABLE (1 << 4) | ||
83 | #define BRR_ENABLE (1 << 5) | ||
84 | #define DTO_ENABLE (1 << 20) | ||
81 | #define INIT_STREAM (1 << 1) | 85 | #define INIT_STREAM (1 << 1) |
82 | #define DP_SELECT (1 << 21) | 86 | #define DP_SELECT (1 << 21) |
83 | #define DDIR (1 << 4) | 87 | #define DDIR (1 << 4) |
84 | #define DMAE 0x1 | 88 | #define DMA_EN 0x1 |
85 | #define MSBS (1 << 5) | 89 | #define MSBS (1 << 5) |
86 | #define BCE (1 << 1) | 90 | #define BCE (1 << 1) |
87 | #define FOUR_BIT (1 << 1) | 91 | #define FOUR_BIT (1 << 1) |
88 | #define HSPE (1 << 2) | ||
89 | #define DDR (1 << 19) | ||
90 | #define DW8 (1 << 5) | 92 | #define DW8 (1 << 5) |
93 | #define CC 0x1 | ||
94 | #define TC 0x02 | ||
91 | #define OD 0x1 | 95 | #define OD 0x1 |
96 | #define ERR (1 << 15) | ||
97 | #define CMD_TIMEOUT (1 << 16) | ||
98 | #define DATA_TIMEOUT (1 << 20) | ||
99 | #define CMD_CRC (1 << 17) | ||
100 | #define DATA_CRC (1 << 21) | ||
101 | #define CARD_ERR (1 << 28) | ||
92 | #define STAT_CLEAR 0xFFFFFFFF | 102 | #define STAT_CLEAR 0xFFFFFFFF |
93 | #define INIT_STREAM_CMD 0x00000000 | 103 | #define INIT_STREAM_CMD 0x00000000 |
94 | #define DUAL_VOLT_OCR_BIT 7 | 104 | #define DUAL_VOLT_OCR_BIT 7 |
@@ -97,28 +107,20 @@ | |||
97 | #define SOFTRESET (1 << 1) | 107 | #define SOFTRESET (1 << 1) |
98 | #define RESETDONE (1 << 0) | 108 | #define RESETDONE (1 << 0) |
99 | 109 | ||
100 | /* Interrupt masks for IE and ISE register */ | 110 | /* |
101 | #define CC_EN (1 << 0) | 111 | * FIXME: Most likely all the data using these _DEVID defines should come |
102 | #define TC_EN (1 << 1) | 112 | * from the platform_data, or implemented in controller and slot specific |
103 | #define BWR_EN (1 << 4) | 113 | * functions. |
104 | #define BRR_EN (1 << 5) | 114 | */ |
105 | #define ERR_EN (1 << 15) | 115 | #define OMAP_MMC1_DEVID 0 |
106 | #define CTO_EN (1 << 16) | 116 | #define OMAP_MMC2_DEVID 1 |
107 | #define CCRC_EN (1 << 17) | 117 | #define OMAP_MMC3_DEVID 2 |
108 | #define CEB_EN (1 << 18) | 118 | #define OMAP_MMC4_DEVID 3 |
109 | #define CIE_EN (1 << 19) | 119 | #define OMAP_MMC5_DEVID 4 |
110 | #define DTO_EN (1 << 20) | ||
111 | #define DCRC_EN (1 << 21) | ||
112 | #define DEB_EN (1 << 22) | ||
113 | #define CERR_EN (1 << 28) | ||
114 | #define BADA_EN (1 << 29) | ||
115 | |||
116 | #define INT_EN_MASK (BADA_EN | CERR_EN | DEB_EN | DCRC_EN |\ | ||
117 | DTO_EN | CIE_EN | CEB_EN | CCRC_EN | CTO_EN | \ | ||
118 | BRR_EN | BWR_EN | TC_EN | CC_EN) | ||
119 | 120 | ||
120 | #define MMC_AUTOSUSPEND_DELAY 100 | 121 | #define MMC_AUTOSUSPEND_DELAY 100 |
121 | #define MMC_TIMEOUT_MS 20 | 122 | #define MMC_TIMEOUT_MS 20 |
123 | #define OMAP_MMC_MASTER_CLOCK 96000000 | ||
122 | #define OMAP_MMC_MIN_CLOCK 400000 | 124 | #define OMAP_MMC_MIN_CLOCK 400000 |
123 | #define OMAP_MMC_MAX_CLOCK 52000000 | 125 | #define OMAP_MMC_MAX_CLOCK 52000000 |
124 | #define DRIVER_NAME "omap_hsmmc" | 126 | #define DRIVER_NAME "omap_hsmmc" |
@@ -161,21 +163,27 @@ struct omap_hsmmc_host { | |||
161 | */ | 163 | */ |
162 | struct regulator *vcc; | 164 | struct regulator *vcc; |
163 | struct regulator *vcc_aux; | 165 | struct regulator *vcc_aux; |
166 | struct work_struct mmc_carddetect_work; | ||
164 | void __iomem *base; | 167 | void __iomem *base; |
165 | resource_size_t mapbase; | 168 | resource_size_t mapbase; |
166 | spinlock_t irq_lock; /* Prevent races with irq handler */ | 169 | spinlock_t irq_lock; /* Prevent races with irq handler */ |
170 | unsigned int id; | ||
167 | unsigned int dma_len; | 171 | unsigned int dma_len; |
168 | unsigned int dma_sg_idx; | 172 | unsigned int dma_sg_idx; |
169 | unsigned char bus_mode; | 173 | unsigned char bus_mode; |
170 | unsigned char power_mode; | 174 | unsigned char power_mode; |
175 | u32 *buffer; | ||
176 | u32 bytesleft; | ||
171 | int suspended; | 177 | int suspended; |
172 | int irq; | 178 | int irq; |
173 | int use_dma, dma_ch; | 179 | int use_dma, dma_ch; |
174 | struct dma_chan *tx_chan; | 180 | int dma_line_tx, dma_line_rx; |
175 | struct dma_chan *rx_chan; | ||
176 | int slot_id; | 181 | int slot_id; |
182 | int got_dbclk; | ||
177 | int response_busy; | 183 | int response_busy; |
178 | int context_loss; | 184 | int context_loss; |
185 | int dpm_state; | ||
186 | int vdd; | ||
179 | int protect_card; | 187 | int protect_card; |
180 | int reqs_blocked; | 188 | int reqs_blocked; |
181 | int use_reg; | 189 | int use_reg; |
@@ -187,8 +195,7 @@ struct omap_hsmmc_host { | |||
187 | 195 | ||
188 | static int omap_hsmmc_card_detect(struct device *dev, int slot) | 196 | static int omap_hsmmc_card_detect(struct device *dev, int slot) |
189 | { | 197 | { |
190 | struct omap_hsmmc_host *host = dev_get_drvdata(dev); | 198 | struct omap_mmc_platform_data *mmc = dev->platform_data; |
191 | struct omap_mmc_platform_data *mmc = host->pdata; | ||
192 | 199 | ||
193 | /* NOTE: assumes card detect signal is active-low */ | 200 | /* NOTE: assumes card detect signal is active-low */ |
194 | return !gpio_get_value_cansleep(mmc->slots[0].switch_pin); | 201 | return !gpio_get_value_cansleep(mmc->slots[0].switch_pin); |
@@ -196,8 +203,7 @@ static int omap_hsmmc_card_detect(struct device *dev, int slot) | |||
196 | 203 | ||
197 | static int omap_hsmmc_get_wp(struct device *dev, int slot) | 204 | static int omap_hsmmc_get_wp(struct device *dev, int slot) |
198 | { | 205 | { |
199 | struct omap_hsmmc_host *host = dev_get_drvdata(dev); | 206 | struct omap_mmc_platform_data *mmc = dev->platform_data; |
200 | struct omap_mmc_platform_data *mmc = host->pdata; | ||
201 | 207 | ||
202 | /* NOTE: assumes write protect signal is active-high */ | 208 | /* NOTE: assumes write protect signal is active-high */ |
203 | return gpio_get_value_cansleep(mmc->slots[0].gpio_wp); | 209 | return gpio_get_value_cansleep(mmc->slots[0].gpio_wp); |
@@ -205,8 +211,7 @@ static int omap_hsmmc_get_wp(struct device *dev, int slot) | |||
205 | 211 | ||
206 | static int omap_hsmmc_get_cover_state(struct device *dev, int slot) | 212 | static int omap_hsmmc_get_cover_state(struct device *dev, int slot) |
207 | { | 213 | { |
208 | struct omap_hsmmc_host *host = dev_get_drvdata(dev); | 214 | struct omap_mmc_platform_data *mmc = dev->platform_data; |
209 | struct omap_mmc_platform_data *mmc = host->pdata; | ||
210 | 215 | ||
211 | /* NOTE: assumes card detect signal is active-low */ | 216 | /* NOTE: assumes card detect signal is active-low */ |
212 | return !gpio_get_value_cansleep(mmc->slots[0].switch_pin); | 217 | return !gpio_get_value_cansleep(mmc->slots[0].switch_pin); |
@@ -216,8 +221,7 @@ static int omap_hsmmc_get_cover_state(struct device *dev, int slot) | |||
216 | 221 | ||
217 | static int omap_hsmmc_suspend_cdirq(struct device *dev, int slot) | 222 | static int omap_hsmmc_suspend_cdirq(struct device *dev, int slot) |
218 | { | 223 | { |
219 | struct omap_hsmmc_host *host = dev_get_drvdata(dev); | 224 | struct omap_mmc_platform_data *mmc = dev->platform_data; |
220 | struct omap_mmc_platform_data *mmc = host->pdata; | ||
221 | 225 | ||
222 | disable_irq(mmc->slots[0].card_detect_irq); | 226 | disable_irq(mmc->slots[0].card_detect_irq); |
223 | return 0; | 227 | return 0; |
@@ -225,8 +229,7 @@ static int omap_hsmmc_suspend_cdirq(struct device *dev, int slot) | |||
225 | 229 | ||
226 | static int omap_hsmmc_resume_cdirq(struct device *dev, int slot) | 230 | static int omap_hsmmc_resume_cdirq(struct device *dev, int slot) |
227 | { | 231 | { |
228 | struct omap_hsmmc_host *host = dev_get_drvdata(dev); | 232 | struct omap_mmc_platform_data *mmc = dev->platform_data; |
229 | struct omap_mmc_platform_data *mmc = host->pdata; | ||
230 | 233 | ||
231 | enable_irq(mmc->slots[0].card_detect_irq); | 234 | enable_irq(mmc->slots[0].card_detect_irq); |
232 | return 0; | 235 | return 0; |
@@ -241,7 +244,28 @@ static int omap_hsmmc_resume_cdirq(struct device *dev, int slot) | |||
241 | 244 | ||
242 | #ifdef CONFIG_REGULATOR | 245 | #ifdef CONFIG_REGULATOR |
243 | 246 | ||
244 | static int omap_hsmmc_set_power(struct device *dev, int slot, int power_on, | 247 | static int omap_hsmmc_1_set_power(struct device *dev, int slot, int power_on, |
248 | int vdd) | ||
249 | { | ||
250 | struct omap_hsmmc_host *host = | ||
251 | platform_get_drvdata(to_platform_device(dev)); | ||
252 | int ret; | ||
253 | |||
254 | if (mmc_slot(host).before_set_reg) | ||
255 | mmc_slot(host).before_set_reg(dev, slot, power_on, vdd); | ||
256 | |||
257 | if (power_on) | ||
258 | ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd); | ||
259 | else | ||
260 | ret = mmc_regulator_set_ocr(host->mmc, host->vcc, 0); | ||
261 | |||
262 | if (mmc_slot(host).after_set_reg) | ||
263 | mmc_slot(host).after_set_reg(dev, slot, power_on, vdd); | ||
264 | |||
265 | return ret; | ||
266 | } | ||
267 | |||
268 | static int omap_hsmmc_235_set_power(struct device *dev, int slot, int power_on, | ||
245 | int vdd) | 269 | int vdd) |
246 | { | 270 | { |
247 | struct omap_hsmmc_host *host = | 271 | struct omap_hsmmc_host *host = |
@@ -254,13 +278,6 @@ static int omap_hsmmc_set_power(struct device *dev, int slot, int power_on, | |||
254 | */ | 278 | */ |
255 | if (!host->vcc) | 279 | if (!host->vcc) |
256 | return 0; | 280 | return 0; |
257 | /* | ||
258 | * With DT, never turn OFF the regulator. This is because | ||
259 | * the pbias cell programming support is still missing when | ||
260 | * booting with Device tree | ||
261 | */ | ||
262 | if (dev->of_node && !vdd) | ||
263 | return 0; | ||
264 | 281 | ||
265 | if (mmc_slot(host).before_set_reg) | 282 | if (mmc_slot(host).before_set_reg) |
266 | mmc_slot(host).before_set_reg(dev, slot, power_on, vdd); | 283 | mmc_slot(host).before_set_reg(dev, slot, power_on, vdd); |
@@ -304,25 +321,115 @@ static int omap_hsmmc_set_power(struct device *dev, int slot, int power_on, | |||
304 | return ret; | 321 | return ret; |
305 | } | 322 | } |
306 | 323 | ||
324 | static int omap_hsmmc_4_set_power(struct device *dev, int slot, int power_on, | ||
325 | int vdd) | ||
326 | { | ||
327 | return 0; | ||
328 | } | ||
329 | |||
330 | static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep, | ||
331 | int vdd, int cardsleep) | ||
332 | { | ||
333 | struct omap_hsmmc_host *host = | ||
334 | platform_get_drvdata(to_platform_device(dev)); | ||
335 | int mode = sleep ? REGULATOR_MODE_STANDBY : REGULATOR_MODE_NORMAL; | ||
336 | |||
337 | return regulator_set_mode(host->vcc, mode); | ||
338 | } | ||
339 | |||
340 | static int omap_hsmmc_235_set_sleep(struct device *dev, int slot, int sleep, | ||
341 | int vdd, int cardsleep) | ||
342 | { | ||
343 | struct omap_hsmmc_host *host = | ||
344 | platform_get_drvdata(to_platform_device(dev)); | ||
345 | int err, mode; | ||
346 | |||
347 | /* | ||
348 | * If we don't see a Vcc regulator, assume it's a fixed | ||
349 | * voltage always-on regulator. | ||
350 | */ | ||
351 | if (!host->vcc) | ||
352 | return 0; | ||
353 | |||
354 | mode = sleep ? REGULATOR_MODE_STANDBY : REGULATOR_MODE_NORMAL; | ||
355 | |||
356 | if (!host->vcc_aux) | ||
357 | return regulator_set_mode(host->vcc, mode); | ||
358 | |||
359 | if (cardsleep) { | ||
360 | /* VCC can be turned off if card is asleep */ | ||
361 | if (sleep) | ||
362 | err = mmc_regulator_set_ocr(host->mmc, host->vcc, 0); | ||
363 | else | ||
364 | err = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd); | ||
365 | } else | ||
366 | err = regulator_set_mode(host->vcc, mode); | ||
367 | if (err) | ||
368 | return err; | ||
369 | |||
370 | if (!mmc_slot(host).vcc_aux_disable_is_sleep) | ||
371 | return regulator_set_mode(host->vcc_aux, mode); | ||
372 | |||
373 | if (sleep) | ||
374 | return regulator_disable(host->vcc_aux); | ||
375 | else | ||
376 | return regulator_enable(host->vcc_aux); | ||
377 | } | ||
378 | |||
379 | static int omap_hsmmc_4_set_sleep(struct device *dev, int slot, int sleep, | ||
380 | int vdd, int cardsleep) | ||
381 | { | ||
382 | return 0; | ||
383 | } | ||
384 | |||
307 | static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) | 385 | static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) |
308 | { | 386 | { |
309 | struct regulator *reg; | 387 | struct regulator *reg; |
388 | int ret = 0; | ||
310 | int ocr_value = 0; | 389 | int ocr_value = 0; |
311 | 390 | ||
391 | switch (host->id) { | ||
392 | case OMAP_MMC1_DEVID: | ||
393 | /* On-chip level shifting via PBIAS0/PBIAS1 */ | ||
394 | mmc_slot(host).set_power = omap_hsmmc_1_set_power; | ||
395 | mmc_slot(host).set_sleep = omap_hsmmc_1_set_sleep; | ||
396 | break; | ||
397 | case OMAP_MMC2_DEVID: | ||
398 | case OMAP_MMC3_DEVID: | ||
399 | case OMAP_MMC5_DEVID: | ||
400 | /* Off-chip level shifting, or none */ | ||
401 | mmc_slot(host).set_power = omap_hsmmc_235_set_power; | ||
402 | mmc_slot(host).set_sleep = omap_hsmmc_235_set_sleep; | ||
403 | break; | ||
404 | case OMAP_MMC4_DEVID: | ||
405 | mmc_slot(host).set_power = omap_hsmmc_4_set_power; | ||
406 | mmc_slot(host).set_sleep = omap_hsmmc_4_set_sleep; | ||
407 | default: | ||
408 | pr_err("MMC%d configuration not supported!\n", host->id); | ||
409 | return -EINVAL; | ||
410 | } | ||
411 | |||
312 | reg = regulator_get(host->dev, "vmmc"); | 412 | reg = regulator_get(host->dev, "vmmc"); |
313 | if (IS_ERR(reg)) { | 413 | if (IS_ERR(reg)) { |
314 | dev_err(host->dev, "vmmc regulator missing\n"); | 414 | dev_dbg(host->dev, "vmmc regulator missing\n"); |
315 | return PTR_ERR(reg); | 415 | /* |
416 | * HACK: until fixed.c regulator is usable, | ||
417 | * we don't require a main regulator | ||
418 | * for MMC2 or MMC3 | ||
419 | */ | ||
420 | if (host->id == OMAP_MMC1_DEVID) { | ||
421 | ret = PTR_ERR(reg); | ||
422 | goto err; | ||
423 | } | ||
316 | } else { | 424 | } else { |
317 | mmc_slot(host).set_power = omap_hsmmc_set_power; | ||
318 | host->vcc = reg; | 425 | host->vcc = reg; |
319 | ocr_value = mmc_regulator_get_ocrmask(reg); | 426 | ocr_value = mmc_regulator_get_ocrmask(reg); |
320 | if (!mmc_slot(host).ocr_mask) { | 427 | if (!mmc_slot(host).ocr_mask) { |
321 | mmc_slot(host).ocr_mask = ocr_value; | 428 | mmc_slot(host).ocr_mask = ocr_value; |
322 | } else { | 429 | } else { |
323 | if (!(mmc_slot(host).ocr_mask & ocr_value)) { | 430 | if (!(mmc_slot(host).ocr_mask & ocr_value)) { |
324 | dev_err(host->dev, "ocrmask %x is not supported\n", | 431 | pr_err("MMC%d ocrmask %x is not supported\n", |
325 | mmc_slot(host).ocr_mask); | 432 | host->id, mmc_slot(host).ocr_mask); |
326 | mmc_slot(host).ocr_mask = 0; | 433 | mmc_slot(host).ocr_mask = 0; |
327 | return -EINVAL; | 434 | return -EINVAL; |
328 | } | 435 | } |
@@ -343,18 +450,24 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) | |||
343 | * framework is fixed, we need a workaround like this | 450 | * framework is fixed, we need a workaround like this |
344 | * (which is safe for MMC, but not in general). | 451 | * (which is safe for MMC, but not in general). |
345 | */ | 452 | */ |
346 | if (regulator_is_enabled(host->vcc) > 0 || | 453 | if (regulator_is_enabled(host->vcc) > 0) { |
347 | (host->vcc_aux && regulator_is_enabled(host->vcc_aux))) { | 454 | regulator_enable(host->vcc); |
348 | int vdd = ffs(mmc_slot(host).ocr_mask) - 1; | 455 | regulator_disable(host->vcc); |
349 | 456 | } | |
350 | mmc_slot(host).set_power(host->dev, host->slot_id, | 457 | if (host->vcc_aux) { |
351 | 1, vdd); | 458 | if (regulator_is_enabled(reg) > 0) { |
352 | mmc_slot(host).set_power(host->dev, host->slot_id, | 459 | regulator_enable(reg); |
353 | 0, 0); | 460 | regulator_disable(reg); |
461 | } | ||
354 | } | 462 | } |
355 | } | 463 | } |
356 | 464 | ||
357 | return 0; | 465 | return 0; |
466 | |||
467 | err: | ||
468 | mmc_slot(host).set_power = NULL; | ||
469 | mmc_slot(host).set_sleep = NULL; | ||
470 | return ret; | ||
358 | } | 471 | } |
359 | 472 | ||
360 | static void omap_hsmmc_reg_put(struct omap_hsmmc_host *host) | 473 | static void omap_hsmmc_reg_put(struct omap_hsmmc_host *host) |
@@ -362,6 +475,7 @@ static void omap_hsmmc_reg_put(struct omap_hsmmc_host *host) | |||
362 | regulator_put(host->vcc); | 475 | regulator_put(host->vcc); |
363 | regulator_put(host->vcc_aux); | 476 | regulator_put(host->vcc_aux); |
364 | mmc_slot(host).set_power = NULL; | 477 | mmc_slot(host).set_power = NULL; |
478 | mmc_slot(host).set_sleep = NULL; | ||
365 | } | 479 | } |
366 | 480 | ||
367 | static inline int omap_hsmmc_have_reg(void) | 481 | static inline int omap_hsmmc_have_reg(void) |
@@ -455,7 +569,7 @@ static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host) | |||
455 | OMAP_HSMMC_WRITE(host->base, SYSCTL, | 569 | OMAP_HSMMC_WRITE(host->base, SYSCTL, |
456 | OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN); | 570 | OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN); |
457 | if ((OMAP_HSMMC_READ(host->base, SYSCTL) & CEN) != 0x0) | 571 | if ((OMAP_HSMMC_READ(host->base, SYSCTL) & CEN) != 0x0) |
458 | dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n"); | 572 | dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stoped\n"); |
459 | } | 573 | } |
460 | 574 | ||
461 | static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host, | 575 | static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host, |
@@ -464,13 +578,13 @@ static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host, | |||
464 | unsigned int irq_mask; | 578 | unsigned int irq_mask; |
465 | 579 | ||
466 | if (host->use_dma) | 580 | if (host->use_dma) |
467 | irq_mask = INT_EN_MASK & ~(BRR_EN | BWR_EN); | 581 | irq_mask = INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE); |
468 | else | 582 | else |
469 | irq_mask = INT_EN_MASK; | 583 | irq_mask = INT_EN_MASK; |
470 | 584 | ||
471 | /* Disable timeout for erases */ | 585 | /* Disable timeout for erases */ |
472 | if (cmd->opcode == MMC_ERASE) | 586 | if (cmd->opcode == MMC_ERASE) |
473 | irq_mask &= ~DTO_EN; | 587 | irq_mask &= ~DTO_ENABLE; |
474 | 588 | ||
475 | OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); | 589 | OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); |
476 | OMAP_HSMMC_WRITE(host->base, ISE, irq_mask); | 590 | OMAP_HSMMC_WRITE(host->base, ISE, irq_mask); |
@@ -485,12 +599,12 @@ static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host) | |||
485 | } | 599 | } |
486 | 600 | ||
487 | /* Calculate divisor for the given clock frequency */ | 601 | /* Calculate divisor for the given clock frequency */ |
488 | static u16 calc_divisor(struct omap_hsmmc_host *host, struct mmc_ios *ios) | 602 | static u16 calc_divisor(struct mmc_ios *ios) |
489 | { | 603 | { |
490 | u16 dsor = 0; | 604 | u16 dsor = 0; |
491 | 605 | ||
492 | if (ios->clock) { | 606 | if (ios->clock) { |
493 | dsor = DIV_ROUND_UP(clk_get_rate(host->fclk), ios->clock); | 607 | dsor = DIV_ROUND_UP(OMAP_MMC_MASTER_CLOCK, ios->clock); |
494 | if (dsor > 250) | 608 | if (dsor > 250) |
495 | dsor = 250; | 609 | dsor = 250; |
496 | } | 610 | } |
@@ -503,16 +617,14 @@ static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host) | |||
503 | struct mmc_ios *ios = &host->mmc->ios; | 617 | struct mmc_ios *ios = &host->mmc->ios; |
504 | unsigned long regval; | 618 | unsigned long regval; |
505 | unsigned long timeout; | 619 | unsigned long timeout; |
506 | unsigned long clkdiv; | ||
507 | 620 | ||
508 | dev_vdbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock); | 621 | dev_dbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock); |
509 | 622 | ||
510 | omap_hsmmc_stop_clock(host); | 623 | omap_hsmmc_stop_clock(host); |
511 | 624 | ||
512 | regval = OMAP_HSMMC_READ(host->base, SYSCTL); | 625 | regval = OMAP_HSMMC_READ(host->base, SYSCTL); |
513 | regval = regval & ~(CLKD_MASK | DTO_MASK); | 626 | regval = regval & ~(CLKD_MASK | DTO_MASK); |
514 | clkdiv = calc_divisor(host, ios); | 627 | regval = regval | (calc_divisor(ios) << 6) | (DTO << 16); |
515 | regval = regval | (clkdiv << 6) | (DTO << 16); | ||
516 | OMAP_HSMMC_WRITE(host->base, SYSCTL, regval); | 628 | OMAP_HSMMC_WRITE(host->base, SYSCTL, regval); |
517 | OMAP_HSMMC_WRITE(host->base, SYSCTL, | 629 | OMAP_HSMMC_WRITE(host->base, SYSCTL, |
518 | OMAP_HSMMC_READ(host->base, SYSCTL) | ICE); | 630 | OMAP_HSMMC_READ(host->base, SYSCTL) | ICE); |
@@ -523,27 +635,6 @@ static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host) | |||
523 | && time_before(jiffies, timeout)) | 635 | && time_before(jiffies, timeout)) |
524 | cpu_relax(); | 636 | cpu_relax(); |
525 | 637 | ||
526 | /* | ||
527 | * Enable High-Speed Support | ||
528 | * Pre-Requisites | ||
529 | * - Controller should support High-Speed-Enable Bit | ||
530 | * - Controller should not be using DDR Mode | ||
531 | * - Controller should advertise that it supports High Speed | ||
532 | * in capabilities register | ||
533 | * - MMC/SD clock coming out of controller > 25MHz | ||
534 | */ | ||
535 | if ((mmc_slot(host).features & HSMMC_HAS_HSPE_SUPPORT) && | ||
536 | (ios->timing != MMC_TIMING_UHS_DDR50) && | ||
537 | ((OMAP_HSMMC_READ(host->base, CAPA) & HSS) == HSS)) { | ||
538 | regval = OMAP_HSMMC_READ(host->base, HCTL); | ||
539 | if (clkdiv && (clk_get_rate(host->fclk)/clkdiv) > 25000000) | ||
540 | regval |= HSPE; | ||
541 | else | ||
542 | regval &= ~HSPE; | ||
543 | |||
544 | OMAP_HSMMC_WRITE(host->base, HCTL, regval); | ||
545 | } | ||
546 | |||
547 | omap_hsmmc_start_clock(host); | 638 | omap_hsmmc_start_clock(host); |
548 | } | 639 | } |
549 | 640 | ||
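In the hunk above calc_divisor() derives the divisor from the fixed OMAP_MMC_MASTER_CLOCK (96 MHz) instead of clk_get_rate(host->fclk); the result of DIV_ROUND_UP() is clamped to 250 and shifted into the CLKD field, and rounding up guarantees the produced bus clock never exceeds the requested one. A small standalone check of that arithmetic (the targets are the MIN/MAX clock values defined earlier in this file plus the 25 MHz threshold used by the removed high-speed check):

#include <stdio.h>

#define OMAP_MMC_MASTER_CLOCK	96000000u
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int targets[] = { 400000u, 25000000u, 52000000u };	/* Hz */

	for (int i = 0; i < 3; i++) {
		unsigned int dsor = DIV_ROUND_UP(OMAP_MMC_MASTER_CLOCK, targets[i]);

		if (dsor > 250)
			dsor = 250;
		printf("requested %8u Hz -> divisor %3u -> actual %8u Hz\n",
		       targets[i], dsor, OMAP_MMC_MASTER_CLOCK / dsor);
	}
	return 0;
}

This prints 400 kHz -> 240 -> 400 kHz, 25 MHz -> 4 -> 24 MHz, and 52 MHz -> 2 -> 48 MHz: the rounded-up divisor always lands at or below the requested frequency.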
@@ -553,10 +644,6 @@ static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host) | |||
553 | u32 con; | 644 | u32 con; |
554 | 645 | ||
555 | con = OMAP_HSMMC_READ(host->base, CON); | 646 | con = OMAP_HSMMC_READ(host->base, CON); |
556 | if (ios->timing == MMC_TIMING_UHS_DDR50) | ||
557 | con |= DDR; /* configure in DDR mode */ | ||
558 | else | ||
559 | con &= ~DDR; | ||
560 | switch (ios->bus_width) { | 647 | switch (ios->bus_width) { |
561 | case MMC_BUS_WIDTH_8: | 648 | case MMC_BUS_WIDTH_8: |
562 | OMAP_HSMMC_WRITE(host->base, CON, con | DW8); | 649 | OMAP_HSMMC_WRITE(host->base, CON, con | DW8); |
@@ -611,10 +698,23 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host) | |||
611 | if (host->context_loss == context_loss) | 698 | if (host->context_loss == context_loss) |
612 | return 1; | 699 | return 1; |
613 | 700 | ||
614 | if (!OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) | 701 | /* Wait for hardware reset */ |
615 | return 1; | 702 | timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); |
703 | while ((OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) != RESETDONE | ||
704 | && time_before(jiffies, timeout)) | ||
705 | ; | ||
616 | 706 | ||
617 | if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) { | 707 | /* Do software reset */ |
708 | OMAP_HSMMC_WRITE(host->base, SYSCONFIG, SOFTRESET); | ||
709 | timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); | ||
710 | while ((OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) != RESETDONE | ||
711 | && time_before(jiffies, timeout)) | ||
712 | ; | ||
713 | |||
714 | OMAP_HSMMC_WRITE(host->base, SYSCONFIG, | ||
715 | OMAP_HSMMC_READ(host->base, SYSCONFIG) | AUTOIDLE); | ||
716 | |||
717 | if (host->id == OMAP_MMC1_DEVID) { | ||
618 | if (host->power_mode != MMC_POWER_OFF && | 718 | if (host->power_mode != MMC_POWER_OFF && |
619 | (1 << ios->vdd) <= MMC_VDD_23_24) | 719 | (1 << ios->vdd) <= MMC_VDD_23_24) |
620 | hctl = SDVS18; | 720 | hctl = SDVS18; |
@@ -708,8 +808,8 @@ static void send_init_stream(struct omap_hsmmc_host *host) | |||
708 | OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD); | 808 | OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD); |
709 | 809 | ||
710 | timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); | 810 | timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); |
711 | while ((reg != CC_EN) && time_before(jiffies, timeout)) | 811 | while ((reg != CC) && time_before(jiffies, timeout)) |
712 | reg = OMAP_HSMMC_READ(host->base, STAT) & CC_EN; | 812 | reg = OMAP_HSMMC_READ(host->base, STAT) & CC; |
713 | 813 | ||
714 | OMAP_HSMMC_WRITE(host->base, CON, | 814 | OMAP_HSMMC_WRITE(host->base, CON, |
715 | OMAP_HSMMC_READ(host->base, CON) & ~INIT_STREAM); | 815 | OMAP_HSMMC_READ(host->base, CON) & ~INIT_STREAM); |
@@ -764,7 +864,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd, | |||
764 | { | 864 | { |
765 | int cmdreg = 0, resptype = 0, cmdtype = 0; | 865 | int cmdreg = 0, resptype = 0, cmdtype = 0; |
766 | 866 | ||
767 | dev_vdbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n", | 867 | dev_dbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n", |
768 | mmc_hostname(host->mmc), cmd->opcode, cmd->arg); | 868 | mmc_hostname(host->mmc), cmd->opcode, cmd->arg); |
769 | host->cmd = cmd; | 869 | host->cmd = cmd; |
770 | 870 | ||
@@ -800,7 +900,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd, | |||
800 | } | 900 | } |
801 | 901 | ||
802 | if (host->use_dma) | 902 | if (host->use_dma) |
803 | cmdreg |= DMAE; | 903 | cmdreg |= DMA_EN; |
804 | 904 | ||
805 | host->req_in_progress = 1; | 905 | host->req_in_progress = 1; |
806 | 906 | ||
@@ -817,21 +917,14 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data) | |||
817 | return DMA_FROM_DEVICE; | 917 | return DMA_FROM_DEVICE; |
818 | } | 918 | } |
819 | 919 | ||
820 | static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host, | ||
821 | struct mmc_data *data) | ||
822 | { | ||
823 | return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan; | ||
824 | } | ||
825 | |||
826 | static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq) | 920 | static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq) |
827 | { | 921 | { |
828 | int dma_ch; | 922 | int dma_ch; |
829 | unsigned long flags; | ||
830 | 923 | ||
831 | spin_lock_irqsave(&host->irq_lock, flags); | 924 | spin_lock(&host->irq_lock); |
832 | host->req_in_progress = 0; | 925 | host->req_in_progress = 0; |
833 | dma_ch = host->dma_ch; | 926 | dma_ch = host->dma_ch; |
834 | spin_unlock_irqrestore(&host->irq_lock, flags); | 927 | spin_unlock(&host->irq_lock); |
835 | 928 | ||
836 | omap_hsmmc_disable_irq(host); | 929 | omap_hsmmc_disable_irq(host); |
837 | /* Do not complete the request if DMA is still in progress */ | 930 | /* Do not complete the request if DMA is still in progress */ |
@@ -905,24 +998,19 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd) | |||
905 | static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno) | 998 | static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno) |
906 | { | 999 | { |
907 | int dma_ch; | 1000 | int dma_ch; |
908 | unsigned long flags; | ||
909 | 1001 | ||
910 | host->data->error = errno; | 1002 | host->data->error = errno; |
911 | 1003 | ||
912 | spin_lock_irqsave(&host->irq_lock, flags); | 1004 | spin_lock(&host->irq_lock); |
913 | dma_ch = host->dma_ch; | 1005 | dma_ch = host->dma_ch; |
914 | host->dma_ch = -1; | 1006 | host->dma_ch = -1; |
915 | spin_unlock_irqrestore(&host->irq_lock, flags); | 1007 | spin_unlock(&host->irq_lock); |
916 | 1008 | ||
917 | if (host->use_dma && dma_ch != -1) { | 1009 | if (host->use_dma && dma_ch != -1) { |
918 | struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data); | 1010 | dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, |
919 | 1011 | host->data->sg_len, | |
920 | dmaengine_terminate_all(chan); | ||
921 | dma_unmap_sg(chan->device->dev, | ||
922 | host->data->sg, host->data->sg_len, | ||
923 | omap_hsmmc_get_dma_dir(host, host->data)); | 1012 | omap_hsmmc_get_dma_dir(host, host->data)); |
924 | 1013 | omap_free_dma(dma_ch); | |
925 | host->data->host_cookie = 0; | ||
926 | } | 1014 | } |
927 | host->data = NULL; | 1015 | host->data = NULL; |
928 | } | 1016 | } |
@@ -953,7 +1041,7 @@ static void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, u32 status) | |||
953 | buf += len; | 1041 | buf += len; |
954 | } | 1042 | } |
955 | 1043 | ||
956 | dev_vdbg(mmc_dev(host->mmc), "%s\n", res); | 1044 | dev_dbg(mmc_dev(host->mmc), "%s\n", res); |
957 | } | 1045 | } |
958 | #else | 1046 | #else |
959 | static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, | 1047 | static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, |
@@ -1000,49 +1088,75 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host, | |||
1000 | __func__); | 1088 | __func__); |
1001 | } | 1089 | } |
1002 | 1090 | ||
1003 | static void hsmmc_command_incomplete(struct omap_hsmmc_host *host, | ||
1004 | int err, int end_cmd) | ||
1005 | { | ||
1006 | if (end_cmd) { | ||
1007 | omap_hsmmc_reset_controller_fsm(host, SRC); | ||
1008 | if (host->cmd) | ||
1009 | host->cmd->error = err; | ||
1010 | } | ||
1011 | |||
1012 | if (host->data) { | ||
1013 | omap_hsmmc_reset_controller_fsm(host, SRD); | ||
1014 | omap_hsmmc_dma_cleanup(host, err); | ||
1015 | } else if (host->mrq && host->mrq->cmd) | ||
1016 | host->mrq->cmd->error = err; | ||
1017 | } | ||
1018 | |||
1019 | static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status) | 1091 | static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status) |
1020 | { | 1092 | { |
1021 | struct mmc_data *data; | 1093 | struct mmc_data *data; |
1022 | int end_cmd = 0, end_trans = 0; | 1094 | int end_cmd = 0, end_trans = 0; |
1023 | 1095 | ||
1096 | if (!host->req_in_progress) { | ||
1097 | do { | ||
1098 | OMAP_HSMMC_WRITE(host->base, STAT, status); | ||
1099 | /* Flush posted write */ | ||
1100 | status = OMAP_HSMMC_READ(host->base, STAT); | ||
1101 | } while (status & INT_EN_MASK); | ||
1102 | return; | ||
1103 | } | ||
1104 | |||
1024 | data = host->data; | 1105 | data = host->data; |
1025 | dev_vdbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status); | 1106 | dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status); |
1026 | 1107 | ||
1027 | if (status & ERR_EN) { | 1108 | if (status & ERR) { |
1028 | omap_hsmmc_dbg_report_irq(host, status); | 1109 | omap_hsmmc_dbg_report_irq(host, status); |
1029 | 1110 | if ((status & CMD_TIMEOUT) || | |
1030 | if (status & (CTO_EN | CCRC_EN)) | 1111 | (status & CMD_CRC)) { |
1031 | end_cmd = 1; | 1112 | if (host->cmd) { |
1032 | if (status & (CTO_EN | DTO_EN)) | 1113 | if (status & CMD_TIMEOUT) { |
1033 | hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd); | 1114 | omap_hsmmc_reset_controller_fsm(host, |
1034 | else if (status & (CCRC_EN | DCRC_EN)) | 1115 | SRC); |
1035 | hsmmc_command_incomplete(host, -EILSEQ, end_cmd); | 1116 | host->cmd->error = -ETIMEDOUT; |
1036 | 1117 | } else { | |
1037 | if (host->data || host->response_busy) { | 1118 | host->cmd->error = -EILSEQ; |
1038 | end_trans = !end_cmd; | 1119 | } |
1039 | host->response_busy = 0; | 1120 | end_cmd = 1; |
1121 | } | ||
1122 | if (host->data || host->response_busy) { | ||
1123 | if (host->data) | ||
1124 | omap_hsmmc_dma_cleanup(host, | ||
1125 | -ETIMEDOUT); | ||
1126 | host->response_busy = 0; | ||
1127 | omap_hsmmc_reset_controller_fsm(host, SRD); | ||
1128 | } | ||
1129 | } | ||
1130 | if ((status & DATA_TIMEOUT) || | ||
1131 | (status & DATA_CRC)) { | ||
1132 | if (host->data || host->response_busy) { | ||
1133 | int err = (status & DATA_TIMEOUT) ? | ||
1134 | -ETIMEDOUT : -EILSEQ; | ||
1135 | |||
1136 | if (host->data) | ||
1137 | omap_hsmmc_dma_cleanup(host, err); | ||
1138 | else | ||
1139 | host->mrq->cmd->error = err; | ||
1140 | host->response_busy = 0; | ||
1141 | omap_hsmmc_reset_controller_fsm(host, SRD); | ||
1142 | end_trans = 1; | ||
1143 | } | ||
1144 | } | ||
1145 | if (status & CARD_ERR) { | ||
1146 | dev_dbg(mmc_dev(host->mmc), | ||
1147 | "Ignoring card err CMD%d\n", host->cmd->opcode); | ||
1148 | if (host->cmd) | ||
1149 | end_cmd = 1; | ||
1150 | if (host->data) | ||
1151 | end_trans = 1; | ||
1040 | } | 1152 | } |
1041 | } | 1153 | } |
1042 | 1154 | ||
1043 | if (end_cmd || ((status & CC_EN) && host->cmd)) | 1155 | OMAP_HSMMC_WRITE(host->base, STAT, status); |
1156 | |||
1157 | if (end_cmd || ((status & CC) && host->cmd)) | ||
1044 | omap_hsmmc_cmd_done(host, host->cmd); | 1158 | omap_hsmmc_cmd_done(host, host->cmd); |
1045 | if ((end_trans || (status & TC_EN)) && host->mrq) | 1159 | if ((end_trans || (status & TC)) && host->mrq) |
1046 | omap_hsmmc_xfer_done(host, data); | 1160 | omap_hsmmc_xfer_done(host, data); |
1047 | } | 1161 | } |
1048 | 1162 | ||
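The interrupt path in the hunk above acknowledges STAT inside omap_hsmmc_do_irq() and, when no request is in progress, keeps writing the status back and re-reading it until no enabled bits remain, which also flushes the posted write. Below is a condensed sketch of that acknowledge-and-drain loop; read_stat() and ack_stat() are stand-ins for the OMAP_HSMMC_READ/WRITE(STAT) accessors and the latched value is simulated, so this is illustration only.

#include <stdint.h>
#include <stdio.h>

#define INT_EN_MASK	0x307F0033u	/* same enable mask as in the hunk above */
#define CC		0x1u		/* command complete */

/* Simulated status register: pretend a command-complete event is latched. */
static uint32_t latched = CC;

static uint32_t read_stat(void)		{ return latched; }
static void ack_stat(uint32_t bits)	{ latched &= ~bits; }

static void handle_events(uint32_t status)
{
	printf("handling STAT=0x%08x\n", status);
}

int main(void)
{
	uint32_t status = read_stat();

	/* Ack what was seen, then re-read: the re-read flushes the posted
	 * write and picks up any event latched while handling the last one. */
	do {
		handle_events(status);
		ack_stat(status);
		status = read_stat();
	} while (status & INT_EN_MASK);

	return 0;
}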
@@ -1055,13 +1169,11 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id) | |||
1055 | int status; | 1169 | int status; |
1056 | 1170 | ||
1057 | status = OMAP_HSMMC_READ(host->base, STAT); | 1171 | status = OMAP_HSMMC_READ(host->base, STAT); |
1058 | while (status & INT_EN_MASK && host->req_in_progress) { | 1172 | do { |
1059 | omap_hsmmc_do_irq(host, status); | 1173 | omap_hsmmc_do_irq(host, status); |
1060 | |||
1061 | /* Flush posted write */ | 1174 | /* Flush posted write */ |
1062 | OMAP_HSMMC_WRITE(host->base, STAT, status); | ||
1063 | status = OMAP_HSMMC_READ(host->base, STAT); | 1175 | status = OMAP_HSMMC_READ(host->base, STAT); |
1064 | } | 1176 | } while (status & INT_EN_MASK); |
1065 | 1177 | ||
1066 | return IRQ_HANDLED; | 1178 | return IRQ_HANDLED; |
1067 | } | 1179 | } |
@@ -1093,8 +1205,8 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd) | |||
1093 | 1205 | ||
1094 | /* Disable the clocks */ | 1206 | /* Disable the clocks */ |
1095 | pm_runtime_put_sync(host->dev); | 1207 | pm_runtime_put_sync(host->dev); |
1096 | if (host->dbclk) | 1208 | if (host->got_dbclk) |
1097 | clk_disable_unprepare(host->dbclk); | 1209 | clk_disable(host->dbclk); |
1098 | 1210 | ||
1099 | /* Turn the power off */ | 1211 | /* Turn the power off */ |
1100 | ret = mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0); | 1212 | ret = mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0); |
@@ -1104,8 +1216,8 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd) | |||
1104 | ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1, | 1216 | ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1, |
1105 | vdd); | 1217 | vdd); |
1106 | pm_runtime_get_sync(host->dev); | 1218 | pm_runtime_get_sync(host->dev); |
1107 | if (host->dbclk) | 1219 | if (host->got_dbclk) |
1108 | clk_prepare_enable(host->dbclk); | 1220 | clk_enable(host->dbclk); |
1109 | 1221 | ||
1110 | if (ret != 0) | 1222 | if (ret != 0) |
1111 | goto err; | 1223 | goto err; |
@@ -1139,7 +1251,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd) | |||
1139 | 1251 | ||
1140 | return 0; | 1252 | return 0; |
1141 | err: | 1253 | err: |
1142 | dev_err(mmc_dev(host->mmc), "Unable to switch operating voltage\n"); | 1254 | dev_dbg(mmc_dev(host->mmc), "Unable to switch operating voltage\n"); |
1143 | return ret; | 1255 | return ret; |
1144 | } | 1256 | } |
1145 | 1257 | ||
@@ -1152,14 +1264,14 @@ static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host) | |||
1152 | host->reqs_blocked = 0; | 1264 | host->reqs_blocked = 0; |
1153 | if (mmc_slot(host).get_cover_state(host->dev, host->slot_id)) { | 1265 | if (mmc_slot(host).get_cover_state(host->dev, host->slot_id)) { |
1154 | if (host->protect_card) { | 1266 | if (host->protect_card) { |
1155 | dev_info(host->dev, "%s: cover is closed, " | 1267 | printk(KERN_INFO "%s: cover is closed, " |
1156 | "card is now accessible\n", | 1268 | "card is now accessible\n", |
1157 | mmc_hostname(host->mmc)); | 1269 | mmc_hostname(host->mmc)); |
1158 | host->protect_card = 0; | 1270 | host->protect_card = 0; |
1159 | } | 1271 | } |
1160 | } else { | 1272 | } else { |
1161 | if (!host->protect_card) { | 1273 | if (!host->protect_card) { |
1162 | dev_info(host->dev, "%s: cover is open, " | 1274 | printk(KERN_INFO "%s: cover is open, " |
1163 | "card is now inaccessible\n", | 1275 | "card is now inaccessible\n", |
1164 | mmc_hostname(host->mmc)); | 1276 | mmc_hostname(host->mmc)); |
1165 | host->protect_card = 1; | 1277 | host->protect_card = 1; |
@@ -1168,16 +1280,17 @@ static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host) | |||
1168 | } | 1280 | } |
1169 | 1281 | ||
1170 | /* | 1282 | /* |
1171 | * irq handler to notify the core about card insertion/removal | 1283 | * Work Item to notify the core about card insertion/removal |
1172 | */ | 1284 | */ |
1173 | static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id) | 1285 | static void omap_hsmmc_detect(struct work_struct *work) |
1174 | { | 1286 | { |
1175 | struct omap_hsmmc_host *host = dev_id; | 1287 | struct omap_hsmmc_host *host = |
1288 | container_of(work, struct omap_hsmmc_host, mmc_carddetect_work); | ||
1176 | struct omap_mmc_slot_data *slot = &mmc_slot(host); | 1289 | struct omap_mmc_slot_data *slot = &mmc_slot(host); |
1177 | int carddetect; | 1290 | int carddetect; |
1178 | 1291 | ||
1179 | if (host->suspended) | 1292 | if (host->suspended) |
1180 | return IRQ_HANDLED; | 1293 | return; |
1181 | 1294 | ||
1182 | sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch"); | 1295 | sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch"); |
1183 | 1296 | ||
@@ -1192,32 +1305,105 @@ static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id) | |||
1192 | mmc_detect_change(host->mmc, (HZ * 200) / 1000); | 1305 | mmc_detect_change(host->mmc, (HZ * 200) / 1000); |
1193 | else | 1306 | else |
1194 | mmc_detect_change(host->mmc, (HZ * 50) / 1000); | 1307 | mmc_detect_change(host->mmc, (HZ * 50) / 1000); |
1308 | } | ||
1309 | |||
1310 | /* | ||
1311 | * ISR for handling card insertion and removal | ||
1312 | */ | ||
1313 | static irqreturn_t omap_hsmmc_cd_handler(int irq, void *dev_id) | ||
1314 | { | ||
1315 | struct omap_hsmmc_host *host = (struct omap_hsmmc_host *)dev_id; | ||
1316 | |||
1317 | if (host->suspended) | ||
1318 | return IRQ_HANDLED; | ||
1319 | schedule_work(&host->mmc_carddetect_work); | ||
1320 | |||
1195 | return IRQ_HANDLED; | 1321 | return IRQ_HANDLED; |
1196 | } | 1322 | } |
1197 | 1323 | ||
1198 | static void omap_hsmmc_dma_callback(void *param) | 1324 | static int omap_hsmmc_get_dma_sync_dev(struct omap_hsmmc_host *host, |
1325 | struct mmc_data *data) | ||
1326 | { | ||
1327 | int sync_dev; | ||
1328 | |||
1329 | if (data->flags & MMC_DATA_WRITE) | ||
1330 | sync_dev = host->dma_line_tx; | ||
1331 | else | ||
1332 | sync_dev = host->dma_line_rx; | ||
1333 | return sync_dev; | ||
1334 | } | ||
1335 | |||
1336 | static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host, | ||
1337 | struct mmc_data *data, | ||
1338 | struct scatterlist *sgl) | ||
1339 | { | ||
1340 | int blksz, nblk, dma_ch; | ||
1341 | |||
1342 | dma_ch = host->dma_ch; | ||
1343 | if (data->flags & MMC_DATA_WRITE) { | ||
1344 | omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, | ||
1345 | (host->mapbase + OMAP_HSMMC_DATA), 0, 0); | ||
1346 | omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC, | ||
1347 | sg_dma_address(sgl), 0, 0); | ||
1348 | } else { | ||
1349 | omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, | ||
1350 | (host->mapbase + OMAP_HSMMC_DATA), 0, 0); | ||
1351 | omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC, | ||
1352 | sg_dma_address(sgl), 0, 0); | ||
1353 | } | ||
1354 | |||
1355 | blksz = host->data->blksz; | ||
1356 | nblk = sg_dma_len(sgl) / blksz; | ||
1357 | |||
1358 | omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32, | ||
1359 | blksz / 4, nblk, OMAP_DMA_SYNC_FRAME, | ||
1360 | omap_hsmmc_get_dma_sync_dev(host, data), | ||
1361 | !(data->flags & MMC_DATA_WRITE)); | ||
1362 | |||
1363 | omap_start_dma(dma_ch); | ||
1364 | } | ||
1365 | |||
1366 | /* | ||
1367 | * DMA call back function | ||
1368 | */ | ||
1369 | static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data) | ||
1199 | { | 1370 | { |
1200 | struct omap_hsmmc_host *host = param; | 1371 | struct omap_hsmmc_host *host = cb_data; |
1201 | struct dma_chan *chan; | ||
1202 | struct mmc_data *data; | 1372 | struct mmc_data *data; |
1203 | int req_in_progress; | 1373 | int dma_ch, req_in_progress; |
1204 | 1374 | ||
1205 | spin_lock_irq(&host->irq_lock); | 1375 | if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) { |
1376 | dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n", | ||
1377 | ch_status); | ||
1378 | return; | ||
1379 | } | ||
1380 | |||
1381 | spin_lock(&host->irq_lock); | ||
1206 | if (host->dma_ch < 0) { | 1382 | if (host->dma_ch < 0) { |
1207 | spin_unlock_irq(&host->irq_lock); | 1383 | spin_unlock(&host->irq_lock); |
1208 | return; | 1384 | return; |
1209 | } | 1385 | } |
1210 | 1386 | ||
1211 | data = host->mrq->data; | 1387 | data = host->mrq->data; |
1212 | chan = omap_hsmmc_get_dma_chan(host, data); | 1388 | host->dma_sg_idx++; |
1389 | if (host->dma_sg_idx < host->dma_len) { | ||
1390 | /* Fire up the next transfer. */ | ||
1391 | omap_hsmmc_config_dma_params(host, data, | ||
1392 | data->sg + host->dma_sg_idx); | ||
1393 | spin_unlock(&host->irq_lock); | ||
1394 | return; | ||
1395 | } | ||
1396 | |||
1213 | if (!data->host_cookie) | 1397 | if (!data->host_cookie) |
1214 | dma_unmap_sg(chan->device->dev, | 1398 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, |
1215 | data->sg, data->sg_len, | ||
1216 | omap_hsmmc_get_dma_dir(host, data)); | 1399 | omap_hsmmc_get_dma_dir(host, data)); |
1217 | 1400 | ||
1218 | req_in_progress = host->req_in_progress; | 1401 | req_in_progress = host->req_in_progress; |
1402 | dma_ch = host->dma_ch; | ||
1219 | host->dma_ch = -1; | 1403 | host->dma_ch = -1; |
1220 | spin_unlock_irq(&host->irq_lock); | 1404 | spin_unlock(&host->irq_lock); |
1405 | |||
1406 | omap_free_dma(dma_ch); | ||
1221 | 1407 | ||
1222 | /* If DMA has finished after TC, complete the request */ | 1408 | /* If DMA has finished after TC, complete the request */ |
1223 | if (!req_in_progress) { | 1409 | if (!req_in_progress) { |
@@ -1230,14 +1416,13 @@ static void omap_hsmmc_dma_callback(void *param) | |||
1230 | 1416 | ||
1231 | static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, | 1417 | static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, |
1232 | struct mmc_data *data, | 1418 | struct mmc_data *data, |
1233 | struct omap_hsmmc_next *next, | 1419 | struct omap_hsmmc_next *next) |
1234 | struct dma_chan *chan) | ||
1235 | { | 1420 | { |
1236 | int dma_len; | 1421 | int dma_len; |
1237 | 1422 | ||
1238 | if (!next && data->host_cookie && | 1423 | if (!next && data->host_cookie && |
1239 | data->host_cookie != host->next_data.cookie) { | 1424 | data->host_cookie != host->next_data.cookie) { |
1240 | dev_warn(host->dev, "[%s] invalid cookie: data->host_cookie %d" | 1425 | printk(KERN_WARNING "[%s] invalid cookie: data->host_cookie %d" |
1241 | " host->next_data.cookie %d\n", | 1426 | " host->next_data.cookie %d\n", |
1242 | __func__, data->host_cookie, host->next_data.cookie); | 1427 | __func__, data->host_cookie, host->next_data.cookie); |
1243 | data->host_cookie = 0; | 1428 | data->host_cookie = 0; |
@@ -1246,7 +1431,8 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, | |||
1246 | /* Check if next job is already prepared */ | 1431 | /* Check if next job is already prepared */ |
1247 | if (next || | 1432 | if (next || |
1248 | (!next && data->host_cookie != host->next_data.cookie)) { | 1433 | (!next && data->host_cookie != host->next_data.cookie)) { |
1249 | dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len, | 1434 | dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, |
1435 | data->sg_len, | ||
1250 | omap_hsmmc_get_dma_dir(host, data)); | 1436 | omap_hsmmc_get_dma_dir(host, data)); |
1251 | 1437 | ||
1252 | } else { | 1438 | } else { |
@@ -1273,11 +1459,8 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, | |||
1273 | static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, | 1459 | static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, |
1274 | struct mmc_request *req) | 1460 | struct mmc_request *req) |
1275 | { | 1461 | { |
1276 | struct dma_slave_config cfg; | 1462 | int dma_ch = 0, ret = 0, i; |
1277 | struct dma_async_tx_descriptor *tx; | ||
1278 | int ret = 0, i; | ||
1279 | struct mmc_data *data = req->data; | 1463 | struct mmc_data *data = req->data; |
1280 | struct dma_chan *chan; | ||
1281 | 1464 | ||
1282 | /* Sanity check: all the SG entries must be aligned by block size. */ | 1465 | /* Sanity check: all the SG entries must be aligned by block size. */ |
1283 | for (i = 0; i < data->sg_len; i++) { | 1466 | for (i = 0; i < data->sg_len; i++) { |
@@ -1295,41 +1478,22 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, | |||
1295 | 1478 | ||
1296 | BUG_ON(host->dma_ch != -1); | 1479 | BUG_ON(host->dma_ch != -1); |
1297 | 1480 | ||
1298 | chan = omap_hsmmc_get_dma_chan(host, data); | 1481 | ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data), |
1299 | 1482 | "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch); | |
1300 | cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA; | 1483 | if (ret != 0) { |
1301 | cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA; | 1484 | dev_err(mmc_dev(host->mmc), |
1302 | cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | 1485 | "%s: omap_request_dma() failed with %d\n", |
1303 | cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | 1486 | mmc_hostname(host->mmc), ret); |
1304 | cfg.src_maxburst = data->blksz / 4; | ||
1305 | cfg.dst_maxburst = data->blksz / 4; | ||
1306 | |||
1307 | ret = dmaengine_slave_config(chan, &cfg); | ||
1308 | if (ret) | ||
1309 | return ret; | 1487 | return ret; |
1310 | 1488 | } | |
1311 | ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan); | 1489 | ret = omap_hsmmc_pre_dma_transfer(host, data, NULL); |
1312 | if (ret) | 1490 | if (ret) |
1313 | return ret; | 1491 | return ret; |
1314 | 1492 | ||
1315 | tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, | 1493 | host->dma_ch = dma_ch; |
1316 | data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, | 1494 | host->dma_sg_idx = 0; |
1317 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
1318 | if (!tx) { | ||
1319 | dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n"); | ||
1320 | /* FIXME: cleanup */ | ||
1321 | return -1; | ||
1322 | } | ||
1323 | |||
1324 | tx->callback = omap_hsmmc_dma_callback; | ||
1325 | tx->callback_param = host; | ||
1326 | |||
1327 | /* Does not fail */ | ||
1328 | dmaengine_submit(tx); | ||
1329 | |||
1330 | host->dma_ch = 1; | ||
1331 | 1495 | ||
1332 | dma_async_issue_pending(chan); | 1496 | omap_hsmmc_config_dma_params(host, data, data->sg); |
1333 | 1497 | ||
1334 | return 0; | 1498 | return 0; |
1335 | } | 1499 | } |
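
The hunks above swap the generic dmaengine slave API in the left-hand column for the older plat-omap DMA interface in the right-hand column, which programs source/destination parameters and a frame-synchronised transfer for each scatterlist segment by hand. For reference, the removed dmaengine path reduces to roughly the shape below; this is a minimal sketch under the stated assumptions, not the driver's actual code, and sketch_issue_sg, fifo, done and arg are placeholder names.

/* Sketch only: assumes <linux/dmaengine.h>, <linux/mmc/core.h>, a channel
 * obtained earlier with dma_request_channel() and an already-mapped sglist. */
#include <linux/dmaengine.h>
#include <linux/mmc/core.h>

static int sketch_issue_sg(struct dma_chan *chan, struct mmc_data *data,
			   dma_addr_t fifo, dma_async_tx_callback done,
			   void *arg)
{
	struct dma_slave_config cfg = {
		.src_addr	= fifo,	/* controller data register */
		.dst_addr	= fifo,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= data->blksz / 4,
		.dst_maxburst	= data->blksz / 4,
	};
	struct dma_async_tx_descriptor *tx;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
			data->flags & MMC_DATA_WRITE ?
				DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -EINVAL;

	tx->callback = done;		/* completion callback, typically tasklet context */
	tx->callback_param = arg;
	dmaengine_submit(tx);		/* queue the descriptor */
	dma_async_issue_pending(chan);	/* start the transfer */
	return 0;
}
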
@@ -1398,7 +1562,7 @@ omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req) | |||
1398 | if (host->use_dma) { | 1562 | if (host->use_dma) { |
1399 | ret = omap_hsmmc_start_dma_transfer(host, req); | 1563 | ret = omap_hsmmc_start_dma_transfer(host, req); |
1400 | if (ret != 0) { | 1564 | if (ret != 0) { |
1401 | dev_err(mmc_dev(host->mmc), "MMC start dma failure\n"); | 1565 | dev_dbg(mmc_dev(host->mmc), "MMC start dma failure\n"); |
1402 | return ret; | 1566 | return ret; |
1403 | } | 1567 | } |
1404 | } | 1568 | } |
@@ -1411,10 +1575,8 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, | |||
1411 | struct omap_hsmmc_host *host = mmc_priv(mmc); | 1575 | struct omap_hsmmc_host *host = mmc_priv(mmc); |
1412 | struct mmc_data *data = mrq->data; | 1576 | struct mmc_data *data = mrq->data; |
1413 | 1577 | ||
1414 | if (host->use_dma && data->host_cookie) { | 1578 | if (host->use_dma) { |
1415 | struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data); | 1579 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, |
1416 | |||
1417 | dma_unmap_sg(c->device->dev, data->sg, data->sg_len, | ||
1418 | omap_hsmmc_get_dma_dir(host, data)); | 1580 | omap_hsmmc_get_dma_dir(host, data)); |
1419 | data->host_cookie = 0; | 1581 | data->host_cookie = 0; |
1420 | } | 1582 | } |
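
Both columns of the post_req()/pre_req() hunks keep the asynchronous request optimisation: the MMC core may hand the driver the next request early, so its scatterlist can be DMA-mapped while the current transfer is still running, and data->host_cookie records whether that early mapping happened. A stripped-down sketch of the handshake, with placeholder names and a simplified cookie value, assuming <linux/dma-mapping.h> and <linux/mmc/host.h>:

#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>

static void sketch_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
			   bool is_first_req)
{
	struct mmc_data *data = mrq->data;

	if (data->host_cookie)
		return;				/* already prepared */

	if (dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
		       (data->flags & MMC_DATA_WRITE) ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE))
		data->host_cookie = 1;		/* mark "mapped early" */
}

static void sketch_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
			    int err)
{
	struct mmc_data *data = mrq->data;

	if (data->host_cookie) {
		dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_WRITE) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
		data->host_cookie = 0;
	}
}
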
@@ -1430,13 +1592,10 @@ static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq, | |||
1430 | return ; | 1592 | return ; |
1431 | } | 1593 | } |
1432 | 1594 | ||
1433 | if (host->use_dma) { | 1595 | if (host->use_dma) |
1434 | struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data); | ||
1435 | |||
1436 | if (omap_hsmmc_pre_dma_transfer(host, mrq->data, | 1596 | if (omap_hsmmc_pre_dma_transfer(host, mrq->data, |
1437 | &host->next_data, c)) | 1597 | &host->next_data)) |
1438 | mrq->data->host_cookie = 0; | 1598 | mrq->data->host_cookie = 0; |
1439 | } | ||
1440 | } | 1599 | } |
1441 | 1600 | ||
1442 | /* | 1601 | /* |
@@ -1496,10 +1655,12 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
1496 | case MMC_POWER_OFF: | 1655 | case MMC_POWER_OFF: |
1497 | mmc_slot(host).set_power(host->dev, host->slot_id, | 1656 | mmc_slot(host).set_power(host->dev, host->slot_id, |
1498 | 0, 0); | 1657 | 0, 0); |
1658 | host->vdd = 0; | ||
1499 | break; | 1659 | break; |
1500 | case MMC_POWER_UP: | 1660 | case MMC_POWER_UP: |
1501 | mmc_slot(host).set_power(host->dev, host->slot_id, | 1661 | mmc_slot(host).set_power(host->dev, host->slot_id, |
1502 | 1, ios->vdd); | 1662 | 1, ios->vdd); |
1663 | host->vdd = ios->vdd; | ||
1503 | break; | 1664 | break; |
1504 | case MMC_POWER_ON: | 1665 | case MMC_POWER_ON: |
1505 | do_send_init_stream = 1; | 1666 | do_send_init_stream = 1; |
@@ -1517,13 +1678,7 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
1517 | * of external transceiver; but they all handle 1.8V. | 1678 | * of external transceiver; but they all handle 1.8V. |
1518 | */ | 1679 | */ |
1519 | if ((OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET) && | 1680 | if ((OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET) && |
1520 | (ios->vdd == DUAL_VOLT_OCR_BIT) && | 1681 | (ios->vdd == DUAL_VOLT_OCR_BIT)) { |
1521 | /* | ||
1522 | * With pbias cell programming missing, this | ||
1523 | * can't be allowed when booting with device | ||
1524 | * tree. | ||
1525 | */ | ||
1526 | !host->dev->of_node) { | ||
1527 | /* | 1682 | /* |
1528 | * The mmc_select_voltage fn of the core does | 1683 | * The mmc_select_voltage fn of the core does |
1529 | * not seem to set the power_mode to | 1684 | * not seem to set the power_mode to |
@@ -1591,6 +1746,10 @@ static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host) | |||
1591 | value = OMAP_HSMMC_READ(host->base, CAPA); | 1746 | value = OMAP_HSMMC_READ(host->base, CAPA); |
1592 | OMAP_HSMMC_WRITE(host->base, CAPA, value | capa); | 1747 | OMAP_HSMMC_WRITE(host->base, CAPA, value | capa); |
1593 | 1748 | ||
1749 | /* Set the controller to AUTO IDLE mode */ | ||
1750 | value = OMAP_HSMMC_READ(host->base, SYSCONFIG); | ||
1751 | OMAP_HSMMC_WRITE(host->base, SYSCONFIG, value | AUTOIDLE); | ||
1752 | |||
1594 | /* Set SD bus power bit */ | 1753 | /* Set SD bus power bit */ |
1595 | set_sd_bus_power(host); | 1754 | set_sd_bus_power(host); |
1596 | } | 1755 | } |
@@ -1604,7 +1763,7 @@ static int omap_hsmmc_enable_fclk(struct mmc_host *mmc) | |||
1604 | return 0; | 1763 | return 0; |
1605 | } | 1764 | } |
1606 | 1765 | ||
1607 | static int omap_hsmmc_disable_fclk(struct mmc_host *mmc) | 1766 | static int omap_hsmmc_disable_fclk(struct mmc_host *mmc, int lazy) |
1608 | { | 1767 | { |
1609 | struct omap_hsmmc_host *host = mmc_priv(mmc); | 1768 | struct omap_hsmmc_host *host = mmc_priv(mmc); |
1610 | 1769 | ||
@@ -1638,8 +1797,15 @@ static int omap_hsmmc_regs_show(struct seq_file *s, void *data) | |||
1638 | if (host->pdata->get_context_loss_count) | 1797 | if (host->pdata->get_context_loss_count) |
1639 | context_loss = host->pdata->get_context_loss_count(host->dev); | 1798 | context_loss = host->pdata->get_context_loss_count(host->dev); |
1640 | 1799 | ||
1641 | seq_printf(s, "mmc%d:\n ctx_loss:\t%d:%d\n\nregs:\n", | 1800 | seq_printf(s, "mmc%d:\n" |
1642 | mmc->index, host->context_loss, context_loss); | 1801 | " enabled:\t%d\n" |
1802 | " dpm_state:\t%d\n" | ||
1803 | " nesting_cnt:\t%d\n" | ||
1804 | " ctx_loss:\t%d:%d\n" | ||
1805 | "\nregs:\n", | ||
1806 | mmc->index, mmc->enabled ? 1 : 0, | ||
1807 | host->dpm_state, mmc->nesting_cnt, | ||
1808 | host->context_loss, context_loss); | ||
1643 | 1809 | ||
1644 | if (host->suspended) { | 1810 | if (host->suspended) { |
1645 | seq_printf(s, "host suspended, can't read registers\n"); | 1811 | seq_printf(s, "host suspended, can't read registers\n"); |
@@ -1648,6 +1814,8 @@ static int omap_hsmmc_regs_show(struct seq_file *s, void *data) | |||
1648 | 1814 | ||
1649 | pm_runtime_get_sync(host->dev); | 1815 | pm_runtime_get_sync(host->dev); |
1650 | 1816 | ||
1817 | seq_printf(s, "SYSCONFIG:\t0x%08x\n", | ||
1818 | OMAP_HSMMC_READ(host->base, SYSCONFIG)); | ||
1651 | seq_printf(s, "CON:\t\t0x%08x\n", | 1819 | seq_printf(s, "CON:\t\t0x%08x\n", |
1652 | OMAP_HSMMC_READ(host->base, CON)); | 1820 | OMAP_HSMMC_READ(host->base, CON)); |
1653 | seq_printf(s, "HCTL:\t\t0x%08x\n", | 1821 | seq_printf(s, "HCTL:\t\t0x%08x\n", |
@@ -1694,91 +1862,13 @@ static void omap_hsmmc_debugfs(struct mmc_host *mmc) | |||
1694 | 1862 | ||
1695 | #endif | 1863 | #endif |
1696 | 1864 | ||
1697 | #ifdef CONFIG_OF | 1865 | static int __init omap_hsmmc_probe(struct platform_device *pdev) |
1698 | static u16 omap4_reg_offset = 0x100; | ||
1699 | |||
1700 | static const struct of_device_id omap_mmc_of_match[] = { | ||
1701 | { | ||
1702 | .compatible = "ti,omap2-hsmmc", | ||
1703 | }, | ||
1704 | { | ||
1705 | .compatible = "ti,omap3-hsmmc", | ||
1706 | }, | ||
1707 | { | ||
1708 | .compatible = "ti,omap4-hsmmc", | ||
1709 | .data = &omap4_reg_offset, | ||
1710 | }, | ||
1711 | {}, | ||
1712 | }; | ||
1713 | MODULE_DEVICE_TABLE(of, omap_mmc_of_match); | ||
1714 | |||
1715 | static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev) | ||
1716 | { | ||
1717 | struct omap_mmc_platform_data *pdata; | ||
1718 | struct device_node *np = dev->of_node; | ||
1719 | u32 bus_width, max_freq; | ||
1720 | |||
1721 | pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); | ||
1722 | if (!pdata) | ||
1723 | return NULL; /* out of memory */ | ||
1724 | |||
1725 | if (of_find_property(np, "ti,dual-volt", NULL)) | ||
1726 | pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT; | ||
1727 | |||
1728 | /* This driver only supports 1 slot */ | ||
1729 | pdata->nr_slots = 1; | ||
1730 | pdata->slots[0].switch_pin = of_get_named_gpio(np, "cd-gpios", 0); | ||
1731 | pdata->slots[0].gpio_wp = of_get_named_gpio(np, "wp-gpios", 0); | ||
1732 | |||
1733 | if (of_find_property(np, "ti,non-removable", NULL)) { | ||
1734 | pdata->slots[0].nonremovable = true; | ||
1735 | pdata->slots[0].no_regulator_off_init = true; | ||
1736 | } | ||
1737 | of_property_read_u32(np, "bus-width", &bus_width); | ||
1738 | if (bus_width == 4) | ||
1739 | pdata->slots[0].caps |= MMC_CAP_4_BIT_DATA; | ||
1740 | else if (bus_width == 8) | ||
1741 | pdata->slots[0].caps |= MMC_CAP_8_BIT_DATA; | ||
1742 | |||
1743 | if (of_find_property(np, "ti,needs-special-reset", NULL)) | ||
1744 | pdata->slots[0].features |= HSMMC_HAS_UPDATED_RESET; | ||
1745 | |||
1746 | if (!of_property_read_u32(np, "max-frequency", &max_freq)) | ||
1747 | pdata->max_freq = max_freq; | ||
1748 | |||
1749 | if (of_find_property(np, "ti,needs-special-hs-handling", NULL)) | ||
1750 | pdata->slots[0].features |= HSMMC_HAS_HSPE_SUPPORT; | ||
1751 | |||
1752 | return pdata; | ||
1753 | } | ||
1754 | #else | ||
1755 | static inline struct omap_mmc_platform_data | ||
1756 | *of_get_hsmmc_pdata(struct device *dev) | ||
1757 | { | ||
1758 | return NULL; | ||
1759 | } | ||
1760 | #endif | ||
1761 | |||
1762 | static int omap_hsmmc_probe(struct platform_device *pdev) | ||
1763 | { | 1866 | { |
1764 | struct omap_mmc_platform_data *pdata = pdev->dev.platform_data; | 1867 | struct omap_mmc_platform_data *pdata = pdev->dev.platform_data; |
1765 | struct mmc_host *mmc; | 1868 | struct mmc_host *mmc; |
1766 | struct omap_hsmmc_host *host = NULL; | 1869 | struct omap_hsmmc_host *host = NULL; |
1767 | struct resource *res; | 1870 | struct resource *res; |
1768 | int ret, irq; | 1871 | int ret, irq; |
1769 | const struct of_device_id *match; | ||
1770 | dma_cap_mask_t mask; | ||
1771 | unsigned tx_req, rx_req; | ||
1772 | struct pinctrl *pinctrl; | ||
1773 | |||
1774 | match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev); | ||
1775 | if (match) { | ||
1776 | pdata = of_get_hsmmc_pdata(&pdev->dev); | ||
1777 | if (match->data) { | ||
1778 | const u16 *offsetp = match->data; | ||
1779 | pdata->reg_offset = *offsetp; | ||
1780 | } | ||
1781 | } | ||
1782 | 1872 | ||
1783 | if (pdata == NULL) { | 1873 | if (pdata == NULL) { |
1784 | dev_err(&pdev->dev, "Platform Data is missing\n"); | 1874 | dev_err(&pdev->dev, "Platform Data is missing\n"); |
@@ -1795,6 +1885,8 @@ static int omap_hsmmc_probe(struct platform_device *pdev) | |||
1795 | if (res == NULL || irq < 0) | 1885 | if (res == NULL || irq < 0) |
1796 | return -ENXIO; | 1886 | return -ENXIO; |
1797 | 1887 | ||
1888 | res->start += pdata->reg_offset; | ||
1889 | res->end += pdata->reg_offset; | ||
1798 | res = request_mem_region(res->start, resource_size(res), pdev->name); | 1890 | res = request_mem_region(res->start, resource_size(res), pdev->name); |
1799 | if (res == NULL) | 1891 | if (res == NULL) |
1800 | return -EBUSY; | 1892 | return -EBUSY; |
@@ -1814,15 +1906,18 @@ static int omap_hsmmc_probe(struct platform_device *pdev) | |||
1814 | host->pdata = pdata; | 1906 | host->pdata = pdata; |
1815 | host->dev = &pdev->dev; | 1907 | host->dev = &pdev->dev; |
1816 | host->use_dma = 1; | 1908 | host->use_dma = 1; |
1909 | host->dev->dma_mask = &pdata->dma_mask; | ||
1817 | host->dma_ch = -1; | 1910 | host->dma_ch = -1; |
1818 | host->irq = irq; | 1911 | host->irq = irq; |
1912 | host->id = pdev->id; | ||
1819 | host->slot_id = 0; | 1913 | host->slot_id = 0; |
1820 | host->mapbase = res->start + pdata->reg_offset; | 1914 | host->mapbase = res->start; |
1821 | host->base = ioremap(host->mapbase, SZ_4K); | 1915 | host->base = ioremap(host->mapbase, SZ_4K); |
1822 | host->power_mode = MMC_POWER_OFF; | 1916 | host->power_mode = MMC_POWER_OFF; |
1823 | host->next_data.cookie = 1; | 1917 | host->next_data.cookie = 1; |
1824 | 1918 | ||
1825 | platform_set_drvdata(pdev, host); | 1919 | platform_set_drvdata(pdev, host); |
1920 | INIT_WORK(&host->mmc_carddetect_work, omap_hsmmc_detect); | ||
1826 | 1921 | ||
1827 | mmc->ops = &omap_hsmmc_ops; | 1922 | mmc->ops = &omap_hsmmc_ops; |
1828 | 1923 | ||
@@ -1833,12 +1928,8 @@ static int omap_hsmmc_probe(struct platform_device *pdev) | |||
1833 | if (mmc_slot(host).vcc_aux_disable_is_sleep) | 1928 | if (mmc_slot(host).vcc_aux_disable_is_sleep) |
1834 | mmc_slot(host).no_off = 1; | 1929 | mmc_slot(host).no_off = 1; |
1835 | 1930 | ||
1836 | mmc->f_min = OMAP_MMC_MIN_CLOCK; | 1931 | mmc->f_min = OMAP_MMC_MIN_CLOCK; |
1837 | 1932 | mmc->f_max = OMAP_MMC_MAX_CLOCK; | |
1838 | if (pdata->max_freq > 0) | ||
1839 | mmc->f_max = pdata->max_freq; | ||
1840 | else | ||
1841 | mmc->f_max = OMAP_MMC_MAX_CLOCK; | ||
1842 | 1933 | ||
1843 | spin_lock_init(&host->irq_lock); | 1934 | spin_lock_init(&host->irq_lock); |
1844 | 1935 | ||
@@ -1849,28 +1940,30 @@ static int omap_hsmmc_probe(struct platform_device *pdev) | |||
1849 | goto err1; | 1940 | goto err1; |
1850 | } | 1941 | } |
1851 | 1942 | ||
1852 | if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) { | 1943 | omap_hsmmc_context_save(host); |
1853 | dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n"); | 1944 | |
1854 | mmc->caps2 |= MMC_CAP2_NO_MULTI_READ; | 1945 | mmc->caps |= MMC_CAP_DISABLE; |
1855 | } | ||
1856 | 1946 | ||
1857 | pm_runtime_enable(host->dev); | 1947 | pm_runtime_enable(host->dev); |
1858 | pm_runtime_get_sync(host->dev); | 1948 | pm_runtime_get_sync(host->dev); |
1859 | pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY); | 1949 | pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY); |
1860 | pm_runtime_use_autosuspend(host->dev); | 1950 | pm_runtime_use_autosuspend(host->dev); |
1861 | 1951 | ||
1862 | omap_hsmmc_context_save(host); | 1952 | if (cpu_is_omap2430()) { |
1953 | host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck"); | ||
1954 | /* | ||
1955 | * MMC can still work without debounce clock. | ||
1956 | */ | ||
1957 | if (IS_ERR(host->dbclk)) | ||
1958 | dev_warn(mmc_dev(host->mmc), | ||
1959 | "Failed to get debounce clock\n"); | ||
1960 | else | ||
1961 | host->got_dbclk = 1; | ||
1863 | 1962 | ||
1864 | host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck"); | 1963 | if (host->got_dbclk) |
1865 | /* | 1964 | if (clk_enable(host->dbclk) != 0) |
1866 | * MMC can still work without debounce clock. | 1965 | dev_dbg(mmc_dev(host->mmc), "Enabling debounce" |
1867 | */ | 1966 | " clk failed\n"); |
1868 | if (IS_ERR(host->dbclk)) { | ||
1869 | host->dbclk = NULL; | ||
1870 | } else if (clk_prepare_enable(host->dbclk) != 0) { | ||
1871 | dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n"); | ||
1872 | clk_put(host->dbclk); | ||
1873 | host->dbclk = NULL; | ||
1874 | } | 1967 | } |
1875 | 1968 | ||
1876 | /* Since we do only SG emulation, we can have as many segs | 1969 | /* Since we do only SG emulation, we can have as many segs |
@@ -1892,54 +1985,46 @@ static int omap_hsmmc_probe(struct platform_device *pdev) | |||
1892 | if (mmc_slot(host).nonremovable) | 1985 | if (mmc_slot(host).nonremovable) |
1893 | mmc->caps |= MMC_CAP_NONREMOVABLE; | 1986 | mmc->caps |= MMC_CAP_NONREMOVABLE; |
1894 | 1987 | ||
1895 | mmc->pm_caps = mmc_slot(host).pm_caps; | ||
1896 | |||
1897 | omap_hsmmc_conf_bus_power(host); | 1988 | omap_hsmmc_conf_bus_power(host); |
1898 | 1989 | ||
1899 | res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx"); | 1990 | /* Select DMA lines */ |
1900 | if (!res) { | 1991 | switch (host->id) { |
1901 | dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n"); | 1992 | case OMAP_MMC1_DEVID: |
1902 | ret = -ENXIO; | 1993 | host->dma_line_tx = OMAP24XX_DMA_MMC1_TX; |
1903 | goto err_irq; | 1994 | host->dma_line_rx = OMAP24XX_DMA_MMC1_RX; |
1904 | } | 1995 | break; |
1905 | tx_req = res->start; | 1996 | case OMAP_MMC2_DEVID: |
1906 | 1997 | host->dma_line_tx = OMAP24XX_DMA_MMC2_TX; | |
1907 | res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx"); | 1998 | host->dma_line_rx = OMAP24XX_DMA_MMC2_RX; |
1908 | if (!res) { | 1999 | break; |
1909 | dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n"); | 2000 | case OMAP_MMC3_DEVID: |
1910 | ret = -ENXIO; | 2001 | host->dma_line_tx = OMAP34XX_DMA_MMC3_TX; |
1911 | goto err_irq; | 2002 | host->dma_line_rx = OMAP34XX_DMA_MMC3_RX; |
1912 | } | 2003 | break; |
1913 | rx_req = res->start; | 2004 | case OMAP_MMC4_DEVID: |
1914 | 2005 | host->dma_line_tx = OMAP44XX_DMA_MMC4_TX; | |
1915 | dma_cap_zero(mask); | 2006 | host->dma_line_rx = OMAP44XX_DMA_MMC4_RX; |
1916 | dma_cap_set(DMA_SLAVE, mask); | 2007 | break; |
1917 | 2008 | case OMAP_MMC5_DEVID: | |
1918 | host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req); | 2009 | host->dma_line_tx = OMAP44XX_DMA_MMC5_TX; |
1919 | if (!host->rx_chan) { | 2010 | host->dma_line_rx = OMAP44XX_DMA_MMC5_RX; |
1920 | dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req); | 2011 | break; |
1921 | ret = -ENXIO; | 2012 | default: |
1922 | goto err_irq; | 2013 | dev_err(mmc_dev(host->mmc), "Invalid MMC id\n"); |
1923 | } | ||
1924 | |||
1925 | host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req); | ||
1926 | if (!host->tx_chan) { | ||
1927 | dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req); | ||
1928 | ret = -ENXIO; | ||
1929 | goto err_irq; | 2014 | goto err_irq; |
1930 | } | 2015 | } |
1931 | 2016 | ||
1932 | /* Request IRQ for MMC operations */ | 2017 | /* Request IRQ for MMC operations */ |
1933 | ret = request_irq(host->irq, omap_hsmmc_irq, 0, | 2018 | ret = request_irq(host->irq, omap_hsmmc_irq, IRQF_DISABLED, |
1934 | mmc_hostname(mmc), host); | 2019 | mmc_hostname(mmc), host); |
1935 | if (ret) { | 2020 | if (ret) { |
1936 | dev_err(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n"); | 2021 | dev_dbg(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n"); |
1937 | goto err_irq; | 2022 | goto err_irq; |
1938 | } | 2023 | } |
1939 | 2024 | ||
1940 | if (pdata->init != NULL) { | 2025 | if (pdata->init != NULL) { |
1941 | if (pdata->init(&pdev->dev) != 0) { | 2026 | if (pdata->init(&pdev->dev) != 0) { |
1942 | dev_err(mmc_dev(host->mmc), | 2027 | dev_dbg(mmc_dev(host->mmc), |
1943 | "Unable to configure MMC IRQs\n"); | 2028 | "Unable to configure MMC IRQs\n"); |
1944 | goto err_irq_cd_init; | 2029 | goto err_irq_cd_init; |
1945 | } | 2030 | } |
@@ -1956,13 +2041,13 @@ static int omap_hsmmc_probe(struct platform_device *pdev) | |||
1956 | 2041 | ||
1957 | /* Request IRQ for card detect */ | 2042 | /* Request IRQ for card detect */ |
1958 | if ((mmc_slot(host).card_detect_irq)) { | 2043 | if ((mmc_slot(host).card_detect_irq)) { |
1959 | ret = request_threaded_irq(mmc_slot(host).card_detect_irq, | 2044 | ret = request_irq(mmc_slot(host).card_detect_irq, |
1960 | NULL, | 2045 | omap_hsmmc_cd_handler, |
1961 | omap_hsmmc_detect, | 2046 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
1962 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, | 2047 | | IRQF_DISABLED, |
1963 | mmc_hostname(mmc), host); | 2048 | mmc_hostname(mmc), host); |
1964 | if (ret) { | 2049 | if (ret) { |
1965 | dev_err(mmc_dev(host->mmc), | 2050 | dev_dbg(mmc_dev(host->mmc), |
1966 | "Unable to grab MMC CD IRQ\n"); | 2051 | "Unable to grab MMC CD IRQ\n"); |
1967 | goto err_irq_cd; | 2052 | goto err_irq_cd; |
1968 | } | 2053 | } |
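
The card-detect hunk just above trades a threaded interrupt (left column) for a hard handler that schedules a workqueue item (right column). For reference, the threaded form looks roughly like the sketch below; the names are placeholders and this is not the driver's code. With a NULL hard handler, IRQF_ONESHOT keeps the line masked until the thread function returns, and the thread function is allowed to sleep.

#include <linux/interrupt.h>

static irqreturn_t sketch_cd_thread(int irq, void *dev_id)
{
	/* may sleep here: debounce, re-read the CD GPIO, rescan the slot */
	return IRQ_HANDLED;
}

static int sketch_request_cd_irq(unsigned int irq, void *dev_id)
{
	return request_threaded_irq(irq, NULL, sketch_cd_thread,
			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
			IRQF_ONESHOT,
			"sketch-cd", dev_id);
}
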
@@ -1972,11 +2057,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev) | |||
1972 | 2057 | ||
1973 | omap_hsmmc_disable_irq(host); | 2058 | omap_hsmmc_disable_irq(host); |
1974 | 2059 | ||
1975 | pinctrl = devm_pinctrl_get_select_default(&pdev->dev); | ||
1976 | if (IS_ERR(pinctrl)) | ||
1977 | dev_warn(&pdev->dev, | ||
1978 | "pins are not configured from the driver\n"); | ||
1979 | |||
1980 | omap_hsmmc_protect_card(host); | 2060 | omap_hsmmc_protect_card(host); |
1981 | 2061 | ||
1982 | mmc_add_host(mmc); | 2062 | mmc_add_host(mmc); |
@@ -2011,15 +2091,11 @@ err_reg: | |||
2011 | err_irq_cd_init: | 2091 | err_irq_cd_init: |
2012 | free_irq(host->irq, host); | 2092 | free_irq(host->irq, host); |
2013 | err_irq: | 2093 | err_irq: |
2014 | if (host->tx_chan) | 2094 | pm_runtime_mark_last_busy(host->dev); |
2015 | dma_release_channel(host->tx_chan); | 2095 | pm_runtime_put_autosuspend(host->dev); |
2016 | if (host->rx_chan) | ||
2017 | dma_release_channel(host->rx_chan); | ||
2018 | pm_runtime_put_sync(host->dev); | ||
2019 | pm_runtime_disable(host->dev); | ||
2020 | clk_put(host->fclk); | 2096 | clk_put(host->fclk); |
2021 | if (host->dbclk) { | 2097 | if (host->got_dbclk) { |
2022 | clk_disable_unprepare(host->dbclk); | 2098 | clk_disable(host->dbclk); |
2023 | clk_put(host->dbclk); | 2099 | clk_put(host->dbclk); |
2024 | } | 2100 | } |
2025 | err1: | 2101 | err1: |
@@ -2029,9 +2105,7 @@ err1: | |||
2029 | err_alloc: | 2105 | err_alloc: |
2030 | omap_hsmmc_gpio_free(pdata); | 2106 | omap_hsmmc_gpio_free(pdata); |
2031 | err: | 2107 | err: |
2032 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 2108 | release_mem_region(res->start, resource_size(res)); |
2033 | if (res) | ||
2034 | release_mem_region(res->start, resource_size(res)); | ||
2035 | return ret; | 2109 | return ret; |
2036 | } | 2110 | } |
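
The probe error paths above (err_irq, err_irq_cd_init, err1, err_alloc, err) follow the kernel's usual goto-based unwinding: acquire resources in order, release them in reverse on failure. A bare-bones sketch of that shape, unrelated to the omap_hsmmc specifics and with placeholder names; assumes <linux/platform_device.h>, <linux/interrupt.h> and <linux/io.h>:

#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/io.h>

static irqreturn_t sketch_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int sketch_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	int irq, ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || irq < 0)
		return -ENXIO;

	if (!request_mem_region(res->start, resource_size(res), pdev->name))
		return -EBUSY;

	base = ioremap(res->start, resource_size(res));
	if (!base) {
		ret = -ENOMEM;
		goto err_region;
	}

	ret = request_irq(irq, sketch_isr, 0, dev_name(&pdev->dev), pdev);
	if (ret)
		goto err_iomap;

	return 0;

err_iomap:
	iounmap(base);
err_region:
	release_mem_region(res->start, resource_size(res));
	return ret;
}
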
2037 | 2111 | ||
@@ -2040,33 +2114,31 @@ static int omap_hsmmc_remove(struct platform_device *pdev) | |||
2040 | struct omap_hsmmc_host *host = platform_get_drvdata(pdev); | 2114 | struct omap_hsmmc_host *host = platform_get_drvdata(pdev); |
2041 | struct resource *res; | 2115 | struct resource *res; |
2042 | 2116 | ||
2043 | pm_runtime_get_sync(host->dev); | 2117 | if (host) { |
2044 | mmc_remove_host(host->mmc); | 2118 | pm_runtime_get_sync(host->dev); |
2045 | if (host->use_reg) | 2119 | mmc_remove_host(host->mmc); |
2046 | omap_hsmmc_reg_put(host); | 2120 | if (host->use_reg) |
2047 | if (host->pdata->cleanup) | 2121 | omap_hsmmc_reg_put(host); |
2048 | host->pdata->cleanup(&pdev->dev); | 2122 | if (host->pdata->cleanup) |
2049 | free_irq(host->irq, host); | 2123 | host->pdata->cleanup(&pdev->dev); |
2050 | if (mmc_slot(host).card_detect_irq) | 2124 | free_irq(host->irq, host); |
2051 | free_irq(mmc_slot(host).card_detect_irq, host); | 2125 | if (mmc_slot(host).card_detect_irq) |
2052 | 2126 | free_irq(mmc_slot(host).card_detect_irq, host); | |
2053 | if (host->tx_chan) | 2127 | flush_work_sync(&host->mmc_carddetect_work); |
2054 | dma_release_channel(host->tx_chan); | 2128 | |
2055 | if (host->rx_chan) | 2129 | pm_runtime_put_sync(host->dev); |
2056 | dma_release_channel(host->rx_chan); | 2130 | pm_runtime_disable(host->dev); |
2131 | clk_put(host->fclk); | ||
2132 | if (host->got_dbclk) { | ||
2133 | clk_disable(host->dbclk); | ||
2134 | clk_put(host->dbclk); | ||
2135 | } | ||
2057 | 2136 | ||
2058 | pm_runtime_put_sync(host->dev); | 2137 | mmc_free_host(host->mmc); |
2059 | pm_runtime_disable(host->dev); | 2138 | iounmap(host->base); |
2060 | clk_put(host->fclk); | 2139 | omap_hsmmc_gpio_free(pdev->dev.platform_data); |
2061 | if (host->dbclk) { | ||
2062 | clk_disable_unprepare(host->dbclk); | ||
2063 | clk_put(host->dbclk); | ||
2064 | } | 2140 | } |
2065 | 2141 | ||
2066 | omap_hsmmc_gpio_free(host->pdata); | ||
2067 | iounmap(host->base); | ||
2068 | mmc_free_host(host->mmc); | ||
2069 | |||
2070 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 2142 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
2071 | if (res) | 2143 | if (res) |
2072 | release_mem_region(res->start, resource_size(res)); | 2144 | release_mem_region(res->start, resource_size(res)); |
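
The debounce-clock teardown just above, like the matching setup in probe and the suspend/resume hunks further down, swaps clk_prepare_enable()/clk_disable_unprepare() (left column) for bare clk_enable()/clk_disable() (right column). Under the common clock framework the prepare step may sleep while enable must stay callable from atomic context, which is why the combined helpers are the usual pairing. A trivial sketch, assuming <linux/clk.h> and a placeholder clock pointer:

#include <linux/clk.h>

static int sketch_dbclk_on(struct clk *dbclk)
{
	return clk_prepare_enable(dbclk);	/* prepare (may sleep) + enable (atomic) */
}

static void sketch_dbclk_off(struct clk *dbclk)
{
	clk_disable_unprepare(dbclk);		/* disable, then unprepare */
}
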
@@ -2076,55 +2148,50 @@ static int omap_hsmmc_remove(struct platform_device *pdev) | |||
2076 | } | 2148 | } |
2077 | 2149 | ||
2078 | #ifdef CONFIG_PM | 2150 | #ifdef CONFIG_PM |
2079 | static int omap_hsmmc_prepare(struct device *dev) | ||
2080 | { | ||
2081 | struct omap_hsmmc_host *host = dev_get_drvdata(dev); | ||
2082 | |||
2083 | if (host->pdata->suspend) | ||
2084 | return host->pdata->suspend(dev, host->slot_id); | ||
2085 | |||
2086 | return 0; | ||
2087 | } | ||
2088 | |||
2089 | static void omap_hsmmc_complete(struct device *dev) | ||
2090 | { | ||
2091 | struct omap_hsmmc_host *host = dev_get_drvdata(dev); | ||
2092 | |||
2093 | if (host->pdata->resume) | ||
2094 | host->pdata->resume(dev, host->slot_id); | ||
2095 | |||
2096 | } | ||
2097 | |||
2098 | static int omap_hsmmc_suspend(struct device *dev) | 2151 | static int omap_hsmmc_suspend(struct device *dev) |
2099 | { | 2152 | { |
2100 | int ret = 0; | 2153 | int ret = 0; |
2101 | struct omap_hsmmc_host *host = dev_get_drvdata(dev); | 2154 | struct platform_device *pdev = to_platform_device(dev); |
2102 | 2155 | struct omap_hsmmc_host *host = platform_get_drvdata(pdev); | |
2103 | if (!host) | ||
2104 | return 0; | ||
2105 | 2156 | ||
2106 | if (host && host->suspended) | 2157 | if (host && host->suspended) |
2107 | return 0; | 2158 | return 0; |
2108 | 2159 | ||
2109 | pm_runtime_get_sync(host->dev); | 2160 | if (host) { |
2110 | host->suspended = 1; | 2161 | pm_runtime_get_sync(host->dev); |
2111 | ret = mmc_suspend_host(host->mmc); | 2162 | host->suspended = 1; |
2112 | 2163 | if (host->pdata->suspend) { | |
2113 | if (ret) { | 2164 | ret = host->pdata->suspend(&pdev->dev, |
2114 | host->suspended = 0; | 2165 | host->slot_id); |
2115 | goto err; | 2166 | if (ret) { |
2116 | } | 2167 | dev_dbg(mmc_dev(host->mmc), |
2168 | "Unable to handle MMC board" | ||
2169 | " level suspend\n"); | ||
2170 | host->suspended = 0; | ||
2171 | return ret; | ||
2172 | } | ||
2173 | } | ||
2174 | cancel_work_sync(&host->mmc_carddetect_work); | ||
2175 | ret = mmc_suspend_host(host->mmc); | ||
2117 | 2176 | ||
2118 | if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) { | 2177 | if (ret == 0) { |
2119 | omap_hsmmc_disable_irq(host); | 2178 | omap_hsmmc_disable_irq(host); |
2120 | OMAP_HSMMC_WRITE(host->base, HCTL, | 2179 | OMAP_HSMMC_WRITE(host->base, HCTL, |
2121 | OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); | 2180 | OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); |
2181 | if (host->got_dbclk) | ||
2182 | clk_disable(host->dbclk); | ||
2183 | } else { | ||
2184 | host->suspended = 0; | ||
2185 | if (host->pdata->resume) { | ||
2186 | ret = host->pdata->resume(&pdev->dev, | ||
2187 | host->slot_id); | ||
2188 | if (ret) | ||
2189 | dev_dbg(mmc_dev(host->mmc), | ||
2190 | "Unmask interrupt failed\n"); | ||
2191 | } | ||
2192 | } | ||
2193 | pm_runtime_put_sync(host->dev); | ||
2122 | } | 2194 | } |
2123 | |||
2124 | if (host->dbclk) | ||
2125 | clk_disable_unprepare(host->dbclk); | ||
2126 | err: | ||
2127 | pm_runtime_put_sync(host->dev); | ||
2128 | return ret; | 2195 | return ret; |
2129 | } | 2196 | } |
2130 | 2197 | ||
@@ -2132,41 +2199,45 @@ err: | |||
2132 | static int omap_hsmmc_resume(struct device *dev) | 2199 | static int omap_hsmmc_resume(struct device *dev) |
2133 | { | 2200 | { |
2134 | int ret = 0; | 2201 | int ret = 0; |
2135 | struct omap_hsmmc_host *host = dev_get_drvdata(dev); | 2202 | struct platform_device *pdev = to_platform_device(dev); |
2136 | 2203 | struct omap_hsmmc_host *host = platform_get_drvdata(pdev); | |
2137 | if (!host) | ||
2138 | return 0; | ||
2139 | 2204 | ||
2140 | if (host && !host->suspended) | 2205 | if (host && !host->suspended) |
2141 | return 0; | 2206 | return 0; |
2142 | 2207 | ||
2143 | pm_runtime_get_sync(host->dev); | 2208 | if (host) { |
2209 | pm_runtime_get_sync(host->dev); | ||
2144 | 2210 | ||
2145 | if (host->dbclk) | 2211 | if (host->got_dbclk) |
2146 | clk_prepare_enable(host->dbclk); | 2212 | clk_enable(host->dbclk); |
2147 | 2213 | ||
2148 | if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) | ||
2149 | omap_hsmmc_conf_bus_power(host); | 2214 | omap_hsmmc_conf_bus_power(host); |
2150 | 2215 | ||
2151 | omap_hsmmc_protect_card(host); | 2216 | if (host->pdata->resume) { |
2217 | ret = host->pdata->resume(&pdev->dev, host->slot_id); | ||
2218 | if (ret) | ||
2219 | dev_dbg(mmc_dev(host->mmc), | ||
2220 | "Unmask interrupt failed\n"); | ||
2221 | } | ||
2152 | 2222 | ||
2153 | /* Notify the core to resume the host */ | 2223 | omap_hsmmc_protect_card(host); |
2154 | ret = mmc_resume_host(host->mmc); | ||
2155 | if (ret == 0) | ||
2156 | host->suspended = 0; | ||
2157 | 2224 | ||
2158 | pm_runtime_mark_last_busy(host->dev); | 2225 | /* Notify the core to resume the host */ |
2159 | pm_runtime_put_autosuspend(host->dev); | 2226 | ret = mmc_resume_host(host->mmc); |
2227 | if (ret == 0) | ||
2228 | host->suspended = 0; | ||
2229 | |||
2230 | pm_runtime_mark_last_busy(host->dev); | ||
2231 | pm_runtime_put_autosuspend(host->dev); | ||
2232 | } | ||
2160 | 2233 | ||
2161 | return ret; | 2234 | return ret; |
2162 | 2235 | ||
2163 | } | 2236 | } |
2164 | 2237 | ||
2165 | #else | 2238 | #else |
2166 | #define omap_hsmmc_prepare NULL | ||
2167 | #define omap_hsmmc_complete NULL | ||
2168 | #define omap_hsmmc_suspend NULL | 2239 | #define omap_hsmmc_suspend NULL |
2169 | #define omap_hsmmc_resume NULL | 2240 | #define omap_hsmmc_resume NULL |
2170 | #endif | 2241 | #endif |
2171 | 2242 | ||
2172 | static int omap_hsmmc_runtime_suspend(struct device *dev) | 2243 | static int omap_hsmmc_runtime_suspend(struct device *dev) |
@@ -2175,7 +2246,7 @@ static int omap_hsmmc_runtime_suspend(struct device *dev) | |||
2175 | 2246 | ||
2176 | host = platform_get_drvdata(to_platform_device(dev)); | 2247 | host = platform_get_drvdata(to_platform_device(dev)); |
2177 | omap_hsmmc_context_save(host); | 2248 | omap_hsmmc_context_save(host); |
2178 | dev_dbg(dev, "disabled\n"); | 2249 | dev_dbg(mmc_dev(host->mmc), "disabled\n"); |
2179 | 2250 | ||
2180 | return 0; | 2251 | return 0; |
2181 | } | 2252 | } |
@@ -2186,7 +2257,7 @@ static int omap_hsmmc_runtime_resume(struct device *dev) | |||
2186 | 2257 | ||
2187 | host = platform_get_drvdata(to_platform_device(dev)); | 2258 | host = platform_get_drvdata(to_platform_device(dev)); |
2188 | omap_hsmmc_context_restore(host); | 2259 | omap_hsmmc_context_restore(host); |
2189 | dev_dbg(dev, "enabled\n"); | 2260 | dev_dbg(mmc_dev(host->mmc), "enabled\n"); |
2190 | 2261 | ||
2191 | return 0; | 2262 | return 0; |
2192 | } | 2263 | } |
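
As shown, the runtime-PM callbacks here mainly save and restore controller context; the interesting part is the get/put pattern used throughout the driver around register access. A condensed sketch of that idiom with placeholder names, assuming <linux/pm_runtime.h>:

#include <linux/pm_runtime.h>

#define SKETCH_AUTOSUSPEND_DELAY_MS	100	/* placeholder delay */

static void sketch_runtime_setup(struct device *dev)
{
	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);
	pm_runtime_set_autosuspend_delay(dev, SKETCH_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(dev);
}

static void sketch_touch_registers(struct device *dev)
{
	pm_runtime_get_sync(dev);		/* resumes the block if it was idle */

	/* ... MMIO accesses; .runtime_resume restored context first ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* allow idling again after the delay */
}
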
@@ -2194,24 +2265,34 @@ static int omap_hsmmc_runtime_resume(struct device *dev) | |||
2194 | static struct dev_pm_ops omap_hsmmc_dev_pm_ops = { | 2265 | static struct dev_pm_ops omap_hsmmc_dev_pm_ops = { |
2195 | .suspend = omap_hsmmc_suspend, | 2266 | .suspend = omap_hsmmc_suspend, |
2196 | .resume = omap_hsmmc_resume, | 2267 | .resume = omap_hsmmc_resume, |
2197 | .prepare = omap_hsmmc_prepare, | ||
2198 | .complete = omap_hsmmc_complete, | ||
2199 | .runtime_suspend = omap_hsmmc_runtime_suspend, | 2268 | .runtime_suspend = omap_hsmmc_runtime_suspend, |
2200 | .runtime_resume = omap_hsmmc_runtime_resume, | 2269 | .runtime_resume = omap_hsmmc_runtime_resume, |
2201 | }; | 2270 | }; |
2202 | 2271 | ||
2203 | static struct platform_driver omap_hsmmc_driver = { | 2272 | static struct platform_driver omap_hsmmc_driver = { |
2204 | .probe = omap_hsmmc_probe, | ||
2205 | .remove = omap_hsmmc_remove, | 2273 | .remove = omap_hsmmc_remove, |
2206 | .driver = { | 2274 | .driver = { |
2207 | .name = DRIVER_NAME, | 2275 | .name = DRIVER_NAME, |
2208 | .owner = THIS_MODULE, | 2276 | .owner = THIS_MODULE, |
2209 | .pm = &omap_hsmmc_dev_pm_ops, | 2277 | .pm = &omap_hsmmc_dev_pm_ops, |
2210 | .of_match_table = of_match_ptr(omap_mmc_of_match), | ||
2211 | }, | 2278 | }, |
2212 | }; | 2279 | }; |
2213 | 2280 | ||
2214 | module_platform_driver(omap_hsmmc_driver); | 2281 | static int __init omap_hsmmc_init(void) |
2282 | { | ||
2283 | /* Register the MMC driver */ | ||
2284 | return platform_driver_probe(&omap_hsmmc_driver, omap_hsmmc_probe); | ||
2285 | } | ||
2286 | |||
2287 | static void __exit omap_hsmmc_cleanup(void) | ||
2288 | { | ||
2289 | /* Unregister MMC driver */ | ||
2290 | platform_driver_unregister(&omap_hsmmc_driver); | ||
2291 | } | ||
2292 | |||
2293 | module_init(omap_hsmmc_init); | ||
2294 | module_exit(omap_hsmmc_cleanup); | ||
2295 | |||
2215 | MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver"); | 2296 | MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver"); |
2216 | MODULE_LICENSE("GPL"); | 2297 | MODULE_LICENSE("GPL"); |
2217 | MODULE_ALIAS("platform:" DRIVER_NAME); | 2298 | MODULE_ALIAS("platform:" DRIVER_NAME); |
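
Both this file and pxamci below make the same registration change: the single-line module_platform_driver() macro (left column), which expands to an init/exit pair around platform_driver_register()/platform_driver_unregister(), is replaced by explicit init/exit functions. In this file the replacement calls platform_driver_probe(), which binds exactly once at init time and lets the probe routine itself be __init; pxamci keeps a .probe member and calls platform_driver_register(). A minimal sketch of the platform_driver_probe() style, with placeholder names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int __init sketch_drv_probe(struct platform_device *pdev)
{
	return 0;	/* placeholder probe, discarded after init */
}

static struct platform_driver sketch_drv = {
	/* no .probe member: it is passed to platform_driver_probe() instead */
	.driver = {
		.name	= "sketch-device",
		.owner	= THIS_MODULE,
	},
};

static int __init sketch_drv_init(void)
{
	return platform_driver_probe(&sketch_drv, sketch_drv_probe);
}
module_init(sketch_drv_init);

static void __exit sketch_drv_exit(void)
{
	platform_driver_unregister(&sketch_drv);
}
module_exit(sketch_drv_exit);

MODULE_LICENSE("GPL");
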
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 2b2f65ada22..7257738fd7d 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c | |||
@@ -30,15 +30,12 @@ | |||
30 | #include <linux/regulator/consumer.h> | 30 | #include <linux/regulator/consumer.h> |
31 | #include <linux/gpio.h> | 31 | #include <linux/gpio.h> |
32 | #include <linux/gfp.h> | 32 | #include <linux/gfp.h> |
33 | #include <linux/of.h> | ||
34 | #include <linux/of_gpio.h> | ||
35 | #include <linux/of_device.h> | ||
36 | 33 | ||
37 | #include <asm/sizes.h> | 34 | #include <asm/sizes.h> |
38 | 35 | ||
39 | #include <mach/hardware.h> | 36 | #include <mach/hardware.h> |
40 | #include <mach/dma.h> | 37 | #include <mach/dma.h> |
41 | #include <linux/platform_data/mmc-pxamci.h> | 38 | #include <mach/mmc.h> |
42 | 39 | ||
43 | #include "pxamci.h" | 40 | #include "pxamci.h" |
44 | 41 | ||
@@ -561,7 +558,7 @@ static void pxamci_dma_irq(int dma, void *devid) | |||
561 | if (dcsr & DCSR_ENDINTR) { | 558 | if (dcsr & DCSR_ENDINTR) { |
562 | writel(BUF_PART_FULL, host->base + MMC_PRTBUF); | 559 | writel(BUF_PART_FULL, host->base + MMC_PRTBUF); |
563 | } else { | 560 | } else { |
564 | pr_err("%s: DMA error on channel %d (DCSR=%#x)\n", | 561 | printk(KERN_ERR "%s: DMA error on channel %d (DCSR=%#x)\n", |
565 | mmc_hostname(host->mmc), dma, dcsr); | 562 | mmc_hostname(host->mmc), dma, dcsr); |
566 | host->data->error = -EIO; | 563 | host->data->error = -EIO; |
567 | pxamci_data_done(host, 0); | 564 | pxamci_data_done(host, 0); |
@@ -576,50 +573,6 @@ static irqreturn_t pxamci_detect_irq(int irq, void *devid) | |||
576 | return IRQ_HANDLED; | 573 | return IRQ_HANDLED; |
577 | } | 574 | } |
578 | 575 | ||
579 | #ifdef CONFIG_OF | ||
580 | static const struct of_device_id pxa_mmc_dt_ids[] = { | ||
581 | { .compatible = "marvell,pxa-mmc" }, | ||
582 | { } | ||
583 | }; | ||
584 | |||
585 | MODULE_DEVICE_TABLE(of, pxa_mmc_dt_ids); | ||
586 | |||
587 | static int pxamci_of_init(struct platform_device *pdev) | ||
588 | { | ||
589 | struct device_node *np = pdev->dev.of_node; | ||
590 | struct pxamci_platform_data *pdata; | ||
591 | u32 tmp; | ||
592 | |||
593 | if (!np) | ||
594 | return 0; | ||
595 | |||
596 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | ||
597 | if (!pdata) | ||
598 | return -ENOMEM; | ||
599 | |||
600 | pdata->gpio_card_detect = | ||
601 | of_get_named_gpio(np, "cd-gpios", 0); | ||
602 | pdata->gpio_card_ro = | ||
603 | of_get_named_gpio(np, "wp-gpios", 0); | ||
604 | |||
605 | /* pxa-mmc specific */ | ||
606 | pdata->gpio_power = | ||
607 | of_get_named_gpio(np, "pxa-mmc,gpio-power", 0); | ||
608 | |||
609 | if (of_property_read_u32(np, "pxa-mmc,detect-delay-ms", &tmp) == 0) | ||
610 | pdata->detect_delay_ms = tmp; | ||
611 | |||
612 | pdev->dev.platform_data = pdata; | ||
613 | |||
614 | return 0; | ||
615 | } | ||
616 | #else | ||
617 | static int pxamci_of_init(struct platform_device *pdev) | ||
618 | { | ||
619 | return 0; | ||
620 | } | ||
621 | #endif | ||
622 | |||
623 | static int pxamci_probe(struct platform_device *pdev) | 576 | static int pxamci_probe(struct platform_device *pdev) |
624 | { | 577 | { |
625 | struct mmc_host *mmc; | 578 | struct mmc_host *mmc; |
@@ -627,10 +580,6 @@ static int pxamci_probe(struct platform_device *pdev) | |||
627 | struct resource *r, *dmarx, *dmatx; | 580 | struct resource *r, *dmarx, *dmatx; |
628 | int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1; | 581 | int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1; |
629 | 582 | ||
630 | ret = pxamci_of_init(pdev); | ||
631 | if (ret) | ||
632 | return ret; | ||
633 | |||
634 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 583 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
635 | irq = platform_get_irq(pdev, 0); | 584 | irq = platform_get_irq(pdev, 0); |
636 | if (!r || irq < 0) | 585 | if (!r || irq < 0) |
@@ -917,14 +866,24 @@ static struct platform_driver pxamci_driver = { | |||
917 | .driver = { | 866 | .driver = { |
918 | .name = DRIVER_NAME, | 867 | .name = DRIVER_NAME, |
919 | .owner = THIS_MODULE, | 868 | .owner = THIS_MODULE, |
920 | .of_match_table = of_match_ptr(pxa_mmc_dt_ids), | ||
921 | #ifdef CONFIG_PM | 869 | #ifdef CONFIG_PM |
922 | .pm = &pxamci_pm_ops, | 870 | .pm = &pxamci_pm_ops, |
923 | #endif | 871 | #endif |
924 | }, | 872 | }, |
925 | }; | 873 | }; |
926 | 874 | ||
927 | module_platform_driver(pxamci_driver); | 875 | static int __init pxamci_init(void) |
876 | { | ||
877 | return platform_driver_register(&pxamci_driver); | ||
878 | } | ||
879 | |||
880 | static void __exit pxamci_exit(void) | ||
881 | { | ||
882 | platform_driver_unregister(&pxamci_driver); | ||
883 | } | ||
884 | |||
885 | module_init(pxamci_init); | ||
886 | module_exit(pxamci_exit); | ||
928 | 887 | ||
929 | MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver"); | 888 | MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver"); |
930 | MODULE_LICENSE("GPL"); | 889 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c deleted file mode 100644 index 571915dfb21..00000000000 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ /dev/null | |||
@@ -1,1348 +0,0 @@ | |||
1 | /* Realtek PCI-Express SD/MMC Card Interface driver | ||
2 | * | ||
3 | * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License as published by the | ||
7 | * Free Software Foundation; either version 2, or (at your option) any | ||
8 | * later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
13 | * General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along | ||
16 | * with this program; if not, see <http://www.gnu.org/licenses/>. | ||
17 | * | ||
18 | * Author: | ||
19 | * Wei WANG <wei_wang@realsil.com.cn> | ||
20 | * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China | ||
21 | */ | ||
22 | |||
23 | #include <linux/module.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/highmem.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/platform_device.h> | ||
28 | #include <linux/mmc/host.h> | ||
29 | #include <linux/mmc/mmc.h> | ||
30 | #include <linux/mmc/sd.h> | ||
31 | #include <linux/mmc/card.h> | ||
32 | #include <linux/mfd/rtsx_pci.h> | ||
33 | #include <asm/unaligned.h> | ||
34 | |||
35 | /* SD Tuning Data Structure | ||
36 | * Record continuous timing phase path | ||
37 | */ | ||
38 | struct timing_phase_path { | ||
39 | int start; | ||
40 | int end; | ||
41 | int mid; | ||
42 | int len; | ||
43 | }; | ||
44 | |||
45 | struct realtek_pci_sdmmc { | ||
46 | struct platform_device *pdev; | ||
47 | struct rtsx_pcr *pcr; | ||
48 | struct mmc_host *mmc; | ||
49 | struct mmc_request *mrq; | ||
50 | |||
51 | struct mutex host_mutex; | ||
52 | |||
53 | u8 ssc_depth; | ||
54 | unsigned int clock; | ||
55 | bool vpclk; | ||
56 | bool double_clk; | ||
57 | bool eject; | ||
58 | bool initial_mode; | ||
59 | bool ddr_mode; | ||
60 | }; | ||
61 | |||
62 | static inline struct device *sdmmc_dev(struct realtek_pci_sdmmc *host) | ||
63 | { | ||
64 | return &(host->pdev->dev); | ||
65 | } | ||
66 | |||
67 | static inline void sd_clear_error(struct realtek_pci_sdmmc *host) | ||
68 | { | ||
69 | rtsx_pci_write_register(host->pcr, CARD_STOP, | ||
70 | SD_STOP | SD_CLR_ERR, SD_STOP | SD_CLR_ERR); | ||
71 | } | ||
72 | |||
73 | #ifdef DEBUG | ||
74 | static void sd_print_debug_regs(struct realtek_pci_sdmmc *host) | ||
75 | { | ||
76 | struct rtsx_pcr *pcr = host->pcr; | ||
77 | u16 i; | ||
78 | u8 *ptr; | ||
79 | |||
80 | /* Print SD host internal registers */ | ||
81 | rtsx_pci_init_cmd(pcr); | ||
82 | for (i = 0xFDA0; i <= 0xFDAE; i++) | ||
83 | rtsx_pci_add_cmd(pcr, READ_REG_CMD, i, 0, 0); | ||
84 | for (i = 0xFD52; i <= 0xFD69; i++) | ||
85 | rtsx_pci_add_cmd(pcr, READ_REG_CMD, i, 0, 0); | ||
86 | rtsx_pci_send_cmd(pcr, 100); | ||
87 | |||
88 | ptr = rtsx_pci_get_cmd_data(pcr); | ||
89 | for (i = 0xFDA0; i <= 0xFDAE; i++) | ||
90 | dev_dbg(sdmmc_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++)); | ||
91 | for (i = 0xFD52; i <= 0xFD69; i++) | ||
92 | dev_dbg(sdmmc_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++)); | ||
93 | } | ||
94 | #else | ||
95 | #define sd_print_debug_regs(host) | ||
96 | #endif /* DEBUG */ | ||
97 | |||
98 | static int sd_read_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt, | ||
99 | u8 *buf, int buf_len, int timeout) | ||
100 | { | ||
101 | struct rtsx_pcr *pcr = host->pcr; | ||
102 | int err, i; | ||
103 | u8 trans_mode; | ||
104 | |||
105 | dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD%d\n", __func__, cmd[0] - 0x40); | ||
106 | |||
107 | if (!buf) | ||
108 | buf_len = 0; | ||
109 | |||
110 | if ((cmd[0] & 0x3F) == MMC_SEND_TUNING_BLOCK) | ||
111 | trans_mode = SD_TM_AUTO_TUNING; | ||
112 | else | ||
113 | trans_mode = SD_TM_NORMAL_READ; | ||
114 | |||
115 | rtsx_pci_init_cmd(pcr); | ||
116 | |||
117 | for (i = 0; i < 5; i++) | ||
118 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD0 + i, 0xFF, cmd[i]); | ||
119 | |||
120 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, (u8)byte_cnt); | ||
121 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_H, | ||
122 | 0xFF, (u8)(byte_cnt >> 8)); | ||
123 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, 1); | ||
124 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, 0); | ||
125 | |||
126 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, | ||
127 | SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | | ||
128 | SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6); | ||
129 | if (trans_mode != SD_TM_AUTO_TUNING) | ||
130 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, | ||
131 | CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); | ||
132 | |||
133 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, | ||
134 | 0xFF, trans_mode | SD_TRANSFER_START); | ||
135 | rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER, | ||
136 | SD_TRANSFER_END, SD_TRANSFER_END); | ||
137 | |||
138 | err = rtsx_pci_send_cmd(pcr, timeout); | ||
139 | if (err < 0) { | ||
140 | sd_print_debug_regs(host); | ||
141 | dev_dbg(sdmmc_dev(host), | ||
142 | "rtsx_pci_send_cmd fail (err = %d)\n", err); | ||
143 | return err; | ||
144 | } | ||
145 | |||
146 | if (buf && buf_len) { | ||
147 | err = rtsx_pci_read_ppbuf(pcr, buf, buf_len); | ||
148 | if (err < 0) { | ||
149 | dev_dbg(sdmmc_dev(host), | ||
150 | "rtsx_pci_read_ppbuf fail (err = %d)\n", err); | ||
151 | return err; | ||
152 | } | ||
153 | } | ||
154 | |||
155 | return 0; | ||
156 | } | ||
157 | |||
158 | static int sd_write_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt, | ||
159 | u8 *buf, int buf_len, int timeout) | ||
160 | { | ||
161 | struct rtsx_pcr *pcr = host->pcr; | ||
162 | int err, i; | ||
163 | u8 trans_mode; | ||
164 | |||
165 | if (!buf) | ||
166 | buf_len = 0; | ||
167 | |||
168 | if (buf && buf_len) { | ||
169 | err = rtsx_pci_write_ppbuf(pcr, buf, buf_len); | ||
170 | if (err < 0) { | ||
171 | dev_dbg(sdmmc_dev(host), | ||
172 | "rtsx_pci_write_ppbuf fail (err = %d)\n", err); | ||
173 | return err; | ||
174 | } | ||
175 | } | ||
176 | |||
177 | trans_mode = cmd ? SD_TM_AUTO_WRITE_2 : SD_TM_AUTO_WRITE_3; | ||
178 | rtsx_pci_init_cmd(pcr); | ||
179 | |||
180 | if (cmd) { | ||
181 | dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d\n", __func__, | ||
182 | cmd[0] - 0x40); | ||
183 | |||
184 | for (i = 0; i < 5; i++) | ||
185 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, | ||
186 | SD_CMD0 + i, 0xFF, cmd[i]); | ||
187 | } | ||
188 | |||
189 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, (u8)byte_cnt); | ||
190 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_H, | ||
191 | 0xFF, (u8)(byte_cnt >> 8)); | ||
192 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, 1); | ||
193 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, 0); | ||
194 | |||
195 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, | ||
196 | SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | | ||
197 | SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6); | ||
198 | |||
199 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, 0xFF, | ||
200 | trans_mode | SD_TRANSFER_START); | ||
201 | rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER, | ||
202 | SD_TRANSFER_END, SD_TRANSFER_END); | ||
203 | |||
204 | err = rtsx_pci_send_cmd(pcr, timeout); | ||
205 | if (err < 0) { | ||
206 | sd_print_debug_regs(host); | ||
207 | dev_dbg(sdmmc_dev(host), | ||
208 | "rtsx_pci_send_cmd fail (err = %d)\n", err); | ||
209 | return err; | ||
210 | } | ||
211 | |||
212 | return 0; | ||
213 | } | ||
214 | |||
215 | static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host, | ||
216 | struct mmc_command *cmd) | ||
217 | { | ||
218 | struct rtsx_pcr *pcr = host->pcr; | ||
219 | u8 cmd_idx = (u8)cmd->opcode; | ||
220 | u32 arg = cmd->arg; | ||
221 | int err = 0; | ||
222 | int timeout = 100; | ||
223 | int i; | ||
224 | u8 *ptr; | ||
225 | int stat_idx = 0; | ||
226 | u8 rsp_type; | ||
227 | int rsp_len = 5; | ||
228 | |||
229 | dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n", | ||
230 | __func__, cmd_idx, arg); | ||
231 | |||
232 | /* Response type: | ||
233 | * R0 | ||
234 | * R1, R5, R6, R7 | ||
235 | * R1b | ||
236 | * R2 | ||
237 | * R3, R4 | ||
238 | */ | ||
239 | switch (mmc_resp_type(cmd)) { | ||
240 | case MMC_RSP_NONE: | ||
241 | rsp_type = SD_RSP_TYPE_R0; | ||
242 | rsp_len = 0; | ||
243 | break; | ||
244 | case MMC_RSP_R1: | ||
245 | rsp_type = SD_RSP_TYPE_R1; | ||
246 | break; | ||
247 | case MMC_RSP_R1B: | ||
248 | rsp_type = SD_RSP_TYPE_R1b; | ||
249 | break; | ||
250 | case MMC_RSP_R2: | ||
251 | rsp_type = SD_RSP_TYPE_R2; | ||
252 | rsp_len = 16; | ||
253 | break; | ||
254 | case MMC_RSP_R3: | ||
255 | rsp_type = SD_RSP_TYPE_R3; | ||
256 | break; | ||
257 | default: | ||
258 | dev_dbg(sdmmc_dev(host), "cmd->flag is not valid\n"); | ||
259 | err = -EINVAL; | ||
260 | goto out; | ||
261 | } | ||
262 | |||
263 | if (rsp_type == SD_RSP_TYPE_R1b) | ||
264 | timeout = 3000; | ||
265 | |||
266 | if (cmd->opcode == SD_SWITCH_VOLTAGE) { | ||
267 | err = rtsx_pci_write_register(pcr, SD_BUS_STAT, | ||
268 | 0xFF, SD_CLK_TOGGLE_EN); | ||
269 | if (err < 0) | ||
270 | goto out; | ||
271 | } | ||
272 | |||
273 | rtsx_pci_init_cmd(pcr); | ||
274 | |||
275 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD0, 0xFF, 0x40 | cmd_idx); | ||
276 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD1, 0xFF, (u8)(arg >> 24)); | ||
277 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD2, 0xFF, (u8)(arg >> 16)); | ||
278 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD3, 0xFF, (u8)(arg >> 8)); | ||
279 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD4, 0xFF, (u8)arg); | ||
280 | |||
281 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, rsp_type); | ||
282 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE, | ||
283 | 0x01, PINGPONG_BUFFER); | ||
284 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, | ||
285 | 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START); | ||
286 | rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER, | ||
287 | SD_TRANSFER_END | SD_STAT_IDLE, | ||
288 | SD_TRANSFER_END | SD_STAT_IDLE); | ||
289 | |||
290 | if (rsp_type == SD_RSP_TYPE_R2) { | ||
291 | /* Read data from ping-pong buffer */ | ||
292 | for (i = PPBUF_BASE2; i < PPBUF_BASE2 + 16; i++) | ||
293 | rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0); | ||
294 | stat_idx = 16; | ||
295 | } else if (rsp_type != SD_RSP_TYPE_R0) { | ||
296 | /* Read data from SD_CMDx registers */ | ||
297 | for (i = SD_CMD0; i <= SD_CMD4; i++) | ||
298 | rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0); | ||
299 | stat_idx = 5; | ||
300 | } | ||
301 | |||
302 | rtsx_pci_add_cmd(pcr, READ_REG_CMD, SD_STAT1, 0, 0); | ||
303 | |||
304 | err = rtsx_pci_send_cmd(pcr, timeout); | ||
305 | if (err < 0) { | ||
306 | sd_print_debug_regs(host); | ||
307 | sd_clear_error(host); | ||
308 | dev_dbg(sdmmc_dev(host), | ||
309 | "rtsx_pci_send_cmd error (err = %d)\n", err); | ||
310 | goto out; | ||
311 | } | ||
312 | |||
313 | if (rsp_type == SD_RSP_TYPE_R0) { | ||
314 | err = 0; | ||
315 | goto out; | ||
316 | } | ||
317 | |||
318 | /* Eliminate returned value of CHECK_REG_CMD */ | ||
319 | ptr = rtsx_pci_get_cmd_data(pcr) + 1; | ||
320 | |||
321 | /* Check (Start,Transmission) bit of Response */ | ||
322 | if ((ptr[0] & 0xC0) != 0) { | ||
323 | err = -EILSEQ; | ||
324 | dev_dbg(sdmmc_dev(host), "Invalid response bit\n"); | ||
325 | goto out; | ||
326 | } | ||
327 | |||
328 | /* Check CRC7 */ | ||
329 | if (!(rsp_type & SD_NO_CHECK_CRC7)) { | ||
330 | if (ptr[stat_idx] & SD_CRC7_ERR) { | ||
331 | err = -EILSEQ; | ||
332 | dev_dbg(sdmmc_dev(host), "CRC7 error\n"); | ||
333 | goto out; | ||
334 | } | ||
335 | } | ||
336 | |||
337 | if (rsp_type == SD_RSP_TYPE_R2) { | ||
338 | for (i = 0; i < 4; i++) { | ||
339 | cmd->resp[i] = get_unaligned_be32(ptr + 1 + i * 4); | ||
340 | dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n", | ||
341 | i, cmd->resp[i]); | ||
342 | } | ||
343 | } else { | ||
344 | cmd->resp[0] = get_unaligned_be32(ptr + 1); | ||
345 | dev_dbg(sdmmc_dev(host), "cmd->resp[0] = 0x%08x\n", | ||
346 | cmd->resp[0]); | ||
347 | } | ||
348 | |||
349 | out: | ||
350 | cmd->error = err; | ||
351 | } | ||
352 | |||
353 | static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq) | ||
354 | { | ||
355 | struct rtsx_pcr *pcr = host->pcr; | ||
356 | struct mmc_host *mmc = host->mmc; | ||
357 | struct mmc_card *card = mmc->card; | ||
358 | struct mmc_data *data = mrq->data; | ||
359 | int uhs = mmc_sd_card_uhs(card); | ||
360 | int read = (data->flags & MMC_DATA_READ) ? 1 : 0; | ||
361 | u8 cfg2, trans_mode; | ||
362 | int err; | ||
363 | size_t data_len = data->blksz * data->blocks; | ||
364 | |||
365 | if (read) { | ||
366 | cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | | ||
367 | SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_0; | ||
368 | trans_mode = SD_TM_AUTO_READ_3; | ||
369 | } else { | ||
370 | cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 | | ||
371 | SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 | SD_RSP_LEN_0; | ||
372 | trans_mode = SD_TM_AUTO_WRITE_3; | ||
373 | } | ||
374 | |||
375 | if (!uhs) | ||
376 | cfg2 |= SD_NO_CHECK_WAIT_CRC_TO; | ||
377 | |||
378 | rtsx_pci_init_cmd(pcr); | ||
379 | |||
380 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, 0x00); | ||
381 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_H, 0xFF, 0x02); | ||
382 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_L, | ||
383 | 0xFF, (u8)data->blocks); | ||
384 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_H, | ||
385 | 0xFF, (u8)(data->blocks >> 8)); | ||
386 | |||
387 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0, | ||
388 | DMA_DONE_INT, DMA_DONE_INT); | ||
389 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC3, | ||
390 | 0xFF, (u8)(data_len >> 24)); | ||
391 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC2, | ||
392 | 0xFF, (u8)(data_len >> 16)); | ||
393 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC1, | ||
394 | 0xFF, (u8)(data_len >> 8)); | ||
395 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC0, 0xFF, (u8)data_len); | ||
396 | if (read) { | ||
397 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMACTL, | ||
398 | 0x03 | DMA_PACK_SIZE_MASK, | ||
399 | DMA_DIR_FROM_CARD | DMA_EN | DMA_512); | ||
400 | } else { | ||
401 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMACTL, | ||
402 | 0x03 | DMA_PACK_SIZE_MASK, | ||
403 | DMA_DIR_TO_CARD | DMA_EN | DMA_512); | ||
404 | } | ||
405 | |||
406 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE, | ||
407 | 0x01, RING_BUFFER); | ||
408 | |||
409 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, cfg2); | ||
410 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, 0xFF, | ||
411 | trans_mode | SD_TRANSFER_START); | ||
412 | rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER, | ||
413 | SD_TRANSFER_END, SD_TRANSFER_END); | ||
414 | |||
415 | rtsx_pci_send_cmd_no_wait(pcr); | ||
416 | |||
417 | err = rtsx_pci_transfer_data(pcr, data->sg, data->sg_len, read, 10000); | ||
418 | if (err < 0) { | ||
419 | sd_clear_error(host); | ||
420 | return err; | ||
421 | } | ||
422 | |||
423 | return 0; | ||
424 | } | ||
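sd_rw_multi() programs the 16-bit block count and the 32-bit byte count into pairs and quadruples of 8-bit registers before kicking off the DMA transfer. A minimal standalone sketch of that byte-splitting, using a hypothetical request size (not driver code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t blocks = 8;				/* hypothetical request: 8 blocks */
	uint32_t data_len = (uint32_t)blocks * 512;	/* 4096 bytes */

	printf("SD_BLOCK_CNT_L = 0x%02X\n", (unsigned)(uint8_t)blocks);
	printf("SD_BLOCK_CNT_H = 0x%02X\n", (unsigned)(uint8_t)(blocks >> 8));
	printf("DMATC3..DMATC0 = %02X %02X %02X %02X\n",
	       (unsigned)(uint8_t)(data_len >> 24),
	       (unsigned)(uint8_t)(data_len >> 16),
	       (unsigned)(uint8_t)(data_len >> 8),
	       (unsigned)(uint8_t)data_len);		/* 00 00 10 00 */
	return 0;
}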
425 | |||
426 | static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host) | ||
427 | { | ||
428 | rtsx_pci_write_register(host->pcr, SD_CFG1, | ||
429 | SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128); | ||
430 | } | ||
431 | |||
432 | static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host) | ||
433 | { | ||
434 | rtsx_pci_write_register(host->pcr, SD_CFG1, | ||
435 | SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0); | ||
436 | } | ||
437 | |||
438 | static void sd_normal_rw(struct realtek_pci_sdmmc *host, | ||
439 | struct mmc_request *mrq) | ||
440 | { | ||
441 | struct mmc_command *cmd = mrq->cmd; | ||
442 | struct mmc_data *data = mrq->data; | ||
443 | u8 _cmd[5], *buf; | ||
444 | |||
445 | _cmd[0] = 0x40 | (u8)cmd->opcode; | ||
446 | put_unaligned_be32(cmd->arg, (u32 *)(&_cmd[1])); | ||
447 | |||
448 | buf = kzalloc(data->blksz, GFP_NOIO); | ||
449 | if (!buf) { | ||
450 | cmd->error = -ENOMEM; | ||
451 | return; | ||
452 | } | ||
453 | |||
454 | if (data->flags & MMC_DATA_READ) { | ||
455 | if (host->initial_mode) | ||
456 | sd_disable_initial_mode(host); | ||
457 | |||
458 | cmd->error = sd_read_data(host, _cmd, (u16)data->blksz, buf, | ||
459 | data->blksz, 200); | ||
460 | |||
461 | if (host->initial_mode) | ||
462 | sd_enable_initial_mode(host); | ||
463 | |||
464 | sg_copy_from_buffer(data->sg, data->sg_len, buf, data->blksz); | ||
465 | } else { | ||
466 | sg_copy_to_buffer(data->sg, data->sg_len, buf, data->blksz); | ||
467 | |||
468 | cmd->error = sd_write_data(host, _cmd, (u16)data->blksz, buf, | ||
469 | data->blksz, 200); | ||
470 | } | ||
471 | |||
472 | kfree(buf); | ||
473 | } | ||
474 | |||
475 | static int sd_change_phase(struct realtek_pci_sdmmc *host, u8 sample_point) | ||
476 | { | ||
477 | struct rtsx_pcr *pcr = host->pcr; | ||
478 | int err; | ||
479 | |||
480 | dev_dbg(sdmmc_dev(host), "%s: sample_point = %d\n", | ||
481 | __func__, sample_point); | ||
482 | |||
483 | rtsx_pci_init_cmd(pcr); | ||
484 | |||
485 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, CHANGE_CLK); | ||
486 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPRX_CTL, 0x1F, sample_point); | ||
487 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0); | ||
488 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, | ||
489 | PHASE_NOT_RESET, PHASE_NOT_RESET); | ||
490 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, 0); | ||
491 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0); | ||
492 | |||
493 | err = rtsx_pci_send_cmd(pcr, 100); | ||
494 | if (err < 0) | ||
495 | return err; | ||
496 | |||
497 | return 0; | ||
498 | } | ||
499 | |||
500 | static u8 sd_search_final_phase(struct realtek_pci_sdmmc *host, u32 phase_map) | ||
501 | { | ||
502 | struct timing_phase_path path[MAX_PHASE + 1]; | ||
503 | int i, j, cont_path_cnt; | ||
504 | int new_block, max_len, final_path_idx; | ||
505 | u8 final_phase = 0xFF; | ||
506 | |||
507 | /* Parse phase_map, take it as a bit-ring */ | ||
508 | cont_path_cnt = 0; | ||
509 | new_block = 1; | ||
510 | j = 0; | ||
511 | for (i = 0; i < MAX_PHASE + 1; i++) { | ||
512 | if (phase_map & (1 << i)) { | ||
513 | if (new_block) { | ||
514 | new_block = 0; | ||
515 | j = cont_path_cnt++; | ||
516 | path[j].start = i; | ||
517 | path[j].end = i; | ||
518 | } else { | ||
519 | path[j].end = i; | ||
520 | } | ||
521 | } else { | ||
522 | new_block = 1; | ||
523 | if (cont_path_cnt) { | ||
524 | /* Calculate path length and middle point */ | ||
525 | int idx = cont_path_cnt - 1; | ||
526 | path[idx].len = | ||
527 | path[idx].end - path[idx].start + 1; | ||
528 | path[idx].mid = | ||
529 | path[idx].start + path[idx].len / 2; | ||
530 | } | ||
531 | } | ||
532 | } | ||
533 | |||
534 | if (cont_path_cnt == 0) { | ||
535 | dev_dbg(sdmmc_dev(host), "No continuous phase path\n"); | ||
536 | goto finish; | ||
537 | } else { | ||
538 | /* Calculate last continuous path length and middle point */ | ||
539 | int idx = cont_path_cnt - 1; | ||
540 | path[idx].len = path[idx].end - path[idx].start + 1; | ||
541 | path[idx].mid = path[idx].start + path[idx].len / 2; | ||
542 | } | ||
543 | |||
544 | /* Connect the first and last continuous paths if they are adjacent */ | ||
545 | if (!path[0].start && (path[cont_path_cnt - 1].end == MAX_PHASE)) { | ||
546 | /* Using negative index */ | ||
547 | path[0].start = path[cont_path_cnt - 1].start - MAX_PHASE - 1; | ||
548 | path[0].len += path[cont_path_cnt - 1].len; | ||
549 | path[0].mid = path[0].start + path[0].len / 2; | ||
550 | /* Convert negative middle point index to positive one */ | ||
551 | if (path[0].mid < 0) | ||
552 | path[0].mid += MAX_PHASE + 1; | ||
553 | cont_path_cnt--; | ||
554 | } | ||
555 | |||
556 | /* Choose the longest continuous phase path */ | ||
557 | max_len = 0; | ||
558 | final_phase = 0; | ||
559 | final_path_idx = 0; | ||
560 | for (i = 0; i < cont_path_cnt; i++) { | ||
561 | if (path[i].len > max_len) { | ||
562 | max_len = path[i].len; | ||
563 | final_phase = (u8)path[i].mid; | ||
564 | final_path_idx = i; | ||
565 | } | ||
566 | |||
567 | dev_dbg(sdmmc_dev(host), "path[%d].start = %d\n", | ||
568 | i, path[i].start); | ||
569 | dev_dbg(sdmmc_dev(host), "path[%d].end = %d\n", | ||
570 | i, path[i].end); | ||
571 | dev_dbg(sdmmc_dev(host), "path[%d].len = %d\n", | ||
572 | i, path[i].len); | ||
573 | dev_dbg(sdmmc_dev(host), "path[%d].mid = %d\n", | ||
574 | i, path[i].mid); | ||
575 | } | ||
576 | |||
577 | finish: | ||
578 | dev_dbg(sdmmc_dev(host), "Final chosen phase: %d\n", final_phase); | ||
579 | return final_phase; | ||
580 | } | ||
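sd_search_final_phase() treats the sample points as a ring: it collects the contiguous runs of working phases, merges the first and last run when they wrap around phase 0, and returns the midpoint of the longest run. A standalone sketch of the same selection rule (plain user-space C, assuming MAX_PHASE is 31, a value not shown in this listing; it returns -1 instead of the driver's 0xFF when no phase works):

#include <stdio.h>
#include <stdint.h>

#define MAX_PHASE 31	/* assumed value for this sketch */

/* Midpoint of the longest run of set bits in a circular phase map. */
static int longest_run_mid(uint32_t map)
{
	int nbits = MAX_PHASE + 1;
	int best_len = 0, best_mid = -1;
	int i, j;

	for (i = 0; i < nbits; i++) {
		int len = 0;

		if (!(map & (1u << i)))
			continue;
		/* Walk forward around the ring, at most nbits steps. */
		for (j = 0; j < nbits; j++) {
			if (map & (1u << ((i + j) % nbits)))
				len++;
			else
				break;
		}
		if (len > best_len) {
			best_len = len;
			best_mid = (i + len / 2) % nbits;
		}
	}
	return best_mid;
}

int main(void)
{
	/* Phases 28..31 and 0..3 pass: the run wraps, its midpoint is 0. */
	printf("final phase = %d\n", longest_run_mid(0xF000000Fu));
	return 0;
}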
581 | |||
582 | static void sd_wait_data_idle(struct realtek_pci_sdmmc *host) | ||
583 | { | ||
584 | int err, i; | ||
585 | u8 val = 0; | ||
586 | |||
587 | for (i = 0; i < 100; i++) { | ||
588 | err = rtsx_pci_read_register(host->pcr, SD_DATA_STATE, &val); | ||
589 | if (val & SD_DATA_IDLE) | ||
590 | return; | ||
591 | |||
592 | udelay(100); | ||
593 | } | ||
594 | } | ||
595 | |||
596 | static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host, | ||
597 | u8 opcode, u8 sample_point) | ||
598 | { | ||
599 | int err; | ||
600 | u8 cmd[5] = {0}; | ||
601 | |||
602 | err = sd_change_phase(host, sample_point); | ||
603 | if (err < 0) | ||
604 | return err; | ||
605 | |||
606 | cmd[0] = 0x40 | opcode; | ||
607 | err = sd_read_data(host, cmd, 0x40, NULL, 0, 100); | ||
608 | if (err < 0) { | ||
609 | /* Wait till SD DATA IDLE */ | ||
610 | sd_wait_data_idle(host); | ||
611 | sd_clear_error(host); | ||
612 | return err; | ||
613 | } | ||
614 | |||
615 | return 0; | ||
616 | } | ||
617 | |||
618 | static int sd_tuning_phase(struct realtek_pci_sdmmc *host, | ||
619 | u8 opcode, u32 *phase_map) | ||
620 | { | ||
621 | int err, i; | ||
622 | u32 raw_phase_map = 0; | ||
623 | |||
624 | for (i = MAX_PHASE; i >= 0; i--) { | ||
625 | err = sd_tuning_rx_cmd(host, opcode, (u8)i); | ||
626 | if (err == 0) | ||
627 | raw_phase_map |= 1 << i; | ||
628 | } | ||
629 | |||
630 | if (phase_map) | ||
631 | *phase_map = raw_phase_map; | ||
632 | |||
633 | return 0; | ||
634 | } | ||
635 | |||
636 | static int sd_tuning_rx(struct realtek_pci_sdmmc *host, u8 opcode) | ||
637 | { | ||
638 | int err, i; | ||
639 | u32 raw_phase_map[RX_TUNING_CNT] = {0}, phase_map; | ||
640 | u8 final_phase; | ||
641 | |||
642 | for (i = 0; i < RX_TUNING_CNT; i++) { | ||
643 | err = sd_tuning_phase(host, opcode, &(raw_phase_map[i])); | ||
644 | if (err < 0) | ||
645 | return err; | ||
646 | |||
647 | if (raw_phase_map[i] == 0) | ||
648 | break; | ||
649 | } | ||
650 | |||
651 | phase_map = 0xFFFFFFFF; | ||
652 | for (i = 0; i < RX_TUNING_CNT; i++) { | ||
653 | dev_dbg(sdmmc_dev(host), "RX raw_phase_map[%d] = 0x%08x\n", | ||
654 | i, raw_phase_map[i]); | ||
655 | phase_map &= raw_phase_map[i]; | ||
656 | } | ||
657 | dev_dbg(sdmmc_dev(host), "RX phase_map = 0x%08x\n", phase_map); | ||
658 | |||
659 | if (phase_map) { | ||
660 | final_phase = sd_search_final_phase(host, phase_map); | ||
661 | if (final_phase == 0xFF) | ||
662 | return -EINVAL; | ||
663 | |||
664 | err = sd_change_phase(host, final_phase); | ||
665 | if (err < 0) | ||
666 | return err; | ||
667 | } else { | ||
668 | return -EINVAL; | ||
669 | } | ||
670 | |||
671 | return 0; | ||
672 | } | ||
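sd_tuning_rx() runs the per-phase scan RX_TUNING_CNT times and ANDs the resulting maps together, so a sample point is kept only if it worked in every round. A toy illustration with hypothetical maps (not driver code):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	/* Hypothetical results of three tuning rounds */
	const uint32_t rounds[3] = { 0x0FF000F0u, 0x0FE001F0u, 0x0FF000E0u };
	uint32_t phase_map = 0xFFFFFFFFu;
	int i;

	for (i = 0; i < 3; i++)
		phase_map &= rounds[i];

	/* Only phases good in all three rounds survive: 0x0FE000E0 */
	printf("phase_map = 0x%08" PRIX32 "\n", phase_map);
	return 0;
}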
673 | |||
674 | static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq) | ||
675 | { | ||
676 | struct realtek_pci_sdmmc *host = mmc_priv(mmc); | ||
677 | struct rtsx_pcr *pcr = host->pcr; | ||
678 | struct mmc_command *cmd = mrq->cmd; | ||
679 | struct mmc_data *data = mrq->data; | ||
680 | unsigned int data_size = 0; | ||
681 | |||
682 | if (host->eject) { | ||
683 | cmd->error = -ENOMEDIUM; | ||
684 | goto finish; | ||
685 | } | ||
686 | |||
687 | mutex_lock(&pcr->pcr_mutex); | ||
688 | |||
689 | rtsx_pci_start_run(pcr); | ||
690 | |||
691 | rtsx_pci_switch_clock(pcr, host->clock, host->ssc_depth, | ||
692 | host->initial_mode, host->double_clk, host->vpclk); | ||
693 | rtsx_pci_write_register(pcr, CARD_SELECT, 0x07, SD_MOD_SEL); | ||
694 | rtsx_pci_write_register(pcr, CARD_SHARE_MODE, | ||
695 | CARD_SHARE_MASK, CARD_SHARE_48_SD); | ||
696 | |||
697 | mutex_lock(&host->host_mutex); | ||
698 | host->mrq = mrq; | ||
699 | mutex_unlock(&host->host_mutex); | ||
700 | |||
701 | if (mrq->data) | ||
702 | data_size = data->blocks * data->blksz; | ||
703 | |||
704 | if (!data_size || mmc_op_multi(cmd->opcode) || | ||
705 | (cmd->opcode == MMC_READ_SINGLE_BLOCK) || | ||
706 | (cmd->opcode == MMC_WRITE_BLOCK)) { | ||
707 | sd_send_cmd_get_rsp(host, cmd); | ||
708 | |||
709 | if (!cmd->error && data_size) { | ||
710 | sd_rw_multi(host, mrq); | ||
711 | |||
712 | if (mmc_op_multi(cmd->opcode) && mrq->stop) | ||
713 | sd_send_cmd_get_rsp(host, mrq->stop); | ||
714 | } | ||
715 | } else { | ||
716 | sd_normal_rw(host, mrq); | ||
717 | } | ||
718 | |||
719 | if (mrq->data) { | ||
720 | if (cmd->error || data->error) | ||
721 | data->bytes_xfered = 0; | ||
722 | else | ||
723 | data->bytes_xfered = data->blocks * data->blksz; | ||
724 | } | ||
725 | |||
726 | mutex_unlock(&pcr->pcr_mutex); | ||
727 | |||
728 | finish: | ||
729 | if (cmd->error) | ||
730 | dev_dbg(sdmmc_dev(host), "cmd->error = %d\n", cmd->error); | ||
731 | |||
732 | mutex_lock(&host->host_mutex); | ||
733 | host->mrq = NULL; | ||
734 | mutex_unlock(&host->host_mutex); | ||
735 | |||
736 | mmc_request_done(mmc, mrq); | ||
737 | } | ||
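sdmmc_request() sends non-data commands, multi-block transfers and single-block reads/writes through sd_send_cmd_get_rsp() and sd_rw_multi(), and falls back to the byte-mode sd_normal_rw() path for every other data command. The opcode test can be written out as below; the numeric values are the standard SD/MMC command indices, and the program is a sketch, not driver code:

#include <stdbool.h>
#include <stdio.h>

/* Standard SD/MMC command indices (CMD17/18/24/25). */
#define MMC_READ_SINGLE_BLOCK		17
#define MMC_READ_MULTIPLE_BLOCK		18
#define MMC_WRITE_BLOCK			24
#define MMC_WRITE_MULTIPLE_BLOCK	25

/* True for the opcodes that take the DMA block path above. */
static bool takes_block_path(unsigned int opcode)
{
	return opcode == MMC_READ_SINGLE_BLOCK ||
	       opcode == MMC_READ_MULTIPLE_BLOCK ||
	       opcode == MMC_WRITE_BLOCK ||
	       opcode == MMC_WRITE_MULTIPLE_BLOCK;
}

int main(void)
{
	printf("CMD18 -> %s\n", takes_block_path(18) ? "sd_rw_multi" : "sd_normal_rw");
	printf("CMD6  -> %s\n", takes_block_path(6) ? "sd_rw_multi" : "sd_normal_rw");
	return 0;
}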
738 | |||
739 | static int sd_set_bus_width(struct realtek_pci_sdmmc *host, | ||
740 | unsigned char bus_width) | ||
741 | { | ||
742 | int err = 0; | ||
743 | u8 width[] = { | ||
744 | [MMC_BUS_WIDTH_1] = SD_BUS_WIDTH_1BIT, | ||
745 | [MMC_BUS_WIDTH_4] = SD_BUS_WIDTH_4BIT, | ||
746 | [MMC_BUS_WIDTH_8] = SD_BUS_WIDTH_8BIT, | ||
747 | }; | ||
748 | |||
749 | if (bus_width <= MMC_BUS_WIDTH_8) | ||
750 | err = rtsx_pci_write_register(host->pcr, SD_CFG1, | ||
751 | 0x03, width[bus_width]); | ||
752 | |||
753 | return err; | ||
754 | } | ||
755 | |||
756 | static int sd_power_on(struct realtek_pci_sdmmc *host) | ||
757 | { | ||
758 | struct rtsx_pcr *pcr = host->pcr; | ||
759 | int err; | ||
760 | |||
761 | rtsx_pci_init_cmd(pcr); | ||
762 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SELECT, 0x07, SD_MOD_SEL); | ||
763 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SHARE_MODE, | ||
764 | CARD_SHARE_MASK, CARD_SHARE_48_SD); | ||
765 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, | ||
766 | SD_CLK_EN, SD_CLK_EN); | ||
767 | err = rtsx_pci_send_cmd(pcr, 100); | ||
768 | if (err < 0) | ||
769 | return err; | ||
770 | |||
771 | err = rtsx_pci_card_pull_ctl_enable(pcr, RTSX_SD_CARD); | ||
772 | if (err < 0) | ||
773 | return err; | ||
774 | |||
775 | err = rtsx_pci_card_power_on(pcr, RTSX_SD_CARD); | ||
776 | if (err < 0) | ||
777 | return err; | ||
778 | |||
779 | err = rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN); | ||
780 | if (err < 0) | ||
781 | return err; | ||
782 | |||
783 | return 0; | ||
784 | } | ||
785 | |||
786 | static int sd_power_off(struct realtek_pci_sdmmc *host) | ||
787 | { | ||
788 | struct rtsx_pcr *pcr = host->pcr; | ||
789 | int err; | ||
790 | |||
791 | rtsx_pci_init_cmd(pcr); | ||
792 | |||
793 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, SD_CLK_EN, 0); | ||
794 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE, SD_OUTPUT_EN, 0); | ||
795 | |||
796 | err = rtsx_pci_send_cmd(pcr, 100); | ||
797 | if (err < 0) | ||
798 | return err; | ||
799 | |||
800 | err = rtsx_pci_card_power_off(pcr, RTSX_SD_CARD); | ||
801 | if (err < 0) | ||
802 | return err; | ||
803 | |||
804 | return rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD); | ||
805 | } | ||
806 | |||
807 | static int sd_set_power_mode(struct realtek_pci_sdmmc *host, | ||
808 | unsigned char power_mode) | ||
809 | { | ||
810 | int err; | ||
811 | |||
812 | if (power_mode == MMC_POWER_OFF) | ||
813 | err = sd_power_off(host); | ||
814 | else | ||
815 | err = sd_power_on(host); | ||
816 | |||
817 | return err; | ||
818 | } | ||
819 | |||
820 | static int sd_set_timing(struct realtek_pci_sdmmc *host, | ||
821 | unsigned char timing, bool *ddr_mode) | ||
822 | { | ||
823 | struct rtsx_pcr *pcr = host->pcr; | ||
824 | int err = 0; | ||
825 | |||
826 | *ddr_mode = false; | ||
827 | |||
828 | rtsx_pci_init_cmd(pcr); | ||
829 | |||
830 | switch (timing) { | ||
831 | case MMC_TIMING_UHS_SDR104: | ||
832 | case MMC_TIMING_UHS_SDR50: | ||
833 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, | ||
834 | 0x0C | SD_ASYNC_FIFO_NOT_RST, | ||
835 | SD_30_MODE | SD_ASYNC_FIFO_NOT_RST); | ||
836 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, | ||
837 | CLK_LOW_FREQ, CLK_LOW_FREQ); | ||
838 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, | ||
839 | CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); | ||
840 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0); | ||
841 | break; | ||
842 | |||
843 | case MMC_TIMING_UHS_DDR50: | ||
844 | *ddr_mode = true; | ||
845 | |||
846 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, | ||
847 | 0x0C | SD_ASYNC_FIFO_NOT_RST, | ||
848 | SD_DDR_MODE | SD_ASYNC_FIFO_NOT_RST); | ||
849 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, | ||
850 | CLK_LOW_FREQ, CLK_LOW_FREQ); | ||
851 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, | ||
852 | CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); | ||
853 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0); | ||
854 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_PUSH_POINT_CTL, | ||
855 | DDR_VAR_TX_CMD_DAT, DDR_VAR_TX_CMD_DAT); | ||
856 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL, | ||
857 | DDR_VAR_RX_DAT | DDR_VAR_RX_CMD, | ||
858 | DDR_VAR_RX_DAT | DDR_VAR_RX_CMD); | ||
859 | break; | ||
860 | |||
861 | case MMC_TIMING_MMC_HS: | ||
862 | case MMC_TIMING_SD_HS: | ||
863 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, | ||
864 | 0x0C, SD_20_MODE); | ||
865 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, | ||
866 | CLK_LOW_FREQ, CLK_LOW_FREQ); | ||
867 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, | ||
868 | CRC_FIX_CLK | SD30_VAR_CLK0 | SAMPLE_VAR_CLK1); | ||
869 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0); | ||
870 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_PUSH_POINT_CTL, | ||
871 | SD20_TX_SEL_MASK, SD20_TX_14_AHEAD); | ||
872 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL, | ||
873 | SD20_RX_SEL_MASK, SD20_RX_14_DELAY); | ||
874 | break; | ||
875 | |||
876 | default: | ||
877 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, | ||
878 | SD_CFG1, 0x0C, SD_20_MODE); | ||
879 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, | ||
880 | CLK_LOW_FREQ, CLK_LOW_FREQ); | ||
881 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, | ||
882 | CRC_FIX_CLK | SD30_VAR_CLK0 | SAMPLE_VAR_CLK1); | ||
883 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0); | ||
884 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, | ||
885 | SD_PUSH_POINT_CTL, 0xFF, 0); | ||
886 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL, | ||
887 | SD20_RX_SEL_MASK, SD20_RX_POS_EDGE); | ||
888 | break; | ||
889 | } | ||
890 | |||
891 | err = rtsx_pci_send_cmd(pcr, 100); | ||
892 | |||
893 | return err; | ||
894 | } | ||
895 | |||
896 | static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | ||
897 | { | ||
898 | struct realtek_pci_sdmmc *host = mmc_priv(mmc); | ||
899 | struct rtsx_pcr *pcr = host->pcr; | ||
900 | |||
901 | if (host->eject) | ||
902 | return; | ||
903 | |||
904 | mutex_lock(&pcr->pcr_mutex); | ||
905 | |||
906 | rtsx_pci_start_run(pcr); | ||
907 | |||
908 | sd_set_bus_width(host, ios->bus_width); | ||
909 | sd_set_power_mode(host, ios->power_mode); | ||
910 | sd_set_timing(host, ios->timing, &host->ddr_mode); | ||
911 | |||
912 | host->vpclk = false; | ||
913 | host->double_clk = true; | ||
914 | |||
915 | switch (ios->timing) { | ||
916 | case MMC_TIMING_UHS_SDR104: | ||
917 | case MMC_TIMING_UHS_SDR50: | ||
918 | host->ssc_depth = RTSX_SSC_DEPTH_2M; | ||
919 | host->vpclk = true; | ||
920 | host->double_clk = false; | ||
921 | break; | ||
922 | case MMC_TIMING_UHS_DDR50: | ||
923 | case MMC_TIMING_UHS_SDR25: | ||
924 | host->ssc_depth = RTSX_SSC_DEPTH_1M; | ||
925 | break; | ||
926 | default: | ||
927 | host->ssc_depth = RTSX_SSC_DEPTH_500K; | ||
928 | break; | ||
929 | } | ||
930 | |||
931 | host->initial_mode = (ios->clock <= 1000000) ? true : false; | ||
932 | |||
933 | host->clock = ios->clock; | ||
934 | rtsx_pci_switch_clock(pcr, ios->clock, host->ssc_depth, | ||
935 | host->initial_mode, host->double_clk, host->vpclk); | ||
936 | |||
937 | mutex_unlock(&pcr->pcr_mutex); | ||
938 | } | ||
939 | |||
940 | static int sdmmc_get_ro(struct mmc_host *mmc) | ||
941 | { | ||
942 | struct realtek_pci_sdmmc *host = mmc_priv(mmc); | ||
943 | struct rtsx_pcr *pcr = host->pcr; | ||
944 | int ro = 0; | ||
945 | u32 val; | ||
946 | |||
947 | if (host->eject) | ||
948 | return -ENOMEDIUM; | ||
949 | |||
950 | mutex_lock(&pcr->pcr_mutex); | ||
951 | |||
952 | rtsx_pci_start_run(pcr); | ||
953 | |||
954 | /* Check SD mechanical write-protect switch */ | ||
955 | val = rtsx_pci_readl(pcr, RTSX_BIPR); | ||
956 | dev_dbg(sdmmc_dev(host), "%s: RTSX_BIPR = 0x%08x\n", __func__, val); | ||
957 | if (val & SD_WRITE_PROTECT) | ||
958 | ro = 1; | ||
959 | |||
960 | mutex_unlock(&pcr->pcr_mutex); | ||
961 | |||
962 | return ro; | ||
963 | } | ||
964 | |||
965 | static int sdmmc_get_cd(struct mmc_host *mmc) | ||
966 | { | ||
967 | struct realtek_pci_sdmmc *host = mmc_priv(mmc); | ||
968 | struct rtsx_pcr *pcr = host->pcr; | ||
969 | int cd = 0; | ||
970 | u32 val; | ||
971 | |||
972 | if (host->eject) | ||
973 | return -ENOMEDIUM; | ||
974 | |||
975 | mutex_lock(&pcr->pcr_mutex); | ||
976 | |||
977 | rtsx_pci_start_run(pcr); | ||
978 | |||
979 | /* Check SD card detect */ | ||
980 | val = rtsx_pci_card_exist(pcr); | ||
981 | dev_dbg(sdmmc_dev(host), "%s: RTSX_BIPR = 0x%08x\n", __func__, val); | ||
982 | if (val & SD_EXIST) | ||
983 | cd = 1; | ||
984 | |||
985 | mutex_unlock(&pcr->pcr_mutex); | ||
986 | |||
987 | return cd; | ||
988 | } | ||
989 | |||
990 | static int sd_wait_voltage_stable_1(struct realtek_pci_sdmmc *host) | ||
991 | { | ||
992 | struct rtsx_pcr *pcr = host->pcr; | ||
993 | int err; | ||
994 | u8 stat; | ||
995 | |||
996 | /* See the Signal Voltage Switch Sequence in the SD spec. | ||
997 | * Wait for a period of time so that the card can drive SD_CMD and | ||
998 | * SD_DAT[3:0] low after it has sent back the CMD11 response. | ||
999 | */ | ||
1000 | mdelay(1); | ||
1001 | |||
1002 | /* SD_CMD and SD_DAT[3:0] should now be driven low by the card; | ||
1003 | * if any of SD_CMD, SD_DAT[3:0] is still high, abort the | ||
1004 | * voltage switch sequence. | ||
1005 | */ | ||
1006 | err = rtsx_pci_read_register(pcr, SD_BUS_STAT, &stat); | ||
1007 | if (err < 0) | ||
1008 | return err; | ||
1009 | |||
1010 | if (stat & (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS | | ||
1011 | SD_DAT1_STATUS | SD_DAT0_STATUS)) | ||
1012 | return -EINVAL; | ||
1013 | |||
1014 | /* Stop toggling the SD clock */ | ||
1015 | err = rtsx_pci_write_register(pcr, SD_BUS_STAT, | ||
1016 | 0xFF, SD_CLK_FORCE_STOP); | ||
1017 | if (err < 0) | ||
1018 | return err; | ||
1019 | |||
1020 | return 0; | ||
1021 | } | ||
1022 | |||
1023 | static int sd_wait_voltage_stable_2(struct realtek_pci_sdmmc *host) | ||
1024 | { | ||
1025 | struct rtsx_pcr *pcr = host->pcr; | ||
1026 | int err; | ||
1027 | u8 stat, mask, val; | ||
1028 | |||
1029 | /* Wait for the card's 1.8V regulator output to become stable */ | ||
1030 | msleep(50); | ||
1031 | |||
1032 | /* Toggle SD clock again */ | ||
1033 | err = rtsx_pci_write_register(pcr, SD_BUS_STAT, 0xFF, SD_CLK_TOGGLE_EN); | ||
1034 | if (err < 0) | ||
1035 | return err; | ||
1036 | |||
1037 | /* Wait for a period of time so that the card can drive | ||
1038 | * SD_DAT[3:0] to high at 1.8V | ||
1039 | */ | ||
1040 | msleep(20); | ||
1041 | |||
1042 | /* SD_CMD, SD_DAT[3:0] should be pulled high by host */ | ||
1043 | err = rtsx_pci_read_register(pcr, SD_BUS_STAT, &stat); | ||
1044 | if (err < 0) | ||
1045 | return err; | ||
1046 | |||
1047 | mask = SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS | | ||
1048 | SD_DAT1_STATUS | SD_DAT0_STATUS; | ||
1049 | val = SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS | | ||
1050 | SD_DAT1_STATUS | SD_DAT0_STATUS; | ||
1051 | if ((stat & mask) != val) { | ||
1052 | dev_dbg(sdmmc_dev(host), | ||
1053 | "%s: SD_BUS_STAT = 0x%x\n", __func__, stat); | ||
1054 | rtsx_pci_write_register(pcr, SD_BUS_STAT, | ||
1055 | SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0); | ||
1056 | rtsx_pci_write_register(pcr, CARD_CLK_EN, 0xFF, 0); | ||
1057 | return -EINVAL; | ||
1058 | } | ||
1059 | |||
1060 | return 0; | ||
1061 | } | ||
1062 | |||
1063 | static int sd_change_bank_voltage(struct realtek_pci_sdmmc *host, u8 voltage) | ||
1064 | { | ||
1065 | struct rtsx_pcr *pcr = host->pcr; | ||
1066 | int err; | ||
1067 | |||
1068 | if (voltage == SD_IO_3V3) { | ||
1069 | err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24); | ||
1070 | if (err < 0) | ||
1071 | return err; | ||
1072 | } else if (voltage == SD_IO_1V8) { | ||
1073 | err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24); | ||
1074 | if (err < 0) | ||
1075 | return err; | ||
1076 | } else { | ||
1077 | return -EINVAL; | ||
1078 | } | ||
1079 | |||
1080 | return 0; | ||
1081 | } | ||
1082 | |||
1083 | static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios) | ||
1084 | { | ||
1085 | struct realtek_pci_sdmmc *host = mmc_priv(mmc); | ||
1086 | struct rtsx_pcr *pcr = host->pcr; | ||
1087 | int err = 0; | ||
1088 | u8 voltage; | ||
1089 | |||
1090 | dev_dbg(sdmmc_dev(host), "%s: signal_voltage = %d\n", | ||
1091 | __func__, ios->signal_voltage); | ||
1092 | |||
1093 | if (host->eject) | ||
1094 | return -ENOMEDIUM; | ||
1095 | |||
1096 | mutex_lock(&pcr->pcr_mutex); | ||
1097 | |||
1098 | rtsx_pci_start_run(pcr); | ||
1099 | |||
1100 | if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) | ||
1101 | voltage = SD_IO_3V3; | ||
1102 | else | ||
1103 | voltage = SD_IO_1V8; | ||
1104 | |||
1105 | if (voltage == SD_IO_1V8) { | ||
1106 | err = rtsx_pci_write_register(pcr, | ||
1107 | SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B); | ||
1108 | if (err < 0) | ||
1109 | goto out; | ||
1110 | |||
1111 | err = sd_wait_voltage_stable_1(host); | ||
1112 | if (err < 0) | ||
1113 | goto out; | ||
1114 | } | ||
1115 | |||
1116 | err = sd_change_bank_voltage(host, voltage); | ||
1117 | if (err < 0) | ||
1118 | goto out; | ||
1119 | |||
1120 | if (voltage == SD_IO_1V8) { | ||
1121 | err = sd_wait_voltage_stable_2(host); | ||
1122 | if (err < 0) | ||
1123 | goto out; | ||
1124 | } | ||
1125 | |||
1126 | /* Stop toggling the SD clock while the bus is idle */ | ||
1127 | err = rtsx_pci_write_register(pcr, SD_BUS_STAT, | ||
1128 | SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0); | ||
1129 | |||
1130 | out: | ||
1131 | mutex_unlock(&pcr->pcr_mutex); | ||
1132 | |||
1133 | return err; | ||
1134 | } | ||
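sdmmc_switch_voltage() follows the SD-spec signal voltage switch sequence: for 1.8V it first reconfigures the pad drive, waits for the card to pull CMD/DAT low and gates the clock, switches the I/O rail, then re-enables the clock and waits for the lines to come back high. A sketch of just that ordering; the step functions here are stand-ins that print what the driver does at each point, not the driver helpers:

#include <stdio.h>

static void select_drive_type_b(void)     { printf("1. select 1.8V pad drive (driver type B)\n"); }
static void wait_card_drives_low(void)    { printf("2. wait for card to drive CMD/DAT low, force-stop SD clock\n"); }
static void switch_io_voltage(void)       { printf("3. switch the host I/O rail to 1.8V\n"); }
static void wait_card_releases_lines(void){ printf("4. re-enable SD clock, wait for CMD/DAT to go high\n"); }
static void stop_clock_when_idle(void)    { printf("5. stop toggling the SD clock while the bus is idle\n"); }

int main(void)
{
	select_drive_type_b();
	wait_card_drives_low();
	switch_io_voltage();
	wait_card_releases_lines();
	stop_clock_when_idle();
	return 0;
}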
1135 | |||
1136 | static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode) | ||
1137 | { | ||
1138 | struct realtek_pci_sdmmc *host = mmc_priv(mmc); | ||
1139 | struct rtsx_pcr *pcr = host->pcr; | ||
1140 | int err = 0; | ||
1141 | |||
1142 | if (host->eject) | ||
1143 | return -ENOMEDIUM; | ||
1144 | |||
1145 | mutex_lock(&pcr->pcr_mutex); | ||
1146 | |||
1147 | rtsx_pci_start_run(pcr); | ||
1148 | |||
1149 | if (!host->ddr_mode) | ||
1150 | err = sd_tuning_rx(host, MMC_SEND_TUNING_BLOCK); | ||
1151 | |||
1152 | mutex_unlock(&pcr->pcr_mutex); | ||
1153 | |||
1154 | return err; | ||
1155 | } | ||
1156 | |||
1157 | static const struct mmc_host_ops realtek_pci_sdmmc_ops = { | ||
1158 | .request = sdmmc_request, | ||
1159 | .set_ios = sdmmc_set_ios, | ||
1160 | .get_ro = sdmmc_get_ro, | ||
1161 | .get_cd = sdmmc_get_cd, | ||
1162 | .start_signal_voltage_switch = sdmmc_switch_voltage, | ||
1163 | .execute_tuning = sdmmc_execute_tuning, | ||
1164 | }; | ||
1165 | |||
1166 | #ifdef CONFIG_PM | ||
1167 | static int rtsx_pci_sdmmc_suspend(struct platform_device *pdev, | ||
1168 | pm_message_t state) | ||
1169 | { | ||
1170 | struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev); | ||
1171 | struct mmc_host *mmc = host->mmc; | ||
1172 | int err; | ||
1173 | |||
1174 | dev_dbg(sdmmc_dev(host), "--> %s\n", __func__); | ||
1175 | |||
1176 | err = mmc_suspend_host(mmc); | ||
1177 | if (err) | ||
1178 | return err; | ||
1179 | |||
1180 | return 0; | ||
1181 | } | ||
1182 | |||
1183 | static int rtsx_pci_sdmmc_resume(struct platform_device *pdev) | ||
1184 | { | ||
1185 | struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev); | ||
1186 | struct mmc_host *mmc = host->mmc; | ||
1187 | |||
1188 | dev_dbg(sdmmc_dev(host), "--> %s\n", __func__); | ||
1189 | |||
1190 | return mmc_resume_host(mmc); | ||
1191 | } | ||
1192 | #else /* CONFIG_PM */ | ||
1193 | #define rtsx_pci_sdmmc_suspend NULL | ||
1194 | #define rtsx_pci_sdmmc_resume NULL | ||
1195 | #endif /* CONFIG_PM */ | ||
1196 | |||
1197 | static void init_extra_caps(struct realtek_pci_sdmmc *host) | ||
1198 | { | ||
1199 | struct mmc_host *mmc = host->mmc; | ||
1200 | struct rtsx_pcr *pcr = host->pcr; | ||
1201 | |||
1202 | dev_dbg(sdmmc_dev(host), "pcr->extra_caps = 0x%x\n", pcr->extra_caps); | ||
1203 | |||
1204 | if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50) | ||
1205 | mmc->caps |= MMC_CAP_UHS_SDR50; | ||
1206 | if (pcr->extra_caps & EXTRA_CAPS_SD_SDR104) | ||
1207 | mmc->caps |= MMC_CAP_UHS_SDR104; | ||
1208 | if (pcr->extra_caps & EXTRA_CAPS_SD_DDR50) | ||
1209 | mmc->caps |= MMC_CAP_UHS_DDR50; | ||
1210 | if (pcr->extra_caps & EXTRA_CAPS_MMC_HSDDR) | ||
1211 | mmc->caps |= MMC_CAP_1_8V_DDR; | ||
1212 | if (pcr->extra_caps & EXTRA_CAPS_MMC_8BIT) | ||
1213 | mmc->caps |= MMC_CAP_8_BIT_DATA; | ||
1214 | } | ||
1215 | |||
1216 | static void realtek_init_host(struct realtek_pci_sdmmc *host) | ||
1217 | { | ||
1218 | struct mmc_host *mmc = host->mmc; | ||
1219 | |||
1220 | mmc->f_min = 250000; | ||
1221 | mmc->f_max = 208000000; | ||
1222 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195; | ||
1223 | mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED | | ||
1224 | MMC_CAP_MMC_HIGHSPEED | MMC_CAP_BUS_WIDTH_TEST | | ||
1225 | MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; | ||
1226 | mmc->max_current_330 = 400; | ||
1227 | mmc->max_current_180 = 800; | ||
1228 | mmc->ops = &realtek_pci_sdmmc_ops; | ||
1229 | |||
1230 | init_extra_caps(host); | ||
1231 | |||
1232 | mmc->max_segs = 256; | ||
1233 | mmc->max_seg_size = 65536; | ||
1234 | mmc->max_blk_size = 512; | ||
1235 | mmc->max_blk_count = 65535; | ||
1236 | mmc->max_req_size = 524288; | ||
1237 | } | ||
1238 | |||
1239 | static void rtsx_pci_sdmmc_card_event(struct platform_device *pdev) | ||
1240 | { | ||
1241 | struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev); | ||
1242 | |||
1243 | mmc_detect_change(host->mmc, 0); | ||
1244 | } | ||
1245 | |||
1246 | static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev) | ||
1247 | { | ||
1248 | struct mmc_host *mmc; | ||
1249 | struct realtek_pci_sdmmc *host; | ||
1250 | struct rtsx_pcr *pcr; | ||
1251 | struct pcr_handle *handle = pdev->dev.platform_data; | ||
1252 | |||
1253 | if (!handle) | ||
1254 | return -ENXIO; | ||
1255 | |||
1256 | pcr = handle->pcr; | ||
1257 | if (!pcr) | ||
1258 | return -ENXIO; | ||
1259 | |||
1260 | dev_dbg(&(pdev->dev), ": Realtek PCI-E SDMMC controller found\n"); | ||
1261 | |||
1262 | mmc = mmc_alloc_host(sizeof(*host), &pdev->dev); | ||
1263 | if (!mmc) | ||
1264 | return -ENOMEM; | ||
1265 | |||
1266 | host = mmc_priv(mmc); | ||
1267 | host->pcr = pcr; | ||
1268 | host->mmc = mmc; | ||
1269 | host->pdev = pdev; | ||
1270 | platform_set_drvdata(pdev, host); | ||
1271 | pcr->slots[RTSX_SD_CARD].p_dev = pdev; | ||
1272 | pcr->slots[RTSX_SD_CARD].card_event = rtsx_pci_sdmmc_card_event; | ||
1273 | |||
1274 | mutex_init(&host->host_mutex); | ||
1275 | |||
1276 | realtek_init_host(host); | ||
1277 | |||
1278 | mmc_add_host(mmc); | ||
1279 | |||
1280 | return 0; | ||
1281 | } | ||
1282 | |||
1283 | static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev) | ||
1284 | { | ||
1285 | struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev); | ||
1286 | struct rtsx_pcr *pcr; | ||
1287 | struct mmc_host *mmc; | ||
1288 | |||
1289 | if (!host) | ||
1290 | return 0; | ||
1291 | |||
1292 | pcr = host->pcr; | ||
1293 | pcr->slots[RTSX_SD_CARD].p_dev = NULL; | ||
1294 | pcr->slots[RTSX_SD_CARD].card_event = NULL; | ||
1295 | mmc = host->mmc; | ||
1296 | host->eject = true; | ||
1297 | |||
1298 | mutex_lock(&host->host_mutex); | ||
1299 | if (host->mrq) { | ||
1300 | dev_dbg(&(pdev->dev), | ||
1301 | "%s: Controller removed during transfer\n", | ||
1302 | mmc_hostname(mmc)); | ||
1303 | |||
1304 | rtsx_pci_complete_unfinished_transfer(pcr); | ||
1305 | |||
1306 | host->mrq->cmd->error = -ENOMEDIUM; | ||
1307 | if (host->mrq->stop) | ||
1308 | host->mrq->stop->error = -ENOMEDIUM; | ||
1309 | mmc_request_done(mmc, host->mrq); | ||
1310 | } | ||
1311 | mutex_unlock(&host->host_mutex); | ||
1312 | |||
1313 | mmc_remove_host(mmc); | ||
1314 | mmc_free_host(mmc); | ||
1315 | |||
1316 | platform_set_drvdata(pdev, NULL); | ||
1317 | |||
1318 | dev_dbg(&(pdev->dev), | ||
1319 | ": Realtek PCI-E SDMMC controller has been removed\n"); | ||
1320 | |||
1321 | return 0; | ||
1322 | } | ||
1323 | |||
1324 | static struct platform_device_id rtsx_pci_sdmmc_ids[] = { | ||
1325 | { | ||
1326 | .name = DRV_NAME_RTSX_PCI_SDMMC, | ||
1327 | }, { | ||
1328 | /* sentinel */ | ||
1329 | } | ||
1330 | }; | ||
1331 | MODULE_DEVICE_TABLE(platform, rtsx_pci_sdmmc_ids); | ||
1332 | |||
1333 | static struct platform_driver rtsx_pci_sdmmc_driver = { | ||
1334 | .probe = rtsx_pci_sdmmc_drv_probe, | ||
1335 | .remove = rtsx_pci_sdmmc_drv_remove, | ||
1336 | .id_table = rtsx_pci_sdmmc_ids, | ||
1337 | .suspend = rtsx_pci_sdmmc_suspend, | ||
1338 | .resume = rtsx_pci_sdmmc_resume, | ||
1339 | .driver = { | ||
1340 | .owner = THIS_MODULE, | ||
1341 | .name = DRV_NAME_RTSX_PCI_SDMMC, | ||
1342 | }, | ||
1343 | }; | ||
1344 | module_platform_driver(rtsx_pci_sdmmc_driver); | ||
1345 | |||
1346 | MODULE_LICENSE("GPL"); | ||
1347 | MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>"); | ||
1348 | MODULE_DESCRIPTION("Realtek PCI-E SD/MMC Card Host Driver"); | ||
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 63fb265e0da..a04f87d7ee3 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c | |||
@@ -26,8 +26,9 @@ | |||
26 | #include <mach/dma.h> | 26 | #include <mach/dma.h> |
27 | 27 | ||
28 | #include <mach/regs-sdi.h> | 28 | #include <mach/regs-sdi.h> |
29 | #include <mach/regs-gpio.h> | ||
29 | 30 | ||
30 | #include <linux/platform_data/mmc-s3cmci.h> | 31 | #include <plat/mci.h> |
31 | 32 | ||
32 | #include "s3cmci.h" | 33 | #include "s3cmci.h" |
33 | 34 | ||
@@ -246,7 +247,7 @@ static void s3cmci_check_sdio_irq(struct s3cmci_host *host) | |||
246 | { | 247 | { |
247 | if (host->sdio_irqen) { | 248 | if (host->sdio_irqen) { |
248 | if (gpio_get_value(S3C2410_GPE(8)) == 0) { | 249 | if (gpio_get_value(S3C2410_GPE(8)) == 0) { |
249 | pr_debug("%s: signalling irq\n", __func__); | 250 | printk(KERN_DEBUG "%s: signalling irq\n", __func__); |
250 | mmc_signal_sdio_irq(host->mmc); | 251 | mmc_signal_sdio_irq(host->mmc); |
251 | } | 252 | } |
252 | } | 253 | } |
@@ -343,7 +344,7 @@ static void s3cmci_disable_irq(struct s3cmci_host *host, bool transfer) | |||
343 | 344 | ||
344 | local_irq_save(flags); | 345 | local_irq_save(flags); |
345 | 346 | ||
346 | /* pr_debug("%s: transfer %d\n", __func__, transfer); */ | 347 | //printk(KERN_DEBUG "%s: transfer %d\n", __func__, transfer); |
347 | 348 | ||
348 | host->irq_disabled = transfer; | 349 | host->irq_disabled = transfer; |
349 | 350 | ||
@@ -912,9 +913,9 @@ request_done: | |||
912 | } | 913 | } |
913 | 914 | ||
914 | static void s3cmci_dma_setup(struct s3cmci_host *host, | 915 | static void s3cmci_dma_setup(struct s3cmci_host *host, |
915 | enum dma_data_direction source) | 916 | enum s3c2410_dmasrc source) |
916 | { | 917 | { |
917 | static enum dma_data_direction last_source = -1; | 918 | static enum s3c2410_dmasrc last_source = -1; |
918 | static int setup_ok; | 919 | static int setup_ok; |
919 | 920 | ||
920 | if (last_source == source) | 921 | if (last_source == source) |
@@ -1086,7 +1087,7 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data) | |||
1086 | 1087 | ||
1087 | BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR); | 1088 | BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR); |
1088 | 1089 | ||
1089 | s3cmci_dma_setup(host, rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | 1090 | s3cmci_dma_setup(host, rw ? S3C2410_DMASRC_MEM : S3C2410_DMASRC_HW); |
1090 | s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); | 1091 | s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); |
1091 | 1092 | ||
1092 | dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, | 1093 | dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, |
@@ -1236,9 +1237,12 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
1236 | switch (ios->power_mode) { | 1237 | switch (ios->power_mode) { |
1237 | case MMC_POWER_ON: | 1238 | case MMC_POWER_ON: |
1238 | case MMC_POWER_UP: | 1239 | case MMC_POWER_UP: |
1239 | /* Configure GPE5...GPE10 pins in SD mode */ | 1240 | s3c2410_gpio_cfgpin(S3C2410_GPE(5), S3C2410_GPE5_SDCLK); |
1240 | s3c_gpio_cfgall_range(S3C2410_GPE(5), 6, S3C_GPIO_SFN(2), | 1241 | s3c2410_gpio_cfgpin(S3C2410_GPE(6), S3C2410_GPE6_SDCMD); |
1241 | S3C_GPIO_PULL_NONE); | 1242 | s3c2410_gpio_cfgpin(S3C2410_GPE(7), S3C2410_GPE7_SDDAT0); |
1243 | s3c2410_gpio_cfgpin(S3C2410_GPE(8), S3C2410_GPE8_SDDAT1); | ||
1244 | s3c2410_gpio_cfgpin(S3C2410_GPE(9), S3C2410_GPE9_SDDAT2); | ||
1245 | s3c2410_gpio_cfgpin(S3C2410_GPE(10), S3C2410_GPE10_SDDAT3); | ||
1242 | 1246 | ||
1243 | if (host->pdata->set_power) | 1247 | if (host->pdata->set_power) |
1244 | host->pdata->set_power(ios->power_mode, ios->vdd); | 1248 | host->pdata->set_power(ios->power_mode, ios->vdd); |
@@ -1540,7 +1544,7 @@ static inline void s3cmci_debugfs_remove(struct s3cmci_host *host) { } | |||
1540 | 1544 | ||
1541 | #endif /* CONFIG_DEBUG_FS */ | 1545 | #endif /* CONFIG_DEBUG_FS */ |
1542 | 1546 | ||
1543 | static int s3cmci_probe(struct platform_device *pdev) | 1547 | static int __devinit s3cmci_probe(struct platform_device *pdev) |
1544 | { | 1548 | { |
1545 | struct s3cmci_host *host; | 1549 | struct s3cmci_host *host; |
1546 | struct mmc_host *mmc; | 1550 | struct mmc_host *mmc; |
@@ -1602,7 +1606,7 @@ static int s3cmci_probe(struct platform_device *pdev) | |||
1602 | host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1606 | host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1603 | if (!host->mem) { | 1607 | if (!host->mem) { |
1604 | dev_err(&pdev->dev, | 1608 | dev_err(&pdev->dev, |
1605 | "failed to get io memory region resource.\n"); | 1609 | "failed to get io memory region resouce.\n"); |
1606 | 1610 | ||
1607 | ret = -ENOENT; | 1611 | ret = -ENOENT; |
1608 | goto probe_free_gpio; | 1612 | goto probe_free_gpio; |
@@ -1626,7 +1630,7 @@ static int s3cmci_probe(struct platform_device *pdev) | |||
1626 | 1630 | ||
1627 | host->irq = platform_get_irq(pdev, 0); | 1631 | host->irq = platform_get_irq(pdev, 0); |
1628 | if (host->irq == 0) { | 1632 | if (host->irq == 0) { |
1629 | dev_err(&pdev->dev, "failed to get interrupt resource.\n"); | 1633 | dev_err(&pdev->dev, "failed to get interrupt resouce.\n"); |
1630 | ret = -EINVAL; | 1634 | ret = -EINVAL; |
1631 | goto probe_iounmap; | 1635 | goto probe_iounmap; |
1632 | } | 1636 | } |
@@ -1819,7 +1823,7 @@ static void s3cmci_shutdown(struct platform_device *pdev) | |||
1819 | clk_disable(host->clk); | 1823 | clk_disable(host->clk); |
1820 | } | 1824 | } |
1821 | 1825 | ||
1822 | static int s3cmci_remove(struct platform_device *pdev) | 1826 | static int __devexit s3cmci_remove(struct platform_device *pdev) |
1823 | { | 1827 | { |
1824 | struct mmc_host *mmc = platform_get_drvdata(pdev); | 1828 | struct mmc_host *mmc = platform_get_drvdata(pdev); |
1825 | struct s3cmci_host *host = mmc_priv(mmc); | 1829 | struct s3cmci_host *host = mmc_priv(mmc); |
@@ -1906,11 +1910,22 @@ static struct platform_driver s3cmci_driver = { | |||
1906 | }, | 1910 | }, |
1907 | .id_table = s3cmci_driver_ids, | 1911 | .id_table = s3cmci_driver_ids, |
1908 | .probe = s3cmci_probe, | 1912 | .probe = s3cmci_probe, |
1909 | .remove = s3cmci_remove, | 1913 | .remove = __devexit_p(s3cmci_remove), |
1910 | .shutdown = s3cmci_shutdown, | 1914 | .shutdown = s3cmci_shutdown, |
1911 | }; | 1915 | }; |
1912 | 1916 | ||
1913 | module_platform_driver(s3cmci_driver); | 1917 | static int __init s3cmci_init(void) |
1918 | { | ||
1919 | return platform_driver_register(&s3cmci_driver); | ||
1920 | } | ||
1921 | |||
1922 | static void __exit s3cmci_exit(void) | ||
1923 | { | ||
1924 | platform_driver_unregister(&s3cmci_driver); | ||
1925 | } | ||
1926 | |||
1927 | module_init(s3cmci_init); | ||
1928 | module_exit(s3cmci_exit); | ||
1914 | 1929 | ||
1915 | MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver"); | 1930 | MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver"); |
1916 | MODULE_LICENSE("GPL v2"); | 1931 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c deleted file mode 100644 index 2592dddbd96..00000000000 --- a/drivers/mmc/host/sdhci-acpi.c +++ /dev/null | |||
@@ -1,312 +0,0 @@ | |||
1 | /* | ||
2 | * Secure Digital Host Controller Interface ACPI driver. | ||
3 | * | ||
4 | * Copyright (c) 2012, Intel Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along with | ||
16 | * this program; if not, write to the Free Software Foundation, Inc., | ||
17 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | #include <linux/init.h> | ||
22 | #include <linux/export.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/device.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | #include <linux/ioport.h> | ||
27 | #include <linux/io.h> | ||
28 | #include <linux/dma-mapping.h> | ||
29 | #include <linux/compiler.h> | ||
30 | #include <linux/stddef.h> | ||
31 | #include <linux/bitops.h> | ||
32 | #include <linux/types.h> | ||
33 | #include <linux/err.h> | ||
34 | #include <linux/interrupt.h> | ||
35 | #include <linux/acpi.h> | ||
36 | #include <linux/pm.h> | ||
37 | #include <linux/pm_runtime.h> | ||
38 | |||
39 | #include <linux/mmc/host.h> | ||
40 | #include <linux/mmc/pm.h> | ||
41 | #include <linux/mmc/sdhci.h> | ||
42 | |||
43 | #include "sdhci.h" | ||
44 | |||
45 | enum { | ||
46 | SDHCI_ACPI_SD_CD = BIT(0), | ||
47 | SDHCI_ACPI_RUNTIME_PM = BIT(1), | ||
48 | }; | ||
49 | |||
50 | struct sdhci_acpi_chip { | ||
51 | const struct sdhci_ops *ops; | ||
52 | unsigned int quirks; | ||
53 | unsigned int quirks2; | ||
54 | unsigned long caps; | ||
55 | unsigned int caps2; | ||
56 | mmc_pm_flag_t pm_caps; | ||
57 | }; | ||
58 | |||
59 | struct sdhci_acpi_slot { | ||
60 | const struct sdhci_acpi_chip *chip; | ||
61 | unsigned int quirks; | ||
62 | unsigned int quirks2; | ||
63 | unsigned long caps; | ||
64 | unsigned int caps2; | ||
65 | mmc_pm_flag_t pm_caps; | ||
66 | unsigned int flags; | ||
67 | }; | ||
68 | |||
69 | struct sdhci_acpi_host { | ||
70 | struct sdhci_host *host; | ||
71 | const struct sdhci_acpi_slot *slot; | ||
72 | struct platform_device *pdev; | ||
73 | bool use_runtime_pm; | ||
74 | }; | ||
75 | |||
76 | static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag) | ||
77 | { | ||
78 | return c->slot && (c->slot->flags & flag); | ||
79 | } | ||
80 | |||
81 | static int sdhci_acpi_enable_dma(struct sdhci_host *host) | ||
82 | { | ||
83 | return 0; | ||
84 | } | ||
85 | |||
86 | static const struct sdhci_ops sdhci_acpi_ops_dflt = { | ||
87 | .enable_dma = sdhci_acpi_enable_dma, | ||
88 | }; | ||
89 | |||
90 | static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = { | ||
91 | .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, | ||
92 | .caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD, | ||
93 | .flags = SDHCI_ACPI_RUNTIME_PM, | ||
94 | .pm_caps = MMC_PM_KEEP_POWER, | ||
95 | }; | ||
96 | |||
97 | static const struct acpi_device_id sdhci_acpi_ids[] = { | ||
98 | { "INT33C6", (kernel_ulong_t)&sdhci_acpi_slot_int_sdio }, | ||
99 | { "PNP0D40" }, | ||
100 | { }, | ||
101 | }; | ||
102 | MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids); | ||
103 | |||
104 | static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid) | ||
105 | { | ||
106 | const struct acpi_device_id *id; | ||
107 | |||
108 | for (id = sdhci_acpi_ids; id->id[0]; id++) | ||
109 | if (!strcmp(id->id, hid)) | ||
110 | return (const struct sdhci_acpi_slot *)id->driver_data; | ||
111 | return NULL; | ||
112 | } | ||
113 | |||
114 | static int sdhci_acpi_probe(struct platform_device *pdev) | ||
115 | { | ||
116 | struct device *dev = &pdev->dev; | ||
117 | acpi_handle handle = ACPI_HANDLE(dev); | ||
118 | struct acpi_device *device; | ||
119 | struct sdhci_acpi_host *c; | ||
120 | struct sdhci_host *host; | ||
121 | struct resource *iomem; | ||
122 | resource_size_t len; | ||
123 | const char *hid; | ||
124 | int err; | ||
125 | |||
126 | if (acpi_bus_get_device(handle, &device)) | ||
127 | return -ENODEV; | ||
128 | |||
129 | if (acpi_bus_get_status(device) || !device->status.present) | ||
130 | return -ENODEV; | ||
131 | |||
132 | hid = acpi_device_hid(device); | ||
133 | |||
134 | iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
135 | if (!iomem) | ||
136 | return -ENOMEM; | ||
137 | |||
138 | len = resource_size(iomem); | ||
139 | if (len < 0x100) | ||
140 | dev_err(dev, "Invalid iomem size!\n"); | ||
141 | |||
142 | if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev))) | ||
143 | return -ENOMEM; | ||
144 | |||
145 | host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host)); | ||
146 | if (IS_ERR(host)) | ||
147 | return PTR_ERR(host); | ||
148 | |||
149 | c = sdhci_priv(host); | ||
150 | c->host = host; | ||
151 | c->slot = sdhci_acpi_get_slot(hid); | ||
152 | c->pdev = pdev; | ||
153 | c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM); | ||
154 | |||
155 | platform_set_drvdata(pdev, c); | ||
156 | |||
157 | host->hw_name = "ACPI"; | ||
158 | host->ops = &sdhci_acpi_ops_dflt; | ||
159 | host->irq = platform_get_irq(pdev, 0); | ||
160 | |||
161 | host->ioaddr = devm_ioremap_nocache(dev, iomem->start, | ||
162 | resource_size(iomem)); | ||
163 | if (host->ioaddr == NULL) { | ||
164 | err = -ENOMEM; | ||
165 | goto err_free; | ||
166 | } | ||
167 | |||
168 | if (!dev->dma_mask) { | ||
169 | u64 dma_mask; | ||
170 | |||
171 | if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) { | ||
172 | /* 64-bit DMA is not supported at present */ | ||
173 | dma_mask = DMA_BIT_MASK(32); | ||
174 | } else { | ||
175 | dma_mask = DMA_BIT_MASK(32); | ||
176 | } | ||
177 | |||
178 | dev->dma_mask = &dev->coherent_dma_mask; | ||
179 | dev->coherent_dma_mask = dma_mask; | ||
180 | } | ||
181 | |||
182 | if (c->slot) { | ||
183 | if (c->slot->chip) { | ||
184 | host->ops = c->slot->chip->ops; | ||
185 | host->quirks |= c->slot->chip->quirks; | ||
186 | host->quirks2 |= c->slot->chip->quirks2; | ||
187 | host->mmc->caps |= c->slot->chip->caps; | ||
188 | host->mmc->caps2 |= c->slot->chip->caps2; | ||
189 | host->mmc->pm_caps |= c->slot->chip->pm_caps; | ||
190 | } | ||
191 | host->quirks |= c->slot->quirks; | ||
192 | host->quirks2 |= c->slot->quirks2; | ||
193 | host->mmc->caps |= c->slot->caps; | ||
194 | host->mmc->caps2 |= c->slot->caps2; | ||
195 | host->mmc->pm_caps |= c->slot->pm_caps; | ||
196 | } | ||
197 | |||
198 | err = sdhci_add_host(host); | ||
199 | if (err) | ||
200 | goto err_free; | ||
201 | |||
202 | if (c->use_runtime_pm) { | ||
203 | pm_suspend_ignore_children(dev, 1); | ||
204 | pm_runtime_set_autosuspend_delay(dev, 50); | ||
205 | pm_runtime_use_autosuspend(dev); | ||
206 | pm_runtime_enable(dev); | ||
207 | } | ||
208 | |||
209 | return 0; | ||
210 | |||
211 | err_free: | ||
212 | platform_set_drvdata(pdev, NULL); | ||
213 | sdhci_free_host(c->host); | ||
214 | return err; | ||
215 | } | ||
216 | |||
217 | static int sdhci_acpi_remove(struct platform_device *pdev) | ||
218 | { | ||
219 | struct sdhci_acpi_host *c = platform_get_drvdata(pdev); | ||
220 | struct device *dev = &pdev->dev; | ||
221 | int dead; | ||
222 | |||
223 | if (c->use_runtime_pm) { | ||
224 | pm_runtime_get_sync(dev); | ||
225 | pm_runtime_disable(dev); | ||
226 | pm_runtime_put_noidle(dev); | ||
227 | } | ||
228 | |||
229 | dead = (sdhci_readl(c->host, SDHCI_INT_STATUS) == ~0); | ||
230 | sdhci_remove_host(c->host, dead); | ||
231 | platform_set_drvdata(pdev, NULL); | ||
232 | sdhci_free_host(c->host); | ||
233 | |||
234 | return 0; | ||
235 | } | ||
236 | |||
237 | #ifdef CONFIG_PM_SLEEP | ||
238 | |||
239 | static int sdhci_acpi_suspend(struct device *dev) | ||
240 | { | ||
241 | struct sdhci_acpi_host *c = dev_get_drvdata(dev); | ||
242 | |||
243 | return sdhci_suspend_host(c->host); | ||
244 | } | ||
245 | |||
246 | static int sdhci_acpi_resume(struct device *dev) | ||
247 | { | ||
248 | struct sdhci_acpi_host *c = dev_get_drvdata(dev); | ||
249 | |||
250 | return sdhci_resume_host(c->host); | ||
251 | } | ||
252 | |||
253 | #else | ||
254 | |||
255 | #define sdhci_acpi_suspend NULL | ||
256 | #define sdhci_acpi_resume NULL | ||
257 | |||
258 | #endif | ||
259 | |||
260 | #ifdef CONFIG_PM_RUNTIME | ||
261 | |||
262 | static int sdhci_acpi_runtime_suspend(struct device *dev) | ||
263 | { | ||
264 | struct sdhci_acpi_host *c = dev_get_drvdata(dev); | ||
265 | |||
266 | return sdhci_runtime_suspend_host(c->host); | ||
267 | } | ||
268 | |||
269 | static int sdhci_acpi_runtime_resume(struct device *dev) | ||
270 | { | ||
271 | struct sdhci_acpi_host *c = dev_get_drvdata(dev); | ||
272 | |||
273 | return sdhci_runtime_resume_host(c->host); | ||
274 | } | ||
275 | |||
276 | static int sdhci_acpi_runtime_idle(struct device *dev) | ||
277 | { | ||
278 | return 0; | ||
279 | } | ||
280 | |||
281 | #else | ||
282 | |||
283 | #define sdhci_acpi_runtime_suspend NULL | ||
284 | #define sdhci_acpi_runtime_resume NULL | ||
285 | #define sdhci_acpi_runtime_idle NULL | ||
286 | |||
287 | #endif | ||
288 | |||
289 | static const struct dev_pm_ops sdhci_acpi_pm_ops = { | ||
290 | .suspend = sdhci_acpi_suspend, | ||
291 | .resume = sdhci_acpi_resume, | ||
292 | .runtime_suspend = sdhci_acpi_runtime_suspend, | ||
293 | .runtime_resume = sdhci_acpi_runtime_resume, | ||
294 | .runtime_idle = sdhci_acpi_runtime_idle, | ||
295 | }; | ||
296 | |||
297 | static struct platform_driver sdhci_acpi_driver = { | ||
298 | .driver = { | ||
299 | .name = "sdhci-acpi", | ||
300 | .owner = THIS_MODULE, | ||
301 | .acpi_match_table = sdhci_acpi_ids, | ||
302 | .pm = &sdhci_acpi_pm_ops, | ||
303 | }, | ||
304 | .probe = sdhci_acpi_probe, | ||
305 | .remove = sdhci_acpi_remove, | ||
306 | }; | ||
307 | |||
308 | module_platform_driver(sdhci_acpi_driver); | ||
309 | |||
310 | MODULE_DESCRIPTION("Secure Digital Host Controller Interface ACPI driver"); | ||
311 | MODULE_AUTHOR("Adrian Hunter"); | ||
312 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c index 30bfdc4ae52..4b920b7621c 100644 --- a/drivers/mmc/host/sdhci-cns3xxx.c +++ b/drivers/mmc/host/sdhci-cns3xxx.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
17 | #include <linux/mmc/host.h> | 17 | #include <linux/mmc/host.h> |
18 | #include <linux/module.h> | ||
19 | #include <mach/cns3xxx.h> | 18 | #include <mach/cns3xxx.h> |
20 | #include "sdhci-pltfm.h" | 19 | #include "sdhci-pltfm.h" |
21 | 20 | ||
@@ -95,12 +94,12 @@ static struct sdhci_pltfm_data sdhci_cns3xxx_pdata = { | |||
95 | SDHCI_QUIRK_NONSTANDARD_CLOCK, | 94 | SDHCI_QUIRK_NONSTANDARD_CLOCK, |
96 | }; | 95 | }; |
97 | 96 | ||
98 | static int sdhci_cns3xxx_probe(struct platform_device *pdev) | 97 | static int __devinit sdhci_cns3xxx_probe(struct platform_device *pdev) |
99 | { | 98 | { |
100 | return sdhci_pltfm_register(pdev, &sdhci_cns3xxx_pdata); | 99 | return sdhci_pltfm_register(pdev, &sdhci_cns3xxx_pdata); |
101 | } | 100 | } |
102 | 101 | ||
103 | static int sdhci_cns3xxx_remove(struct platform_device *pdev) | 102 | static int __devexit sdhci_cns3xxx_remove(struct platform_device *pdev) |
104 | { | 103 | { |
105 | return sdhci_pltfm_unregister(pdev); | 104 | return sdhci_pltfm_unregister(pdev); |
106 | } | 105 | } |
@@ -109,13 +108,26 @@ static struct platform_driver sdhci_cns3xxx_driver = { | |||
109 | .driver = { | 108 | .driver = { |
110 | .name = "sdhci-cns3xxx", | 109 | .name = "sdhci-cns3xxx", |
111 | .owner = THIS_MODULE, | 110 | .owner = THIS_MODULE, |
112 | .pm = SDHCI_PLTFM_PMOPS, | ||
113 | }, | 111 | }, |
114 | .probe = sdhci_cns3xxx_probe, | 112 | .probe = sdhci_cns3xxx_probe, |
115 | .remove = sdhci_cns3xxx_remove, | 113 | .remove = __devexit_p(sdhci_cns3xxx_remove), |
114 | #ifdef CONFIG_PM | ||
115 | .suspend = sdhci_pltfm_suspend, | ||
116 | .resume = sdhci_pltfm_resume, | ||
117 | #endif | ||
116 | }; | 118 | }; |
117 | 119 | ||
118 | module_platform_driver(sdhci_cns3xxx_driver); | 120 | static int __init sdhci_cns3xxx_init(void) |
121 | { | ||
122 | return platform_driver_register(&sdhci_cns3xxx_driver); | ||
123 | } | ||
124 | module_init(sdhci_cns3xxx_init); | ||
125 | |||
126 | static void __exit sdhci_cns3xxx_exit(void) | ||
127 | { | ||
128 | platform_driver_unregister(&sdhci_cns3xxx_driver); | ||
129 | } | ||
130 | module_exit(sdhci_cns3xxx_exit); | ||
119 | 131 | ||
120 | MODULE_DESCRIPTION("SDHCI driver for CNS3xxx"); | 132 | MODULE_DESCRIPTION("SDHCI driver for CNS3xxx"); |
121 | MODULE_AUTHOR("Scott Shu, " | 133 | MODULE_AUTHOR("Scott Shu, " |
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c index 169fab91778..f2d29dca442 100644 --- a/drivers/mmc/host/sdhci-dove.c +++ b/drivers/mmc/host/sdhci-dove.c | |||
@@ -19,30 +19,11 @@ | |||
19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/clk.h> | ||
23 | #include <linux/err.h> | ||
24 | #include <linux/gpio.h> | ||
25 | #include <linux/io.h> | 22 | #include <linux/io.h> |
26 | #include <linux/mmc/host.h> | 23 | #include <linux/mmc/host.h> |
27 | #include <linux/module.h> | ||
28 | #include <linux/of.h> | ||
29 | #include <linux/of_gpio.h> | ||
30 | 24 | ||
31 | #include "sdhci-pltfm.h" | 25 | #include "sdhci-pltfm.h" |
32 | 26 | ||
33 | struct sdhci_dove_priv { | ||
34 | struct clk *clk; | ||
35 | int gpio_cd; | ||
36 | }; | ||
37 | |||
38 | static irqreturn_t sdhci_dove_carddetect_irq(int irq, void *data) | ||
39 | { | ||
40 | struct sdhci_host *host = data; | ||
41 | |||
42 | tasklet_schedule(&host->card_tasklet); | ||
43 | return IRQ_HANDLED; | ||
44 | } | ||
45 | |||
46 | static u16 sdhci_dove_readw(struct sdhci_host *host, int reg) | 27 | static u16 sdhci_dove_readw(struct sdhci_host *host, int reg) |
47 | { | 28 | { |
48 | u16 ret; | 29 | u16 ret; |
@@ -60,25 +41,16 @@ static u16 sdhci_dove_readw(struct sdhci_host *host, int reg) | |||
60 | 41 | ||
61 | static u32 sdhci_dove_readl(struct sdhci_host *host, int reg) | 42 | static u32 sdhci_dove_readl(struct sdhci_host *host, int reg) |
62 | { | 43 | { |
63 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
64 | struct sdhci_dove_priv *priv = pltfm_host->priv; | ||
65 | u32 ret; | 44 | u32 ret; |
66 | 45 | ||
67 | ret = readl(host->ioaddr + reg); | ||
68 | |||
69 | switch (reg) { | 46 | switch (reg) { |
70 | case SDHCI_CAPABILITIES: | 47 | case SDHCI_CAPABILITIES: |
48 | ret = readl(host->ioaddr + reg); | ||
71 | /* Mask the support for 3.0V */ | 49 | /* Mask the support for 3.0V */ |
72 | ret &= ~SDHCI_CAN_VDD_300; | 50 | ret &= ~SDHCI_CAN_VDD_300; |
73 | break; | 51 | break; |
74 | case SDHCI_PRESENT_STATE: | 52 | default: |
75 | if (gpio_is_valid(priv->gpio_cd)) { | 53 | ret = readl(host->ioaddr + reg); |
76 | if (gpio_get_value(priv->gpio_cd) == 0) | ||
77 | ret |= SDHCI_CARD_PRESENT; | ||
78 | else | ||
79 | ret &= ~SDHCI_CARD_PRESENT; | ||
80 | } | ||
81 | break; | ||
82 | } | 54 | } |
83 | return ret; | 55 | return ret; |
84 | } | 56 | } |
@@ -93,128 +65,43 @@ static struct sdhci_pltfm_data sdhci_dove_pdata = { | |||
93 | .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | | 65 | .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | |
94 | SDHCI_QUIRK_NO_BUSY_IRQ | | 66 | SDHCI_QUIRK_NO_BUSY_IRQ | |
95 | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | | 67 | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | |
96 | SDHCI_QUIRK_FORCE_DMA | | 68 | SDHCI_QUIRK_FORCE_DMA, |
97 | SDHCI_QUIRK_NO_HISPD_BIT, | ||
98 | }; | 69 | }; |
99 | 70 | ||
100 | static int sdhci_dove_probe(struct platform_device *pdev) | 71 | static int __devinit sdhci_dove_probe(struct platform_device *pdev) |
101 | { | 72 | { |
102 | struct sdhci_host *host; | 73 | return sdhci_pltfm_register(pdev, &sdhci_dove_pdata); |
103 | struct sdhci_pltfm_host *pltfm_host; | ||
104 | struct sdhci_dove_priv *priv; | ||
105 | int ret; | ||
106 | |||
107 | priv = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_dove_priv), | ||
108 | GFP_KERNEL); | ||
109 | if (!priv) { | ||
110 | dev_err(&pdev->dev, "unable to allocate private data"); | ||
111 | return -ENOMEM; | ||
112 | } | ||
113 | |||
114 | priv->clk = devm_clk_get(&pdev->dev, NULL); | ||
115 | |||
116 | if (pdev->dev.of_node) { | ||
117 | priv->gpio_cd = of_get_named_gpio(pdev->dev.of_node, | ||
118 | "cd-gpios", 0); | ||
119 | } else { | ||
120 | priv->gpio_cd = -EINVAL; | ||
121 | } | ||
122 | |||
123 | if (gpio_is_valid(priv->gpio_cd)) { | ||
124 | ret = gpio_request(priv->gpio_cd, "sdhci-cd"); | ||
125 | if (ret) { | ||
126 | dev_err(&pdev->dev, "card detect gpio request failed: %d\n", | ||
127 | ret); | ||
128 | return ret; | ||
129 | } | ||
130 | gpio_direction_input(priv->gpio_cd); | ||
131 | } | ||
132 | |||
133 | host = sdhci_pltfm_init(pdev, &sdhci_dove_pdata); | ||
134 | if (IS_ERR(host)) { | ||
135 | ret = PTR_ERR(host); | ||
136 | goto err_sdhci_pltfm_init; | ||
137 | } | ||
138 | |||
139 | pltfm_host = sdhci_priv(host); | ||
140 | pltfm_host->priv = priv; | ||
141 | |||
142 | if (!IS_ERR(priv->clk)) | ||
143 | clk_prepare_enable(priv->clk); | ||
144 | |||
145 | sdhci_get_of_property(pdev); | ||
146 | |||
147 | ret = sdhci_add_host(host); | ||
148 | if (ret) | ||
149 | goto err_sdhci_add; | ||
150 | |||
151 | /* | ||
152 | * We must request the IRQ after sdhci_add_host(), as the tasklet only | ||
153 | * gets set up in sdhci_add_host(); requesting it earlier would oops. | ||
154 | */ | ||
155 | if (gpio_is_valid(priv->gpio_cd)) { | ||
156 | ret = request_irq(gpio_to_irq(priv->gpio_cd), | ||
157 | sdhci_dove_carddetect_irq, | ||
158 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, | ||
159 | mmc_hostname(host->mmc), host); | ||
160 | if (ret) { | ||
161 | dev_err(&pdev->dev, "card detect irq request failed: %d\n", | ||
162 | ret); | ||
163 | goto err_request_irq; | ||
164 | } | ||
165 | } | ||
166 | |||
167 | return 0; | ||
168 | |||
169 | err_request_irq: | ||
170 | sdhci_remove_host(host, 0); | ||
171 | err_sdhci_add: | ||
172 | if (!IS_ERR(priv->clk)) | ||
173 | clk_disable_unprepare(priv->clk); | ||
174 | sdhci_pltfm_free(pdev); | ||
175 | err_sdhci_pltfm_init: | ||
176 | if (gpio_is_valid(priv->gpio_cd)) | ||
177 | gpio_free(priv->gpio_cd); | ||
178 | return ret; | ||
179 | } | 74 | } |
180 | 75 | ||
181 | static int sdhci_dove_remove(struct platform_device *pdev) | 76 | static int __devexit sdhci_dove_remove(struct platform_device *pdev) |
182 | { | 77 | { |
183 | struct sdhci_host *host = platform_get_drvdata(pdev); | 78 | return sdhci_pltfm_unregister(pdev); |
184 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
185 | struct sdhci_dove_priv *priv = pltfm_host->priv; | ||
186 | |||
187 | sdhci_pltfm_unregister(pdev); | ||
188 | |||
189 | if (gpio_is_valid(priv->gpio_cd)) { | ||
190 | free_irq(gpio_to_irq(priv->gpio_cd), host); | ||
191 | gpio_free(priv->gpio_cd); | ||
192 | } | ||
193 | |||
194 | if (!IS_ERR(priv->clk)) | ||
195 | clk_disable_unprepare(priv->clk); | ||
196 | |||
197 | return 0; | ||
198 | } | 79 | } |
199 | 80 | ||
200 | static const struct of_device_id sdhci_dove_of_match_table[] = { | ||
201 | { .compatible = "marvell,dove-sdhci", }, | ||
202 | {} | ||
203 | }; | ||
204 | MODULE_DEVICE_TABLE(of, sdhci_dove_of_match_table); | ||
205 | |||
206 | static struct platform_driver sdhci_dove_driver = { | 81 | static struct platform_driver sdhci_dove_driver = { |
207 | .driver = { | 82 | .driver = { |
208 | .name = "sdhci-dove", | 83 | .name = "sdhci-dove", |
209 | .owner = THIS_MODULE, | 84 | .owner = THIS_MODULE, |
210 | .pm = SDHCI_PLTFM_PMOPS, | ||
211 | .of_match_table = of_match_ptr(sdhci_dove_of_match_table), | ||
212 | }, | 85 | }, |
213 | .probe = sdhci_dove_probe, | 86 | .probe = sdhci_dove_probe, |
214 | .remove = sdhci_dove_remove, | 87 | .remove = __devexit_p(sdhci_dove_remove), |
88 | #ifdef CONFIG_PM | ||
89 | .suspend = sdhci_pltfm_suspend, | ||
90 | .resume = sdhci_pltfm_resume, | ||
91 | #endif | ||
215 | }; | 92 | }; |
216 | 93 | ||
217 | module_platform_driver(sdhci_dove_driver); | 94 | static int __init sdhci_dove_init(void) |
95 | { | ||
96 | return platform_driver_register(&sdhci_dove_driver); | ||
97 | } | ||
98 | module_init(sdhci_dove_init); | ||
99 | |||
100 | static void __exit sdhci_dove_exit(void) | ||
101 | { | ||
102 | platform_driver_unregister(&sdhci_dove_driver); | ||
103 | } | ||
104 | module_exit(sdhci_dove_exit); | ||
218 | 105 | ||
219 | MODULE_DESCRIPTION("SDHCI driver for Dove"); | 106 | MODULE_DESCRIPTION("SDHCI driver for Dove"); |
220 | MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>, " | 107 | MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>, " |
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index e07df812ff1..4dc0028086a 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c | |||
@@ -24,8 +24,7 @@ | |||
24 | #include <linux/of.h> | 24 | #include <linux/of.h> |
25 | #include <linux/of_device.h> | 25 | #include <linux/of_device.h> |
26 | #include <linux/of_gpio.h> | 26 | #include <linux/of_gpio.h> |
27 | #include <linux/pinctrl/consumer.h> | 27 | #include <mach/esdhc.h> |
28 | #include <linux/platform_data/mmc-esdhc-imx.h> | ||
29 | #include "sdhci-pltfm.h" | 28 | #include "sdhci-pltfm.h" |
30 | #include "sdhci-esdhc.h" | 29 | #include "sdhci-esdhc.h" |
31 | 30 | ||
@@ -33,16 +32,6 @@ | |||
33 | /* VENDOR SPEC register */ | 32 | /* VENDOR SPEC register */ |
34 | #define SDHCI_VENDOR_SPEC 0xC0 | 33 | #define SDHCI_VENDOR_SPEC 0xC0 |
35 | #define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 | 34 | #define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 |
36 | #define SDHCI_WTMK_LVL 0x44 | ||
37 | #define SDHCI_MIX_CTRL 0x48 | ||
38 | |||
39 | /* | ||
40 | * There is an INT DMA ERR mismatch between the eSDHC and the standard SDHC spec: | ||
41 | * bit 25 is used in the standard spec but is reserved in the fsl eSDHC design, | ||
42 | * while bit 28 is used as the INT DMA ERR in the fsl eSDHC design. | ||
43 | * Define this macro for the fsl eSDHC DMA error interrupt. | ||
44 | */ | ||
45 | #define SDHCI_INT_VENDOR_SPEC_DMA_ERR 0x10000000 | ||
46 | 35 | ||
47 | /* | 36 | /* |
48 | * The CMDTYPE of the CMD register (offset 0xE) should be set to | 37 | * The CMDTYPE of the CMD register (offset 0xE) should be set to |
@@ -62,18 +51,13 @@ enum imx_esdhc_type { | |||
62 | IMX35_ESDHC, | 51 | IMX35_ESDHC, |
63 | IMX51_ESDHC, | 52 | IMX51_ESDHC, |
64 | IMX53_ESDHC, | 53 | IMX53_ESDHC, |
65 | IMX6Q_USDHC, | ||
66 | }; | 54 | }; |
67 | 55 | ||
68 | struct pltfm_imx_data { | 56 | struct pltfm_imx_data { |
69 | int flags; | 57 | int flags; |
70 | u32 scratchpad; | 58 | u32 scratchpad; |
71 | enum imx_esdhc_type devtype; | 59 | enum imx_esdhc_type devtype; |
72 | struct pinctrl *pinctrl; | ||
73 | struct esdhc_platform_data boarddata; | 60 | struct esdhc_platform_data boarddata; |
74 | struct clk *clk_ipg; | ||
75 | struct clk *clk_ahb; | ||
76 | struct clk *clk_per; | ||
77 | }; | 61 | }; |
78 | 62 | ||
79 | static struct platform_device_id imx_esdhc_devtype[] = { | 63 | static struct platform_device_id imx_esdhc_devtype[] = { |
@@ -90,9 +74,6 @@ static struct platform_device_id imx_esdhc_devtype[] = { | |||
90 | .name = "sdhci-esdhc-imx53", | 74 | .name = "sdhci-esdhc-imx53", |
91 | .driver_data = IMX53_ESDHC, | 75 | .driver_data = IMX53_ESDHC, |
92 | }, { | 76 | }, { |
93 | .name = "sdhci-usdhc-imx6q", | ||
94 | .driver_data = IMX6Q_USDHC, | ||
95 | }, { | ||
96 | /* sentinel */ | 77 | /* sentinel */ |
97 | } | 78 | } |
98 | }; | 79 | }; |
@@ -103,7 +84,6 @@ static const struct of_device_id imx_esdhc_dt_ids[] = { | |||
103 | { .compatible = "fsl,imx35-esdhc", .data = &imx_esdhc_devtype[IMX35_ESDHC], }, | 84 | { .compatible = "fsl,imx35-esdhc", .data = &imx_esdhc_devtype[IMX35_ESDHC], }, |
104 | { .compatible = "fsl,imx51-esdhc", .data = &imx_esdhc_devtype[IMX51_ESDHC], }, | 85 | { .compatible = "fsl,imx51-esdhc", .data = &imx_esdhc_devtype[IMX51_ESDHC], }, |
105 | { .compatible = "fsl,imx53-esdhc", .data = &imx_esdhc_devtype[IMX53_ESDHC], }, | 86 | { .compatible = "fsl,imx53-esdhc", .data = &imx_esdhc_devtype[IMX53_ESDHC], }, |
106 | { .compatible = "fsl,imx6q-usdhc", .data = &imx_esdhc_devtype[IMX6Q_USDHC], }, | ||
107 | { /* sentinel */ } | 87 | { /* sentinel */ } |
108 | }; | 88 | }; |
109 | MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids); | 89 | MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids); |
@@ -128,11 +108,6 @@ static inline int is_imx53_esdhc(struct pltfm_imx_data *data) | |||
128 | return data->devtype == IMX53_ESDHC; | 108 | return data->devtype == IMX53_ESDHC; |
129 | } | 109 | } |
130 | 110 | ||
131 | static inline int is_imx6q_usdhc(struct pltfm_imx_data *data) | ||
132 | { | ||
133 | return data->devtype == IMX6Q_USDHC; | ||
134 | } | ||
135 | |||
136 | static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) | 111 | static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) |
137 | { | 112 | { |
138 | void __iomem *base = host->ioaddr + (reg & ~0x3); | 113 | void __iomem *base = host->ioaddr + (reg & ~0x3); |
@@ -160,27 +135,6 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg) | |||
160 | val |= SDHCI_CARD_PRESENT; | 135 | val |= SDHCI_CARD_PRESENT; |
161 | } | 136 | } |
162 | 137 | ||
163 | if (unlikely(reg == SDHCI_CAPABILITIES)) { | ||
164 | /* In FSL esdhc IC module, only bit20 is used to indicate the | ||
165 | * ADMA2 capability of esdhc, but this bit is messed up on | ||
166 | * some SOCs (e.g. on MX25, MX35 this bit is set, but they | ||
167 | * don't actually support ADMA2). So set the BROKEN_ADMA | ||
168 | * quirk on MX25/35 platforms. | ||
169 | */ | ||
170 | |||
171 | if (val & SDHCI_CAN_DO_ADMA1) { | ||
172 | val &= ~SDHCI_CAN_DO_ADMA1; | ||
173 | val |= SDHCI_CAN_DO_ADMA2; | ||
174 | } | ||
175 | } | ||
176 | |||
177 | if (unlikely(reg == SDHCI_INT_STATUS)) { | ||
178 | if (val & SDHCI_INT_VENDOR_SPEC_DMA_ERR) { | ||
179 | val &= ~SDHCI_INT_VENDOR_SPEC_DMA_ERR; | ||
180 | val |= SDHCI_INT_ADMA_ERROR; | ||
181 | } | ||
182 | } | ||
183 | |||
184 | return val; | 138 | return val; |
185 | } | 139 | } |
186 | 140 | ||
@@ -225,28 +179,13 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) | |||
225 | writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); | 179 | writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); |
226 | } | 180 | } |
227 | 181 | ||
228 | if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) { | ||
229 | if (val & SDHCI_INT_ADMA_ERROR) { | ||
230 | val &= ~SDHCI_INT_ADMA_ERROR; | ||
231 | val |= SDHCI_INT_VENDOR_SPEC_DMA_ERR; | ||
232 | } | ||
233 | } | ||
234 | |||
235 | writel(val, host->ioaddr + reg); | 182 | writel(val, host->ioaddr + reg); |
236 | } | 183 | } |
237 | 184 | ||
238 | static u16 esdhc_readw_le(struct sdhci_host *host, int reg) | 185 | static u16 esdhc_readw_le(struct sdhci_host *host, int reg) |
239 | { | 186 | { |
240 | if (unlikely(reg == SDHCI_HOST_VERSION)) { | 187 | if (unlikely(reg == SDHCI_HOST_VERSION)) |
241 | u16 val = readw(host->ioaddr + (reg ^ 2)); | 188 | reg ^= 2; |
242 | /* | ||
243 | * uSDHC supports SDHCI v3.0, but it's encoded as value | ||
244 | * 0x3 in host controller version register, which violates | ||
245 | * SDHCI_SPEC_300 definition. Work it around here. | ||
246 | */ | ||
247 | if ((val & SDHCI_SPEC_VER_MASK) == 3) | ||
248 | return --val; | ||
249 | } | ||
250 | 189 | ||
251 | return readw(host->ioaddr + reg); | 190 | return readw(host->ioaddr + reg); |
252 | } | 191 | } |
@@ -274,21 +213,11 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) | |||
274 | imx_data->scratchpad = val; | 213 | imx_data->scratchpad = val; |
275 | return; | 214 | return; |
276 | case SDHCI_COMMAND: | 215 | case SDHCI_COMMAND: |
277 | if ((host->cmd->opcode == MMC_STOP_TRANSMISSION || | 216 | if ((host->cmd->opcode == MMC_STOP_TRANSMISSION) |
278 | host->cmd->opcode == MMC_SET_BLOCK_COUNT) && | 217 | && (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) |
279 | (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) | ||
280 | val |= SDHCI_CMD_ABORTCMD; | 218 | val |= SDHCI_CMD_ABORTCMD; |
281 | 219 | writel(val << 16 | imx_data->scratchpad, | |
282 | if (is_imx6q_usdhc(imx_data)) { | 220 | host->ioaddr + SDHCI_TRANSFER_MODE); |
283 | u32 m = readl(host->ioaddr + SDHCI_MIX_CTRL); | ||
284 | m = imx_data->scratchpad | (m & 0xffff0000); | ||
285 | writel(m, host->ioaddr + SDHCI_MIX_CTRL); | ||
286 | writel(val << 16, | ||
287 | host->ioaddr + SDHCI_TRANSFER_MODE); | ||
288 | } else { | ||
289 | writel(val << 16 | imx_data->scratchpad, | ||
290 | host->ioaddr + SDHCI_TRANSFER_MODE); | ||
291 | } | ||
292 | return; | 221 | return; |
293 | case SDHCI_BLOCK_SIZE: | 222 | case SDHCI_BLOCK_SIZE: |
294 | val &= ~SDHCI_MAKE_BLKSZ(0x7, 0); | 223 | val &= ~SDHCI_MAKE_BLKSZ(0x7, 0); |
@@ -299,8 +228,6 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) | |||
299 | 228 | ||
300 | static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) | 229 | static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) |
301 | { | 230 | { |
302 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
303 | struct pltfm_imx_data *imx_data = pltfm_host->priv; | ||
304 | u32 new_val; | 231 | u32 new_val; |
305 | 232 | ||
306 | switch (reg) { | 233 | switch (reg) { |
@@ -315,13 +242,10 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) | |||
315 | new_val = val & (SDHCI_CTRL_LED | \ | 242 | new_val = val & (SDHCI_CTRL_LED | \ |
316 | SDHCI_CTRL_4BITBUS | \ | 243 | SDHCI_CTRL_4BITBUS | \ |
317 | SDHCI_CTRL_D3CD); | 244 | SDHCI_CTRL_D3CD); |
318 | /* ensure the endianness */ | 245 | /* ensure the endianness */
319 | new_val |= ESDHC_HOST_CONTROL_LE; | 246 | new_val |= ESDHC_HOST_CONTROL_LE; |
320 | /* bits 8&9 are reserved on mx25 */ | 247 | /* DMA mode bits are shifted */ |
321 | if (!is_imx25_esdhc(imx_data)) { | 248 | new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5; |
322 | /* DMA mode bits are shifted */ | ||
323 | new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5; | ||
324 | } | ||
325 | 249 | ||
326 | esdhc_clrset_le(host, 0xffff, new_val, reg); | 250 | esdhc_clrset_le(host, 0xffff, new_val, reg); |
327 | return; | 251 | return; |
@@ -387,10 +311,9 @@ static struct sdhci_ops sdhci_esdhc_ops = { | |||
387 | }; | 311 | }; |
388 | 312 | ||
389 | static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { | 313 | static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { |
390 | .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_NO_HISPD_BIT | 314 | .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA |
391 | | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | ||
392 | | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | ||
393 | | SDHCI_QUIRK_BROKEN_CARD_DETECTION, | 315 | | SDHCI_QUIRK_BROKEN_CARD_DETECTION, |
316 | /* ADMA has issues. Might be fixable */ | ||
394 | .ops = &sdhci_esdhc_ops, | 317 | .ops = &sdhci_esdhc_ops, |
395 | }; | 318 | }; |
396 | 319 | ||
@@ -403,7 +326,7 @@ static irqreturn_t cd_irq(int irq, void *data) | |||
403 | }; | 326 | }; |
404 | 327 | ||
405 | #ifdef CONFIG_OF | 328 | #ifdef CONFIG_OF |
406 | static int | 329 | static int __devinit |
407 | sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, | 330 | sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, |
408 | struct esdhc_platform_data *boarddata) | 331 | struct esdhc_platform_data *boarddata) |
409 | { | 332 | { |
@@ -412,7 +335,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, | |||
412 | if (!np) | 335 | if (!np) |
413 | return -ENODEV; | 336 | return -ENODEV; |
414 | 337 | ||
415 | if (of_get_property(np, "non-removable", NULL)) | 338 | if (of_get_property(np, "fsl,card-wired", NULL)) |
416 | boarddata->cd_type = ESDHC_CD_PERMANENT; | 339 | boarddata->cd_type = ESDHC_CD_PERMANENT; |
417 | 340 | ||
418 | if (of_get_property(np, "fsl,cd-controller", NULL)) | 341 | if (of_get_property(np, "fsl,cd-controller", NULL)) |
@@ -440,13 +363,14 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, | |||
440 | } | 363 | } |
441 | #endif | 364 | #endif |
442 | 365 | ||
443 | static int sdhci_esdhc_imx_probe(struct platform_device *pdev) | 366 | static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev) |
444 | { | 367 | { |
445 | const struct of_device_id *of_id = | 368 | const struct of_device_id *of_id = |
446 | of_match_device(imx_esdhc_dt_ids, &pdev->dev); | 369 | of_match_device(imx_esdhc_dt_ids, &pdev->dev); |
447 | struct sdhci_pltfm_host *pltfm_host; | 370 | struct sdhci_pltfm_host *pltfm_host; |
448 | struct sdhci_host *host; | 371 | struct sdhci_host *host; |
449 | struct esdhc_platform_data *boarddata; | 372 | struct esdhc_platform_data *boarddata; |
373 | struct clk *clk; | ||
450 | int err; | 374 | int err; |
451 | struct pltfm_imx_data *imx_data; | 375 | struct pltfm_imx_data *imx_data; |
452 | 376 | ||
@@ -456,10 +380,10 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) | |||
456 | 380 | ||
457 | pltfm_host = sdhci_priv(host); | 381 | pltfm_host = sdhci_priv(host); |
458 | 382 | ||
459 | imx_data = devm_kzalloc(&pdev->dev, sizeof(*imx_data), GFP_KERNEL); | 383 | imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL); |
460 | if (!imx_data) { | 384 | if (!imx_data) { |
461 | err = -ENOMEM; | 385 | err = -ENOMEM; |
462 | goto free_sdhci; | 386 | goto err_imx_data; |
463 | } | 387 | } |
464 | 388 | ||
465 | if (of_id) | 389 | if (of_id) |
@@ -467,59 +391,31 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) | |||
467 | imx_data->devtype = pdev->id_entry->driver_data; | 391 | imx_data->devtype = pdev->id_entry->driver_data; |
468 | pltfm_host->priv = imx_data; | 392 | pltfm_host->priv = imx_data; |
469 | 393 | ||
470 | imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); | 394 | clk = clk_get(mmc_dev(host->mmc), NULL); |
471 | if (IS_ERR(imx_data->clk_ipg)) { | 395 | if (IS_ERR(clk)) { |
472 | err = PTR_ERR(imx_data->clk_ipg); | 396 | dev_err(mmc_dev(host->mmc), "clk err\n"); |
473 | goto free_sdhci; | 397 | err = PTR_ERR(clk); |
474 | } | 398 | goto err_clk_get; |
475 | |||
476 | imx_data->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); | ||
477 | if (IS_ERR(imx_data->clk_ahb)) { | ||
478 | err = PTR_ERR(imx_data->clk_ahb); | ||
479 | goto free_sdhci; | ||
480 | } | 399 | } |
400 | clk_enable(clk); | ||
401 | pltfm_host->clk = clk; | ||
481 | 402 | ||
482 | imx_data->clk_per = devm_clk_get(&pdev->dev, "per"); | 403 | if (!is_imx25_esdhc(imx_data)) |
483 | if (IS_ERR(imx_data->clk_per)) { | 404 | host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; |
484 | err = PTR_ERR(imx_data->clk_per); | ||
485 | goto free_sdhci; | ||
486 | } | ||
487 | |||
488 | pltfm_host->clk = imx_data->clk_per; | ||
489 | |||
490 | clk_prepare_enable(imx_data->clk_per); | ||
491 | clk_prepare_enable(imx_data->clk_ipg); | ||
492 | clk_prepare_enable(imx_data->clk_ahb); | ||
493 | |||
494 | imx_data->pinctrl = devm_pinctrl_get_select_default(&pdev->dev); | ||
495 | if (IS_ERR(imx_data->pinctrl)) { | ||
496 | err = PTR_ERR(imx_data->pinctrl); | ||
497 | goto disable_clk; | ||
498 | } | ||
499 | |||
500 | host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; | ||
501 | 405 | ||
502 | if (is_imx25_esdhc(imx_data) || is_imx35_esdhc(imx_data)) | 406 | if (is_imx25_esdhc(imx_data) || is_imx35_esdhc(imx_data)) |
503 | /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */ | 407 | /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */ |
504 | host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK | 408 | host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK; |
505 | | SDHCI_QUIRK_BROKEN_ADMA; | ||
506 | 409 | ||
507 | if (is_imx53_esdhc(imx_data)) | 410 | if (is_imx53_esdhc(imx_data)) |
508 | imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; | 411 | imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; |
509 | 412 | ||
510 | /* | ||
511 | * The imx6q ROM code will change the default watermark level setting | ||
512 | * to something insane. Change it back here. | ||
513 | */ | ||
514 | if (is_imx6q_usdhc(imx_data)) | ||
515 | writel(0x08100810, host->ioaddr + SDHCI_WTMK_LVL); | ||
516 | |||
517 | boarddata = &imx_data->boarddata; | 413 | boarddata = &imx_data->boarddata; |
518 | if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) { | 414 | if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) { |
519 | if (!host->mmc->parent->platform_data) { | 415 | if (!host->mmc->parent->platform_data) { |
520 | dev_err(mmc_dev(host->mmc), "no board data!\n"); | 416 | dev_err(mmc_dev(host->mmc), "no board data!\n"); |
521 | err = -EINVAL; | 417 | err = -EINVAL; |
522 | goto disable_clk; | 418 | goto no_board_data; |
523 | } | 419 | } |
524 | imx_data->boarddata = *((struct esdhc_platform_data *) | 420 | imx_data->boarddata = *((struct esdhc_platform_data *) |
525 | host->mmc->parent->platform_data); | 421 | host->mmc->parent->platform_data); |
@@ -527,8 +423,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) | |||
527 | 423 | ||
528 | /* write_protect */ | 424 | /* write_protect */ |
529 | if (boarddata->wp_type == ESDHC_WP_GPIO) { | 425 | if (boarddata->wp_type == ESDHC_WP_GPIO) { |
530 | err = devm_gpio_request_one(&pdev->dev, boarddata->wp_gpio, | 426 | err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP"); |
531 | GPIOF_IN, "ESDHC_WP"); | ||
532 | if (err) { | 427 | if (err) { |
533 | dev_warn(mmc_dev(host->mmc), | 428 | dev_warn(mmc_dev(host->mmc), |
534 | "no write-protect pin available!\n"); | 429 | "no write-protect pin available!\n"); |
@@ -544,21 +439,19 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) | |||
544 | 439 | ||
545 | switch (boarddata->cd_type) { | 440 | switch (boarddata->cd_type) { |
546 | case ESDHC_CD_GPIO: | 441 | case ESDHC_CD_GPIO: |
547 | err = devm_gpio_request_one(&pdev->dev, boarddata->cd_gpio, | 442 | err = gpio_request_one(boarddata->cd_gpio, GPIOF_IN, "ESDHC_CD"); |
548 | GPIOF_IN, "ESDHC_CD"); | ||
549 | if (err) { | 443 | if (err) { |
550 | dev_err(mmc_dev(host->mmc), | 444 | dev_err(mmc_dev(host->mmc), |
551 | "no card-detect pin available!\n"); | 445 | "no card-detect pin available!\n"); |
552 | goto disable_clk; | 446 | goto no_card_detect_pin; |
553 | } | 447 | } |
554 | 448 | ||
555 | err = devm_request_irq(&pdev->dev, | 449 | err = request_irq(gpio_to_irq(boarddata->cd_gpio), cd_irq, |
556 | gpio_to_irq(boarddata->cd_gpio), cd_irq, | ||
557 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, | 450 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, |
558 | mmc_hostname(host->mmc), host); | 451 | mmc_hostname(host->mmc), host); |
559 | if (err) { | 452 | if (err) { |
560 | dev_err(mmc_dev(host->mmc), "request irq error\n"); | 453 | dev_err(mmc_dev(host->mmc), "request irq error\n"); |
561 | goto disable_clk; | 454 | goto no_card_detect_irq; |
562 | } | 455 | } |
563 | /* fall through */ | 456 | /* fall through */ |
564 | 457 | ||
@@ -577,31 +470,50 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) | |||
577 | 470 | ||
578 | err = sdhci_add_host(host); | 471 | err = sdhci_add_host(host); |
579 | if (err) | 472 | if (err) |
580 | goto disable_clk; | 473 | goto err_add_host; |
581 | 474 | ||
582 | return 0; | 475 | return 0; |
583 | 476 | ||
584 | disable_clk: | 477 | err_add_host: |
585 | clk_disable_unprepare(imx_data->clk_per); | 478 | if (gpio_is_valid(boarddata->cd_gpio)) |
586 | clk_disable_unprepare(imx_data->clk_ipg); | 479 | free_irq(gpio_to_irq(boarddata->cd_gpio), host); |
587 | clk_disable_unprepare(imx_data->clk_ahb); | 480 | no_card_detect_irq: |
588 | free_sdhci: | 481 | if (gpio_is_valid(boarddata->cd_gpio)) |
482 | gpio_free(boarddata->cd_gpio); | ||
483 | if (gpio_is_valid(boarddata->wp_gpio)) | ||
484 | gpio_free(boarddata->wp_gpio); | ||
485 | no_card_detect_pin: | ||
486 | no_board_data: | ||
487 | clk_disable(pltfm_host->clk); | ||
488 | clk_put(pltfm_host->clk); | ||
489 | err_clk_get: | ||
490 | kfree(imx_data); | ||
491 | err_imx_data: | ||
589 | sdhci_pltfm_free(pdev); | 492 | sdhci_pltfm_free(pdev); |
590 | return err; | 493 | return err; |
591 | } | 494 | } |
592 | 495 | ||
593 | static int sdhci_esdhc_imx_remove(struct platform_device *pdev) | 496 | static int __devexit sdhci_esdhc_imx_remove(struct platform_device *pdev) |
594 | { | 497 | { |
595 | struct sdhci_host *host = platform_get_drvdata(pdev); | 498 | struct sdhci_host *host = platform_get_drvdata(pdev); |
596 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 499 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
597 | struct pltfm_imx_data *imx_data = pltfm_host->priv; | 500 | struct pltfm_imx_data *imx_data = pltfm_host->priv; |
501 | struct esdhc_platform_data *boarddata = &imx_data->boarddata; | ||
598 | int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); | 502 | int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); |
599 | 503 | ||
600 | sdhci_remove_host(host, dead); | 504 | sdhci_remove_host(host, dead); |
601 | 505 | ||
602 | clk_disable_unprepare(imx_data->clk_per); | 506 | if (gpio_is_valid(boarddata->wp_gpio)) |
603 | clk_disable_unprepare(imx_data->clk_ipg); | 507 | gpio_free(boarddata->wp_gpio); |
604 | clk_disable_unprepare(imx_data->clk_ahb); | 508 | |
509 | if (gpio_is_valid(boarddata->cd_gpio)) { | ||
510 | free_irq(gpio_to_irq(boarddata->cd_gpio), host); | ||
511 | gpio_free(boarddata->cd_gpio); | ||
512 | } | ||
513 | |||
514 | clk_disable(pltfm_host->clk); | ||
515 | clk_put(pltfm_host->clk); | ||
516 | kfree(imx_data); | ||
605 | 517 | ||
606 | sdhci_pltfm_free(pdev); | 518 | sdhci_pltfm_free(pdev); |
607 | 519 | ||
@@ -613,14 +525,27 @@ static struct platform_driver sdhci_esdhc_imx_driver = { | |||
613 | .name = "sdhci-esdhc-imx", | 525 | .name = "sdhci-esdhc-imx", |
614 | .owner = THIS_MODULE, | 526 | .owner = THIS_MODULE, |
615 | .of_match_table = imx_esdhc_dt_ids, | 527 | .of_match_table = imx_esdhc_dt_ids, |
616 | .pm = SDHCI_PLTFM_PMOPS, | ||
617 | }, | 528 | }, |
618 | .id_table = imx_esdhc_devtype, | 529 | .id_table = imx_esdhc_devtype, |
619 | .probe = sdhci_esdhc_imx_probe, | 530 | .probe = sdhci_esdhc_imx_probe, |
620 | .remove = sdhci_esdhc_imx_remove, | 531 | .remove = __devexit_p(sdhci_esdhc_imx_remove), |
532 | #ifdef CONFIG_PM | ||
533 | .suspend = sdhci_pltfm_suspend, | ||
534 | .resume = sdhci_pltfm_resume, | ||
535 | #endif | ||
621 | }; | 536 | }; |
622 | 537 | ||
623 | module_platform_driver(sdhci_esdhc_imx_driver); | 538 | static int __init sdhci_esdhc_imx_init(void) |
539 | { | ||
540 | return platform_driver_register(&sdhci_esdhc_imx_driver); | ||
541 | } | ||
542 | module_init(sdhci_esdhc_imx_init); | ||
543 | |||
544 | static void __exit sdhci_esdhc_imx_exit(void) | ||
545 | { | ||
546 | platform_driver_unregister(&sdhci_esdhc_imx_driver); | ||
547 | } | ||
548 | module_exit(sdhci_esdhc_imx_exit); | ||
624 | 549 | ||
625 | MODULE_DESCRIPTION("SDHCI driver for Freescale i.MX eSDHC"); | 550 | MODULE_DESCRIPTION("SDHCI driver for Freescale i.MX eSDHC"); |
626 | MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>"); | 551 | MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>"); |
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h index d25f9ab9a54..c3b08f11194 100644 --- a/drivers/mmc/host/sdhci-esdhc.h +++ b/drivers/mmc/host/sdhci-esdhc.h | |||
@@ -48,14 +48,14 @@ static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock) | |||
48 | int div = 1; | 48 | int div = 1; |
49 | u32 temp; | 49 | u32 temp; |
50 | 50 | ||
51 | if (clock == 0) | ||
52 | goto out; | ||
53 | |||
54 | temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); | 51 | temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); |
55 | temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | 52 | temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN |
56 | | ESDHC_CLOCK_MASK); | 53 | | ESDHC_CLOCK_MASK); |
57 | sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); | 54 | sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); |
58 | 55 | ||
56 | if (clock == 0) | ||
57 | goto out; | ||
58 | |||
59 | while (host->max_clk / pre_div / 16 > clock && pre_div < 256) | 59 | while (host->max_clk / pre_div / 16 > clock && pre_div < 256) |
60 | pre_div *= 2; | 60 | pre_div *= 2; |
61 | 61 | ||
@@ -73,7 +73,7 @@ static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock) | |||
73 | | (div << ESDHC_DIVIDER_SHIFT) | 73 | | (div << ESDHC_DIVIDER_SHIFT) |
74 | | (pre_div << ESDHC_PREDIV_SHIFT)); | 74 | | (pre_div << ESDHC_PREDIV_SHIFT)); |
75 | sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); | 75 | sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); |
76 | mdelay(1); | 76 | mdelay(100); |
77 | out: | 77 | out: |
78 | host->clock = clock; | 78 | host->clock = clock; |
79 | } | 79 | } |
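For reference, the divisor search surrounding this hunk in esdhc_set_clock() picks a power-of-two prescaler (up to 256) and a linear divider (up to 16) so that max_clk / (pre_div * div) stays at or below the requested rate. A standalone sketch of just that search, with the encoding into ESDHC_SYSTEM_CONTROL left out (helper name is illustrative):

static void esdhc_pick_divisors(unsigned int max_clk, unsigned int clock,
				int *pre_div, int *div)
{
	*pre_div = 1;
	*div = 1;

	/* grow the prescaler while even div = 16 would still be too fast */
	while (max_clk / *pre_div / 16 > clock && *pre_div < 256)
		*pre_div *= 2;

	/* then grow the divider until the target rate is not exceeded */
	while (max_clk / *pre_div / *div > clock && *div < 16)
		(*div)++;
}

With max_clk = 96 MHz and a requested 400 kHz this yields pre_div = 16 and div = 15, i.e. exactly 400 kHz.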
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index f32526d2d96..fe604df6501 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Freescale eSDHC controller driver. | 2 | * Freescale eSDHC controller driver. |
3 | * | 3 | * |
4 | * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc. | 4 | * Copyright (c) 2007 Freescale Semiconductor, Inc. |
5 | * Copyright (c) 2009 MontaVista Software, Inc. | 5 | * Copyright (c) 2009 MontaVista Software, Inc. |
6 | * | 6 | * |
7 | * Authors: Xiaobo Xie <X.Xie@freescale.com> | 7 | * Authors: Xiaobo Xie <X.Xie@freescale.com> |
@@ -14,90 +14,22 @@ | |||
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/io.h> | 16 | #include <linux/io.h> |
17 | #include <linux/of.h> | ||
18 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
19 | #include <linux/module.h> | ||
20 | #include <linux/mmc/host.h> | 18 | #include <linux/mmc/host.h> |
21 | #include "sdhci-pltfm.h" | 19 | #include "sdhci-pltfm.h" |
22 | #include "sdhci-esdhc.h" | 20 | #include "sdhci-esdhc.h" |
23 | 21 | ||
24 | #define VENDOR_V_22 0x12 | ||
25 | #define VENDOR_V_23 0x13 | ||
26 | static u32 esdhc_readl(struct sdhci_host *host, int reg) | ||
27 | { | ||
28 | u32 ret; | ||
29 | |||
30 | ret = in_be32(host->ioaddr + reg); | ||
31 | /* | ||
32 | * The bit of ADMA flag in eSDHC is not compatible with standard | ||
33 | * SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is | ||
34 | * supported by eSDHC. | ||
35 | * And for many FSL eSDHC controller, the reset value of field | ||
36 | * SDHCI_CAN_DO_ADMA1 is one, but some of them can't support ADMA, | ||
37 | * only these vendor version is greater than 2.2/0x12 support ADMA. | ||
38 | * For FSL eSDHC, must aligned 4-byte, so use 0xFC to read the | ||
39 | * the verdor version number, oxFE is SDHCI_HOST_VERSION. | ||
40 | */ | ||
41 | if ((reg == SDHCI_CAPABILITIES) && (ret & SDHCI_CAN_DO_ADMA1)) { | ||
42 | u32 tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); | ||
43 | tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; | ||
44 | if (tmp > VENDOR_V_22) | ||
45 | ret |= SDHCI_CAN_DO_ADMA2; | ||
46 | } | ||
47 | |||
48 | return ret; | ||
49 | } | ||
50 | |||
51 | static u16 esdhc_readw(struct sdhci_host *host, int reg) | 22 | static u16 esdhc_readw(struct sdhci_host *host, int reg) |
52 | { | 23 | { |
53 | u16 ret; | 24 | u16 ret; |
54 | int base = reg & ~0x3; | ||
55 | int shift = (reg & 0x2) * 8; | ||
56 | 25 | ||
57 | if (unlikely(reg == SDHCI_HOST_VERSION)) | 26 | if (unlikely(reg == SDHCI_HOST_VERSION)) |
58 | ret = in_be32(host->ioaddr + base) & 0xffff; | 27 | ret = in_be16(host->ioaddr + reg); |
59 | else | 28 | else |
60 | ret = (in_be32(host->ioaddr + base) >> shift) & 0xffff; | 29 | ret = sdhci_be32bs_readw(host, reg); |
61 | return ret; | 30 | return ret; |
62 | } | 31 | } |
63 | 32 | ||
64 | static u8 esdhc_readb(struct sdhci_host *host, int reg) | ||
65 | { | ||
66 | int base = reg & ~0x3; | ||
67 | int shift = (reg & 0x3) * 8; | ||
68 | u8 ret = (in_be32(host->ioaddr + base) >> shift) & 0xff; | ||
69 | |||
70 | /* | ||
71 | * "DMA select" locates at offset 0x28 in SD specification, but on | ||
72 | * P5020 or P3041, it locates at 0x29. | ||
73 | */ | ||
74 | if (reg == SDHCI_HOST_CONTROL) { | ||
75 | u32 dma_bits; | ||
76 | |||
77 | dma_bits = in_be32(host->ioaddr + reg); | ||
78 | /* DMA select is 22,23 bits in Protocol Control Register */ | ||
79 | dma_bits = (dma_bits >> 5) & SDHCI_CTRL_DMA_MASK; | ||
80 | |||
81 | /* fixup the result */ | ||
82 | ret &= ~SDHCI_CTRL_DMA_MASK; | ||
83 | ret |= dma_bits; | ||
84 | } | ||
85 | |||
86 | return ret; | ||
87 | } | ||
88 | |||
89 | static void esdhc_writel(struct sdhci_host *host, u32 val, int reg) | ||
90 | { | ||
91 | /* | ||
92 | * Enable IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE] | ||
93 | * when SYSCTL[RSTD]) is set for some special operations. | ||
94 | * No any impact other operation. | ||
95 | */ | ||
96 | if (reg == SDHCI_INT_ENABLE) | ||
97 | val |= SDHCI_INT_BLK_GAP; | ||
98 | sdhci_be32bs_writel(host, val, reg); | ||
99 | } | ||
100 | |||
101 | static void esdhc_writew(struct sdhci_host *host, u16 val, int reg) | 33 | static void esdhc_writew(struct sdhci_host *host, u16 val, int reg) |
102 | { | 34 | { |
103 | if (reg == SDHCI_BLOCK_SIZE) { | 35 | if (reg == SDHCI_BLOCK_SIZE) { |
@@ -113,62 +45,12 @@ static void esdhc_writew(struct sdhci_host *host, u16 val, int reg) | |||
113 | 45 | ||
114 | static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg) | 46 | static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg) |
115 | { | 47 | { |
116 | /* | ||
117 | * "DMA select" location is offset 0x28 in SD specification, but on | ||
118 | * P5020 or P3041, it's located at 0x29. | ||
119 | */ | ||
120 | if (reg == SDHCI_HOST_CONTROL) { | ||
121 | u32 dma_bits; | ||
122 | |||
123 | /* DMA select is 22,23 bits in Protocol Control Register */ | ||
124 | dma_bits = (val & SDHCI_CTRL_DMA_MASK) << 5; | ||
125 | clrsetbits_be32(host->ioaddr + reg , SDHCI_CTRL_DMA_MASK << 5, | ||
126 | dma_bits); | ||
127 | val &= ~SDHCI_CTRL_DMA_MASK; | ||
128 | val |= in_be32(host->ioaddr + reg) & SDHCI_CTRL_DMA_MASK; | ||
129 | } | ||
130 | |||
131 | /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */ | 48 | /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */ |
132 | if (reg == SDHCI_HOST_CONTROL) | 49 | if (reg == SDHCI_HOST_CONTROL) |
133 | val &= ~ESDHC_HOST_CONTROL_RES; | 50 | val &= ~ESDHC_HOST_CONTROL_RES; |
134 | sdhci_be32bs_writeb(host, val, reg); | 51 | sdhci_be32bs_writeb(host, val, reg); |
135 | } | 52 | } |
136 | 53 | ||
137 | /* | ||
138 | * For Abort or Suspend after Stop at Block Gap, ignore the ADMA | ||
139 | * error (IRQSTAT[ADMAE]) if both Transfer Complete (IRQSTAT[TC]) | ||
140 | * and Block Gap Event (IRQSTAT[BGE]) are also set. | ||
141 | * For Continue, apply a soft reset for data (SYSCTL[RSTD]) | ||
142 | * and re-issue the entire read transaction from the beginning. | ||
143 | */ | ||
144 | static void esdhci_of_adma_workaround(struct sdhci_host *host, u32 intmask) | ||
145 | { | ||
146 | u32 tmp; | ||
147 | bool applicable; | ||
148 | dma_addr_t dmastart; | ||
149 | dma_addr_t dmanow; | ||
150 | |||
151 | tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); | ||
152 | tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; | ||
153 | |||
154 | applicable = (intmask & SDHCI_INT_DATA_END) && | ||
155 | (intmask & SDHCI_INT_BLK_GAP) && | ||
156 | (tmp == VENDOR_V_23); | ||
157 | if (!applicable) | ||
158 | return; | ||
159 | |||
160 | host->data->error = 0; | ||
161 | dmastart = sg_dma_address(host->data->sg); | ||
162 | dmanow = dmastart + host->data->bytes_xfered; | ||
163 | /* | ||
164 | * Force update to the next DMA block boundary. | ||
165 | */ | ||
166 | dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + | ||
167 | SDHCI_DEFAULT_BOUNDARY_SIZE; | ||
168 | host->data->bytes_xfered = dmanow - dmastart; | ||
169 | sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS); | ||
170 | } | ||
171 | |||
172 | static int esdhc_of_enable_dma(struct sdhci_host *host) | 54 | static int esdhc_of_enable_dma(struct sdhci_host *host) |
173 | { | 55 | { |
174 | setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP); | 56 | setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP); |
@@ -189,83 +71,32 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) | |||
189 | return pltfm_host->clock / 256 / 16; | 71 | return pltfm_host->clock / 256 / 16; |
190 | } | 72 | } |
191 | 73 | ||
192 | static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) | ||
193 | { | ||
194 | /* Workaround to reduce the clock frequency for p1010 esdhc */ | ||
195 | if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) { | ||
196 | if (clock > 20000000) | ||
197 | clock -= 5000000; | ||
198 | if (clock > 40000000) | ||
199 | clock -= 5000000; | ||
200 | } | ||
201 | |||
202 | /* Set the clock */ | ||
203 | esdhc_set_clock(host, clock); | ||
204 | } | ||
205 | |||
206 | #ifdef CONFIG_PM | ||
207 | static u32 esdhc_proctl; | ||
208 | static void esdhc_of_suspend(struct sdhci_host *host) | ||
209 | { | ||
210 | esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL); | ||
211 | } | ||
212 | |||
213 | static void esdhc_of_resume(struct sdhci_host *host) | ||
214 | { | ||
215 | esdhc_of_enable_dma(host); | ||
216 | sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL); | ||
217 | } | ||
218 | #endif | ||
219 | |||
220 | static void esdhc_of_platform_init(struct sdhci_host *host) | ||
221 | { | ||
222 | u32 vvn; | ||
223 | |||
224 | vvn = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); | ||
225 | vvn = (vvn & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; | ||
226 | if (vvn == VENDOR_V_22) | ||
227 | host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23; | ||
228 | |||
229 | if (vvn > VENDOR_V_22) | ||
230 | host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; | ||
231 | } | ||
232 | |||
233 | static struct sdhci_ops sdhci_esdhc_ops = { | 74 | static struct sdhci_ops sdhci_esdhc_ops = { |
234 | .read_l = esdhc_readl, | 75 | .read_l = sdhci_be32bs_readl, |
235 | .read_w = esdhc_readw, | 76 | .read_w = esdhc_readw, |
236 | .read_b = esdhc_readb, | 77 | .read_b = sdhci_be32bs_readb, |
237 | .write_l = esdhc_writel, | 78 | .write_l = sdhci_be32bs_writel, |
238 | .write_w = esdhc_writew, | 79 | .write_w = esdhc_writew, |
239 | .write_b = esdhc_writeb, | 80 | .write_b = esdhc_writeb, |
240 | .set_clock = esdhc_of_set_clock, | 81 | .set_clock = esdhc_set_clock, |
241 | .enable_dma = esdhc_of_enable_dma, | 82 | .enable_dma = esdhc_of_enable_dma, |
242 | .get_max_clock = esdhc_of_get_max_clock, | 83 | .get_max_clock = esdhc_of_get_max_clock, |
243 | .get_min_clock = esdhc_of_get_min_clock, | 84 | .get_min_clock = esdhc_of_get_min_clock, |
244 | .platform_init = esdhc_of_platform_init, | ||
245 | #ifdef CONFIG_PM | ||
246 | .platform_suspend = esdhc_of_suspend, | ||
247 | .platform_resume = esdhc_of_resume, | ||
248 | #endif | ||
249 | .adma_workaround = esdhci_of_adma_workaround, | ||
250 | }; | 85 | }; |
251 | 86 | ||
252 | static struct sdhci_pltfm_data sdhci_esdhc_pdata = { | 87 | static struct sdhci_pltfm_data sdhci_esdhc_pdata = { |
253 | /* | 88 | /* card detection could be handled via GPIO */ |
254 | * card detection could be handled via GPIO | ||
255 | * eSDHC cannot support End Attribute in NOP ADMA descriptor | ||
256 | */ | ||
257 | .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION | 89 | .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION |
258 | | SDHCI_QUIRK_NO_CARD_NO_RESET | 90 | | SDHCI_QUIRK_NO_CARD_NO_RESET, |
259 | | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, | ||
260 | .ops = &sdhci_esdhc_ops, | 91 | .ops = &sdhci_esdhc_ops, |
261 | }; | 92 | }; |
262 | 93 | ||
263 | static int sdhci_esdhc_probe(struct platform_device *pdev) | 94 | static int __devinit sdhci_esdhc_probe(struct platform_device *pdev) |
264 | { | 95 | { |
265 | return sdhci_pltfm_register(pdev, &sdhci_esdhc_pdata); | 96 | return sdhci_pltfm_register(pdev, &sdhci_esdhc_pdata); |
266 | } | 97 | } |
267 | 98 | ||
268 | static int sdhci_esdhc_remove(struct platform_device *pdev) | 99 | static int __devexit sdhci_esdhc_remove(struct platform_device *pdev) |
269 | { | 100 | { |
270 | return sdhci_pltfm_unregister(pdev); | 101 | return sdhci_pltfm_unregister(pdev); |
271 | } | 102 | } |
@@ -283,13 +114,26 @@ static struct platform_driver sdhci_esdhc_driver = { | |||
283 | .name = "sdhci-esdhc", | 114 | .name = "sdhci-esdhc", |
284 | .owner = THIS_MODULE, | 115 | .owner = THIS_MODULE, |
285 | .of_match_table = sdhci_esdhc_of_match, | 116 | .of_match_table = sdhci_esdhc_of_match, |
286 | .pm = SDHCI_PLTFM_PMOPS, | ||
287 | }, | 117 | }, |
288 | .probe = sdhci_esdhc_probe, | 118 | .probe = sdhci_esdhc_probe, |
289 | .remove = sdhci_esdhc_remove, | 119 | .remove = __devexit_p(sdhci_esdhc_remove), |
120 | #ifdef CONFIG_PM | ||
121 | .suspend = sdhci_pltfm_suspend, | ||
122 | .resume = sdhci_pltfm_resume, | ||
123 | #endif | ||
290 | }; | 124 | }; |
291 | 125 | ||
292 | module_platform_driver(sdhci_esdhc_driver); | 126 | static int __init sdhci_esdhc_init(void) |
127 | { | ||
128 | return platform_driver_register(&sdhci_esdhc_driver); | ||
129 | } | ||
130 | module_init(sdhci_esdhc_init); | ||
131 | |||
132 | static void __exit sdhci_esdhc_exit(void) | ||
133 | { | ||
134 | platform_driver_unregister(&sdhci_esdhc_driver); | ||
135 | } | ||
136 | module_exit(sdhci_esdhc_exit); | ||
293 | 137 | ||
294 | MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC"); | 138 | MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC"); |
295 | MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, " | 139 | MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, " |
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c index c3d3715ec3d..735be131dca 100644 --- a/drivers/mmc/host/sdhci-of-hlwd.c +++ b/drivers/mmc/host/sdhci-of-hlwd.c | |||
@@ -20,7 +20,6 @@ | |||
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
23 | #include <linux/module.h> | ||
24 | #include <linux/mmc/host.h> | 23 | #include <linux/mmc/host.h> |
25 | #include "sdhci-pltfm.h" | 24 | #include "sdhci-pltfm.h" |
26 | 25 | ||
@@ -66,12 +65,12 @@ static struct sdhci_pltfm_data sdhci_hlwd_pdata = { | |||
66 | .ops = &sdhci_hlwd_ops, | 65 | .ops = &sdhci_hlwd_ops, |
67 | }; | 66 | }; |
68 | 67 | ||
69 | static int sdhci_hlwd_probe(struct platform_device *pdev) | 68 | static int __devinit sdhci_hlwd_probe(struct platform_device *pdev) |
70 | { | 69 | { |
71 | return sdhci_pltfm_register(pdev, &sdhci_hlwd_pdata); | 70 | return sdhci_pltfm_register(pdev, &sdhci_hlwd_pdata); |
72 | } | 71 | } |
73 | 72 | ||
74 | static int sdhci_hlwd_remove(struct platform_device *pdev) | 73 | static int __devexit sdhci_hlwd_remove(struct platform_device *pdev) |
75 | { | 74 | { |
76 | return sdhci_pltfm_unregister(pdev); | 75 | return sdhci_pltfm_unregister(pdev); |
77 | } | 76 | } |
@@ -87,13 +86,26 @@ static struct platform_driver sdhci_hlwd_driver = { | |||
87 | .name = "sdhci-hlwd", | 86 | .name = "sdhci-hlwd", |
88 | .owner = THIS_MODULE, | 87 | .owner = THIS_MODULE, |
89 | .of_match_table = sdhci_hlwd_of_match, | 88 | .of_match_table = sdhci_hlwd_of_match, |
90 | .pm = SDHCI_PLTFM_PMOPS, | ||
91 | }, | 89 | }, |
92 | .probe = sdhci_hlwd_probe, | 90 | .probe = sdhci_hlwd_probe, |
93 | .remove = sdhci_hlwd_remove, | 91 | .remove = __devexit_p(sdhci_hlwd_remove), |
92 | #ifdef CONFIG_PM | ||
93 | .suspend = sdhci_pltfm_suspend, | ||
94 | .resume = sdhci_pltfm_resume, | ||
95 | #endif | ||
94 | }; | 96 | }; |
95 | 97 | ||
96 | module_platform_driver(sdhci_hlwd_driver); | 98 | static int __init sdhci_hlwd_init(void) |
99 | { | ||
100 | return platform_driver_register(&sdhci_hlwd_driver); | ||
101 | } | ||
102 | module_init(sdhci_hlwd_init); | ||
103 | |||
104 | static void __exit sdhci_hlwd_exit(void) | ||
105 | { | ||
106 | platform_driver_unregister(&sdhci_hlwd_driver); | ||
107 | } | ||
108 | module_exit(sdhci_hlwd_exit); | ||
97 | 109 | ||
98 | MODULE_DESCRIPTION("Nintendo Wii SDHCI OF driver"); | 110 | MODULE_DESCRIPTION("Nintendo Wii SDHCI OF driver"); |
99 | MODULE_AUTHOR("The GameCube Linux Team, Albert Herranz"); | 111 | MODULE_AUTHOR("The GameCube Linux Team, Albert Herranz"); |
diff --git a/drivers/mmc/host/sdhci-pci-data.c b/drivers/mmc/host/sdhci-pci-data.c deleted file mode 100644 index a611217769f..00000000000 --- a/drivers/mmc/host/sdhci-pci-data.c +++ /dev/null | |||
@@ -1,5 +0,0 @@ | |||
1 | #include <linux/module.h> | ||
2 | #include <linux/mmc/sdhci-pci-data.h> | ||
3 | |||
4 | struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev, int slotno); | ||
5 | EXPORT_SYMBOL_GPL(sdhci_pci_get_data); | ||
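The deleted file is nothing but an exported function-pointer hook; board or platform code assigns it so sdhci-pci can look up per-slot data for a given PCI device. A hypothetical user would look roughly like this (the names, the initcall level, and which struct fields to fill are assumptions, not from the patch):

#include <linux/pci.h>
#include <linux/mmc/sdhci-pci-data.h>

/* fill per-slot fields (GPIOs, callbacks) as required by the board */
static struct sdhci_pci_data my_slot0_data;

static struct sdhci_pci_data *my_get_data(struct pci_dev *pdev, int slotno)
{
	return slotno == 0 ? &my_slot0_data : NULL;
}

static int __init my_board_sdhci_setup(void)
{
	sdhci_pci_get_data = my_get_data;
	return 0;
}
arch_initcall(my_board_sdhci_setup);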
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index c7dd0cbc99d..26c528648f3 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c | |||
@@ -14,7 +14,6 @@ | |||
14 | 14 | ||
15 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
16 | #include <linux/highmem.h> | 16 | #include <linux/highmem.h> |
17 | #include <linux/module.h> | ||
18 | #include <linux/pci.h> | 17 | #include <linux/pci.h> |
19 | #include <linux/dma-mapping.h> | 18 | #include <linux/dma-mapping.h> |
20 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
@@ -22,19 +21,10 @@ | |||
22 | #include <linux/mmc/host.h> | 21 | #include <linux/mmc/host.h> |
23 | #include <linux/scatterlist.h> | 22 | #include <linux/scatterlist.h> |
24 | #include <linux/io.h> | 23 | #include <linux/io.h> |
25 | #include <linux/gpio.h> | ||
26 | #include <linux/pm_runtime.h> | ||
27 | #include <linux/mmc/sdhci-pci-data.h> | ||
28 | 24 | ||
29 | #include "sdhci.h" | 25 | #include "sdhci.h" |
30 | 26 | ||
31 | /* | 27 | /* |
32 | * PCI device IDs | ||
33 | */ | ||
34 | #define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809 | ||
35 | #define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a | ||
36 | |||
37 | /* | ||
38 | * PCI registers | 28 | * PCI registers |
39 | */ | 29 | */ |
40 | 30 | ||
@@ -53,35 +43,28 @@ struct sdhci_pci_slot; | |||
53 | 43 | ||
54 | struct sdhci_pci_fixes { | 44 | struct sdhci_pci_fixes { |
55 | unsigned int quirks; | 45 | unsigned int quirks; |
56 | unsigned int quirks2; | ||
57 | bool allow_runtime_pm; | ||
58 | 46 | ||
59 | int (*probe) (struct sdhci_pci_chip *); | 47 | int (*probe) (struct sdhci_pci_chip *); |
60 | 48 | ||
61 | int (*probe_slot) (struct sdhci_pci_slot *); | 49 | int (*probe_slot) (struct sdhci_pci_slot *); |
62 | void (*remove_slot) (struct sdhci_pci_slot *, int); | 50 | void (*remove_slot) (struct sdhci_pci_slot *, int); |
63 | 51 | ||
64 | int (*suspend) (struct sdhci_pci_chip *); | 52 | int (*suspend) (struct sdhci_pci_chip *, |
53 | pm_message_t); | ||
65 | int (*resume) (struct sdhci_pci_chip *); | 54 | int (*resume) (struct sdhci_pci_chip *); |
66 | }; | 55 | }; |
67 | 56 | ||
68 | struct sdhci_pci_slot { | 57 | struct sdhci_pci_slot { |
69 | struct sdhci_pci_chip *chip; | 58 | struct sdhci_pci_chip *chip; |
70 | struct sdhci_host *host; | 59 | struct sdhci_host *host; |
71 | struct sdhci_pci_data *data; | ||
72 | 60 | ||
73 | int pci_bar; | 61 | int pci_bar; |
74 | int rst_n_gpio; | ||
75 | int cd_gpio; | ||
76 | int cd_irq; | ||
77 | }; | 62 | }; |
78 | 63 | ||
79 | struct sdhci_pci_chip { | 64 | struct sdhci_pci_chip { |
80 | struct pci_dev *pdev; | 65 | struct pci_dev *pdev; |
81 | 66 | ||
82 | unsigned int quirks; | 67 | unsigned int quirks; |
83 | unsigned int quirks2; | ||
84 | bool allow_runtime_pm; | ||
85 | const struct sdhci_pci_fixes *fixes; | 68 | const struct sdhci_pci_fixes *fixes; |
86 | 69 | ||
87 | int num_slots; /* Slots on controller */ | 70 | int num_slots; /* Slots on controller */ |
@@ -114,7 +97,6 @@ static int ricoh_mmc_probe_slot(struct sdhci_pci_slot *slot) | |||
114 | 97 | ||
115 | SDHCI_TIMEOUT_CLK_UNIT | | 98 | SDHCI_TIMEOUT_CLK_UNIT | |
116 | SDHCI_CAN_VDD_330 | | 99 | SDHCI_CAN_VDD_330 | |
117 | SDHCI_CAN_DO_HISPD | | ||
118 | SDHCI_CAN_DO_SDMA; | 100 | SDHCI_CAN_DO_SDMA; |
119 | return 0; | 101 | return 0; |
120 | } | 102 | } |
@@ -158,7 +140,6 @@ static const struct sdhci_pci_fixes sdhci_ene_714 = { | |||
158 | static const struct sdhci_pci_fixes sdhci_cafe = { | 140 | static const struct sdhci_pci_fixes sdhci_cafe = { |
159 | .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | | 141 | .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | |
160 | SDHCI_QUIRK_NO_BUSY_IRQ | | 142 | SDHCI_QUIRK_NO_BUSY_IRQ | |
161 | SDHCI_QUIRK_BROKEN_CARD_DETECTION | | ||
162 | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, | 143 | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, |
163 | }; | 144 | }; |
164 | 145 | ||
@@ -182,92 +163,9 @@ static int mrst_hc_probe(struct sdhci_pci_chip *chip) | |||
182 | return 0; | 163 | return 0; |
183 | } | 164 | } |
184 | 165 | ||
185 | static int pch_hc_probe_slot(struct sdhci_pci_slot *slot) | ||
186 | { | ||
187 | slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA; | ||
188 | return 0; | ||
189 | } | ||
190 | |||
191 | #ifdef CONFIG_PM_RUNTIME | ||
192 | |||
193 | static irqreturn_t sdhci_pci_sd_cd(int irq, void *dev_id) | ||
194 | { | ||
195 | struct sdhci_pci_slot *slot = dev_id; | ||
196 | struct sdhci_host *host = slot->host; | ||
197 | |||
198 | mmc_detect_change(host->mmc, msecs_to_jiffies(200)); | ||
199 | return IRQ_HANDLED; | ||
200 | } | ||
201 | |||
202 | static void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot) | ||
203 | { | ||
204 | int err, irq, gpio = slot->cd_gpio; | ||
205 | |||
206 | slot->cd_gpio = -EINVAL; | ||
207 | slot->cd_irq = -EINVAL; | ||
208 | |||
209 | if (!gpio_is_valid(gpio)) | ||
210 | return; | ||
211 | |||
212 | err = gpio_request(gpio, "sd_cd"); | ||
213 | if (err < 0) | ||
214 | goto out; | ||
215 | |||
216 | err = gpio_direction_input(gpio); | ||
217 | if (err < 0) | ||
218 | goto out_free; | ||
219 | |||
220 | irq = gpio_to_irq(gpio); | ||
221 | if (irq < 0) | ||
222 | goto out_free; | ||
223 | |||
224 | err = request_irq(irq, sdhci_pci_sd_cd, IRQF_TRIGGER_RISING | | ||
225 | IRQF_TRIGGER_FALLING, "sd_cd", slot); | ||
226 | if (err) | ||
227 | goto out_free; | ||
228 | |||
229 | slot->cd_gpio = gpio; | ||
230 | slot->cd_irq = irq; | ||
231 | |||
232 | return; | ||
233 | |||
234 | out_free: | ||
235 | gpio_free(gpio); | ||
236 | out: | ||
237 | dev_warn(&slot->chip->pdev->dev, "failed to setup card detect wake up\n"); | ||
238 | } | ||
239 | |||
240 | static void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot) | ||
241 | { | ||
242 | if (slot->cd_irq >= 0) | ||
243 | free_irq(slot->cd_irq, slot); | ||
244 | if (gpio_is_valid(slot->cd_gpio)) | ||
245 | gpio_free(slot->cd_gpio); | ||
246 | } | ||
247 | |||
248 | #else | ||
249 | |||
250 | static inline void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot) | ||
251 | { | ||
252 | } | ||
253 | |||
254 | static inline void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot) | ||
255 | { | ||
256 | } | ||
257 | |||
258 | #endif | ||
259 | |||
260 | static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot) | 166 | static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot) |
261 | { | 167 | { |
262 | slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE; | 168 | slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA; |
263 | slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC | | ||
264 | MMC_CAP2_HC_ERASE_SZ; | ||
265 | return 0; | ||
266 | } | ||
267 | |||
268 | static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot) | ||
269 | { | ||
270 | slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE; | ||
271 | return 0; | 169 | return 0; |
272 | } | 170 | } |
273 | 171 | ||
@@ -283,27 +181,17 @@ static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = { | |||
283 | 181 | ||
284 | static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = { | 182 | static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = { |
285 | .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, | 183 | .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, |
286 | .allow_runtime_pm = true, | ||
287 | }; | 184 | }; |
288 | 185 | ||
289 | static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = { | 186 | static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = { |
290 | .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, | 187 | .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, |
291 | .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, | ||
292 | .allow_runtime_pm = true, | ||
293 | .probe_slot = mfd_sdio_probe_slot, | ||
294 | }; | 188 | }; |
295 | 189 | ||
296 | static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = { | 190 | static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = { |
297 | .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, | 191 | .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, |
298 | .allow_runtime_pm = true, | ||
299 | .probe_slot = mfd_emmc_probe_slot, | 192 | .probe_slot = mfd_emmc_probe_slot, |
300 | }; | 193 | }; |
301 | 194 | ||
302 | static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = { | ||
303 | .quirks = SDHCI_QUIRK_BROKEN_ADMA, | ||
304 | .probe_slot = pch_hc_probe_slot, | ||
305 | }; | ||
306 | |||
307 | /* O2Micro extra registers */ | 195 | /* O2Micro extra registers */ |
308 | #define O2_SD_LOCK_WP 0xD3 | 196 | #define O2_SD_LOCK_WP 0xD3 |
309 | #define O2_SD_MULTI_VCC3V 0xEE | 197 | #define O2_SD_MULTI_VCC3V 0xEE |
@@ -529,7 +417,7 @@ static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead) | |||
529 | jmicron_enable_mmc(slot->host, 0); | 417 | jmicron_enable_mmc(slot->host, 0); |
530 | } | 418 | } |
531 | 419 | ||
532 | static int jmicron_suspend(struct sdhci_pci_chip *chip) | 420 | static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state) |
533 | { | 421 | { |
534 | int i; | 422 | int i; |
535 | 423 | ||
@@ -654,7 +542,7 @@ static const struct sdhci_pci_fixes sdhci_via = { | |||
654 | .probe = via_probe, | 542 | .probe = via_probe, |
655 | }; | 543 | }; |
656 | 544 | ||
657 | static const struct pci_device_id pci_ids[] = { | 545 | static const struct pci_device_id pci_ids[] __devinitdata = { |
658 | { | 546 | { |
659 | .vendor = PCI_VENDOR_ID_RICOH, | 547 | .vendor = PCI_VENDOR_ID_RICOH, |
660 | .device = PCI_DEVICE_ID_RICOH_R5C822, | 548 | .device = PCI_DEVICE_ID_RICOH_R5C822, |
@@ -840,22 +728,6 @@ static const struct pci_device_id pci_ids[] = { | |||
840 | }, | 728 | }, |
841 | 729 | ||
842 | { | 730 | { |
843 | .vendor = PCI_VENDOR_ID_INTEL, | ||
844 | .device = PCI_DEVICE_ID_INTEL_PCH_SDIO0, | ||
845 | .subvendor = PCI_ANY_ID, | ||
846 | .subdevice = PCI_ANY_ID, | ||
847 | .driver_data = (kernel_ulong_t)&sdhci_intel_pch_sdio, | ||
848 | }, | ||
849 | |||
850 | { | ||
851 | .vendor = PCI_VENDOR_ID_INTEL, | ||
852 | .device = PCI_DEVICE_ID_INTEL_PCH_SDIO1, | ||
853 | .subvendor = PCI_ANY_ID, | ||
854 | .subdevice = PCI_ANY_ID, | ||
855 | .driver_data = (kernel_ulong_t)&sdhci_intel_pch_sdio, | ||
856 | }, | ||
857 | |||
858 | { | ||
859 | .vendor = PCI_VENDOR_ID_O2, | 731 | .vendor = PCI_VENDOR_ID_O2, |
860 | .device = PCI_DEVICE_ID_O2_8120, | 732 | .device = PCI_DEVICE_ID_O2_8120, |
861 | .subvendor = PCI_ANY_ID, | 733 | .subvendor = PCI_ANY_ID, |
@@ -960,25 +832,9 @@ static int sdhci_pci_8bit_width(struct sdhci_host *host, int width) | |||
960 | return 0; | 832 | return 0; |
961 | } | 833 | } |
962 | 834 | ||
963 | static void sdhci_pci_hw_reset(struct sdhci_host *host) | ||
964 | { | ||
965 | struct sdhci_pci_slot *slot = sdhci_priv(host); | ||
966 | int rst_n_gpio = slot->rst_n_gpio; | ||
967 | |||
968 | if (!gpio_is_valid(rst_n_gpio)) | ||
969 | return; | ||
970 | gpio_set_value_cansleep(rst_n_gpio, 0); | ||
971 | /* For eMMC, minimum is 1us but give it 10us for good measure */ | ||
972 | udelay(10); | ||
973 | gpio_set_value_cansleep(rst_n_gpio, 1); | ||
974 | /* For eMMC, minimum is 200us but give it 300us for good measure */ | ||
975 | usleep_range(300, 1000); | ||
976 | } | ||
977 | |||
978 | static struct sdhci_ops sdhci_pci_ops = { | 835 | static struct sdhci_ops sdhci_pci_ops = { |
979 | .enable_dma = sdhci_pci_enable_dma, | 836 | .enable_dma = sdhci_pci_enable_dma, |
980 | .platform_8bit_width = sdhci_pci_8bit_width, | 837 | .platform_8bit_width = sdhci_pci_8bit_width, |
981 | .hw_reset = sdhci_pci_hw_reset, | ||
982 | }; | 838 | }; |
983 | 839 | ||
984 | /*****************************************************************************\ | 840 | /*****************************************************************************\ |
@@ -989,9 +845,8 @@ static struct sdhci_ops sdhci_pci_ops = { | |||
989 | 845 | ||
990 | #ifdef CONFIG_PM | 846 | #ifdef CONFIG_PM |
991 | 847 | ||
992 | static int sdhci_pci_suspend(struct device *dev) | 848 | static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state) |
993 | { | 849 | { |
994 | struct pci_dev *pdev = to_pci_dev(dev); | ||
995 | struct sdhci_pci_chip *chip; | 850 | struct sdhci_pci_chip *chip; |
996 | struct sdhci_pci_slot *slot; | 851 | struct sdhci_pci_slot *slot; |
997 | mmc_pm_flag_t slot_pm_flags; | 852 | mmc_pm_flag_t slot_pm_flags; |
@@ -1007,10 +862,13 @@ static int sdhci_pci_suspend(struct device *dev) | |||
1007 | if (!slot) | 862 | if (!slot) |
1008 | continue; | 863 | continue; |
1009 | 864 | ||
1010 | ret = sdhci_suspend_host(slot->host); | 865 | ret = sdhci_suspend_host(slot->host, state); |
1011 | 866 | ||
1012 | if (ret) | 867 | if (ret) { |
1013 | goto err_pci_suspend; | 868 | for (i--; i >= 0; i--) |
869 | sdhci_resume_host(chip->slots[i]->host); | ||
870 | return ret; | ||
871 | } | ||
1014 | 872 | ||
1015 | slot_pm_flags = slot->host->mmc->pm_flags; | 873 | slot_pm_flags = slot->host->mmc->pm_flags; |
1016 | if (slot_pm_flags & MMC_PM_WAKE_SDIO_IRQ) | 874 | if (slot_pm_flags & MMC_PM_WAKE_SDIO_IRQ) |
@@ -1020,9 +878,12 @@ static int sdhci_pci_suspend(struct device *dev) | |||
1020 | } | 878 | } |
1021 | 879 | ||
1022 | if (chip->fixes && chip->fixes->suspend) { | 880 | if (chip->fixes && chip->fixes->suspend) { |
1023 | ret = chip->fixes->suspend(chip); | 881 | ret = chip->fixes->suspend(chip, state); |
1024 | if (ret) | 882 | if (ret) { |
1025 | goto err_pci_suspend; | 883 | for (i = chip->num_slots - 1; i >= 0; i--) |
884 | sdhci_resume_host(chip->slots[i]->host); | ||
885 | return ret; | ||
886 | } | ||
1026 | } | 887 | } |
1027 | 888 | ||
1028 | pci_save_state(pdev); | 889 | pci_save_state(pdev); |
@@ -1033,22 +894,16 @@ static int sdhci_pci_suspend(struct device *dev) | |||
1033 | } | 894 | } |
1034 | pci_set_power_state(pdev, PCI_D3hot); | 895 | pci_set_power_state(pdev, PCI_D3hot); |
1035 | } else { | 896 | } else { |
1036 | pci_enable_wake(pdev, PCI_D3hot, 0); | 897 | pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); |
1037 | pci_disable_device(pdev); | 898 | pci_disable_device(pdev); |
1038 | pci_set_power_state(pdev, PCI_D3hot); | 899 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); |
1039 | } | 900 | } |
1040 | 901 | ||
1041 | return 0; | 902 | return 0; |
1042 | |||
1043 | err_pci_suspend: | ||
1044 | while (--i >= 0) | ||
1045 | sdhci_resume_host(chip->slots[i]->host); | ||
1046 | return ret; | ||
1047 | } | 903 | } |
1048 | 904 | ||
1049 | static int sdhci_pci_resume(struct device *dev) | 905 | static int sdhci_pci_resume(struct pci_dev *pdev) |
1050 | { | 906 | { |
1051 | struct pci_dev *pdev = to_pci_dev(dev); | ||
1052 | struct sdhci_pci_chip *chip; | 907 | struct sdhci_pci_chip *chip; |
1053 | struct sdhci_pci_slot *slot; | 908 | struct sdhci_pci_slot *slot; |
1054 | int i, ret; | 909 | int i, ret; |
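Both variants of sdhci_pci_suspend() above unwind partially-suspended slots when one of them fails, either through the err_pci_suspend label or with an inline "for (i--; i >= 0; i--)" loop. A self-contained sketch of that unwind idiom follows; example_slot, example_suspend_one and example_resume_one are stand-ins, not driver APIs.

        /* Stand-ins for the per-slot suspend/resume operations. */
        struct example_slot { int suspended; };

        static int example_suspend_one(struct example_slot *s)
        {
                s->suspended = 1;
                return 0;
        }

        static void example_resume_one(struct example_slot *s)
        {
                s->suspended = 0;
        }

        /* Suspend every slot; on failure, resume the ones already
         * suspended, newest first, and report the error. */
        static int example_suspend_all(struct example_slot **slots, int n)
        {
                int i, ret;

                for (i = 0; i < n; i++) {
                        ret = example_suspend_one(slots[i]);
                        if (ret)
                                goto err;
                }
                return 0;

        err:
                while (--i >= 0)
                        example_resume_one(slots[i]);
                return ret;
        }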
@@ -1089,115 +944,25 @@ static int sdhci_pci_resume(struct device *dev) | |||
1089 | 944 | ||
1090 | #endif /* CONFIG_PM */ | 945 | #endif /* CONFIG_PM */ |
1091 | 946 | ||
1092 | #ifdef CONFIG_PM_RUNTIME | ||
1093 | |||
1094 | static int sdhci_pci_runtime_suspend(struct device *dev) | ||
1095 | { | ||
1096 | struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); | ||
1097 | struct sdhci_pci_chip *chip; | ||
1098 | struct sdhci_pci_slot *slot; | ||
1099 | int i, ret; | ||
1100 | |||
1101 | chip = pci_get_drvdata(pdev); | ||
1102 | if (!chip) | ||
1103 | return 0; | ||
1104 | |||
1105 | for (i = 0; i < chip->num_slots; i++) { | ||
1106 | slot = chip->slots[i]; | ||
1107 | if (!slot) | ||
1108 | continue; | ||
1109 | |||
1110 | ret = sdhci_runtime_suspend_host(slot->host); | ||
1111 | |||
1112 | if (ret) | ||
1113 | goto err_pci_runtime_suspend; | ||
1114 | } | ||
1115 | |||
1116 | if (chip->fixes && chip->fixes->suspend) { | ||
1117 | ret = chip->fixes->suspend(chip); | ||
1118 | if (ret) | ||
1119 | goto err_pci_runtime_suspend; | ||
1120 | } | ||
1121 | |||
1122 | return 0; | ||
1123 | |||
1124 | err_pci_runtime_suspend: | ||
1125 | while (--i >= 0) | ||
1126 | sdhci_runtime_resume_host(chip->slots[i]->host); | ||
1127 | return ret; | ||
1128 | } | ||
1129 | |||
1130 | static int sdhci_pci_runtime_resume(struct device *dev) | ||
1131 | { | ||
1132 | struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); | ||
1133 | struct sdhci_pci_chip *chip; | ||
1134 | struct sdhci_pci_slot *slot; | ||
1135 | int i, ret; | ||
1136 | |||
1137 | chip = pci_get_drvdata(pdev); | ||
1138 | if (!chip) | ||
1139 | return 0; | ||
1140 | |||
1141 | if (chip->fixes && chip->fixes->resume) { | ||
1142 | ret = chip->fixes->resume(chip); | ||
1143 | if (ret) | ||
1144 | return ret; | ||
1145 | } | ||
1146 | |||
1147 | for (i = 0; i < chip->num_slots; i++) { | ||
1148 | slot = chip->slots[i]; | ||
1149 | if (!slot) | ||
1150 | continue; | ||
1151 | |||
1152 | ret = sdhci_runtime_resume_host(slot->host); | ||
1153 | if (ret) | ||
1154 | return ret; | ||
1155 | } | ||
1156 | |||
1157 | return 0; | ||
1158 | } | ||
1159 | |||
1160 | static int sdhci_pci_runtime_idle(struct device *dev) | ||
1161 | { | ||
1162 | return 0; | ||
1163 | } | ||
1164 | |||
1165 | #else | ||
1166 | |||
1167 | #define sdhci_pci_runtime_suspend NULL | ||
1168 | #define sdhci_pci_runtime_resume NULL | ||
1169 | #define sdhci_pci_runtime_idle NULL | ||
1170 | |||
1171 | #endif | ||
1172 | |||
1173 | static const struct dev_pm_ops sdhci_pci_pm_ops = { | ||
1174 | .suspend = sdhci_pci_suspend, | ||
1175 | .resume = sdhci_pci_resume, | ||
1176 | .runtime_suspend = sdhci_pci_runtime_suspend, | ||
1177 | .runtime_resume = sdhci_pci_runtime_resume, | ||
1178 | .runtime_idle = sdhci_pci_runtime_idle, | ||
1179 | }; | ||
1180 | |||
1181 | /*****************************************************************************\ | 947 | /*****************************************************************************\ |
1182 | * * | 948 | * * |
1183 | * Device probing/removal * | 949 | * Device probing/removal * |
1184 | * * | 950 | * * |
1185 | \*****************************************************************************/ | 951 | \*****************************************************************************/ |
1186 | 952 | ||
1187 | static struct sdhci_pci_slot *sdhci_pci_probe_slot( | 953 | static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot( |
1188 | struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar, | 954 | struct pci_dev *pdev, struct sdhci_pci_chip *chip, int bar) |
1189 | int slotno) | ||
1190 | { | 955 | { |
1191 | struct sdhci_pci_slot *slot; | 956 | struct sdhci_pci_slot *slot; |
1192 | struct sdhci_host *host; | 957 | struct sdhci_host *host; |
1193 | int ret, bar = first_bar + slotno; | 958 | int ret; |
1194 | 959 | ||
1195 | if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { | 960 | if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { |
1196 | dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar); | 961 | dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar); |
1197 | return ERR_PTR(-ENODEV); | 962 | return ERR_PTR(-ENODEV); |
1198 | } | 963 | } |
1199 | 964 | ||
1200 | if (pci_resource_len(pdev, bar) < 0x100) { | 965 | if (pci_resource_len(pdev, bar) != 0x100) { |
1201 | dev_err(&pdev->dev, "Invalid iomem size. You may " | 966 | dev_err(&pdev->dev, "Invalid iomem size. You may " |
1202 | "experience problems.\n"); | 967 | "experience problems.\n"); |
1203 | } | 968 | } |
@@ -1223,36 +988,17 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot( | |||
1223 | slot->chip = chip; | 988 | slot->chip = chip; |
1224 | slot->host = host; | 989 | slot->host = host; |
1225 | slot->pci_bar = bar; | 990 | slot->pci_bar = bar; |
1226 | slot->rst_n_gpio = -EINVAL; | ||
1227 | slot->cd_gpio = -EINVAL; | ||
1228 | |||
1229 | /* Retrieve platform data if there is any */ | ||
1230 | if (*sdhci_pci_get_data) | ||
1231 | slot->data = sdhci_pci_get_data(pdev, slotno); | ||
1232 | |||
1233 | if (slot->data) { | ||
1234 | if (slot->data->setup) { | ||
1235 | ret = slot->data->setup(slot->data); | ||
1236 | if (ret) { | ||
1237 | dev_err(&pdev->dev, "platform setup failed\n"); | ||
1238 | goto free; | ||
1239 | } | ||
1240 | } | ||
1241 | slot->rst_n_gpio = slot->data->rst_n_gpio; | ||
1242 | slot->cd_gpio = slot->data->cd_gpio; | ||
1243 | } | ||
1244 | 991 | ||
1245 | host->hw_name = "PCI"; | 992 | host->hw_name = "PCI"; |
1246 | host->ops = &sdhci_pci_ops; | 993 | host->ops = &sdhci_pci_ops; |
1247 | host->quirks = chip->quirks; | 994 | host->quirks = chip->quirks; |
1248 | host->quirks2 = chip->quirks2; | ||
1249 | 995 | ||
1250 | host->irq = pdev->irq; | 996 | host->irq = pdev->irq; |
1251 | 997 | ||
1252 | ret = pci_request_region(pdev, bar, mmc_hostname(host->mmc)); | 998 | ret = pci_request_region(pdev, bar, mmc_hostname(host->mmc)); |
1253 | if (ret) { | 999 | if (ret) { |
1254 | dev_err(&pdev->dev, "cannot request region\n"); | 1000 | dev_err(&pdev->dev, "cannot request region\n"); |
1255 | goto cleanup; | 1001 | goto free; |
1256 | } | 1002 | } |
1257 | 1003 | ||
1258 | host->ioaddr = pci_ioremap_bar(pdev, bar); | 1004 | host->ioaddr = pci_ioremap_bar(pdev, bar); |
@@ -1268,30 +1014,15 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot( | |||
1268 | goto unmap; | 1014 | goto unmap; |
1269 | } | 1015 | } |
1270 | 1016 | ||
1271 | if (gpio_is_valid(slot->rst_n_gpio)) { | ||
1272 | if (!gpio_request(slot->rst_n_gpio, "eMMC_reset")) { | ||
1273 | gpio_direction_output(slot->rst_n_gpio, 1); | ||
1274 | slot->host->mmc->caps |= MMC_CAP_HW_RESET; | ||
1275 | } else { | ||
1276 | dev_warn(&pdev->dev, "failed to request rst_n_gpio\n"); | ||
1277 | slot->rst_n_gpio = -EINVAL; | ||
1278 | } | ||
1279 | } | ||
1280 | |||
1281 | host->mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ; | 1017 | host->mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ; |
1282 | 1018 | ||
1283 | ret = sdhci_add_host(host); | 1019 | ret = sdhci_add_host(host); |
1284 | if (ret) | 1020 | if (ret) |
1285 | goto remove; | 1021 | goto remove; |
1286 | 1022 | ||
1287 | sdhci_pci_add_own_cd(slot); | ||
1288 | |||
1289 | return slot; | 1023 | return slot; |
1290 | 1024 | ||
1291 | remove: | 1025 | remove: |
1292 | if (gpio_is_valid(slot->rst_n_gpio)) | ||
1293 | gpio_free(slot->rst_n_gpio); | ||
1294 | |||
1295 | if (chip->fixes && chip->fixes->remove_slot) | 1026 | if (chip->fixes && chip->fixes->remove_slot) |
1296 | chip->fixes->remove_slot(slot, 0); | 1027 | chip->fixes->remove_slot(slot, 0); |
1297 | 1028 | ||
@@ -1301,10 +1032,6 @@ unmap: | |||
1301 | release: | 1032 | release: |
1302 | pci_release_region(pdev, bar); | 1033 | pci_release_region(pdev, bar); |
1303 | 1034 | ||
1304 | cleanup: | ||
1305 | if (slot->data && slot->data->cleanup) | ||
1306 | slot->data->cleanup(slot->data); | ||
1307 | |||
1308 | free: | 1035 | free: |
1309 | sdhci_free_host(host); | 1036 | sdhci_free_host(host); |
1310 | 1037 | ||
@@ -1316,8 +1043,6 @@ static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot) | |||
1316 | int dead; | 1043 | int dead; |
1317 | u32 scratch; | 1044 | u32 scratch; |
1318 | 1045 | ||
1319 | sdhci_pci_remove_own_cd(slot); | ||
1320 | |||
1321 | dead = 0; | 1046 | dead = 0; |
1322 | scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS); | 1047 | scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS); |
1323 | if (scratch == (u32)-1) | 1048 | if (scratch == (u32)-1) |
@@ -1325,36 +1050,15 @@ static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot) | |||
1325 | 1050 | ||
1326 | sdhci_remove_host(slot->host, dead); | 1051 | sdhci_remove_host(slot->host, dead); |
1327 | 1052 | ||
1328 | if (gpio_is_valid(slot->rst_n_gpio)) | ||
1329 | gpio_free(slot->rst_n_gpio); | ||
1330 | |||
1331 | if (slot->chip->fixes && slot->chip->fixes->remove_slot) | 1053 | if (slot->chip->fixes && slot->chip->fixes->remove_slot) |
1332 | slot->chip->fixes->remove_slot(slot, dead); | 1054 | slot->chip->fixes->remove_slot(slot, dead); |
1333 | 1055 | ||
1334 | if (slot->data && slot->data->cleanup) | ||
1335 | slot->data->cleanup(slot->data); | ||
1336 | |||
1337 | pci_release_region(slot->chip->pdev, slot->pci_bar); | 1056 | pci_release_region(slot->chip->pdev, slot->pci_bar); |
1338 | 1057 | ||
1339 | sdhci_free_host(slot->host); | 1058 | sdhci_free_host(slot->host); |
1340 | } | 1059 | } |
1341 | 1060 | ||
1342 | static void sdhci_pci_runtime_pm_allow(struct device *dev) | 1061 | static int __devinit sdhci_pci_probe(struct pci_dev *pdev, |
1343 | { | ||
1344 | pm_runtime_put_noidle(dev); | ||
1345 | pm_runtime_allow(dev); | ||
1346 | pm_runtime_set_autosuspend_delay(dev, 50); | ||
1347 | pm_runtime_use_autosuspend(dev); | ||
1348 | pm_suspend_ignore_children(dev, 1); | ||
1349 | } | ||
1350 | |||
1351 | static void sdhci_pci_runtime_pm_forbid(struct device *dev) | ||
1352 | { | ||
1353 | pm_runtime_forbid(dev); | ||
1354 | pm_runtime_get_noresume(dev); | ||
1355 | } | ||
1356 | |||
1357 | static int sdhci_pci_probe(struct pci_dev *pdev, | ||
1358 | const struct pci_device_id *ent) | 1062 | const struct pci_device_id *ent) |
1359 | { | 1063 | { |
1360 | struct sdhci_pci_chip *chip; | 1064 | struct sdhci_pci_chip *chip; |
@@ -1403,11 +1107,8 @@ static int sdhci_pci_probe(struct pci_dev *pdev, | |||
1403 | 1107 | ||
1404 | chip->pdev = pdev; | 1108 | chip->pdev = pdev; |
1405 | chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data; | 1109 | chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data; |
1406 | if (chip->fixes) { | 1110 | if (chip->fixes) |
1407 | chip->quirks = chip->fixes->quirks; | 1111 | chip->quirks = chip->fixes->quirks; |
1408 | chip->quirks2 = chip->fixes->quirks2; | ||
1409 | chip->allow_runtime_pm = chip->fixes->allow_runtime_pm; | ||
1410 | } | ||
1411 | chip->num_slots = slots; | 1112 | chip->num_slots = slots; |
1412 | 1113 | ||
1413 | pci_set_drvdata(pdev, chip); | 1114 | pci_set_drvdata(pdev, chip); |
@@ -1421,7 +1122,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev, | |||
1421 | slots = chip->num_slots; /* Quirk may have changed this */ | 1122 | slots = chip->num_slots; /* Quirk may have changed this */ |
1422 | 1123 | ||
1423 | for (i = 0; i < slots; i++) { | 1124 | for (i = 0; i < slots; i++) { |
1424 | slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i); | 1125 | slot = sdhci_pci_probe_slot(pdev, chip, first_bar + i); |
1425 | if (IS_ERR(slot)) { | 1126 | if (IS_ERR(slot)) { |
1426 | for (i--; i >= 0; i--) | 1127 | for (i--; i >= 0; i--) |
1427 | sdhci_pci_remove_slot(chip->slots[i]); | 1128 | sdhci_pci_remove_slot(chip->slots[i]); |
@@ -1432,9 +1133,6 @@ static int sdhci_pci_probe(struct pci_dev *pdev, | |||
1432 | chip->slots[i] = slot; | 1133 | chip->slots[i] = slot; |
1433 | } | 1134 | } |
1434 | 1135 | ||
1435 | if (chip->allow_runtime_pm) | ||
1436 | sdhci_pci_runtime_pm_allow(&pdev->dev); | ||
1437 | |||
1438 | return 0; | 1136 | return 0; |
1439 | 1137 | ||
1440 | free: | 1138 | free: |
@@ -1446,7 +1144,7 @@ err: | |||
1446 | return ret; | 1144 | return ret; |
1447 | } | 1145 | } |
1448 | 1146 | ||
1449 | static void sdhci_pci_remove(struct pci_dev *pdev) | 1147 | static void __devexit sdhci_pci_remove(struct pci_dev *pdev) |
1450 | { | 1148 | { |
1451 | int i; | 1149 | int i; |
1452 | struct sdhci_pci_chip *chip; | 1150 | struct sdhci_pci_chip *chip; |
@@ -1454,9 +1152,6 @@ static void sdhci_pci_remove(struct pci_dev *pdev) | |||
1454 | chip = pci_get_drvdata(pdev); | 1152 | chip = pci_get_drvdata(pdev); |
1455 | 1153 | ||
1456 | if (chip) { | 1154 | if (chip) { |
1457 | if (chip->allow_runtime_pm) | ||
1458 | sdhci_pci_runtime_pm_forbid(&pdev->dev); | ||
1459 | |||
1460 | for (i = 0; i < chip->num_slots; i++) | 1155 | for (i = 0; i < chip->num_slots; i++) |
1461 | sdhci_pci_remove_slot(chip->slots[i]); | 1156 | sdhci_pci_remove_slot(chip->slots[i]); |
1462 | 1157 | ||
@@ -1471,13 +1166,29 @@ static struct pci_driver sdhci_driver = { | |||
1471 | .name = "sdhci-pci", | 1166 | .name = "sdhci-pci", |
1472 | .id_table = pci_ids, | 1167 | .id_table = pci_ids, |
1473 | .probe = sdhci_pci_probe, | 1168 | .probe = sdhci_pci_probe, |
1474 | .remove = sdhci_pci_remove, | 1169 | .remove = __devexit_p(sdhci_pci_remove), |
1475 | .driver = { | 1170 | .suspend = sdhci_pci_suspend, |
1476 | .pm = &sdhci_pci_pm_ops | 1171 | .resume = sdhci_pci_resume, |
1477 | }, | ||
1478 | }; | 1172 | }; |
1479 | 1173 | ||
1480 | module_pci_driver(sdhci_driver); | 1174 | /*****************************************************************************\ |
1175 | * * | ||
1176 | * Driver init/exit * | ||
1177 | * * | ||
1178 | \*****************************************************************************/ | ||
1179 | |||
1180 | static int __init sdhci_drv_init(void) | ||
1181 | { | ||
1182 | return pci_register_driver(&sdhci_driver); | ||
1183 | } | ||
1184 | |||
1185 | static void __exit sdhci_drv_exit(void) | ||
1186 | { | ||
1187 | pci_unregister_driver(&sdhci_driver); | ||
1188 | } | ||
1189 | |||
1190 | module_init(sdhci_drv_init); | ||
1191 | module_exit(sdhci_drv_exit); | ||
1481 | 1192 | ||
1482 | MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); | 1193 | MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); |
1483 | MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver"); | 1194 | MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver"); |
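One version of the driver glue above registers power management through a struct dev_pm_ops hung off .driver.pm and relies on module_pci_driver(); the other uses the legacy pci_driver .suspend/.resume hooks plus an explicit module init/exit pair. A minimal sketch of the dev_pm_ops wiring is shown below; the driver name and the callback bodies are hypothetical, only the structure mirrors the pattern above.

        #include <linux/module.h>
        #include <linux/pci.h>
        #include <linux/pm.h>

        static int example_pci_suspend(struct device *dev)
        {
                struct pci_dev *pdev = to_pci_dev(dev);

                /* quiesce the controller, then record PCI config space */
                pci_save_state(pdev);
                return 0;
        }

        static int example_pci_resume(struct device *dev)
        {
                struct pci_dev *pdev = to_pci_dev(dev);

                pci_restore_state(pdev);
                return 0;
        }

        static const struct dev_pm_ops example_pci_pm_ops = {
                .suspend = example_pci_suspend,
                .resume  = example_pci_resume,
        };

        static struct pci_driver example_pci_driver = {
                .name   = "example-pci",
                /* .id_table, .probe and .remove omitted in this sketch */
                .driver = {
                        .pm = &example_pci_pm_ops,
                },
        };

        module_pci_driver(example_pci_driver);

        MODULE_LICENSE("GPL");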
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c index d4283ef5917..1179f1be431 100644 --- a/drivers/mmc/host/sdhci-pltfm.c +++ b/drivers/mmc/host/sdhci-pltfm.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * sdhci-pltfm.c Support for SDHCI platform devices | 2 | * sdhci-pltfm.c Support for SDHCI platform devices |
3 | * Copyright (c) 2009 Intel Corporation | 3 | * Copyright (c) 2009 Intel Corporation |
4 | * | 4 | * |
5 | * Copyright (c) 2007, 2011 Freescale Semiconductor, Inc. | 5 | * Copyright (c) 2007 Freescale Semiconductor, Inc. |
6 | * Copyright (c) 2009 MontaVista Software, Inc. | 6 | * Copyright (c) 2009 MontaVista Software, Inc. |
7 | * | 7 | * |
8 | * Authors: Xiaobo Xie <X.Xie@freescale.com> | 8 | * Authors: Xiaobo Xie <X.Xie@freescale.com> |
@@ -29,7 +29,6 @@ | |||
29 | */ | 29 | */ |
30 | 30 | ||
31 | #include <linux/err.h> | 31 | #include <linux/err.h> |
32 | #include <linux/module.h> | ||
33 | #include <linux/of.h> | 32 | #include <linux/of.h> |
34 | #ifdef CONFIG_PPC | 33 | #ifdef CONFIG_PPC |
35 | #include <asm/machdep.h> | 34 | #include <asm/machdep.h> |
@@ -42,8 +41,7 @@ static struct sdhci_ops sdhci_pltfm_ops = { | |||
42 | #ifdef CONFIG_OF | 41 | #ifdef CONFIG_OF |
43 | static bool sdhci_of_wp_inverted(struct device_node *np) | 42 | static bool sdhci_of_wp_inverted(struct device_node *np) |
44 | { | 43 | { |
45 | if (of_get_property(np, "sdhci,wp-inverted", NULL) || | 44 | if (of_get_property(np, "sdhci,wp-inverted", NULL)) |
46 | of_get_property(np, "wp-inverted", NULL)) | ||
47 | return true; | 45 | return true; |
48 | 46 | ||
49 | /* Old device trees don't have the wp-inverted property. */ | 47 | /* Old device trees don't have the wp-inverted property. */ |
@@ -60,44 +58,21 @@ void sdhci_get_of_property(struct platform_device *pdev) | |||
60 | struct sdhci_host *host = platform_get_drvdata(pdev); | 58 | struct sdhci_host *host = platform_get_drvdata(pdev); |
61 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 59 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
62 | const __be32 *clk; | 60 | const __be32 *clk; |
63 | u32 bus_width; | ||
64 | int size; | 61 | int size; |
65 | 62 | ||
66 | if (of_device_is_available(np)) { | 63 | if (of_device_is_available(np)) { |
67 | if (of_get_property(np, "sdhci,auto-cmd12", NULL)) | 64 | if (of_get_property(np, "sdhci,auto-cmd12", NULL)) |
68 | host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; | 65 | host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; |
69 | 66 | ||
70 | if (of_get_property(np, "sdhci,1-bit-only", NULL) || | 67 | if (of_get_property(np, "sdhci,1-bit-only", NULL)) |
71 | (of_property_read_u32(np, "bus-width", &bus_width) == 0 && | ||
72 | bus_width == 1)) | ||
73 | host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; | 68 | host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; |
74 | 69 | ||
75 | if (sdhci_of_wp_inverted(np)) | 70 | if (sdhci_of_wp_inverted(np)) |
76 | host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT; | 71 | host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT; |
77 | 72 | ||
78 | if (of_get_property(np, "broken-cd", NULL)) | ||
79 | host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; | ||
80 | |||
81 | if (of_get_property(np, "no-1-8-v", NULL)) | ||
82 | host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; | ||
83 | |||
84 | if (of_device_is_compatible(np, "fsl,p2020-rev1-esdhc")) | ||
85 | host->quirks |= SDHCI_QUIRK_BROKEN_DMA; | ||
86 | |||
87 | if (of_device_is_compatible(np, "fsl,p2020-esdhc") || | ||
88 | of_device_is_compatible(np, "fsl,p1010-esdhc") || | ||
89 | of_device_is_compatible(np, "fsl,mpc8536-esdhc")) | ||
90 | host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; | ||
91 | |||
92 | clk = of_get_property(np, "clock-frequency", &size); | 73 | clk = of_get_property(np, "clock-frequency", &size); |
93 | if (clk && size == sizeof(*clk) && *clk) | 74 | if (clk && size == sizeof(*clk) && *clk) |
94 | pltfm_host->clock = be32_to_cpup(clk); | 75 | pltfm_host->clock = be32_to_cpup(clk); |
95 | |||
96 | if (of_find_property(np, "keep-power-in-suspend", NULL)) | ||
97 | host->mmc->pm_caps |= MMC_PM_KEEP_POWER; | ||
98 | |||
99 | if (of_find_property(np, "enable-sdio-wakeup", NULL)) | ||
100 | host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ; | ||
101 | } | 76 | } |
102 | } | 77 | } |
103 | #else | 78 | #else |
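The hunk above is where sdhci_get_of_property() reads optional device-tree properties into host quirks and the platform clock rate. A compact sketch of that parsing pattern follows; only the property names and the clock-frequency decoding come from the code above, while the helper name and the quirk bits are placeholders.

        #include <linux/kernel.h>
        #include <linux/bitops.h>
        #include <linux/of.h>

        static void example_parse_sdhci_node(struct device_node *np,
                                             unsigned int *quirks,
                                             unsigned int *clock_hz)
        {
                const __be32 *clk;
                u32 bus_width = 0;
                int size;

                /* boolean property: presence alone sets a quirk */
                if (of_get_property(np, "sdhci,auto-cmd12", NULL))
                        *quirks |= BIT(0);              /* placeholder bit */

                /* numeric property with a legacy boolean fallback */
                if (of_get_property(np, "sdhci,1-bit-only", NULL) ||
                    (of_property_read_u32(np, "bus-width", &bus_width) == 0 &&
                     bus_width == 1))
                        *quirks |= BIT(1);              /* placeholder bit */

                /* fixed-size u32 property decoded by hand, as above */
                clk = of_get_property(np, "clock-frequency", &size);
                if (clk && size == sizeof(*clk) && *clk)
                        *clock_hz = be32_to_cpup(clk);
        }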
@@ -159,13 +134,6 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev, | |||
159 | goto err_remap; | 134 | goto err_remap; |
160 | } | 135 | } |
161 | 136 | ||
162 | /* | ||
163 | * Some platforms need to probe the controller to be able to | ||
164 | * determine which caps should be used. | ||
165 | */ | ||
166 | if (host->ops && host->ops->platform_init) | ||
167 | host->ops->platform_init(host); | ||
168 | |||
169 | platform_set_drvdata(pdev, host); | 137 | platform_set_drvdata(pdev, host); |
170 | 138 | ||
171 | return host; | 139 | return host; |
@@ -225,25 +193,47 @@ int sdhci_pltfm_unregister(struct platform_device *pdev) | |||
225 | EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister); | 193 | EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister); |
226 | 194 | ||
227 | #ifdef CONFIG_PM | 195 | #ifdef CONFIG_PM |
228 | static int sdhci_pltfm_suspend(struct device *dev) | 196 | int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state) |
229 | { | 197 | { |
230 | struct sdhci_host *host = dev_get_drvdata(dev); | 198 | struct sdhci_host *host = platform_get_drvdata(dev); |
199 | int ret; | ||
231 | 200 | ||
232 | return sdhci_suspend_host(host); | 201 | ret = sdhci_suspend_host(host, state); |
202 | if (ret) { | ||
203 | dev_err(&dev->dev, "suspend failed, error = %d\n", ret); | ||
204 | return ret; | ||
205 | } | ||
206 | |||
207 | if (host->ops && host->ops->suspend) | ||
208 | ret = host->ops->suspend(host, state); | ||
209 | if (ret) { | ||
210 | dev_err(&dev->dev, "suspend hook failed, error = %d\n", ret); | ||
211 | sdhci_resume_host(host); | ||
212 | } | ||
213 | |||
214 | return ret; | ||
233 | } | 215 | } |
216 | EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend); | ||
234 | 217 | ||
235 | static int sdhci_pltfm_resume(struct device *dev) | 218 | int sdhci_pltfm_resume(struct platform_device *dev) |
236 | { | 219 | { |
237 | struct sdhci_host *host = dev_get_drvdata(dev); | 220 | struct sdhci_host *host = platform_get_drvdata(dev); |
221 | int ret = 0; | ||
238 | 222 | ||
239 | return sdhci_resume_host(host); | 223 | if (host->ops && host->ops->resume) |
240 | } | 224 | ret = host->ops->resume(host); |
225 | if (ret) { | ||
226 | dev_err(&dev->dev, "resume hook failed, error = %d\n", ret); | ||
227 | return ret; | ||
228 | } | ||
241 | 229 | ||
242 | const struct dev_pm_ops sdhci_pltfm_pmops = { | 230 | ret = sdhci_resume_host(host); |
243 | .suspend = sdhci_pltfm_suspend, | 231 | if (ret) |
244 | .resume = sdhci_pltfm_resume, | 232 | dev_err(&dev->dev, "resume failed, error = %d\n", ret); |
245 | }; | 233 | |
246 | EXPORT_SYMBOL_GPL(sdhci_pltfm_pmops); | 234 | return ret; |
235 | } | ||
236 | EXPORT_SYMBOL_GPL(sdhci_pltfm_resume); | ||
247 | #endif /* CONFIG_PM */ | 237 | #endif /* CONFIG_PM */ |
248 | 238 | ||
249 | static int __init sdhci_pltfm_drv_init(void) | 239 | static int __init sdhci_pltfm_drv_init(void) |
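One version of the platform glue above exposes a const struct dev_pm_ops (sdhci_pltfm_pmops); the other exports sdhci_pltfm_suspend()/sdhci_pltfm_resume() taking a platform_device. A minimal sketch of the dev_pm_ops form, with a hypothetical driver and drvdata type standing in for the sdhci_host, could look like this:

        #include <linux/module.h>
        #include <linux/platform_device.h>
        #include <linux/pm.h>

        struct example_host { int suspended; };  /* stand-in for sdhci_host */

        static int example_pltfm_suspend(struct device *dev)
        {
                struct example_host *host = dev_get_drvdata(dev);

                host->suspended = 1;             /* quiesce the controller */
                return 0;
        }

        static int example_pltfm_resume(struct device *dev)
        {
                struct example_host *host = dev_get_drvdata(dev);

                host->suspended = 0;             /* restore controller state */
                return 0;
        }

        static SIMPLE_DEV_PM_OPS(example_pltfm_pm_ops,
                                 example_pltfm_suspend, example_pltfm_resume);

        static struct platform_driver example_pltfm_driver = {
                .driver = {
                        .name  = "example-pltfm",
                        .owner = THIS_MODULE,
                        .pm    = &example_pltfm_pm_ops,
                },
        };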
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h index 37e0e184a0b..b92c7f29a4e 100644 --- a/drivers/mmc/host/sdhci-pltfm.h +++ b/drivers/mmc/host/sdhci-pltfm.h | |||
@@ -17,7 +17,7 @@ | |||
17 | 17 | ||
18 | struct sdhci_pltfm_data { | 18 | struct sdhci_pltfm_data { |
19 | struct sdhci_ops *ops; | 19 | struct sdhci_ops *ops; |
20 | unsigned int quirks; | 20 | u64 quirks; |
21 | }; | 21 | }; |
22 | 22 | ||
23 | struct sdhci_pltfm_host { | 23 | struct sdhci_pltfm_host { |
@@ -99,10 +99,8 @@ extern int sdhci_pltfm_register(struct platform_device *pdev, | |||
99 | extern int sdhci_pltfm_unregister(struct platform_device *pdev); | 99 | extern int sdhci_pltfm_unregister(struct platform_device *pdev); |
100 | 100 | ||
101 | #ifdef CONFIG_PM | 101 | #ifdef CONFIG_PM |
102 | extern const struct dev_pm_ops sdhci_pltfm_pmops; | 102 | extern int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state); |
103 | #define SDHCI_PLTFM_PMOPS (&sdhci_pltfm_pmops) | 103 | extern int sdhci_pltfm_resume(struct platform_device *dev); |
104 | #else | ||
105 | #define SDHCI_PLTFM_PMOPS NULL | ||
106 | #endif | 104 | #endif |
107 | 105 | ||
108 | #endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */ | 106 | #endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */ |
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c index ac854aa192a..38f58994f79 100644 --- a/drivers/mmc/host/sdhci-pxav2.c +++ b/drivers/mmc/host/sdhci-pxav2.c | |||
@@ -21,16 +21,12 @@ | |||
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | #include <linux/clk.h> | 23 | #include <linux/clk.h> |
24 | #include <linux/module.h> | ||
25 | #include <linux/io.h> | 24 | #include <linux/io.h> |
26 | #include <linux/gpio.h> | 25 | #include <linux/gpio.h> |
27 | #include <linux/mmc/card.h> | 26 | #include <linux/mmc/card.h> |
28 | #include <linux/mmc/host.h> | 27 | #include <linux/mmc/host.h> |
29 | #include <linux/platform_data/pxa_sdhci.h> | 28 | #include <linux/platform_data/pxa_sdhci.h> |
30 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
31 | #include <linux/of.h> | ||
32 | #include <linux/of_device.h> | ||
33 | |||
34 | #include "sdhci.h" | 30 | #include "sdhci.h" |
35 | #include "sdhci-pltfm.h" | 31 | #include "sdhci-pltfm.h" |
36 | 32 | ||
@@ -63,7 +59,7 @@ static void pxav2_set_private_registers(struct sdhci_host *host, u8 mask) | |||
63 | * tune timing of read data/command when crc error happen | 59 | * tune timing of read data/command when crc error happen |
64 | * no performance impact | 60 | * no performance impact |
65 | */ | 61 | */ |
66 | if (pdata && pdata->clk_delay_sel == 1) { | 62 | if (pdata->clk_delay_sel == 1) { |
67 | tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP); | 63 | tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP); |
68 | 64 | ||
69 | tmp &= ~(SDCLK_DELAY_MASK << SDCLK_DELAY_SHIFT); | 65 | tmp &= ~(SDCLK_DELAY_MASK << SDCLK_DELAY_SHIFT); |
@@ -75,7 +71,7 @@ static void pxav2_set_private_registers(struct sdhci_host *host, u8 mask) | |||
75 | writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP); | 71 | writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP); |
76 | } | 72 | } |
77 | 73 | ||
78 | if (pdata && (pdata->flags & PXA_FLAG_ENABLE_CLOCK_GATING)) { | 74 | if (pdata->flags & PXA_FLAG_ENABLE_CLOCK_GATING) { |
79 | tmp = readw(host->ioaddr + SD_FIFO_PARAM); | 75 | tmp = readw(host->ioaddr + SD_FIFO_PARAM); |
80 | tmp &= ~CLK_GATE_SETTING_BITS; | 76 | tmp &= ~CLK_GATE_SETTING_BITS; |
81 | writew(tmp, host->ioaddr + SD_FIFO_PARAM); | 77 | writew(tmp, host->ioaddr + SD_FIFO_PARAM); |
@@ -124,57 +120,13 @@ static struct sdhci_ops pxav2_sdhci_ops = { | |||
124 | .platform_8bit_width = pxav2_mmc_set_width, | 120 | .platform_8bit_width = pxav2_mmc_set_width, |
125 | }; | 121 | }; |
126 | 122 | ||
127 | #ifdef CONFIG_OF | 123 | static int __devinit sdhci_pxav2_probe(struct platform_device *pdev) |
128 | static const struct of_device_id sdhci_pxav2_of_match[] = { | ||
129 | { | ||
130 | .compatible = "mrvl,pxav2-mmc", | ||
131 | }, | ||
132 | {}, | ||
133 | }; | ||
134 | MODULE_DEVICE_TABLE(of, sdhci_pxav2_of_match); | ||
135 | |||
136 | static struct sdhci_pxa_platdata *pxav2_get_mmc_pdata(struct device *dev) | ||
137 | { | ||
138 | struct sdhci_pxa_platdata *pdata; | ||
139 | struct device_node *np = dev->of_node; | ||
140 | u32 bus_width; | ||
141 | u32 clk_delay_cycles; | ||
142 | |||
143 | pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); | ||
144 | if (!pdata) | ||
145 | return NULL; | ||
146 | |||
147 | if (of_find_property(np, "non-removable", NULL)) | ||
148 | pdata->flags |= PXA_FLAG_CARD_PERMANENT; | ||
149 | |||
150 | of_property_read_u32(np, "bus-width", &bus_width); | ||
151 | if (bus_width == 8) | ||
152 | pdata->flags |= PXA_FLAG_SD_8_BIT_CAPABLE_SLOT; | ||
153 | |||
154 | of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles); | ||
155 | if (clk_delay_cycles > 0) { | ||
156 | pdata->clk_delay_sel = 1; | ||
157 | pdata->clk_delay_cycles = clk_delay_cycles; | ||
158 | } | ||
159 | |||
160 | return pdata; | ||
161 | } | ||
162 | #else | ||
163 | static inline struct sdhci_pxa_platdata *pxav2_get_mmc_pdata(struct device *dev) | ||
164 | { | ||
165 | return NULL; | ||
166 | } | ||
167 | #endif | ||
168 | |||
169 | static int sdhci_pxav2_probe(struct platform_device *pdev) | ||
170 | { | 124 | { |
171 | struct sdhci_pltfm_host *pltfm_host; | 125 | struct sdhci_pltfm_host *pltfm_host; |
172 | struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; | 126 | struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; |
173 | struct device *dev = &pdev->dev; | 127 | struct device *dev = &pdev->dev; |
174 | struct sdhci_host *host = NULL; | 128 | struct sdhci_host *host = NULL; |
175 | struct sdhci_pxa *pxa = NULL; | 129 | struct sdhci_pxa *pxa = NULL; |
176 | const struct of_device_id *match; | ||
177 | |||
178 | int ret; | 130 | int ret; |
179 | struct clk *clk; | 131 | struct clk *clk; |
180 | 132 | ||
@@ -197,16 +149,12 @@ static int sdhci_pxav2_probe(struct platform_device *pdev) | |||
197 | goto err_clk_get; | 149 | goto err_clk_get; |
198 | } | 150 | } |
199 | pltfm_host->clk = clk; | 151 | pltfm_host->clk = clk; |
200 | clk_prepare_enable(clk); | 152 | clk_enable(clk); |
201 | 153 | ||
202 | host->quirks = SDHCI_QUIRK_BROKEN_ADMA | 154 | host->quirks = SDHCI_QUIRK_BROKEN_ADMA |
203 | | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | 155 | | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
204 | | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN; | 156 | | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN; |
205 | 157 | ||
206 | match = of_match_device(of_match_ptr(sdhci_pxav2_of_match), &pdev->dev); | ||
207 | if (match) { | ||
208 | pdata = pxav2_get_mmc_pdata(dev); | ||
209 | } | ||
210 | if (pdata) { | 158 | if (pdata) { |
211 | if (pdata->flags & PXA_FLAG_CARD_PERMANENT) { | 159 | if (pdata->flags & PXA_FLAG_CARD_PERMANENT) { |
212 | /* on-chip device */ | 160 | /* on-chip device */ |
@@ -239,7 +187,7 @@ static int sdhci_pxav2_probe(struct platform_device *pdev) | |||
239 | return 0; | 187 | return 0; |
240 | 188 | ||
241 | err_add_host: | 189 | err_add_host: |
242 | clk_disable_unprepare(clk); | 190 | clk_disable(clk); |
243 | clk_put(clk); | 191 | clk_put(clk); |
244 | err_clk_get: | 192 | err_clk_get: |
245 | sdhci_pltfm_free(pdev); | 193 | sdhci_pltfm_free(pdev); |
@@ -247,7 +195,7 @@ err_clk_get: | |||
247 | return ret; | 195 | return ret; |
248 | } | 196 | } |
249 | 197 | ||
250 | static int sdhci_pxav2_remove(struct platform_device *pdev) | 198 | static int __devexit sdhci_pxav2_remove(struct platform_device *pdev) |
251 | { | 199 | { |
252 | struct sdhci_host *host = platform_get_drvdata(pdev); | 200 | struct sdhci_host *host = platform_get_drvdata(pdev); |
253 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 201 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
@@ -255,7 +203,7 @@ static int sdhci_pxav2_remove(struct platform_device *pdev) | |||
255 | 203 | ||
256 | sdhci_remove_host(host, 1); | 204 | sdhci_remove_host(host, 1); |
257 | 205 | ||
258 | clk_disable_unprepare(pltfm_host->clk); | 206 | clk_disable(pltfm_host->clk); |
259 | clk_put(pltfm_host->clk); | 207 | clk_put(pltfm_host->clk); |
260 | sdhci_pltfm_free(pdev); | 208 | sdhci_pltfm_free(pdev); |
261 | kfree(pxa); | 209 | kfree(pxa); |
@@ -269,16 +217,26 @@ static struct platform_driver sdhci_pxav2_driver = { | |||
269 | .driver = { | 217 | .driver = { |
270 | .name = "sdhci-pxav2", | 218 | .name = "sdhci-pxav2", |
271 | .owner = THIS_MODULE, | 219 | .owner = THIS_MODULE, |
272 | #ifdef CONFIG_OF | ||
273 | .of_match_table = sdhci_pxav2_of_match, | ||
274 | #endif | ||
275 | .pm = SDHCI_PLTFM_PMOPS, | ||
276 | }, | 220 | }, |
277 | .probe = sdhci_pxav2_probe, | 221 | .probe = sdhci_pxav2_probe, |
278 | .remove = sdhci_pxav2_remove, | 222 | .remove = __devexit_p(sdhci_pxav2_remove), |
223 | #ifdef CONFIG_PM | ||
224 | .suspend = sdhci_pltfm_suspend, | ||
225 | .resume = sdhci_pltfm_resume, | ||
226 | #endif | ||
279 | }; | 227 | }; |
228 | static int __init sdhci_pxav2_init(void) | ||
229 | { | ||
230 | return platform_driver_register(&sdhci_pxav2_driver); | ||
231 | } | ||
232 | |||
233 | static void __exit sdhci_pxav2_exit(void) | ||
234 | { | ||
235 | platform_driver_unregister(&sdhci_pxav2_driver); | ||
236 | } | ||
280 | 237 | ||
281 | module_platform_driver(sdhci_pxav2_driver); | 238 | module_init(sdhci_pxav2_init); |
239 | module_exit(sdhci_pxav2_exit); | ||
282 | 240 | ||
283 | MODULE_DESCRIPTION("SDHCI driver for pxav2"); | 241 | MODULE_DESCRIPTION("SDHCI driver for pxav2"); |
284 | MODULE_AUTHOR("Marvell International Ltd."); | 242 | MODULE_AUTHOR("Marvell International Ltd."); |
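A recurring difference in the pxav2/pxav3 hunks is clk_prepare_enable()/clk_disable_unprepare() versus plain clk_enable()/clk_disable(): the combined calls fold the (possibly sleeping) prepare step into the enable path. A small sketch of the acquire/release pairing follows; the NULL clock id and the helper names are assumptions.

        #include <linux/clk.h>
        #include <linux/device.h>
        #include <linux/err.h>

        static int example_clk_on(struct device *dev, struct clk **out)
        {
                struct clk *clk;
                int ret;

                clk = clk_get(dev, NULL);        /* NULL id: the device's only clock */
                if (IS_ERR(clk))
                        return PTR_ERR(clk);

                ret = clk_prepare_enable(clk);   /* prepare (may sleep) + enable */
                if (ret) {
                        clk_put(clk);
                        return ret;
                }

                *out = clk;
                return 0;
        }

        static void example_clk_off(struct clk *clk)
        {
                clk_disable_unprepare(clk);      /* disable, then unprepare */
                clk_put(clk);
        }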
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index fad0966427f..fc7e4a51562 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c | |||
@@ -24,15 +24,9 @@ | |||
24 | #include <linux/gpio.h> | 24 | #include <linux/gpio.h> |
25 | #include <linux/mmc/card.h> | 25 | #include <linux/mmc/card.h> |
26 | #include <linux/mmc/host.h> | 26 | #include <linux/mmc/host.h> |
27 | #include <linux/mmc/slot-gpio.h> | ||
28 | #include <linux/platform_data/pxa_sdhci.h> | 27 | #include <linux/platform_data/pxa_sdhci.h> |
29 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
30 | #include <linux/delay.h> | 29 | #include <linux/delay.h> |
31 | #include <linux/module.h> | ||
32 | #include <linux/of.h> | ||
33 | #include <linux/of_device.h> | ||
34 | #include <linux/of_gpio.h> | ||
35 | |||
36 | #include "sdhci.h" | 30 | #include "sdhci.h" |
37 | #include "sdhci-pltfm.h" | 31 | #include "sdhci-pltfm.h" |
38 | 32 | ||
@@ -163,74 +157,19 @@ static int pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) | |||
163 | return 0; | 157 | return 0; |
164 | } | 158 | } |
165 | 159 | ||
166 | static u32 pxav3_get_max_clock(struct sdhci_host *host) | ||
167 | { | ||
168 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
169 | |||
170 | return clk_get_rate(pltfm_host->clk); | ||
171 | } | ||
172 | |||
173 | static struct sdhci_ops pxav3_sdhci_ops = { | 160 | static struct sdhci_ops pxav3_sdhci_ops = { |
174 | .platform_reset_exit = pxav3_set_private_registers, | 161 | .platform_reset_exit = pxav3_set_private_registers, |
175 | .set_uhs_signaling = pxav3_set_uhs_signaling, | 162 | .set_uhs_signaling = pxav3_set_uhs_signaling, |
176 | .platform_send_init_74_clocks = pxav3_gen_init_74_clocks, | 163 | .platform_send_init_74_clocks = pxav3_gen_init_74_clocks, |
177 | .get_max_clock = pxav3_get_max_clock, | ||
178 | }; | ||
179 | |||
180 | #ifdef CONFIG_OF | ||
181 | static const struct of_device_id sdhci_pxav3_of_match[] = { | ||
182 | { | ||
183 | .compatible = "mrvl,pxav3-mmc", | ||
184 | }, | ||
185 | {}, | ||
186 | }; | 164 | }; |
187 | MODULE_DEVICE_TABLE(of, sdhci_pxav3_of_match); | ||
188 | |||
189 | static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev) | ||
190 | { | ||
191 | struct sdhci_pxa_platdata *pdata; | ||
192 | struct device_node *np = dev->of_node; | ||
193 | u32 bus_width; | ||
194 | u32 clk_delay_cycles; | ||
195 | enum of_gpio_flags gpio_flags; | ||
196 | |||
197 | pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); | ||
198 | if (!pdata) | ||
199 | return NULL; | ||
200 | |||
201 | if (of_find_property(np, "non-removable", NULL)) | ||
202 | pdata->flags |= PXA_FLAG_CARD_PERMANENT; | ||
203 | |||
204 | of_property_read_u32(np, "bus-width", &bus_width); | ||
205 | if (bus_width == 8) | ||
206 | pdata->flags |= PXA_FLAG_SD_8_BIT_CAPABLE_SLOT; | ||
207 | |||
208 | of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles); | ||
209 | if (clk_delay_cycles > 0) | ||
210 | pdata->clk_delay_cycles = clk_delay_cycles; | ||
211 | |||
212 | pdata->ext_cd_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &gpio_flags); | ||
213 | if (gpio_flags != OF_GPIO_ACTIVE_LOW) | ||
214 | pdata->host_caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; | ||
215 | |||
216 | return pdata; | ||
217 | } | ||
218 | #else | ||
219 | static inline struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev) | ||
220 | { | ||
221 | return NULL; | ||
222 | } | ||
223 | #endif | ||
224 | 165 | ||
225 | static int sdhci_pxav3_probe(struct platform_device *pdev) | 166 | static int __devinit sdhci_pxav3_probe(struct platform_device *pdev) |
226 | { | 167 | { |
227 | struct sdhci_pltfm_host *pltfm_host; | 168 | struct sdhci_pltfm_host *pltfm_host; |
228 | struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; | 169 | struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; |
229 | struct device *dev = &pdev->dev; | 170 | struct device *dev = &pdev->dev; |
230 | struct sdhci_host *host = NULL; | 171 | struct sdhci_host *host = NULL; |
231 | struct sdhci_pxa *pxa = NULL; | 172 | struct sdhci_pxa *pxa = NULL; |
232 | const struct of_device_id *match; | ||
233 | |||
234 | int ret; | 173 | int ret; |
235 | struct clk *clk; | 174 | struct clk *clk; |
236 | 175 | ||
@@ -246,27 +185,22 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) | |||
246 | pltfm_host = sdhci_priv(host); | 185 | pltfm_host = sdhci_priv(host); |
247 | pltfm_host->priv = pxa; | 186 | pltfm_host->priv = pxa; |
248 | 187 | ||
249 | clk = clk_get(dev, NULL); | 188 | clk = clk_get(dev, "PXA-SDHCLK"); |
250 | if (IS_ERR(clk)) { | 189 | if (IS_ERR(clk)) { |
251 | dev_err(dev, "failed to get io clock\n"); | 190 | dev_err(dev, "failed to get io clock\n"); |
252 | ret = PTR_ERR(clk); | 191 | ret = PTR_ERR(clk); |
253 | goto err_clk_get; | 192 | goto err_clk_get; |
254 | } | 193 | } |
255 | pltfm_host->clk = clk; | 194 | pltfm_host->clk = clk; |
256 | clk_prepare_enable(clk); | 195 | clk_enable(clk); |
257 | 196 | ||
258 | host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | 197 | host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
259 | | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | 198 | | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
260 | | SDHCI_QUIRK_32BIT_ADMA_SIZE | 199 | | SDHCI_QUIRK_32BIT_ADMA_SIZE; |
261 | | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN; | ||
262 | 200 | ||
263 | /* enable 1/8V DDR capable */ | 201 | /* enable 1/8V DDR capable */ |
264 | host->mmc->caps |= MMC_CAP_1_8V_DDR; | 202 | host->mmc->caps |= MMC_CAP_1_8V_DDR; |
265 | 203 | ||
266 | match = of_match_device(of_match_ptr(sdhci_pxav3_of_match), &pdev->dev); | ||
267 | if (match) | ||
268 | pdata = pxav3_get_mmc_pdata(dev); | ||
269 | |||
270 | if (pdata) { | 204 | if (pdata) { |
271 | if (pdata->flags & PXA_FLAG_CARD_PERMANENT) { | 205 | if (pdata->flags & PXA_FLAG_CARD_PERMANENT) { |
272 | /* on-chip device */ | 206 | /* on-chip device */ |
@@ -280,29 +214,14 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) | |||
280 | 214 | ||
281 | if (pdata->quirks) | 215 | if (pdata->quirks) |
282 | host->quirks |= pdata->quirks; | 216 | host->quirks |= pdata->quirks; |
283 | if (pdata->quirks2) | ||
284 | host->quirks2 |= pdata->quirks2; | ||
285 | if (pdata->host_caps) | 217 | if (pdata->host_caps) |
286 | host->mmc->caps |= pdata->host_caps; | 218 | host->mmc->caps |= pdata->host_caps; |
287 | if (pdata->host_caps2) | ||
288 | host->mmc->caps2 |= pdata->host_caps2; | ||
289 | if (pdata->pm_caps) | 219 | if (pdata->pm_caps) |
290 | host->mmc->pm_caps |= pdata->pm_caps; | 220 | host->mmc->pm_caps |= pdata->pm_caps; |
291 | |||
292 | if (gpio_is_valid(pdata->ext_cd_gpio)) { | ||
293 | ret = mmc_gpio_request_cd(host->mmc, pdata->ext_cd_gpio); | ||
294 | if (ret) { | ||
295 | dev_err(mmc_dev(host->mmc), | ||
296 | "failed to allocate card detect gpio\n"); | ||
297 | goto err_cd_req; | ||
298 | } | ||
299 | } | ||
300 | } | 221 | } |
301 | 222 | ||
302 | host->ops = &pxav3_sdhci_ops; | 223 | host->ops = &pxav3_sdhci_ops; |
303 | 224 | ||
304 | sdhci_get_of_property(pdev); | ||
305 | |||
306 | ret = sdhci_add_host(host); | 225 | ret = sdhci_add_host(host); |
307 | if (ret) { | 226 | if (ret) { |
308 | dev_err(&pdev->dev, "failed to add host\n"); | 227 | dev_err(&pdev->dev, "failed to add host\n"); |
@@ -314,31 +233,24 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) | |||
314 | return 0; | 233 | return 0; |
315 | 234 | ||
316 | err_add_host: | 235 | err_add_host: |
317 | clk_disable_unprepare(clk); | 236 | clk_disable(clk); |
318 | clk_put(clk); | 237 | clk_put(clk); |
319 | mmc_gpio_free_cd(host->mmc); | ||
320 | err_cd_req: | ||
321 | err_clk_get: | 238 | err_clk_get: |
322 | sdhci_pltfm_free(pdev); | 239 | sdhci_pltfm_free(pdev); |
323 | kfree(pxa); | 240 | kfree(pxa); |
324 | return ret; | 241 | return ret; |
325 | } | 242 | } |
326 | 243 | ||
327 | static int sdhci_pxav3_remove(struct platform_device *pdev) | 244 | static int __devexit sdhci_pxav3_remove(struct platform_device *pdev) |
328 | { | 245 | { |
329 | struct sdhci_host *host = platform_get_drvdata(pdev); | 246 | struct sdhci_host *host = platform_get_drvdata(pdev); |
330 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 247 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
331 | struct sdhci_pxa *pxa = pltfm_host->priv; | 248 | struct sdhci_pxa *pxa = pltfm_host->priv; |
332 | struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; | ||
333 | 249 | ||
334 | sdhci_remove_host(host, 1); | 250 | sdhci_remove_host(host, 1); |
335 | 251 | ||
336 | clk_disable_unprepare(pltfm_host->clk); | 252 | clk_disable(pltfm_host->clk); |
337 | clk_put(pltfm_host->clk); | 253 | clk_put(pltfm_host->clk); |
338 | |||
339 | if (gpio_is_valid(pdata->ext_cd_gpio)) | ||
340 | mmc_gpio_free_cd(host->mmc); | ||
341 | |||
342 | sdhci_pltfm_free(pdev); | 254 | sdhci_pltfm_free(pdev); |
343 | kfree(pxa); | 255 | kfree(pxa); |
344 | 256 | ||
@@ -350,17 +262,27 @@ static int sdhci_pxav3_remove(struct platform_device *pdev) | |||
350 | static struct platform_driver sdhci_pxav3_driver = { | 262 | static struct platform_driver sdhci_pxav3_driver = { |
351 | .driver = { | 263 | .driver = { |
352 | .name = "sdhci-pxav3", | 264 | .name = "sdhci-pxav3", |
353 | #ifdef CONFIG_OF | ||
354 | .of_match_table = sdhci_pxav3_of_match, | ||
355 | #endif | ||
356 | .owner = THIS_MODULE, | 265 | .owner = THIS_MODULE, |
357 | .pm = SDHCI_PLTFM_PMOPS, | ||
358 | }, | 266 | }, |
359 | .probe = sdhci_pxav3_probe, | 267 | .probe = sdhci_pxav3_probe, |
360 | .remove = sdhci_pxav3_remove, | 268 | .remove = __devexit_p(sdhci_pxav3_remove), |
269 | #ifdef CONFIG_PM | ||
270 | .suspend = sdhci_pltfm_suspend, | ||
271 | .resume = sdhci_pltfm_resume, | ||
272 | #endif | ||
361 | }; | 273 | }; |
274 | static int __init sdhci_pxav3_init(void) | ||
275 | { | ||
276 | return platform_driver_register(&sdhci_pxav3_driver); | ||
277 | } | ||
278 | |||
279 | static void __exit sdhci_pxav3_exit(void) | ||
280 | { | ||
281 | platform_driver_unregister(&sdhci_pxav3_driver); | ||
282 | } | ||
362 | 283 | ||
363 | module_platform_driver(sdhci_pxav3_driver); | 284 | module_init(sdhci_pxav3_init); |
285 | module_exit(sdhci_pxav3_exit); | ||
364 | 286 | ||
365 | MODULE_DESCRIPTION("SDHCI driver for pxav3"); | 287 | MODULE_DESCRIPTION("SDHCI driver for pxav3"); |
366 | MODULE_AUTHOR("Marvell International Ltd."); | 288 | MODULE_AUTHOR("Marvell International Ltd."); |
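One version of the pxav3 driver above builds its platform data from the device tree when the device was bound through an OF match table (sdhci_pxav3_of_match / pxav3_get_mmc_pdata). A stripped-down sketch of that binding pattern is shown below; the compatible string, driver name and probe body are assumptions.

        #include <linux/module.h>
        #include <linux/of.h>
        #include <linux/of_device.h>
        #include <linux/platform_device.h>

        static const struct of_device_id example_mmc_of_match[] = {
                { .compatible = "vendor,example-mmc" },
                { /* sentinel */ },
        };
        MODULE_DEVICE_TABLE(of, example_mmc_of_match);

        static int example_mmc_probe(struct platform_device *pdev)
        {
                const struct of_device_id *match;

                match = of_match_device(example_mmc_of_match, &pdev->dev);
                if (match) {
                        /* bound via DT: derive platform data from properties here */
                }

                return 0;
        }

        static struct platform_driver example_mmc_driver = {
                .driver = {
                        .name           = "example-mmc",
                        .owner          = THIS_MODULE,
                        .of_match_table = example_mmc_of_match,
                },
                .probe  = example_mmc_probe,
        };
        module_platform_driver(example_mmc_driver);

        MODULE_LICENSE("GPL");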
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 82a8de148a8..fe886d6c474 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c | |||
@@ -20,11 +20,6 @@ | |||
20 | #include <linux/io.h> | 20 | #include <linux/io.h> |
21 | #include <linux/gpio.h> | 21 | #include <linux/gpio.h> |
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/of.h> | ||
24 | #include <linux/of_gpio.h> | ||
25 | #include <linux/pm.h> | ||
26 | #include <linux/pm_runtime.h> | ||
27 | #include <linux/pinctrl/consumer.h> | ||
28 | 23 | ||
29 | #include <linux/mmc/host.h> | 24 | #include <linux/mmc/host.h> |
30 | 25 | ||
@@ -35,9 +30,6 @@ | |||
35 | 30 | ||
36 | #define MAX_BUS_CLK (4) | 31 | #define MAX_BUS_CLK (4) |
37 | 32 | ||
38 | /* Number of gpio's used is max data bus width + command and clock lines */ | ||
39 | #define NUM_GPIOS(x) (x + 2) | ||
40 | |||
41 | /** | 33 | /** |
42 | * struct sdhci_s3c - S3C SDHCI instance | 34 | * struct sdhci_s3c - S3C SDHCI instance |
43 | * @host: The SDHCI host created | 35 | * @host: The SDHCI host created |
@@ -45,7 +37,6 @@ | |||
45 | * @ioarea: The resource created when we claimed the IO area. | 37 | * @ioarea: The resource created when we claimed the IO area. |
46 | * @pdata: The platform data for this controller. | 38 | * @pdata: The platform data for this controller. |
47 | * @cur_clk: The index of the current bus clock. | 39 | * @cur_clk: The index of the current bus clock. |
48 | * @gpios: List of gpio numbers parsed from device tree. | ||
49 | * @clk_io: The clock for the internal bus interface. | 40 | * @clk_io: The clock for the internal bus interface. |
50 | * @clk_bus: The clocks that are available for the SD/MMC bus clock. | 41 | * @clk_bus: The clocks that are available for the SD/MMC bus clock. |
51 | */ | 42 | */ |
@@ -57,25 +48,11 @@ struct sdhci_s3c { | |||
57 | unsigned int cur_clk; | 48 | unsigned int cur_clk; |
58 | int ext_cd_irq; | 49 | int ext_cd_irq; |
59 | int ext_cd_gpio; | 50 | int ext_cd_gpio; |
60 | int *gpios; | ||
61 | struct pinctrl *pctrl; | ||
62 | 51 | ||
63 | struct clk *clk_io; | 52 | struct clk *clk_io; |
64 | struct clk *clk_bus[MAX_BUS_CLK]; | 53 | struct clk *clk_bus[MAX_BUS_CLK]; |
65 | }; | 54 | }; |
66 | 55 | ||
67 | /** | ||
68 | * struct sdhci_s3c_driver_data - S3C SDHCI platform specific driver data | ||
69 | * @sdhci_quirks: sdhci host specific quirks. | ||
70 | * | ||
71 | * Specifies platform specific configuration of sdhci controller. | ||
72 | * Note: A structure for driver specific platform data is used for future | ||
73 | * expansion of its usage. | ||
74 | */ | ||
75 | struct sdhci_s3c_drv_data { | ||
76 | unsigned int sdhci_quirks; | ||
77 | }; | ||
78 | |||
79 | static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host) | 56 | static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host) |
80 | { | 57 | { |
81 | return sdhci_priv(host); | 58 | return sdhci_priv(host); |
@@ -103,7 +80,7 @@ static void sdhci_s3c_check_sclk(struct sdhci_host *host) | |||
103 | 80 | ||
104 | tmp &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK; | 81 | tmp &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK; |
105 | tmp |= ourhost->cur_clk << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT; | 82 | tmp |= ourhost->cur_clk << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT; |
106 | writel(tmp, host->ioaddr + S3C_SDHCI_CONTROL2); | 83 | writel(tmp, host->ioaddr + 0x80); |
107 | } | 84 | } |
108 | } | 85 | } |
109 | 86 | ||
@@ -155,10 +132,10 @@ static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost, | |||
155 | return UINT_MAX; | 132 | return UINT_MAX; |
156 | 133 | ||
157 | /* | 134 | /* |
158 | * If controller uses a non-standard clock division, find the best clock | 135 | * Clock divider's step is different as 1 from that of host controller |
159 | * speed possible with selected clock source and skip the division. | 136 | * when 'clk_type' is S3C_SDHCI_CLK_DIV_EXTERNAL. |
160 | */ | 137 | */ |
161 | if (ourhost->host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) { | 138 | if (ourhost->pdata->clk_type) { |
162 | rate = clk_round_rate(clksrc, wanted); | 139 | rate = clk_round_rate(clksrc, wanted); |
163 | return wanted - rate; | 140 | return wanted - rate; |
164 | } | 141 | } |
@@ -173,7 +150,7 @@ static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost, | |||
173 | dev_dbg(&ourhost->pdev->dev, "clk %d: rate %ld, want %d, got %ld\n", | 150 | dev_dbg(&ourhost->pdev->dev, "clk %d: rate %ld, want %d, got %ld\n", |
174 | src, rate, wanted, rate / div); | 151 | src, rate, wanted, rate / div); |
175 | 152 | ||
176 | return wanted - (rate / div); | 153 | return (wanted - (rate / div)); |
177 | } | 154 | } |
178 | 155 | ||
179 | /** | 156 | /** |
@@ -210,12 +187,10 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock) | |||
210 | best_src, clock, best); | 187 | best_src, clock, best); |
211 | 188 | ||
212 | /* select the new clock source */ | 189 | /* select the new clock source */ |
190 | |||
213 | if (ourhost->cur_clk != best_src) { | 191 | if (ourhost->cur_clk != best_src) { |
214 | struct clk *clk = ourhost->clk_bus[best_src]; | 192 | struct clk *clk = ourhost->clk_bus[best_src]; |
215 | 193 | ||
216 | clk_prepare_enable(clk); | ||
217 | clk_disable_unprepare(ourhost->clk_bus[ourhost->cur_clk]); | ||
218 | |||
219 | /* turn clock off to card before changing clock source */ | 194 | /* turn clock off to card before changing clock source */ |
220 | writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL); | 195 | writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL); |
221 | 196 | ||
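One version of the clock-source switch above enables the newly selected bus clock before dropping the previous one, so the controller is never left without a running input clock. A tiny sketch of that hand-over, assuming both handles were obtained with clk_get() and the old clock is currently enabled:

        #include <linux/clk.h>

        static void example_switch_bus_clk(struct clk *new_clk, struct clk *old_clk)
        {
                /* bring the new source up first ... */
                if (clk_prepare_enable(new_clk))
                        return;                 /* keep the old clock on failure */

                /* ... and only then release the old one */
                clk_disable_unprepare(old_clk);
        }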
@@ -228,23 +203,17 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock) | |||
228 | writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2); | 203 | writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2); |
229 | } | 204 | } |
230 | 205 | ||
231 | /* reprogram default hardware configuration */ | 206 | /* reconfigure the hardware for new clock rate */ |
232 | writel(S3C64XX_SDHCI_CONTROL4_DRIVE_9mA, | 207 | |
233 | host->ioaddr + S3C64XX_SDHCI_CONTROL4); | 208 | { |
234 | 209 | struct mmc_ios ios; | |
235 | ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2); | 210 | |
236 | ctrl |= (S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR | | 211 | ios.clock = clock; |
237 | S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK | | 212 | |
238 | S3C_SDHCI_CTRL2_ENFBCLKRX | | 213 | if (ourhost->pdata->cfg_card) |
239 | S3C_SDHCI_CTRL2_DFCNT_NONE | | 214 | (ourhost->pdata->cfg_card)(ourhost->pdev, host->ioaddr, |
240 | S3C_SDHCI_CTRL2_ENCLKOUTHOLD); | 215 | &ios, NULL); |
241 | writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2); | 216 | } |
242 | |||
243 | /* reconfigure the controller for new clock rate */ | ||
244 | ctrl = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0); | ||
245 | if (clock < 25 * 1000000) | ||
246 | ctrl |= (S3C_SDHCI_CTRL3_FCSEL3 | S3C_SDHCI_CTRL3_FCSEL2); | ||
247 | writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL3); | ||
248 | } | 217 | } |
249 | 218 | ||
250 | /** | 219 | /** |
@@ -297,9 +266,6 @@ static unsigned int sdhci_cmu_get_min_clock(struct sdhci_host *host) | |||
297 | static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock) | 266 | static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock) |
298 | { | 267 | { |
299 | struct sdhci_s3c *ourhost = to_s3c(host); | 268 | struct sdhci_s3c *ourhost = to_s3c(host); |
300 | struct device *dev = &ourhost->pdev->dev; | ||
301 | unsigned long timeout; | ||
302 | u16 clk = 0; | ||
303 | 269 | ||
304 | /* don't bother if the clock is going off */ | 270 | /* don't bother if the clock is going off */ |
305 | if (clock == 0) | 271 | if (clock == 0) |
@@ -310,25 +276,6 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock) | |||
310 | clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock); | 276 | clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock); |
311 | 277 | ||
312 | host->clock = clock; | 278 | host->clock = clock; |
313 | |||
314 | clk = SDHCI_CLOCK_INT_EN; | ||
315 | sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); | ||
316 | |||
317 | /* Wait max 20 ms */ | ||
318 | timeout = 20; | ||
319 | while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) | ||
320 | & SDHCI_CLOCK_INT_STABLE)) { | ||
321 | if (timeout == 0) { | ||
322 | dev_err(dev, "%s: Internal clock never stabilised.\n", | ||
323 | mmc_hostname(host->mmc)); | ||
324 | return; | ||
325 | } | ||
326 | timeout--; | ||
327 | mdelay(1); | ||
328 | } | ||
329 | |||
330 | clk |= SDHCI_CLOCK_CARD_EN; | ||
331 | sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); | ||
332 | } | 279 | } |
333 | 280 | ||
334 | /** | 281 | /** |
@@ -375,27 +322,18 @@ static struct sdhci_ops sdhci_s3c_ops = { | |||
375 | static void sdhci_s3c_notify_change(struct platform_device *dev, int state) | 322 | static void sdhci_s3c_notify_change(struct platform_device *dev, int state) |
376 | { | 323 | { |
377 | struct sdhci_host *host = platform_get_drvdata(dev); | 324 | struct sdhci_host *host = platform_get_drvdata(dev); |
378 | #ifdef CONFIG_PM_RUNTIME | ||
379 | struct sdhci_s3c *sc = sdhci_priv(host); | ||
380 | #endif | ||
381 | unsigned long flags; | 325 | unsigned long flags; |
382 | 326 | ||
383 | if (host) { | 327 | if (host) { |
384 | spin_lock_irqsave(&host->lock, flags); | 328 | spin_lock_irqsave(&host->lock, flags); |
385 | if (state) { | 329 | if (state) { |
386 | dev_dbg(&dev->dev, "card inserted.\n"); | 330 | dev_dbg(&dev->dev, "card inserted.\n"); |
387 | #ifdef CONFIG_PM_RUNTIME | ||
388 | clk_prepare_enable(sc->clk_io); | ||
389 | #endif | ||
390 | host->flags &= ~SDHCI_DEVICE_DEAD; | 331 | host->flags &= ~SDHCI_DEVICE_DEAD; |
391 | host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; | 332 | host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; |
392 | } else { | 333 | } else { |
393 | dev_dbg(&dev->dev, "card removed.\n"); | 334 | dev_dbg(&dev->dev, "card removed.\n"); |
394 | host->flags |= SDHCI_DEVICE_DEAD; | 335 | host->flags |= SDHCI_DEVICE_DEAD; |
395 | host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; | 336 | host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; |
396 | #ifdef CONFIG_PM_RUNTIME | ||
397 | clk_disable_unprepare(sc->clk_io); | ||
398 | #endif | ||
399 | } | 337 | } |
400 | tasklet_schedule(&host->card_tasklet); | 338 | tasklet_schedule(&host->card_tasklet); |
401 | spin_unlock_irqrestore(&host->lock, flags); | 339 | spin_unlock_irqrestore(&host->lock, flags); |
@@ -417,15 +355,13 @@ static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc) | |||
417 | struct s3c_sdhci_platdata *pdata = sc->pdata; | 355 | struct s3c_sdhci_platdata *pdata = sc->pdata; |
418 | struct device *dev = &sc->pdev->dev; | 356 | struct device *dev = &sc->pdev->dev; |
419 | 357 | ||
420 | if (devm_gpio_request(dev, pdata->ext_cd_gpio, "SDHCI EXT CD") == 0) { | 358 | if (gpio_request(pdata->ext_cd_gpio, "SDHCI EXT CD") == 0) { |
421 | sc->ext_cd_gpio = pdata->ext_cd_gpio; | 359 | sc->ext_cd_gpio = pdata->ext_cd_gpio; |
422 | sc->ext_cd_irq = gpio_to_irq(pdata->ext_cd_gpio); | 360 | sc->ext_cd_irq = gpio_to_irq(pdata->ext_cd_gpio); |
423 | if (sc->ext_cd_irq && | 361 | if (sc->ext_cd_irq && |
424 | request_threaded_irq(sc->ext_cd_irq, NULL, | 362 | request_threaded_irq(sc->ext_cd_irq, NULL, |
425 | sdhci_s3c_gpio_card_detect_thread, | 363 | sdhci_s3c_gpio_card_detect_thread, |
426 | IRQF_TRIGGER_RISING | | 364 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, |
427 | IRQF_TRIGGER_FALLING | | ||
428 | IRQF_ONESHOT, | ||
429 | dev_name(dev), sc) == 0) { | 365 | dev_name(dev), sc) == 0) { |
430 | int status = gpio_get_value(sc->ext_cd_gpio); | 366 | int status = gpio_get_value(sc->ext_cd_gpio); |
431 | if (pdata->ext_cd_gpio_invert) | 367 | if (pdata->ext_cd_gpio_invert) |
@@ -440,132 +376,16 @@ static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc) | |||
440 | } | 376 | } |
441 | } | 377 | } |
442 | 378 | ||
443 | #ifdef CONFIG_OF | 379 | static int __devinit sdhci_s3c_probe(struct platform_device *pdev) |
444 | static int sdhci_s3c_parse_dt(struct device *dev, | ||
445 | struct sdhci_host *host, struct s3c_sdhci_platdata *pdata) | ||
446 | { | 380 | { |
447 | struct device_node *node = dev->of_node; | 381 | struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data; |
448 | struct sdhci_s3c *ourhost = to_s3c(host); | ||
449 | u32 max_width; | ||
450 | int gpio, cnt, ret; | ||
451 | |||
452 | /* if the bus-width property is not specified, assume width as 1 */ | ||
453 | if (of_property_read_u32(node, "bus-width", &max_width)) | ||
454 | max_width = 1; | ||
455 | pdata->max_width = max_width; | ||
456 | |||
457 | ourhost->gpios = devm_kzalloc(dev, NUM_GPIOS(pdata->max_width) * | ||
458 | sizeof(int), GFP_KERNEL); | ||
459 | if (!ourhost->gpios) | ||
460 | return -ENOMEM; | ||
461 | |||
462 | /* get the card detection method */ | ||
463 | if (of_get_property(node, "broken-cd", NULL)) { | ||
464 | pdata->cd_type = S3C_SDHCI_CD_NONE; | ||
465 | goto setup_bus; | ||
466 | } | ||
467 | |||
468 | if (of_get_property(node, "non-removable", NULL)) { | ||
469 | pdata->cd_type = S3C_SDHCI_CD_PERMANENT; | ||
470 | goto setup_bus; | ||
471 | } | ||
472 | |||
473 | gpio = of_get_named_gpio(node, "cd-gpios", 0); | ||
474 | if (gpio_is_valid(gpio)) { | ||
475 | pdata->cd_type = S3C_SDHCI_CD_GPIO; | ||
476 | goto found_cd; | ||
477 | } else if (gpio != -ENOENT) { | ||
478 | dev_err(dev, "invalid card detect gpio specified\n"); | ||
479 | return -EINVAL; | ||
480 | } | ||
481 | |||
482 | gpio = of_get_named_gpio(node, "samsung,cd-pinmux-gpio", 0); | ||
483 | if (gpio_is_valid(gpio)) { | ||
484 | pdata->cd_type = S3C_SDHCI_CD_INTERNAL; | ||
485 | goto found_cd; | ||
486 | } else if (gpio != -ENOENT) { | ||
487 | dev_err(dev, "invalid card detect gpio specified\n"); | ||
488 | return -EINVAL; | ||
489 | } | ||
490 | |||
491 | /* assuming internal card detect that will be configured by pinctrl */ | ||
492 | pdata->cd_type = S3C_SDHCI_CD_INTERNAL; | ||
493 | goto setup_bus; | ||
494 | |||
495 | found_cd: | ||
496 | if (pdata->cd_type == S3C_SDHCI_CD_GPIO) { | ||
497 | pdata->ext_cd_gpio = gpio; | ||
498 | ourhost->ext_cd_gpio = -1; | ||
499 | if (of_get_property(node, "cd-inverted", NULL)) | ||
500 | pdata->ext_cd_gpio_invert = 1; | ||
501 | } else if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) { | ||
502 | ret = devm_gpio_request(dev, gpio, "sdhci-cd"); | ||
503 | if (ret) { | ||
504 | dev_err(dev, "card detect gpio request failed\n"); | ||
505 | return -EINVAL; | ||
506 | } | ||
507 | ourhost->ext_cd_gpio = gpio; | ||
508 | } | ||
509 | |||
510 | setup_bus: | ||
511 | if (!IS_ERR(ourhost->pctrl)) | ||
512 | return 0; | ||
513 | |||
514 | /* get the gpios for command, clock and data lines */ | ||
515 | for (cnt = 0; cnt < NUM_GPIOS(pdata->max_width); cnt++) { | ||
516 | gpio = of_get_gpio(node, cnt); | ||
517 | if (!gpio_is_valid(gpio)) { | ||
518 | dev_err(dev, "invalid gpio[%d]\n", cnt); | ||
519 | return -EINVAL; | ||
520 | } | ||
521 | ourhost->gpios[cnt] = gpio; | ||
522 | } | ||
523 | |||
524 | for (cnt = 0; cnt < NUM_GPIOS(pdata->max_width); cnt++) { | ||
525 | ret = devm_gpio_request(dev, ourhost->gpios[cnt], "sdhci-gpio"); | ||
526 | if (ret) { | ||
527 | dev_err(dev, "gpio[%d] request failed\n", cnt); | ||
528 | return -EINVAL; | ||
529 | } | ||
530 | } | ||
531 | |||
532 | return 0; | ||
533 | } | ||
534 | #else | ||
535 | static int sdhci_s3c_parse_dt(struct device *dev, | ||
536 | struct sdhci_host *host, struct s3c_sdhci_platdata *pdata) | ||
537 | { | ||
538 | return -EINVAL; | ||
539 | } | ||
540 | #endif | ||
541 | |||
542 | static const struct of_device_id sdhci_s3c_dt_match[]; | ||
543 | |||
544 | static inline struct sdhci_s3c_drv_data *sdhci_s3c_get_driver_data( | ||
545 | struct platform_device *pdev) | ||
546 | { | ||
547 | #ifdef CONFIG_OF | ||
548 | if (pdev->dev.of_node) { | ||
549 | const struct of_device_id *match; | ||
550 | match = of_match_node(sdhci_s3c_dt_match, pdev->dev.of_node); | ||
551 | return (struct sdhci_s3c_drv_data *)match->data; | ||
552 | } | ||
553 | #endif | ||
554 | return (struct sdhci_s3c_drv_data *) | ||
555 | platform_get_device_id(pdev)->driver_data; | ||
556 | } | ||
557 | |||
558 | static int sdhci_s3c_probe(struct platform_device *pdev) | ||
559 | { | ||
560 | struct s3c_sdhci_platdata *pdata; | ||
561 | struct sdhci_s3c_drv_data *drv_data; | ||
562 | struct device *dev = &pdev->dev; | 382 | struct device *dev = &pdev->dev; |
563 | struct sdhci_host *host; | 383 | struct sdhci_host *host; |
564 | struct sdhci_s3c *sc; | 384 | struct sdhci_s3c *sc; |
565 | struct resource *res; | 385 | struct resource *res; |
566 | int ret, irq, ptr, clks; | 386 | int ret, irq, ptr, clks; |
567 | 387 | ||
568 | if (!pdev->dev.platform_data && !pdev->dev.of_node) { | 388 | if (!pdata) { |
569 | dev_err(dev, "no device data specified\n"); | 389 | dev_err(dev, "no device data specified\n"); |
570 | return -ENOENT; | 390 | return -ENOENT; |
571 | } | 391 | } |
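[Editor's note] The hunk above drops the device-tree parsing helper that appears only in the left-hand column (bus width, broken/permanent card detect, cd-gpios). For orientation, a hedged sketch of that kind of parser using only generic OF helpers; struct example_dt_config and the function name are hypothetical.

#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

struct example_dt_config {		/* hypothetical container */
	u32 bus_width;
	int cd_gpio;
	bool non_removable;
	bool broken_cd;
};

static int example_parse_dt(struct device_node *np, struct example_dt_config *cfg)
{
	/* "bus-width" is optional; fall back to a 1-bit bus. */
	if (of_property_read_u32(np, "bus-width", &cfg->bus_width))
		cfg->bus_width = 1;

	cfg->non_removable = of_property_read_bool(np, "non-removable");
	cfg->broken_cd = of_property_read_bool(np, "broken-cd");

	/* Optional card-detect GPIO; -ENOENT simply means "not wired up". */
	cfg->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
	if (!gpio_is_valid(cfg->cd_gpio) && cfg->cd_gpio != -ENOENT)
		return cfg->cd_gpio;

	return 0;
}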
@@ -576,35 +396,24 @@ static int sdhci_s3c_probe(struct platform_device *pdev) | |||
576 | return irq; | 396 | return irq; |
577 | } | 397 | } |
578 | 398 | ||
399 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
400 | if (!res) { | ||
401 | dev_err(dev, "no memory specified\n"); | ||
402 | return -ENOENT; | ||
403 | } | ||
404 | |||
579 | host = sdhci_alloc_host(dev, sizeof(struct sdhci_s3c)); | 405 | host = sdhci_alloc_host(dev, sizeof(struct sdhci_s3c)); |
580 | if (IS_ERR(host)) { | 406 | if (IS_ERR(host)) { |
581 | dev_err(dev, "sdhci_alloc_host() failed\n"); | 407 | dev_err(dev, "sdhci_alloc_host() failed\n"); |
582 | return PTR_ERR(host); | 408 | return PTR_ERR(host); |
583 | } | 409 | } |
584 | sc = sdhci_priv(host); | ||
585 | 410 | ||
586 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | 411 | sc = sdhci_priv(host); |
587 | if (!pdata) { | ||
588 | ret = -ENOMEM; | ||
589 | goto err_pdata_io_clk; | ||
590 | } | ||
591 | |||
592 | sc->pctrl = devm_pinctrl_get_select_default(&pdev->dev); | ||
593 | |||
594 | if (pdev->dev.of_node) { | ||
595 | ret = sdhci_s3c_parse_dt(&pdev->dev, host, pdata); | ||
596 | if (ret) | ||
597 | goto err_pdata_io_clk; | ||
598 | } else { | ||
599 | memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata)); | ||
600 | sc->ext_cd_gpio = -1; /* invalid gpio number */ | ||
601 | } | ||
602 | |||
603 | drv_data = sdhci_s3c_get_driver_data(pdev); | ||
604 | 412 | ||
605 | sc->host = host; | 413 | sc->host = host; |
606 | sc->pdev = pdev; | 414 | sc->pdev = pdev; |
607 | sc->pdata = pdata; | 415 | sc->pdata = pdata; |
416 | sc->ext_cd_gpio = -1; /* invalid gpio number */ | ||
608 | 417 | ||
609 | platform_set_drvdata(pdev, host); | 418 | platform_set_drvdata(pdev, host); |
610 | 419 | ||
@@ -612,20 +421,24 @@ static int sdhci_s3c_probe(struct platform_device *pdev) | |||
612 | if (IS_ERR(sc->clk_io)) { | 421 | if (IS_ERR(sc->clk_io)) { |
613 | dev_err(dev, "failed to get io clock\n"); | 422 | dev_err(dev, "failed to get io clock\n"); |
614 | ret = PTR_ERR(sc->clk_io); | 423 | ret = PTR_ERR(sc->clk_io); |
615 | goto err_pdata_io_clk; | 424 | goto err_io_clk; |
616 | } | 425 | } |
617 | 426 | ||
618 | /* enable the local io clock and keep it running for the moment. */ | 427 | /* enable the local io clock and keep it running for the moment. */ |
619 | clk_prepare_enable(sc->clk_io); | 428 | clk_enable(sc->clk_io); |
620 | 429 | ||
621 | for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) { | 430 | for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) { |
622 | struct clk *clk; | 431 | struct clk *clk; |
623 | char name[14]; | 432 | char *name = pdata->clocks[ptr]; |
433 | |||
434 | if (name == NULL) | ||
435 | continue; | ||
624 | 436 | ||
625 | snprintf(name, 14, "mmc_busclk.%d", ptr); | ||
626 | clk = clk_get(dev, name); | 437 | clk = clk_get(dev, name); |
627 | if (IS_ERR(clk)) | 438 | if (IS_ERR(clk)) { |
439 | dev_err(dev, "failed to get clock %s\n", name); | ||
628 | continue; | 440 | continue; |
441 | } | ||
629 | 442 | ||
630 | clks++; | 443 | clks++; |
631 | sc->clk_bus[ptr] = clk; | 444 | sc->clk_bus[ptr] = clk; |
@@ -636,6 +449,8 @@ static int sdhci_s3c_probe(struct platform_device *pdev) | |||
636 | */ | 449 | */ |
637 | sc->cur_clk = ptr; | 450 | sc->cur_clk = ptr; |
638 | 451 | ||
452 | clk_enable(clk); | ||
453 | |||
639 | dev_info(dev, "clock source %d: %s (%ld Hz)\n", | 454 | dev_info(dev, "clock source %d: %s (%ld Hz)\n", |
640 | ptr, name, clk_get_rate(clk)); | 455 | ptr, name, clk_get_rate(clk)); |
641 | } | 456 | } |
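[Editor's note] The clock hunks above swap clk_prepare_enable() (common clock framework: prepare may sleep, enable may run in atomic context) for plain clk_enable(), and look bus clocks up either by a fixed "mmc_busclk.N" name or from platform data. A hedged sketch of the name-based lookup loop; the example_ helper is hypothetical.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>

/* Hypothetical helper: grab and start the first available bus clock. */
static struct clk *example_get_busclk(struct device *dev, int max_clks)
{
	struct clk *clk;
	char name[14];
	int i;

	for (i = 0; i < max_clks; i++) {
		snprintf(name, sizeof(name), "mmc_busclk.%d", i);
		clk = clk_get(dev, name);
		if (IS_ERR(clk))
			continue;

		/* On common-clk kernels the clock must be prepared as well. */
		if (clk_prepare_enable(clk)) {
			clk_put(clk);
			continue;
		}

		dev_info(dev, "using %s at %lu Hz\n", name, clk_get_rate(clk));
		return clk;
	}

	return ERR_PTR(-ENODEV);
}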
@@ -646,12 +461,15 @@ static int sdhci_s3c_probe(struct platform_device *pdev) | |||
646 | goto err_no_busclks; | 461 | goto err_no_busclks; |
647 | } | 462 | } |
648 | 463 | ||
649 | #ifndef CONFIG_PM_RUNTIME | 464 | sc->ioarea = request_mem_region(res->start, resource_size(res), |
650 | clk_prepare_enable(sc->clk_bus[sc->cur_clk]); | 465 | mmc_hostname(host->mmc)); |
651 | #endif | 466 | if (!sc->ioarea) { |
467 | dev_err(dev, "failed to reserve register area\n"); | ||
468 | ret = -ENXIO; | ||
469 | goto err_req_regs; | ||
470 | } | ||
652 | 471 | ||
653 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 472 | host->ioaddr = ioremap_nocache(res->start, resource_size(res)); |
654 | host->ioaddr = devm_request_and_ioremap(&pdev->dev, res); | ||
655 | if (!host->ioaddr) { | 473 | if (!host->ioaddr) { |
656 | dev_err(dev, "failed to map registers\n"); | 474 | dev_err(dev, "failed to map registers\n"); |
657 | ret = -ENXIO; | 475 | ret = -ENXIO; |
@@ -670,8 +488,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev) | |||
670 | /* Setup quirks for the controller */ | 488 | /* Setup quirks for the controller */ |
671 | host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC; | 489 | host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC; |
672 | host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT; | 490 | host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT; |
673 | if (drv_data) | ||
674 | host->quirks |= drv_data->sdhci_quirks; | ||
675 | 491 | ||
676 | #ifndef CONFIG_MMC_SDHCI_S3C_DMA | 492 | #ifndef CONFIG_MMC_SDHCI_S3C_DMA |
677 | 493 | ||
@@ -699,16 +515,8 @@ static int sdhci_s3c_probe(struct platform_device *pdev) | |||
699 | if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT) | 515 | if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT) |
700 | host->mmc->caps = MMC_CAP_NONREMOVABLE; | 516 | host->mmc->caps = MMC_CAP_NONREMOVABLE; |
701 | 517 | ||
702 | switch (pdata->max_width) { | 518 | if (pdata->host_caps) |
703 | case 8: | 519 | host->mmc->caps |= pdata->host_caps; |
704 | host->mmc->caps |= MMC_CAP_8_BIT_DATA; | ||
705 | case 4: | ||
706 | host->mmc->caps |= MMC_CAP_4_BIT_DATA; | ||
707 | break; | ||
708 | } | ||
709 | |||
710 | if (pdata->pm_caps) | ||
711 | host->mmc->pm_caps |= pdata->pm_caps; | ||
712 | 520 | ||
713 | host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR | | 521 | host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR | |
714 | SDHCI_QUIRK_32BIT_DMA_SIZE); | 522 | SDHCI_QUIRK_32BIT_DMA_SIZE); |
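[Editor's note] The removed switch in the hunk above maps a numeric max_width onto MMC_CAP_* bits, with the 8-bit case deliberately falling through so an 8-bit slot also advertises 4-bit transfers. A compact sketch of that translation, with a hypothetical function name.

#include <linux/mmc/host.h>

/* Hypothetical translation of a numeric bus width into host capability bits. */
static u32 example_width_to_caps(unsigned int max_width)
{
	u32 caps = 0;

	switch (max_width) {
	case 8:
		caps |= MMC_CAP_8_BIT_DATA;
		/* fall through: an 8-bit slot can also run 4-bit transfers */
	case 4:
		caps |= MMC_CAP_4_BIT_DATA;
		break;
	}

	return caps;
}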
@@ -720,7 +528,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev) | |||
720 | * If controller does not have internal clock divider, | 528 | * If controller does not have internal clock divider, |
721 | * we can use overriding functions instead of default. | 529 | * we can use overriding functions instead of default. |
722 | */ | 530 | */ |
723 | if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) { | 531 | if (pdata->clk_type) { |
724 | sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock; | 532 | sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock; |
725 | sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock; | 533 | sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock; |
726 | sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock; | 534 | sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock; |
@@ -730,20 +538,10 @@ static int sdhci_s3c_probe(struct platform_device *pdev) | |||
730 | if (pdata->host_caps) | 538 | if (pdata->host_caps) |
731 | host->mmc->caps |= pdata->host_caps; | 539 | host->mmc->caps |= pdata->host_caps; |
732 | 540 | ||
733 | if (pdata->host_caps2) | ||
734 | host->mmc->caps2 |= pdata->host_caps2; | ||
735 | |||
736 | pm_runtime_enable(&pdev->dev); | ||
737 | pm_runtime_set_autosuspend_delay(&pdev->dev, 50); | ||
738 | pm_runtime_use_autosuspend(&pdev->dev); | ||
739 | pm_suspend_ignore_children(&pdev->dev, 1); | ||
740 | |||
741 | ret = sdhci_add_host(host); | 541 | ret = sdhci_add_host(host); |
742 | if (ret) { | 542 | if (ret) { |
743 | dev_err(dev, "sdhci_add_host() failed\n"); | 543 | dev_err(dev, "sdhci_add_host() failed\n"); |
744 | pm_runtime_forbid(&pdev->dev); | 544 | goto err_add_host; |
745 | pm_runtime_get_noresume(&pdev->dev); | ||
746 | goto err_req_regs; | ||
747 | } | 545 | } |
748 | 546 | ||
749 | /* The following two methods of card detection might call | 547 | /* The following two methods of card detection might call |
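[Editor's note] The hunk above removes the probe-time runtime-PM setup (50 ms autosuspend window, ignoring child devices for suspend). With that setup in place, I/O paths normally bracket hardware access with get/put calls. A hedged sketch of both halves; the example_ functions are hypothetical.

#include <linux/pm_runtime.h>

/* Probe side: opt in to runtime PM with a short autosuspend window. */
static void example_enable_rpm(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 50);	/* ms */
	pm_runtime_use_autosuspend(dev);
	pm_suspend_ignore_children(dev, true);
	pm_runtime_enable(dev);
}

/* I/O side: keep the device powered only while it is actually used. */
static int example_do_io(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... touch the hardware here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}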
@@ -755,37 +553,33 @@ static int sdhci_s3c_probe(struct platform_device *pdev) | |||
755 | gpio_is_valid(pdata->ext_cd_gpio)) | 553 | gpio_is_valid(pdata->ext_cd_gpio)) |
756 | sdhci_s3c_setup_card_detect_gpio(sc); | 554 | sdhci_s3c_setup_card_detect_gpio(sc); |
757 | 555 | ||
758 | #ifdef CONFIG_PM_RUNTIME | ||
759 | if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL) | ||
760 | clk_disable_unprepare(sc->clk_io); | ||
761 | #endif | ||
762 | return 0; | 556 | return 0; |
763 | 557 | ||
558 | err_add_host: | ||
559 | release_resource(sc->ioarea); | ||
560 | kfree(sc->ioarea); | ||
561 | |||
764 | err_req_regs: | 562 | err_req_regs: |
765 | #ifndef CONFIG_PM_RUNTIME | ||
766 | clk_disable_unprepare(sc->clk_bus[sc->cur_clk]); | ||
767 | #endif | ||
768 | for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) { | 563 | for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) { |
769 | if (sc->clk_bus[ptr]) { | 564 | clk_disable(sc->clk_bus[ptr]); |
770 | clk_put(sc->clk_bus[ptr]); | 565 | clk_put(sc->clk_bus[ptr]); |
771 | } | ||
772 | } | 566 | } |
773 | 567 | ||
774 | err_no_busclks: | 568 | err_no_busclks: |
775 | clk_disable_unprepare(sc->clk_io); | 569 | clk_disable(sc->clk_io); |
776 | clk_put(sc->clk_io); | 570 | clk_put(sc->clk_io); |
777 | 571 | ||
778 | err_pdata_io_clk: | 572 | err_io_clk: |
779 | sdhci_free_host(host); | 573 | sdhci_free_host(host); |
780 | 574 | ||
781 | return ret; | 575 | return ret; |
782 | } | 576 | } |
783 | 577 | ||
784 | static int sdhci_s3c_remove(struct platform_device *pdev) | 578 | static int __devexit sdhci_s3c_remove(struct platform_device *pdev) |
785 | { | 579 | { |
580 | struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data; | ||
786 | struct sdhci_host *host = platform_get_drvdata(pdev); | 581 | struct sdhci_host *host = platform_get_drvdata(pdev); |
787 | struct sdhci_s3c *sc = sdhci_priv(host); | 582 | struct sdhci_s3c *sc = sdhci_priv(host); |
788 | struct s3c_sdhci_platdata *pdata = sc->pdata; | ||
789 | int ptr; | 583 | int ptr; |
790 | 584 | ||
791 | if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_cleanup) | 585 | if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_cleanup) |
@@ -794,134 +588,74 @@ static int sdhci_s3c_remove(struct platform_device *pdev) | |||
794 | if (sc->ext_cd_irq) | 588 | if (sc->ext_cd_irq) |
795 | free_irq(sc->ext_cd_irq, sc); | 589 | free_irq(sc->ext_cd_irq, sc); |
796 | 590 | ||
797 | #ifdef CONFIG_PM_RUNTIME | 591 | if (gpio_is_valid(sc->ext_cd_gpio)) |
798 | if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL) | 592 | gpio_free(sc->ext_cd_gpio); |
799 | clk_prepare_enable(sc->clk_io); | ||
800 | #endif | ||
801 | sdhci_remove_host(host, 1); | ||
802 | 593 | ||
803 | pm_runtime_dont_use_autosuspend(&pdev->dev); | 594 | sdhci_remove_host(host, 1); |
804 | pm_runtime_disable(&pdev->dev); | ||
805 | 595 | ||
806 | #ifndef CONFIG_PM_RUNTIME | 596 | for (ptr = 0; ptr < 3; ptr++) { |
807 | clk_disable_unprepare(sc->clk_bus[sc->cur_clk]); | ||
808 | #endif | ||
809 | for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) { | ||
810 | if (sc->clk_bus[ptr]) { | 597 | if (sc->clk_bus[ptr]) { |
598 | clk_disable(sc->clk_bus[ptr]); | ||
811 | clk_put(sc->clk_bus[ptr]); | 599 | clk_put(sc->clk_bus[ptr]); |
812 | } | 600 | } |
813 | } | 601 | } |
814 | clk_disable_unprepare(sc->clk_io); | 602 | clk_disable(sc->clk_io); |
815 | clk_put(sc->clk_io); | 603 | clk_put(sc->clk_io); |
816 | 604 | ||
605 | iounmap(host->ioaddr); | ||
606 | release_resource(sc->ioarea); | ||
607 | kfree(sc->ioarea); | ||
608 | |||
817 | sdhci_free_host(host); | 609 | sdhci_free_host(host); |
818 | platform_set_drvdata(pdev, NULL); | 610 | platform_set_drvdata(pdev, NULL); |
819 | 611 | ||
820 | return 0; | 612 | return 0; |
821 | } | 613 | } |
822 | 614 | ||
823 | #ifdef CONFIG_PM_SLEEP | 615 | #ifdef CONFIG_PM |
824 | static int sdhci_s3c_suspend(struct device *dev) | ||
825 | { | ||
826 | struct sdhci_host *host = dev_get_drvdata(dev); | ||
827 | |||
828 | return sdhci_suspend_host(host); | ||
829 | } | ||
830 | |||
831 | static int sdhci_s3c_resume(struct device *dev) | ||
832 | { | ||
833 | struct sdhci_host *host = dev_get_drvdata(dev); | ||
834 | |||
835 | return sdhci_resume_host(host); | ||
836 | } | ||
837 | #endif | ||
838 | 616 | ||
839 | #ifdef CONFIG_PM_RUNTIME | 617 | static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm) |
840 | static int sdhci_s3c_runtime_suspend(struct device *dev) | ||
841 | { | 618 | { |
842 | struct sdhci_host *host = dev_get_drvdata(dev); | 619 | struct sdhci_host *host = platform_get_drvdata(dev); |
843 | struct sdhci_s3c *ourhost = to_s3c(host); | ||
844 | struct clk *busclk = ourhost->clk_io; | ||
845 | int ret; | ||
846 | |||
847 | ret = sdhci_runtime_suspend_host(host); | ||
848 | 620 | ||
849 | clk_disable_unprepare(ourhost->clk_bus[ourhost->cur_clk]); | 621 | return sdhci_suspend_host(host, pm); |
850 | clk_disable_unprepare(busclk); | ||
851 | return ret; | ||
852 | } | 622 | } |
853 | 623 | ||
854 | static int sdhci_s3c_runtime_resume(struct device *dev) | 624 | static int sdhci_s3c_resume(struct platform_device *dev) |
855 | { | 625 | { |
856 | struct sdhci_host *host = dev_get_drvdata(dev); | 626 | struct sdhci_host *host = platform_get_drvdata(dev); |
857 | struct sdhci_s3c *ourhost = to_s3c(host); | ||
858 | struct clk *busclk = ourhost->clk_io; | ||
859 | int ret; | ||
860 | 627 | ||
861 | clk_prepare_enable(busclk); | 628 | return sdhci_resume_host(host); |
862 | clk_prepare_enable(ourhost->clk_bus[ourhost->cur_clk]); | ||
863 | ret = sdhci_runtime_resume_host(host); | ||
864 | return ret; | ||
865 | } | 629 | } |
866 | #endif | ||
867 | |||
868 | #ifdef CONFIG_PM | ||
869 | static const struct dev_pm_ops sdhci_s3c_pmops = { | ||
870 | SET_SYSTEM_SLEEP_PM_OPS(sdhci_s3c_suspend, sdhci_s3c_resume) | ||
871 | SET_RUNTIME_PM_OPS(sdhci_s3c_runtime_suspend, sdhci_s3c_runtime_resume, | ||
872 | NULL) | ||
873 | }; | ||
874 | 630 | ||
875 | #define SDHCI_S3C_PMOPS (&sdhci_s3c_pmops) | ||
876 | |||
877 | #else | ||
878 | #define SDHCI_S3C_PMOPS NULL | ||
879 | #endif | ||
880 | |||
881 | #if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212) | ||
882 | static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = { | ||
883 | .sdhci_quirks = SDHCI_QUIRK_NONSTANDARD_CLOCK, | ||
884 | }; | ||
885 | #define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)&exynos4_sdhci_drv_data) | ||
886 | #else | 631 | #else |
887 | #define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)NULL) | 632 | #define sdhci_s3c_suspend NULL |
888 | #endif | 633 | #define sdhci_s3c_resume NULL |
889 | |||
890 | static struct platform_device_id sdhci_s3c_driver_ids[] = { | ||
891 | { | ||
892 | .name = "s3c-sdhci", | ||
893 | .driver_data = (kernel_ulong_t)NULL, | ||
894 | }, { | ||
895 | .name = "exynos4-sdhci", | ||
896 | .driver_data = EXYNOS4_SDHCI_DRV_DATA, | ||
897 | }, | ||
898 | { } | ||
899 | }; | ||
900 | MODULE_DEVICE_TABLE(platform, sdhci_s3c_driver_ids); | ||
901 | |||
902 | #ifdef CONFIG_OF | ||
903 | static const struct of_device_id sdhci_s3c_dt_match[] = { | ||
904 | { .compatible = "samsung,s3c6410-sdhci", }, | ||
905 | { .compatible = "samsung,exynos4210-sdhci", | ||
906 | .data = (void *)EXYNOS4_SDHCI_DRV_DATA }, | ||
907 | {}, | ||
908 | }; | ||
909 | MODULE_DEVICE_TABLE(of, sdhci_s3c_dt_match); | ||
910 | #endif | 634 | #endif |
911 | 635 | ||
912 | static struct platform_driver sdhci_s3c_driver = { | 636 | static struct platform_driver sdhci_s3c_driver = { |
913 | .probe = sdhci_s3c_probe, | 637 | .probe = sdhci_s3c_probe, |
914 | .remove = sdhci_s3c_remove, | 638 | .remove = __devexit_p(sdhci_s3c_remove), |
915 | .id_table = sdhci_s3c_driver_ids, | 639 | .suspend = sdhci_s3c_suspend, |
640 | .resume = sdhci_s3c_resume, | ||
916 | .driver = { | 641 | .driver = { |
917 | .owner = THIS_MODULE, | 642 | .owner = THIS_MODULE, |
918 | .name = "s3c-sdhci", | 643 | .name = "s3c-sdhci", |
919 | .of_match_table = of_match_ptr(sdhci_s3c_dt_match), | ||
920 | .pm = SDHCI_S3C_PMOPS, | ||
921 | }, | 644 | }, |
922 | }; | 645 | }; |
923 | 646 | ||
924 | module_platform_driver(sdhci_s3c_driver); | 647 | static int __init sdhci_s3c_init(void) |
648 | { | ||
649 | return platform_driver_register(&sdhci_s3c_driver); | ||
650 | } | ||
651 | |||
652 | static void __exit sdhci_s3c_exit(void) | ||
653 | { | ||
654 | platform_driver_unregister(&sdhci_s3c_driver); | ||
655 | } | ||
656 | |||
657 | module_init(sdhci_s3c_init); | ||
658 | module_exit(sdhci_s3c_exit); | ||
925 | 659 | ||
926 | MODULE_DESCRIPTION("Samsung SDHCI (HSMMC) glue"); | 660 | MODULE_DESCRIPTION("Samsung SDHCI (HSMMC) glue"); |
927 | MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>"); | 661 | MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>"); |
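[Editor's note] The tail of the sdhci-s3c diff trades a dev_pm_ops table (system sleep plus runtime callbacks wired through .driver.pm) and module_platform_driver() for legacy .suspend/.resume members and open-coded module_init/module_exit. For reference, a minimal sketch of the dev_pm_ops wiring; every example_* name and the stub bodies are assumptions.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int __maybe_unused example_suspend(struct device *dev)
{
	return 0;	/* quiesce the controller here */
}

static int __maybe_unused example_resume(struct device *dev)
{
	return 0;	/* restore controller state here */
}

static int __maybe_unused example_rt_suspend(struct device *dev)
{
	return 0;	/* gate clocks while idle */
}

static int __maybe_unused example_rt_resume(struct device *dev)
{
	return 0;	/* ungate clocks before I/O */
}

static const struct dev_pm_ops example_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
	SET_RUNTIME_PM_OPS(example_rt_suspend, example_rt_resume, NULL)
};

static struct platform_driver example_driver = {
	.driver = {
		.name	= "example-sdhci",	/* hypothetical name */
		.owner	= THIS_MODULE,
		.pm	= &example_pm_ops,
	},
	/* .probe / .remove omitted in this sketch */
};

module_platform_driver(example_driver);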
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c index c6ece0bd03b..60a4c97d3d1 100644 --- a/drivers/mmc/host/sdhci-spear.c +++ b/drivers/mmc/host/sdhci-spear.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * Support of SDHCI platform devices for spear soc family | 4 | * Support of SDHCI platform devices for spear soc family |
5 | * | 5 | * |
6 | * Copyright (C) 2010 ST Microelectronics | 6 | * Copyright (C) 2010 ST Microelectronics |
7 | * Viresh Kumar <viresh.linux@gmail.com> | 7 | * Viresh Kumar<viresh.kumar@st.com> |
8 | * | 8 | * |
9 | * Inspired by sdhci-pltfm.c | 9 | * Inspired by sdhci-pltfm.c |
10 | * | 10 | * |
@@ -17,13 +17,9 @@ | |||
17 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
18 | #include <linux/gpio.h> | 18 | #include <linux/gpio.h> |
19 | #include <linux/highmem.h> | 19 | #include <linux/highmem.h> |
20 | #include <linux/module.h> | ||
21 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
22 | #include <linux/irq.h> | 21 | #include <linux/irq.h> |
23 | #include <linux/of.h> | ||
24 | #include <linux/of_gpio.h> | ||
25 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
26 | #include <linux/pm.h> | ||
27 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
28 | #include <linux/mmc/host.h> | 24 | #include <linux/mmc/host.h> |
29 | #include <linux/mmc/sdhci-spear.h> | 25 | #include <linux/mmc/sdhci-spear.h> |
@@ -70,45 +66,15 @@ static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id) | |||
70 | return IRQ_HANDLED; | 66 | return IRQ_HANDLED; |
71 | } | 67 | } |
72 | 68 | ||
73 | #ifdef CONFIG_OF | 69 | static int __devinit sdhci_probe(struct platform_device *pdev) |
74 | static struct sdhci_plat_data *sdhci_probe_config_dt(struct platform_device *pdev) | ||
75 | { | 70 | { |
76 | struct device_node *np = pdev->dev.of_node; | ||
77 | struct sdhci_plat_data *pdata = NULL; | ||
78 | int cd_gpio; | ||
79 | |||
80 | cd_gpio = of_get_named_gpio(np, "cd-gpios", 0); | ||
81 | if (!gpio_is_valid(cd_gpio)) | ||
82 | cd_gpio = -1; | ||
83 | |||
84 | /* If pdata is required */ | ||
85 | if (cd_gpio != -1) { | ||
86 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | ||
87 | if (!pdata) { | ||
88 | dev_err(&pdev->dev, "DT: kzalloc failed\n"); | ||
89 | return ERR_PTR(-ENOMEM); | ||
90 | } | ||
91 | } | ||
92 | |||
93 | pdata->card_int_gpio = cd_gpio; | ||
94 | |||
95 | return pdata; | ||
96 | } | ||
97 | #else | ||
98 | static struct sdhci_plat_data *sdhci_probe_config_dt(struct platform_device *pdev) | ||
99 | { | ||
100 | return ERR_PTR(-ENOSYS); | ||
101 | } | ||
102 | #endif | ||
103 | |||
104 | static int sdhci_probe(struct platform_device *pdev) | ||
105 | { | ||
106 | struct device_node *np = pdev->dev.of_node; | ||
107 | struct sdhci_host *host; | 71 | struct sdhci_host *host; |
108 | struct resource *iomem; | 72 | struct resource *iomem; |
109 | struct spear_sdhci *sdhci; | 73 | struct spear_sdhci *sdhci; |
110 | int ret; | 74 | int ret; |
111 | 75 | ||
76 | BUG_ON(pdev == NULL); | ||
77 | |||
112 | iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 78 | iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
113 | if (!iomem) { | 79 | if (!iomem) { |
114 | ret = -ENOMEM; | 80 | ret = -ENOMEM; |
@@ -116,18 +82,18 @@ static int sdhci_probe(struct platform_device *pdev) | |||
116 | goto err; | 82 | goto err; |
117 | } | 83 | } |
118 | 84 | ||
119 | if (!devm_request_mem_region(&pdev->dev, iomem->start, | 85 | if (!request_mem_region(iomem->start, resource_size(iomem), |
120 | resource_size(iomem), "spear-sdhci")) { | 86 | "spear-sdhci")) { |
121 | ret = -EBUSY; | 87 | ret = -EBUSY; |
122 | dev_dbg(&pdev->dev, "cannot request region\n"); | 88 | dev_dbg(&pdev->dev, "cannot request region\n"); |
123 | goto err; | 89 | goto err; |
124 | } | 90 | } |
125 | 91 | ||
126 | sdhci = devm_kzalloc(&pdev->dev, sizeof(*sdhci), GFP_KERNEL); | 92 | sdhci = kzalloc(sizeof(*sdhci), GFP_KERNEL); |
127 | if (!sdhci) { | 93 | if (!sdhci) { |
128 | ret = -ENOMEM; | 94 | ret = -ENOMEM; |
129 | dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n"); | 95 | dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n"); |
130 | goto err; | 96 | goto err_kzalloc; |
131 | } | 97 | } |
132 | 98 | ||
133 | /* clk enable */ | 99 | /* clk enable */ |
@@ -135,30 +101,17 @@ static int sdhci_probe(struct platform_device *pdev) | |||
135 | if (IS_ERR(sdhci->clk)) { | 101 | if (IS_ERR(sdhci->clk)) { |
136 | ret = PTR_ERR(sdhci->clk); | 102 | ret = PTR_ERR(sdhci->clk); |
137 | dev_dbg(&pdev->dev, "Error getting clock\n"); | 103 | dev_dbg(&pdev->dev, "Error getting clock\n"); |
138 | goto err; | 104 | goto err_clk_get; |
139 | } | 105 | } |
140 | 106 | ||
141 | ret = clk_prepare_enable(sdhci->clk); | 107 | ret = clk_enable(sdhci->clk); |
142 | if (ret) { | 108 | if (ret) { |
143 | dev_dbg(&pdev->dev, "Error enabling clock\n"); | 109 | dev_dbg(&pdev->dev, "Error enabling clock\n"); |
144 | goto put_clk; | 110 | goto err_clk_enb; |
145 | } | ||
146 | |||
147 | ret = clk_set_rate(sdhci->clk, 50000000); | ||
148 | if (ret) | ||
149 | dev_dbg(&pdev->dev, "Error setting desired clk, clk=%lu\n", | ||
150 | clk_get_rate(sdhci->clk)); | ||
151 | |||
152 | if (np) { | ||
153 | sdhci->data = sdhci_probe_config_dt(pdev); | ||
154 | if (IS_ERR(sdhci->data)) { | ||
155 | dev_err(&pdev->dev, "DT: Failed to get pdata\n"); | ||
156 | return -ENODEV; | ||
157 | } | ||
158 | } else { | ||
159 | sdhci->data = dev_get_platdata(&pdev->dev); | ||
160 | } | 111 | } |
161 | 112 | ||
113 | /* overwrite platform_data */ | ||
114 | sdhci->data = dev_get_platdata(&pdev->dev); | ||
162 | pdev->dev.platform_data = sdhci; | 115 | pdev->dev.platform_data = sdhci; |
163 | 116 | ||
164 | if (pdev->dev.parent) | 117 | if (pdev->dev.parent) |
@@ -169,7 +122,7 @@ static int sdhci_probe(struct platform_device *pdev) | |||
169 | if (IS_ERR(host)) { | 122 | if (IS_ERR(host)) { |
170 | ret = PTR_ERR(host); | 123 | ret = PTR_ERR(host); |
171 | dev_dbg(&pdev->dev, "error allocating host\n"); | 124 | dev_dbg(&pdev->dev, "error allocating host\n"); |
172 | goto disable_clk; | 125 | goto err_alloc_host; |
173 | } | 126 | } |
174 | 127 | ||
175 | host->hw_name = "sdhci"; | 128 | host->hw_name = "sdhci"; |
@@ -177,18 +130,17 @@ static int sdhci_probe(struct platform_device *pdev) | |||
177 | host->irq = platform_get_irq(pdev, 0); | 130 | host->irq = platform_get_irq(pdev, 0); |
178 | host->quirks = SDHCI_QUIRK_BROKEN_ADMA; | 131 | host->quirks = SDHCI_QUIRK_BROKEN_ADMA; |
179 | 132 | ||
180 | host->ioaddr = devm_ioremap(&pdev->dev, iomem->start, | 133 | host->ioaddr = ioremap(iomem->start, resource_size(iomem)); |
181 | resource_size(iomem)); | ||
182 | if (!host->ioaddr) { | 134 | if (!host->ioaddr) { |
183 | ret = -ENOMEM; | 135 | ret = -ENOMEM; |
184 | dev_dbg(&pdev->dev, "failed to remap registers\n"); | 136 | dev_dbg(&pdev->dev, "failed to remap registers\n"); |
185 | goto free_host; | 137 | goto err_ioremap; |
186 | } | 138 | } |
187 | 139 | ||
188 | ret = sdhci_add_host(host); | 140 | ret = sdhci_add_host(host); |
189 | if (ret) { | 141 | if (ret) { |
190 | dev_dbg(&pdev->dev, "error adding host\n"); | 142 | dev_dbg(&pdev->dev, "error adding host\n"); |
191 | goto free_host; | 143 | goto err_add_host; |
192 | } | 144 | } |
193 | 145 | ||
194 | platform_set_drvdata(pdev, host); | 146 | platform_set_drvdata(pdev, host); |
@@ -207,12 +159,11 @@ static int sdhci_probe(struct platform_device *pdev) | |||
207 | if (sdhci->data->card_power_gpio >= 0) { | 159 | if (sdhci->data->card_power_gpio >= 0) { |
208 | int val = 0; | 160 | int val = 0; |
209 | 161 | ||
210 | ret = devm_gpio_request(&pdev->dev, | 162 | ret = gpio_request(sdhci->data->card_power_gpio, "sdhci"); |
211 | sdhci->data->card_power_gpio, "sdhci"); | ||
212 | if (ret < 0) { | 163 | if (ret < 0) { |
213 | dev_dbg(&pdev->dev, "gpio request fail: %d\n", | 164 | dev_dbg(&pdev->dev, "gpio request fail: %d\n", |
214 | sdhci->data->card_power_gpio); | 165 | sdhci->data->card_power_gpio); |
215 | goto set_drvdata; | 166 | goto err_pgpio_request; |
216 | } | 167 | } |
217 | 168 | ||
218 | if (sdhci->data->power_always_enb) | 169 | if (sdhci->data->power_always_enb) |
@@ -224,126 +175,124 @@ static int sdhci_probe(struct platform_device *pdev) | |||
224 | if (ret) { | 175 | if (ret) { |
225 | dev_dbg(&pdev->dev, "gpio set direction fail: %d\n", | 176 | dev_dbg(&pdev->dev, "gpio set direction fail: %d\n", |
226 | sdhci->data->card_power_gpio); | 177 | sdhci->data->card_power_gpio); |
227 | goto set_drvdata; | 178 | goto err_pgpio_direction; |
228 | } | 179 | } |
180 | |||
181 | gpio_set_value(sdhci->data->card_power_gpio, 1); | ||
229 | } | 182 | } |
230 | 183 | ||
231 | if (sdhci->data->card_int_gpio >= 0) { | 184 | if (sdhci->data->card_int_gpio >= 0) { |
232 | ret = devm_gpio_request(&pdev->dev, sdhci->data->card_int_gpio, | 185 | ret = gpio_request(sdhci->data->card_int_gpio, "sdhci"); |
233 | "sdhci"); | ||
234 | if (ret < 0) { | 186 | if (ret < 0) { |
235 | dev_dbg(&pdev->dev, "gpio request fail: %d\n", | 187 | dev_dbg(&pdev->dev, "gpio request fail: %d\n", |
236 | sdhci->data->card_int_gpio); | 188 | sdhci->data->card_int_gpio); |
237 | goto set_drvdata; | 189 | goto err_igpio_request; |
238 | } | 190 | } |
239 | 191 | ||
240 | ret = gpio_direction_input(sdhci->data->card_int_gpio); | 192 | ret = gpio_direction_input(sdhci->data->card_int_gpio); |
241 | if (ret) { | 193 | if (ret) { |
242 | dev_dbg(&pdev->dev, "gpio set direction fail: %d\n", | 194 | dev_dbg(&pdev->dev, "gpio set direction fail: %d\n", |
243 | sdhci->data->card_int_gpio); | 195 | sdhci->data->card_int_gpio); |
244 | goto set_drvdata; | 196 | goto err_igpio_direction; |
245 | } | 197 | } |
246 | ret = devm_request_irq(&pdev->dev, | 198 | ret = request_irq(gpio_to_irq(sdhci->data->card_int_gpio), |
247 | gpio_to_irq(sdhci->data->card_int_gpio), | ||
248 | sdhci_gpio_irq, IRQF_TRIGGER_LOW, | 199 | sdhci_gpio_irq, IRQF_TRIGGER_LOW, |
249 | mmc_hostname(host->mmc), pdev); | 200 | mmc_hostname(host->mmc), pdev); |
250 | if (ret) { | 201 | if (ret) { |
251 | dev_dbg(&pdev->dev, "gpio request irq fail: %d\n", | 202 | dev_dbg(&pdev->dev, "gpio request irq fail: %d\n", |
252 | sdhci->data->card_int_gpio); | 203 | sdhci->data->card_int_gpio); |
253 | goto set_drvdata; | 204 | goto err_igpio_request_irq; |
254 | } | 205 | } |
255 | 206 | ||
256 | } | 207 | } |
257 | 208 | ||
258 | return 0; | 209 | return 0; |
259 | 210 | ||
260 | set_drvdata: | 211 | err_igpio_request_irq: |
212 | err_igpio_direction: | ||
213 | if (sdhci->data->card_int_gpio >= 0) | ||
214 | gpio_free(sdhci->data->card_int_gpio); | ||
215 | err_igpio_request: | ||
216 | err_pgpio_direction: | ||
217 | if (sdhci->data->card_power_gpio >= 0) | ||
218 | gpio_free(sdhci->data->card_power_gpio); | ||
219 | err_pgpio_request: | ||
261 | platform_set_drvdata(pdev, NULL); | 220 | platform_set_drvdata(pdev, NULL); |
262 | sdhci_remove_host(host, 1); | 221 | sdhci_remove_host(host, 1); |
263 | free_host: | 222 | err_add_host: |
223 | iounmap(host->ioaddr); | ||
224 | err_ioremap: | ||
264 | sdhci_free_host(host); | 225 | sdhci_free_host(host); |
265 | disable_clk: | 226 | err_alloc_host: |
266 | clk_disable_unprepare(sdhci->clk); | 227 | clk_disable(sdhci->clk); |
267 | put_clk: | 228 | err_clk_enb: |
268 | clk_put(sdhci->clk); | 229 | clk_put(sdhci->clk); |
230 | err_clk_get: | ||
231 | kfree(sdhci); | ||
232 | err_kzalloc: | ||
233 | release_mem_region(iomem->start, resource_size(iomem)); | ||
269 | err: | 234 | err: |
270 | dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret); | 235 | dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret); |
271 | return ret; | 236 | return ret; |
272 | } | 237 | } |
273 | 238 | ||
274 | static int sdhci_remove(struct platform_device *pdev) | 239 | static int __devexit sdhci_remove(struct platform_device *pdev) |
275 | { | 240 | { |
276 | struct sdhci_host *host = platform_get_drvdata(pdev); | 241 | struct sdhci_host *host = platform_get_drvdata(pdev); |
242 | struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
277 | struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev); | 243 | struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev); |
278 | int dead = 0; | 244 | int dead; |
279 | u32 scratch; | 245 | u32 scratch; |
280 | 246 | ||
247 | if (sdhci->data) { | ||
248 | if (sdhci->data->card_int_gpio >= 0) { | ||
249 | free_irq(gpio_to_irq(sdhci->data->card_int_gpio), pdev); | ||
250 | gpio_free(sdhci->data->card_int_gpio); | ||
251 | } | ||
252 | |||
253 | if (sdhci->data->card_power_gpio >= 0) | ||
254 | gpio_free(sdhci->data->card_power_gpio); | ||
255 | } | ||
256 | |||
281 | platform_set_drvdata(pdev, NULL); | 257 | platform_set_drvdata(pdev, NULL); |
258 | dead = 0; | ||
282 | scratch = readl(host->ioaddr + SDHCI_INT_STATUS); | 259 | scratch = readl(host->ioaddr + SDHCI_INT_STATUS); |
283 | if (scratch == (u32)-1) | 260 | if (scratch == (u32)-1) |
284 | dead = 1; | 261 | dead = 1; |
285 | 262 | ||
286 | sdhci_remove_host(host, dead); | 263 | sdhci_remove_host(host, dead); |
264 | iounmap(host->ioaddr); | ||
287 | sdhci_free_host(host); | 265 | sdhci_free_host(host); |
288 | clk_disable_unprepare(sdhci->clk); | 266 | clk_disable(sdhci->clk); |
289 | clk_put(sdhci->clk); | 267 | clk_put(sdhci->clk); |
268 | kfree(sdhci); | ||
269 | if (iomem) | ||
270 | release_mem_region(iomem->start, resource_size(iomem)); | ||
290 | 271 | ||
291 | return 0; | 272 | return 0; |
292 | } | 273 | } |
293 | 274 | ||
294 | #ifdef CONFIG_PM | ||
295 | static int sdhci_suspend(struct device *dev) | ||
296 | { | ||
297 | struct sdhci_host *host = dev_get_drvdata(dev); | ||
298 | struct spear_sdhci *sdhci = dev_get_platdata(dev); | ||
299 | int ret; | ||
300 | |||
301 | ret = sdhci_suspend_host(host); | ||
302 | if (!ret) | ||
303 | clk_disable(sdhci->clk); | ||
304 | |||
305 | return ret; | ||
306 | } | ||
307 | |||
308 | static int sdhci_resume(struct device *dev) | ||
309 | { | ||
310 | struct sdhci_host *host = dev_get_drvdata(dev); | ||
311 | struct spear_sdhci *sdhci = dev_get_platdata(dev); | ||
312 | int ret; | ||
313 | |||
314 | ret = clk_enable(sdhci->clk); | ||
315 | if (ret) { | ||
316 | dev_dbg(dev, "Resume: Error enabling clock\n"); | ||
317 | return ret; | ||
318 | } | ||
319 | |||
320 | return sdhci_resume_host(host); | ||
321 | } | ||
322 | #endif | ||
323 | |||
324 | static SIMPLE_DEV_PM_OPS(sdhci_pm_ops, sdhci_suspend, sdhci_resume); | ||
325 | |||
326 | #ifdef CONFIG_OF | ||
327 | static const struct of_device_id sdhci_spear_id_table[] = { | ||
328 | { .compatible = "st,spear300-sdhci" }, | ||
329 | {} | ||
330 | }; | ||
331 | MODULE_DEVICE_TABLE(of, sdhci_spear_id_table); | ||
332 | #endif | ||
333 | |||
334 | static struct platform_driver sdhci_driver = { | 275 | static struct platform_driver sdhci_driver = { |
335 | .driver = { | 276 | .driver = { |
336 | .name = "sdhci", | 277 | .name = "sdhci", |
337 | .owner = THIS_MODULE, | 278 | .owner = THIS_MODULE, |
338 | .pm = &sdhci_pm_ops, | ||
339 | .of_match_table = of_match_ptr(sdhci_spear_id_table), | ||
340 | }, | 279 | }, |
341 | .probe = sdhci_probe, | 280 | .probe = sdhci_probe, |
342 | .remove = sdhci_remove, | 281 | .remove = __devexit_p(sdhci_remove), |
343 | }; | 282 | }; |
344 | 283 | ||
345 | module_platform_driver(sdhci_driver); | 284 | static int __init sdhci_init(void) |
285 | { | ||
286 | return platform_driver_register(&sdhci_driver); | ||
287 | } | ||
288 | module_init(sdhci_init); | ||
289 | |||
290 | static void __exit sdhci_exit(void) | ||
291 | { | ||
292 | platform_driver_unregister(&sdhci_driver); | ||
293 | } | ||
294 | module_exit(sdhci_exit); | ||
346 | 295 | ||
347 | MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver"); | 296 | MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver"); |
348 | MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); | 297 | MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); |
349 | MODULE_LICENSE("GPL v2"); | 298 | MODULE_LICENSE("GPL v2"); |
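[Editor's note] This file's probe is rewritten from device-managed (devm_*) allocations back to manual ones, which is why the long chain of err_* labels reappears: every resource taken by hand must be released in reverse order on each failure path and again in remove(). A hedged sketch of the devm_* style for comparison; struct example_priv and the probe name are hypothetical.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_priv {			/* hypothetical per-device state */
	void __iomem *base;
	struct clk *clk;
};

static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct example_priv *priv;
	struct resource *iomem;
	int ret;

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem)
		return -ENODEV;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Managed region and mapping: released automatically when probe
	 * fails or the device is unbound, so no err_* labels are needed. */
	if (!devm_request_mem_region(dev, iomem->start, resource_size(iomem),
				     dev_name(dev)))
		return -EBUSY;

	priv->base = devm_ioremap(dev, iomem->start, resource_size(iomem));
	if (!priv->base)
		return -ENOMEM;

	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	/* Only manual step left: remove() must clk_disable_unprepare(). */
	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, priv);
	return 0;
}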
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index 3695b2e0cbd..67950782e09 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c | |||
@@ -1,6 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2010 Google, Inc. | 2 | * Copyright (C) 2010 Google, Inc. |
3 | * | 3 | * |
4 | * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. | ||
5 | * | ||
4 | * This software is licensed under the terms of the GNU General Public | 6 | * This software is licensed under the terms of the GNU General Public |
5 | * License version 2, as published by the Free Software Foundation, and | 7 | * License version 2, as published by the Free Software Foundation, and |
6 | * may be copied, distributed, and modified under those terms. | 8 | * may be copied, distributed, and modified under those terms. |
@@ -13,40 +15,103 @@ | |||
13 | */ | 15 | */ |
14 | 16 | ||
15 | #include <linux/err.h> | 17 | #include <linux/err.h> |
16 | #include <linux/module.h> | ||
17 | #include <linux/init.h> | 18 | #include <linux/init.h> |
18 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
19 | #include <linux/clk.h> | 20 | #include <linux/clk.h> |
20 | #include <linux/io.h> | 21 | #include <linux/io.h> |
21 | #include <linux/of.h> | ||
22 | #include <linux/of_device.h> | ||
23 | #include <linux/of_gpio.h> | ||
24 | #include <linux/gpio.h> | 22 | #include <linux/gpio.h> |
23 | #include <linux/slab.h> | ||
25 | #include <linux/mmc/card.h> | 24 | #include <linux/mmc/card.h> |
26 | #include <linux/mmc/host.h> | 25 | #include <linux/mmc/host.h> |
26 | #include <linux/mmc/sd.h> | ||
27 | #include <linux/regulator/consumer.h> | ||
28 | #include <linux/delay.h> | ||
27 | 29 | ||
28 | #include <asm/gpio.h> | 30 | #include <mach/gpio.h> |
29 | 31 | #include <mach/sdhci.h> | |
30 | #include <linux/platform_data/mmc-sdhci-tegra.h> | 32 | #include <mach/io_dpd.h> |
31 | 33 | ||
32 | #include "sdhci-pltfm.h" | 34 | #include "sdhci-pltfm.h" |
33 | 35 | ||
34 | /* Tegra SDHOST controller vendor register definitions */ | 36 | #define SDHCI_VENDOR_CLOCK_CNTRL 0x100 |
35 | #define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120 | 37 | #define SDHCI_VENDOR_CLOCK_CNTRL_SDMMC_CLK 0x1 |
36 | #define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20 | 38 | #define SDHCI_VENDOR_CLOCK_CNTRL_PADPIPE_CLKEN_OVERRIDE 0x8 |
39 | #define SDHCI_VENDOR_CLOCK_CNTRL_SPI_MODE_CLKEN_OVERRIDE 0x4 | ||
40 | #define SDHCI_VENDOR_CLOCK_CNTRL_BASE_CLK_FREQ_SHIFT 8 | ||
41 | #define SDHCI_VENDOR_CLOCK_CNTRL_TAP_VALUE_SHIFT 16 | ||
42 | #define SDHCI_VENDOR_CLOCK_CNTRL_SDR50_TUNING 0x20 | ||
43 | |||
44 | #define SDHCI_VENDOR_MISC_CNTRL 0x120 | ||
45 | #define SDHCI_VENDOR_MISC_CNTRL_ENABLE_SDR104_SUPPORT 0x8 | ||
46 | #define SDHCI_VENDOR_MISC_CNTRL_ENABLE_SDR50_SUPPORT 0x10 | ||
47 | #define SDHCI_VENDOR_MISC_CNTRL_ENABLE_SD_3_0 0x20 | ||
48 | |||
49 | #define SDMMC_SDMEMCOMPPADCTRL 0x1E0 | ||
50 | #define SDMMC_SDMEMCOMPPADCTRL_VREF_SEL_MASK 0xF | ||
51 | |||
52 | #define SDMMC_AUTO_CAL_CONFIG 0x1E4 | ||
53 | #define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE 0x20000000 | ||
54 | #define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT 0x8 | ||
55 | #define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET 0x70 | ||
56 | #define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PU_OFFSET 0x62 | ||
57 | |||
58 | #define SDHOST_1V8_OCR_MASK 0x8 | ||
59 | #define SDHOST_HIGH_VOLT_MIN 2700000 | ||
60 | #define SDHOST_HIGH_VOLT_MAX 3600000 | ||
61 | #define SDHOST_LOW_VOLT_MIN 1800000 | ||
62 | #define SDHOST_LOW_VOLT_MAX 1800000 | ||
63 | |||
64 | #define TEGRA_SDHOST_MIN_FREQ 50000000 | ||
65 | #define TEGRA2_SDHOST_STD_FREQ 50000000 | ||
66 | #define TEGRA3_SDHOST_STD_FREQ 104000000 | ||
67 | |||
68 | #define SD_SEND_TUNING_PATTERN 19 | ||
69 | #define MAX_TAP_VALUES 256 | ||
70 | |||
71 | static unsigned int tegra_sdhost_min_freq; | ||
72 | static unsigned int tegra_sdhost_std_freq; | ||
73 | static void tegra_3x_sdhci_set_card_clock(struct sdhci_host *sdhci, unsigned int clock); | ||
74 | static void tegra3_sdhci_post_reset_init(struct sdhci_host *sdhci); | ||
75 | |||
76 | static unsigned int tegra3_sdhost_max_clk[4] = { | ||
77 | 208000000, 104000000, 208000000, 104000000 }; | ||
78 | |||
79 | struct tegra_sdhci_hw_ops{ | ||
80 | /* Set the internal clk and card clk.*/ | ||
81 | void (*set_card_clock)(struct sdhci_host *sdhci, unsigned int clock); | ||
82 | /* Post reset vendor registers configuration */ | ||
83 | void (*sdhost_init)(struct sdhci_host *sdhci); | ||
84 | }; | ||
37 | 85 | ||
38 | #define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0) | 86 | #ifdef CONFIG_ARCH_TEGRA_2x_SOC |
39 | #define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1) | 87 | static struct tegra_sdhci_hw_ops tegra_2x_sdhci_ops = { |
40 | #define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2) | 88 | }; |
89 | #endif | ||
41 | 90 | ||
42 | struct sdhci_tegra_soc_data { | 91 | #ifdef CONFIG_ARCH_TEGRA_3x_SOC |
43 | struct sdhci_pltfm_data *pdata; | 92 | static struct tegra_sdhci_hw_ops tegra_3x_sdhci_ops = { |
44 | u32 nvquirks; | 93 | .set_card_clock = tegra_3x_sdhci_set_card_clock, |
94 | .sdhost_init = tegra3_sdhci_post_reset_init, | ||
45 | }; | 95 | }; |
96 | #endif | ||
46 | 97 | ||
47 | struct sdhci_tegra { | 98 | struct tegra_sdhci_host { |
48 | const struct tegra_sdhci_platform_data *plat; | 99 | bool clk_enabled; |
49 | const struct sdhci_tegra_soc_data *soc_data; | 100 | struct regulator *vdd_io_reg; |
101 | struct regulator *vdd_slot_reg; | ||
102 | /* Pointer to the chip specific HW ops */ | ||
103 | struct tegra_sdhci_hw_ops *hw_ops; | ||
104 | /* Host controller instance */ | ||
105 | unsigned int instance; | ||
106 | /* vddio_min */ | ||
107 | unsigned int vddio_min_uv; | ||
108 | /* vddio_max */ | ||
109 | unsigned int vddio_max_uv; | ||
110 | /* max clk supported by the platform */ | ||
111 | unsigned int max_clk_limit; | ||
112 | struct tegra_io_dpd *dpd; | ||
113 | bool card_present; | ||
114 | bool is_rail_enabled; | ||
50 | }; | 115 | }; |
51 | 116 | ||
52 | static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg) | 117 | static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg) |
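[Editor's note] The tegra_sdhci_hw_ops table added above is a small per-SoC dispatch: each chip generation supplies optional callbacks (post-reset init, card clock control) and common code calls them only when present. A generic sketch of that shape, with hypothetical names throughout.

struct example_hw_ops {				/* hypothetical per-SoC hooks */
	void (*post_reset_init)(void *ctx);
	void (*set_card_clock)(void *ctx, unsigned int clock);
};

static void example_chip_b_init(void *ctx)
{
	/* program chip-specific vendor registers here */
}

static const struct example_hw_ops example_chip_a_ops = {
	/* older chip: nothing special to do, both hooks stay NULL */
};

static const struct example_hw_ops example_chip_b_ops = {
	.post_reset_init = example_chip_b_init,
};

static void example_reset_exit(const struct example_hw_ops *ops, void *ctx)
{
	/* guard every optional hook before dispatching */
	if (ops->post_reset_init)
		ops->post_reset_init(ctx);
}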
@@ -64,25 +129,17 @@ static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg) | |||
64 | 129 | ||
65 | static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg) | 130 | static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg) |
66 | { | 131 | { |
67 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 132 | #ifdef CONFIG_ARCH_TEGRA_2x_SOC |
68 | struct sdhci_tegra *tegra_host = pltfm_host->priv; | 133 | if (unlikely(reg == SDHCI_HOST_VERSION)) { |
69 | const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; | ||
70 | |||
71 | if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) && | ||
72 | (reg == SDHCI_HOST_VERSION))) { | ||
73 | /* Erratum: Version register is invalid in HW. */ | 134 | /* Erratum: Version register is invalid in HW. */ |
74 | return SDHCI_SPEC_200; | 135 | return SDHCI_SPEC_200; |
75 | } | 136 | } |
76 | 137 | #endif | |
77 | return readw(host->ioaddr + reg); | 138 | return readw(host->ioaddr + reg); |
78 | } | 139 | } |
79 | 140 | ||
80 | static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg) | 141 | static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg) |
81 | { | 142 | { |
82 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
83 | struct sdhci_tegra *tegra_host = pltfm_host->priv; | ||
84 | const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; | ||
85 | |||
86 | /* Seems like we're getting spurious timeout and crc errors, so | 143 | /* Seems like we're getting spurious timeout and crc errors, so |
87 | * disable signalling of them. In case of real errors software | 144 | * disable signalling of them. In case of real errors software |
88 | * timers should take care of eventually detecting them. | 145 | * timers should take care of eventually detecting them. |
@@ -92,8 +149,8 @@ static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg) | |||
92 | 149 | ||
93 | writel(val, host->ioaddr + reg); | 150 | writel(val, host->ioaddr + reg); |
94 | 151 | ||
95 | if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) && | 152 | #ifdef CONFIG_ARCH_TEGRA_2x_SOC |
96 | (reg == SDHCI_INT_ENABLE))) { | 153 | if (unlikely(reg == SDHCI_INT_ENABLE)) { |
97 | /* Erratum: Must enable block gap interrupt detection */ | 154 | /* Erratum: Must enable block gap interrupt detection */ |
98 | u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL); | 155 | u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL); |
99 | if (val & SDHCI_INT_CARD_INT) | 156 | if (val & SDHCI_INT_CARD_INT) |
@@ -102,13 +159,23 @@ static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg) | |||
102 | gap_ctrl &= ~0x8; | 159 | gap_ctrl &= ~0x8; |
103 | writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL); | 160 | writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL); |
104 | } | 161 | } |
162 | #endif | ||
105 | } | 163 | } |
106 | 164 | ||
107 | static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host) | 165 | static unsigned int tegra_sdhci_get_cd(struct sdhci_host *sdhci) |
108 | { | 166 | { |
109 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 167 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci); |
110 | struct sdhci_tegra *tegra_host = pltfm_host->priv; | 168 | struct tegra_sdhci_host *tegra_host = pltfm_host->priv; |
111 | const struct tegra_sdhci_platform_data *plat = tegra_host->plat; | 169 | |
170 | return tegra_host->card_present; | ||
171 | } | ||
172 | |||
173 | static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci) | ||
174 | { | ||
175 | struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc)); | ||
176 | struct tegra_sdhci_platform_data *plat; | ||
177 | |||
178 | plat = pdev->dev.platform_data; | ||
112 | 179 | ||
113 | if (!gpio_is_valid(plat->wp_gpio)) | 180 | if (!gpio_is_valid(plat->wp_gpio)) |
114 | return -1; | 181 | return -1; |
@@ -116,40 +183,163 @@ static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host) | |||
116 | return gpio_get_value(plat->wp_gpio); | 183 | return gpio_get_value(plat->wp_gpio); |
117 | } | 184 | } |
118 | 185 | ||
119 | static irqreturn_t carddetect_irq(int irq, void *data) | 186 | static void tegra3_sdhci_post_reset_init(struct sdhci_host *sdhci) |
120 | { | 187 | { |
121 | struct sdhci_host *sdhost = (struct sdhci_host *)data; | 188 | u16 misc_ctrl; |
189 | u32 vendor_ctrl; | ||
190 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci); | ||
191 | struct tegra_sdhci_host *tegra_host = pltfm_host->priv; | ||
192 | struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc)); | ||
193 | struct tegra_sdhci_platform_data *plat; | ||
122 | 194 | ||
123 | tasklet_schedule(&sdhost->card_tasklet); | 195 | plat = pdev->dev.platform_data; |
124 | return IRQ_HANDLED; | 196 | /* Set the base clock frequency */ |
125 | }; | 197 | vendor_ctrl = sdhci_readl(sdhci, SDHCI_VENDOR_CLOCK_CNTRL); |
198 | vendor_ctrl &= ~(0xFF << SDHCI_VENDOR_CLOCK_CNTRL_BASE_CLK_FREQ_SHIFT); | ||
199 | vendor_ctrl |= (tegra3_sdhost_max_clk[tegra_host->instance] / 1000000) << | ||
200 | SDHCI_VENDOR_CLOCK_CNTRL_BASE_CLK_FREQ_SHIFT; | ||
201 | vendor_ctrl |= SDHCI_VENDOR_CLOCK_CNTRL_PADPIPE_CLKEN_OVERRIDE; | ||
202 | vendor_ctrl &= ~SDHCI_VENDOR_CLOCK_CNTRL_SPI_MODE_CLKEN_OVERRIDE; | ||
203 | |||
204 | /* Set tap delay */ | ||
205 | if (plat->tap_delay) { | ||
206 | vendor_ctrl &= ~(0xFF << | ||
207 | SDHCI_VENDOR_CLOCK_CNTRL_TAP_VALUE_SHIFT); | ||
208 | vendor_ctrl |= (plat->tap_delay << | ||
209 | SDHCI_VENDOR_CLOCK_CNTRL_TAP_VALUE_SHIFT); | ||
210 | } | ||
211 | /* Enable frequency tuning for SDR50 mode */ | ||
212 | vendor_ctrl |= SDHCI_VENDOR_CLOCK_CNTRL_SDR50_TUNING; | ||
213 | sdhci_writel(sdhci, vendor_ctrl, SDHCI_VENDOR_CLOCK_CNTRL); | ||
214 | |||
215 | /* Enable SDHOST v3.0 support */ | ||
216 | misc_ctrl = sdhci_readw(sdhci, SDHCI_VENDOR_MISC_CNTRL); | ||
217 | misc_ctrl |= SDHCI_VENDOR_MISC_CNTRL_ENABLE_SD_3_0 | | ||
218 | SDHCI_VENDOR_MISC_CNTRL_ENABLE_SDR104_SUPPORT | | ||
219 | SDHCI_VENDOR_MISC_CNTRL_ENABLE_SDR50_SUPPORT; | ||
220 | sdhci_writew(sdhci, misc_ctrl, SDHCI_VENDOR_MISC_CNTRL); | ||
221 | } | ||
126 | 222 | ||
127 | static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask) | 223 | static int tegra_sdhci_set_uhs_signaling(struct sdhci_host *host, |
224 | unsigned int uhs) | ||
128 | { | 225 | { |
129 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 226 | u16 clk, ctrl_2; |
130 | struct sdhci_tegra *tegra_host = pltfm_host->priv; | 227 | ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); |
131 | const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; | 228 | |
229 | /* Select Bus Speed Mode for host */ | ||
230 | ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; | ||
231 | switch (uhs) { | ||
232 | case MMC_TIMING_UHS_SDR12: | ||
233 | ctrl_2 |= SDHCI_CTRL_UHS_SDR12; | ||
234 | break; | ||
235 | case MMC_TIMING_UHS_SDR25: | ||
236 | ctrl_2 |= SDHCI_CTRL_UHS_SDR25; | ||
237 | break; | ||
238 | case MMC_TIMING_UHS_SDR50: | ||
239 | ctrl_2 |= SDHCI_CTRL_UHS_SDR50; | ||
240 | break; | ||
241 | case MMC_TIMING_UHS_SDR104: | ||
242 | ctrl_2 |= SDHCI_CTRL_UHS_SDR104; | ||
243 | break; | ||
244 | case MMC_TIMING_UHS_DDR50: | ||
245 | ctrl_2 |= SDHCI_CTRL_UHS_DDR50; | ||
246 | break; | ||
247 | } | ||
248 | |||
249 | sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); | ||
250 | |||
251 | if (uhs == MMC_TIMING_UHS_DDR50) { | ||
252 | clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); | ||
253 | clk &= ~(0xFF << SDHCI_DIVIDER_SHIFT); | ||
254 | clk |= 1 << SDHCI_DIVIDER_SHIFT; | ||
255 | sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); | ||
256 | } | ||
257 | return 0; | ||
258 | } | ||
259 | |||
260 | static void tegra_sdhci_reset_exit(struct sdhci_host *sdhci, u8 mask) | ||
261 | { | ||
262 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci); | ||
263 | struct tegra_sdhci_host *tegra_host = pltfm_host->priv; | ||
132 | 264 | ||
133 | if (!(mask & SDHCI_RESET_ALL)) | 265 | if (mask & SDHCI_RESET_ALL) { |
266 | if (tegra_host->hw_ops->sdhost_init) | ||
267 | tegra_host->hw_ops->sdhost_init(sdhci); | ||
268 | } | ||
269 | } | ||
270 | |||
271 | static void sdhci_status_notify_cb(int card_present, void *dev_id) | ||
272 | { | ||
273 | struct sdhci_host *sdhci = (struct sdhci_host *)dev_id; | ||
274 | struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc)); | ||
275 | struct tegra_sdhci_platform_data *plat; | ||
276 | unsigned int status, oldstat; | ||
277 | |||
278 | pr_debug("%s: card_present %d\n", mmc_hostname(sdhci->mmc), | ||
279 | card_present); | ||
280 | |||
281 | plat = pdev->dev.platform_data; | ||
282 | if (!plat->mmc_data.status) { | ||
283 | mmc_detect_change(sdhci->mmc, 0); | ||
134 | return; | 284 | return; |
285 | } | ||
135 | 286 | ||
136 | /* Erratum: Enable SDHCI spec v3.00 support */ | 287 | status = plat->mmc_data.status(mmc_dev(sdhci->mmc)); |
137 | if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300) { | ||
138 | u32 misc_ctrl; | ||
139 | 288 | ||
140 | misc_ctrl = sdhci_readb(host, SDHCI_TEGRA_VENDOR_MISC_CTRL); | 289 | oldstat = plat->mmc_data.card_present; |
141 | misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300; | 290 | plat->mmc_data.card_present = status; |
142 | sdhci_writeb(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL); | 291 | if (status ^ oldstat) { |
292 | pr_debug("%s: Slot status change detected (%d -> %d)\n", | ||
293 | mmc_hostname(sdhci->mmc), oldstat, status); | ||
294 | if (status && !plat->mmc_data.built_in) | ||
295 | mmc_detect_change(sdhci->mmc, (5 * HZ) / 2); | ||
296 | else | ||
297 | mmc_detect_change(sdhci->mmc, 0); | ||
143 | } | 298 | } |
144 | } | 299 | } |
145 | 300 | ||
301 | static irqreturn_t carddetect_irq(int irq, void *data) | ||
302 | { | ||
303 | struct sdhci_host *sdhost = (struct sdhci_host *)data; | ||
304 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhost); | ||
305 | struct tegra_sdhci_host *tegra_host = pltfm_host->priv; | ||
306 | struct platform_device *pdev = to_platform_device(mmc_dev(sdhost->mmc)); | ||
307 | struct tegra_sdhci_platform_data *plat; | ||
308 | |||
309 | plat = pdev->dev.platform_data; | ||
310 | |||
311 | tegra_host->card_present = (gpio_get_value(plat->cd_gpio) == 0); | ||
312 | |||
313 | if (tegra_host->card_present) { | ||
314 | if (!tegra_host->is_rail_enabled) { | ||
315 | if (tegra_host->vdd_slot_reg) | ||
316 | regulator_enable(tegra_host->vdd_slot_reg); | ||
317 | if (tegra_host->vdd_io_reg) | ||
318 | regulator_enable(tegra_host->vdd_io_reg); | ||
319 | tegra_host->is_rail_enabled = 1; | ||
320 | } | ||
321 | } /* else { | ||
322 | if (tegra_host->is_rail_enabled) { | ||
323 | if (tegra_host->vdd_io_reg) | ||
324 | regulator_disable(tegra_host->vdd_io_reg); | ||
325 | if (tegra_host->vdd_slot_reg) | ||
326 | regulator_disable(tegra_host->vdd_slot_reg); | ||
327 | tegra_host->is_rail_enabled = 0; | ||
328 | } | ||
329 | } */ | ||
330 | |||
331 | tasklet_schedule(&sdhost->card_tasklet); | ||
332 | return IRQ_HANDLED; | ||
333 | }; | ||
334 | |||
146 | static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width) | 335 | static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width) |
147 | { | 336 | { |
148 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 337 | struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc)); |
149 | struct sdhci_tegra *tegra_host = pltfm_host->priv; | 338 | struct tegra_sdhci_platform_data *plat; |
150 | const struct tegra_sdhci_platform_data *plat = tegra_host->plat; | ||
151 | u32 ctrl; | 339 | u32 ctrl; |
152 | 340 | ||
341 | plat = pdev->dev.platform_data; | ||
342 | |||
153 | ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); | 343 | ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); |
154 | if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) { | 344 | if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) { |
155 | ctrl &= ~SDHCI_CTRL_4BITBUS; | 345 | ctrl &= ~SDHCI_CTRL_4BITBUS; |
@@ -165,102 +355,562 @@ static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width) | |||
165 | return 0; | 355 | return 0; |
166 | } | 356 | } |
167 | 357 | ||
358 | static void tegra_sdhci_set_clk_rate(struct sdhci_host *sdhci, | ||
359 | unsigned int clock) | ||
360 | { | ||
361 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci); | ||
362 | struct tegra_sdhci_host *tegra_host = pltfm_host->priv; | ||
363 | unsigned int clk_rate; | ||
364 | |||
365 | if (sdhci->mmc->card && | ||
366 | mmc_card_ddr_mode(sdhci->mmc->card)) { | ||
367 | /* | ||
368 | * In ddr mode, tegra sdmmc controller clock frequency | ||
369 | * should be double the card clock frequency. | ||
370 | */ | ||
371 | clk_rate = clock * 2; | ||
372 | } else { | ||
373 | if (clock <= tegra_sdhost_min_freq) | ||
374 | clk_rate = tegra_sdhost_min_freq; | ||
375 | else if (clock <= tegra_sdhost_std_freq) | ||
376 | clk_rate = tegra_sdhost_std_freq; | ||
377 | else | ||
378 | clk_rate = clock; | ||
379 | |||
380 | /* | ||
381 | * In SDR50 mode, run the sdmmc controller at 208MHz to ensure | ||
382 | * the core voltage is at 1.2V. If the core voltage is below 1.2V, CRC | ||
383 | * errors would occur during data transfers. | ||
384 | */ | ||
385 | if ((sdhci->mmc->ios.timing == MMC_TIMING_UHS_SDR50) && | ||
386 | (clk_rate == tegra_sdhost_std_freq)) | ||
387 | clk_rate <<= 1; | ||
388 | } | ||
389 | |||
390 | if (tegra_host->max_clk_limit && | ||
391 | (clk_rate > tegra_host->max_clk_limit)) | ||
392 | clk_rate = tegra_host->max_clk_limit; | ||
393 | |||
394 | clk_set_rate(pltfm_host->clk, clk_rate); | ||
395 | sdhci->max_clk = clk_get_rate(pltfm_host->clk); | ||
396 | } | ||
397 | |||
398 | static void tegra_3x_sdhci_set_card_clock(struct sdhci_host *sdhci, unsigned int clock) | ||
399 | { | ||
400 | int div; | ||
401 | u16 clk; | ||
402 | unsigned long timeout; | ||
403 | u8 ctrl; | ||
404 | |||
405 | if (clock && clock == sdhci->clock) | ||
406 | return; | ||
407 | |||
408 | sdhci_writew(sdhci, 0, SDHCI_CLOCK_CONTROL); | ||
409 | |||
410 | if (clock == 0) | ||
411 | goto out; | ||
412 | if (sdhci->mmc->ios.timing == MMC_TIMING_UHS_DDR50) { | ||
413 | div = 1; | ||
414 | goto set_clk; | ||
415 | } | ||
416 | |||
417 | if (sdhci->version >= SDHCI_SPEC_300) { | ||
418 | /* Version 3.00 divisors must be a multiple of 2. */ | ||
419 | if (sdhci->max_clk <= clock) { | ||
420 | div = 1; | ||
421 | } else { | ||
422 | for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; div += 2) { | ||
423 | if ((sdhci->max_clk / div) <= clock) | ||
424 | break; | ||
425 | } | ||
426 | } | ||
427 | } else { | ||
428 | /* Version 2.00 divisors must be a power of 2. */ | ||
429 | for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) { | ||
430 | if ((sdhci->max_clk / div) <= clock) | ||
431 | break; | ||
432 | } | ||
433 | } | ||
434 | div >>= 1; | ||
435 | |||
436 | /* | ||
437 | * Tegra3 sdmmc controller internal clock will not be stabilized when | ||
438 | * we use a clock divider value greater than 4. The WAR is as follows. | ||
439 | * - Enable internal clock. | ||
440 | * - Wait for 5 usec and do a dummy write. | ||
441 | * - Poll for clk stable. | ||
442 | */ | ||
443 | set_clk: | ||
444 | clk = (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; | ||
445 | clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN) | ||
446 | << SDHCI_DIVIDER_HI_SHIFT; | ||
447 | clk |= SDHCI_CLOCK_INT_EN; | ||
448 | sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL); | ||
449 | |||
450 | /* Wait for 5 usec */ | ||
451 | udelay(5); | ||
452 | |||
453 | /* Do a dummy write */ | ||
454 | ctrl = sdhci_readb(sdhci, SDHCI_CAPABILITIES); | ||
455 | ctrl |= 1; | ||
456 | sdhci_writeb(sdhci, ctrl, SDHCI_CAPABILITIES); | ||
457 | |||
458 | /* Wait max 20 ms */ | ||
459 | timeout = 20; | ||
460 | while (!((clk = sdhci_readw(sdhci, SDHCI_CLOCK_CONTROL)) | ||
461 | & SDHCI_CLOCK_INT_STABLE)) { | ||
462 | if (timeout == 0) { | ||
463 | dev_err(mmc_dev(sdhci->mmc), "Internal clock never stabilised\n"); | ||
464 | return; | ||
465 | } | ||
466 | timeout--; | ||
467 | mdelay(1); | ||
468 | } | ||
469 | |||
470 | clk |= SDHCI_CLOCK_CARD_EN; | ||
471 | sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL); | ||
472 | out: | ||
473 | sdhci->clock = clock; | ||
474 | } | ||
475 | |||
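The divisor search in tegra_3x_sdhci_set_card_clock() follows the standard SDHCI encoding. A minimal sketch of that calculation, assuming the spec limits (2046 divisors for v3.00, 256 for v2.00) and the divider bit positions defined in sdhci.h; the helper name is hypothetical:

static unsigned short example_encode_divider(unsigned int base_clk,
					     unsigned int want_clk,
					     int spec_300)
{
	int div;
	unsigned short clk;

	if (spec_300) {
		/* SDHCI 3.00: divisors are multiples of 2, up to 2046 */
		if (base_clk <= want_clk) {
			div = 1;
		} else {
			for (div = 2; div < 2046; div += 2)
				if (base_clk / div <= want_clk)
					break;
		}
	} else {
		/* SDHCI 2.00: divisors are powers of 2, up to 256 */
		for (div = 1; div < 256; div *= 2)
			if (base_clk / div <= want_clk)
				break;
	}
	div >>= 1;	/* the register field stores divisor / 2 */

	clk = (div & 0xFF) << 8;		/* low 8 bits, SDHCI_DIVIDER_SHIFT */
	clk |= ((div & 0x300) >> 8) << 6;	/* upper 2 bits, SDHCI 3.00 only */
	return clk;
}

For instance, a 104 MHz base clock and a 25 MHz target under v3.00 select div = 6, which is stored as 3 in the register field (0x0300).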
476 | static void tegra_sdhci_set_clock(struct sdhci_host *sdhci, unsigned int clock) | ||
477 | { | ||
478 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci); | ||
479 | struct tegra_sdhci_host *tegra_host = pltfm_host->priv; | ||
480 | u8 ctrl; | ||
481 | |||
482 | pr_debug("%s %s %u enabled=%u\n", __func__, | ||
483 | mmc_hostname(sdhci->mmc), clock, tegra_host->clk_enabled); | ||
484 | |||
485 | if (clock) { | ||
487 | /* bring the SD instance out of IO DPD mode */ | ||
487 | tegra_io_dpd_disable(tegra_host->dpd); | ||
488 | |||
489 | if (!tegra_host->clk_enabled) { | ||
490 | clk_enable(pltfm_host->clk); | ||
491 | ctrl = sdhci_readb(sdhci, SDHCI_VENDOR_CLOCK_CNTRL); | ||
492 | ctrl |= SDHCI_VENDOR_CLOCK_CNTRL_SDMMC_CLK; | ||
493 | sdhci_writeb(sdhci, ctrl, SDHCI_VENDOR_CLOCK_CNTRL); | ||
494 | tegra_host->clk_enabled = true; | ||
495 | } | ||
496 | tegra_sdhci_set_clk_rate(sdhci, clock); | ||
497 | if (tegra_host->hw_ops->set_card_clock) | ||
498 | tegra_host->hw_ops->set_card_clock(sdhci, clock); | ||
499 | } else if (!clock && tegra_host->clk_enabled) { | ||
500 | if (tegra_host->hw_ops->set_card_clock) | ||
501 | tegra_host->hw_ops->set_card_clock(sdhci, clock); | ||
502 | ctrl = sdhci_readb(sdhci, SDHCI_VENDOR_CLOCK_CNTRL); | ||
503 | ctrl &= ~SDHCI_VENDOR_CLOCK_CNTRL_SDMMC_CLK; | ||
504 | sdhci_writeb(sdhci, ctrl, SDHCI_VENDOR_CLOCK_CNTRL); | ||
505 | clk_disable(pltfm_host->clk); | ||
506 | tegra_host->clk_enabled = false; | ||
507 | /* put the SD instance back into IO DPD mode */ | ||
508 | tegra_io_dpd_enable(tegra_host->dpd); | ||
509 | } | ||
510 | } | ||
511 | |||
512 | static int tegra_sdhci_signal_voltage_switch(struct sdhci_host *sdhci, | ||
513 | unsigned int signal_voltage) | ||
514 | { | ||
515 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci); | ||
516 | struct tegra_sdhci_host *tegra_host = pltfm_host->priv; | ||
517 | unsigned int min_uV = SDHOST_HIGH_VOLT_MIN; | ||
518 | unsigned int max_uV = SDHOST_HIGH_VOLT_MAX; | ||
519 | unsigned int rc = 0; | ||
520 | u16 clk, ctrl; | ||
521 | unsigned int val; | ||
522 | |||
523 | /* Switch OFF the card clock to prevent glitches on the clock line */ | ||
524 | clk = sdhci_readw(sdhci, SDHCI_CLOCK_CONTROL); | ||
525 | clk &= ~SDHCI_CLOCK_CARD_EN; | ||
526 | sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL); | ||
527 | |||
528 | ctrl = sdhci_readw(sdhci, SDHCI_HOST_CONTROL2); | ||
529 | if (signal_voltage == MMC_SIGNAL_VOLTAGE_180) { | ||
530 | ctrl |= SDHCI_CTRL_VDD_180; | ||
531 | min_uV = SDHOST_LOW_VOLT_MIN; | ||
532 | max_uV = SDHOST_LOW_VOLT_MAX; | ||
533 | } else if (signal_voltage == MMC_SIGNAL_VOLTAGE_330) { | ||
534 | if (ctrl & SDHCI_CTRL_VDD_180) | ||
535 | ctrl &= ~SDHCI_CTRL_VDD_180; | ||
536 | } | ||
537 | sdhci_writew(sdhci, ctrl, SDHCI_HOST_CONTROL2); | ||
538 | |||
539 | /* Switch the I/O rail voltage */ | ||
540 | if (tegra_host->vdd_io_reg) { | ||
541 | rc = regulator_set_voltage(tegra_host->vdd_io_reg, | ||
542 | min_uV, max_uV); | ||
543 | if (rc) { | ||
544 | dev_err(mmc_dev(sdhci->mmc), "switching to 1.8V " | ||
545 | "failed. Switching back to 3.3V\n"); | ||
546 | regulator_set_voltage(tegra_host->vdd_io_reg, | ||
547 | SDHOST_HIGH_VOLT_MIN, | ||
548 | SDHOST_HIGH_VOLT_MAX); | ||
549 | goto out; | ||
550 | } | ||
551 | } | ||
552 | |||
553 | /* Wait for 10 msec for the voltage to be switched */ | ||
554 | mdelay(10); | ||
555 | |||
556 | /* Enable the card clock */ | ||
557 | clk |= SDHCI_CLOCK_CARD_EN; | ||
558 | sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL); | ||
559 | |||
560 | /* Wait for 1 msec after enabling clock */ | ||
561 | mdelay(1); | ||
562 | |||
563 | if (signal_voltage == MMC_SIGNAL_VOLTAGE_180) { | ||
564 | /* Do Auto Calibration for 1.8V signal voltage */ | ||
565 | val = sdhci_readl(sdhci, SDMMC_AUTO_CAL_CONFIG); | ||
566 | val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE; | ||
567 | /* Program Auto cal PD offset(bits 8:14) */ | ||
568 | val &= ~(0x7F << | ||
569 | SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT); | ||
570 | val |= (SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET << | ||
571 | SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT); | ||
572 | /* Program Auto cal PU offset(bits 0:6) */ | ||
573 | val &= ~0x7F; | ||
574 | val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PU_OFFSET; | ||
575 | sdhci_writel(sdhci, val, SDMMC_AUTO_CAL_CONFIG); | ||
576 | |||
577 | val = sdhci_readl(sdhci, SDMMC_SDMEMCOMPPADCTRL); | ||
578 | val &= ~SDMMC_SDMEMCOMPPADCTRL_VREF_SEL_MASK; | ||
579 | val |= 0x7; | ||
580 | sdhci_writel(sdhci, val, SDMMC_SDMEMCOMPPADCTRL); | ||
581 | } | ||
582 | |||
583 | return rc; | ||
584 | out: | ||
585 | /* Enable the card clock */ | ||
586 | clk |= SDHCI_CLOCK_CARD_EN; | ||
587 | sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL); | ||
588 | |||
589 | /* Wait for 1 msec for the clock to stabilize */ | ||
590 | mdelay(1); | ||
591 | |||
592 | return rc; | ||
593 | } | ||
594 | |||
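The auto-calibration programming above is a pair of 7-bit read-modify-write updates. A compact sketch of the same field manipulation, assuming the pull-down offset sits in bits 14:8 and the pull-up offset in bits 6:0, as the in-code comments indicate; the helper name and parameters are hypothetical:

static unsigned int example_autocal_fields(unsigned int reg,
					   unsigned int pd, unsigned int pu)
{
	reg &= ~(0x7F << 8);		/* clear PD offset, bits 14:8 */
	reg |= (pd & 0x7F) << 8;
	reg &= ~0x7F;			/* clear PU offset, bits 6:0 */
	reg |= pu & 0x7F;
	return reg;
}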
595 | static void tegra_sdhci_reset(struct sdhci_host *sdhci, u8 mask) | ||
596 | { | ||
597 | unsigned long timeout; | ||
598 | |||
599 | sdhci_writeb(sdhci, mask, SDHCI_SOFTWARE_RESET); | ||
600 | |||
601 | /* Wait max 100 ms */ | ||
602 | timeout = 100; | ||
603 | |||
604 | /* hw clears the bit when it's done */ | ||
605 | while (sdhci_readb(sdhci, SDHCI_SOFTWARE_RESET) & mask) { | ||
606 | if (timeout == 0) { | ||
607 | dev_err(mmc_dev(sdhci->mmc), "Reset 0x%x never " | ||
608 | "completed.\n", (int)mask); | ||
609 | return; | ||
610 | } | ||
611 | timeout--; | ||
612 | mdelay(1); | ||
613 | } | ||
614 | } | ||
615 | |||
616 | static void sdhci_tegra_set_tap_delay(struct sdhci_host *sdhci, | ||
617 | unsigned int tap_delay) | ||
618 | { | ||
619 | u32 vendor_ctrl; | ||
620 | |||
621 | /* Max tap delay value is 255 */ | ||
622 | BUG_ON(tap_delay > MAX_TAP_VALUES); | ||
623 | |||
624 | vendor_ctrl = sdhci_readl(sdhci, SDHCI_VENDOR_CLOCK_CNTRL); | ||
625 | vendor_ctrl &= ~(0xFF << SDHCI_VENDOR_CLOCK_CNTRL_TAP_VALUE_SHIFT); | ||
626 | vendor_ctrl |= (tap_delay << SDHCI_VENDOR_CLOCK_CNTRL_TAP_VALUE_SHIFT); | ||
627 | sdhci_writel(sdhci, vendor_ctrl, SDHCI_VENDOR_CLOCK_CNTRL); | ||
628 | } | ||
629 | |||
630 | static void sdhci_tegra_clear_set_irqs(struct sdhci_host *host, | ||
631 | u32 clear, u32 set) | ||
632 | { | ||
633 | u32 ier; | ||
634 | |||
635 | ier = sdhci_readl(host, SDHCI_INT_ENABLE); | ||
636 | ier &= ~clear; | ||
637 | ier |= set; | ||
638 | sdhci_writel(host, ier, SDHCI_INT_ENABLE); | ||
639 | sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE); | ||
640 | } | ||
641 | |||
642 | static int sdhci_tegra_run_frequency_tuning(struct sdhci_host *sdhci) | ||
643 | { | ||
644 | int err = 0; | ||
645 | u8 ctrl; | ||
646 | u32 ier; | ||
647 | u32 mask; | ||
648 | unsigned int timeout = 10; | ||
649 | int flags; | ||
650 | u32 intstatus; | ||
651 | |||
652 | /* | ||
653 | * As per the Host Controller spec v3.00, tuning command | ||
654 | * generates Buffer Read Ready interrupt only, so enable that. | ||
655 | */ | ||
656 | ier = sdhci_readl(sdhci, SDHCI_INT_ENABLE); | ||
657 | sdhci_tegra_clear_set_irqs(sdhci, ier, SDHCI_INT_DATA_AVAIL | | ||
658 | SDHCI_INT_DATA_CRC); | ||
659 | |||
660 | mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT; | ||
661 | while (sdhci_readl(sdhci, SDHCI_PRESENT_STATE) & mask) { | ||
662 | if (timeout == 0) { | ||
663 | dev_err(mmc_dev(sdhci->mmc), "Controller never " | ||
664 | "released inhibit bit(s).\n"); | ||
665 | err = -ETIMEDOUT; | ||
666 | goto out; | ||
667 | } | ||
668 | timeout--; | ||
669 | mdelay(1); | ||
670 | } | ||
671 | |||
672 | ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2); | ||
673 | ctrl &= ~SDHCI_CTRL_TUNED_CLK; | ||
674 | sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL2); | ||
675 | |||
676 | ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2); | ||
677 | ctrl |= SDHCI_CTRL_EXEC_TUNING; | ||
678 | sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL2); | ||
679 | |||
680 | /* | ||
681 | * In response to CMD19, the card sends 64 bytes of tuning | ||
682 | * block to the Host Controller. So we set the block size | ||
683 | * to 64 here. | ||
684 | */ | ||
685 | sdhci_writew(sdhci, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE); | ||
686 | |||
687 | sdhci_writeb(sdhci, 0xE, SDHCI_TIMEOUT_CONTROL); | ||
688 | |||
689 | sdhci_writeb(sdhci, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); | ||
690 | |||
691 | sdhci_writel(sdhci, 0x0, SDHCI_ARGUMENT); | ||
692 | |||
693 | /* Set the cmd flags */ | ||
694 | flags = SDHCI_CMD_RESP_SHORT | SDHCI_CMD_CRC | SDHCI_CMD_DATA; | ||
695 | /* Issue the command */ | ||
696 | sdhci_writew(sdhci, SDHCI_MAKE_CMD( | ||
697 | SD_SEND_TUNING_PATTERN, flags), SDHCI_COMMAND); | ||
698 | |||
699 | timeout = 5; | ||
700 | do { | ||
701 | timeout--; | ||
702 | mdelay(1); | ||
703 | intstatus = sdhci_readl(sdhci, SDHCI_INT_STATUS); | ||
704 | if (intstatus) { | ||
705 | sdhci_writel(sdhci, intstatus, SDHCI_INT_STATUS); | ||
706 | break; | ||
707 | } | ||
708 | } while (timeout); | ||
709 | |||
710 | if ((intstatus & SDHCI_INT_DATA_AVAIL) && | ||
711 | !(intstatus & SDHCI_INT_DATA_CRC)) { | ||
712 | err = 0; | ||
713 | sdhci->tuning_done = 1; | ||
714 | } else { | ||
715 | tegra_sdhci_reset(sdhci, SDHCI_RESET_CMD); | ||
716 | tegra_sdhci_reset(sdhci, SDHCI_RESET_DATA); | ||
717 | err = -EIO; | ||
718 | } | ||
719 | |||
720 | if (sdhci->tuning_done) { | ||
721 | sdhci->tuning_done = 0; | ||
722 | ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2); | ||
723 | if (!(ctrl & SDHCI_CTRL_EXEC_TUNING) && | ||
724 | (ctrl & SDHCI_CTRL_TUNED_CLK)) | ||
725 | err = 0; | ||
726 | else | ||
727 | err = -EIO; | ||
728 | } | ||
729 | mdelay(1); | ||
730 | out: | ||
731 | sdhci_tegra_clear_set_irqs(sdhci, SDHCI_INT_DATA_AVAIL, ier); | ||
732 | return err; | ||
733 | } | ||
734 | |||
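The SDHCI_MAKE_BLKSZ(7, 64) used for the tuning transfer above packs the SDMA buffer boundary and the block size into one 16-bit register. A sketch of the encoding as defined by the SDHCI specification (the macro name here is illustrative):

/* boundary selector in bits 14:12, transfer block size in bits 11:0 */
#define EXAMPLE_MAKE_BLKSZ(dma, blksz)	((((dma) & 0x7) << 12) | ((blksz) & 0xFFF))
/* EXAMPLE_MAKE_BLKSZ(7, 64) == 0x7040: 512 KiB boundary, 64-byte block */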
735 | static int sdhci_tegra_execute_tuning(struct sdhci_host *sdhci) | ||
736 | { | ||
737 | int err; | ||
738 | u16 ctrl_2; | ||
739 | u8 *tap_delay_status; | ||
740 | unsigned int i = 0; | ||
741 | unsigned int temp_low_pass_tap = 0; | ||
742 | unsigned int temp_pass_window = 0; | ||
743 | unsigned int best_low_pass_tap = 0; | ||
744 | unsigned int best_pass_window = 0; | ||
745 | |||
746 | /* Tuning is valid only in SDR104 and SDR50 modes */ | ||
747 | ctrl_2 = sdhci_readw(sdhci, SDHCI_HOST_CONTROL2); | ||
748 | if (!(((ctrl_2 & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) || | ||
749 | (((ctrl_2 & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) && | ||
750 | (sdhci->flags & SDHCI_SDR50_NEEDS_TUNING)))) | ||
751 | return 0; | ||
752 | |||
753 | tap_delay_status = kzalloc(MAX_TAP_VALUES, GFP_KERNEL); | ||
754 | if (tap_delay_status == NULL) { | ||
755 | dev_err(mmc_dev(sdhci->mmc), "failed to allocate memory " | ||
756 | "for storing tap_delay_status\n"); | ||
757 | err = -ENOMEM; | ||
758 | goto out; | ||
759 | } | ||
760 | |||
761 | /* | ||
762 | * Set each tap delay value and run frequency tuning. After each | ||
763 | * run, update the tap delay status as working or not working. | ||
764 | */ | ||
765 | do { | ||
766 | /* Set the tap delay */ | ||
767 | sdhci_tegra_set_tap_delay(sdhci, i); | ||
768 | |||
769 | /* Run frequency tuning */ | ||
770 | err = sdhci_tegra_run_frequency_tuning(sdhci); | ||
771 | |||
772 | /* Update whether the tap delay worked or not */ | ||
773 | tap_delay_status[i] = (err) ? 0 : 1; | ||
774 | i++; | ||
775 | } while (i < 0xFF); | ||
776 | |||
777 | /* Find the best possible tap range */ | ||
778 | for (i = 0; i < 0xFF; i++) { | ||
779 | temp_pass_window = 0; | ||
780 | |||
781 | /* Find the first passing tap in the current window */ | ||
782 | if (tap_delay_status[i]) { | ||
783 | temp_low_pass_tap = i; | ||
784 | |||
785 | /* Find the pass window */ | ||
786 | do { | ||
787 | temp_pass_window++; | ||
788 | i++; | ||
789 | if (i >= 0xFF) | ||
790 | break; | ||
791 | } while (tap_delay_status[i]); | ||
792 | |||
793 | if ((temp_pass_window > best_pass_window) && (temp_pass_window > 1)) { | ||
794 | best_low_pass_tap = temp_low_pass_tap; | ||
795 | best_pass_window = temp_pass_window; | ||
796 | } | ||
797 | } | ||
798 | } | ||
799 | |||
800 | |||
801 | pr_debug("%s: best pass tap window: start %d, end %d\n", | ||
802 | mmc_hostname(sdhci->mmc), best_low_pass_tap, | ||
803 | (best_low_pass_tap + best_pass_window)); | ||
804 | |||
805 | /* Set the best tap */ | ||
806 | sdhci_tegra_set_tap_delay(sdhci, | ||
807 | (best_low_pass_tap + ((best_pass_window * 3) / 4))); | ||
808 | |||
809 | /* Run frequency tuning */ | ||
810 | err = sdhci_tegra_run_frequency_tuning(sdhci); | ||
811 | |||
812 | out: | ||
813 | if (tap_delay_status) | ||
814 | kfree(tap_delay_status); | ||
815 | |||
816 | return err; | ||
817 | } | ||
818 | |||
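The window search in sdhci_tegra_execute_tuning() can be read as the following standalone helper: scan the pass/fail map, keep the widest run of passing taps (longer than one), and program the tap three quarters of the way into that run. The helper name and the explicit length parameter are assumptions for the sketch:

static unsigned int example_best_tap(const unsigned char *pass, unsigned int len)
{
	unsigned int i, start, win, best_start = 0, best_win = 0;

	for (i = 0; i < len; i++) {
		if (!pass[i])
			continue;
		start = i;
		win = 0;
		while (i < len && pass[i]) {	/* walk the passing window */
			win++;
			i++;
		}
		if (win > best_win && win > 1) {
			best_start = start;
			best_win = win;
		}
	}
	return best_start + (best_win * 3) / 4;
}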
819 | static int tegra_sdhci_suspend(struct sdhci_host *sdhci, pm_message_t state) | ||
820 | { | ||
821 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci); | ||
822 | struct tegra_sdhci_host *tegra_host = pltfm_host->priv; | ||
823 | |||
824 | tegra_sdhci_set_clock(sdhci, 0); | ||
825 | |||
826 | /* Disable the power rails if any */ | ||
827 | if (tegra_host->card_present) { | ||
828 | if (tegra_host->is_rail_enabled) { | ||
829 | if (tegra_host->vdd_io_reg) | ||
830 | regulator_disable(tegra_host->vdd_io_reg); | ||
831 | if (tegra_host->vdd_slot_reg) | ||
832 | regulator_disable(tegra_host->vdd_slot_reg); | ||
833 | tegra_host->is_rail_enabled = 0; | ||
834 | } | ||
835 | } | ||
836 | |||
837 | return 0; | ||
838 | } | ||
839 | |||
840 | static int tegra_sdhci_resume(struct sdhci_host *sdhci) | ||
841 | { | ||
842 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci); | ||
843 | struct tegra_sdhci_host *tegra_host = pltfm_host->priv; | ||
844 | |||
845 | /* Enable the power rails if any */ | ||
846 | if (tegra_host->card_present) { | ||
847 | if (!tegra_host->is_rail_enabled) { | ||
848 | if (tegra_host->vdd_slot_reg) | ||
849 | regulator_enable(tegra_host->vdd_slot_reg); | ||
850 | if (tegra_host->vdd_io_reg) { | ||
851 | regulator_enable(tegra_host->vdd_io_reg); | ||
852 | tegra_sdhci_signal_voltage_switch(sdhci, MMC_SIGNAL_VOLTAGE_330); | ||
853 | } | ||
854 | tegra_host->is_rail_enabled = 1; | ||
855 | } | ||
856 | } | ||
857 | /* Set the minimum identification clock frequency of 400 kHz */ | ||
858 | tegra_sdhci_set_clock(sdhci, 400000); | ||
859 | |||
860 | /* Reset the controller and power on if the MMC_PM_KEEP_POWER flag is set */ | ||
861 | if (sdhci->mmc->pm_flags & MMC_PM_KEEP_POWER) { | ||
862 | tegra_sdhci_reset(sdhci, SDHCI_RESET_ALL); | ||
863 | sdhci_writeb(sdhci, SDHCI_POWER_ON, SDHCI_POWER_CONTROL); | ||
864 | sdhci->pwr = 0; | ||
865 | } | ||
866 | |||
867 | return 0; | ||
868 | } | ||
869 | |||
168 | static struct sdhci_ops tegra_sdhci_ops = { | 870 | static struct sdhci_ops tegra_sdhci_ops = { |
169 | .get_ro = tegra_sdhci_get_ro, | 871 | .get_ro = tegra_sdhci_get_ro, |
872 | .get_cd = tegra_sdhci_get_cd, | ||
170 | .read_l = tegra_sdhci_readl, | 873 | .read_l = tegra_sdhci_readl, |
171 | .read_w = tegra_sdhci_readw, | 874 | .read_w = tegra_sdhci_readw, |
172 | .write_l = tegra_sdhci_writel, | 875 | .write_l = tegra_sdhci_writel, |
173 | .platform_8bit_width = tegra_sdhci_8bit, | 876 | .platform_8bit_width = tegra_sdhci_8bit, |
877 | .set_clock = tegra_sdhci_set_clock, | ||
878 | .suspend = tegra_sdhci_suspend, | ||
879 | .resume = tegra_sdhci_resume, | ||
174 | .platform_reset_exit = tegra_sdhci_reset_exit, | 880 | .platform_reset_exit = tegra_sdhci_reset_exit, |
881 | .set_uhs_signaling = tegra_sdhci_set_uhs_signaling, | ||
882 | .switch_signal_voltage = tegra_sdhci_signal_voltage_switch, | ||
883 | .execute_freq_tuning = sdhci_tegra_execute_tuning, | ||
175 | }; | 884 | }; |
176 | 885 | ||
177 | #ifdef CONFIG_ARCH_TEGRA_2x_SOC | 886 | static struct sdhci_pltfm_data sdhci_tegra_pdata = { |
178 | static struct sdhci_pltfm_data sdhci_tegra20_pdata = { | ||
179 | .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | | 887 | .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | |
180 | SDHCI_QUIRK_SINGLE_POWER_WRITE | | 888 | #ifndef CONFIG_ARCH_TEGRA_2x_SOC |
181 | SDHCI_QUIRK_NO_HISPD_BIT | | 889 | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | |
182 | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC, | 890 | SDHCI_QUIRK_NON_STD_VOLTAGE_SWITCHING | |
183 | .ops = &tegra_sdhci_ops, | ||
184 | }; | ||
185 | |||
186 | static struct sdhci_tegra_soc_data soc_data_tegra20 = { | ||
187 | .pdata = &sdhci_tegra20_pdata, | ||
188 | .nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 | | ||
189 | NVQUIRK_ENABLE_BLOCK_GAP_DET, | ||
190 | }; | ||
191 | #endif | 891 | #endif |
192 | |||
193 | #ifdef CONFIG_ARCH_TEGRA_3x_SOC | 892 | #ifdef CONFIG_ARCH_TEGRA_3x_SOC |
194 | static struct sdhci_pltfm_data sdhci_tegra30_pdata = { | 893 | SDHCI_QUIRK_NONSTANDARD_CLOCK | |
195 | .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | | 894 | SDHCI_QUIRK_NON_STANDARD_TUNING | |
196 | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | | 895 | #endif |
197 | SDHCI_QUIRK_SINGLE_POWER_WRITE | | 896 | SDHCI_QUIRK_SINGLE_POWER_WRITE | |
198 | SDHCI_QUIRK_NO_HISPD_BIT | | 897 | SDHCI_QUIRK_NO_HISPD_BIT | |
199 | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC, | 898 | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | |
899 | SDHCI_QUIRK_NO_CALC_MAX_DISCARD_TO | | ||
900 | SDHCI_QUIRK_BROKEN_CARD_DETECTION, | ||
200 | .ops = &tegra_sdhci_ops, | 901 | .ops = &tegra_sdhci_ops, |
201 | }; | 902 | }; |
202 | 903 | ||
203 | static struct sdhci_tegra_soc_data soc_data_tegra30 = { | 904 | static int __devinit sdhci_tegra_probe(struct platform_device *pdev) |
204 | .pdata = &sdhci_tegra30_pdata, | ||
205 | .nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300, | ||
206 | }; | ||
207 | #endif | ||
208 | |||
209 | static const struct of_device_id sdhci_tegra_dt_match[] = { | ||
210 | #ifdef CONFIG_ARCH_TEGRA_3x_SOC | ||
211 | { .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 }, | ||
212 | #endif | ||
213 | #ifdef CONFIG_ARCH_TEGRA_2x_SOC | ||
214 | { .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 }, | ||
215 | #endif | ||
216 | {} | ||
217 | }; | ||
218 | MODULE_DEVICE_TABLE(of, sdhci_dt_ids); | ||
219 | |||
220 | static struct tegra_sdhci_platform_data *sdhci_tegra_dt_parse_pdata( | ||
221 | struct platform_device *pdev) | ||
222 | { | 905 | { |
223 | struct tegra_sdhci_platform_data *plat; | ||
224 | struct device_node *np = pdev->dev.of_node; | ||
225 | u32 bus_width; | ||
226 | |||
227 | if (!np) | ||
228 | return NULL; | ||
229 | |||
230 | plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); | ||
231 | if (!plat) { | ||
232 | dev_err(&pdev->dev, "Can't allocate platform data\n"); | ||
233 | return NULL; | ||
234 | } | ||
235 | |||
236 | plat->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0); | ||
237 | plat->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0); | ||
238 | plat->power_gpio = of_get_named_gpio(np, "power-gpios", 0); | ||
239 | |||
240 | if (of_property_read_u32(np, "bus-width", &bus_width) == 0 && | ||
241 | bus_width == 8) | ||
242 | plat->is_8bit = 1; | ||
243 | |||
244 | return plat; | ||
245 | } | ||
246 | |||
247 | static int sdhci_tegra_probe(struct platform_device *pdev) | ||
248 | { | ||
249 | const struct of_device_id *match; | ||
250 | const struct sdhci_tegra_soc_data *soc_data; | ||
251 | struct sdhci_host *host; | ||
252 | struct sdhci_pltfm_host *pltfm_host; | 906 | struct sdhci_pltfm_host *pltfm_host; |
253 | struct tegra_sdhci_platform_data *plat; | 907 | struct tegra_sdhci_platform_data *plat; |
254 | struct sdhci_tegra *tegra_host; | 908 | struct sdhci_host *host; |
909 | struct tegra_sdhci_host *tegra_host; | ||
255 | struct clk *clk; | 910 | struct clk *clk; |
256 | int rc; | 911 | int rc; |
257 | 912 | ||
258 | match = of_match_device(sdhci_tegra_dt_match, &pdev->dev); | 913 | host = sdhci_pltfm_init(pdev, &sdhci_tegra_pdata); |
259 | if (!match) | ||
260 | return -EINVAL; | ||
261 | soc_data = match->data; | ||
262 | |||
263 | host = sdhci_pltfm_init(pdev, soc_data->pdata); | ||
264 | if (IS_ERR(host)) | 914 | if (IS_ERR(host)) |
265 | return PTR_ERR(host); | 915 | return PTR_ERR(host); |
266 | 916 | ||
@@ -268,26 +918,27 @@ static int sdhci_tegra_probe(struct platform_device *pdev) | |||
268 | 918 | ||
269 | plat = pdev->dev.platform_data; | 919 | plat = pdev->dev.platform_data; |
270 | 920 | ||
271 | if (plat == NULL) | ||
272 | plat = sdhci_tegra_dt_parse_pdata(pdev); | ||
273 | |||
274 | if (plat == NULL) { | 921 | if (plat == NULL) { |
275 | dev_err(mmc_dev(host->mmc), "missing platform data\n"); | 922 | dev_err(mmc_dev(host->mmc), "missing platform data\n"); |
276 | rc = -ENXIO; | 923 | rc = -ENXIO; |
277 | goto err_no_plat; | 924 | goto err_no_plat; |
278 | } | 925 | } |
279 | 926 | ||
280 | tegra_host = devm_kzalloc(&pdev->dev, sizeof(*tegra_host), GFP_KERNEL); | 927 | tegra_host = kzalloc(sizeof(struct tegra_sdhci_host), GFP_KERNEL); |
281 | if (!tegra_host) { | 928 | if (tegra_host == NULL) { |
282 | dev_err(mmc_dev(host->mmc), "failed to allocate tegra_host\n"); | 929 | dev_err(mmc_dev(host->mmc), "failed to allocate tegra host\n"); |
283 | rc = -ENOMEM; | 930 | rc = -ENOMEM; |
284 | goto err_no_plat; | 931 | goto err_no_mem; |
285 | } | 932 | } |
286 | 933 | ||
287 | tegra_host->plat = plat; | 934 | #ifdef CONFIG_MMC_EMBEDDED_SDIO |
288 | tegra_host->soc_data = soc_data; | 935 | if (plat->mmc_data.embedded_sdio) |
289 | 936 | mmc_set_embedded_sdio_data(host->mmc, | |
290 | pltfm_host->priv = tegra_host; | 937 | &plat->mmc_data.embedded_sdio->cis, |
938 | &plat->mmc_data.embedded_sdio->cccr, | ||
939 | plat->mmc_data.embedded_sdio->funcs, | ||
940 | plat->mmc_data.embedded_sdio->num_funcs); | ||
941 | #endif | ||
291 | 942 | ||
292 | if (gpio_is_valid(plat->power_gpio)) { | 943 | if (gpio_is_valid(plat->power_gpio)) { |
293 | rc = gpio_request(plat->power_gpio, "sdhci_power"); | 944 | rc = gpio_request(plat->power_gpio, "sdhci_power"); |
@@ -296,6 +947,7 @@ static int sdhci_tegra_probe(struct platform_device *pdev) | |||
296 | "failed to allocate power gpio\n"); | 947 | "failed to allocate power gpio\n"); |
297 | goto err_power_req; | 948 | goto err_power_req; |
298 | } | 949 | } |
950 | tegra_gpio_enable(plat->power_gpio); | ||
299 | gpio_direction_output(plat->power_gpio, 1); | 951 | gpio_direction_output(plat->power_gpio, 1); |
300 | } | 952 | } |
301 | 953 | ||
@@ -306,9 +958,13 @@ static int sdhci_tegra_probe(struct platform_device *pdev) | |||
306 | "failed to allocate cd gpio\n"); | 958 | "failed to allocate cd gpio\n"); |
307 | goto err_cd_req; | 959 | goto err_cd_req; |
308 | } | 960 | } |
961 | tegra_gpio_enable(plat->cd_gpio); | ||
309 | gpio_direction_input(plat->cd_gpio); | 962 | gpio_direction_input(plat->cd_gpio); |
310 | 963 | ||
311 | rc = request_irq(gpio_to_irq(plat->cd_gpio), carddetect_irq, | 964 | tegra_host->card_present = (gpio_get_value(plat->cd_gpio) == 0); |
965 | |||
966 | rc = request_threaded_irq(gpio_to_irq(plat->cd_gpio), NULL, | ||
967 | carddetect_irq, | ||
312 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, | 968 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, |
313 | mmc_hostname(host->mmc), host); | 969 | mmc_hostname(host->mmc), host); |
314 | 970 | ||
@@ -316,7 +972,18 @@ static int sdhci_tegra_probe(struct platform_device *pdev) | |||
316 | dev_err(mmc_dev(host->mmc), "request irq error\n"); | 972 | dev_err(mmc_dev(host->mmc), "request irq error\n"); |
317 | goto err_cd_irq_req; | 973 | goto err_cd_irq_req; |
318 | } | 974 | } |
975 | rc = enable_irq_wake(gpio_to_irq(plat->cd_gpio)); | ||
976 | if (rc < 0) | ||
977 | dev_err(mmc_dev(host->mmc), | ||
978 | "SD card wake-up event registration" | ||
979 | "failed with eroor: %d\n", rc); | ||
980 | |||
981 | } else if (plat->mmc_data.register_status_notify) { | ||
982 | plat->mmc_data.register_status_notify(sdhci_status_notify_cb, host); | ||
983 | } | ||
319 | 984 | ||
985 | if (plat->mmc_data.status) { | ||
986 | plat->mmc_data.card_present = plat->mmc_data.status(mmc_dev(host->mmc)); | ||
320 | } | 987 | } |
321 | 988 | ||
322 | if (gpio_is_valid(plat->wp_gpio)) { | 989 | if (gpio_is_valid(plat->wp_gpio)) { |
@@ -326,22 +993,104 @@ static int sdhci_tegra_probe(struct platform_device *pdev) | |||
326 | "failed to allocate wp gpio\n"); | 993 | "failed to allocate wp gpio\n"); |
327 | goto err_wp_req; | 994 | goto err_wp_req; |
328 | } | 995 | } |
996 | tegra_gpio_enable(plat->wp_gpio); | ||
329 | gpio_direction_input(plat->wp_gpio); | 997 | gpio_direction_input(plat->wp_gpio); |
330 | } | 998 | } |
331 | 999 | ||
1000 | /* | ||
1001 | * If there is no card detect gpio, assume that the | ||
1002 | * card is always present. | ||
1003 | */ | ||
1004 | if (!gpio_is_valid(plat->cd_gpio)) | ||
1005 | tegra_host->card_present = 1; | ||
1006 | |||
1007 | if (!plat->mmc_data.built_in) { | ||
1008 | if (plat->mmc_data.ocr_mask & SDHOST_1V8_OCR_MASK) { | ||
1009 | tegra_host->vddio_min_uv = SDHOST_LOW_VOLT_MIN; | ||
1010 | tegra_host->vddio_max_uv = SDHOST_LOW_VOLT_MAX; | ||
1011 | } else { | ||
1012 | /* | ||
1013 | * Set the minV and maxV to default | ||
1014 | * voltage range of 2.7V - 3.6V | ||
1015 | */ | ||
1016 | tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_MIN; | ||
1017 | tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX; | ||
1018 | } | ||
1019 | tegra_host->vdd_io_reg = regulator_get(mmc_dev(host->mmc), "vddio_sdmmc"); | ||
1020 | if (IS_ERR_OR_NULL(tegra_host->vdd_io_reg)) { | ||
1021 | dev_err(mmc_dev(host->mmc), "%s regulator not found: %ld\n", | ||
1022 | "vddio_sdmmc", PTR_ERR(tegra_host->vdd_io_reg)); | ||
1023 | tegra_host->vdd_io_reg = NULL; | ||
1024 | } else { | ||
1025 | rc = regulator_set_voltage(tegra_host->vdd_io_reg, | ||
1026 | tegra_host->vddio_min_uv, | ||
1027 | tegra_host->vddio_max_uv); | ||
1028 | if (rc) { | ||
1029 | dev_err(mmc_dev(host->mmc), "%s regulator_set_voltage failed: %d\n", | ||
1030 | "vddio_sdmmc", rc); | ||
1031 | } | ||
1032 | } | ||
1033 | |||
1034 | tegra_host->vdd_slot_reg = regulator_get(mmc_dev(host->mmc), "vddio_sd_slot"); | ||
1035 | if (IS_ERR_OR_NULL(tegra_host->vdd_slot_reg)) { | ||
1036 | dev_err(mmc_dev(host->mmc), "%s regulator not found: %ld\n", | ||
1037 | "vddio_sd_slot", PTR_ERR(tegra_host->vdd_slot_reg)); | ||
1038 | tegra_host->vdd_slot_reg = NULL; | ||
1039 | } | ||
1040 | |||
1041 | if (tegra_host->card_present) { | ||
1042 | if (tegra_host->vdd_slot_reg) | ||
1043 | regulator_enable(tegra_host->vdd_slot_reg); | ||
1044 | if (tegra_host->vdd_io_reg) | ||
1045 | regulator_enable(tegra_host->vdd_io_reg); | ||
1046 | tegra_host->is_rail_enabled = 1; | ||
1047 | } | ||
1048 | } | ||
1049 | |||
332 | clk = clk_get(mmc_dev(host->mmc), NULL); | 1050 | clk = clk_get(mmc_dev(host->mmc), NULL); |
333 | if (IS_ERR(clk)) { | 1051 | if (IS_ERR(clk)) { |
334 | dev_err(mmc_dev(host->mmc), "clk err\n"); | 1052 | dev_err(mmc_dev(host->mmc), "clk err\n"); |
335 | rc = PTR_ERR(clk); | 1053 | rc = PTR_ERR(clk); |
336 | goto err_clk_get; | 1054 | goto err_clk_get; |
337 | } | 1055 | } |
338 | clk_prepare_enable(clk); | 1056 | rc = clk_enable(clk); |
1057 | if (rc != 0) | ||
1058 | goto err_clk_put; | ||
339 | pltfm_host->clk = clk; | 1059 | pltfm_host->clk = clk; |
340 | 1060 | pltfm_host->priv = tegra_host; | |
341 | host->mmc->pm_caps = plat->pm_flags; | 1061 | tegra_host->clk_enabled = true; |
342 | 1062 | tegra_host->max_clk_limit = plat->max_clk_limit; | |
1063 | tegra_host->instance = pdev->id; | ||
1064 | tegra_host->dpd = tegra_io_dpd_get(mmc_dev(host->mmc)); | ||
1065 | |||
1066 | host->mmc->pm_caps |= plat->pm_caps; | ||
1067 | host->mmc->pm_flags |= plat->pm_flags; | ||
1068 | |||
1069 | host->mmc->caps |= MMC_CAP_ERASE; | ||
1070 | host->mmc->caps |= MMC_CAP_DISABLE; | ||
1071 | /* enable 1.8V DDR capability */ | ||
1072 | host->mmc->caps |= MMC_CAP_1_8V_DDR; | ||
343 | if (plat->is_8bit) | 1073 | if (plat->is_8bit) |
344 | host->mmc->caps |= MMC_CAP_8_BIT_DATA; | 1074 | host->mmc->caps |= MMC_CAP_8_BIT_DATA; |
1075 | host->mmc->caps |= MMC_CAP_SDIO_IRQ; | ||
1076 | |||
1077 | host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_IGNORE_PM_NOTIFY; | ||
1078 | if (plat->mmc_data.built_in) { | ||
1079 | host->mmc->caps |= MMC_CAP_NONREMOVABLE; | ||
1080 | host->mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY; | ||
1081 | } | ||
1082 | /* Do not turn OFF embedded SDIO cards as they support Wake on Wireless */ | ||
1083 | if (plat->mmc_data.embedded_sdio) | ||
1084 | host->mmc->pm_flags |= MMC_PM_KEEP_POWER; | ||
1085 | |||
1086 | tegra_sdhost_min_freq = TEGRA_SDHOST_MIN_FREQ; | ||
1087 | #ifdef CONFIG_ARCH_TEGRA_2x_SOC | ||
1088 | tegra_host->hw_ops = &tegra_2x_sdhci_ops; | ||
1089 | tegra_sdhost_std_freq = TEGRA2_SDHOST_STD_FREQ; | ||
1090 | #else | ||
1091 | tegra_host->hw_ops = &tegra_3x_sdhci_ops; | ||
1092 | tegra_sdhost_std_freq = TEGRA3_SDHOST_STD_FREQ; | ||
1093 | #endif | ||
345 | 1094 | ||
346 | rc = sdhci_add_host(host); | 1095 | rc = sdhci_add_host(host); |
347 | if (rc) | 1096 | if (rc) |
@@ -350,51 +1099,81 @@ static int sdhci_tegra_probe(struct platform_device *pdev) | |||
350 | return 0; | 1099 | return 0; |
351 | 1100 | ||
352 | err_add_host: | 1101 | err_add_host: |
353 | clk_disable_unprepare(pltfm_host->clk); | 1102 | clk_disable(pltfm_host->clk); |
1103 | err_clk_put: | ||
354 | clk_put(pltfm_host->clk); | 1104 | clk_put(pltfm_host->clk); |
355 | err_clk_get: | 1105 | err_clk_get: |
356 | if (gpio_is_valid(plat->wp_gpio)) | 1106 | if (gpio_is_valid(plat->wp_gpio)) { |
1107 | tegra_gpio_disable(plat->wp_gpio); | ||
357 | gpio_free(plat->wp_gpio); | 1108 | gpio_free(plat->wp_gpio); |
1109 | } | ||
358 | err_wp_req: | 1110 | err_wp_req: |
359 | if (gpio_is_valid(plat->cd_gpio)) | 1111 | if (gpio_is_valid(plat->cd_gpio)) |
360 | free_irq(gpio_to_irq(plat->cd_gpio), host); | 1112 | free_irq(gpio_to_irq(plat->cd_gpio), host); |
361 | err_cd_irq_req: | 1113 | err_cd_irq_req: |
362 | if (gpio_is_valid(plat->cd_gpio)) | 1114 | if (gpio_is_valid(plat->cd_gpio)) { |
1115 | tegra_gpio_disable(plat->cd_gpio); | ||
363 | gpio_free(plat->cd_gpio); | 1116 | gpio_free(plat->cd_gpio); |
1117 | } | ||
364 | err_cd_req: | 1118 | err_cd_req: |
365 | if (gpio_is_valid(plat->power_gpio)) | 1119 | if (gpio_is_valid(plat->power_gpio)) { |
1120 | tegra_gpio_disable(plat->power_gpio); | ||
366 | gpio_free(plat->power_gpio); | 1121 | gpio_free(plat->power_gpio); |
1122 | } | ||
367 | err_power_req: | 1123 | err_power_req: |
1124 | err_no_mem: | ||
1125 | kfree(tegra_host); | ||
368 | err_no_plat: | 1126 | err_no_plat: |
369 | sdhci_pltfm_free(pdev); | 1127 | sdhci_pltfm_free(pdev); |
370 | return rc; | 1128 | return rc; |
371 | } | 1129 | } |
372 | 1130 | ||
373 | static int sdhci_tegra_remove(struct platform_device *pdev) | 1131 | static int __devexit sdhci_tegra_remove(struct platform_device *pdev) |
374 | { | 1132 | { |
375 | struct sdhci_host *host = platform_get_drvdata(pdev); | 1133 | struct sdhci_host *host = platform_get_drvdata(pdev); |
376 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 1134 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
377 | struct sdhci_tegra *tegra_host = pltfm_host->priv; | 1135 | struct tegra_sdhci_host *tegra_host = pltfm_host->priv; |
378 | const struct tegra_sdhci_platform_data *plat = tegra_host->plat; | 1136 | struct tegra_sdhci_platform_data *plat; |
379 | int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); | 1137 | int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); |
380 | 1138 | ||
381 | sdhci_remove_host(host, dead); | 1139 | sdhci_remove_host(host, dead); |
382 | 1140 | ||
383 | if (gpio_is_valid(plat->wp_gpio)) | 1141 | plat = pdev->dev.platform_data; |
1142 | |||
1143 | disable_irq_wake(gpio_to_irq(plat->cd_gpio)); | ||
1144 | |||
1145 | if (tegra_host->vdd_slot_reg) { | ||
1146 | regulator_disable(tegra_host->vdd_slot_reg); | ||
1147 | regulator_put(tegra_host->vdd_slot_reg); | ||
1148 | } | ||
1149 | |||
1150 | if (tegra_host->vdd_io_reg) { | ||
1151 | regulator_disable(tegra_host->vdd_io_reg); | ||
1152 | regulator_put(tegra_host->vdd_io_reg); | ||
1153 | } | ||
1154 | |||
1155 | if (gpio_is_valid(plat->wp_gpio)) { | ||
1156 | tegra_gpio_disable(plat->wp_gpio); | ||
384 | gpio_free(plat->wp_gpio); | 1157 | gpio_free(plat->wp_gpio); |
1158 | } | ||
385 | 1159 | ||
386 | if (gpio_is_valid(plat->cd_gpio)) { | 1160 | if (gpio_is_valid(plat->cd_gpio)) { |
387 | free_irq(gpio_to_irq(plat->cd_gpio), host); | 1161 | free_irq(gpio_to_irq(plat->cd_gpio), host); |
1162 | tegra_gpio_disable(plat->cd_gpio); | ||
388 | gpio_free(plat->cd_gpio); | 1163 | gpio_free(plat->cd_gpio); |
389 | } | 1164 | } |
390 | 1165 | ||
391 | if (gpio_is_valid(plat->power_gpio)) | 1166 | if (gpio_is_valid(plat->power_gpio)) { |
1167 | tegra_gpio_disable(plat->power_gpio); | ||
392 | gpio_free(plat->power_gpio); | 1168 | gpio_free(plat->power_gpio); |
1169 | } | ||
393 | 1170 | ||
394 | clk_disable_unprepare(pltfm_host->clk); | 1171 | if (tegra_host->clk_enabled) |
1172 | clk_disable(pltfm_host->clk); | ||
395 | clk_put(pltfm_host->clk); | 1173 | clk_put(pltfm_host->clk); |
396 | 1174 | ||
397 | sdhci_pltfm_free(pdev); | 1175 | sdhci_pltfm_free(pdev); |
1176 | kfree(tegra_host); | ||
398 | 1177 | ||
399 | return 0; | 1178 | return 0; |
400 | } | 1179 | } |
@@ -403,15 +1182,27 @@ static struct platform_driver sdhci_tegra_driver = { | |||
403 | .driver = { | 1182 | .driver = { |
404 | .name = "sdhci-tegra", | 1183 | .name = "sdhci-tegra", |
405 | .owner = THIS_MODULE, | 1184 | .owner = THIS_MODULE, |
406 | .of_match_table = sdhci_tegra_dt_match, | ||
407 | .pm = SDHCI_PLTFM_PMOPS, | ||
408 | }, | 1185 | }, |
409 | .probe = sdhci_tegra_probe, | 1186 | .probe = sdhci_tegra_probe, |
410 | .remove = sdhci_tegra_remove, | 1187 | .remove = __devexit_p(sdhci_tegra_remove), |
1188 | #ifdef CONFIG_PM | ||
1189 | .suspend = sdhci_pltfm_suspend, | ||
1190 | .resume = sdhci_pltfm_resume, | ||
1191 | #endif | ||
411 | }; | 1192 | }; |
412 | 1193 | ||
413 | module_platform_driver(sdhci_tegra_driver); | 1194 | static int __init sdhci_tegra_init(void) |
1195 | { | ||
1196 | return platform_driver_register(&sdhci_tegra_driver); | ||
1197 | } | ||
1198 | module_init(sdhci_tegra_init); | ||
1199 | |||
1200 | static void __exit sdhci_tegra_exit(void) | ||
1201 | { | ||
1202 | platform_driver_unregister(&sdhci_tegra_driver); | ||
1203 | } | ||
1204 | module_exit(sdhci_tegra_exit); | ||
414 | 1205 | ||
415 | MODULE_DESCRIPTION("SDHCI driver for Tegra"); | 1206 | MODULE_DESCRIPTION("SDHCI driver for Tegra"); |
416 | MODULE_AUTHOR("Google, Inc."); | 1207 | MODULE_AUTHOR(" Google, Inc."); |
417 | MODULE_LICENSE("GPL v2"); | 1208 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 6f0bfc0c8c9..c6822c39541 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
@@ -16,19 +16,16 @@ | |||
16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
17 | #include <linux/highmem.h> | 17 | #include <linux/highmem.h> |
18 | #include <linux/io.h> | 18 | #include <linux/io.h> |
19 | #include <linux/module.h> | ||
20 | #include <linux/dma-mapping.h> | 19 | #include <linux/dma-mapping.h> |
21 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
22 | #include <linux/scatterlist.h> | 21 | #include <linux/scatterlist.h> |
23 | #include <linux/regulator/consumer.h> | 22 | #include <linux/regulator/consumer.h> |
24 | #include <linux/pm_runtime.h> | ||
25 | 23 | ||
26 | #include <linux/leds.h> | 24 | #include <linux/leds.h> |
27 | 25 | ||
28 | #include <linux/mmc/mmc.h> | 26 | #include <linux/mmc/mmc.h> |
29 | #include <linux/mmc/host.h> | 27 | #include <linux/mmc/host.h> |
30 | #include <linux/mmc/card.h> | 28 | #include <linux/mmc/card.h> |
31 | #include <linux/mmc/slot-gpio.h> | ||
32 | 29 | ||
33 | #include "sdhci.h" | 30 | #include "sdhci.h" |
34 | 31 | ||
@@ -45,76 +42,61 @@ | |||
45 | #define MAX_TUNING_LOOP 40 | 42 | #define MAX_TUNING_LOOP 40 |
46 | 43 | ||
47 | static unsigned int debug_quirks = 0; | 44 | static unsigned int debug_quirks = 0; |
48 | static unsigned int debug_quirks2; | ||
49 | 45 | ||
50 | static void sdhci_finish_data(struct sdhci_host *); | 46 | static void sdhci_finish_data(struct sdhci_host *); |
51 | 47 | ||
52 | static void sdhci_send_command(struct sdhci_host *, struct mmc_command *); | 48 | static void sdhci_send_command(struct sdhci_host *, struct mmc_command *); |
53 | static void sdhci_finish_command(struct sdhci_host *); | 49 | static void sdhci_finish_command(struct sdhci_host *); |
54 | static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode); | 50 | static int sdhci_execute_tuning(struct mmc_host *mmc); |
55 | static void sdhci_tuning_timer(unsigned long data); | 51 | static void sdhci_tuning_timer(unsigned long data); |
56 | 52 | ||
57 | #ifdef CONFIG_PM_RUNTIME | ||
58 | static int sdhci_runtime_pm_get(struct sdhci_host *host); | ||
59 | static int sdhci_runtime_pm_put(struct sdhci_host *host); | ||
60 | #else | ||
61 | static inline int sdhci_runtime_pm_get(struct sdhci_host *host) | ||
62 | { | ||
63 | return 0; | ||
64 | } | ||
65 | static inline int sdhci_runtime_pm_put(struct sdhci_host *host) | ||
66 | { | ||
67 | return 0; | ||
68 | } | ||
69 | #endif | ||
70 | |||
71 | static void sdhci_dumpregs(struct sdhci_host *host) | 53 | static void sdhci_dumpregs(struct sdhci_host *host) |
72 | { | 54 | { |
73 | pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n", | 55 | printk(KERN_DEBUG DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n", |
74 | mmc_hostname(host->mmc)); | 56 | mmc_hostname(host->mmc)); |
75 | 57 | ||
76 | pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n", | 58 | printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n", |
77 | sdhci_readl(host, SDHCI_DMA_ADDRESS), | 59 | sdhci_readl(host, SDHCI_DMA_ADDRESS), |
78 | sdhci_readw(host, SDHCI_HOST_VERSION)); | 60 | sdhci_readw(host, SDHCI_HOST_VERSION)); |
79 | pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n", | 61 | printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n", |
80 | sdhci_readw(host, SDHCI_BLOCK_SIZE), | 62 | sdhci_readw(host, SDHCI_BLOCK_SIZE), |
81 | sdhci_readw(host, SDHCI_BLOCK_COUNT)); | 63 | sdhci_readw(host, SDHCI_BLOCK_COUNT)); |
82 | pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n", | 64 | printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n", |
83 | sdhci_readl(host, SDHCI_ARGUMENT), | 65 | sdhci_readl(host, SDHCI_ARGUMENT), |
84 | sdhci_readw(host, SDHCI_TRANSFER_MODE)); | 66 | sdhci_readw(host, SDHCI_TRANSFER_MODE)); |
85 | pr_debug(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n", | 67 | printk(KERN_DEBUG DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n", |
86 | sdhci_readl(host, SDHCI_PRESENT_STATE), | 68 | sdhci_readl(host, SDHCI_PRESENT_STATE), |
87 | sdhci_readb(host, SDHCI_HOST_CONTROL)); | 69 | sdhci_readb(host, SDHCI_HOST_CONTROL)); |
88 | pr_debug(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n", | 70 | printk(KERN_DEBUG DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n", |
89 | sdhci_readb(host, SDHCI_POWER_CONTROL), | 71 | sdhci_readb(host, SDHCI_POWER_CONTROL), |
90 | sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL)); | 72 | sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL)); |
91 | pr_debug(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n", | 73 | printk(KERN_DEBUG DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n", |
92 | sdhci_readb(host, SDHCI_WAKE_UP_CONTROL), | 74 | sdhci_readb(host, SDHCI_WAKE_UP_CONTROL), |
93 | sdhci_readw(host, SDHCI_CLOCK_CONTROL)); | 75 | sdhci_readw(host, SDHCI_CLOCK_CONTROL)); |
94 | pr_debug(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n", | 76 | printk(KERN_DEBUG DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n", |
95 | sdhci_readb(host, SDHCI_TIMEOUT_CONTROL), | 77 | sdhci_readb(host, SDHCI_TIMEOUT_CONTROL), |
96 | sdhci_readl(host, SDHCI_INT_STATUS)); | 78 | sdhci_readl(host, SDHCI_INT_STATUS)); |
97 | pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n", | 79 | printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n", |
98 | sdhci_readl(host, SDHCI_INT_ENABLE), | 80 | sdhci_readl(host, SDHCI_INT_ENABLE), |
99 | sdhci_readl(host, SDHCI_SIGNAL_ENABLE)); | 81 | sdhci_readl(host, SDHCI_SIGNAL_ENABLE)); |
100 | pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n", | 82 | printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n", |
101 | sdhci_readw(host, SDHCI_ACMD12_ERR), | 83 | sdhci_readw(host, SDHCI_ACMD12_ERR), |
102 | sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); | 84 | sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); |
103 | pr_debug(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n", | 85 | printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n", |
104 | sdhci_readl(host, SDHCI_CAPABILITIES), | 86 | sdhci_readl(host, SDHCI_CAPABILITIES), |
105 | sdhci_readl(host, SDHCI_CAPABILITIES_1)); | 87 | sdhci_readl(host, SDHCI_CAPABILITIES_1)); |
106 | pr_debug(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n", | 88 | printk(KERN_DEBUG DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n", |
107 | sdhci_readw(host, SDHCI_COMMAND), | 89 | sdhci_readw(host, SDHCI_COMMAND), |
108 | sdhci_readl(host, SDHCI_MAX_CURRENT)); | 90 | sdhci_readl(host, SDHCI_MAX_CURRENT)); |
109 | pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n", | 91 | printk(KERN_DEBUG DRIVER_NAME ": Host ctl2: 0x%08x\n", |
110 | sdhci_readw(host, SDHCI_HOST_CONTROL2)); | 92 | sdhci_readw(host, SDHCI_HOST_CONTROL2)); |
111 | 93 | ||
112 | if (host->flags & SDHCI_USE_ADMA) | 94 | if (host->flags & SDHCI_USE_ADMA) |
113 | pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", | 95 | printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", |
114 | readl(host->ioaddr + SDHCI_ADMA_ERROR), | 96 | readl(host->ioaddr + SDHCI_ADMA_ERROR), |
115 | readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); | 97 | readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); |
116 | 98 | ||
117 | pr_debug(DRIVER_NAME ": ===========================================\n"); | 99 | printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n"); |
118 | } | 100 | } |
119 | 101 | ||
120 | /*****************************************************************************\ | 102 | /*****************************************************************************\ |
@@ -148,8 +130,7 @@ static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) | |||
148 | { | 130 | { |
149 | u32 present, irqs; | 131 | u32 present, irqs; |
150 | 132 | ||
151 | if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || | 133 | if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) |
152 | (host->mmc->caps & MMC_CAP_NONREMOVABLE)) | ||
153 | return; | 134 | return; |
154 | 135 | ||
155 | present = sdhci_readl(host, SDHCI_PRESENT_STATE) & | 136 | present = sdhci_readl(host, SDHCI_PRESENT_STATE) & |
@@ -200,7 +181,7 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask) | |||
200 | /* hw clears the bit when it's done */ | 181 | /* hw clears the bit when it's done */ |
201 | while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) { | 182 | while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) { |
202 | if (timeout == 0) { | 183 | if (timeout == 0) { |
203 | pr_err("%s: Reset 0x%x never completed.\n", | 184 | printk(KERN_ERR "%s: Reset 0x%x never completed.\n", |
204 | mmc_hostname(host->mmc), (int)mask); | 185 | mmc_hostname(host->mmc), (int)mask); |
205 | sdhci_dumpregs(host); | 186 | sdhci_dumpregs(host); |
206 | return; | 187 | return; |
@@ -214,11 +195,6 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask) | |||
214 | 195 | ||
215 | if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) | 196 | if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) |
216 | sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier); | 197 | sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier); |
217 | |||
218 | if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { | ||
219 | if ((host->ops->enable_dma) && (mask & SDHCI_RESET_ALL)) | ||
220 | host->ops->enable_dma(host); | ||
221 | } | ||
222 | } | 198 | } |
223 | 199 | ||
224 | static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios); | 200 | static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios); |
@@ -246,19 +222,6 @@ static void sdhci_init(struct sdhci_host *host, int soft) | |||
246 | static void sdhci_reinit(struct sdhci_host *host) | 222 | static void sdhci_reinit(struct sdhci_host *host) |
247 | { | 223 | { |
248 | sdhci_init(host, 0); | 224 | sdhci_init(host, 0); |
249 | /* | ||
250 | * Retuning stuffs are affected by different cards inserted and only | ||
251 | * applicable to UHS-I cards. So reset these fields to their initial | ||
252 | * value when card is removed. | ||
253 | */ | ||
254 | if (host->flags & SDHCI_USING_RETUNING_TIMER) { | ||
255 | host->flags &= ~SDHCI_USING_RETUNING_TIMER; | ||
256 | |||
257 | del_timer_sync(&host->tuning_timer); | ||
258 | host->flags &= ~SDHCI_NEEDS_RETUNING; | ||
259 | host->mmc->max_blk_count = | ||
260 | (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; | ||
261 | } | ||
262 | sdhci_enable_card_detection(host); | 225 | sdhci_enable_card_detection(host); |
263 | } | 226 | } |
264 | 227 | ||
@@ -289,14 +252,11 @@ static void sdhci_led_control(struct led_classdev *led, | |||
289 | 252 | ||
290 | spin_lock_irqsave(&host->lock, flags); | 253 | spin_lock_irqsave(&host->lock, flags); |
291 | 254 | ||
292 | if (host->runtime_suspended) | ||
293 | goto out; | ||
294 | |||
295 | if (brightness == LED_OFF) | 255 | if (brightness == LED_OFF) |
296 | sdhci_deactivate_led(host); | 256 | sdhci_deactivate_led(host); |
297 | else | 257 | else |
298 | sdhci_activate_led(host); | 258 | sdhci_activate_led(host); |
299 | out: | 259 | |
300 | spin_unlock_irqrestore(&host->lock, flags); | 260 | spin_unlock_irqrestore(&host->lock, flags); |
301 | } | 261 | } |
302 | #endif | 262 | #endif |
@@ -441,12 +401,12 @@ static void sdhci_transfer_pio(struct sdhci_host *host) | |||
441 | static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags) | 401 | static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags) |
442 | { | 402 | { |
443 | local_irq_save(*flags); | 403 | local_irq_save(*flags); |
444 | return kmap_atomic(sg_page(sg)) + sg->offset; | 404 | return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; |
445 | } | 405 | } |
446 | 406 | ||
447 | static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags) | 407 | static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags) |
448 | { | 408 | { |
449 | kunmap_atomic(buffer); | 409 | kunmap_atomic(buffer, KM_BIO_SRC_IRQ); |
450 | local_irq_restore(*flags); | 410 | local_irq_restore(*flags); |
451 | } | 411 | } |
452 | 412 | ||
@@ -695,8 +655,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) | |||
695 | } | 655 | } |
696 | 656 | ||
697 | if (count >= 0xF) { | 657 | if (count >= 0xF) { |
698 | DBG("%s: Too large timeout 0x%x requested for CMD%d!\n", | 658 | printk(KERN_WARNING "%s: Too large timeout requested for CMD%d!\n", |
699 | mmc_hostname(host->mmc), count, cmd->opcode); | 659 | mmc_hostname(host->mmc), cmd->opcode); |
700 | count = 0xE; | 660 | count = 0xE; |
701 | } | 661 | } |
702 | 662 | ||
@@ -990,7 +950,7 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) | |||
990 | 950 | ||
991 | while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { | 951 | while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { |
992 | if (timeout == 0) { | 952 | if (timeout == 0) { |
993 | pr_err("%s: Controller never released " | 953 | printk(KERN_ERR "%s: Controller never released " |
994 | "inhibit bit(s).\n", mmc_hostname(host->mmc)); | 954 | "inhibit bit(s).\n", mmc_hostname(host->mmc)); |
995 | sdhci_dumpregs(host); | 955 | sdhci_dumpregs(host); |
996 | cmd->error = -EIO; | 956 | cmd->error = -EIO; |
@@ -1012,7 +972,7 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) | |||
1012 | sdhci_set_transfer_mode(host, cmd); | 972 | sdhci_set_transfer_mode(host, cmd); |
1013 | 973 | ||
1014 | if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { | 974 | if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { |
1015 | pr_err("%s: Unsupported response type!\n", | 975 | printk(KERN_ERR "%s: Unsupported response type!\n", |
1016 | mmc_hostname(host->mmc)); | 976 | mmc_hostname(host->mmc)); |
1017 | cmd->error = -EINVAL; | 977 | cmd->error = -EINVAL; |
1018 | tasklet_schedule(&host->finish_tasklet); | 978 | tasklet_schedule(&host->finish_tasklet); |
@@ -1034,8 +994,7 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) | |||
1034 | flags |= SDHCI_CMD_INDEX; | 994 | flags |= SDHCI_CMD_INDEX; |
1035 | 995 | ||
1036 | /* CMD19 is special in that the Data Present Select should be set */ | 996 | /* CMD19 is special in that the Data Present Select should be set */ |
1037 | if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK || | 997 | if (cmd->data || (cmd->opcode == MMC_SEND_TUNING_BLOCK)) |
1038 | cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200) | ||
1039 | flags |= SDHCI_CMD_DATA; | 998 | flags |= SDHCI_CMD_DATA; |
1040 | 999 | ||
1041 | sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); | 1000 | sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); |
@@ -1085,20 +1044,14 @@ static void sdhci_finish_command(struct sdhci_host *host) | |||
1085 | static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) | 1044 | static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) |
1086 | { | 1045 | { |
1087 | int div = 0; /* Initialized for compiler warning */ | 1046 | int div = 0; /* Initialized for compiler warning */ |
1088 | int real_div = div, clk_mul = 1; | ||
1089 | u16 clk = 0; | 1047 | u16 clk = 0; |
1090 | unsigned long timeout; | 1048 | unsigned long timeout; |
1091 | 1049 | ||
1092 | if (clock && clock == host->clock) | 1050 | if (clock && clock == host->clock) |
1093 | return; | 1051 | return; |
1094 | 1052 | ||
1095 | host->mmc->actual_clock = 0; | 1053 | if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) |
1096 | 1054 | return; | |
1097 | if (host->ops->set_clock) { | ||
1098 | host->ops->set_clock(host, clock); | ||
1099 | if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) | ||
1100 | return; | ||
1101 | } | ||
1102 | 1055 | ||
1103 | sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); | 1056 | sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); |
1104 | 1057 | ||
@@ -1131,8 +1084,6 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) | |||
1131 | * Control register. | 1084 | * Control register. |
1132 | */ | 1085 | */ |
1133 | clk = SDHCI_PROG_CLOCK_MODE; | 1086 | clk = SDHCI_PROG_CLOCK_MODE; |
1134 | real_div = div; | ||
1135 | clk_mul = host->clk_mul; | ||
1136 | div--; | 1087 | div--; |
1137 | } | 1088 | } |
1138 | } else { | 1089 | } else { |
@@ -1146,7 +1097,6 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) | |||
1146 | break; | 1097 | break; |
1147 | } | 1098 | } |
1148 | } | 1099 | } |
1149 | real_div = div; | ||
1150 | div >>= 1; | 1100 | div >>= 1; |
1151 | } | 1101 | } |
1152 | } else { | 1102 | } else { |
@@ -1155,13 +1105,9 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) | |||
1155 | if ((host->max_clk / div) <= clock) | 1105 | if ((host->max_clk / div) <= clock) |
1156 | break; | 1106 | break; |
1157 | } | 1107 | } |
1158 | real_div = div; | ||
1159 | div >>= 1; | 1108 | div >>= 1; |
1160 | } | 1109 | } |
1161 | 1110 | ||
1162 | if (real_div) | ||
1163 | host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div; | ||
1164 | |||
1165 | clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; | 1111 | clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; |
1166 | clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN) | 1112 | clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN) |
1167 | << SDHCI_DIVIDER_HI_SHIFT; | 1113 | << SDHCI_DIVIDER_HI_SHIFT; |
@@ -1173,7 +1119,7 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) | |||
1173 | while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) | 1119 | while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) |
1174 | & SDHCI_CLOCK_INT_STABLE)) { | 1120 | & SDHCI_CLOCK_INT_STABLE)) { |
1175 | if (timeout == 0) { | 1121 | if (timeout == 0) { |
1176 | pr_err("%s: Internal clock never " | 1122 | printk(KERN_ERR "%s: Internal clock never " |
1177 | "stabilised.\n", mmc_hostname(host->mmc)); | 1123 | "stabilised.\n", mmc_hostname(host->mmc)); |
1178 | sdhci_dumpregs(host); | 1124 | sdhci_dumpregs(host); |
1179 | return; | 1125 | return; |
@@ -1189,7 +1135,7 @@ out: | |||
1189 | host->clock = clock; | 1135 | host->clock = clock; |
1190 | } | 1136 | } |
1191 | 1137 | ||
1192 | static int sdhci_set_power(struct sdhci_host *host, unsigned short power) | 1138 | static void sdhci_set_power(struct sdhci_host *host, unsigned short power) |
1193 | { | 1139 | { |
1194 | u8 pwr = 0; | 1140 | u8 pwr = 0; |
1195 | 1141 | ||
@@ -1212,13 +1158,13 @@ static int sdhci_set_power(struct sdhci_host *host, unsigned short power) | |||
1212 | } | 1158 | } |
1213 | 1159 | ||
1214 | if (host->pwr == pwr) | 1160 | if (host->pwr == pwr) |
1215 | return -1; | 1161 | return; |
1216 | 1162 | ||
1217 | host->pwr = pwr; | 1163 | host->pwr = pwr; |
1218 | 1164 | ||
1219 | if (pwr == 0) { | 1165 | if (pwr == 0) { |
1220 | sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); | 1166 | sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); |
1221 | return 0; | 1167 | return; |
1222 | } | 1168 | } |
1223 | 1169 | ||
1224 | /* | 1170 | /* |
@@ -1245,8 +1191,6 @@ static int sdhci_set_power(struct sdhci_host *host, unsigned short power) | |||
1245 | */ | 1191 | */ |
1246 | if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) | 1192 | if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) |
1247 | mdelay(10); | 1193 | mdelay(10); |
1248 | |||
1249 | return power; | ||
1250 | } | 1194 | } |
1251 | 1195 | ||
1252 | /*****************************************************************************\ | 1196 | /*****************************************************************************\ |
@@ -1260,12 +1204,9 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
1260 | struct sdhci_host *host; | 1204 | struct sdhci_host *host; |
1261 | bool present; | 1205 | bool present; |
1262 | unsigned long flags; | 1206 | unsigned long flags; |
1263 | u32 tuning_opcode; | ||
1264 | 1207 | ||
1265 | host = mmc_priv(mmc); | 1208 | host = mmc_priv(mmc); |
1266 | 1209 | ||
1267 | sdhci_runtime_pm_get(host); | ||
1268 | |||
1269 | spin_lock_irqsave(&host->lock, flags); | 1210 | spin_lock_irqsave(&host->lock, flags); |
1270 | 1211 | ||
1271 | WARN_ON(host->mrq != NULL); | 1212 | WARN_ON(host->mrq != NULL); |
@@ -1288,17 +1229,14 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
1288 | host->mrq = mrq; | 1229 | host->mrq = mrq; |
1289 | 1230 | ||
1290 | /* If polling, assume that the card is always present. */ | 1231 | /* If polling, assume that the card is always present. */ |
1291 | if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) | 1232 | if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) { |
1292 | present = true; | 1233 | if (host->ops->get_cd) |
1293 | else | 1234 | present = host->ops->get_cd(host); |
1235 | else | ||
1236 | present = true; | ||
1237 | } else { | ||
1294 | present = sdhci_readl(host, SDHCI_PRESENT_STATE) & | 1238 | present = sdhci_readl(host, SDHCI_PRESENT_STATE) & |
1295 | SDHCI_CARD_PRESENT; | 1239 | SDHCI_CARD_PRESENT; |
1296 | |||
1297 | /* If we're using a cd-gpio, testing the presence bit might fail. */ | ||
1298 | if (!present) { | ||
1299 | int ret = mmc_gpio_get_cd(host->mmc); | ||
1300 | if (ret > 0) | ||
1301 | present = true; | ||
1302 | } | 1240 | } |
1303 | 1241 | ||
1304 | if (!present || host->flags & SDHCI_DEVICE_DEAD) { | 1242 | if (!present || host->flags & SDHCI_DEVICE_DEAD) { |
@@ -1315,19 +1253,12 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
1315 | */ | 1253 | */ |
1316 | if ((host->flags & SDHCI_NEEDS_RETUNING) && | 1254 | if ((host->flags & SDHCI_NEEDS_RETUNING) && |
1317 | !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) { | 1255 | !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) { |
1318 | if (mmc->card) { | 1256 | spin_unlock_irqrestore(&host->lock, flags); |
1319 | /* eMMC uses cmd21 but sd and sdio use cmd19 */ | 1257 | sdhci_execute_tuning(mmc); |
1320 | tuning_opcode = | 1258 | spin_lock_irqsave(&host->lock, flags); |
1321 | mmc->card->type == MMC_TYPE_MMC ? | 1259 | |
1322 | MMC_SEND_TUNING_BLOCK_HS200 : | 1260 | /* Restore original mmc_request structure */ |
1323 | MMC_SEND_TUNING_BLOCK; | 1261 | host->mrq = mrq; |
1324 | spin_unlock_irqrestore(&host->lock, flags); | ||
1325 | sdhci_execute_tuning(mmc, tuning_opcode); | ||
1326 | spin_lock_irqsave(&host->lock, flags); | ||
1327 | |||
1328 | /* Restore original mmc_request structure */ | ||
1329 | host->mrq = mrq; | ||
1330 | } | ||
1331 | } | 1262 | } |
1332 | 1263 | ||
1333 | if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23)) | 1264 | if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23)) |
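The re-tuning block above keeps the usual shape of dropping the host lock around sdhci_execute_tuning() and restoring host->mrq afterwards, while the patch removes the HS200 opcode selection. Roughly, the locking pattern looks like the sketch below, with a pthread mutex standing in for the spinlock and tune() as a placeholder; none of the names are the kernel's.

#include <pthread.h>
#include <stddef.h>

struct req;                                  /* opaque stand-in for mmc_request */

struct tuner {
        pthread_mutex_t lock;
        struct req *mrq;
        int needs_retuning;
};

static void tune(struct tuner *t) { (void)t; /* placeholder for the tuning loop */ }

static void issue_request(struct tuner *t, struct req *mrq)
{
        pthread_mutex_lock(&t->lock);
        t->mrq = mrq;

        if (t->needs_retuning) {
                pthread_mutex_unlock(&t->lock);  /* tuning sleeps, so drop the lock */
                tune(t);
                pthread_mutex_lock(&t->lock);
                t->mrq = mrq;                    /* tuning issued its own request; restore */
        }

        /* ... send the command here ... */
        pthread_mutex_unlock(&t->lock);
}

int main(void)
{
        struct tuner t = { PTHREAD_MUTEX_INITIALIZER, NULL, 1 };
        issue_request(&t, NULL);
        return 0;
}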
@@ -1340,21 +1271,33 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
1340 | spin_unlock_irqrestore(&host->lock, flags); | 1271 | spin_unlock_irqrestore(&host->lock, flags); |
1341 | } | 1272 | } |
1342 | 1273 | ||
1343 | static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) | 1274 | static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) |
1344 | { | 1275 | { |
1276 | struct sdhci_host *host; | ||
1345 | unsigned long flags; | 1277 | unsigned long flags; |
1346 | int vdd_bit = -1; | ||
1347 | u8 ctrl; | 1278 | u8 ctrl; |
1348 | 1279 | ||
1349 | spin_lock_irqsave(&host->lock, flags); | 1280 | host = mmc_priv(mmc); |
1350 | 1281 | ||
1351 | if (host->flags & SDHCI_DEVICE_DEAD) { | 1282 | /* |
1352 | spin_unlock_irqrestore(&host->lock, flags); | 1283 | * Controller registers should not be updated without the |
1353 | if (host->vmmc && ios->power_mode == MMC_POWER_OFF) | 1284 | * controller clock enabled. Set the minimum controller |
1354 | mmc_regulator_set_ocr(host->mmc, host->vmmc, 0); | 1285 | * clock if there is no clock. |
1355 | return; | 1286 | */ |
1287 | if (host->ops->set_clock) { | ||
1288 | if (!host->clock && !ios->clock) { | ||
1289 | host->ops->set_clock(host, host->mmc->f_min); | ||
1290 | host->clock = host->mmc->f_min; | ||
1291 | } else if (ios->clock && (ios->clock != host->clock)) { | ||
1292 | host->ops->set_clock(host, ios->clock); | ||
1293 | } | ||
1356 | } | 1294 | } |
1357 | 1295 | ||
1296 | spin_lock_irqsave(&host->lock, flags); | ||
1297 | |||
1298 | if (host->flags & SDHCI_DEVICE_DEAD) | ||
1299 | goto out; | ||
1300 | |||
1358 | /* | 1301 | /* |
1359 | * Reset the chip on each power off. | 1302 | * Reset the chip on each power off. |
1360 | * Should clear out any weird states. | 1303 | * Should clear out any weird states. |
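The rewritten sdhci_set_ios() above brackets its register writes with the controller clock: when neither a clock is running nor one requested it first programs the platform minimum so the registers can still be written, and (in a later hunk of this function) it gates the clock off again only after the last write. A small standalone model of that bracketing, with invented names:

#include <stdio.h>

struct clocked_dev {
        unsigned int clock;      /* currently programmed controller clock, Hz */
        unsigned int f_min;      /* minimum usable clock */
};

static void program_clock(struct clocked_dev *dev, unsigned int hz)
{
        dev->clock = hz;
        printf("controller clock -> %u Hz\n", hz);
}

static void configure(struct clocked_dev *dev, unsigned int requested_hz)
{
        /* registers must not be touched with the clock fully off */
        if (!dev->clock && !requested_hz)
                program_clock(dev, dev->f_min);
        else if (requested_hz && requested_hz != dev->clock)
                program_clock(dev, requested_hz);

        /* ... register writes happen here ... */

        /* gate the clock only after all writes are done */
        if (!requested_hz)
                program_clock(dev, 0);
}

int main(void)
{
        struct clocked_dev dev = { .clock = 0, .f_min = 400000 };
        configure(&dev, 0);          /* bring up f_min, write, then gate off */
        configure(&dev, 25000000);   /* switch to the requested rate */
        return 0;
}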
@@ -1364,18 +1307,12 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) | |||
1364 | sdhci_reinit(host); | 1307 | sdhci_reinit(host); |
1365 | } | 1308 | } |
1366 | 1309 | ||
1367 | sdhci_set_clock(host, ios->clock); | ||
1368 | |||
1369 | if (ios->power_mode == MMC_POWER_OFF) | 1310 | if (ios->power_mode == MMC_POWER_OFF) |
1370 | vdd_bit = sdhci_set_power(host, -1); | 1311 | sdhci_set_power(host, -1); |
1371 | else | 1312 | else |
1372 | vdd_bit = sdhci_set_power(host, ios->vdd); | 1313 | sdhci_set_power(host, ios->vdd); |
1373 | 1314 | ||
1374 | if (host->vmmc && vdd_bit != -1) { | 1315 | sdhci_set_clock(host, ios->clock); |
1375 | spin_unlock_irqrestore(&host->lock, flags); | ||
1376 | mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit); | ||
1377 | spin_lock_irqsave(&host->lock, flags); | ||
1378 | } | ||
1379 | 1316 | ||
1380 | if (host->ops->platform_send_init_74_clocks) | 1317 | if (host->ops->platform_send_init_74_clocks) |
1381 | host->ops->platform_send_init_74_clocks(host, ios->power_mode); | 1318 | host->ops->platform_send_init_74_clocks(host, ios->power_mode); |
@@ -1418,11 +1355,12 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) | |||
1418 | unsigned int clock; | 1355 | unsigned int clock; |
1419 | 1356 | ||
1420 | /* In case of UHS-I modes, set High Speed Enable */ | 1357 | /* In case of UHS-I modes, set High Speed Enable */ |
1421 | if ((ios->timing == MMC_TIMING_MMC_HS200) || | 1358 | if (((ios->timing == MMC_TIMING_UHS_SDR50) || |
1422 | (ios->timing == MMC_TIMING_UHS_SDR50) || | ||
1423 | (ios->timing == MMC_TIMING_UHS_SDR104) || | 1359 | (ios->timing == MMC_TIMING_UHS_SDR104) || |
1424 | (ios->timing == MMC_TIMING_UHS_DDR50) || | 1360 | (ios->timing == MMC_TIMING_UHS_DDR50) || |
1425 | (ios->timing == MMC_TIMING_UHS_SDR25)) | 1361 | (ios->timing == MMC_TIMING_UHS_SDR25) || |
1362 | (ios->timing == MMC_TIMING_UHS_SDR12)) | ||
1363 | && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) | ||
1426 | ctrl |= SDHCI_CTRL_HISPD; | 1364 | ctrl |= SDHCI_CTRL_HISPD; |
1427 | 1365 | ||
1428 | ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); | 1366 | ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); |
@@ -1455,9 +1393,9 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) | |||
1455 | sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); | 1393 | sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); |
1456 | 1394 | ||
1457 | /* Re-enable SD Clock */ | 1395 | /* Re-enable SD Clock */ |
1458 | clock = host->clock; | 1396 | clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); |
1459 | host->clock = 0; | 1397 | clk |= SDHCI_CLOCK_CARD_EN; |
1460 | sdhci_set_clock(host, clock); | 1398 | sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); |
1461 | } | 1399 | } |
1462 | 1400 | ||
1463 | 1401 | ||
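Instead of zeroing host->clock and re-running the divider calculation, the new code simply toggles the card-clock enable bit around the host-control update. A runnable sketch of that read-modify-write; the bit value mirrors SDHCI_CLOCK_CARD_EN, and the "register" is just a variable here.

#include <stdio.h>

#define CLOCK_CARD_EN 0x0004

static unsigned short clock_control = 0x0107;   /* example divider + enable bits */

static void sd_clock(int on)
{
        unsigned short clk = clock_control;     /* models sdhci_readw() */

        if (on)
                clk |= CLOCK_CARD_EN;
        else
                clk &= ~CLOCK_CARD_EN;

        clock_control = clk;                    /* models sdhci_writew() */
        printf("CLOCK_CONTROL = 0x%04x\n", clock_control);
}

int main(void)
{
        sd_clock(0);        /* stop SDCLK before touching HOST_CONTROL */
        /* ... change bus width / high-speed bit here ... */
        sd_clock(1);        /* re-enable SDCLK, divider untouched */
        return 0;
}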
@@ -1472,9 +1410,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) | |||
1472 | ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); | 1410 | ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); |
1473 | /* Select Bus Speed Mode for host */ | 1411 | /* Select Bus Speed Mode for host */ |
1474 | ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; | 1412 | ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; |
1475 | if (ios->timing == MMC_TIMING_MMC_HS200) | 1413 | if (ios->timing == MMC_TIMING_UHS_SDR12) |
1476 | ctrl_2 |= SDHCI_CTRL_HS_SDR200; | ||
1477 | else if (ios->timing == MMC_TIMING_UHS_SDR12) | ||
1478 | ctrl_2 |= SDHCI_CTRL_UHS_SDR12; | 1414 | ctrl_2 |= SDHCI_CTRL_UHS_SDR12; |
1479 | else if (ios->timing == MMC_TIMING_UHS_SDR25) | 1415 | else if (ios->timing == MMC_TIMING_UHS_SDR25) |
1480 | ctrl_2 |= SDHCI_CTRL_UHS_SDR25; | 1416 | ctrl_2 |= SDHCI_CTRL_UHS_SDR25; |
@@ -1488,9 +1424,9 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) | |||
1488 | } | 1424 | } |
1489 | 1425 | ||
1490 | /* Re-enable SD Clock */ | 1426 | /* Re-enable SD Clock */ |
1491 | clock = host->clock; | 1427 | clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); |
1492 | host->clock = 0; | 1428 | clk |= SDHCI_CLOCK_CARD_EN; |
1493 | sdhci_set_clock(host, clock); | 1429 | sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); |
1494 | } else | 1430 | } else |
1495 | sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); | 1431 | sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); |
1496 | 1432 | ||
@@ -1502,20 +1438,18 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) | |||
1502 | if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) | 1438 | if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) |
1503 | sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); | 1439 | sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); |
1504 | 1440 | ||
1441 | out: | ||
1505 | mmiowb(); | 1442 | mmiowb(); |
1506 | spin_unlock_irqrestore(&host->lock, flags); | 1443 | spin_unlock_irqrestore(&host->lock, flags); |
1444 | /* | ||
1445 | * Controller clock should only be disabled after all the register | ||
1446 | * writes are done. | ||
1447 | */ | ||
1448 | if (!ios->clock && host->ops->set_clock) | ||
1449 | host->ops->set_clock(host, ios->clock); | ||
1507 | } | 1450 | } |
1508 | 1451 | ||
1509 | static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | 1452 | static int check_ro(struct sdhci_host *host) |
1510 | { | ||
1511 | struct sdhci_host *host = mmc_priv(mmc); | ||
1512 | |||
1513 | sdhci_runtime_pm_get(host); | ||
1514 | sdhci_do_set_ios(host, ios); | ||
1515 | sdhci_runtime_pm_put(host); | ||
1516 | } | ||
1517 | |||
1518 | static int sdhci_check_ro(struct sdhci_host *host) | ||
1519 | { | 1453 | { |
1520 | unsigned long flags; | 1454 | unsigned long flags; |
1521 | int is_readonly; | 1455 | int is_readonly; |
@@ -1539,16 +1473,19 @@ static int sdhci_check_ro(struct sdhci_host *host) | |||
1539 | 1473 | ||
1540 | #define SAMPLE_COUNT 5 | 1474 | #define SAMPLE_COUNT 5 |
1541 | 1475 | ||
1542 | static int sdhci_do_get_ro(struct sdhci_host *host) | 1476 | static int sdhci_get_ro(struct mmc_host *mmc) |
1543 | { | 1477 | { |
1478 | struct sdhci_host *host; | ||
1544 | int i, ro_count; | 1479 | int i, ro_count; |
1545 | 1480 | ||
1481 | host = mmc_priv(mmc); | ||
1482 | |||
1546 | if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT)) | 1483 | if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT)) |
1547 | return sdhci_check_ro(host); | 1484 | return check_ro(host); |
1548 | 1485 | ||
1549 | ro_count = 0; | 1486 | ro_count = 0; |
1550 | for (i = 0; i < SAMPLE_COUNT; i++) { | 1487 | for (i = 0; i < SAMPLE_COUNT; i++) { |
1551 | if (sdhci_check_ro(host)) { | 1488 | if (check_ro(host)) { |
1552 | if (++ro_count > SAMPLE_COUNT / 2) | 1489 | if (++ro_count > SAMPLE_COUNT / 2) |
1553 | return 1; | 1490 | return 1; |
1554 | } | 1491 | } |
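For controllers with an unstable write-protect signal, the renamed check_ro() helper is sampled several times and a simple majority decides. The logic reduces to the small program below; the fake read_wp_switch() input exists only to make the sketch runnable.

#include <stdio.h>

#define SAMPLE_COUNT 5

/* stand-in for check_ro(): nonzero when the write-protect switch reads on */
static int read_wp_switch(int i)
{
        return (i % 2) == 0;             /* fake noisy input for the demo */
}

static int card_read_only(void)
{
        int i, ro_count = 0;

        for (i = 0; i < SAMPLE_COUNT; i++) {
                if (read_wp_switch(i)) {
                        if (++ro_count > SAMPLE_COUNT / 2)
                                return 1;        /* majority says read-only */
                }
                /* the driver inserts a short sleep between samples */
        }
        return 0;
}

int main(void)
{
        printf("read-only: %d\n", card_read_only());
        return 0;
}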
@@ -1557,125 +1494,96 @@ static int sdhci_do_get_ro(struct sdhci_host *host) | |||
1557 | return 0; | 1494 | return 0; |
1558 | } | 1495 | } |
1559 | 1496 | ||
1560 | static void sdhci_hw_reset(struct mmc_host *mmc) | 1497 | static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) |
1561 | { | 1498 | { |
1562 | struct sdhci_host *host = mmc_priv(mmc); | 1499 | struct sdhci_host *host; |
1563 | 1500 | unsigned long flags; | |
1564 | if (host->ops && host->ops->hw_reset) | ||
1565 | host->ops->hw_reset(host); | ||
1566 | } | ||
1567 | 1501 | ||
1568 | static int sdhci_get_ro(struct mmc_host *mmc) | 1502 | host = mmc_priv(mmc); |
1569 | { | ||
1570 | struct sdhci_host *host = mmc_priv(mmc); | ||
1571 | int ret; | ||
1572 | 1503 | ||
1573 | sdhci_runtime_pm_get(host); | 1504 | spin_lock_irqsave(&host->lock, flags); |
1574 | ret = sdhci_do_get_ro(host); | ||
1575 | sdhci_runtime_pm_put(host); | ||
1576 | return ret; | ||
1577 | } | ||
1578 | 1505 | ||
1579 | static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) | ||
1580 | { | ||
1581 | if (host->flags & SDHCI_DEVICE_DEAD) | 1506 | if (host->flags & SDHCI_DEVICE_DEAD) |
1582 | goto out; | 1507 | goto out; |
1583 | 1508 | ||
1584 | if (enable) | 1509 | if (enable) |
1585 | host->flags |= SDHCI_SDIO_IRQ_ENABLED; | ||
1586 | else | ||
1587 | host->flags &= ~SDHCI_SDIO_IRQ_ENABLED; | ||
1588 | |||
1589 | /* SDIO IRQ will be enabled as appropriate in runtime resume */ | ||
1590 | if (host->runtime_suspended) | ||
1591 | goto out; | ||
1592 | |||
1593 | if (enable) | ||
1594 | sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT); | 1510 | sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT); |
1595 | else | 1511 | else |
1596 | sdhci_mask_irqs(host, SDHCI_INT_CARD_INT); | 1512 | sdhci_mask_irqs(host, SDHCI_INT_CARD_INT); |
1597 | out: | 1513 | out: |
1598 | mmiowb(); | 1514 | mmiowb(); |
1599 | } | ||
1600 | 1515 | ||
1601 | static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) | ||
1602 | { | ||
1603 | struct sdhci_host *host = mmc_priv(mmc); | ||
1604 | unsigned long flags; | ||
1605 | |||
1606 | spin_lock_irqsave(&host->lock, flags); | ||
1607 | sdhci_enable_sdio_irq_nolock(host, enable); | ||
1608 | spin_unlock_irqrestore(&host->lock, flags); | 1516 | spin_unlock_irqrestore(&host->lock, flags); |
1609 | } | 1517 | } |
1610 | 1518 | ||
1611 | static int sdhci_do_3_3v_signal_voltage_switch(struct sdhci_host *host, | 1519 | static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, |
1612 | u16 ctrl) | 1520 | struct mmc_ios *ios) |
1613 | { | 1521 | { |
1614 | int ret; | 1522 | struct sdhci_host *host; |
1615 | 1523 | u8 pwr; | |
1616 | /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ | 1524 | u16 clk, ctrl; |
1617 | ctrl &= ~SDHCI_CTRL_VDD_180; | 1525 | u32 present_state; |
1618 | sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); | ||
1619 | 1526 | ||
1620 | if (host->vqmmc) { | 1527 | host = mmc_priv(mmc); |
1621 | ret = regulator_set_voltage(host->vqmmc, 2700000, 3600000); | ||
1622 | if (ret) { | ||
1623 | pr_warning("%s: Switching to 3.3V signalling voltage " | ||
1624 | " failed\n", mmc_hostname(host->mmc)); | ||
1625 | return -EIO; | ||
1626 | } | ||
1627 | } | ||
1628 | /* Wait for 5ms */ | ||
1629 | usleep_range(5000, 5500); | ||
1630 | 1528 | ||
1631 | /* 3.3V regulator output should be stable within 5 ms */ | 1529 | /* |
1632 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); | 1530 | * Signal Voltage Switching is only applicable for Host Controllers |
1633 | if (!(ctrl & SDHCI_CTRL_VDD_180)) | 1531 | * v3.00 and above. |
1532 | */ | ||
1533 | if (host->version < SDHCI_SPEC_300) | ||
1634 | return 0; | 1534 | return 0; |
1635 | 1535 | ||
1636 | pr_warning("%s: 3.3V regulator output did not became stable\n", | 1536 | if (host->quirks & SDHCI_QUIRK_NON_STD_VOLTAGE_SWITCHING) { |
1637 | mmc_hostname(host->mmc)); | 1537 | if (host->ops->switch_signal_voltage) |
1638 | 1538 | return host->ops->switch_signal_voltage( | |
1639 | return -EIO; | 1539 | host, ios->signal_voltage); |
1640 | } | 1540 | } |
1641 | 1541 | ||
1642 | static int sdhci_do_1_8v_signal_voltage_switch(struct sdhci_host *host, | 1542 | /* |
1643 | u16 ctrl) | 1543 | * We first check whether the request is to set signalling voltage |
1644 | { | 1544 | * to 3.3V. If so, we change the voltage to 3.3V and return quickly. |
1645 | u8 pwr; | 1545 | */ |
1646 | u16 clk; | 1546 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); |
1647 | u32 present_state; | 1547 | if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) { |
1648 | int ret; | 1548 | /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ |
1549 | ctrl &= ~SDHCI_CTRL_VDD_180; | ||
1550 | sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); | ||
1649 | 1551 | ||
1650 | /* Stop SDCLK */ | 1552 | /* Wait for 5ms */ |
1651 | clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); | 1553 | usleep_range(5000, 5500); |
1652 | clk &= ~SDHCI_CLOCK_CARD_EN; | ||
1653 | sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); | ||
1654 | 1554 | ||
1655 | /* Check whether DAT[3:0] is 0000 */ | 1555 | /* 3.3V regulator output should be stable within 5 ms */ |
1656 | present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); | 1556 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); |
1657 | if (!((present_state & SDHCI_DATA_LVL_MASK) >> | 1557 | if (!(ctrl & SDHCI_CTRL_VDD_180)) |
1658 | SDHCI_DATA_LVL_SHIFT)) { | 1558 | return 0; |
1659 | /* | 1559 | else { |
1660 | * Enable 1.8V Signal Enable in the Host Control2 | 1560 | printk(KERN_INFO DRIVER_NAME ": Switching to 3.3V " |
1661 | * register | 1561 | "signalling voltage failed\n"); |
1662 | */ | 1562 | return -EIO; |
1663 | if (host->vqmmc) | 1563 | } |
1664 | ret = regulator_set_voltage(host->vqmmc, | 1564 | } else if (!(ctrl & SDHCI_CTRL_VDD_180) && |
1665 | 1700000, 1950000); | 1565 | (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)) { |
1666 | else | 1566 | /* Stop SDCLK */ |
1667 | ret = 0; | 1567 | clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); |
1568 | clk &= ~SDHCI_CLOCK_CARD_EN; | ||
1569 | sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); | ||
1668 | 1570 | ||
1669 | if (!ret) { | 1571 | /* Check whether DAT[3:0] is 0000 */ |
1572 | present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); | ||
1573 | if (!((present_state & SDHCI_DATA_LVL_MASK) >> | ||
1574 | SDHCI_DATA_LVL_SHIFT)) { | ||
1575 | /* | ||
1576 | * Enable 1.8V Signal Enable in the Host Control2 | ||
1577 | * register | ||
1578 | */ | ||
1670 | ctrl |= SDHCI_CTRL_VDD_180; | 1579 | ctrl |= SDHCI_CTRL_VDD_180; |
1671 | sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); | 1580 | sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); |
1672 | 1581 | ||
1673 | /* Wait for 5ms */ | 1582 | /* Wait for 5ms */ |
1674 | usleep_range(5000, 5500); | 1583 | usleep_range(5000, 5500); |
1675 | |||
1676 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); | 1584 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); |
1677 | if (ctrl & SDHCI_CTRL_VDD_180) { | 1585 | if (ctrl & SDHCI_CTRL_VDD_180) { |
1678 | /* Provide SDCLK again and wait for 1ms */ | 1586 | /* Provide SDCLK again and wait for 1ms*/ |
1679 | clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); | 1587 | clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); |
1680 | clk |= SDHCI_CLOCK_CARD_EN; | 1588 | clk |= SDHCI_CLOCK_CARD_EN; |
1681 | sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); | 1589 | sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); |
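The consolidated 1.8V branch above follows the SD 3.0 switching sequence: stop SDCLK, confirm the card is holding DAT[3:0] low, set the 1.8V enable bit, wait about 5 ms for the supply to settle, then restart SDCLK for about 1 ms. A standalone approximation of that sequence; the bit values mirror the SDHCI definitions, but the register variables and function are local stand-ins.

#include <stdio.h>
#include <unistd.h>

static unsigned short clock_control = 0x0007;   /* card clock enabled */
static unsigned short host_control2;
static unsigned int   present_state;            /* DAT[3:0] level bits = 0 */

#define CLOCK_CARD_EN 0x0004
#define CTRL_VDD_180  0x0008                    /* mirrors SDHCI_CTRL_VDD_180 */
#define DATA_LVL_MASK 0x00f00000                /* mirrors SDHCI_DATA_LVL_MASK */

static int switch_to_1v8(void)
{
        /* 1. stop SDCLK while the card drives DAT[3:0] low */
        clock_control &= ~CLOCK_CARD_EN;

        /* 2. the card must be holding DAT[3:0] at 0 before switching */
        if (present_state & DATA_LVL_MASK)
                return -1;

        /* 3. request 1.8V signalling and give the supply ~5 ms to settle */
        host_control2 |= CTRL_VDD_180;
        usleep(5000);

        /* 4. the bit reading back as set means the controller accepted it */
        if (!(host_control2 & CTRL_VDD_180))
                return -1;

        /* 5. provide SDCLK again and let it run for ~1 ms */
        clock_control |= CLOCK_CARD_EN;
        usleep(1000);
        return 0;
}

int main(void)
{
        printf("1.8V switch %s\n", switch_to_1v8() ? "failed" : "ok");
        return 0;
}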
@@ -1692,74 +1600,30 @@ static int sdhci_do_1_8v_signal_voltage_switch(struct sdhci_host *host, | |||
1692 | return 0; | 1600 | return 0; |
1693 | } | 1601 | } |
1694 | } | 1602 | } |
1695 | } | ||
1696 | |||
1697 | /* | ||
1698 | * If we are here, that means the switch to 1.8V signaling | ||
1699 | * failed. We power cycle the card, and retry initialization | ||
1700 | * sequence by setting S18R to 0. | ||
1701 | */ | ||
1702 | pwr = sdhci_readb(host, SDHCI_POWER_CONTROL); | ||
1703 | pwr &= ~SDHCI_POWER_ON; | ||
1704 | sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); | ||
1705 | if (host->vmmc) | ||
1706 | regulator_disable(host->vmmc); | ||
1707 | |||
1708 | /* Wait for 1ms as per the spec */ | ||
1709 | usleep_range(1000, 1500); | ||
1710 | pwr |= SDHCI_POWER_ON; | ||
1711 | sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); | ||
1712 | if (host->vmmc) | ||
1713 | regulator_enable(host->vmmc); | ||
1714 | |||
1715 | pr_warning("%s: Switching to 1.8V signalling voltage failed, " | ||
1716 | "retrying with S18R set to 0\n", mmc_hostname(host->mmc)); | ||
1717 | 1603 | ||
1718 | return -EAGAIN; | 1604 | /* |
1719 | } | 1605 | * If we are here, that means the switch to 1.8V signaling |
1720 | 1606 | * failed. We power cycle the card, and retry initialization | |
1721 | static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, | 1607 | * sequence by setting S18R to 0. |
1722 | struct mmc_ios *ios) | 1608 | */ |
1723 | { | 1609 | pwr = sdhci_readb(host, SDHCI_POWER_CONTROL); |
1724 | u16 ctrl; | 1610 | pwr &= ~SDHCI_POWER_ON; |
1611 | sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); | ||
1725 | 1612 | ||
1726 | /* | 1613 | /* Wait for 1ms as per the spec */ |
1727 | * Signal Voltage Switching is only applicable for Host Controllers | 1614 | usleep_range(1000, 1500); |
1728 | * v3.00 and above. | 1615 | pwr |= SDHCI_POWER_ON; |
1729 | */ | 1616 | sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); |
1730 | if (host->version < SDHCI_SPEC_300) | ||
1731 | return 0; | ||
1732 | 1617 | ||
1733 | /* | 1618 | printk(KERN_INFO DRIVER_NAME ": Switching to 1.8V signalling " |
1734 | * We first check whether the request is to set signalling voltage | 1619 | "voltage failed, retrying with S18R set to 0\n"); |
1735 | * to 3.3V. If so, we change the voltage to 3.3V and return quickly. | 1620 | return -EAGAIN; |
1736 | */ | 1621 | } else |
1737 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); | ||
1738 | if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) | ||
1739 | return sdhci_do_3_3v_signal_voltage_switch(host, ctrl); | ||
1740 | else if (!(ctrl & SDHCI_CTRL_VDD_180) && | ||
1741 | (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)) | ||
1742 | return sdhci_do_1_8v_signal_voltage_switch(host, ctrl); | ||
1743 | else | ||
1744 | /* No signal voltage switch required */ | 1622 | /* No signal voltage switch required */ |
1745 | return 0; | 1623 | return 0; |
1746 | } | 1624 | } |
1747 | 1625 | ||
1748 | static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, | 1626 | static int sdhci_execute_tuning(struct mmc_host *mmc) |
1749 | struct mmc_ios *ios) | ||
1750 | { | ||
1751 | struct sdhci_host *host = mmc_priv(mmc); | ||
1752 | int err; | ||
1753 | |||
1754 | if (host->version < SDHCI_SPEC_300) | ||
1755 | return 0; | ||
1756 | sdhci_runtime_pm_get(host); | ||
1757 | err = sdhci_do_start_signal_voltage_switch(host, ios); | ||
1758 | sdhci_runtime_pm_put(host); | ||
1759 | return err; | ||
1760 | } | ||
1761 | |||
1762 | static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) | ||
1763 | { | 1627 | { |
1764 | struct sdhci_host *host; | 1628 | struct sdhci_host *host; |
1765 | u16 ctrl; | 1629 | u16 ctrl; |
@@ -1767,35 +1631,34 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) | |||
1767 | int tuning_loop_counter = MAX_TUNING_LOOP; | 1631 | int tuning_loop_counter = MAX_TUNING_LOOP; |
1768 | unsigned long timeout; | 1632 | unsigned long timeout; |
1769 | int err = 0; | 1633 | int err = 0; |
1770 | bool requires_tuning_nonuhs = false; | ||
1771 | 1634 | ||
1772 | host = mmc_priv(mmc); | 1635 | host = mmc_priv(mmc); |
1773 | 1636 | ||
1774 | sdhci_runtime_pm_get(host); | ||
1775 | disable_irq(host->irq); | 1637 | disable_irq(host->irq); |
1776 | spin_lock(&host->lock); | 1638 | spin_lock(&host->lock); |
1777 | 1639 | ||
1640 | if ((host->quirks & SDHCI_QUIRK_NON_STANDARD_TUNING) && | ||
1641 | host->ops->execute_freq_tuning) { | ||
1642 | err = host->ops->execute_freq_tuning(host); | ||
1643 | spin_unlock(&host->lock); | ||
1644 | enable_irq(host->irq); | ||
1645 | return err; | ||
1646 | } | ||
1647 | |||
1778 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); | 1648 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); |
1779 | 1649 | ||
1780 | /* | 1650 | /* |
1781 | * The Host Controller needs tuning only in case of SDR104 mode | 1651 | * Host Controller needs tuning only in case of SDR104 mode |
1782 | * and for SDR50 mode when Use Tuning for SDR50 is set in the | 1652 | * and for SDR50 mode when Use Tuning for SDR50 is set in |
1783 | * Capabilities register. | 1653 | * Capabilities register. |
1784 | * If the Host Controller supports the HS200 mode then the | ||
1785 | * tuning function has to be executed. | ||
1786 | */ | 1654 | */ |
1787 | if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) && | ||
1788 | (host->flags & SDHCI_SDR50_NEEDS_TUNING || | ||
1789 | host->flags & SDHCI_HS200_NEEDS_TUNING)) | ||
1790 | requires_tuning_nonuhs = true; | ||
1791 | |||
1792 | if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) || | 1655 | if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) || |
1793 | requires_tuning_nonuhs) | 1656 | (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) && |
1657 | (host->flags & SDHCI_SDR50_NEEDS_TUNING))) | ||
1794 | ctrl |= SDHCI_CTRL_EXEC_TUNING; | 1658 | ctrl |= SDHCI_CTRL_EXEC_TUNING; |
1795 | else { | 1659 | else { |
1796 | spin_unlock(&host->lock); | 1660 | spin_unlock(&host->lock); |
1797 | enable_irq(host->irq); | 1661 | enable_irq(host->irq); |
1798 | sdhci_runtime_pm_put(host); | ||
1799 | return 0; | 1662 | return 0; |
1800 | } | 1663 | } |
1801 | 1664 | ||
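The tuning entry point now defers to a platform execute_freq_tuning() hook when the non-standard-tuning quirk is set, and otherwise runs the CMD19 loop only for SDR104, or for SDR50 when the capability bits say it needs tuning. That decision in isolation, with types and names invented for the sketch:

#include <stdbool.h>
#include <stdio.h>

enum uhs_mode { UHS_SDR12, UHS_SDR25, UHS_SDR50, UHS_SDR104, UHS_DDR50 };

struct tuning_ctx {
        bool nonstandard_quirk;     /* platform supplies its own tuning */
        bool sdr50_needs_tuning;    /* capability bit: SDR50 requires tuning */
        enum uhs_mode mode;
};

/* returns true when the standard CMD19 tuning loop should run */
static bool should_tune(const struct tuning_ctx *c)
{
        if (c->nonstandard_quirk)
                return false;       /* handed off to the platform hook */
        if (c->mode == UHS_SDR104)
                return true;
        if (c->mode == UHS_SDR50 && c->sdr50_needs_tuning)
                return true;
        return false;
}

int main(void)
{
        struct tuning_ctx c = { false, true, UHS_SDR50 };
        printf("tune: %d\n", should_tune(&c));
        return 0;
}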
@@ -1821,12 +1684,12 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) | |||
1821 | timeout = 150; | 1684 | timeout = 150; |
1822 | do { | 1685 | do { |
1823 | struct mmc_command cmd = {0}; | 1686 | struct mmc_command cmd = {0}; |
1824 | struct mmc_request mrq = {NULL}; | 1687 | struct mmc_request mrq = {0}; |
1825 | 1688 | ||
1826 | if (!tuning_loop_counter && !timeout) | 1689 | if (!tuning_loop_counter && !timeout) |
1827 | break; | 1690 | break; |
1828 | 1691 | ||
1829 | cmd.opcode = opcode; | 1692 | cmd.opcode = MMC_SEND_TUNING_BLOCK; |
1830 | cmd.arg = 0; | 1693 | cmd.arg = 0; |
1831 | cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; | 1694 | cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; |
1832 | cmd.retries = 0; | 1695 | cmd.retries = 0; |
@@ -1841,17 +1704,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) | |||
1841 | * block to the Host Controller. So we set the block size | 1704 | * block to the Host Controller. So we set the block size |
1842 | * to 64 here. | 1705 | * to 64 here. |
1843 | */ | 1706 | */ |
1844 | if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) { | 1707 | sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE); |
1845 | if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) | ||
1846 | sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128), | ||
1847 | SDHCI_BLOCK_SIZE); | ||
1848 | else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) | ||
1849 | sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), | ||
1850 | SDHCI_BLOCK_SIZE); | ||
1851 | } else { | ||
1852 | sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), | ||
1853 | SDHCI_BLOCK_SIZE); | ||
1854 | } | ||
1855 | 1708 | ||
1856 | /* | 1709 | /* |
1857 | * The tuning block is sent by the card to the host controller. | 1710 | * The tuning block is sent by the card to the host controller. |
@@ -1877,7 +1730,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) | |||
1877 | spin_lock(&host->lock); | 1730 | spin_lock(&host->lock); |
1878 | 1731 | ||
1879 | if (!host->tuning_done) { | 1732 | if (!host->tuning_done) { |
1880 | pr_info(DRIVER_NAME ": Timeout waiting for " | 1733 | printk(KERN_INFO DRIVER_NAME ": Timeout waiting for " |
1881 | "Buffer Read Ready interrupt during tuning " | 1734 | "Buffer Read Ready interrupt during tuning " |
1882 | "procedure, falling back to fixed sampling " | 1735 | "procedure, falling back to fixed sampling " |
1883 | "clock\n"); | 1736 | "clock\n"); |
@@ -1907,7 +1760,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) | |||
1907 | sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); | 1760 | sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); |
1908 | } else { | 1761 | } else { |
1909 | if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) { | 1762 | if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) { |
1910 | pr_info(DRIVER_NAME ": Tuning procedure" | 1763 | printk(KERN_INFO DRIVER_NAME ": Tuning procedure" |
1911 | " failed, falling back to fixed sampling" | 1764 | " failed, falling back to fixed sampling" |
1912 | " clock\n"); | 1765 | " clock\n"); |
1913 | err = -EIO; | 1766 | err = -EIO; |
@@ -1923,7 +1776,6 @@ out: | |||
1923 | */ | 1776 | */ |
1924 | if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count && | 1777 | if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count && |
1925 | (host->tuning_mode == SDHCI_TUNING_MODE_1)) { | 1778 | (host->tuning_mode == SDHCI_TUNING_MODE_1)) { |
1926 | host->flags |= SDHCI_USING_RETUNING_TIMER; | ||
1927 | mod_timer(&host->tuning_timer, jiffies + | 1779 | mod_timer(&host->tuning_timer, jiffies + |
1928 | host->tuning_count * HZ); | 1780 | host->tuning_count * HZ); |
1929 | /* Tuning mode 1 limits the maximum data length to 4MB */ | 1781 | /* Tuning mode 1 limits the maximum data length to 4MB */ |
@@ -1941,29 +1793,41 @@ out: | |||
1941 | * try tuning again at a later time, when the re-tuning timer expires. | 1793 | * try tuning again at a later time, when the re-tuning timer expires. |
1942 | * So for these controllers, we return 0. Since there might be other | 1794 | * So for these controllers, we return 0. Since there might be other |
1943 | * controllers who do not have this capability, we return error for | 1795 | * controllers who do not have this capability, we return error for |
1944 | * them. SDHCI_USING_RETUNING_TIMER means the host is currently using | 1796 | * them. |
1945 | * a retuning timer to do the retuning for the card. | ||
1946 | */ | 1797 | */ |
1947 | if (err && (host->flags & SDHCI_USING_RETUNING_TIMER)) | 1798 | if (err && host->tuning_count && |
1799 | host->tuning_mode == SDHCI_TUNING_MODE_1) | ||
1948 | err = 0; | 1800 | err = 0; |
1949 | 1801 | ||
1950 | sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier); | 1802 | sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier); |
1951 | spin_unlock(&host->lock); | 1803 | spin_unlock(&host->lock); |
1952 | enable_irq(host->irq); | 1804 | enable_irq(host->irq); |
1953 | sdhci_runtime_pm_put(host); | ||
1954 | 1805 | ||
1955 | return err; | 1806 | return err; |
1956 | } | 1807 | } |
1957 | 1808 | ||
1958 | static void sdhci_do_enable_preset_value(struct sdhci_host *host, bool enable) | 1809 | static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable) |
1959 | { | 1810 | { |
1811 | struct sdhci_host *host; | ||
1960 | u16 ctrl; | 1812 | u16 ctrl; |
1961 | unsigned long flags; | 1813 | unsigned long flags; |
1962 | 1814 | ||
1815 | host = mmc_priv(mmc); | ||
1816 | |||
1963 | /* Host Controller v3.00 defines preset value registers */ | 1817 | /* Host Controller v3.00 defines preset value registers */ |
1964 | if (host->version < SDHCI_SPEC_300) | 1818 | if (host->version < SDHCI_SPEC_300) |
1965 | return; | 1819 | return; |
1966 | 1820 | ||
1821 | /* | ||
1822 | * Enabling preset value would make programming clock | ||
1823 | * divider ineffective. The controller would use the | ||
1824 | * values present in the preset value registers. In | ||
1825 | * case of non-standard clock, let the platform driver | ||
1826 | * decide whether to enable preset or not. | ||
1827 | */ | ||
1828 | if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) | ||
1829 | return; | ||
1830 | |||
1967 | spin_lock_irqsave(&host->lock, flags); | 1831 | spin_lock_irqsave(&host->lock, flags); |
1968 | 1832 | ||
1969 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); | 1833 | ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); |
@@ -1975,60 +1839,54 @@ static void sdhci_do_enable_preset_value(struct sdhci_host *host, bool enable) | |||
1975 | if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) { | 1839 | if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) { |
1976 | ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; | 1840 | ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; |
1977 | sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); | 1841 | sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); |
1978 | host->flags |= SDHCI_PV_ENABLED; | ||
1979 | } else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) { | 1842 | } else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) { |
1980 | ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; | 1843 | ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; |
1981 | sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); | 1844 | sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); |
1982 | host->flags &= ~SDHCI_PV_ENABLED; | ||
1983 | } | 1845 | } |
1984 | 1846 | ||
1985 | spin_unlock_irqrestore(&host->lock, flags); | 1847 | spin_unlock_irqrestore(&host->lock, flags); |
1986 | } | 1848 | } |
1987 | 1849 | ||
1988 | static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable) | 1850 | int sdhci_enable(struct mmc_host *mmc) |
1989 | { | 1851 | { |
1990 | struct sdhci_host *host = mmc_priv(mmc); | 1852 | struct sdhci_host *host = mmc_priv(mmc); |
1991 | 1853 | ||
1992 | sdhci_runtime_pm_get(host); | 1854 | if (!mmc->card || mmc->card->type == MMC_TYPE_SDIO) |
1993 | sdhci_do_enable_preset_value(host, enable); | 1855 | return 0; |
1994 | sdhci_runtime_pm_put(host); | 1856 | |
1857 | if (mmc->ios.clock) { | ||
1858 | if (host->ops->set_clock) | ||
1859 | host->ops->set_clock(host, mmc->ios.clock); | ||
1860 | sdhci_set_clock(host, mmc->ios.clock); | ||
1861 | } | ||
1862 | |||
1863 | return 0; | ||
1995 | } | 1864 | } |
1996 | 1865 | ||
1997 | static void sdhci_card_event(struct mmc_host *mmc) | 1866 | int sdhci_disable(struct mmc_host *mmc, int lazy) |
1998 | { | 1867 | { |
1999 | struct sdhci_host *host = mmc_priv(mmc); | 1868 | struct sdhci_host *host = mmc_priv(mmc); |
2000 | unsigned long flags; | ||
2001 | |||
2002 | spin_lock_irqsave(&host->lock, flags); | ||
2003 | 1869 | ||
2004 | /* Check host->mrq first in case we are runtime suspended */ | 1870 | if (!mmc->card || mmc->card->type == MMC_TYPE_SDIO) |
2005 | if (host->mrq && | 1871 | return 0; |
2006 | !(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) { | ||
2007 | pr_err("%s: Card removed during transfer!\n", | ||
2008 | mmc_hostname(host->mmc)); | ||
2009 | pr_err("%s: Resetting controller.\n", | ||
2010 | mmc_hostname(host->mmc)); | ||
2011 | |||
2012 | sdhci_reset(host, SDHCI_RESET_CMD); | ||
2013 | sdhci_reset(host, SDHCI_RESET_DATA); | ||
2014 | 1872 | ||
2015 | host->mrq->cmd->error = -ENOMEDIUM; | 1873 | sdhci_set_clock(host, 0); |
2016 | tasklet_schedule(&host->finish_tasklet); | 1874 | if (host->ops->set_clock) |
2017 | } | 1875 | host->ops->set_clock(host, 0); |
2018 | 1876 | ||
2019 | spin_unlock_irqrestore(&host->lock, flags); | 1877 | return 0; |
2020 | } | 1878 | } |
2021 | 1879 | ||
2022 | static const struct mmc_host_ops sdhci_ops = { | 1880 | static const struct mmc_host_ops sdhci_ops = { |
2023 | .request = sdhci_request, | 1881 | .request = sdhci_request, |
2024 | .set_ios = sdhci_set_ios, | 1882 | .set_ios = sdhci_set_ios, |
2025 | .get_ro = sdhci_get_ro, | 1883 | .get_ro = sdhci_get_ro, |
2026 | .hw_reset = sdhci_hw_reset, | 1884 | .enable = sdhci_enable, |
1885 | .disable = sdhci_disable, | ||
2027 | .enable_sdio_irq = sdhci_enable_sdio_irq, | 1886 | .enable_sdio_irq = sdhci_enable_sdio_irq, |
2028 | .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, | 1887 | .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, |
2029 | .execute_tuning = sdhci_execute_tuning, | 1888 | .execute_tuning = sdhci_execute_tuning, |
2030 | .enable_preset_value = sdhci_enable_preset_value, | 1889 | .enable_preset_value = sdhci_enable_preset_value, |
2031 | .card_event = sdhci_card_event, | ||
2032 | }; | 1890 | }; |
2033 | 1891 | ||
2034 | /*****************************************************************************\ | 1892 | /*****************************************************************************\ |
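The new sdhci_enable()/sdhci_disable() hooks give the MMC core a simple clock-gating pair: the bus clock is reprogrammed when the host is about to be used and forced to zero when it goes idle, and SDIO cards are skipped because they need a live clock for card interrupts. Sketched with invented types:

#include <stdbool.h>
#include <stdio.h>

struct gated_host {
        bool is_sdio;               /* SDIO relies on a live clock for IRQs */
        unsigned int ios_clock;     /* last requested bus clock, Hz */
        unsigned int cur_clock;     /* what the controller is running at */
};

static void program_clock(struct gated_host *h, unsigned int hz)
{
        h->cur_clock = hz;
        printf("clock -> %u Hz\n", hz);
}

/* called when the core is about to use the host again */
static void host_enable(struct gated_host *h)
{
        if (h->is_sdio)
                return;             /* never gated, nothing to do */
        if (h->ios_clock)
                program_clock(h, h->ios_clock);
}

/* called when the core has been idle long enough to gate the clock */
static void host_disable(struct gated_host *h)
{
        if (h->is_sdio)
                return;
        program_clock(h, 0);
}

int main(void)
{
        struct gated_host h = { false, 25000000, 25000000 };
        host_disable(&h);
        host_enable(&h);
        return 0;
}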
@@ -2039,9 +1897,29 @@ static const struct mmc_host_ops sdhci_ops = { | |||
2039 | 1897 | ||
2040 | static void sdhci_tasklet_card(unsigned long param) | 1898 | static void sdhci_tasklet_card(unsigned long param) |
2041 | { | 1899 | { |
2042 | struct sdhci_host *host = (struct sdhci_host*)param; | 1900 | struct sdhci_host *host; |
1901 | unsigned long flags; | ||
1902 | |||
1903 | host = (struct sdhci_host*)param; | ||
1904 | |||
1905 | spin_lock_irqsave(&host->lock, flags); | ||
1906 | |||
1907 | if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) { | ||
1908 | if (host->mrq) { | ||
1909 | printk(KERN_ERR "%s: Card removed during transfer!\n", | ||
1910 | mmc_hostname(host->mmc)); | ||
1911 | printk(KERN_ERR "%s: Resetting controller.\n", | ||
1912 | mmc_hostname(host->mmc)); | ||
2043 | 1913 | ||
2044 | sdhci_card_event(host->mmc); | 1914 | sdhci_reset(host, SDHCI_RESET_CMD); |
1915 | sdhci_reset(host, SDHCI_RESET_DATA); | ||
1916 | |||
1917 | host->mrq->cmd->error = -ENOMEDIUM; | ||
1918 | tasklet_schedule(&host->finish_tasklet); | ||
1919 | } | ||
1920 | } | ||
1921 | |||
1922 | spin_unlock_irqrestore(&host->lock, flags); | ||
2045 | 1923 | ||
2046 | mmc_detect_change(host->mmc, msecs_to_jiffies(200)); | 1924 | mmc_detect_change(host->mmc, msecs_to_jiffies(200)); |
2047 | } | 1925 | } |
@@ -2054,16 +1932,14 @@ static void sdhci_tasklet_finish(unsigned long param) | |||
2054 | 1932 | ||
2055 | host = (struct sdhci_host*)param; | 1933 | host = (struct sdhci_host*)param; |
2056 | 1934 | ||
2057 | spin_lock_irqsave(&host->lock, flags); | ||
2058 | |||
2059 | /* | 1935 | /* |
2060 | * If this tasklet gets rescheduled while running, it will | 1936 | * If this tasklet gets rescheduled while running, it will |
2061 | * be run again afterwards but without any active request. | 1937 | * be run again afterwards but without any active request. |
2062 | */ | 1938 | */ |
2063 | if (!host->mrq) { | 1939 | if (!host->mrq) |
2064 | spin_unlock_irqrestore(&host->lock, flags); | ||
2065 | return; | 1940 | return; |
2066 | } | 1941 | |
1942 | spin_lock_irqsave(&host->lock, flags); | ||
2067 | 1943 | ||
2068 | del_timer(&host->timer); | 1944 | del_timer(&host->timer); |
2069 | 1945 | ||
@@ -2086,6 +1962,8 @@ static void sdhci_tasklet_finish(unsigned long param) | |||
2086 | /* This is to force an update */ | 1962 | /* This is to force an update */ |
2087 | clock = host->clock; | 1963 | clock = host->clock; |
2088 | host->clock = 0; | 1964 | host->clock = 0; |
1965 | if (host->ops->set_clock) | ||
1966 | host->ops->set_clock(host, clock); | ||
2089 | sdhci_set_clock(host, clock); | 1967 | sdhci_set_clock(host, clock); |
2090 | } | 1968 | } |
2091 | 1969 | ||
@@ -2107,7 +1985,6 @@ static void sdhci_tasklet_finish(unsigned long param) | |||
2107 | spin_unlock_irqrestore(&host->lock, flags); | 1985 | spin_unlock_irqrestore(&host->lock, flags); |
2108 | 1986 | ||
2109 | mmc_request_done(host->mmc, mrq); | 1987 | mmc_request_done(host->mmc, mrq); |
2110 | sdhci_runtime_pm_put(host); | ||
2111 | } | 1988 | } |
2112 | 1989 | ||
2113 | static void sdhci_timeout_timer(unsigned long data) | 1990 | static void sdhci_timeout_timer(unsigned long data) |
@@ -2120,7 +1997,7 @@ static void sdhci_timeout_timer(unsigned long data) | |||
2120 | spin_lock_irqsave(&host->lock, flags); | 1997 | spin_lock_irqsave(&host->lock, flags); |
2121 | 1998 | ||
2122 | if (host->mrq) { | 1999 | if (host->mrq) { |
2123 | pr_err("%s: Timeout waiting for hardware " | 2000 | printk(KERN_ERR "%s: Timeout waiting for hardware " |
2124 | "interrupt.\n", mmc_hostname(host->mmc)); | 2001 | "interrupt.\n", mmc_hostname(host->mmc)); |
2125 | sdhci_dumpregs(host); | 2002 | sdhci_dumpregs(host); |
2126 | 2003 | ||
@@ -2166,7 +2043,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask) | |||
2166 | BUG_ON(intmask == 0); | 2043 | BUG_ON(intmask == 0); |
2167 | 2044 | ||
2168 | if (!host->cmd) { | 2045 | if (!host->cmd) { |
2169 | pr_err("%s: Got command interrupt 0x%08x even " | 2046 | printk(KERN_ERR "%s: Got command interrupt 0x%08x even " |
2170 | "though no command operation was in progress.\n", | 2047 | "though no command operation was in progress.\n", |
2171 | mmc_hostname(host->mmc), (unsigned)intmask); | 2048 | mmc_hostname(host->mmc), (unsigned)intmask); |
2172 | sdhci_dumpregs(host); | 2049 | sdhci_dumpregs(host); |
@@ -2241,14 +2118,12 @@ static void sdhci_show_adma_error(struct sdhci_host *host) { } | |||
2241 | 2118 | ||
2242 | static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) | 2119 | static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) |
2243 | { | 2120 | { |
2244 | u32 command; | ||
2245 | BUG_ON(intmask == 0); | 2121 | BUG_ON(intmask == 0); |
2246 | 2122 | ||
2247 | /* CMD19 generates _only_ Buffer Read Ready interrupt */ | 2123 | /* CMD19 generates _only_ Buffer Read Ready interrupt */ |
2248 | if (intmask & SDHCI_INT_DATA_AVAIL) { | 2124 | if (intmask & SDHCI_INT_DATA_AVAIL) { |
2249 | command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); | 2125 | if (SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) == |
2250 | if (command == MMC_SEND_TUNING_BLOCK || | 2126 | MMC_SEND_TUNING_BLOCK) { |
2251 | command == MMC_SEND_TUNING_BLOCK_HS200) { | ||
2252 | host->tuning_done = 1; | 2127 | host->tuning_done = 1; |
2253 | wake_up(&host->buf_ready_int); | 2128 | wake_up(&host->buf_ready_int); |
2254 | return; | 2129 | return; |
@@ -2268,7 +2143,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) | |||
2268 | } | 2143 | } |
2269 | } | 2144 | } |
2270 | 2145 | ||
2271 | pr_err("%s: Got data interrupt 0x%08x even " | 2146 | printk(KERN_ERR "%s: Got data interrupt 0x%08x even " |
2272 | "though no data operation was in progress.\n", | 2147 | "though no data operation was in progress.\n", |
2273 | mmc_hostname(host->mmc), (unsigned)intmask); | 2148 | mmc_hostname(host->mmc), (unsigned)intmask); |
2274 | sdhci_dumpregs(host); | 2149 | sdhci_dumpregs(host); |
@@ -2285,11 +2160,9 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) | |||
2285 | != MMC_BUS_TEST_R) | 2160 | != MMC_BUS_TEST_R) |
2286 | host->data->error = -EILSEQ; | 2161 | host->data->error = -EILSEQ; |
2287 | else if (intmask & SDHCI_INT_ADMA_ERROR) { | 2162 | else if (intmask & SDHCI_INT_ADMA_ERROR) { |
2288 | pr_err("%s: ADMA error\n", mmc_hostname(host->mmc)); | 2163 | printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc)); |
2289 | sdhci_show_adma_error(host); | 2164 | sdhci_show_adma_error(host); |
2290 | host->data->error = -EIO; | 2165 | host->data->error = -EIO; |
2291 | if (host->ops->adma_workaround) | ||
2292 | host->ops->adma_workaround(host, intmask); | ||
2293 | } | 2166 | } |
2294 | 2167 | ||
2295 | if (host->data->error) | 2168 | if (host->data->error) |
@@ -2343,19 +2216,12 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) | |||
2343 | static irqreturn_t sdhci_irq(int irq, void *dev_id) | 2216 | static irqreturn_t sdhci_irq(int irq, void *dev_id) |
2344 | { | 2217 | { |
2345 | irqreturn_t result; | 2218 | irqreturn_t result; |
2346 | struct sdhci_host *host = dev_id; | 2219 | struct sdhci_host* host = dev_id; |
2347 | u32 intmask, unexpected = 0; | 2220 | u32 intmask; |
2348 | int cardint = 0, max_loops = 16; | 2221 | int cardint = 0; |
2349 | 2222 | ||
2350 | spin_lock(&host->lock); | 2223 | spin_lock(&host->lock); |
2351 | 2224 | ||
2352 | if (host->runtime_suspended) { | ||
2353 | spin_unlock(&host->lock); | ||
2354 | pr_warning("%s: got irq while runtime suspended\n", | ||
2355 | mmc_hostname(host->mmc)); | ||
2356 | return IRQ_HANDLED; | ||
2357 | } | ||
2358 | |||
2359 | intmask = sdhci_readl(host, SDHCI_INT_STATUS); | 2225 | intmask = sdhci_readl(host, SDHCI_INT_STATUS); |
2360 | 2226 | ||
2361 | if (!intmask || intmask == 0xffffffff) { | 2227 | if (!intmask || intmask == 0xffffffff) { |
@@ -2363,7 +2229,6 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id) | |||
2363 | goto out; | 2229 | goto out; |
2364 | } | 2230 | } |
2365 | 2231 | ||
2366 | again: | ||
2367 | DBG("*** %s got interrupt: 0x%08x\n", | 2232 | DBG("*** %s got interrupt: 0x%08x\n", |
2368 | mmc_hostname(host->mmc), intmask); | 2233 | mmc_hostname(host->mmc), intmask); |
2369 | 2234 | ||
@@ -2409,7 +2274,7 @@ again: | |||
2409 | intmask &= ~SDHCI_INT_ERROR; | 2274 | intmask &= ~SDHCI_INT_ERROR; |
2410 | 2275 | ||
2411 | if (intmask & SDHCI_INT_BUS_POWER) { | 2276 | if (intmask & SDHCI_INT_BUS_POWER) { |
2412 | pr_err("%s: Card is consuming too much power!\n", | 2277 | printk(KERN_ERR "%s: Card is consuming too much power!\n", |
2413 | mmc_hostname(host->mmc)); | 2278 | mmc_hostname(host->mmc)); |
2414 | sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS); | 2279 | sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS); |
2415 | } | 2280 | } |
@@ -2422,23 +2287,19 @@ again: | |||
2422 | intmask &= ~SDHCI_INT_CARD_INT; | 2287 | intmask &= ~SDHCI_INT_CARD_INT; |
2423 | 2288 | ||
2424 | if (intmask) { | 2289 | if (intmask) { |
2425 | unexpected |= intmask; | 2290 | printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n", |
2291 | mmc_hostname(host->mmc), intmask); | ||
2292 | sdhci_dumpregs(host); | ||
2293 | |||
2426 | sdhci_writel(host, intmask, SDHCI_INT_STATUS); | 2294 | sdhci_writel(host, intmask, SDHCI_INT_STATUS); |
2427 | } | 2295 | } |
2428 | 2296 | ||
2429 | result = IRQ_HANDLED; | 2297 | result = IRQ_HANDLED; |
2430 | 2298 | ||
2431 | intmask = sdhci_readl(host, SDHCI_INT_STATUS); | 2299 | mmiowb(); |
2432 | if (intmask && --max_loops) | ||
2433 | goto again; | ||
2434 | out: | 2300 | out: |
2435 | spin_unlock(&host->lock); | 2301 | spin_unlock(&host->lock); |
2436 | 2302 | ||
2437 | if (unexpected) { | ||
2438 | pr_err("%s: Unexpected interrupt 0x%08x.\n", | ||
2439 | mmc_hostname(host->mmc), unexpected); | ||
2440 | sdhci_dumpregs(host); | ||
2441 | } | ||
2442 | /* | 2303 | /* |
2443 | * We have to delay this as it calls back into the driver. | 2304 | * We have to delay this as it calls back into the driver. |
2444 | */ | 2305 | */ |
@@ -2456,35 +2317,35 @@ out: | |||
2456 | 2317 | ||
2457 | #ifdef CONFIG_PM | 2318 | #ifdef CONFIG_PM |
2458 | 2319 | ||
2459 | int sdhci_suspend_host(struct sdhci_host *host) | 2320 | int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state) |
2460 | { | 2321 | { |
2461 | int ret; | 2322 | int ret = 0; |
2462 | 2323 | struct mmc_host *mmc = host->mmc; | |
2463 | if (host->ops->platform_suspend) | ||
2464 | host->ops->platform_suspend(host); | ||
2465 | 2324 | ||
2466 | sdhci_disable_card_detection(host); | 2325 | sdhci_disable_card_detection(host); |
2467 | 2326 | ||
2468 | /* Disable tuning since we are suspending */ | 2327 | /* Disable tuning since we are suspending */ |
2469 | if (host->flags & SDHCI_USING_RETUNING_TIMER) { | 2328 | if (host->version >= SDHCI_SPEC_300 && host->tuning_count && |
2470 | del_timer_sync(&host->tuning_timer); | 2329 | host->tuning_mode == SDHCI_TUNING_MODE_1) { |
2471 | host->flags &= ~SDHCI_NEEDS_RETUNING; | 2330 | host->flags &= ~SDHCI_NEEDS_RETUNING; |
2331 | mod_timer(&host->tuning_timer, jiffies + | ||
2332 | host->tuning_count * HZ); | ||
2472 | } | 2333 | } |
2473 | 2334 | ||
2474 | ret = mmc_suspend_host(host->mmc); | 2335 | if (mmc->card) |
2475 | if (ret) { | 2336 | ret = mmc_suspend_host(host->mmc); |
2476 | if (host->flags & SDHCI_USING_RETUNING_TIMER) { | ||
2477 | host->flags |= SDHCI_NEEDS_RETUNING; | ||
2478 | mod_timer(&host->tuning_timer, jiffies + | ||
2479 | host->tuning_count * HZ); | ||
2480 | } | ||
2481 | 2337 | ||
2482 | sdhci_enable_card_detection(host); | 2338 | if (mmc->pm_flags & MMC_PM_KEEP_POWER) |
2339 | host->card_int_set = sdhci_readl(host, SDHCI_INT_ENABLE) & | ||
2340 | SDHCI_INT_CARD_INT; | ||
2483 | 2341 | ||
2484 | return ret; | 2342 | sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK); |
2485 | } | ||
2486 | 2343 | ||
2487 | free_irq(host->irq, host); | 2344 | if (host->vmmc) |
2345 | ret = regulator_disable(host->vmmc); | ||
2346 | |||
2347 | if (host->irq) | ||
2348 | disable_irq(host->irq); | ||
2488 | 2349 | ||
2489 | return ret; | 2350 | return ret; |
2490 | } | 2351 | } |
@@ -2493,38 +2354,41 @@ EXPORT_SYMBOL_GPL(sdhci_suspend_host); | |||
2493 | 2354 | ||
2494 | int sdhci_resume_host(struct sdhci_host *host) | 2355 | int sdhci_resume_host(struct sdhci_host *host) |
2495 | { | 2356 | { |
2496 | int ret; | 2357 | int ret = 0; |
2358 | struct mmc_host *mmc = host->mmc; | ||
2359 | |||
2360 | if (host->vmmc) { | ||
2361 | int ret = regulator_enable(host->vmmc); | ||
2362 | if (ret) | ||
2363 | return ret; | ||
2364 | } | ||
2365 | |||
2497 | 2366 | ||
2498 | if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { | 2367 | if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { |
2499 | if (host->ops->enable_dma) | 2368 | if (host->ops->enable_dma) |
2500 | host->ops->enable_dma(host); | 2369 | host->ops->enable_dma(host); |
2501 | } | 2370 | } |
2502 | 2371 | ||
2503 | ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, | 2372 | if (host->irq) |
2504 | mmc_hostname(host->mmc), host); | 2373 | enable_irq(host->irq); |
2505 | if (ret) | ||
2506 | return ret; | ||
2507 | 2374 | ||
2508 | if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) && | 2375 | sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); |
2509 | (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { | 2376 | mmiowb(); |
2510 | /* Card keeps power but host controller does not */ | 2377 | |
2511 | sdhci_init(host, 0); | 2378 | if (mmc->card) { |
2512 | host->pwr = 0; | 2379 | ret = mmc_resume_host(host->mmc); |
2513 | host->clock = 0; | 2380 | /* Enable card interrupt as it is overwritten in sdhci_init */ |
2514 | sdhci_do_set_ios(host, &host->mmc->ios); | 2381 | if ((mmc->caps & MMC_CAP_SDIO_IRQ) && |
2515 | } else { | 2382 | (mmc->pm_flags & MMC_PM_KEEP_POWER)) |
2516 | sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); | 2383 | if (host->card_int_set) |
2517 | mmiowb(); | 2384 | mmc->ops->enable_sdio_irq(mmc, true); |
2518 | } | 2385 | } |
2519 | 2386 | ||
2520 | ret = mmc_resume_host(host->mmc); | ||
2521 | sdhci_enable_card_detection(host); | 2387 | sdhci_enable_card_detection(host); |
2522 | 2388 | ||
2523 | if (host->ops->platform_resume) | ||
2524 | host->ops->platform_resume(host); | ||
2525 | |||
2526 | /* Set the re-tuning expiration flag */ | 2389 | /* Set the re-tuning expiration flag */ |
2527 | if (host->flags & SDHCI_USING_RETUNING_TIMER) | 2390 | if ((host->version >= SDHCI_SPEC_300) && host->tuning_count && |
2391 | (host->tuning_mode == SDHCI_TUNING_MODE_1)) | ||
2528 | host->flags |= SDHCI_NEEDS_RETUNING; | 2392 | host->flags |= SDHCI_NEEDS_RETUNING; |
2529 | 2393 | ||
2530 | return ret; | 2394 | return ret; |
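Across suspend/resume the patch remembers whether the SDIO card interrupt was enabled (host->card_int_set) when the card keeps power, masks everything for suspend, and re-enables that one interrupt after sdhci_init() rewrites the mask on resume. A toy model of that save/restore; the interrupt bit mirrors SDHCI_INT_CARD_INT, while the default mask value below is purely illustrative.

#include <stdbool.h>
#include <stdio.h>

#define INT_CARD_INT 0x0100         /* mirrors SDHCI_INT_CARD_INT */

static unsigned int int_enable;     /* stands in for the SDHCI_INT_ENABLE register */
static bool saved_card_int;

static void suspend(bool keep_power)
{
        if (keep_power)
                saved_card_int = int_enable & INT_CARD_INT;  /* remember it */
        int_enable = 0;             /* mask everything for suspend */
}

static void resume(bool keep_power)
{
        int_enable = 0x02ff;        /* init rewrites the mask (value illustrative) */
        if (keep_power && saved_card_int)
                int_enable |= INT_CARD_INT;                  /* put the SDIO IRQ back */
        printf("INT_ENABLE = 0x%04x\n", int_enable);
}

int main(void)
{
        int_enable = INT_CARD_INT;
        suspend(true);
        resume(true);
        return 0;
}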
@@ -2544,88 +2408,6 @@ EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups); | |||
2544 | 2408 | ||
2545 | #endif /* CONFIG_PM */ | 2409 | #endif /* CONFIG_PM */ |
2546 | 2410 | ||
2547 | #ifdef CONFIG_PM_RUNTIME | ||
2548 | |||
2549 | static int sdhci_runtime_pm_get(struct sdhci_host *host) | ||
2550 | { | ||
2551 | return pm_runtime_get_sync(host->mmc->parent); | ||
2552 | } | ||
2553 | |||
2554 | static int sdhci_runtime_pm_put(struct sdhci_host *host) | ||
2555 | { | ||
2556 | pm_runtime_mark_last_busy(host->mmc->parent); | ||
2557 | return pm_runtime_put_autosuspend(host->mmc->parent); | ||
2558 | } | ||
2559 | |||
2560 | int sdhci_runtime_suspend_host(struct sdhci_host *host) | ||
2561 | { | ||
2562 | unsigned long flags; | ||
2563 | int ret = 0; | ||
2564 | |||
2565 | /* Disable tuning since we are suspending */ | ||
2566 | if (host->flags & SDHCI_USING_RETUNING_TIMER) { | ||
2567 | del_timer_sync(&host->tuning_timer); | ||
2568 | host->flags &= ~SDHCI_NEEDS_RETUNING; | ||
2569 | } | ||
2570 | |||
2571 | spin_lock_irqsave(&host->lock, flags); | ||
2572 | sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK); | ||
2573 | spin_unlock_irqrestore(&host->lock, flags); | ||
2574 | |||
2575 | synchronize_irq(host->irq); | ||
2576 | |||
2577 | spin_lock_irqsave(&host->lock, flags); | ||
2578 | host->runtime_suspended = true; | ||
2579 | spin_unlock_irqrestore(&host->lock, flags); | ||
2580 | |||
2581 | return ret; | ||
2582 | } | ||
2583 | EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host); | ||
2584 | |||
2585 | int sdhci_runtime_resume_host(struct sdhci_host *host) | ||
2586 | { | ||
2587 | unsigned long flags; | ||
2588 | int ret = 0, host_flags = host->flags; | ||
2589 | |||
2590 | if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { | ||
2591 | if (host->ops->enable_dma) | ||
2592 | host->ops->enable_dma(host); | ||
2593 | } | ||
2594 | |||
2595 | sdhci_init(host, 0); | ||
2596 | |||
2597 | /* Force clock and power re-program */ | ||
2598 | host->pwr = 0; | ||
2599 | host->clock = 0; | ||
2600 | sdhci_do_set_ios(host, &host->mmc->ios); | ||
2601 | |||
2602 | sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios); | ||
2603 | if (host_flags & SDHCI_PV_ENABLED) | ||
2604 | sdhci_do_enable_preset_value(host, true); | ||
2605 | |||
2606 | /* Set the re-tuning expiration flag */ | ||
2607 | if (host->flags & SDHCI_USING_RETUNING_TIMER) | ||
2608 | host->flags |= SDHCI_NEEDS_RETUNING; | ||
2609 | |||
2610 | spin_lock_irqsave(&host->lock, flags); | ||
2611 | |||
2612 | host->runtime_suspended = false; | ||
2613 | |||
2614 | /* Enable SDIO IRQ */ | ||
2615 | if ((host->flags & SDHCI_SDIO_IRQ_ENABLED)) | ||
2616 | sdhci_enable_sdio_irq_nolock(host, true); | ||
2617 | |||
2618 | /* Enable Card Detection */ | ||
2619 | sdhci_enable_card_detection(host); | ||
2620 | |||
2621 | spin_unlock_irqrestore(&host->lock, flags); | ||
2622 | |||
2623 | return ret; | ||
2624 | } | ||
2625 | EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host); | ||
2626 | |||
2627 | #endif | ||
2628 | |||
2629 | /*****************************************************************************\ | 2411 | /*****************************************************************************\ |
2630 | * * | 2412 | * * |
2631 | * Device allocation/registration * | 2413 | * Device allocation/registration * |
@@ -2655,7 +2437,7 @@ EXPORT_SYMBOL_GPL(sdhci_alloc_host); | |||
2655 | int sdhci_add_host(struct sdhci_host *host) | 2437 | int sdhci_add_host(struct sdhci_host *host) |
2656 | { | 2438 | { |
2657 | struct mmc_host *mmc; | 2439 | struct mmc_host *mmc; |
2658 | u32 caps[2] = {0, 0}; | 2440 | u32 caps[2]; |
2659 | u32 max_current_caps; | 2441 | u32 max_current_caps; |
2660 | unsigned int ocr_avail; | 2442 | unsigned int ocr_avail; |
2661 | int ret; | 2443 | int ret; |
@@ -2668,8 +2450,6 @@ int sdhci_add_host(struct sdhci_host *host) | |||
2668 | 2450 | ||
2669 | if (debug_quirks) | 2451 | if (debug_quirks) |
2670 | host->quirks = debug_quirks; | 2452 | host->quirks = debug_quirks; |
2671 | if (debug_quirks2) | ||
2672 | host->quirks2 = debug_quirks2; | ||
2673 | 2453 | ||
2674 | sdhci_reset(host, SDHCI_RESET_ALL); | 2454 | sdhci_reset(host, SDHCI_RESET_ALL); |
2675 | 2455 | ||
@@ -2677,7 +2457,7 @@ int sdhci_add_host(struct sdhci_host *host) | |||
2677 | host->version = (host->version & SDHCI_SPEC_VER_MASK) | 2457 | host->version = (host->version & SDHCI_SPEC_VER_MASK) |
2678 | >> SDHCI_SPEC_VER_SHIFT; | 2458 | >> SDHCI_SPEC_VER_SHIFT; |
2679 | if (host->version > SDHCI_SPEC_300) { | 2459 | if (host->version > SDHCI_SPEC_300) { |
2680 | pr_err("%s: Unknown controller version (%d). " | 2460 | printk(KERN_ERR "%s: Unknown controller version (%d). " |
2681 | "You may experience problems.\n", mmc_hostname(mmc), | 2461 | "You may experience problems.\n", mmc_hostname(mmc), |
2682 | host->version); | 2462 | host->version); |
2683 | } | 2463 | } |
@@ -2685,10 +2465,8 @@ int sdhci_add_host(struct sdhci_host *host) | |||
2685 | caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps : | 2465 | caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps : |
2686 | sdhci_readl(host, SDHCI_CAPABILITIES); | 2466 | sdhci_readl(host, SDHCI_CAPABILITIES); |
2687 | 2467 | ||
2688 | if (host->version >= SDHCI_SPEC_300) | 2468 | caps[1] = (host->version >= SDHCI_SPEC_300) ? |
2689 | caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? | 2469 | sdhci_readl(host, SDHCI_CAPABILITIES_1) : 0; |
2690 | host->caps1 : | ||
2691 | sdhci_readl(host, SDHCI_CAPABILITIES_1); | ||
2692 | 2470 | ||
2693 | if (host->quirks & SDHCI_QUIRK_FORCE_DMA) | 2471 | if (host->quirks & SDHCI_QUIRK_FORCE_DMA) |
2694 | host->flags |= SDHCI_USE_SDMA; | 2472 | host->flags |= SDHCI_USE_SDMA; |
@@ -2716,7 +2494,7 @@ int sdhci_add_host(struct sdhci_host *host) | |||
2716 | if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { | 2494 | if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { |
2717 | if (host->ops->enable_dma) { | 2495 | if (host->ops->enable_dma) { |
2718 | if (host->ops->enable_dma(host)) { | 2496 | if (host->ops->enable_dma(host)) { |
2719 | pr_warning("%s: No suitable DMA " | 2497 | printk(KERN_WARNING "%s: No suitable DMA " |
2720 | "available. Falling back to PIO.\n", | 2498 | "available. Falling back to PIO.\n", |
2721 | mmc_hostname(mmc)); | 2499 | mmc_hostname(mmc)); |
2722 | host->flags &= | 2500 | host->flags &= |
@@ -2736,7 +2514,7 @@ int sdhci_add_host(struct sdhci_host *host) | |||
2736 | if (!host->adma_desc || !host->align_buffer) { | 2514 | if (!host->adma_desc || !host->align_buffer) { |
2737 | kfree(host->adma_desc); | 2515 | kfree(host->adma_desc); |
2738 | kfree(host->align_buffer); | 2516 | kfree(host->align_buffer); |
2739 | pr_warning("%s: Unable to allocate ADMA " | 2517 | printk(KERN_WARNING "%s: Unable to allocate ADMA " |
2740 | "buffers. Falling back to standard DMA.\n", | 2518 | "buffers. Falling back to standard DMA.\n", |
2741 | mmc_hostname(mmc)); | 2519 | mmc_hostname(mmc)); |
2742 | host->flags &= ~SDHCI_USE_ADMA; | 2520 | host->flags &= ~SDHCI_USE_ADMA; |
@@ -2764,7 +2542,8 @@ int sdhci_add_host(struct sdhci_host *host) | |||
2764 | if (host->max_clk == 0 || host->quirks & | 2542 | if (host->max_clk == 0 || host->quirks & |
2765 | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) { | 2543 | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) { |
2766 | if (!host->ops->get_max_clock) { | 2544 | if (!host->ops->get_max_clock) { |
2767 | pr_err("%s: Hardware doesn't specify base clock " | 2545 | printk(KERN_ERR |
2546 | "%s: Hardware doesn't specify base clock " | ||
2768 | "frequency.\n", mmc_hostname(mmc)); | 2547 | "frequency.\n", mmc_hostname(mmc)); |
2769 | return -ENODEV; | 2548 | return -ENODEV; |
2770 | } | 2549 | } |
@@ -2810,7 +2589,8 @@ int sdhci_add_host(struct sdhci_host *host) | |||
2810 | host->timeout_clk = host->ops->get_timeout_clock(host); | 2589 | host->timeout_clk = host->ops->get_timeout_clock(host); |
2811 | } else if (!(host->quirks & | 2590 | } else if (!(host->quirks & |
2812 | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { | 2591 | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { |
2813 | pr_err("%s: Hardware doesn't specify timeout clock " | 2592 | printk(KERN_ERR |
2593 | "%s: Hardware doesn't specify timeout clock " | ||
2814 | "frequency.\n", mmc_hostname(mmc)); | 2594 | "frequency.\n", mmc_hostname(mmc)); |
2815 | return -ENODEV; | 2595 | return -ENODEV; |
2816 | } | 2596 | } |
@@ -2821,9 +2601,8 @@ int sdhci_add_host(struct sdhci_host *host) | |||
2821 | if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) | 2601 | if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) |
2822 | host->timeout_clk = mmc->f_max / 1000; | 2602 | host->timeout_clk = mmc->f_max / 1000; |
2823 | 2603 | ||
2824 | mmc->max_discard_to = (1 << 27) / host->timeout_clk; | 2604 | if (!(host->quirks & SDHCI_QUIRK_NO_CALC_MAX_DISCARD_TO)) |
2825 | 2605 | mmc->max_discard_to = (1 << 27) / host->timeout_clk; | |
2826 | mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; | ||
2827 | 2606 | ||
2828 | if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) | 2607 | if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) |
2829 | host->flags |= SDHCI_AUTO_CMD12; | 2608 | host->flags |= SDHCI_AUTO_CMD12; |
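The max_discard_to line divides 2^27 by the timeout clock because the SDHCI data-timeout counter is at most 2^27 ticks; with the timeout clock kept in kHz the quotient comes out in milliseconds. A worked example, assuming an illustrative 48 MHz timeout clock:

    /* Worked example of the max_discard_to calculation above. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int timeout_clk_khz = 48000;          /* 48 MHz timeout clock */
        unsigned int max_discard_ms  = (1u << 27) / timeout_clk_khz;

        /* 134217728 / 48000 ~= 2796 ms, i.e. roughly 2.8 seconds */
        printf("max discard timeout: %u ms\n", max_discard_ms);
        return 0;
    }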
@@ -2848,40 +2627,15 @@ int sdhci_add_host(struct sdhci_host *host) | |||
2848 | if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) | 2627 | if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) |
2849 | mmc->caps |= MMC_CAP_4_BIT_DATA; | 2628 | mmc->caps |= MMC_CAP_4_BIT_DATA; |
2850 | 2629 | ||
2851 | if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23) | ||
2852 | mmc->caps &= ~MMC_CAP_CMD23; | ||
2853 | |||
2854 | if (caps[0] & SDHCI_CAN_DO_HISPD) | 2630 | if (caps[0] & SDHCI_CAN_DO_HISPD) |
2855 | mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; | 2631 | mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; |
2856 | 2632 | ||
2857 | if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && | 2633 | if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && |
2858 | !(host->mmc->caps & MMC_CAP_NONREMOVABLE)) | 2634 | mmc_card_is_removable(mmc) && !(host->ops->get_cd)) |
2859 | mmc->caps |= MMC_CAP_NEEDS_POLL; | 2635 | mmc->caps |= MMC_CAP_NEEDS_POLL; |
2860 | 2636 | ||
2861 | /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */ | 2637 | /* UHS-I mode(s) supported by the host controller. */ |
2862 | host->vqmmc = regulator_get(mmc_dev(mmc), "vqmmc"); | 2638 | if (host->version >= SDHCI_SPEC_300) |
2863 | if (IS_ERR_OR_NULL(host->vqmmc)) { | ||
2864 | if (PTR_ERR(host->vqmmc) < 0) { | ||
2865 | pr_info("%s: no vqmmc regulator found\n", | ||
2866 | mmc_hostname(mmc)); | ||
2867 | host->vqmmc = NULL; | ||
2868 | } | ||
2869 | } else { | ||
2870 | regulator_enable(host->vqmmc); | ||
2871 | if (!regulator_is_supported_voltage(host->vqmmc, 1700000, | ||
2872 | 1950000)) | ||
2873 | caps[1] &= ~(SDHCI_SUPPORT_SDR104 | | ||
2874 | SDHCI_SUPPORT_SDR50 | | ||
2875 | SDHCI_SUPPORT_DDR50); | ||
2876 | } | ||
2877 | |||
2878 | if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) | ||
2879 | caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | | ||
2880 | SDHCI_SUPPORT_DDR50); | ||
2881 | |||
2882 | /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */ | ||
2883 | if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | | ||
2884 | SDHCI_SUPPORT_DDR50)) | ||
2885 | mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; | 2639 | mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; |
2886 | 2640 | ||
2887 | /* SDR104 supports also implies SDR50 support */ | 2641 | /* SDR104 supports also implies SDR50 support */ |
@@ -2893,14 +2647,10 @@ int sdhci_add_host(struct sdhci_host *host) | |||
2893 | if (caps[1] & SDHCI_SUPPORT_DDR50) | 2647 | if (caps[1] & SDHCI_SUPPORT_DDR50) |
2894 | mmc->caps |= MMC_CAP_UHS_DDR50; | 2648 | mmc->caps |= MMC_CAP_UHS_DDR50; |
2895 | 2649 | ||
2896 | /* Does the host need tuning for SDR50? */ | 2650 | /* Does the host needs tuning for SDR50? */ |
2897 | if (caps[1] & SDHCI_USE_SDR50_TUNING) | 2651 | if (caps[1] & SDHCI_USE_SDR50_TUNING) |
2898 | host->flags |= SDHCI_SDR50_NEEDS_TUNING; | 2652 | host->flags |= SDHCI_SDR50_NEEDS_TUNING; |
2899 | 2653 | ||
2900 | /* Does the host need tuning for HS200? */ | ||
2901 | if (mmc->caps2 & MMC_CAP2_HS200) | ||
2902 | host->flags |= SDHCI_HS200_NEEDS_TUNING; | ||
2903 | |||
2904 | /* Driver Type(s) (A, C, D) supported by the host */ | 2654 | /* Driver Type(s) (A, C, D) supported by the host */ |
2905 | if (caps[1] & SDHCI_DRIVER_TYPE_A) | 2655 | if (caps[1] & SDHCI_DRIVER_TYPE_A) |
2906 | mmc->caps |= MMC_CAP_DRIVER_TYPE_A; | 2656 | mmc->caps |= MMC_CAP_DRIVER_TYPE_A; |
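The UHS hunk above maps bits of the second capability word onto host capability flags, with the rules that any UHS bit implies SDR12/SDR25 and that SDR104 support implies SDR50. The sketch below reproduces that decode logic with placeholder bit values rather than the real sdhci.h definitions.

    /* Sketch of the UHS capability decode; all bit positions are invented. */
    #include <stdint.h>
    #include <stdio.h>

    #define CAP_SDR50   (1u << 0)
    #define CAP_SDR104  (1u << 1)
    #define CAP_DDR50   (1u << 2)

    #define HOST_SDR12  (1u << 0)
    #define HOST_SDR25  (1u << 1)
    #define HOST_SDR50  (1u << 2)
    #define HOST_SDR104 (1u << 3)
    #define HOST_DDR50  (1u << 4)

    static uint32_t decode_uhs(uint32_t caps1)
    {
        uint32_t host_caps = 0;

        /* any UHS-I mode implies SDR12 and SDR25 */
        if (caps1 & (CAP_SDR104 | CAP_SDR50 | CAP_DDR50))
            host_caps |= HOST_SDR12 | HOST_SDR25;
        if (caps1 & CAP_SDR104)
            host_caps |= HOST_SDR104 | HOST_SDR50;  /* SDR104 implies SDR50 */
        else if (caps1 & CAP_SDR50)
            host_caps |= HOST_SDR50;
        if (caps1 & CAP_DDR50)
            host_caps |= HOST_DDR50;
        return host_caps;
    }

    int main(void)
    {
        printf("caps for SDR104-only controller: 0x%x\n", decode_uhs(CAP_SDR104));
        return 0;
    }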
@@ -2925,31 +2675,6 @@ int sdhci_add_host(struct sdhci_host *host) | |||
2925 | SDHCI_RETUNING_MODE_SHIFT; | 2675 | SDHCI_RETUNING_MODE_SHIFT; |
2926 | 2676 | ||
2927 | ocr_avail = 0; | 2677 | ocr_avail = 0; |
2928 | |||
2929 | host->vmmc = regulator_get(mmc_dev(mmc), "vmmc"); | ||
2930 | if (IS_ERR_OR_NULL(host->vmmc)) { | ||
2931 | if (PTR_ERR(host->vmmc) < 0) { | ||
2932 | pr_info("%s: no vmmc regulator found\n", | ||
2933 | mmc_hostname(mmc)); | ||
2934 | host->vmmc = NULL; | ||
2935 | } | ||
2936 | } | ||
2937 | |||
2938 | #ifdef CONFIG_REGULATOR | ||
2939 | if (host->vmmc) { | ||
2940 | ret = regulator_is_supported_voltage(host->vmmc, 2700000, | ||
2941 | 3600000); | ||
2942 | if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_330))) | ||
2943 | caps[0] &= ~SDHCI_CAN_VDD_330; | ||
2944 | if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_300))) | ||
2945 | caps[0] &= ~SDHCI_CAN_VDD_300; | ||
2946 | ret = regulator_is_supported_voltage(host->vmmc, 1700000, | ||
2947 | 1950000); | ||
2948 | if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_180))) | ||
2949 | caps[0] &= ~SDHCI_CAN_VDD_180; | ||
2950 | } | ||
2951 | #endif /* CONFIG_REGULATOR */ | ||
2952 | |||
2953 | /* | 2678 | /* |
2954 | * According to SD Host Controller spec v3.00, if the Host System | 2679 | * According to SD Host Controller spec v3.00, if the Host System |
2955 | * can afford more than 150mA, Host Driver should set XPC to 1. Also | 2680 | * can afford more than 150mA, Host Driver should set XPC to 1. Also |
@@ -2958,45 +2683,55 @@ int sdhci_add_host(struct sdhci_host *host) | |||
2958 | * value. | 2683 | * value. |
2959 | */ | 2684 | */ |
2960 | max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT); | 2685 | max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT); |
2961 | if (!max_current_caps && host->vmmc) { | ||
2962 | u32 curr = regulator_get_current_limit(host->vmmc); | ||
2963 | if (curr > 0) { | ||
2964 | |||
2965 | /* convert to SDHCI_MAX_CURRENT format */ | ||
2966 | curr = curr/1000; /* convert to mA */ | ||
2967 | curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER; | ||
2968 | |||
2969 | curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT); | ||
2970 | max_current_caps = | ||
2971 | (curr << SDHCI_MAX_CURRENT_330_SHIFT) | | ||
2972 | (curr << SDHCI_MAX_CURRENT_300_SHIFT) | | ||
2973 | (curr << SDHCI_MAX_CURRENT_180_SHIFT); | ||
2974 | } | ||
2975 | } | ||
2976 | 2686 | ||
2977 | if (caps[0] & SDHCI_CAN_VDD_330) { | 2687 | if (caps[0] & SDHCI_CAN_VDD_330) { |
2688 | int max_current_330; | ||
2689 | |||
2978 | ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; | 2690 | ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; |
2979 | 2691 | ||
2980 | mmc->max_current_330 = ((max_current_caps & | 2692 | max_current_330 = ((max_current_caps & |
2981 | SDHCI_MAX_CURRENT_330_MASK) >> | 2693 | SDHCI_MAX_CURRENT_330_MASK) >> |
2982 | SDHCI_MAX_CURRENT_330_SHIFT) * | 2694 | SDHCI_MAX_CURRENT_330_SHIFT) * |
2983 | SDHCI_MAX_CURRENT_MULTIPLIER; | 2695 | SDHCI_MAX_CURRENT_MULTIPLIER; |
2696 | |||
2697 | if (max_current_330 > 150) | ||
2698 | mmc->caps |= MMC_CAP_SET_XPC_330; | ||
2984 | } | 2699 | } |
2985 | if (caps[0] & SDHCI_CAN_VDD_300) { | 2700 | if (caps[0] & SDHCI_CAN_VDD_300) { |
2701 | int max_current_300; | ||
2702 | |||
2986 | ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; | 2703 | ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; |
2987 | 2704 | ||
2988 | mmc->max_current_300 = ((max_current_caps & | 2705 | max_current_300 = ((max_current_caps & |
2989 | SDHCI_MAX_CURRENT_300_MASK) >> | 2706 | SDHCI_MAX_CURRENT_300_MASK) >> |
2990 | SDHCI_MAX_CURRENT_300_SHIFT) * | 2707 | SDHCI_MAX_CURRENT_300_SHIFT) * |
2991 | SDHCI_MAX_CURRENT_MULTIPLIER; | 2708 | SDHCI_MAX_CURRENT_MULTIPLIER; |
2709 | |||
2710 | if (max_current_300 > 150) | ||
2711 | mmc->caps |= MMC_CAP_SET_XPC_300; | ||
2992 | } | 2712 | } |
2993 | if (caps[0] & SDHCI_CAN_VDD_180) { | 2713 | if (caps[0] & SDHCI_CAN_VDD_180) { |
2714 | int max_current_180; | ||
2715 | |||
2994 | ocr_avail |= MMC_VDD_165_195; | 2716 | ocr_avail |= MMC_VDD_165_195; |
2995 | 2717 | ||
2996 | mmc->max_current_180 = ((max_current_caps & | 2718 | max_current_180 = ((max_current_caps & |
2997 | SDHCI_MAX_CURRENT_180_MASK) >> | 2719 | SDHCI_MAX_CURRENT_180_MASK) >> |
2998 | SDHCI_MAX_CURRENT_180_SHIFT) * | 2720 | SDHCI_MAX_CURRENT_180_SHIFT) * |
2999 | SDHCI_MAX_CURRENT_MULTIPLIER; | 2721 | SDHCI_MAX_CURRENT_MULTIPLIER; |
2722 | |||
2723 | if (max_current_180 > 150) | ||
2724 | mmc->caps |= MMC_CAP_SET_XPC_180; | ||
2725 | |||
2726 | /* Maximum current capabilities of the host at 1.8V */ | ||
2727 | if (max_current_180 >= 800) | ||
2728 | mmc->caps |= MMC_CAP_MAX_CURRENT_800; | ||
2729 | else if (max_current_180 >= 600) | ||
2730 | mmc->caps |= MMC_CAP_MAX_CURRENT_600; | ||
2731 | else if (max_current_180 >= 400) | ||
2732 | mmc->caps |= MMC_CAP_MAX_CURRENT_400; | ||
2733 | else | ||
2734 | mmc->caps |= MMC_CAP_MAX_CURRENT_200; | ||
3000 | } | 2735 | } |
3001 | 2736 | ||
3002 | mmc->ocr_avail = ocr_avail; | 2737 | mmc->ocr_avail = ocr_avail; |
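Both columns of the hunk above decode SDHCI_MAX_CURRENT the same way: an 8-bit field per voltage rail, scaled by a fixed step size (4 mA per unit in the SDHCI spec, which is what SDHCI_MAX_CURRENT_MULTIPLIER stands for); the older code on the right then compares the result against thresholds such as 150 mA to set the XPC capability. A worked example with an illustrative register value:

    /* Worked example of the MAX_CURRENT field decode for the 3.3 V rail.
     * Register value and threshold are illustrative. */
    #include <stdint.h>
    #include <stdio.h>

    #define CURRENT_330_MASK   0x0000FFu
    #define CURRENT_330_SHIFT  0
    #define CURRENT_STEP_MA    4          /* 4 mA per unit, per SDHCI spec */

    int main(void)
    {
        uint32_t max_current_reg = 0x000000C8;   /* example: field = 200 */
        unsigned int ma = ((max_current_reg & CURRENT_330_MASK)
                           >> CURRENT_330_SHIFT) * CURRENT_STEP_MA;

        printf("3.3V rail can supply up to %u mA\n", ma);   /* 800 mA */
        if (ma > 150)
            printf("-> XPC bit may be set for this rail\n");
        return 0;
    }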
@@ -3013,7 +2748,7 @@ int sdhci_add_host(struct sdhci_host *host) | |||
3013 | mmc->ocr_avail_mmc &= host->ocr_avail_mmc; | 2748 | mmc->ocr_avail_mmc &= host->ocr_avail_mmc; |
3014 | 2749 | ||
3015 | if (mmc->ocr_avail == 0) { | 2750 | if (mmc->ocr_avail == 0) { |
3016 | pr_err("%s: Hardware doesn't report any " | 2751 | printk(KERN_ERR "%s: Hardware doesn't report any " |
3017 | "support voltages.\n", mmc_hostname(mmc)); | 2752 | "support voltages.\n", mmc_hostname(mmc)); |
3018 | return -ENODEV; | 2753 | return -ENODEV; |
3019 | } | 2754 | } |
@@ -3061,7 +2796,7 @@ int sdhci_add_host(struct sdhci_host *host) | |||
3061 | mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >> | 2796 | mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >> |
3062 | SDHCI_MAX_BLOCK_SHIFT; | 2797 | SDHCI_MAX_BLOCK_SHIFT; |
3063 | if (mmc->max_blk_size >= 3) { | 2798 | if (mmc->max_blk_size >= 3) { |
3064 | pr_warning("%s: Invalid maximum block size, " | 2799 | printk(KERN_WARNING "%s: Invalid maximum block size, " |
3065 | "assuming 512 bytes\n", mmc_hostname(mmc)); | 2800 | "assuming 512 bytes\n", mmc_hostname(mmc)); |
3066 | mmc->max_blk_size = 0; | 2801 | mmc->max_blk_size = 0; |
3067 | } | 2802 | } |
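The "Invalid maximum block size" warning above fires for field values >= 3 because the 2-bit capability field encodes 512 << n bytes, so only 0..2 (512, 1024, 2048) are defined and 3 is reserved. A small demonstration of that mapping:

    /* Mapping of the max-block-size capability field to byte counts. */
    #include <stdio.h>

    int main(void)
    {
        for (unsigned int field = 0; field < 4; field++) {
            if (field >= 3)
                printf("field %u: reserved, assume 512 bytes\n", field);
            else
                printf("field %u: max block size %u bytes\n",
                       field, 512u << field);
        }
        return 0;
    }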
@@ -3095,10 +2830,15 @@ int sdhci_add_host(struct sdhci_host *host) | |||
3095 | 2830 | ||
3096 | ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, | 2831 | ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, |
3097 | mmc_hostname(mmc), host); | 2832 | mmc_hostname(mmc), host); |
3098 | if (ret) { | 2833 | if (ret) |
3099 | pr_err("%s: Failed to request IRQ %d: %d\n", | ||
3100 | mmc_hostname(mmc), host->irq, ret); | ||
3101 | goto untasklet; | 2834 | goto untasklet; |
2835 | |||
2836 | host->vmmc = regulator_get(mmc_dev(mmc), "vmmc"); | ||
2837 | if (IS_ERR(host->vmmc)) { | ||
2838 | printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc)); | ||
2839 | host->vmmc = NULL; | ||
2840 | } else { | ||
2841 | regulator_enable(host->vmmc); | ||
3102 | } | 2842 | } |
3103 | 2843 | ||
3104 | sdhci_init(host, 0); | 2844 | sdhci_init(host, 0); |
@@ -3116,18 +2856,15 @@ int sdhci_add_host(struct sdhci_host *host) | |||
3116 | host->led.brightness_set = sdhci_led_control; | 2856 | host->led.brightness_set = sdhci_led_control; |
3117 | 2857 | ||
3118 | ret = led_classdev_register(mmc_dev(mmc), &host->led); | 2858 | ret = led_classdev_register(mmc_dev(mmc), &host->led); |
3119 | if (ret) { | 2859 | if (ret) |
3120 | pr_err("%s: Failed to register LED device: %d\n", | ||
3121 | mmc_hostname(mmc), ret); | ||
3122 | goto reset; | 2860 | goto reset; |
3123 | } | ||
3124 | #endif | 2861 | #endif |
3125 | 2862 | ||
3126 | mmiowb(); | 2863 | mmiowb(); |
3127 | 2864 | ||
3128 | mmc_add_host(mmc); | 2865 | mmc_add_host(mmc); |
3129 | 2866 | ||
3130 | pr_info("%s: SDHCI controller on %s [%s] using %s\n", | 2867 | printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s\n", |
3131 | mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), | 2868 | mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), |
3132 | (host->flags & SDHCI_USE_ADMA) ? "ADMA" : | 2869 | (host->flags & SDHCI_USE_ADMA) ? "ADMA" : |
3133 | (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO"); | 2870 | (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO"); |
@@ -3160,7 +2897,7 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) | |||
3160 | host->flags |= SDHCI_DEVICE_DEAD; | 2897 | host->flags |= SDHCI_DEVICE_DEAD; |
3161 | 2898 | ||
3162 | if (host->mrq) { | 2899 | if (host->mrq) { |
3163 | pr_err("%s: Controller removed during " | 2900 | printk(KERN_ERR "%s: Controller removed during " |
3164 | " transfer!\n", mmc_hostname(host->mmc)); | 2901 | " transfer!\n", mmc_hostname(host->mmc)); |
3165 | 2902 | ||
3166 | host->mrq->cmd->error = -ENOMEDIUM; | 2903 | host->mrq->cmd->error = -ENOMEDIUM; |
@@ -3184,6 +2921,8 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) | |||
3184 | free_irq(host->irq, host); | 2921 | free_irq(host->irq, host); |
3185 | 2922 | ||
3186 | del_timer_sync(&host->timer); | 2923 | del_timer_sync(&host->timer); |
2924 | if (host->version >= SDHCI_SPEC_300) | ||
2925 | del_timer_sync(&host->tuning_timer); | ||
3187 | 2926 | ||
3188 | tasklet_kill(&host->card_tasklet); | 2927 | tasklet_kill(&host->card_tasklet); |
3189 | tasklet_kill(&host->finish_tasklet); | 2928 | tasklet_kill(&host->finish_tasklet); |
@@ -3193,11 +2932,6 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) | |||
3193 | regulator_put(host->vmmc); | 2932 | regulator_put(host->vmmc); |
3194 | } | 2933 | } |
3195 | 2934 | ||
3196 | if (host->vqmmc) { | ||
3197 | regulator_disable(host->vqmmc); | ||
3198 | regulator_put(host->vqmmc); | ||
3199 | } | ||
3200 | |||
3201 | kfree(host->adma_desc); | 2935 | kfree(host->adma_desc); |
3202 | kfree(host->align_buffer); | 2936 | kfree(host->align_buffer); |
3203 | 2937 | ||
@@ -3222,9 +2956,9 @@ EXPORT_SYMBOL_GPL(sdhci_free_host); | |||
3222 | 2956 | ||
3223 | static int __init sdhci_drv_init(void) | 2957 | static int __init sdhci_drv_init(void) |
3224 | { | 2958 | { |
3225 | pr_info(DRIVER_NAME | 2959 | printk(KERN_INFO DRIVER_NAME |
3226 | ": Secure Digital Host Controller Interface driver\n"); | 2960 | ": Secure Digital Host Controller Interface driver\n"); |
3227 | pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); | 2961 | printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); |
3228 | 2962 | ||
3229 | return 0; | 2963 | return 0; |
3230 | } | 2964 | } |
@@ -3237,11 +2971,9 @@ module_init(sdhci_drv_init); | |||
3237 | module_exit(sdhci_drv_exit); | 2971 | module_exit(sdhci_drv_exit); |
3238 | 2972 | ||
3239 | module_param(debug_quirks, uint, 0444); | 2973 | module_param(debug_quirks, uint, 0444); |
3240 | module_param(debug_quirks2, uint, 0444); | ||
3241 | 2974 | ||
3242 | MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); | 2975 | MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); |
3243 | MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); | 2976 | MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); |
3244 | MODULE_LICENSE("GPL"); | 2977 | MODULE_LICENSE("GPL"); |
3245 | 2978 | ||
3246 | MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); | 2979 | MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); |
3247 | MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks."); | ||
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index a6d69b7bdea..c00833de19d 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h | |||
@@ -120,7 +120,6 @@ | |||
120 | #define SDHCI_SIGNAL_ENABLE 0x38 | 120 | #define SDHCI_SIGNAL_ENABLE 0x38 |
121 | #define SDHCI_INT_RESPONSE 0x00000001 | 121 | #define SDHCI_INT_RESPONSE 0x00000001 |
122 | #define SDHCI_INT_DATA_END 0x00000002 | 122 | #define SDHCI_INT_DATA_END 0x00000002 |
123 | #define SDHCI_INT_BLK_GAP 0x00000004 | ||
124 | #define SDHCI_INT_DMA_END 0x00000008 | 123 | #define SDHCI_INT_DMA_END 0x00000008 |
125 | #define SDHCI_INT_SPACE_AVAIL 0x00000010 | 124 | #define SDHCI_INT_SPACE_AVAIL 0x00000010 |
126 | #define SDHCI_INT_DATA_AVAIL 0x00000020 | 125 | #define SDHCI_INT_DATA_AVAIL 0x00000020 |
@@ -147,8 +146,7 @@ | |||
147 | #define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \ | 146 | #define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \ |
148 | SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \ | 147 | SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \ |
149 | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \ | 148 | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \ |
150 | SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR | \ | 149 | SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR) |
151 | SDHCI_INT_BLK_GAP) | ||
152 | #define SDHCI_INT_ALL_MASK ((unsigned int)-1) | 150 | #define SDHCI_INT_ALL_MASK ((unsigned int)-1) |
153 | 151 | ||
154 | #define SDHCI_ACMD12_ERR 0x3C | 152 | #define SDHCI_ACMD12_ERR 0x3C |
@@ -160,7 +158,6 @@ | |||
160 | #define SDHCI_CTRL_UHS_SDR50 0x0002 | 158 | #define SDHCI_CTRL_UHS_SDR50 0x0002 |
161 | #define SDHCI_CTRL_UHS_SDR104 0x0003 | 159 | #define SDHCI_CTRL_UHS_SDR104 0x0003 |
162 | #define SDHCI_CTRL_UHS_DDR50 0x0004 | 160 | #define SDHCI_CTRL_UHS_DDR50 0x0004 |
163 | #define SDHCI_CTRL_HS_SDR200 0x0005 /* reserved value in SDIO spec */ | ||
164 | #define SDHCI_CTRL_VDD_180 0x0008 | 161 | #define SDHCI_CTRL_VDD_180 0x0008 |
165 | #define SDHCI_CTRL_DRV_TYPE_MASK 0x0030 | 162 | #define SDHCI_CTRL_DRV_TYPE_MASK 0x0030 |
166 | #define SDHCI_CTRL_DRV_TYPE_B 0x0000 | 163 | #define SDHCI_CTRL_DRV_TYPE_B 0x0000 |
@@ -207,7 +204,6 @@ | |||
207 | #define SDHCI_CAPABILITIES_1 0x44 | 204 | #define SDHCI_CAPABILITIES_1 0x44 |
208 | 205 | ||
209 | #define SDHCI_MAX_CURRENT 0x48 | 206 | #define SDHCI_MAX_CURRENT 0x48 |
210 | #define SDHCI_MAX_CURRENT_LIMIT 0xFF | ||
211 | #define SDHCI_MAX_CURRENT_330_MASK 0x0000FF | 207 | #define SDHCI_MAX_CURRENT_330_MASK 0x0000FF |
212 | #define SDHCI_MAX_CURRENT_330_SHIFT 0 | 208 | #define SDHCI_MAX_CURRENT_330_SHIFT 0 |
213 | #define SDHCI_MAX_CURRENT_300_MASK 0x00FF00 | 209 | #define SDHCI_MAX_CURRENT_300_MASK 0x00FF00 |
@@ -274,14 +270,15 @@ struct sdhci_ops { | |||
274 | void (*platform_send_init_74_clocks)(struct sdhci_host *host, | 270 | void (*platform_send_init_74_clocks)(struct sdhci_host *host, |
275 | u8 power_mode); | 271 | u8 power_mode); |
276 | unsigned int (*get_ro)(struct sdhci_host *host); | 272 | unsigned int (*get_ro)(struct sdhci_host *host); |
273 | unsigned int (*get_cd)(struct sdhci_host *host); | ||
277 | void (*platform_reset_enter)(struct sdhci_host *host, u8 mask); | 274 | void (*platform_reset_enter)(struct sdhci_host *host, u8 mask); |
278 | void (*platform_reset_exit)(struct sdhci_host *host, u8 mask); | 275 | void (*platform_reset_exit)(struct sdhci_host *host, u8 mask); |
279 | int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs); | 276 | int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs); |
280 | void (*hw_reset)(struct sdhci_host *host); | 277 | int (*suspend)(struct sdhci_host *host, pm_message_t state); |
281 | void (*platform_suspend)(struct sdhci_host *host); | 278 | int (*resume)(struct sdhci_host *host); |
282 | void (*platform_resume)(struct sdhci_host *host); | 279 | int (*switch_signal_voltage)(struct sdhci_host *host, |
283 | void (*adma_workaround)(struct sdhci_host *host, u32 intmask); | 280 | unsigned int signal_voltage); |
284 | void (*platform_init)(struct sdhci_host *host); | 281 | int (*execute_freq_tuning)(struct sdhci_host *sdhci); |
285 | }; | 282 | }; |
286 | 283 | ||
287 | #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS | 284 | #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS |
@@ -382,14 +379,9 @@ extern int sdhci_add_host(struct sdhci_host *host); | |||
382 | extern void sdhci_remove_host(struct sdhci_host *host, int dead); | 379 | extern void sdhci_remove_host(struct sdhci_host *host, int dead); |
383 | 380 | ||
384 | #ifdef CONFIG_PM | 381 | #ifdef CONFIG_PM |
385 | extern int sdhci_suspend_host(struct sdhci_host *host); | 382 | extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state); |
386 | extern int sdhci_resume_host(struct sdhci_host *host); | 383 | extern int sdhci_resume_host(struct sdhci_host *host); |
387 | extern void sdhci_enable_irq_wakeups(struct sdhci_host *host); | 384 | extern void sdhci_enable_irq_wakeups(struct sdhci_host *host); |
388 | #endif | 385 | #endif |
389 | 386 | ||
390 | #ifdef CONFIG_PM_RUNTIME | ||
391 | extern int sdhci_runtime_suspend_host(struct sdhci_host *host); | ||
392 | extern int sdhci_runtime_resume_host(struct sdhci_host *host); | ||
393 | #endif | ||
394 | |||
395 | #endif /* __SDHCI_HW_H */ | 387 | #endif /* __SDHCI_HW_H */ |
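The sdhci_ops changes above trade one set of optional host hooks for another (e.g. get_cd is added on the right, while hw_reset and the platform suspend/resume hooks are dropped); in both versions the core only invokes a hook the platform driver actually filled in. The sketch below shows that optional-callback pattern in isolation; the types and names are invented for the example, not the real sdhci interfaces.

    /* Minimal sketch of the optional-hook pattern behind struct sdhci_ops. */
    #include <stdio.h>

    struct toy_host;

    struct toy_ops {
        unsigned int (*get_ro)(struct toy_host *host);   /* optional */
        unsigned int (*get_cd)(struct toy_host *host);   /* optional */
    };

    struct toy_host {
        const struct toy_ops *ops;
    };

    static unsigned int tegra_get_cd(struct toy_host *host)
    {
        (void)host;
        return 1;               /* pretend a card-detect GPIO reads "present" */
    }

    static int card_present(struct toy_host *host)
    {
        /* Fall back to "assume present" when the hook is not implemented,
         * mirroring how the core guards calls like ops->get_cd with a
         * NULL check. */
        if (host->ops && host->ops->get_cd)
            return host->ops->get_cd(host);
        return 1;
    }

    int main(void)
    {
        static const struct toy_ops tegra_ops = { .get_cd = tegra_get_cd };
        struct toy_host with_hook    = { &tegra_ops };
        struct toy_host without_hook = { NULL };

        printf("with hook:    %d\n", card_present(&with_hook));
        printf("without hook: %d\n", card_present(&without_hook));
        return 0;
    }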
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c index 7009f17ad6c..496b7efbc6b 100644 --- a/drivers/mmc/host/sdricoh_cs.c +++ b/drivers/mmc/host/sdricoh_cs.c | |||
@@ -26,7 +26,6 @@ | |||
26 | */ | 26 | */ |
27 | #include <linux/delay.h> | 27 | #include <linux/delay.h> |
28 | #include <linux/highmem.h> | 28 | #include <linux/highmem.h> |
29 | #include <linux/module.h> | ||
30 | #include <linux/pci.h> | 29 | #include <linux/pci.h> |
31 | #include <linux/ioport.h> | 30 | #include <linux/ioport.h> |
32 | #include <linux/scatterlist.h> | 31 | #include <linux/scatterlist.h> |
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 9a4c151067d..557886bee9c 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c | |||
@@ -16,33 +16,6 @@ | |||
16 | * | 16 | * |
17 | */ | 17 | */ |
18 | 18 | ||
19 | /* | ||
20 | * The MMCIF driver is now processing MMC requests asynchronously, according | ||
21 | * to the Linux MMC API requirement. | ||
22 | * | ||
23 | * The MMCIF driver processes MMC requests in up to 3 stages: command, optional | ||
24 | * data, and optional stop. To achieve asynchronous processing each of these | ||
25 | * stages is split into two halves: a top and a bottom half. The top half | ||
26 | * initialises the hardware, installs a timeout handler to handle completion | ||
27 | * timeouts, and returns. In case of the command stage this immediately returns | ||
28 | * control to the caller, leaving all further processing to run asynchronously. | ||
29 | * All further request processing is performed by the bottom halves. | ||
30 | * | ||
31 | * The bottom half further consists of a "hard" IRQ handler, an IRQ handler | ||
32 | * thread, a DMA completion callback, if DMA is used, a timeout work, and | ||
33 | * request- and stage-specific handler methods. | ||
34 | * | ||
35 | * Each bottom half run begins with either a hardware interrupt, a DMA callback | ||
36 | * invocation, or a timeout work run. In case of an error or a successful | ||
37 | * processing completion, the MMC core is informed and the request processing is | ||
38 | * finished. In case processing has to continue, i.e., if data has to be read | ||
39 | * from or written to the card, or if a stop command has to be sent, the next | ||
40 | * top half is called, which performs the necessary hardware handling and | ||
41 | * reschedules the timeout work. This returns the driver state machine into the | ||
42 | * bottom half waiting state. | ||
43 | */ | ||
44 | |||
45 | #include <linux/bitops.h> | ||
46 | #include <linux/clk.h> | 19 | #include <linux/clk.h> |
47 | #include <linux/completion.h> | 20 | #include <linux/completion.h> |
48 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
@@ -54,14 +27,10 @@ | |||
54 | #include <linux/mmc/mmc.h> | 27 | #include <linux/mmc/mmc.h> |
55 | #include <linux/mmc/sdio.h> | 28 | #include <linux/mmc/sdio.h> |
56 | #include <linux/mmc/sh_mmcif.h> | 29 | #include <linux/mmc/sh_mmcif.h> |
57 | #include <linux/mmc/slot-gpio.h> | ||
58 | #include <linux/mod_devicetable.h> | ||
59 | #include <linux/pagemap.h> | 30 | #include <linux/pagemap.h> |
60 | #include <linux/platform_device.h> | 31 | #include <linux/platform_device.h> |
61 | #include <linux/pm_qos.h> | ||
62 | #include <linux/pm_runtime.h> | 32 | #include <linux/pm_runtime.h> |
63 | #include <linux/spinlock.h> | 33 | #include <linux/spinlock.h> |
64 | #include <linux/module.h> | ||
65 | 34 | ||
66 | #define DRIVER_NAME "sh_mmcif" | 35 | #define DRIVER_NAME "sh_mmcif" |
67 | #define DRIVER_VERSION "2010-04-28" | 36 | #define DRIVER_VERSION "2010-04-28" |
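The comment block removed above documented the mainline driver's asynchronous request handling: each stage has a top half that arms the hardware and returns, and bottom halves (hard IRQ handler, IRQ thread, DMA callback, timeout work) that finish the stage and start the next one. The toy sketch below illustrates that split with an invented state machine and no kernel APIs; it is not the sh_mmcif code itself.

    /* Toy illustration of the top-half/bottom-half split: the top half only
     * kicks off a stage and records what it is waiting for; a later
     * "bottom half" call (standing in for an IRQ or DMA callback) finishes
     * that stage and starts the next one. */
    #include <stdio.h>

    enum wait_for { WAIT_NONE, WAIT_CMD, WAIT_DATA, WAIT_STOP };

    struct toy_req {
        enum wait_for wait_for;
        int has_data;
        int has_stop;
    };

    static void start_cmd(struct toy_req *r)
    {
        printf("top half: issue command, return to caller\n");
        r->wait_for = WAIT_CMD;
    }

    /* Called when the "hardware" signals completion of the current stage. */
    static void bottom_half(struct toy_req *r)
    {
        switch (r->wait_for) {
        case WAIT_CMD:
            if (r->has_data) {
                printf("bottom half: command done, start data transfer\n");
                r->wait_for = WAIT_DATA;
                return;
            }
            break;
        case WAIT_DATA:
            if (r->has_stop) {
                printf("bottom half: data done, send stop command\n");
                r->wait_for = WAIT_STOP;
                return;
            }
            break;
        default:
            break;
        }
        printf("bottom half: request complete\n");
        r->wait_for = WAIT_NONE;
    }

    int main(void)
    {
        struct toy_req req = { WAIT_NONE, 1, 1 };

        start_cmd(&req);
        bottom_half(&req);   /* command interrupt  */
        bottom_half(&req);   /* data-end interrupt */
        bottom_half(&req);   /* stop-cmd interrupt */
        return 0;
    }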
@@ -153,11 +122,6 @@ | |||
153 | #define MASK_MRBSYTO (1 << 1) | 122 | #define MASK_MRBSYTO (1 << 1) |
154 | #define MASK_MRSPTO (1 << 0) | 123 | #define MASK_MRSPTO (1 << 0) |
155 | 124 | ||
156 | #define MASK_START_CMD (MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \ | ||
157 | MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \ | ||
158 | MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \ | ||
159 | MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO) | ||
160 | |||
161 | /* CE_HOST_STS1 */ | 125 | /* CE_HOST_STS1 */ |
162 | #define STS1_CMDSEQ (1 << 31) | 126 | #define STS1_CMDSEQ (1 << 31) |
163 | 127 | ||
@@ -197,37 +161,19 @@ enum mmcif_state { | |||
197 | STATE_IOS, | 161 | STATE_IOS, |
198 | }; | 162 | }; |
199 | 163 | ||
200 | enum mmcif_wait_for { | ||
201 | MMCIF_WAIT_FOR_REQUEST, | ||
202 | MMCIF_WAIT_FOR_CMD, | ||
203 | MMCIF_WAIT_FOR_MREAD, | ||
204 | MMCIF_WAIT_FOR_MWRITE, | ||
205 | MMCIF_WAIT_FOR_READ, | ||
206 | MMCIF_WAIT_FOR_WRITE, | ||
207 | MMCIF_WAIT_FOR_READ_END, | ||
208 | MMCIF_WAIT_FOR_WRITE_END, | ||
209 | MMCIF_WAIT_FOR_STOP, | ||
210 | }; | ||
211 | |||
212 | struct sh_mmcif_host { | 164 | struct sh_mmcif_host { |
213 | struct mmc_host *mmc; | 165 | struct mmc_host *mmc; |
214 | struct mmc_request *mrq; | 166 | struct mmc_data *data; |
215 | struct platform_device *pd; | 167 | struct platform_device *pd; |
216 | struct clk *hclk; | 168 | struct clk *hclk; |
217 | unsigned int clk; | 169 | unsigned int clk; |
218 | int bus_width; | 170 | int bus_width; |
219 | bool sd_error; | 171 | bool sd_error; |
220 | bool dying; | ||
221 | long timeout; | 172 | long timeout; |
222 | void __iomem *addr; | 173 | void __iomem *addr; |
223 | u32 *pio_ptr; | 174 | struct completion intr_wait; |
224 | spinlock_t lock; /* protect sh_mmcif_host::state */ | ||
225 | enum mmcif_state state; | 175 | enum mmcif_state state; |
226 | enum mmcif_wait_for wait_for; | 176 | spinlock_t lock; |
227 | struct delayed_work timeout_work; | ||
228 | size_t blocksize; | ||
229 | int sg_idx; | ||
230 | int sg_blkidx; | ||
231 | bool power; | 177 | bool power; |
232 | bool card_present; | 178 | bool card_present; |
233 | 179 | ||
@@ -253,21 +199,19 @@ static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host, | |||
253 | static void mmcif_dma_complete(void *arg) | 199 | static void mmcif_dma_complete(void *arg) |
254 | { | 200 | { |
255 | struct sh_mmcif_host *host = arg; | 201 | struct sh_mmcif_host *host = arg; |
256 | struct mmc_data *data = host->mrq->data; | ||
257 | |||
258 | dev_dbg(&host->pd->dev, "Command completed\n"); | 202 | dev_dbg(&host->pd->dev, "Command completed\n"); |
259 | 203 | ||
260 | if (WARN(!data, "%s: NULL data in DMA completion!\n", | 204 | if (WARN(!host->data, "%s: NULL data in DMA completion!\n", |
261 | dev_name(&host->pd->dev))) | 205 | dev_name(&host->pd->dev))) |
262 | return; | 206 | return; |
263 | 207 | ||
264 | if (data->flags & MMC_DATA_READ) | 208 | if (host->data->flags & MMC_DATA_READ) |
265 | dma_unmap_sg(host->chan_rx->device->dev, | 209 | dma_unmap_sg(host->chan_rx->device->dev, |
266 | data->sg, data->sg_len, | 210 | host->data->sg, host->data->sg_len, |
267 | DMA_FROM_DEVICE); | 211 | DMA_FROM_DEVICE); |
268 | else | 212 | else |
269 | dma_unmap_sg(host->chan_tx->device->dev, | 213 | dma_unmap_sg(host->chan_tx->device->dev, |
270 | data->sg, data->sg_len, | 214 | host->data->sg, host->data->sg_len, |
271 | DMA_TO_DEVICE); | 215 | DMA_TO_DEVICE); |
272 | 216 | ||
273 | complete(&host->dma_complete); | 217 | complete(&host->dma_complete); |
@@ -275,19 +219,18 @@ static void mmcif_dma_complete(void *arg) | |||
275 | 219 | ||
276 | static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host) | 220 | static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host) |
277 | { | 221 | { |
278 | struct mmc_data *data = host->mrq->data; | 222 | struct scatterlist *sg = host->data->sg; |
279 | struct scatterlist *sg = data->sg; | ||
280 | struct dma_async_tx_descriptor *desc = NULL; | 223 | struct dma_async_tx_descriptor *desc = NULL; |
281 | struct dma_chan *chan = host->chan_rx; | 224 | struct dma_chan *chan = host->chan_rx; |
282 | dma_cookie_t cookie = -EINVAL; | 225 | dma_cookie_t cookie = -EINVAL; |
283 | int ret; | 226 | int ret; |
284 | 227 | ||
285 | ret = dma_map_sg(chan->device->dev, sg, data->sg_len, | 228 | ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len, |
286 | DMA_FROM_DEVICE); | 229 | DMA_FROM_DEVICE); |
287 | if (ret > 0) { | 230 | if (ret > 0) { |
288 | host->dma_active = true; | 231 | host->dma_active = true; |
289 | desc = dmaengine_prep_slave_sg(chan, sg, ret, | 232 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, |
290 | DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 233 | DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
291 | } | 234 | } |
292 | 235 | ||
293 | if (desc) { | 236 | if (desc) { |
@@ -298,7 +241,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host) | |||
298 | dma_async_issue_pending(chan); | 241 | dma_async_issue_pending(chan); |
299 | } | 242 | } |
300 | dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n", | 243 | dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n", |
301 | __func__, data->sg_len, ret, cookie); | 244 | __func__, host->data->sg_len, ret, cookie); |
302 | 245 | ||
303 | if (!desc) { | 246 | if (!desc) { |
304 | /* DMA failed, fall back to PIO */ | 247 | /* DMA failed, fall back to PIO */ |
@@ -319,24 +262,23 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host) | |||
319 | } | 262 | } |
320 | 263 | ||
321 | dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, | 264 | dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, |
322 | desc, cookie, data->sg_len); | 265 | desc, cookie, host->data->sg_len); |
323 | } | 266 | } |
324 | 267 | ||
325 | static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) | 268 | static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) |
326 | { | 269 | { |
327 | struct mmc_data *data = host->mrq->data; | 270 | struct scatterlist *sg = host->data->sg; |
328 | struct scatterlist *sg = data->sg; | ||
329 | struct dma_async_tx_descriptor *desc = NULL; | 271 | struct dma_async_tx_descriptor *desc = NULL; |
330 | struct dma_chan *chan = host->chan_tx; | 272 | struct dma_chan *chan = host->chan_tx; |
331 | dma_cookie_t cookie = -EINVAL; | 273 | dma_cookie_t cookie = -EINVAL; |
332 | int ret; | 274 | int ret; |
333 | 275 | ||
334 | ret = dma_map_sg(chan->device->dev, sg, data->sg_len, | 276 | ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len, |
335 | DMA_TO_DEVICE); | 277 | DMA_TO_DEVICE); |
336 | if (ret > 0) { | 278 | if (ret > 0) { |
337 | host->dma_active = true; | 279 | host->dma_active = true; |
338 | desc = dmaengine_prep_slave_sg(chan, sg, ret, | 280 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, |
339 | DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 281 | DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
340 | } | 282 | } |
341 | 283 | ||
342 | if (desc) { | 284 | if (desc) { |
@@ -347,7 +289,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) | |||
347 | dma_async_issue_pending(chan); | 289 | dma_async_issue_pending(chan); |
348 | } | 290 | } |
349 | dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n", | 291 | dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n", |
350 | __func__, data->sg_len, ret, cookie); | 292 | __func__, host->data->sg_len, ret, cookie); |
351 | 293 | ||
352 | if (!desc) { | 294 | if (!desc) { |
353 | /* DMA failed, fall back to PIO */ | 295 | /* DMA failed, fall back to PIO */ |
@@ -371,69 +313,46 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) | |||
371 | desc, cookie); | 313 | desc, cookie); |
372 | } | 314 | } |
373 | 315 | ||
316 | static bool sh_mmcif_filter(struct dma_chan *chan, void *arg) | ||
317 | { | ||
318 | dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg); | ||
319 | chan->private = arg; | ||
320 | return true; | ||
321 | } | ||
322 | |||
374 | static void sh_mmcif_request_dma(struct sh_mmcif_host *host, | 323 | static void sh_mmcif_request_dma(struct sh_mmcif_host *host, |
375 | struct sh_mmcif_plat_data *pdata) | 324 | struct sh_mmcif_plat_data *pdata) |
376 | { | 325 | { |
377 | struct resource *res = platform_get_resource(host->pd, IORESOURCE_MEM, 0); | ||
378 | struct dma_slave_config cfg; | ||
379 | dma_cap_mask_t mask; | ||
380 | int ret; | ||
381 | |||
382 | host->dma_active = false; | 326 | host->dma_active = false; |
383 | 327 | ||
384 | if (!pdata) | ||
385 | return; | ||
386 | |||
387 | if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0) | ||
388 | return; | ||
389 | |||
390 | /* We can only either use DMA for both Tx and Rx or not use it at all */ | 328 | /* We can only either use DMA for both Tx and Rx or not use it at all */ |
391 | dma_cap_zero(mask); | 329 | if (pdata->dma) { |
392 | dma_cap_set(DMA_SLAVE, mask); | 330 | dma_cap_mask_t mask; |
393 | 331 | ||
394 | host->chan_tx = dma_request_channel(mask, shdma_chan_filter, | 332 | dma_cap_zero(mask); |
395 | (void *)pdata->slave_id_tx); | 333 | dma_cap_set(DMA_SLAVE, mask); |
396 | dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__, | ||
397 | host->chan_tx); | ||
398 | 334 | ||
399 | if (!host->chan_tx) | 335 | host->chan_tx = dma_request_channel(mask, sh_mmcif_filter, |
400 | return; | 336 | &pdata->dma->chan_priv_tx); |
401 | 337 | dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__, | |
402 | cfg.slave_id = pdata->slave_id_tx; | 338 | host->chan_tx); |
403 | cfg.direction = DMA_MEM_TO_DEV; | ||
404 | cfg.dst_addr = res->start + MMCIF_CE_DATA; | ||
405 | cfg.src_addr = 0; | ||
406 | ret = dmaengine_slave_config(host->chan_tx, &cfg); | ||
407 | if (ret < 0) | ||
408 | goto ecfgtx; | ||
409 | |||
410 | host->chan_rx = dma_request_channel(mask, shdma_chan_filter, | ||
411 | (void *)pdata->slave_id_rx); | ||
412 | dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__, | ||
413 | host->chan_rx); | ||
414 | |||
415 | if (!host->chan_rx) | ||
416 | goto erqrx; | ||
417 | 339 | ||
418 | cfg.slave_id = pdata->slave_id_rx; | 340 | if (!host->chan_tx) |
419 | cfg.direction = DMA_DEV_TO_MEM; | 341 | return; |
420 | cfg.dst_addr = 0; | ||
421 | cfg.src_addr = res->start + MMCIF_CE_DATA; | ||
422 | ret = dmaengine_slave_config(host->chan_rx, &cfg); | ||
423 | if (ret < 0) | ||
424 | goto ecfgrx; | ||
425 | 342 | ||
426 | init_completion(&host->dma_complete); | 343 | host->chan_rx = dma_request_channel(mask, sh_mmcif_filter, |
344 | &pdata->dma->chan_priv_rx); | ||
345 | dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__, | ||
346 | host->chan_rx); | ||
427 | 347 | ||
428 | return; | 348 | if (!host->chan_rx) { |
349 | dma_release_channel(host->chan_tx); | ||
350 | host->chan_tx = NULL; | ||
351 | return; | ||
352 | } | ||
429 | 353 | ||
430 | ecfgrx: | 354 | init_completion(&host->dma_complete); |
431 | dma_release_channel(host->chan_rx); | 355 | } |
432 | host->chan_rx = NULL; | ||
433 | erqrx: | ||
434 | ecfgtx: | ||
435 | dma_release_channel(host->chan_tx); | ||
436 | host->chan_tx = NULL; | ||
437 | } | 356 | } |
438 | 357 | ||
439 | static void sh_mmcif_release_dma(struct sh_mmcif_host *host) | 358 | static void sh_mmcif_release_dma(struct sh_mmcif_host *host) |
@@ -457,19 +376,17 @@ static void sh_mmcif_release_dma(struct sh_mmcif_host *host) | |||
457 | static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk) | 376 | static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk) |
458 | { | 377 | { |
459 | struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; | 378 | struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; |
460 | bool sup_pclk = p ? p->sup_pclk : false; | ||
461 | 379 | ||
462 | sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); | 380 | sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); |
463 | sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR); | 381 | sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR); |
464 | 382 | ||
465 | if (!clk) | 383 | if (!clk) |
466 | return; | 384 | return; |
467 | if (sup_pclk && clk == host->clk) | 385 | if (p->sup_pclk && clk == host->clk) |
468 | sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK); | 386 | sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK); |
469 | else | 387 | else |
470 | sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & | 388 | sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & |
471 | ((fls(DIV_ROUND_UP(host->clk, | 389 | (ilog2(__rounddown_pow_of_two(host->clk / clk)) << 16)); |
472 | clk) - 1) - 1) << 16)); | ||
473 | 390 | ||
474 | sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); | 391 | sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); |
475 | } | 392 | } |
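The clock hunk above replaces the expression that picks the power-of-two divider field written into CE_CLK_CTRL. The sketch below only compares what the two expressions evaluate to for one example clock pair, using userspace stand-ins for the kernel helpers (fls, DIV_ROUND_UP, ilog2, __rounddown_pow_of_two); how the hardware interprets the resulting field is not modelled here.

    /* Arithmetic-only comparison of the two divider-field expressions. */
    #include <stdio.h>

    static unsigned int fls32(unsigned int x)   /* like kernel fls(): 1-based MSB index */
    {
        unsigned int r = 0;
        while (x) {
            x >>= 1;
            r++;
        }
        return r;
    }

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int pclk   = 100000000;   /* example source clock: 100 MHz */
        unsigned int target = 400000;      /* example target: 400 kHz       */

        /* left column:  (fls(DIV_ROUND_UP(pclk, target)) - 1) - 1          */
        unsigned int exp_left = fls32(DIV_ROUND_UP(pclk, target)) - 2;

        /* right column: ilog2(__rounddown_pow_of_two(pclk / target)),
         * which for a runtime value reduces to fls(pclk / target) - 1      */
        unsigned int exp_right = fls32(pclk / target) - 1;

        printf("field from fls(DIV_ROUND_UP(...)) - 2 : %u\n", exp_left);
        printf("field from ilog2(rounddown_pow2(...)) : %u\n", exp_right);
        return 0;
    }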
@@ -491,7 +408,7 @@ static void sh_mmcif_sync_reset(struct sh_mmcif_host *host) | |||
491 | static int sh_mmcif_error_manage(struct sh_mmcif_host *host) | 408 | static int sh_mmcif_error_manage(struct sh_mmcif_host *host) |
492 | { | 409 | { |
493 | u32 state1, state2; | 410 | u32 state1, state2; |
494 | int ret, timeout; | 411 | int ret, timeout = 10000000; |
495 | 412 | ||
496 | host->sd_error = false; | 413 | host->sd_error = false; |
497 | 414 | ||
@@ -503,212 +420,155 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host) | |||
503 | if (state1 & STS1_CMDSEQ) { | 420 | if (state1 & STS1_CMDSEQ) { |
504 | sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK); | 421 | sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK); |
505 | sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK); | 422 | sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK); |
506 | for (timeout = 10000000; timeout; timeout--) { | 423 | while (1) { |
424 | timeout--; | ||
425 | if (timeout < 0) { | ||
426 | dev_err(&host->pd->dev, | ||
427 | "Forceed end of command sequence timeout err\n"); | ||
428 | return -EIO; | ||
429 | } | ||
507 | if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1) | 430 | if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1) |
508 | & STS1_CMDSEQ)) | 431 | & STS1_CMDSEQ)) |
509 | break; | 432 | break; |
510 | mdelay(1); | 433 | mdelay(1); |
511 | } | 434 | } |
512 | if (!timeout) { | ||
513 | dev_err(&host->pd->dev, | ||
514 | "Forced end of command sequence timeout err\n"); | ||
515 | return -EIO; | ||
516 | } | ||
517 | sh_mmcif_sync_reset(host); | 435 | sh_mmcif_sync_reset(host); |
518 | dev_dbg(&host->pd->dev, "Forced end of command sequence\n"); | 436 | dev_dbg(&host->pd->dev, "Forced end of command sequence\n"); |
519 | return -EIO; | 437 | return -EIO; |
520 | } | 438 | } |
521 | 439 | ||
522 | if (state2 & STS2_CRC_ERR) { | 440 | if (state2 & STS2_CRC_ERR) { |
523 | dev_dbg(&host->pd->dev, ": CRC error\n"); | 441 | dev_dbg(&host->pd->dev, ": Happened CRC error\n"); |
524 | ret = -EIO; | 442 | ret = -EIO; |
525 | } else if (state2 & STS2_TIMEOUT_ERR) { | 443 | } else if (state2 & STS2_TIMEOUT_ERR) { |
526 | dev_dbg(&host->pd->dev, ": Timeout\n"); | 444 | dev_dbg(&host->pd->dev, ": Happened Timeout error\n"); |
527 | ret = -ETIMEDOUT; | 445 | ret = -ETIMEDOUT; |
528 | } else { | 446 | } else { |
529 | dev_dbg(&host->pd->dev, ": End/Index error\n"); | 447 | dev_dbg(&host->pd->dev, ": Happened End/Index error\n"); |
530 | ret = -EIO; | 448 | ret = -EIO; |
531 | } | 449 | } |
532 | return ret; | 450 | return ret; |
533 | } | 451 | } |
534 | 452 | ||
535 | static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p) | 453 | static int sh_mmcif_single_read(struct sh_mmcif_host *host, |
536 | { | 454 | struct mmc_request *mrq) |
537 | struct mmc_data *data = host->mrq->data; | ||
538 | |||
539 | host->sg_blkidx += host->blocksize; | ||
540 | |||
541 | /* data->sg->length must be a multiple of host->blocksize? */ | ||
542 | BUG_ON(host->sg_blkidx > data->sg->length); | ||
543 | |||
544 | if (host->sg_blkidx == data->sg->length) { | ||
545 | host->sg_blkidx = 0; | ||
546 | if (++host->sg_idx < data->sg_len) | ||
547 | host->pio_ptr = sg_virt(++data->sg); | ||
548 | } else { | ||
549 | host->pio_ptr = p; | ||
550 | } | ||
551 | |||
552 | if (host->sg_idx == data->sg_len) | ||
553 | return false; | ||
554 | |||
555 | return true; | ||
556 | } | ||
557 | |||
558 | static void sh_mmcif_single_read(struct sh_mmcif_host *host, | ||
559 | struct mmc_request *mrq) | ||
560 | { | 455 | { |
561 | host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & | 456 | struct mmc_data *data = mrq->data; |
562 | BLOCK_SIZE_MASK) + 3; | 457 | long time; |
563 | 458 | u32 blocksize, i, *p = sg_virt(data->sg); | |
564 | host->wait_for = MMCIF_WAIT_FOR_READ; | ||
565 | schedule_delayed_work(&host->timeout_work, host->timeout); | ||
566 | 459 | ||
567 | /* buf read enable */ | 460 | /* buf read enable */ |
568 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); | 461 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); |
569 | } | 462 | time = wait_for_completion_interruptible_timeout(&host->intr_wait, |
570 | 463 | host->timeout); | |
571 | static bool sh_mmcif_read_block(struct sh_mmcif_host *host) | 464 | if (time <= 0 || host->sd_error) |
572 | { | 465 | return sh_mmcif_error_manage(host); |
573 | struct mmc_data *data = host->mrq->data; | 466 | |
574 | u32 *p = sg_virt(data->sg); | 467 | blocksize = (BLOCK_SIZE_MASK & |
575 | int i; | 468 | sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3; |
576 | 469 | for (i = 0; i < blocksize / 4; i++) | |
577 | if (host->sd_error) { | ||
578 | data->error = sh_mmcif_error_manage(host); | ||
579 | return false; | ||
580 | } | ||
581 | |||
582 | for (i = 0; i < host->blocksize / 4; i++) | ||
583 | *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); | 470 | *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); |
584 | 471 | ||
585 | /* buffer read end */ | 472 | /* buffer read end */ |
586 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); | 473 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); |
587 | host->wait_for = MMCIF_WAIT_FOR_READ_END; | 474 | time = wait_for_completion_interruptible_timeout(&host->intr_wait, |
475 | host->timeout); | ||
476 | if (time <= 0 || host->sd_error) | ||
477 | return sh_mmcif_error_manage(host); | ||
588 | 478 | ||
589 | return true; | 479 | return 0; |
590 | } | 480 | } |
591 | 481 | ||
592 | static void sh_mmcif_multi_read(struct sh_mmcif_host *host, | 482 | static int sh_mmcif_multi_read(struct sh_mmcif_host *host, |
593 | struct mmc_request *mrq) | 483 | struct mmc_request *mrq) |
594 | { | 484 | { |
595 | struct mmc_data *data = mrq->data; | 485 | struct mmc_data *data = mrq->data; |
596 | 486 | long time; | |
597 | if (!data->sg_len || !data->sg->length) | 487 | u32 blocksize, i, j, sec, *p; |
598 | return; | 488 | |
599 | 489 | blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr, | |
600 | host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & | 490 | MMCIF_CE_BLOCK_SET); |
601 | BLOCK_SIZE_MASK; | 491 | for (j = 0; j < data->sg_len; j++) { |
602 | 492 | p = sg_virt(data->sg); | |
603 | host->wait_for = MMCIF_WAIT_FOR_MREAD; | 493 | for (sec = 0; sec < data->sg->length / blocksize; sec++) { |
604 | host->sg_idx = 0; | 494 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); |
605 | host->sg_blkidx = 0; | 495 | /* buf read enable */ |
606 | host->pio_ptr = sg_virt(data->sg); | 496 | time = wait_for_completion_interruptible_timeout(&host->intr_wait, |
607 | schedule_delayed_work(&host->timeout_work, host->timeout); | 497 | host->timeout); |
608 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); | 498 | |
609 | } | 499 | if (time <= 0 || host->sd_error) |
610 | 500 | return sh_mmcif_error_manage(host); | |
611 | static bool sh_mmcif_mread_block(struct sh_mmcif_host *host) | 501 | |
612 | { | 502 | for (i = 0; i < blocksize / 4; i++) |
613 | struct mmc_data *data = host->mrq->data; | 503 | *p++ = sh_mmcif_readl(host->addr, |
614 | u32 *p = host->pio_ptr; | 504 | MMCIF_CE_DATA); |
615 | int i; | 505 | } |
616 | 506 | if (j < data->sg_len - 1) | |
617 | if (host->sd_error) { | 507 | data->sg++; |
618 | data->error = sh_mmcif_error_manage(host); | ||
619 | return false; | ||
620 | } | 508 | } |
621 | 509 | return 0; | |
622 | BUG_ON(!data->sg->length); | ||
623 | |||
624 | for (i = 0; i < host->blocksize / 4; i++) | ||
625 | *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); | ||
626 | |||
627 | if (!sh_mmcif_next_block(host, p)) | ||
628 | return false; | ||
629 | |||
630 | schedule_delayed_work(&host->timeout_work, host->timeout); | ||
631 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); | ||
632 | |||
633 | return true; | ||
634 | } | 510 | } |
635 | 511 | ||
636 | static void sh_mmcif_single_write(struct sh_mmcif_host *host, | 512 | static int sh_mmcif_single_write(struct sh_mmcif_host *host, |
637 | struct mmc_request *mrq) | 513 | struct mmc_request *mrq) |
638 | { | 514 | { |
639 | host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & | 515 | struct mmc_data *data = mrq->data; |
640 | BLOCK_SIZE_MASK) + 3; | 516 | long time; |
641 | 517 | u32 blocksize, i, *p = sg_virt(data->sg); | |
642 | host->wait_for = MMCIF_WAIT_FOR_WRITE; | ||
643 | schedule_delayed_work(&host->timeout_work, host->timeout); | ||
644 | 518 | ||
645 | /* buf write enable */ | ||
646 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); | 519 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); |
647 | } | ||
648 | |||
649 | static bool sh_mmcif_write_block(struct sh_mmcif_host *host) | ||
650 | { | ||
651 | struct mmc_data *data = host->mrq->data; | ||
652 | u32 *p = sg_virt(data->sg); | ||
653 | int i; | ||
654 | |||
655 | if (host->sd_error) { | ||
656 | data->error = sh_mmcif_error_manage(host); | ||
657 | return false; | ||
658 | } | ||
659 | 520 | ||
660 | for (i = 0; i < host->blocksize / 4; i++) | 521 | /* buf write enable */ |
522 | time = wait_for_completion_interruptible_timeout(&host->intr_wait, | ||
523 | host->timeout); | ||
524 | if (time <= 0 || host->sd_error) | ||
525 | return sh_mmcif_error_manage(host); | ||
526 | |||
527 | blocksize = (BLOCK_SIZE_MASK & | ||
528 | sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3; | ||
529 | for (i = 0; i < blocksize / 4; i++) | ||
661 | sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); | 530 | sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); |
662 | 531 | ||
663 | /* buffer write end */ | 532 | /* buffer write end */ |
664 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); | 533 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); |
665 | host->wait_for = MMCIF_WAIT_FOR_WRITE_END; | ||
666 | 534 | ||
667 | return true; | 535 | time = wait_for_completion_interruptible_timeout(&host->intr_wait, |
536 | host->timeout); | ||
537 | if (time <= 0 || host->sd_error) | ||
538 | return sh_mmcif_error_manage(host); | ||
539 | |||
540 | return 0; | ||
668 | } | 541 | } |
669 | 542 | ||
670 | static void sh_mmcif_multi_write(struct sh_mmcif_host *host, | 543 | static int sh_mmcif_multi_write(struct sh_mmcif_host *host, |
671 | struct mmc_request *mrq) | 544 | struct mmc_request *mrq) |
672 | { | 545 | { |
673 | struct mmc_data *data = mrq->data; | 546 | struct mmc_data *data = mrq->data; |
547 | long time; | ||
548 | u32 i, sec, j, blocksize, *p; | ||
674 | 549 | ||
675 | if (!data->sg_len || !data->sg->length) | 550 | blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr, |
676 | return; | 551 | MMCIF_CE_BLOCK_SET); |
677 | |||
678 | host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & | ||
679 | BLOCK_SIZE_MASK; | ||
680 | 552 | ||
681 | host->wait_for = MMCIF_WAIT_FOR_MWRITE; | 553 | for (j = 0; j < data->sg_len; j++) { |
682 | host->sg_idx = 0; | 554 | p = sg_virt(data->sg); |
683 | host->sg_blkidx = 0; | 555 | for (sec = 0; sec < data->sg->length / blocksize; sec++) { |
684 | host->pio_ptr = sg_virt(data->sg); | 556 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); |
685 | schedule_delayed_work(&host->timeout_work, host->timeout); | 557 | /* buf write enable*/ |
686 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); | 558 | time = wait_for_completion_interruptible_timeout(&host->intr_wait, |
687 | } | 559 | host->timeout); |
688 | 560 | ||
689 | static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host) | 561 | if (time <= 0 || host->sd_error) |
690 | { | 562 | return sh_mmcif_error_manage(host); |
691 | struct mmc_data *data = host->mrq->data; | ||
692 | u32 *p = host->pio_ptr; | ||
693 | int i; | ||
694 | 563 | ||
695 | if (host->sd_error) { | 564 | for (i = 0; i < blocksize / 4; i++) |
696 | data->error = sh_mmcif_error_manage(host); | 565 | sh_mmcif_writel(host->addr, |
697 | return false; | 566 | MMCIF_CE_DATA, *p++); |
567 | } | ||
568 | if (j < data->sg_len - 1) | ||
569 | data->sg++; | ||
698 | } | 570 | } |
699 | 571 | return 0; | |
700 | BUG_ON(!data->sg->length); | ||
701 | |||
702 | for (i = 0; i < host->blocksize / 4; i++) | ||
703 | sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); | ||
704 | |||
705 | if (!sh_mmcif_next_block(host, p)) | ||
706 | return false; | ||
707 | |||
708 | schedule_delayed_work(&host->timeout_work, host->timeout); | ||
709 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); | ||
710 | |||
711 | return true; | ||
712 | } | 572 | } |
713 | 573 | ||
714 | static void sh_mmcif_get_response(struct sh_mmcif_host *host, | 574 | static void sh_mmcif_get_response(struct sh_mmcif_host *host, |
@@ -730,11 +590,8 @@ static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host, | |||
730 | } | 590 | } |
731 | 591 | ||
732 | static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, | 592 | static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, |
733 | struct mmc_request *mrq) | 593 | struct mmc_request *mrq, struct mmc_command *cmd, u32 opc) |
734 | { | 594 | { |
735 | struct mmc_data *data = mrq->data; | ||
736 | struct mmc_command *cmd = mrq->cmd; | ||
737 | u32 opc = cmd->opcode; | ||
738 | u32 tmp = 0; | 595 | u32 tmp = 0; |
739 | 596 | ||
740 | /* Response Type check */ | 597 | /* Response Type check */ |
@@ -761,11 +618,12 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, | |||
761 | case MMC_SET_WRITE_PROT: | 618 | case MMC_SET_WRITE_PROT: |
762 | case MMC_CLR_WRITE_PROT: | 619 | case MMC_CLR_WRITE_PROT: |
763 | case MMC_ERASE: | 620 | case MMC_ERASE: |
621 | case MMC_GEN_CMD: | ||
764 | tmp |= CMD_SET_RBSY; | 622 | tmp |= CMD_SET_RBSY; |
765 | break; | 623 | break; |
766 | } | 624 | } |
767 | /* WDAT / DATW */ | 625 | /* WDAT / DATW */ |
768 | if (data) { | 626 | if (host->data) { |
769 | tmp |= CMD_SET_WDAT; | 627 | tmp |= CMD_SET_WDAT; |
770 | switch (host->bus_width) { | 628 | switch (host->bus_width) { |
771 | case MMC_BUS_WIDTH_1: | 629 | case MMC_BUS_WIDTH_1: |
@@ -789,7 +647,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, | |||
789 | if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) { | 647 | if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) { |
790 | tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN; | 648 | tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN; |
791 | sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET, | 649 | sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET, |
792 | data->blocks << 16); | 650 | mrq->data->blocks << 16); |
793 | } | 651 | } |
794 | /* RIDXC[1:0] check bits */ | 652 | /* RIDXC[1:0] check bits */ |
795 | if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID || | 653 | if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID || |
@@ -803,59 +661,68 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, | |||
803 | opc == MMC_SEND_CSD || opc == MMC_SEND_CID) | 661 | opc == MMC_SEND_CSD || opc == MMC_SEND_CID) |
804 | tmp |= CMD_SET_CRC7C_INTERNAL; | 662 | tmp |= CMD_SET_CRC7C_INTERNAL; |
805 | 663 | ||
806 | return (opc << 24) | tmp; | 664 | return opc = ((opc << 24) | tmp); |
807 | } | 665 | } |
808 | 666 | ||
809 | static int sh_mmcif_data_trans(struct sh_mmcif_host *host, | 667 | static int sh_mmcif_data_trans(struct sh_mmcif_host *host, |
810 | struct mmc_request *mrq, u32 opc) | 668 | struct mmc_request *mrq, u32 opc) |
811 | { | 669 | { |
670 | int ret; | ||
671 | |||
812 | switch (opc) { | 672 | switch (opc) { |
813 | case MMC_READ_MULTIPLE_BLOCK: | 673 | case MMC_READ_MULTIPLE_BLOCK: |
814 | sh_mmcif_multi_read(host, mrq); | 674 | ret = sh_mmcif_multi_read(host, mrq); |
815 | return 0; | 675 | break; |
816 | case MMC_WRITE_MULTIPLE_BLOCK: | 676 | case MMC_WRITE_MULTIPLE_BLOCK: |
817 | sh_mmcif_multi_write(host, mrq); | 677 | ret = sh_mmcif_multi_write(host, mrq); |
818 | return 0; | 678 | break; |
819 | case MMC_WRITE_BLOCK: | 679 | case MMC_WRITE_BLOCK: |
820 | sh_mmcif_single_write(host, mrq); | 680 | ret = sh_mmcif_single_write(host, mrq); |
821 | return 0; | 681 | break; |
822 | case MMC_READ_SINGLE_BLOCK: | 682 | case MMC_READ_SINGLE_BLOCK: |
823 | case MMC_SEND_EXT_CSD: | 683 | case MMC_SEND_EXT_CSD: |
824 | sh_mmcif_single_read(host, mrq); | 684 | ret = sh_mmcif_single_read(host, mrq); |
825 | return 0; | 685 | break; |
826 | default: | 686 | default: |
827 | dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc); | 687 | dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc); |
828 | return -EINVAL; | 688 | ret = -EINVAL; |
689 | break; | ||
829 | } | 690 | } |
691 | return ret; | ||
830 | } | 692 | } |
831 | 693 | ||
832 | static void sh_mmcif_start_cmd(struct sh_mmcif_host *host, | 694 | static void sh_mmcif_start_cmd(struct sh_mmcif_host *host, |
833 | struct mmc_request *mrq) | 695 | struct mmc_request *mrq, struct mmc_command *cmd) |
834 | { | 696 | { |
835 | struct mmc_command *cmd = mrq->cmd; | 697 | long time; |
698 | int ret = 0, mask = 0; | ||
836 | u32 opc = cmd->opcode; | 699 | u32 opc = cmd->opcode; |
837 | u32 mask; | ||
838 | 700 | ||
839 | switch (opc) { | 701 | switch (opc) { |
840 | /* response busy check */ | 702 | /* respons busy check */ |
841 | case MMC_SWITCH: | 703 | case MMC_SWITCH: |
842 | case MMC_STOP_TRANSMISSION: | 704 | case MMC_STOP_TRANSMISSION: |
843 | case MMC_SET_WRITE_PROT: | 705 | case MMC_SET_WRITE_PROT: |
844 | case MMC_CLR_WRITE_PROT: | 706 | case MMC_CLR_WRITE_PROT: |
845 | case MMC_ERASE: | 707 | case MMC_ERASE: |
846 | mask = MASK_START_CMD | MASK_MRBSYE; | 708 | case MMC_GEN_CMD: |
709 | mask = MASK_MRBSYE; | ||
847 | break; | 710 | break; |
848 | default: | 711 | default: |
849 | mask = MASK_START_CMD | MASK_MCRSPE; | 712 | mask = MASK_MCRSPE; |
850 | break; | 713 | break; |
851 | } | 714 | } |
715 | mask |= MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | | ||
716 | MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | | ||
717 | MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | | ||
718 | MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO; | ||
852 | 719 | ||
853 | if (mrq->data) { | 720 | if (host->data) { |
854 | sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0); | 721 | sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0); |
855 | sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, | 722 | sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, |
856 | mrq->data->blksz); | 723 | mrq->data->blksz); |
857 | } | 724 | } |
858 | opc = sh_mmcif_set_cmd(host, mrq); | 725 | opc = sh_mmcif_set_cmd(host, mrq, cmd, opc); |
859 | 726 | ||
860 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0); | 727 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0); |
861 | sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask); | 728 | sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask); |
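In the sh_mmcif_set_cmd() hunk above, the command index is packed into bits 31:24 of CE_CMD_SET and the response/data/bus-width option bits are ORed into the low bits; the left and right columns differ only in whether the result is returned directly or assigned back to opc first. A worked example of that packing, with placeholder flag values rather than the real CMD_SET_* definitions:

    /* Worked example of the CE_CMD_SET packing: 6-bit command index in
     * bits 31:24, option flags in the low bits. */
    #include <stdint.h>
    #include <stdio.h>

    #define CMD_SET_WDAT   (1u << 19)   /* placeholder "has data" bit   */
    #define CMD_SET_CMLTE  (1u << 12)   /* placeholder "multi block" bit */

    static uint32_t pack_cmd(uint32_t opcode, uint32_t options)
    {
        return (opcode << 24) | options;
    }

    int main(void)
    {
        /* CMD18 (READ_MULTIPLE_BLOCK) with data + multi-block options */
        uint32_t reg = pack_cmd(18, CMD_SET_WDAT | CMD_SET_CMLTE);

        printf("CE_CMD_SET value: 0x%08x\n", reg);   /* 0x12081000 */
        return 0;
    }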
@@ -864,28 +731,80 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host, | |||
864 | /* set cmd */ | 731 | /* set cmd */ |
865 | sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc); | 732 | sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc); |
866 | 733 | ||
867 | host->wait_for = MMCIF_WAIT_FOR_CMD; | 734 | time = wait_for_completion_interruptible_timeout(&host->intr_wait, |
868 | schedule_delayed_work(&host->timeout_work, host->timeout); | 735 | host->timeout); |
736 | if (time <= 0) { | ||
737 | cmd->error = sh_mmcif_error_manage(host); | ||
738 | return; | ||
739 | } | ||
740 | if (host->sd_error) { | ||
741 | switch (cmd->opcode) { | ||
742 | case MMC_ALL_SEND_CID: | ||
743 | case MMC_SELECT_CARD: | ||
744 | case MMC_APP_CMD: | ||
745 | cmd->error = -ETIMEDOUT; | ||
746 | break; | ||
747 | default: | ||
748 | dev_dbg(&host->pd->dev, "Cmd(d'%d) err\n", | ||
749 | cmd->opcode); | ||
750 | cmd->error = sh_mmcif_error_manage(host); | ||
751 | break; | ||
752 | } | ||
753 | host->sd_error = false; | ||
754 | return; | ||
755 | } | ||
756 | if (!(cmd->flags & MMC_RSP_PRESENT)) { | ||
757 | cmd->error = 0; | ||
758 | return; | ||
759 | } | ||
760 | sh_mmcif_get_response(host, cmd); | ||
761 | if (host->data) { | ||
762 | if (!host->dma_active) { | ||
763 | ret = sh_mmcif_data_trans(host, mrq, cmd->opcode); | ||
764 | } else { | ||
765 | long time = | ||
766 | wait_for_completion_interruptible_timeout(&host->dma_complete, | ||
767 | host->timeout); | ||
768 | if (!time) | ||
769 | ret = -ETIMEDOUT; | ||
770 | else if (time < 0) | ||
771 | ret = time; | ||
772 | sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, | ||
773 | BUF_ACC_DMAREN | BUF_ACC_DMAWEN); | ||
774 | host->dma_active = false; | ||
775 | } | ||
776 | if (ret < 0) | ||
777 | mrq->data->bytes_xfered = 0; | ||
778 | else | ||
779 | mrq->data->bytes_xfered = | ||
780 | mrq->data->blocks * mrq->data->blksz; | ||
781 | } | ||
782 | cmd->error = ret; | ||
869 | } | 783 | } |
870 | 784 | ||
871 | static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host, | 785 | static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host, |
872 | struct mmc_request *mrq) | 786 | struct mmc_request *mrq, struct mmc_command *cmd) |
873 | { | 787 | { |
874 | switch (mrq->cmd->opcode) { | 788 | long time; |
875 | case MMC_READ_MULTIPLE_BLOCK: | 789 | |
790 | if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK) | ||
876 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE); | 791 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE); |
877 | break; | 792 | else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) |
878 | case MMC_WRITE_MULTIPLE_BLOCK: | ||
879 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); | 793 | sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); |
880 | break; | 794 | else { |
881 | default: | ||
882 | dev_err(&host->pd->dev, "unsupported stop cmd\n"); | 795 | dev_err(&host->pd->dev, "unsupported stop cmd\n"); |
883 | mrq->stop->error = sh_mmcif_error_manage(host); | 796 | cmd->error = sh_mmcif_error_manage(host); |
884 | return; | 797 | return; |
885 | } | 798 | } |
886 | 799 | ||
887 | host->wait_for = MMCIF_WAIT_FOR_STOP; | 800 | time = wait_for_completion_interruptible_timeout(&host->intr_wait, |
888 | schedule_delayed_work(&host->timeout_work, host->timeout); | 801 | host->timeout); |
802 | if (time <= 0 || host->sd_error) { | ||
803 | cmd->error = sh_mmcif_error_manage(host); | ||
804 | return; | ||
805 | } | ||
806 | sh_mmcif_get_cmd12response(host, cmd); | ||
807 | cmd->error = 0; | ||
889 | } | 808 | } |
890 | 809 | ||
891 | static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) | 810 | static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) |
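Editor's note: the right-hand version of sh_mmcif_start_cmd() completes a command synchronously by sleeping on a completion object that the interrupt handler signals, with a jiffies timeout. A minimal sketch of that pattern follows; all names (my_host, my_irq, my_wait_cmd, cmd_done) are hypothetical and not part of this driver.

    #include <linux/completion.h>
    #include <linux/interrupt.h>
    #include <linux/errno.h>

    struct my_host {
            struct completion cmd_done;     /* signalled from the IRQ handler */
            unsigned long timeout;          /* wait limit, in jiffies */
            bool sd_error;                  /* set by the IRQ handler on error */
    };

    /* Interrupt context: acknowledge the event and wake the waiter. */
    static irqreturn_t my_irq(int irq, void *dev_id)
    {
            struct my_host *host = dev_id;

            /* ...read and clear the controller status here... */
            complete(&host->cmd_done);
            return IRQ_HANDLED;
    }

    /* Process context: issue the command, then sleep until the IRQ fires. */
    static int my_wait_cmd(struct my_host *host)
    {
            long time;

            time = wait_for_completion_interruptible_timeout(&host->cmd_done,
                                                             host->timeout);
            if (time == 0)
                    return -ETIMEDOUT;      /* no interrupt within the timeout */
            if (time < 0)
                    return time;            /* interrupted by a signal */
            if (host->sd_error)
                    return -EIO;            /* IRQ handler flagged an error */
            return 0;
    }

The left-hand version avoids sleeping in the request path altogether: it records a wait state, arms a delayed work item as a timeout, and lets a threaded interrupt handler drive the state machine instead.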
@@ -906,53 +825,47 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
906 | 825 | ||
907 | switch (mrq->cmd->opcode) { | 826 | switch (mrq->cmd->opcode) { |
908 | /* MMCIF does not support SD/SDIO command */ | 827 | /* MMCIF does not support SD/SDIO command */ |
909 | case MMC_SLEEP_AWAKE: /* = SD_IO_SEND_OP_COND (5) */ | 828 | case SD_IO_SEND_OP_COND: |
910 | case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */ | ||
911 | if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR) | ||
912 | break; | ||
913 | case MMC_APP_CMD: | 829 | case MMC_APP_CMD: |
914 | host->state = STATE_IDLE; | 830 | host->state = STATE_IDLE; |
915 | mrq->cmd->error = -ETIMEDOUT; | 831 | mrq->cmd->error = -ETIMEDOUT; |
916 | mmc_request_done(mmc, mrq); | 832 | mmc_request_done(mmc, mrq); |
917 | return; | 833 | return; |
834 | case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */ | ||
835 | if (!mrq->data) { | ||
836 | /* send_if_cond cmd (not support) */ | ||
837 | host->state = STATE_IDLE; | ||
838 | mrq->cmd->error = -ETIMEDOUT; | ||
839 | mmc_request_done(mmc, mrq); | ||
840 | return; | ||
841 | } | ||
842 | break; | ||
918 | default: | 843 | default: |
919 | break; | 844 | break; |
920 | } | 845 | } |
921 | 846 | host->data = mrq->data; | |
922 | host->mrq = mrq; | 847 | if (mrq->data) { |
923 | 848 | if (mrq->data->flags & MMC_DATA_READ) { | |
924 | sh_mmcif_start_cmd(host, mrq); | 849 | if (host->chan_rx) |
925 | } | 850 | sh_mmcif_start_dma_rx(host); |
926 | 851 | } else { | |
927 | static int sh_mmcif_clk_update(struct sh_mmcif_host *host) | 852 | if (host->chan_tx) |
928 | { | 853 | sh_mmcif_start_dma_tx(host); |
929 | int ret = clk_enable(host->hclk); | 854 | } |
930 | |||
931 | if (!ret) { | ||
932 | host->clk = clk_get_rate(host->hclk); | ||
933 | host->mmc->f_max = host->clk / 2; | ||
934 | host->mmc->f_min = host->clk / 512; | ||
935 | } | 855 | } |
856 | sh_mmcif_start_cmd(host, mrq, mrq->cmd); | ||
857 | host->data = NULL; | ||
936 | 858 | ||
937 | return ret; | 859 | if (!mrq->cmd->error && mrq->stop) |
938 | } | 860 | sh_mmcif_stop_cmd(host, mrq, mrq->stop); |
939 | 861 | host->state = STATE_IDLE; | |
940 | static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios) | 862 | mmc_request_done(mmc, mrq); |
941 | { | ||
942 | struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data; | ||
943 | struct mmc_host *mmc = host->mmc; | ||
944 | |||
945 | if (pd && pd->set_pwr) | ||
946 | pd->set_pwr(host->pd, ios->power_mode != MMC_POWER_OFF); | ||
947 | if (!IS_ERR(mmc->supply.vmmc)) | ||
948 | /* Errors ignored... */ | ||
949 | mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, | ||
950 | ios->power_mode ? ios->vdd : 0); | ||
951 | } | 863 | } |
952 | 864 | ||
953 | static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | 865 | static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) |
954 | { | 866 | { |
955 | struct sh_mmcif_host *host = mmc_priv(mmc); | 867 | struct sh_mmcif_host *host = mmc_priv(mmc); |
868 | struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; | ||
956 | unsigned long flags; | 869 | unsigned long flags; |
957 | 870 | ||
958 | spin_lock_irqsave(&host->lock, flags); | 871 | spin_lock_irqsave(&host->lock, flags); |
@@ -970,7 +883,6 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
970 | sh_mmcif_request_dma(host, host->pd->dev.platform_data); | 883 | sh_mmcif_request_dma(host, host->pd->dev.platform_data); |
971 | host->card_present = true; | 884 | host->card_present = true; |
972 | } | 885 | } |
973 | sh_mmcif_set_power(host, ios); | ||
974 | } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { | 886 | } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { |
975 | /* clock stop */ | 887 | /* clock stop */ |
976 | sh_mmcif_clock_control(host, 0); | 888 | sh_mmcif_clock_control(host, 0); |
@@ -982,10 +894,9 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
982 | } | 894 | } |
983 | if (host->power) { | 895 | if (host->power) { |
984 | pm_runtime_put(&host->pd->dev); | 896 | pm_runtime_put(&host->pd->dev); |
985 | clk_disable(host->hclk); | ||
986 | host->power = false; | 897 | host->power = false; |
987 | if (ios->power_mode == MMC_POWER_OFF) | 898 | if (p->down_pwr) |
988 | sh_mmcif_set_power(host, ios); | 899 | p->down_pwr(host->pd); |
989 | } | 900 | } |
990 | host->state = STATE_IDLE; | 901 | host->state = STATE_IDLE; |
991 | return; | 902 | return; |
@@ -993,7 +904,8 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
993 | 904 | ||
994 | if (ios->clock) { | 905 | if (ios->clock) { |
995 | if (!host->power) { | 906 | if (!host->power) { |
996 | sh_mmcif_clk_update(host); | 907 | if (p->set_pwr) |
908 | p->set_pwr(host->pd, ios->power_mode); | ||
997 | pm_runtime_get_sync(&host->pd->dev); | 909 | pm_runtime_get_sync(&host->pd->dev); |
998 | host->power = true; | 910 | host->power = true; |
999 | sh_mmcif_sync_reset(host); | 911 | sh_mmcif_sync_reset(host); |
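Editor's note: in this hunk the right-hand set_ios() path calls power hooks supplied through platform data (set_pwr/down_pwr), while the left-hand path routes power through a helper that prefers a vmmc regulator. A rough sketch of the platform-data style is shown below; the structure, GPIO number, and function names are invented for illustration and do not reflect the real sh_mmcif platform data layout.

    #include <linux/platform_device.h>
    #include <linux/gpio.h>

    #define BOARD_MMC_PWR_GPIO 130          /* hypothetical slot-power pin */

    /* Hypothetical platform data: board code supplies the power hooks. */
    struct my_mmc_pdata {
            void (*set_pwr)(struct platform_device *pdev, int state);
            void (*down_pwr)(struct platform_device *pdev);
    };

    /* Board file: toggle whatever feeds the card slot (GPIO assumed here). */
    static void board_mmc_set_pwr(struct platform_device *pdev, int state)
    {
            gpio_set_value(BOARD_MMC_PWR_GPIO, !!state);
    }

    static void board_mmc_down_pwr(struct platform_device *pdev)
    {
            gpio_set_value(BOARD_MMC_PWR_GPIO, 0);
    }

    static struct my_mmc_pdata board_mmc_pdata = {
            .set_pwr  = board_mmc_set_pwr,
            .down_pwr = board_mmc_down_pwr,
    };

The regulator-based form (mmc_regulator_set_ocr on the left) keeps the same decision in the MMC core and only falls back to a board callback when no supply is described.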
@@ -1009,12 +921,8 @@ static int sh_mmcif_get_cd(struct mmc_host *mmc) | |||
1009 | { | 921 | { |
1010 | struct sh_mmcif_host *host = mmc_priv(mmc); | 922 | struct sh_mmcif_host *host = mmc_priv(mmc); |
1011 | struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; | 923 | struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; |
1012 | int ret = mmc_gpio_get_cd(mmc); | ||
1013 | 924 | ||
1014 | if (ret >= 0) | 925 | if (!p->get_cd) |
1015 | return ret; | ||
1016 | |||
1017 | if (!p || !p->get_cd) | ||
1018 | return -ENOSYS; | 926 | return -ENOSYS; |
1019 | else | 927 | else |
1020 | return p->get_cd(host->pd); | 928 | return p->get_cd(host->pd); |
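Editor's note: the left-hand get_cd() tries a slot-GPIO first and only then the platform callback, returning -ENOSYS when neither exists so the core falls back to polling. A compact sketch of that fallback chain, with a hypothetical board callback passed in as a parameter:

    #include <linux/errno.h>
    #include <linux/mmc/host.h>
    #include <linux/mmc/slot-gpio.h>

    static int my_get_cd(struct mmc_host *mmc, int (*board_get_cd)(void))
    {
            int ret = mmc_gpio_get_cd(mmc);  /* >= 0 when a CD GPIO is set up */

            if (ret >= 0)
                    return ret;

            if (!board_get_cd)
                    return -ENOSYS;          /* let the core poll for the card */

            return board_get_cd();
    }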
@@ -1026,156 +934,9 @@ static struct mmc_host_ops sh_mmcif_ops = { | |||
1026 | .get_cd = sh_mmcif_get_cd, | 934 | .get_cd = sh_mmcif_get_cd, |
1027 | }; | 935 | }; |
1028 | 936 | ||
1029 | static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host) | 937 | static void sh_mmcif_detect(struct mmc_host *mmc) |
1030 | { | 938 | { |
1031 | struct mmc_command *cmd = host->mrq->cmd; | 939 | mmc_detect_change(mmc, 0); |
1032 | struct mmc_data *data = host->mrq->data; | ||
1033 | long time; | ||
1034 | |||
1035 | if (host->sd_error) { | ||
1036 | switch (cmd->opcode) { | ||
1037 | case MMC_ALL_SEND_CID: | ||
1038 | case MMC_SELECT_CARD: | ||
1039 | case MMC_APP_CMD: | ||
1040 | cmd->error = -ETIMEDOUT; | ||
1041 | host->sd_error = false; | ||
1042 | break; | ||
1043 | default: | ||
1044 | cmd->error = sh_mmcif_error_manage(host); | ||
1045 | dev_dbg(&host->pd->dev, "Cmd(d'%d) error %d\n", | ||
1046 | cmd->opcode, cmd->error); | ||
1047 | break; | ||
1048 | } | ||
1049 | return false; | ||
1050 | } | ||
1051 | if (!(cmd->flags & MMC_RSP_PRESENT)) { | ||
1052 | cmd->error = 0; | ||
1053 | return false; | ||
1054 | } | ||
1055 | |||
1056 | sh_mmcif_get_response(host, cmd); | ||
1057 | |||
1058 | if (!data) | ||
1059 | return false; | ||
1060 | |||
1061 | if (data->flags & MMC_DATA_READ) { | ||
1062 | if (host->chan_rx) | ||
1063 | sh_mmcif_start_dma_rx(host); | ||
1064 | } else { | ||
1065 | if (host->chan_tx) | ||
1066 | sh_mmcif_start_dma_tx(host); | ||
1067 | } | ||
1068 | |||
1069 | if (!host->dma_active) { | ||
1070 | data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode); | ||
1071 | if (!data->error) | ||
1072 | return true; | ||
1073 | return false; | ||
1074 | } | ||
1075 | |||
1076 | /* Running in the IRQ thread, can sleep */ | ||
1077 | time = wait_for_completion_interruptible_timeout(&host->dma_complete, | ||
1078 | host->timeout); | ||
1079 | if (host->sd_error) { | ||
1080 | dev_err(host->mmc->parent, | ||
1081 | "Error IRQ while waiting for DMA completion!\n"); | ||
1082 | /* Woken up by an error IRQ: abort DMA */ | ||
1083 | if (data->flags & MMC_DATA_READ) | ||
1084 | dmaengine_terminate_all(host->chan_rx); | ||
1085 | else | ||
1086 | dmaengine_terminate_all(host->chan_tx); | ||
1087 | data->error = sh_mmcif_error_manage(host); | ||
1088 | } else if (!time) { | ||
1089 | data->error = -ETIMEDOUT; | ||
1090 | } else if (time < 0) { | ||
1091 | data->error = time; | ||
1092 | } | ||
1093 | sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, | ||
1094 | BUF_ACC_DMAREN | BUF_ACC_DMAWEN); | ||
1095 | host->dma_active = false; | ||
1096 | |||
1097 | if (data->error) | ||
1098 | data->bytes_xfered = 0; | ||
1099 | |||
1100 | return false; | ||
1101 | } | ||
1102 | |||
1103 | static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) | ||
1104 | { | ||
1105 | struct sh_mmcif_host *host = dev_id; | ||
1106 | struct mmc_request *mrq = host->mrq; | ||
1107 | |||
1108 | cancel_delayed_work_sync(&host->timeout_work); | ||
1109 | |||
1110 | /* | ||
1111 | * All handlers return true, if processing continues, and false, if the | ||
1112 | * request has to be completed - successfully or not | ||
1113 | */ | ||
1114 | switch (host->wait_for) { | ||
1115 | case MMCIF_WAIT_FOR_REQUEST: | ||
1116 | /* We're too late, the timeout has already kicked in */ | ||
1117 | return IRQ_HANDLED; | ||
1118 | case MMCIF_WAIT_FOR_CMD: | ||
1119 | if (sh_mmcif_end_cmd(host)) | ||
1120 | /* Wait for data */ | ||
1121 | return IRQ_HANDLED; | ||
1122 | break; | ||
1123 | case MMCIF_WAIT_FOR_MREAD: | ||
1124 | if (sh_mmcif_mread_block(host)) | ||
1125 | /* Wait for more data */ | ||
1126 | return IRQ_HANDLED; | ||
1127 | break; | ||
1128 | case MMCIF_WAIT_FOR_READ: | ||
1129 | if (sh_mmcif_read_block(host)) | ||
1130 | /* Wait for data end */ | ||
1131 | return IRQ_HANDLED; | ||
1132 | break; | ||
1133 | case MMCIF_WAIT_FOR_MWRITE: | ||
1134 | if (sh_mmcif_mwrite_block(host)) | ||
1135 | /* Wait data to write */ | ||
1136 | return IRQ_HANDLED; | ||
1137 | break; | ||
1138 | case MMCIF_WAIT_FOR_WRITE: | ||
1139 | if (sh_mmcif_write_block(host)) | ||
1140 | /* Wait for data end */ | ||
1141 | return IRQ_HANDLED; | ||
1142 | break; | ||
1143 | case MMCIF_WAIT_FOR_STOP: | ||
1144 | if (host->sd_error) { | ||
1145 | mrq->stop->error = sh_mmcif_error_manage(host); | ||
1146 | break; | ||
1147 | } | ||
1148 | sh_mmcif_get_cmd12response(host, mrq->stop); | ||
1149 | mrq->stop->error = 0; | ||
1150 | break; | ||
1151 | case MMCIF_WAIT_FOR_READ_END: | ||
1152 | case MMCIF_WAIT_FOR_WRITE_END: | ||
1153 | if (host->sd_error) | ||
1154 | mrq->data->error = sh_mmcif_error_manage(host); | ||
1155 | break; | ||
1156 | default: | ||
1157 | BUG(); | ||
1158 | } | ||
1159 | |||
1160 | if (host->wait_for != MMCIF_WAIT_FOR_STOP) { | ||
1161 | struct mmc_data *data = mrq->data; | ||
1162 | if (!mrq->cmd->error && data && !data->error) | ||
1163 | data->bytes_xfered = | ||
1164 | data->blocks * data->blksz; | ||
1165 | |||
1166 | if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) { | ||
1167 | sh_mmcif_stop_cmd(host, mrq); | ||
1168 | if (!mrq->stop->error) | ||
1169 | return IRQ_HANDLED; | ||
1170 | } | ||
1171 | } | ||
1172 | |||
1173 | host->wait_for = MMCIF_WAIT_FOR_REQUEST; | ||
1174 | host->state = STATE_IDLE; | ||
1175 | host->mrq = NULL; | ||
1176 | mmc_request_done(host->mmc, mrq); | ||
1177 | |||
1178 | return IRQ_HANDLED; | ||
1179 | } | 940 | } |
1180 | 941 | ||
1181 | static irqreturn_t sh_mmcif_intr(int irq, void *dev_id) | 942 | static irqreturn_t sh_mmcif_intr(int irq, void *dev_id) |
@@ -1186,12 +947,7 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id) | |||
1186 | 947 | ||
1187 | state = sh_mmcif_readl(host->addr, MMCIF_CE_INT); | 948 | state = sh_mmcif_readl(host->addr, MMCIF_CE_INT); |
1188 | 949 | ||
1189 | if (state & INT_ERR_STS) { | 950 | if (state & INT_RBSYE) { |
1190 | /* error interrupts - process first */ | ||
1191 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state); | ||
1192 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); | ||
1193 | err = 1; | ||
1194 | } else if (state & INT_RBSYE) { | ||
1195 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, | 951 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, |
1196 | ~(INT_RBSYE | INT_CRSPE)); | 952 | ~(INT_RBSYE | INT_CRSPE)); |
1197 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE); | 953 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE); |
@@ -1213,14 +969,17 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id) | |||
1213 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE); | 969 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE); |
1214 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); | 970 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); |
1215 | } else if (state & INT_DTRANE) { | 971 | } else if (state & INT_DTRANE) { |
1216 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, | 972 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_DTRANE); |
1217 | ~(INT_CMD12DRE | INT_CMD12RBE | | ||
1218 | INT_CMD12CRE | INT_DTRANE)); | ||
1219 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); | 973 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); |
1220 | } else if (state & INT_CMD12RBE) { | 974 | } else if (state & INT_CMD12RBE) { |
1221 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, | 975 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, |
1222 | ~(INT_CMD12RBE | INT_CMD12CRE)); | 976 | ~(INT_CMD12RBE | INT_CMD12CRE)); |
1223 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); | 977 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); |
978 | } else if (state & INT_ERR_STS) { | ||
979 | /* err interrupts */ | ||
980 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state); | ||
981 | sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); | ||
982 | err = 1; | ||
1224 | } else { | 983 | } else { |
1225 | dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state); | 984 | dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state); |
1226 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state); | 985 | sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state); |
@@ -1231,81 +990,23 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id) | |||
1231 | host->sd_error = true; | 990 | host->sd_error = true; |
1232 | dev_dbg(&host->pd->dev, "int err state = %08x\n", state); | 991 | dev_dbg(&host->pd->dev, "int err state = %08x\n", state); |
1233 | } | 992 | } |
1234 | if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) { | 993 | if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) |
1235 | if (!host->dma_active) | 994 | complete(&host->intr_wait); |
1236 | return IRQ_WAKE_THREAD; | 995 | else |
1237 | else if (host->sd_error) | ||
1238 | mmcif_dma_complete(host); | ||
1239 | } else { | ||
1240 | dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state); | 996 | dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state); |
1241 | } | ||
1242 | 997 | ||
1243 | return IRQ_HANDLED; | 998 | return IRQ_HANDLED; |
1244 | } | 999 | } |
1245 | 1000 | ||
1246 | static void mmcif_timeout_work(struct work_struct *work) | 1001 | static int __devinit sh_mmcif_probe(struct platform_device *pdev) |
1247 | { | ||
1248 | struct delayed_work *d = container_of(work, struct delayed_work, work); | ||
1249 | struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work); | ||
1250 | struct mmc_request *mrq = host->mrq; | ||
1251 | |||
1252 | if (host->dying) | ||
1253 | /* Don't run after mmc_remove_host() */ | ||
1254 | return; | ||
1255 | |||
1256 | /* | ||
1257 | * Handle races with cancel_delayed_work(), unless | ||
1258 | * cancel_delayed_work_sync() is used | ||
1259 | */ | ||
1260 | switch (host->wait_for) { | ||
1261 | case MMCIF_WAIT_FOR_CMD: | ||
1262 | mrq->cmd->error = sh_mmcif_error_manage(host); | ||
1263 | break; | ||
1264 | case MMCIF_WAIT_FOR_STOP: | ||
1265 | mrq->stop->error = sh_mmcif_error_manage(host); | ||
1266 | break; | ||
1267 | case MMCIF_WAIT_FOR_MREAD: | ||
1268 | case MMCIF_WAIT_FOR_MWRITE: | ||
1269 | case MMCIF_WAIT_FOR_READ: | ||
1270 | case MMCIF_WAIT_FOR_WRITE: | ||
1271 | case MMCIF_WAIT_FOR_READ_END: | ||
1272 | case MMCIF_WAIT_FOR_WRITE_END: | ||
1273 | mrq->data->error = sh_mmcif_error_manage(host); | ||
1274 | break; | ||
1275 | default: | ||
1276 | BUG(); | ||
1277 | } | ||
1278 | |||
1279 | host->state = STATE_IDLE; | ||
1280 | host->wait_for = MMCIF_WAIT_FOR_REQUEST; | ||
1281 | host->mrq = NULL; | ||
1282 | mmc_request_done(host->mmc, mrq); | ||
1283 | } | ||
1284 | |||
1285 | static void sh_mmcif_init_ocr(struct sh_mmcif_host *host) | ||
1286 | { | ||
1287 | struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data; | ||
1288 | struct mmc_host *mmc = host->mmc; | ||
1289 | |||
1290 | mmc_regulator_get_supply(mmc); | ||
1291 | |||
1292 | if (!pd) | ||
1293 | return; | ||
1294 | |||
1295 | if (!mmc->ocr_avail) | ||
1296 | mmc->ocr_avail = pd->ocr; | ||
1297 | else if (pd->ocr) | ||
1298 | dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n"); | ||
1299 | } | ||
1300 | |||
1301 | static int sh_mmcif_probe(struct platform_device *pdev) | ||
1302 | { | 1002 | { |
1303 | int ret = 0, irq[2]; | 1003 | int ret = 0, irq[2]; |
1304 | struct mmc_host *mmc; | 1004 | struct mmc_host *mmc; |
1305 | struct sh_mmcif_host *host; | 1005 | struct sh_mmcif_host *host; |
1306 | struct sh_mmcif_plat_data *pd = pdev->dev.platform_data; | 1006 | struct sh_mmcif_plat_data *pd; |
1307 | struct resource *res; | 1007 | struct resource *res; |
1308 | void __iomem *reg; | 1008 | void __iomem *reg; |
1009 | char clk_name[8]; | ||
1309 | 1010 | ||
1310 | irq[0] = platform_get_irq(pdev, 0); | 1011 | irq[0] = platform_get_irq(pdev, 0); |
1311 | irq[1] = platform_get_irq(pdev, 1); | 1012 | irq[1] = platform_get_irq(pdev, 1); |
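Editor's note: the left-hand interrupt handler returns IRQ_WAKE_THREAD so that the heavy work runs in a threaded handler that may sleep, whereas the right-hand handler simply signals a completion. A small sketch of the threaded-IRQ split, using invented names:

    #include <linux/interrupt.h>

    /* Hard IRQ: runs in interrupt context, must not sleep. */
    static irqreturn_t my_hardirq(int irq, void *dev_id)
    {
            /* ...ack the hardware, record the status word... */
            return IRQ_WAKE_THREAD;          /* defer the rest to the thread */
    }

    /* Threaded handler: process context, may sleep (e.g. wait for DMA). */
    static irqreturn_t my_thread_fn(int irq, void *dev_id)
    {
            /* ...finish the request, complete it to the MMC core... */
            return IRQ_HANDLED;
    }

    static int my_setup_irq(int irq, void *dev_id)
    {
            /* Same pairing used by request_threaded_irq() in the left-hand probe. */
            return request_threaded_irq(irq, my_hardirq, my_thread_fn, 0,
                                        "my_mmc", dev_id);
    }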
@@ -1323,26 +1024,49 @@ static int sh_mmcif_probe(struct platform_device *pdev) | |||
1323 | dev_err(&pdev->dev, "ioremap error.\n"); | 1024 | dev_err(&pdev->dev, "ioremap error.\n"); |
1324 | return -ENOMEM; | 1025 | return -ENOMEM; |
1325 | } | 1026 | } |
1326 | 1027 | pd = pdev->dev.platform_data; | |
1028 | if (!pd) { | ||
1029 | dev_err(&pdev->dev, "sh_mmcif plat data error.\n"); | ||
1030 | ret = -ENXIO; | ||
1031 | goto clean_up; | ||
1032 | } | ||
1327 | mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev); | 1033 | mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev); |
1328 | if (!mmc) { | 1034 | if (!mmc) { |
1329 | ret = -ENOMEM; | 1035 | ret = -ENOMEM; |
1330 | goto ealloch; | 1036 | goto clean_up; |
1331 | } | 1037 | } |
1332 | host = mmc_priv(mmc); | 1038 | host = mmc_priv(mmc); |
1333 | host->mmc = mmc; | 1039 | host->mmc = mmc; |
1334 | host->addr = reg; | 1040 | host->addr = reg; |
1335 | host->timeout = 1000; | 1041 | host->timeout = 1000; |
1336 | 1042 | ||
1043 | snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id); | ||
1044 | host->hclk = clk_get(&pdev->dev, clk_name); | ||
1045 | if (IS_ERR(host->hclk)) { | ||
1046 | dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); | ||
1047 | ret = PTR_ERR(host->hclk); | ||
1048 | goto clean_up1; | ||
1049 | } | ||
1050 | clk_enable(host->hclk); | ||
1051 | host->clk = clk_get_rate(host->hclk); | ||
1337 | host->pd = pdev; | 1052 | host->pd = pdev; |
1338 | 1053 | ||
1054 | init_completion(&host->intr_wait); | ||
1339 | spin_lock_init(&host->lock); | 1055 | spin_lock_init(&host->lock); |
1340 | 1056 | ||
1341 | mmc->ops = &sh_mmcif_ops; | 1057 | mmc->ops = &sh_mmcif_ops; |
1342 | sh_mmcif_init_ocr(host); | 1058 | mmc->f_max = host->clk; |
1343 | 1059 | /* close to 400KHz */ | |
1060 | if (mmc->f_max < 51200000) | ||
1061 | mmc->f_min = mmc->f_max / 128; | ||
1062 | else if (mmc->f_max < 102400000) | ||
1063 | mmc->f_min = mmc->f_max / 256; | ||
1064 | else | ||
1065 | mmc->f_min = mmc->f_max / 512; | ||
1066 | if (pd->ocr) | ||
1067 | mmc->ocr_avail = pd->ocr; | ||
1344 | mmc->caps = MMC_CAP_MMC_HIGHSPEED; | 1068 | mmc->caps = MMC_CAP_MMC_HIGHSPEED; |
1345 | if (pd && pd->caps) | 1069 | if (pd->caps) |
1346 | mmc->caps |= pd->caps; | 1070 | mmc->caps |= pd->caps; |
1347 | mmc->max_segs = 32; | 1071 | mmc->max_segs = 32; |
1348 | mmc->max_blk_size = 512; | 1072 | mmc->max_blk_size = 512; |
@@ -1350,105 +1074,63 @@ static int sh_mmcif_probe(struct platform_device *pdev) | |||
1350 | mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size; | 1074 | mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size; |
1351 | mmc->max_seg_size = mmc->max_req_size; | 1075 | mmc->max_seg_size = mmc->max_req_size; |
1352 | 1076 | ||
1077 | sh_mmcif_sync_reset(host); | ||
1353 | platform_set_drvdata(pdev, host); | 1078 | platform_set_drvdata(pdev, host); |
1354 | 1079 | ||
1355 | pm_runtime_enable(&pdev->dev); | 1080 | pm_runtime_enable(&pdev->dev); |
1356 | host->power = false; | 1081 | host->power = false; |
1357 | 1082 | ||
1358 | host->hclk = clk_get(&pdev->dev, NULL); | ||
1359 | if (IS_ERR(host->hclk)) { | ||
1360 | ret = PTR_ERR(host->hclk); | ||
1361 | dev_err(&pdev->dev, "cannot get clock: %d\n", ret); | ||
1362 | goto eclkget; | ||
1363 | } | ||
1364 | ret = sh_mmcif_clk_update(host); | ||
1365 | if (ret < 0) | ||
1366 | goto eclkupdate; | ||
1367 | |||
1368 | ret = pm_runtime_resume(&pdev->dev); | 1083 | ret = pm_runtime_resume(&pdev->dev); |
1369 | if (ret < 0) | 1084 | if (ret < 0) |
1370 | goto eresume; | 1085 | goto clean_up2; |
1371 | 1086 | ||
1372 | INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work); | 1087 | mmc_add_host(mmc); |
1373 | 1088 | ||
1374 | sh_mmcif_sync_reset(host); | ||
1375 | sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); | 1089 | sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); |
1376 | 1090 | ||
1377 | ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host); | 1091 | ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host); |
1378 | if (ret) { | 1092 | if (ret) { |
1379 | dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n"); | 1093 | dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n"); |
1380 | goto ereqirq0; | 1094 | goto clean_up3; |
1381 | } | 1095 | } |
1382 | ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host); | 1096 | ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host); |
1383 | if (ret) { | 1097 | if (ret) { |
1098 | free_irq(irq[0], host); | ||
1384 | dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n"); | 1099 | dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n"); |
1385 | goto ereqirq1; | 1100 | goto clean_up3; |
1386 | } | 1101 | } |
1387 | 1102 | ||
1388 | if (pd && pd->use_cd_gpio) { | 1103 | sh_mmcif_detect(host->mmc); |
1389 | ret = mmc_gpio_request_cd(mmc, pd->cd_gpio); | ||
1390 | if (ret < 0) | ||
1391 | goto erqcd; | ||
1392 | } | ||
1393 | |||
1394 | clk_disable(host->hclk); | ||
1395 | ret = mmc_add_host(mmc); | ||
1396 | if (ret < 0) | ||
1397 | goto emmcaddh; | ||
1398 | |||
1399 | dev_pm_qos_expose_latency_limit(&pdev->dev, 100); | ||
1400 | 1104 | ||
1401 | dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION); | 1105 | dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION); |
1402 | dev_dbg(&pdev->dev, "chip ver H'%04x\n", | 1106 | dev_dbg(&pdev->dev, "chip ver H'%04x\n", |
1403 | sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff); | 1107 | sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff); |
1404 | return ret; | 1108 | return ret; |
1405 | 1109 | ||
1406 | emmcaddh: | 1110 | clean_up3: |
1407 | if (pd && pd->use_cd_gpio) | 1111 | mmc_remove_host(mmc); |
1408 | mmc_gpio_free_cd(mmc); | ||
1409 | erqcd: | ||
1410 | free_irq(irq[1], host); | ||
1411 | ereqirq1: | ||
1412 | free_irq(irq[0], host); | ||
1413 | ereqirq0: | ||
1414 | pm_runtime_suspend(&pdev->dev); | 1112 | pm_runtime_suspend(&pdev->dev); |
1415 | eresume: | 1113 | clean_up2: |
1416 | clk_disable(host->hclk); | ||
1417 | eclkupdate: | ||
1418 | clk_put(host->hclk); | ||
1419 | eclkget: | ||
1420 | pm_runtime_disable(&pdev->dev); | 1114 | pm_runtime_disable(&pdev->dev); |
1115 | clk_disable(host->hclk); | ||
1116 | clean_up1: | ||
1421 | mmc_free_host(mmc); | 1117 | mmc_free_host(mmc); |
1422 | ealloch: | 1118 | clean_up: |
1423 | iounmap(reg); | 1119 | if (reg) |
1120 | iounmap(reg); | ||
1424 | return ret; | 1121 | return ret; |
1425 | } | 1122 | } |
1426 | 1123 | ||
1427 | static int sh_mmcif_remove(struct platform_device *pdev) | 1124 | static int __devexit sh_mmcif_remove(struct platform_device *pdev) |
1428 | { | 1125 | { |
1429 | struct sh_mmcif_host *host = platform_get_drvdata(pdev); | 1126 | struct sh_mmcif_host *host = platform_get_drvdata(pdev); |
1430 | struct sh_mmcif_plat_data *pd = pdev->dev.platform_data; | ||
1431 | int irq[2]; | 1127 | int irq[2]; |
1432 | 1128 | ||
1433 | host->dying = true; | ||
1434 | clk_enable(host->hclk); | ||
1435 | pm_runtime_get_sync(&pdev->dev); | 1129 | pm_runtime_get_sync(&pdev->dev); |
1436 | 1130 | ||
1437 | dev_pm_qos_hide_latency_limit(&pdev->dev); | ||
1438 | |||
1439 | if (pd && pd->use_cd_gpio) | ||
1440 | mmc_gpio_free_cd(host->mmc); | ||
1441 | |||
1442 | mmc_remove_host(host->mmc); | 1131 | mmc_remove_host(host->mmc); |
1443 | sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); | 1132 | sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); |
1444 | 1133 | ||
1445 | /* | ||
1446 | * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the | ||
1447 | * mmc_remove_host() call above. But swapping order doesn't help either | ||
1448 | * (a query on the linux-mmc mailing list didn't bring any replies). | ||
1449 | */ | ||
1450 | cancel_delayed_work_sync(&host->timeout_work); | ||
1451 | |||
1452 | if (host->addr) | 1134 | if (host->addr) |
1453 | iounmap(host->addr); | 1135 | iounmap(host->addr); |
1454 | 1136 | ||
@@ -1471,18 +1153,24 @@ static int sh_mmcif_remove(struct platform_device *pdev) | |||
1471 | #ifdef CONFIG_PM | 1153 | #ifdef CONFIG_PM |
1472 | static int sh_mmcif_suspend(struct device *dev) | 1154 | static int sh_mmcif_suspend(struct device *dev) |
1473 | { | 1155 | { |
1474 | struct sh_mmcif_host *host = dev_get_drvdata(dev); | 1156 | struct platform_device *pdev = to_platform_device(dev); |
1157 | struct sh_mmcif_host *host = platform_get_drvdata(pdev); | ||
1475 | int ret = mmc_suspend_host(host->mmc); | 1158 | int ret = mmc_suspend_host(host->mmc); |
1476 | 1159 | ||
1477 | if (!ret) | 1160 | if (!ret) { |
1478 | sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); | 1161 | sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); |
1162 | clk_disable(host->hclk); | ||
1163 | } | ||
1479 | 1164 | ||
1480 | return ret; | 1165 | return ret; |
1481 | } | 1166 | } |
1482 | 1167 | ||
1483 | static int sh_mmcif_resume(struct device *dev) | 1168 | static int sh_mmcif_resume(struct device *dev) |
1484 | { | 1169 | { |
1485 | struct sh_mmcif_host *host = dev_get_drvdata(dev); | 1170 | struct platform_device *pdev = to_platform_device(dev); |
1171 | struct sh_mmcif_host *host = platform_get_drvdata(pdev); | ||
1172 | |||
1173 | clk_enable(host->hclk); | ||
1486 | 1174 | ||
1487 | return mmc_resume_host(host->mmc); | 1175 | return mmc_resume_host(host->mmc); |
1488 | } | 1176 | } |
@@ -1491,12 +1179,6 @@ static int sh_mmcif_resume(struct device *dev) | |||
1491 | #define sh_mmcif_resume NULL | 1179 | #define sh_mmcif_resume NULL |
1492 | #endif /* CONFIG_PM */ | 1180 | #endif /* CONFIG_PM */ |
1493 | 1181 | ||
1494 | static const struct of_device_id mmcif_of_match[] = { | ||
1495 | { .compatible = "renesas,sh-mmcif" }, | ||
1496 | { } | ||
1497 | }; | ||
1498 | MODULE_DEVICE_TABLE(of, mmcif_of_match); | ||
1499 | |||
1500 | static const struct dev_pm_ops sh_mmcif_dev_pm_ops = { | 1182 | static const struct dev_pm_ops sh_mmcif_dev_pm_ops = { |
1501 | .suspend = sh_mmcif_suspend, | 1183 | .suspend = sh_mmcif_suspend, |
1502 | .resume = sh_mmcif_resume, | 1184 | .resume = sh_mmcif_resume, |
@@ -1508,12 +1190,22 @@ static struct platform_driver sh_mmcif_driver = { | |||
1508 | .driver = { | 1190 | .driver = { |
1509 | .name = DRIVER_NAME, | 1191 | .name = DRIVER_NAME, |
1510 | .pm = &sh_mmcif_dev_pm_ops, | 1192 | .pm = &sh_mmcif_dev_pm_ops, |
1511 | .owner = THIS_MODULE, | ||
1512 | .of_match_table = mmcif_of_match, | ||
1513 | }, | 1193 | }, |
1514 | }; | 1194 | }; |
1515 | 1195 | ||
1516 | module_platform_driver(sh_mmcif_driver); | 1196 | static int __init sh_mmcif_init(void) |
1197 | { | ||
1198 | return platform_driver_register(&sh_mmcif_driver); | ||
1199 | } | ||
1200 | |||
1201 | static void __exit sh_mmcif_exit(void) | ||
1202 | { | ||
1203 | platform_driver_unregister(&sh_mmcif_driver); | ||
1204 | } | ||
1205 | |||
1206 | module_init(sh_mmcif_init); | ||
1207 | module_exit(sh_mmcif_exit); | ||
1208 | |||
1517 | 1209 | ||
1518 | MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver"); | 1210 | MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver"); |
1519 | MODULE_LICENSE("GPL"); | 1211 | MODULE_LICENSE("GPL"); |
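Editor's note: the tail of this file swaps module_platform_driver() for explicit init/exit functions. The two forms register the same driver; the macro merely generates the boilerplate. A sketch with a hypothetical driver name:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static struct platform_driver my_driver = {
            .driver = {
                    .name = "my_device",
            },
    };

    /* Expanded form, as on the right-hand side of this hunk: */
    static int __init my_init(void)
    {
            return platform_driver_register(&my_driver);
    }
    module_init(my_init);

    static void __exit my_exit(void)
    {
            platform_driver_unregister(&my_driver);
    }
    module_exit(my_exit);

    /*
     * Equivalent one-liner, as on the left-hand side:
     *      module_platform_driver(my_driver);
     */
    MODULE_LICENSE("GPL");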
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c index 524a7f77382..0c4a672f5db 100644 --- a/drivers/mmc/host/sh_mobile_sdhi.c +++ b/drivers/mmc/host/sh_mobile_sdhi.c | |||
@@ -21,8 +21,6 @@ | |||
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/clk.h> | 22 | #include <linux/clk.h> |
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/mod_devicetable.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
27 | #include <linux/mmc/host.h> | 25 | #include <linux/mmc/host.h> |
28 | #include <linux/mmc/sh_mobile_sdhi.h> | 26 | #include <linux/mmc/sh_mobile_sdhi.h> |
@@ -40,39 +38,22 @@ struct sh_mobile_sdhi { | |||
40 | struct tmio_mmc_dma dma_priv; | 38 | struct tmio_mmc_dma dma_priv; |
41 | }; | 39 | }; |
42 | 40 | ||
43 | static int sh_mobile_sdhi_clk_enable(struct platform_device *pdev, unsigned int *f) | ||
44 | { | ||
45 | struct mmc_host *mmc = dev_get_drvdata(&pdev->dev); | ||
46 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
47 | struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); | ||
48 | int ret = clk_enable(priv->clk); | ||
49 | if (ret < 0) | ||
50 | return ret; | ||
51 | |||
52 | *f = clk_get_rate(priv->clk); | ||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | static void sh_mobile_sdhi_clk_disable(struct platform_device *pdev) | ||
57 | { | ||
58 | struct mmc_host *mmc = dev_get_drvdata(&pdev->dev); | ||
59 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
60 | struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); | ||
61 | clk_disable(priv->clk); | ||
62 | } | ||
63 | |||
64 | static void sh_mobile_sdhi_set_pwr(struct platform_device *pdev, int state) | 41 | static void sh_mobile_sdhi_set_pwr(struct platform_device *pdev, int state) |
65 | { | 42 | { |
66 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; | 43 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; |
67 | 44 | ||
68 | p->set_pwr(pdev, state); | 45 | if (p && p->set_pwr) |
46 | p->set_pwr(pdev, state); | ||
69 | } | 47 | } |
70 | 48 | ||
71 | static int sh_mobile_sdhi_get_cd(struct platform_device *pdev) | 49 | static int sh_mobile_sdhi_get_cd(struct platform_device *pdev) |
72 | { | 50 | { |
73 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; | 51 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; |
74 | 52 | ||
75 | return p->get_cd(pdev); | 53 | if (p && p->get_cd) |
54 | return p->get_cd(pdev); | ||
55 | else | ||
56 | return -ENOSYS; | ||
76 | } | 57 | } |
77 | 58 | ||
78 | static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host) | 59 | static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host) |
@@ -108,23 +89,14 @@ static int sh_mobile_sdhi_write16_hook(struct tmio_mmc_host *host, int addr) | |||
108 | return 0; | 89 | return 0; |
109 | } | 90 | } |
110 | 91 | ||
111 | static void sh_mobile_sdhi_cd_wakeup(const struct platform_device *pdev) | 92 | static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) |
112 | { | ||
113 | mmc_detect_change(dev_get_drvdata(&pdev->dev), msecs_to_jiffies(100)); | ||
114 | } | ||
115 | |||
116 | static const struct sh_mobile_sdhi_ops sdhi_ops = { | ||
117 | .cd_wakeup = sh_mobile_sdhi_cd_wakeup, | ||
118 | }; | ||
119 | |||
120 | static int sh_mobile_sdhi_probe(struct platform_device *pdev) | ||
121 | { | 93 | { |
122 | struct sh_mobile_sdhi *priv; | 94 | struct sh_mobile_sdhi *priv; |
123 | struct tmio_mmc_data *mmc_data; | 95 | struct tmio_mmc_data *mmc_data; |
124 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; | 96 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; |
125 | struct tmio_mmc_host *host; | 97 | struct tmio_mmc_host *host; |
126 | int irq, ret, i = 0; | 98 | char clk_name[8]; |
127 | bool multiplexed_isr = true; | 99 | int i, irq, ret; |
128 | 100 | ||
129 | priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL); | 101 | priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL); |
130 | if (priv == NULL) { | 102 | if (priv == NULL) { |
@@ -133,25 +105,21 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev) | |||
133 | } | 105 | } |
134 | 106 | ||
135 | mmc_data = &priv->mmc_data; | 107 | mmc_data = &priv->mmc_data; |
108 | p->pdata = mmc_data; | ||
136 | 109 | ||
137 | if (p) { | 110 | snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id); |
138 | p->pdata = mmc_data; | 111 | priv->clk = clk_get(&pdev->dev, clk_name); |
139 | if (p->init) { | ||
140 | ret = p->init(pdev, &sdhi_ops); | ||
141 | if (ret) | ||
142 | goto einit; | ||
143 | } | ||
144 | } | ||
145 | |||
146 | priv->clk = clk_get(&pdev->dev, NULL); | ||
147 | if (IS_ERR(priv->clk)) { | 112 | if (IS_ERR(priv->clk)) { |
113 | dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); | ||
148 | ret = PTR_ERR(priv->clk); | 114 | ret = PTR_ERR(priv->clk); |
149 | dev_err(&pdev->dev, "cannot get clock: %d\n", ret); | ||
150 | goto eclkget; | 115 | goto eclkget; |
151 | } | 116 | } |
152 | 117 | ||
153 | mmc_data->clk_enable = sh_mobile_sdhi_clk_enable; | 118 | clk_enable(priv->clk); |
154 | mmc_data->clk_disable = sh_mobile_sdhi_clk_disable; | 119 | |
120 | mmc_data->hclk = clk_get_rate(priv->clk); | ||
121 | mmc_data->set_pwr = sh_mobile_sdhi_set_pwr; | ||
122 | mmc_data->get_cd = sh_mobile_sdhi_get_cd; | ||
155 | mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED; | 123 | mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED; |
156 | if (p) { | 124 | if (p) { |
157 | mmc_data->flags = p->tmio_flags; | 125 | mmc_data->flags = p->tmio_flags; |
@@ -159,18 +127,12 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev) | |||
159 | mmc_data->write16_hook = sh_mobile_sdhi_write16_hook; | 127 | mmc_data->write16_hook = sh_mobile_sdhi_write16_hook; |
160 | mmc_data->ocr_mask = p->tmio_ocr_mask; | 128 | mmc_data->ocr_mask = p->tmio_ocr_mask; |
161 | mmc_data->capabilities |= p->tmio_caps; | 129 | mmc_data->capabilities |= p->tmio_caps; |
162 | mmc_data->capabilities2 |= p->tmio_caps2; | ||
163 | mmc_data->cd_gpio = p->cd_gpio; | ||
164 | if (p->set_pwr) | ||
165 | mmc_data->set_pwr = sh_mobile_sdhi_set_pwr; | ||
166 | if (p->get_cd) | ||
167 | mmc_data->get_cd = sh_mobile_sdhi_get_cd; | ||
168 | 130 | ||
169 | if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) { | 131 | if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) { |
170 | priv->param_tx.shdma_slave.slave_id = p->dma_slave_tx; | 132 | priv->param_tx.slave_id = p->dma_slave_tx; |
171 | priv->param_rx.shdma_slave.slave_id = p->dma_slave_rx; | 133 | priv->param_rx.slave_id = p->dma_slave_rx; |
172 | priv->dma_priv.chan_priv_tx = &priv->param_tx.shdma_slave; | 134 | priv->dma_priv.chan_priv_tx = &priv->param_tx; |
173 | priv->dma_priv.chan_priv_rx = &priv->param_rx.shdma_slave; | 135 | priv->dma_priv.chan_priv_rx = &priv->param_rx; |
174 | priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */ | 136 | priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */ |
175 | mmc_data->dma = &priv->dma_priv; | 137 | mmc_data->dma = &priv->dma_priv; |
176 | } | 138 | } |
@@ -191,88 +153,40 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev) | |||
191 | if (ret < 0) | 153 | if (ret < 0) |
192 | goto eprobe; | 154 | goto eprobe; |
193 | 155 | ||
194 | /* | 156 | for (i = 0; i < 3; i++) { |
195 | * Allow one or more specific (named) ISRs or | 157 | irq = platform_get_irq(pdev, i); |
196 | * one or more multiplexed (un-named) ISRs. | 158 | if (irq < 0) { |
197 | */ | 159 | if (i) { |
198 | 160 | continue; | |
199 | irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT); | 161 | } else { |
200 | if (irq >= 0) { | 162 | ret = irq; |
201 | multiplexed_isr = false; | 163 | goto eirq; |
202 | ret = request_irq(irq, tmio_mmc_card_detect_irq, 0, | 164 | } |
203 | dev_name(&pdev->dev), host); | 165 | } |
204 | if (ret) | 166 | ret = request_irq(irq, tmio_mmc_irq, 0, |
205 | goto eirq_card_detect; | ||
206 | } | ||
207 | |||
208 | irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO); | ||
209 | if (irq >= 0) { | ||
210 | multiplexed_isr = false; | ||
211 | ret = request_irq(irq, tmio_mmc_sdio_irq, 0, | ||
212 | dev_name(&pdev->dev), host); | ||
213 | if (ret) | ||
214 | goto eirq_sdio; | ||
215 | } | ||
216 | |||
217 | irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDCARD); | ||
218 | if (irq >= 0) { | ||
219 | multiplexed_isr = false; | ||
220 | ret = request_irq(irq, tmio_mmc_sdcard_irq, 0, | ||
221 | dev_name(&pdev->dev), host); | 167 | dev_name(&pdev->dev), host); |
222 | if (ret) | 168 | if (ret) { |
223 | goto eirq_sdcard; | 169 | while (i--) { |
224 | } else if (!multiplexed_isr) { | 170 | irq = platform_get_irq(pdev, i); |
225 | dev_err(&pdev->dev, | 171 | if (irq >= 0) |
226 | "Principal SD-card IRQ is missing among named interrupts\n"); | 172 | free_irq(irq, host); |
227 | ret = irq; | 173 | } |
228 | goto eirq_sdcard; | 174 | goto eirq; |
229 | } | ||
230 | |||
231 | if (multiplexed_isr) { | ||
232 | while (1) { | ||
233 | irq = platform_get_irq(pdev, i); | ||
234 | if (irq < 0) | ||
235 | break; | ||
236 | i++; | ||
237 | ret = request_irq(irq, tmio_mmc_irq, 0, | ||
238 | dev_name(&pdev->dev), host); | ||
239 | if (ret) | ||
240 | goto eirq_multiplexed; | ||
241 | } | 175 | } |
242 | |||
243 | /* There must be at least one IRQ source */ | ||
244 | if (!i) | ||
245 | goto eirq_multiplexed; | ||
246 | } | 176 | } |
247 | |||
248 | dev_info(&pdev->dev, "%s base at 0x%08lx clock rate %u MHz\n", | 177 | dev_info(&pdev->dev, "%s base at 0x%08lx clock rate %u MHz\n", |
249 | mmc_hostname(host->mmc), (unsigned long) | 178 | mmc_hostname(host->mmc), (unsigned long) |
250 | (platform_get_resource(pdev, IORESOURCE_MEM, 0)->start), | 179 | (platform_get_resource(pdev,IORESOURCE_MEM, 0)->start), |
251 | host->mmc->f_max / 1000000); | 180 | mmc_data->hclk / 1000000); |
252 | 181 | ||
253 | return ret; | 182 | return ret; |
254 | 183 | ||
255 | eirq_multiplexed: | 184 | eirq: |
256 | while (i--) { | ||
257 | irq = platform_get_irq(pdev, i); | ||
258 | free_irq(irq, host); | ||
259 | } | ||
260 | eirq_sdcard: | ||
261 | irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO); | ||
262 | if (irq >= 0) | ||
263 | free_irq(irq, host); | ||
264 | eirq_sdio: | ||
265 | irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT); | ||
266 | if (irq >= 0) | ||
267 | free_irq(irq, host); | ||
268 | eirq_card_detect: | ||
269 | tmio_mmc_host_remove(host); | 185 | tmio_mmc_host_remove(host); |
270 | eprobe: | 186 | eprobe: |
187 | clk_disable(priv->clk); | ||
271 | clk_put(priv->clk); | 188 | clk_put(priv->clk); |
272 | eclkget: | 189 | eclkget: |
273 | if (p && p->cleanup) | ||
274 | p->cleanup(pdev); | ||
275 | einit: | ||
276 | kfree(priv); | 190 | kfree(priv); |
277 | return ret; | 191 | return ret; |
278 | } | 192 | } |
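Editor's note: the left-hand probe requests IRQs by resource name (card detect, SDIO, SD card) and only falls back to a loop over unnamed, multiplexed IRQs; the right-hand probe always iterates over up to three indexed IRQs. A reduced sketch of the by-name lookup with an indexed fallback; the resource name "sdcard" and the function names are assumptions, not the driver's actual strings.

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    static int my_request_irqs(struct platform_device *pdev,
                               irq_handler_t handler, void *ctx)
    {
            int irq;

            /* Prefer a named resource when the board provides one... */
            irq = platform_get_irq_byname(pdev, "sdcard");
            if (irq < 0) {
                    /* ...otherwise fall back to the first indexed IRQ. */
                    irq = platform_get_irq(pdev, 0);
                    if (irq < 0)
                            return irq;
            }

            return request_irq(irq, handler, 0, dev_name(&pdev->dev), ctx);
    }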
@@ -283,25 +197,20 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev) | |||
283 | struct tmio_mmc_host *host = mmc_priv(mmc); | 197 | struct tmio_mmc_host *host = mmc_priv(mmc); |
284 | struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); | 198 | struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); |
285 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; | 199 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; |
286 | int i = 0, irq; | 200 | int i, irq; |
287 | 201 | ||
288 | if (p) | 202 | p->pdata = NULL; |
289 | p->pdata = NULL; | ||
290 | 203 | ||
291 | tmio_mmc_host_remove(host); | 204 | tmio_mmc_host_remove(host); |
292 | 205 | ||
293 | while (1) { | 206 | for (i = 0; i < 3; i++) { |
294 | irq = platform_get_irq(pdev, i++); | 207 | irq = platform_get_irq(pdev, i); |
295 | if (irq < 0) | 208 | if (irq >= 0) |
296 | break; | 209 | free_irq(irq, host); |
297 | free_irq(irq, host); | ||
298 | } | 210 | } |
299 | 211 | ||
212 | clk_disable(priv->clk); | ||
300 | clk_put(priv->clk); | 213 | clk_put(priv->clk); |
301 | |||
302 | if (p && p->cleanup) | ||
303 | p->cleanup(pdev); | ||
304 | |||
305 | kfree(priv); | 214 | kfree(priv); |
306 | 215 | ||
307 | return 0; | 216 | return 0; |
@@ -314,24 +223,28 @@ static const struct dev_pm_ops tmio_mmc_dev_pm_ops = { | |||
314 | .runtime_resume = tmio_mmc_host_runtime_resume, | 223 | .runtime_resume = tmio_mmc_host_runtime_resume, |
315 | }; | 224 | }; |
316 | 225 | ||
317 | static const struct of_device_id sh_mobile_sdhi_of_match[] = { | ||
318 | { .compatible = "renesas,shmobile-sdhi" }, | ||
319 | { } | ||
320 | }; | ||
321 | MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match); | ||
322 | |||
323 | static struct platform_driver sh_mobile_sdhi_driver = { | 226 | static struct platform_driver sh_mobile_sdhi_driver = { |
324 | .driver = { | 227 | .driver = { |
325 | .name = "sh_mobile_sdhi", | 228 | .name = "sh_mobile_sdhi", |
326 | .owner = THIS_MODULE, | 229 | .owner = THIS_MODULE, |
327 | .pm = &tmio_mmc_dev_pm_ops, | 230 | .pm = &tmio_mmc_dev_pm_ops, |
328 | .of_match_table = sh_mobile_sdhi_of_match, | ||
329 | }, | 231 | }, |
330 | .probe = sh_mobile_sdhi_probe, | 232 | .probe = sh_mobile_sdhi_probe, |
331 | .remove = sh_mobile_sdhi_remove, | 233 | .remove = __devexit_p(sh_mobile_sdhi_remove), |
332 | }; | 234 | }; |
333 | 235 | ||
334 | module_platform_driver(sh_mobile_sdhi_driver); | 236 | static int __init sh_mobile_sdhi_init(void) |
237 | { | ||
238 | return platform_driver_register(&sh_mobile_sdhi_driver); | ||
239 | } | ||
240 | |||
241 | static void __exit sh_mobile_sdhi_exit(void) | ||
242 | { | ||
243 | platform_driver_unregister(&sh_mobile_sdhi_driver); | ||
244 | } | ||
245 | |||
246 | module_init(sh_mobile_sdhi_init); | ||
247 | module_exit(sh_mobile_sdhi_exit); | ||
335 | 248 | ||
336 | MODULE_DESCRIPTION("SuperH Mobile SDHI driver"); | 249 | MODULE_DESCRIPTION("SuperH Mobile SDHI driver"); |
337 | MODULE_AUTHOR("Magnus Damm"); | 250 | MODULE_AUTHOR("Magnus Damm"); |
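Editor's note: both probes in this file differ in how they look up the interface clock. The right-hand code builds a name such as "sdhi%d" and passes it to clk_get(); the left-hand code passes NULL and relies on the clkdev table matching on the struct device. A sketch that tries the named lookup first, purely for illustration:

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/kernel.h>
    #include <linux/platform_device.h>

    static struct clk *my_get_clock(struct platform_device *pdev)
    {
            struct clk *clk;
            char name[8];

            /* Older style (right-hand side): derive the clock name by hand. */
            snprintf(name, sizeof(name), "sdhi%d", pdev->id);
            clk = clk_get(&pdev->dev, name);
            if (!IS_ERR(clk))
                    return clk;

            /* Newer style (left-hand side): let clkdev match on the device. */
            return clk_get(&pdev->dev, NULL);
    }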
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c index 43d962829f8..457c26ea09d 100644 --- a/drivers/mmc/host/tifm_sd.c +++ b/drivers/mmc/host/tifm_sd.c | |||
@@ -16,14 +16,13 @@ | |||
16 | #include <linux/mmc/host.h> | 16 | #include <linux/mmc/host.h> |
17 | #include <linux/highmem.h> | 17 | #include <linux/highmem.h> |
18 | #include <linux/scatterlist.h> | 18 | #include <linux/scatterlist.h> |
19 | #include <linux/module.h> | ||
20 | #include <asm/io.h> | 19 | #include <asm/io.h> |
21 | 20 | ||
22 | #define DRIVER_NAME "tifm_sd" | 21 | #define DRIVER_NAME "tifm_sd" |
23 | #define DRIVER_VERSION "0.8" | 22 | #define DRIVER_VERSION "0.8" |
24 | 23 | ||
25 | static bool no_dma = 0; | 24 | static int no_dma = 0; |
26 | static bool fixed_timeout = 0; | 25 | static int fixed_timeout = 0; |
27 | module_param(no_dma, bool, 0644); | 26 | module_param(no_dma, bool, 0644); |
28 | module_param(fixed_timeout, bool, 0644); | 27 | module_param(fixed_timeout, bool, 0644); |
29 | 28 | ||
@@ -118,7 +117,7 @@ static void tifm_sd_read_fifo(struct tifm_sd *host, struct page *pg, | |||
118 | unsigned char *buf; | 117 | unsigned char *buf; |
119 | unsigned int pos = 0, val; | 118 | unsigned int pos = 0, val; |
120 | 119 | ||
121 | buf = kmap_atomic(pg) + off; | 120 | buf = kmap_atomic(pg, KM_BIO_DST_IRQ) + off; |
122 | if (host->cmd_flags & DATA_CARRY) { | 121 | if (host->cmd_flags & DATA_CARRY) { |
123 | buf[pos++] = host->bounce_buf_data[0]; | 122 | buf[pos++] = host->bounce_buf_data[0]; |
124 | host->cmd_flags &= ~DATA_CARRY; | 123 | host->cmd_flags &= ~DATA_CARRY; |
@@ -134,7 +133,7 @@ static void tifm_sd_read_fifo(struct tifm_sd *host, struct page *pg, | |||
134 | } | 133 | } |
135 | buf[pos++] = (val >> 8) & 0xff; | 134 | buf[pos++] = (val >> 8) & 0xff; |
136 | } | 135 | } |
137 | kunmap_atomic(buf - off); | 136 | kunmap_atomic(buf - off, KM_BIO_DST_IRQ); |
138 | } | 137 | } |
139 | 138 | ||
140 | static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg, | 139 | static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg, |
@@ -144,7 +143,7 @@ static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg, | |||
144 | unsigned char *buf; | 143 | unsigned char *buf; |
145 | unsigned int pos = 0, val; | 144 | unsigned int pos = 0, val; |
146 | 145 | ||
147 | buf = kmap_atomic(pg) + off; | 146 | buf = kmap_atomic(pg, KM_BIO_SRC_IRQ) + off; |
148 | if (host->cmd_flags & DATA_CARRY) { | 147 | if (host->cmd_flags & DATA_CARRY) { |
149 | val = host->bounce_buf_data[0] | ((buf[pos++] << 8) & 0xff00); | 148 | val = host->bounce_buf_data[0] | ((buf[pos++] << 8) & 0xff00); |
150 | writel(val, sock->addr + SOCK_MMCSD_DATA); | 149 | writel(val, sock->addr + SOCK_MMCSD_DATA); |
@@ -161,7 +160,7 @@ static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg, | |||
161 | val |= (buf[pos++] << 8) & 0xff00; | 160 | val |= (buf[pos++] << 8) & 0xff00; |
162 | writel(val, sock->addr + SOCK_MMCSD_DATA); | 161 | writel(val, sock->addr + SOCK_MMCSD_DATA); |
163 | } | 162 | } |
164 | kunmap_atomic(buf - off); | 163 | kunmap_atomic(buf - off, KM_BIO_SRC_IRQ); |
165 | } | 164 | } |
166 | 165 | ||
167 | static void tifm_sd_transfer_data(struct tifm_sd *host) | 166 | static void tifm_sd_transfer_data(struct tifm_sd *host) |
@@ -212,13 +211,13 @@ static void tifm_sd_copy_page(struct page *dst, unsigned int dst_off, | |||
212 | struct page *src, unsigned int src_off, | 211 | struct page *src, unsigned int src_off, |
213 | unsigned int count) | 212 | unsigned int count) |
214 | { | 213 | { |
215 | unsigned char *src_buf = kmap_atomic(src) + src_off; | 214 | unsigned char *src_buf = kmap_atomic(src, KM_BIO_SRC_IRQ) + src_off; |
216 | unsigned char *dst_buf = kmap_atomic(dst) + dst_off; | 215 | unsigned char *dst_buf = kmap_atomic(dst, KM_BIO_DST_IRQ) + dst_off; |
217 | 216 | ||
218 | memcpy(dst_buf, src_buf, count); | 217 | memcpy(dst_buf, src_buf, count); |
219 | 218 | ||
220 | kunmap_atomic(dst_buf - dst_off); | 219 | kunmap_atomic(dst_buf - dst_off, KM_BIO_DST_IRQ); |
221 | kunmap_atomic(src_buf - src_off); | 220 | kunmap_atomic(src_buf - src_off, KM_BIO_SRC_IRQ); |
222 | } | 221 | } |
223 | 222 | ||
224 | static void tifm_sd_bounce_block(struct tifm_sd *host, struct mmc_data *r_data) | 223 | static void tifm_sd_bounce_block(struct tifm_sd *host, struct mmc_data *r_data) |
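Editor's note: this hunk restores the two-argument kmap_atomic()/kunmap_atomic() calls with KM_BIO_* slot types; the left-hand code uses the later single-argument form, where the slot argument no longer exists. A sketch of a highmem-safe copy with the single-argument API, mirroring the left-hand helper (function name invented):

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Copy 'count' bytes between two (possibly highmem) pages. */
    static void my_copy_page(struct page *dst, unsigned int dst_off,
                             struct page *src, unsigned int src_off,
                             unsigned int count)
    {
            unsigned char *s = kmap_atomic(src) + src_off;  /* one-argument form */
            unsigned char *d = kmap_atomic(dst) + dst_off;

            memcpy(d, s, count);

            kunmap_atomic(d - dst_off);     /* unmap in reverse order */
            kunmap_atomic(s - src_off);
    }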
@@ -632,7 +631,7 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
632 | } | 631 | } |
633 | 632 | ||
634 | if (host->req) { | 633 | if (host->req) { |
635 | pr_err("%s : unfinished request detected\n", | 634 | printk(KERN_ERR "%s : unfinished request detected\n", |
636 | dev_name(&sock->dev)); | 635 | dev_name(&sock->dev)); |
637 | mrq->cmd->error = -ETIMEDOUT; | 636 | mrq->cmd->error = -ETIMEDOUT; |
638 | goto err_out; | 637 | goto err_out; |
@@ -672,7 +671,7 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
672 | r_data->flags & MMC_DATA_WRITE | 671 | r_data->flags & MMC_DATA_WRITE |
673 | ? PCI_DMA_TODEVICE | 672 | ? PCI_DMA_TODEVICE |
674 | : PCI_DMA_FROMDEVICE)) { | 673 | : PCI_DMA_FROMDEVICE)) { |
675 | pr_err("%s : scatterlist map failed\n", | 674 | printk(KERN_ERR "%s : scatterlist map failed\n", |
676 | dev_name(&sock->dev)); | 675 | dev_name(&sock->dev)); |
677 | mrq->cmd->error = -ENOMEM; | 676 | mrq->cmd->error = -ENOMEM; |
678 | goto err_out; | 677 | goto err_out; |
@@ -684,7 +683,7 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
684 | ? PCI_DMA_TODEVICE | 683 | ? PCI_DMA_TODEVICE |
685 | : PCI_DMA_FROMDEVICE); | 684 | : PCI_DMA_FROMDEVICE); |
686 | if (host->sg_len < 1) { | 685 | if (host->sg_len < 1) { |
687 | pr_err("%s : scatterlist map failed\n", | 686 | printk(KERN_ERR "%s : scatterlist map failed\n", |
688 | dev_name(&sock->dev)); | 687 | dev_name(&sock->dev)); |
689 | tifm_unmap_sg(sock, &host->bounce_buf, 1, | 688 | tifm_unmap_sg(sock, &host->bounce_buf, 1, |
690 | r_data->flags & MMC_DATA_WRITE | 689 | r_data->flags & MMC_DATA_WRITE |
@@ -748,7 +747,7 @@ static void tifm_sd_end_cmd(unsigned long data) | |||
748 | host->req = NULL; | 747 | host->req = NULL; |
749 | 748 | ||
750 | if (!mrq) { | 749 | if (!mrq) { |
751 | pr_err(" %s : no request to complete?\n", | 750 | printk(KERN_ERR " %s : no request to complete?\n", |
752 | dev_name(&sock->dev)); | 751 | dev_name(&sock->dev)); |
753 | spin_unlock_irqrestore(&sock->lock, flags); | 752 | spin_unlock_irqrestore(&sock->lock, flags); |
754 | return; | 753 | return; |
@@ -787,7 +786,8 @@ static void tifm_sd_abort(unsigned long data) | |||
787 | { | 786 | { |
788 | struct tifm_sd *host = (struct tifm_sd*)data; | 787 | struct tifm_sd *host = (struct tifm_sd*)data; |
789 | 788 | ||
790 | pr_err("%s : card failed to respond for a long period of time " | 789 | printk(KERN_ERR |
790 | "%s : card failed to respond for a long period of time " | ||
791 | "(%x, %x)\n", | 791 | "(%x, %x)\n", |
792 | dev_name(&host->dev->dev), host->req->cmd->opcode, host->cmd_flags); | 792 | dev_name(&host->dev->dev), host->req->cmd->opcode, host->cmd_flags); |
793 | 793 | ||
@@ -905,7 +905,7 @@ static int tifm_sd_initialize_host(struct tifm_sd *host) | |||
905 | } | 905 | } |
906 | 906 | ||
907 | if (rc) { | 907 | if (rc) { |
908 | pr_err("%s : controller failed to reset\n", | 908 | printk(KERN_ERR "%s : controller failed to reset\n", |
909 | dev_name(&sock->dev)); | 909 | dev_name(&sock->dev)); |
910 | return -ENODEV; | 910 | return -ENODEV; |
911 | } | 911 | } |
@@ -931,7 +931,8 @@ static int tifm_sd_initialize_host(struct tifm_sd *host) | |||
931 | } | 931 | } |
932 | 932 | ||
933 | if (rc) { | 933 | if (rc) { |
934 | pr_err("%s : card not ready - probe failed on initialization\n", | 934 | printk(KERN_ERR |
935 | "%s : card not ready - probe failed on initialization\n", | ||
935 | dev_name(&sock->dev)); | 936 | dev_name(&sock->dev)); |
936 | return -ENODEV; | 937 | return -ENODEV; |
937 | } | 938 | } |
@@ -952,7 +953,7 @@ static int tifm_sd_probe(struct tifm_dev *sock) | |||
952 | 953 | ||
953 | if (!(TIFM_SOCK_STATE_OCCUPIED | 954 | if (!(TIFM_SOCK_STATE_OCCUPIED |
954 | & readl(sock->addr + SOCK_PRESENT_STATE))) { | 955 | & readl(sock->addr + SOCK_PRESENT_STATE))) { |
955 | pr_warning("%s : card gone, unexpectedly\n", | 956 | printk(KERN_WARNING "%s : card gone, unexpectedly\n", |
956 | dev_name(&sock->dev)); | 957 | dev_name(&sock->dev)); |
957 | return rc; | 958 | return rc; |
958 | } | 959 | } |
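Editor's note: the tifm_sd changes above swap pr_err()/pr_warning() back to open-coded printk(KERN_ERR ...) and the bool module parameters back to int; the logging forms are equivalent, pr_err() being a thin wrapper. A tiny sketch showing the two spellings side by side (my_report and its argument are hypothetical):

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static bool no_dma;                      /* 'bool' matches the bool param type */
    module_param(no_dma, bool, 0644);

    static void my_report(const char *dev)
    {
            pr_err("%s : unfinished request detected\n", dev);
            /* Equivalent open-coded form used on the right-hand side: */
            printk(KERN_ERR "%s : unfinished request detected\n", dev);
    }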
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index 139212e79cd..44a9668c4b7 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c | |||
@@ -57,7 +57,7 @@ static int tmio_mmc_resume(struct platform_device *dev) | |||
57 | #define tmio_mmc_resume NULL | 57 | #define tmio_mmc_resume NULL |
58 | #endif | 58 | #endif |
59 | 59 | ||
60 | static int tmio_mmc_probe(struct platform_device *pdev) | 60 | static int __devinit tmio_mmc_probe(struct platform_device *pdev) |
61 | { | 61 | { |
62 | const struct mfd_cell *cell = mfd_get_cell(pdev); | 62 | const struct mfd_cell *cell = mfd_get_cell(pdev); |
63 | struct tmio_mmc_data *pdata; | 63 | struct tmio_mmc_data *pdata; |
@@ -88,8 +88,8 @@ static int tmio_mmc_probe(struct platform_device *pdev) | |||
88 | if (ret) | 88 | if (ret) |
89 | goto cell_disable; | 89 | goto cell_disable; |
90 | 90 | ||
91 | ret = request_irq(irq, tmio_mmc_irq, IRQF_TRIGGER_FALLING, | 91 | ret = request_irq(irq, tmio_mmc_irq, IRQF_DISABLED | |
92 | dev_name(&pdev->dev), host); | 92 | IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), host); |
93 | if (ret) | 93 | if (ret) |
94 | goto host_remove; | 94 | goto host_remove; |
95 | 95 | ||
@@ -107,7 +107,7 @@ out: | |||
107 | return ret; | 107 | return ret; |
108 | } | 108 | } |
109 | 109 | ||
110 | static int tmio_mmc_remove(struct platform_device *pdev) | 110 | static int __devexit tmio_mmc_remove(struct platform_device *pdev) |
111 | { | 111 | { |
112 | const struct mfd_cell *cell = mfd_get_cell(pdev); | 112 | const struct mfd_cell *cell = mfd_get_cell(pdev); |
113 | struct mmc_host *mmc = platform_get_drvdata(pdev); | 113 | struct mmc_host *mmc = platform_get_drvdata(pdev); |
@@ -133,12 +133,24 @@ static struct platform_driver tmio_mmc_driver = { | |||
133 | .owner = THIS_MODULE, | 133 | .owner = THIS_MODULE, |
134 | }, | 134 | }, |
135 | .probe = tmio_mmc_probe, | 135 | .probe = tmio_mmc_probe, |
136 | .remove = tmio_mmc_remove, | 136 | .remove = __devexit_p(tmio_mmc_remove), |
137 | .suspend = tmio_mmc_suspend, | 137 | .suspend = tmio_mmc_suspend, |
138 | .resume = tmio_mmc_resume, | 138 | .resume = tmio_mmc_resume, |
139 | }; | 139 | }; |
140 | 140 | ||
141 | module_platform_driver(tmio_mmc_driver); | 141 | |
142 | static int __init tmio_mmc_init(void) | ||
143 | { | ||
144 | return platform_driver_register(&tmio_mmc_driver); | ||
145 | } | ||
146 | |||
147 | static void __exit tmio_mmc_exit(void) | ||
148 | { | ||
149 | platform_driver_unregister(&tmio_mmc_driver); | ||
150 | } | ||
151 | |||
152 | module_init(tmio_mmc_init); | ||
153 | module_exit(tmio_mmc_exit); | ||
142 | 154 | ||
143 | MODULE_DESCRIPTION("Toshiba TMIO SD/MMC driver"); | 155 | MODULE_DESCRIPTION("Toshiba TMIO SD/MMC driver"); |
144 | MODULE_AUTHOR("Ian Molton <spyro@f2s.com>"); | 156 | MODULE_AUTHOR("Ian Molton <spyro@f2s.com>"); |
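Editor's note: in the tmio_mmc probe above, the left-hand request_irq() keeps only the trigger type, while the right-hand form also passes IRQF_DISABLED, a flag that stopped having any effect in later kernels and was eventually removed. A minimal sketch of the flag-free call (names assumed):

    #include <linux/interrupt.h>

    static int my_request(int irq, irq_handler_t handler, void *dev,
                          const char *name)
    {
            /* Trigger type only; IRQF_DISABLED is simply dropped. */
            return request_irq(irq, handler, IRQF_TRIGGER_FALLING, name, dev);
    }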
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index d857f5c6e7d..eeaf64391fb 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h | |||
@@ -20,8 +20,8 @@ | |||
20 | #include <linux/mmc/tmio.h> | 20 | #include <linux/mmc/tmio.h> |
21 | #include <linux/mutex.h> | 21 | #include <linux/mutex.h> |
22 | #include <linux/pagemap.h> | 22 | #include <linux/pagemap.h> |
23 | #include <linux/scatterlist.h> | ||
24 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
24 | #include <linux/scatterlist.h> | ||
25 | 25 | ||
26 | /* Definitions for values the CTRL_SDIO_STATUS register can take. */ | 26 | /* Definitions for values the CTRL_SDIO_STATUS register can take. */ |
27 | #define TMIO_SDIO_STAT_IOIRQ 0x0001 | 27 | #define TMIO_SDIO_STAT_IOIRQ 0x0001 |
@@ -47,14 +47,16 @@ struct tmio_mmc_host { | |||
47 | struct mmc_request *mrq; | 47 | struct mmc_request *mrq; |
48 | struct mmc_data *data; | 48 | struct mmc_data *data; |
49 | struct mmc_host *mmc; | 49 | struct mmc_host *mmc; |
50 | 50 | unsigned int sdio_irq_enabled; | |
51 | /* Controller power state */ | ||
52 | bool power; | ||
53 | 51 | ||
54 | /* Callbacks for clock / power control */ | 52 | /* Callbacks for clock / power control */ |
55 | void (*set_pwr)(struct platform_device *host, int state); | 53 | void (*set_pwr)(struct platform_device *host, int state); |
56 | void (*set_clk_div)(struct platform_device *host, int state); | 54 | void (*set_clk_div)(struct platform_device *host, int state); |
57 | 55 | ||
56 | int pm_error; | ||
57 | /* recognise system-wide suspend in runtime PM methods */ | ||
58 | bool pm_global; | ||
59 | |||
58 | /* pio related stuff */ | 60 | /* pio related stuff */ |
59 | struct scatterlist *sg_ptr; | 61 | struct scatterlist *sg_ptr; |
60 | struct scatterlist *sg_orig; | 62 | struct scatterlist *sg_orig; |
@@ -77,14 +79,9 @@ struct tmio_mmc_host { | |||
77 | struct delayed_work delayed_reset_work; | 79 | struct delayed_work delayed_reset_work; |
78 | struct work_struct done; | 80 | struct work_struct done; |
79 | 81 | ||
80 | /* Cache IRQ mask */ | ||
81 | u32 sdcard_irq_mask; | ||
82 | u32 sdio_irq_mask; | ||
83 | |||
84 | spinlock_t lock; /* protect host private data */ | 82 | spinlock_t lock; /* protect host private data */ |
85 | unsigned long last_req_ts; | 83 | unsigned long last_req_ts; |
86 | struct mutex ios_lock; /* protect set_ios() context */ | 84 | struct mutex ios_lock; /* protect set_ios() context */ |
87 | bool native_hotplug; | ||
88 | }; | 85 | }; |
89 | 86 | ||
90 | int tmio_mmc_host_probe(struct tmio_mmc_host **host, | 87 | int tmio_mmc_host_probe(struct tmio_mmc_host **host, |
@@ -96,21 +93,18 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host); | |||
96 | void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i); | 93 | void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i); |
97 | void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i); | 94 | void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i); |
98 | irqreturn_t tmio_mmc_irq(int irq, void *devid); | 95 | irqreturn_t tmio_mmc_irq(int irq, void *devid); |
99 | irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid); | ||
100 | irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid); | ||
101 | irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid); | ||
102 | 96 | ||
103 | static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, | 97 | static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, |
104 | unsigned long *flags) | 98 | unsigned long *flags) |
105 | { | 99 | { |
106 | local_irq_save(*flags); | 100 | local_irq_save(*flags); |
107 | return kmap_atomic(sg_page(sg)) + sg->offset; | 101 | return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; |
108 | } | 102 | } |
109 | 103 | ||
110 | static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg, | 104 | static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg, |
111 | unsigned long *flags, void *virt) | 105 | unsigned long *flags, void *virt) |
112 | { | 106 | { |
113 | kunmap_atomic(virt - sg->offset); | 107 | kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ); |
114 | local_irq_restore(*flags); | 108 | local_irq_restore(*flags); |
115 | } | 109 | } |
116 | 110 | ||
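The two mapping helpers above differ only in the kmap_atomic() calling convention: the left-hand side uses the single-argument form, while the patched side passes the legacy KM_BIO_SRC_IRQ slot that older kernels still required. A small usage sketch of the driver's own wrappers, copying data out of one scatterlist entry (the destination buffer and length are illustrative):

#include <linux/scatterlist.h>
#include <linux/string.h>

#include "tmio_mmc.h"

/* Copy 'len' bytes from an sg entry while the wrapper holds IRQs off. */
static void example_copy_from_sg(struct scatterlist *sg, void *dst, size_t len)
{
        unsigned long flags;
        void *src = tmio_mmc_kmap_atomic(sg, &flags);

        memcpy(dst, src, len);
        tmio_mmc_kunmap_atomic(sg, &flags, src);
}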
@@ -119,7 +113,6 @@ void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data); | |||
119 | void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable); | 113 | void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable); |
120 | void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata); | 114 | void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata); |
121 | void tmio_mmc_release_dma(struct tmio_mmc_host *host); | 115 | void tmio_mmc_release_dma(struct tmio_mmc_host *host); |
122 | void tmio_mmc_abort_dma(struct tmio_mmc_host *host); | ||
123 | #else | 116 | #else |
124 | static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host, | 117 | static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host, |
125 | struct mmc_data *data) | 118 | struct mmc_data *data) |
@@ -140,10 +133,6 @@ static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host, | |||
140 | static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host) | 133 | static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host) |
141 | { | 134 | { |
142 | } | 135 | } |
143 | |||
144 | static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host) | ||
145 | { | ||
146 | } | ||
147 | #endif | 136 | #endif |
148 | 137 | ||
149 | #ifdef CONFIG_PM | 138 | #ifdef CONFIG_PM |
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c index fff92860485..86f259cdfcb 100644 --- a/drivers/mmc/host/tmio_mmc_dma.c +++ b/drivers/mmc/host/tmio_mmc_dma.c | |||
@@ -34,18 +34,6 @@ void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) | |||
34 | #endif | 34 | #endif |
35 | } | 35 | } |
36 | 36 | ||
37 | void tmio_mmc_abort_dma(struct tmio_mmc_host *host) | ||
38 | { | ||
39 | tmio_mmc_enable_dma(host, false); | ||
40 | |||
41 | if (host->chan_rx) | ||
42 | dmaengine_terminate_all(host->chan_rx); | ||
43 | if (host->chan_tx) | ||
44 | dmaengine_terminate_all(host->chan_tx); | ||
45 | |||
46 | tmio_mmc_enable_dma(host, true); | ||
47 | } | ||
48 | |||
49 | static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) | 37 | static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) |
50 | { | 38 | { |
51 | struct scatterlist *sg = host->sg_ptr, *sg_tmp; | 39 | struct scatterlist *sg = host->sg_ptr, *sg_tmp; |
@@ -88,8 +76,8 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) | |||
88 | 76 | ||
89 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); | 77 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); |
90 | if (ret > 0) | 78 | if (ret > 0) |
91 | desc = dmaengine_prep_slave_sg(chan, sg, ret, | 79 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, |
92 | DMA_DEV_TO_MEM, DMA_CTRL_ACK); | 80 | DMA_FROM_DEVICE, DMA_CTRL_ACK); |
93 | 81 | ||
94 | if (desc) { | 82 | if (desc) { |
95 | cookie = dmaengine_submit(desc); | 83 | cookie = dmaengine_submit(desc); |
@@ -169,8 +157,8 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) | |||
169 | 157 | ||
170 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); | 158 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); |
171 | if (ret > 0) | 159 | if (ret > 0) |
172 | desc = dmaengine_prep_slave_sg(chan, sg, ret, | 160 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, |
173 | DMA_MEM_TO_DEV, DMA_CTRL_ACK); | 161 | DMA_TO_DEVICE, DMA_CTRL_ACK); |
174 | 162 | ||
175 | if (desc) { | 163 | if (desc) { |
176 | cookie = dmaengine_submit(desc); | 164 | cookie = dmaengine_submit(desc); |
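Both hunks in this file swap the dmaengine_prep_slave_sg() wrapper and the enum dma_transfer_direction values (DMA_DEV_TO_MEM/DMA_MEM_TO_DEV) for the raw device_prep_slave_sg callback with enum dma_data_direction, which is all that pre-3.2 dmaengine offered. A minimal sketch of the newer-style receive submission, assuming a slave channel that has already been configured; the function name and error codes are illustrative:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Map an sg list and queue a device-to-memory slave transfer on 'chan'. */
static int example_queue_rx(struct dma_chan *chan, struct scatterlist *sg,
                            unsigned int sg_len)
{
        struct dma_async_tx_descriptor *desc;
        int nents;

        nents = dma_map_sg(chan->device->dev, sg, sg_len, DMA_FROM_DEVICE);
        if (nents <= 0)
                return -ENOMEM;

        desc = dmaengine_prep_slave_sg(chan, sg, nents,
                                       DMA_DEV_TO_MEM, DMA_CTRL_ACK);
        if (!desc)
                return -EIO;

        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
        return 0;
}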
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c index 50bf495a988..1f16357e730 100644 --- a/drivers/mmc/host/tmio_mmc_pio.c +++ b/drivers/mmc/host/tmio_mmc_pio.c | |||
@@ -35,30 +35,27 @@ | |||
35 | #include <linux/irq.h> | 35 | #include <linux/irq.h> |
36 | #include <linux/mfd/tmio.h> | 36 | #include <linux/mfd/tmio.h> |
37 | #include <linux/mmc/host.h> | 37 | #include <linux/mmc/host.h> |
38 | #include <linux/mmc/mmc.h> | ||
39 | #include <linux/mmc/slot-gpio.h> | ||
40 | #include <linux/mmc/tmio.h> | 38 | #include <linux/mmc/tmio.h> |
41 | #include <linux/module.h> | 39 | #include <linux/module.h> |
42 | #include <linux/pagemap.h> | 40 | #include <linux/pagemap.h> |
43 | #include <linux/platform_device.h> | 41 | #include <linux/platform_device.h> |
44 | #include <linux/pm_qos.h> | ||
45 | #include <linux/pm_runtime.h> | 42 | #include <linux/pm_runtime.h> |
46 | #include <linux/scatterlist.h> | 43 | #include <linux/scatterlist.h> |
47 | #include <linux/spinlock.h> | ||
48 | #include <linux/workqueue.h> | 44 | #include <linux/workqueue.h> |
45 | #include <linux/spinlock.h> | ||
49 | 46 | ||
50 | #include "tmio_mmc.h" | 47 | #include "tmio_mmc.h" |
51 | 48 | ||
52 | void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i) | 49 | void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i) |
53 | { | 50 | { |
54 | host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ); | 51 | u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ); |
55 | sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask); | 52 | sd_ctrl_write32(host, CTL_IRQ_MASK, mask); |
56 | } | 53 | } |
57 | 54 | ||
58 | void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i) | 55 | void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i) |
59 | { | 56 | { |
60 | host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ); | 57 | u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) | (i & TMIO_MASK_IRQ); |
61 | sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask); | 58 | sd_ctrl_write32(host, CTL_IRQ_MASK, mask); |
62 | } | 59 | } |
63 | 60 | ||
64 | static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i) | 61 | static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i) |
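The left-hand enable/disable helpers keep the interrupt mask cached in host->sdcard_irq_mask and only ever write CTL_IRQ_MASK, while the patched versions re-read the register on every change. A generic sketch of the cached read-modify-write idea; the structure, register offset and accessors below are purely illustrative stand-ins, not the driver's own:

#include <linux/io.h>
#include <linux/types.h>

struct example_irq_ctl {
        void __iomem *regs;     /* controller register window */
        u32 irq_mask;           /* software copy of the mask register */
};

#define EXAMPLE_IRQ_MASK        0x20    /* hypothetical register offset */

/* Set bits in the cached mask (1 = masked) and push the copy to hardware. */
static void example_disable_irqs(struct example_irq_ctl *ctl, u32 bits)
{
        ctl->irq_mask |= bits;
        writel(ctl->irq_mask, ctl->regs + EXAMPLE_IRQ_MASK);
}

/* Clear bits in the cached mask and push the copy to hardware. */
static void example_enable_irqs(struct example_irq_ctl *ctl, u32 bits)
{
        ctl->irq_mask &= ~bits;
        writel(ctl->irq_mask, ctl->regs + EXAMPLE_IRQ_MASK);
}

The cached copy saves an MMIO read on every mask update in the interrupt hot path.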
@@ -95,7 +92,7 @@ static int tmio_mmc_next_sg(struct tmio_mmc_host *host) | |||
95 | static void pr_debug_status(u32 status) | 92 | static void pr_debug_status(u32 status) |
96 | { | 93 | { |
97 | int i = 0; | 94 | int i = 0; |
98 | pr_debug("status: %08x = ", status); | 95 | printk(KERN_DEBUG "status: %08x = ", status); |
99 | STATUS_TO_TEXT(CARD_REMOVE, status, i); | 96 | STATUS_TO_TEXT(CARD_REMOVE, status, i); |
100 | STATUS_TO_TEXT(CARD_INSERT, status, i); | 97 | STATUS_TO_TEXT(CARD_INSERT, status, i); |
101 | STATUS_TO_TEXT(SIGSTATE, status, i); | 98 | STATUS_TO_TEXT(SIGSTATE, status, i); |
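This hunk, like several below in via-sdmmc and wbsd, trades the pr_debug()/pr_err()/pr_warning() helpers for explicit printk(KERN_*) calls, which is the older spelling. A small sketch of the two forms side by side (the message text is illustrative); note that pr_debug() compiles away unless DEBUG or dynamic debug is enabled, whereas printk(KERN_DEBUG ...) is always emitted:

#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/types.h>

static void example_log_status(u32 status)
{
        /* Helper form used on the left-hand side of these hunks ... */
        pr_debug("status: %08x\n", status);
        pr_err("controller error, status %08x\n", status);

        /* ... and the explicit printk() form restored by the patch. */
        printk(KERN_DEBUG "status: %08x\n", status);
        printk(KERN_ERR "controller error, status %08x\n", status);
}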
@@ -129,14 +126,14 @@ static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) | |||
129 | struct tmio_mmc_host *host = mmc_priv(mmc); | 126 | struct tmio_mmc_host *host = mmc_priv(mmc); |
130 | 127 | ||
131 | if (enable) { | 128 | if (enable) { |
132 | host->sdio_irq_mask = TMIO_SDIO_MASK_ALL & | 129 | host->sdio_irq_enabled = 1; |
133 | ~TMIO_SDIO_STAT_IOIRQ; | ||
134 | sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); | 130 | sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); |
135 | sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask); | 131 | sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, |
132 | (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ)); | ||
136 | } else { | 133 | } else { |
137 | host->sdio_irq_mask = TMIO_SDIO_MASK_ALL; | 134 | sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL); |
138 | sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask); | ||
139 | sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); | 135 | sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); |
136 | host->sdio_irq_enabled = 0; | ||
140 | } | 137 | } |
141 | } | 138 | } |
142 | 139 | ||
@@ -247,7 +244,6 @@ static void tmio_mmc_reset_work(struct work_struct *work) | |||
247 | /* Ready for new calls */ | 244 | /* Ready for new calls */ |
248 | host->mrq = NULL; | 245 | host->mrq = NULL; |
249 | 246 | ||
250 | tmio_mmc_abort_dma(host); | ||
251 | mmc_request_done(host->mmc, mrq); | 247 | mmc_request_done(host->mmc, mrq); |
252 | } | 248 | } |
253 | 249 | ||
@@ -274,9 +270,6 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host) | |||
274 | host->mrq = NULL; | 270 | host->mrq = NULL; |
275 | spin_unlock_irqrestore(&host->lock, flags); | 271 | spin_unlock_irqrestore(&host->lock, flags); |
276 | 272 | ||
277 | if (mrq->cmd->error || (mrq->data && mrq->data->error)) | ||
278 | tmio_mmc_abort_dma(host); | ||
279 | |||
280 | mmc_request_done(host->mmc, mrq); | 273 | mmc_request_done(host->mmc, mrq); |
281 | } | 274 | } |
282 | 275 | ||
@@ -304,10 +297,9 @@ static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command | |||
304 | { | 297 | { |
305 | struct mmc_data *data = host->data; | 298 | struct mmc_data *data = host->data; |
306 | int c = cmd->opcode; | 299 | int c = cmd->opcode; |
307 | u32 irq_mask = TMIO_MASK_CMD; | ||
308 | 300 | ||
309 | /* CMD12 is handled by hardware */ | 301 | /* Command 12 is handled by hardware */ |
310 | if (cmd->opcode == MMC_STOP_TRANSMISSION && !cmd->arg) { | 302 | if (cmd->opcode == 12 && !cmd->arg) { |
311 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001); | 303 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001); |
312 | return 0; | 304 | return 0; |
313 | } | 305 | } |
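On the left this check uses the named opcode from <linux/mmc/mmc.h>; MMC_STOP_TRANSMISSION is defined as 12, so the two comparisons are identical and the change is purely cosmetic. A minimal sketch of the test in isolation, with an invented helper name:

#include <linux/mmc/core.h>
#include <linux/mmc/mmc.h>
#include <linux/types.h>

/* True for a STOP_TRANSMISSION (CMD12) with no argument, which the
 * controller can issue on its own via CTL_STOP_INTERNAL_ACTION. */
static bool example_is_hw_stop(const struct mmc_command *cmd)
{
        return cmd->opcode == MMC_STOP_TRANSMISSION && !cmd->arg;
}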
@@ -340,9 +332,7 @@ static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command | |||
340 | c |= TRANSFER_READ; | 332 | c |= TRANSFER_READ; |
341 | } | 333 | } |
342 | 334 | ||
343 | if (!host->native_hotplug) | 335 | tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD); |
344 | irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT); | ||
345 | tmio_mmc_enable_mmc_irqs(host, irq_mask); | ||
346 | 336 | ||
347 | /* Fire off the command */ | 337 | /* Fire off the command */ |
348 | sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg); | 338 | sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg); |
@@ -450,7 +440,7 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) | |||
450 | } | 440 | } |
451 | 441 | ||
452 | if (stop) { | 442 | if (stop) { |
453 | if (stop->opcode == MMC_STOP_TRANSMISSION && !stop->arg) | 443 | if (stop->opcode == 12 && !stop->arg) |
454 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000); | 444 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000); |
455 | else | 445 | else |
456 | BUG(); | 446 | BUG(); |
@@ -553,20 +543,45 @@ out: | |||
553 | spin_unlock(&host->lock); | 543 | spin_unlock(&host->lock); |
554 | } | 544 | } |
555 | 545 | ||
556 | static void tmio_mmc_card_irq_status(struct tmio_mmc_host *host, | 546 | irqreturn_t tmio_mmc_irq(int irq, void *devid) |
557 | int *ireg, int *status) | ||
558 | { | 547 | { |
559 | *status = sd_ctrl_read32(host, CTL_STATUS); | 548 | struct tmio_mmc_host *host = devid; |
560 | *ireg = *status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask; | 549 | struct mmc_host *mmc = host->mmc; |
550 | struct tmio_mmc_data *pdata = host->pdata; | ||
551 | unsigned int ireg, irq_mask, status; | ||
552 | unsigned int sdio_ireg, sdio_irq_mask, sdio_status; | ||
561 | 553 | ||
562 | pr_debug_status(*status); | 554 | pr_debug("MMC IRQ begin\n"); |
563 | pr_debug_status(*ireg); | ||
564 | } | ||
565 | 555 | ||
566 | static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host, | 556 | status = sd_ctrl_read32(host, CTL_STATUS); |
567 | int ireg, int status) | 557 | irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); |
568 | { | 558 | ireg = status & TMIO_MASK_IRQ & ~irq_mask; |
569 | struct mmc_host *mmc = host->mmc; | 559 | |
560 | sdio_ireg = 0; | ||
561 | if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) { | ||
562 | sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS); | ||
563 | sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK); | ||
564 | sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask; | ||
565 | |||
566 | sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL); | ||
567 | |||
568 | if (sdio_ireg && !host->sdio_irq_enabled) { | ||
569 | pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n", | ||
570 | sdio_status, sdio_irq_mask, sdio_ireg); | ||
571 | tmio_mmc_enable_sdio_irq(mmc, 0); | ||
572 | goto out; | ||
573 | } | ||
574 | |||
575 | if (mmc->caps & MMC_CAP_SDIO_IRQ && | ||
576 | sdio_ireg & TMIO_SDIO_STAT_IOIRQ) | ||
577 | mmc_signal_sdio_irq(mmc); | ||
578 | |||
579 | if (sdio_ireg) | ||
580 | goto out; | ||
581 | } | ||
582 | |||
583 | pr_debug_status(status); | ||
584 | pr_debug_status(ireg); | ||
570 | 585 | ||
571 | /* Card insert / remove attempts */ | 586 | /* Card insert / remove attempts */ |
572 | if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { | 587 | if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { |
@@ -576,102 +591,43 @@ static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host, | |||
576 | ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) && | 591 | ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) && |
577 | !work_pending(&mmc->detect.work)) | 592 | !work_pending(&mmc->detect.work)) |
578 | mmc_detect_change(host->mmc, msecs_to_jiffies(100)); | 593 | mmc_detect_change(host->mmc, msecs_to_jiffies(100)); |
579 | return true; | 594 | goto out; |
580 | } | 595 | } |
581 | 596 | ||
582 | return false; | 597 | /* CRC and other errors */ |
583 | } | 598 | /* if (ireg & TMIO_STAT_ERR_IRQ) |
584 | 599 | * handled |= tmio_error_irq(host, irq, stat); | |
585 | irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid) | 600 | */ |
586 | { | ||
587 | unsigned int ireg, status; | ||
588 | struct tmio_mmc_host *host = devid; | ||
589 | |||
590 | tmio_mmc_card_irq_status(host, &ireg, &status); | ||
591 | __tmio_mmc_card_detect_irq(host, ireg, status); | ||
592 | |||
593 | return IRQ_HANDLED; | ||
594 | } | ||
595 | EXPORT_SYMBOL(tmio_mmc_card_detect_irq); | ||
596 | 601 | ||
597 | static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, | ||
598 | int ireg, int status) | ||
599 | { | ||
600 | /* Command completion */ | 602 | /* Command completion */ |
601 | if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) { | 603 | if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) { |
602 | tmio_mmc_ack_mmc_irqs(host, | 604 | tmio_mmc_ack_mmc_irqs(host, |
603 | TMIO_STAT_CMDRESPEND | | 605 | TMIO_STAT_CMDRESPEND | |
604 | TMIO_STAT_CMDTIMEOUT); | 606 | TMIO_STAT_CMDTIMEOUT); |
605 | tmio_mmc_cmd_irq(host, status); | 607 | tmio_mmc_cmd_irq(host, status); |
606 | return true; | 608 | goto out; |
607 | } | 609 | } |
608 | 610 | ||
609 | /* Data transfer */ | 611 | /* Data transfer */ |
610 | if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) { | 612 | if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) { |
611 | tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); | 613 | tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); |
612 | tmio_mmc_pio_irq(host); | 614 | tmio_mmc_pio_irq(host); |
613 | return true; | 615 | goto out; |
614 | } | 616 | } |
615 | 617 | ||
616 | /* Data transfer completion */ | 618 | /* Data transfer completion */ |
617 | if (ireg & TMIO_STAT_DATAEND) { | 619 | if (ireg & TMIO_STAT_DATAEND) { |
618 | tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND); | 620 | tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND); |
619 | tmio_mmc_data_irq(host); | 621 | tmio_mmc_data_irq(host); |
620 | return true; | 622 | goto out; |
621 | } | 623 | } |
622 | 624 | ||
623 | return false; | 625 | pr_warning("tmio_mmc: Spurious irq, disabling! " |
624 | } | 626 | "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg); |
625 | 627 | pr_debug_status(status); | |
626 | irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid) | 628 | tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask); |
627 | { | ||
628 | unsigned int ireg, status; | ||
629 | struct tmio_mmc_host *host = devid; | ||
630 | |||
631 | tmio_mmc_card_irq_status(host, &ireg, &status); | ||
632 | __tmio_mmc_sdcard_irq(host, ireg, status); | ||
633 | |||
634 | return IRQ_HANDLED; | ||
635 | } | ||
636 | EXPORT_SYMBOL(tmio_mmc_sdcard_irq); | ||
637 | |||
638 | irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid) | ||
639 | { | ||
640 | struct tmio_mmc_host *host = devid; | ||
641 | struct mmc_host *mmc = host->mmc; | ||
642 | struct tmio_mmc_data *pdata = host->pdata; | ||
643 | unsigned int ireg, status; | ||
644 | |||
645 | if (!(pdata->flags & TMIO_MMC_SDIO_IRQ)) | ||
646 | return IRQ_HANDLED; | ||
647 | |||
648 | status = sd_ctrl_read16(host, CTL_SDIO_STATUS); | ||
649 | ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdcard_irq_mask; | ||
650 | |||
651 | sd_ctrl_write16(host, CTL_SDIO_STATUS, status & ~TMIO_SDIO_MASK_ALL); | ||
652 | |||
653 | if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ) | ||
654 | mmc_signal_sdio_irq(mmc); | ||
655 | |||
656 | return IRQ_HANDLED; | ||
657 | } | ||
658 | EXPORT_SYMBOL(tmio_mmc_sdio_irq); | ||
659 | |||
660 | irqreturn_t tmio_mmc_irq(int irq, void *devid) | ||
661 | { | ||
662 | struct tmio_mmc_host *host = devid; | ||
663 | unsigned int ireg, status; | ||
664 | |||
665 | pr_debug("MMC IRQ begin\n"); | ||
666 | |||
667 | tmio_mmc_card_irq_status(host, &ireg, &status); | ||
668 | if (__tmio_mmc_card_detect_irq(host, ireg, status)) | ||
669 | return IRQ_HANDLED; | ||
670 | if (__tmio_mmc_sdcard_irq(host, ireg, status)) | ||
671 | return IRQ_HANDLED; | ||
672 | |||
673 | tmio_mmc_sdio_irq(irq, devid); | ||
674 | 629 | ||
630 | out: | ||
675 | return IRQ_HANDLED; | 631 | return IRQ_HANDLED; |
676 | } | 632 | } |
677 | EXPORT_SYMBOL(tmio_mmc_irq); | 633 | EXPORT_SYMBOL(tmio_mmc_irq); |
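The left-hand side of this hunk splits interrupt handling into tmio_mmc_card_detect_irq(), tmio_mmc_sdcard_irq() and tmio_mmc_sdio_irq() so platforms with dedicated interrupt lines can hook each source separately, while the patched side folds everything back into the single tmio_mmc_irq() shown on the right. A sketch of how a glue driver might wire up the split handlers, assuming three platform IRQ resources in that order (the indices are illustrative, and IRQ-lookup error checks are elided for brevity):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

#include "tmio_mmc.h"

static int example_request_split_irqs(struct platform_device *pdev,
                                      struct tmio_mmc_host *host)
{
        int ret;

        ret = request_irq(platform_get_irq(pdev, 0), tmio_mmc_card_detect_irq,
                          0, dev_name(&pdev->dev), host);
        if (ret)
                return ret;

        ret = request_irq(platform_get_irq(pdev, 1), tmio_mmc_sdcard_irq,
                          0, dev_name(&pdev->dev), host);
        if (ret)
                return ret;

        return request_irq(platform_get_irq(pdev, 2), tmio_mmc_sdio_irq,
                           0, dev_name(&pdev->dev), host);
}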
@@ -752,34 +708,6 @@ fail: | |||
752 | mmc_request_done(mmc, mrq); | 708 | mmc_request_done(mmc, mrq); |
753 | } | 709 | } |
754 | 710 | ||
755 | static int tmio_mmc_clk_update(struct mmc_host *mmc) | ||
756 | { | ||
757 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
758 | struct tmio_mmc_data *pdata = host->pdata; | ||
759 | int ret; | ||
760 | |||
761 | if (!pdata->clk_enable) | ||
762 | return -ENOTSUPP; | ||
763 | |||
764 | ret = pdata->clk_enable(host->pdev, &mmc->f_max); | ||
765 | if (!ret) | ||
766 | mmc->f_min = mmc->f_max / 512; | ||
767 | |||
768 | return ret; | ||
769 | } | ||
770 | |||
771 | static void tmio_mmc_set_power(struct tmio_mmc_host *host, struct mmc_ios *ios) | ||
772 | { | ||
773 | struct mmc_host *mmc = host->mmc; | ||
774 | |||
775 | if (host->set_pwr) | ||
776 | host->set_pwr(host->pdev, ios->power_mode != MMC_POWER_OFF); | ||
777 | if (!IS_ERR(mmc->supply.vmmc)) | ||
778 | /* Errors ignored... */ | ||
779 | mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, | ||
780 | ios->power_mode ? ios->vdd : 0); | ||
781 | } | ||
782 | |||
783 | /* Set MMC clock / power. | 711 | /* Set MMC clock / power. |
784 | * Note: This controller uses a simple divider scheme therefore it cannot | 712 | * Note: This controller uses a simple divider scheme therefore it cannot |
785 | * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as | 713 | * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as |
@@ -789,7 +717,7 @@ static void tmio_mmc_set_power(struct tmio_mmc_host *host, struct mmc_ios *ios) | |||
789 | static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | 717 | static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) |
790 | { | 718 | { |
791 | struct tmio_mmc_host *host = mmc_priv(mmc); | 719 | struct tmio_mmc_host *host = mmc_priv(mmc); |
792 | struct device *dev = &host->pdev->dev; | 720 | struct tmio_mmc_data *pdata = host->pdata; |
793 | unsigned long flags; | 721 | unsigned long flags; |
794 | 722 | ||
795 | mutex_lock(&host->ios_lock); | 723 | mutex_lock(&host->ios_lock); |
@@ -797,13 +725,13 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
797 | spin_lock_irqsave(&host->lock, flags); | 725 | spin_lock_irqsave(&host->lock, flags); |
798 | if (host->mrq) { | 726 | if (host->mrq) { |
799 | if (IS_ERR(host->mrq)) { | 727 | if (IS_ERR(host->mrq)) { |
800 | dev_dbg(dev, | 728 | dev_dbg(&host->pdev->dev, |
801 | "%s.%d: concurrent .set_ios(), clk %u, mode %u\n", | 729 | "%s.%d: concurrent .set_ios(), clk %u, mode %u\n", |
802 | current->comm, task_pid_nr(current), | 730 | current->comm, task_pid_nr(current), |
803 | ios->clock, ios->power_mode); | 731 | ios->clock, ios->power_mode); |
804 | host->mrq = ERR_PTR(-EINTR); | 732 | host->mrq = ERR_PTR(-EINTR); |
805 | } else { | 733 | } else { |
806 | dev_dbg(dev, | 734 | dev_dbg(&host->pdev->dev, |
807 | "%s.%d: CMD%u active since %lu, now %lu!\n", | 735 | "%s.%d: CMD%u active since %lu, now %lu!\n", |
808 | current->comm, task_pid_nr(current), | 736 | current->comm, task_pid_nr(current), |
809 | host->mrq->cmd->opcode, host->last_req_ts, jiffies); | 737 | host->mrq->cmd->opcode, host->last_req_ts, jiffies); |
@@ -819,44 +747,38 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
819 | spin_unlock_irqrestore(&host->lock, flags); | 747 | spin_unlock_irqrestore(&host->lock, flags); |
820 | 748 | ||
821 | /* | 749 | /* |
822 | * host->power toggles between false and true in both cases - either | 750 | * pdata->power == false only if COLD_CD is available, otherwise only |
823 | * or not the controller can be runtime-suspended during inactivity. | 751 | * in short time intervals during probing or resuming |
824 | * But if the controller has to be kept on, the runtime-pm usage_count | ||
825 | * is kept positive, so no suspending actually takes place. | ||
826 | */ | 752 | */ |
827 | if (ios->power_mode == MMC_POWER_ON && ios->clock) { | 753 | if (ios->power_mode == MMC_POWER_ON && ios->clock) { |
828 | if (!host->power) { | 754 | if (!pdata->power) { |
829 | tmio_mmc_clk_update(mmc); | 755 | pm_runtime_get_sync(&host->pdev->dev); |
830 | pm_runtime_get_sync(dev); | 756 | pdata->power = true; |
831 | host->power = true; | ||
832 | } | 757 | } |
833 | tmio_mmc_set_clock(host, ios->clock); | 758 | tmio_mmc_set_clock(host, ios->clock); |
834 | /* power up SD bus */ | 759 | /* power up SD bus */ |
835 | tmio_mmc_set_power(host, ios); | 760 | if (host->set_pwr) |
761 | host->set_pwr(host->pdev, 1); | ||
836 | /* start bus clock */ | 762 | /* start bus clock */ |
837 | tmio_mmc_clk_start(host); | 763 | tmio_mmc_clk_start(host); |
838 | } else if (ios->power_mode != MMC_POWER_UP) { | 764 | } else if (ios->power_mode != MMC_POWER_UP) { |
839 | if (ios->power_mode == MMC_POWER_OFF) | 765 | if (host->set_pwr) |
840 | tmio_mmc_set_power(host, ios); | 766 | host->set_pwr(host->pdev, 0); |
841 | if (host->power) { | 767 | if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) && |
842 | struct tmio_mmc_data *pdata = host->pdata; | 768 | pdata->power) { |
843 | tmio_mmc_clk_stop(host); | 769 | pdata->power = false; |
844 | host->power = false; | 770 | pm_runtime_put(&host->pdev->dev); |
845 | pm_runtime_put(dev); | ||
846 | if (pdata->clk_disable) | ||
847 | pdata->clk_disable(host->pdev); | ||
848 | } | 771 | } |
772 | tmio_mmc_clk_stop(host); | ||
849 | } | 773 | } |
850 | 774 | ||
851 | if (host->power) { | 775 | switch (ios->bus_width) { |
852 | switch (ios->bus_width) { | 776 | case MMC_BUS_WIDTH_1: |
853 | case MMC_BUS_WIDTH_1: | 777 | sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0); |
854 | sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0); | 778 | break; |
855 | break; | 779 | case MMC_BUS_WIDTH_4: |
856 | case MMC_BUS_WIDTH_4: | 780 | sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0); |
857 | sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0); | 781 | break; |
858 | break; | ||
859 | } | ||
860 | } | 782 | } |
861 | 783 | ||
862 | /* Let things settle. delay taken from winCE driver */ | 784 | /* Let things settle. delay taken from winCE driver */ |
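The interesting part of this hunk is the runtime-PM bookkeeping: the host takes a pm_runtime reference the first time the bus is powered up and drops it once the bus is powered off again, so the controller can autosuspend while idle but never suspends under an active card. A generic sketch of that pattern; the 'powered' flag stands in for host->power on the left and pdata->power on the right:

#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>

/* Keep the controller resumed exactly while the MMC bus is powered. */
static void example_track_bus_power(struct device *dev, bool *powered,
                                    bool power_on)
{
        if (power_on && !*powered) {
                pm_runtime_get_sync(dev);       /* resume and pin the device */
                *powered = true;
        } else if (!power_on && *powered) {
                *powered = false;
                pm_runtime_put(dev);            /* allow runtime suspend again */
        }
}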
@@ -875,9 +797,6 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc) | |||
875 | { | 797 | { |
876 | struct tmio_mmc_host *host = mmc_priv(mmc); | 798 | struct tmio_mmc_host *host = mmc_priv(mmc); |
877 | struct tmio_mmc_data *pdata = host->pdata; | 799 | struct tmio_mmc_data *pdata = host->pdata; |
878 | int ret = mmc_gpio_get_ro(mmc); | ||
879 | if (ret >= 0) | ||
880 | return ret; | ||
881 | 800 | ||
882 | return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || | 801 | return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || |
883 | (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)); | 802 | (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)); |
@@ -887,9 +806,6 @@ static int tmio_mmc_get_cd(struct mmc_host *mmc) | |||
887 | { | 806 | { |
888 | struct tmio_mmc_host *host = mmc_priv(mmc); | 807 | struct tmio_mmc_host *host = mmc_priv(mmc); |
889 | struct tmio_mmc_data *pdata = host->pdata; | 808 | struct tmio_mmc_data *pdata = host->pdata; |
890 | int ret = mmc_gpio_get_cd(mmc); | ||
891 | if (ret >= 0) | ||
892 | return ret; | ||
893 | 809 | ||
894 | if (!pdata->get_cd) | 810 | if (!pdata->get_cd) |
895 | return -ENOSYS; | 811 | return -ENOSYS; |
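On the left, .get_ro and .get_cd first consult the slot-gpio helpers from <linux/mmc/slot-gpio.h> and fall back to the platform callback or the write-protect status bit only when no GPIO is configured; the patched side predates those helpers and goes straight to the fallback. A sketch of the "GPIO first, then platform hook" shape for card detect; the tail mirrors the driver's original fallback and assumes the usual tmio_mmc_data::get_cd(pdev) callback signature:

#include <linux/errno.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

#include "tmio_mmc.h"

static int example_get_cd(struct mmc_host *mmc)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);
        struct tmio_mmc_data *pdata = host->pdata;
        int ret = mmc_gpio_get_cd(mmc);         /* >= 0 when a CD GPIO is set up */

        if (ret >= 0)
                return ret;

        if (!pdata->get_cd)
                return -ENOSYS;

        return pdata->get_cd(host->pdev);
}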
@@ -905,20 +821,7 @@ static const struct mmc_host_ops tmio_mmc_ops = { | |||
905 | .enable_sdio_irq = tmio_mmc_enable_sdio_irq, | 821 | .enable_sdio_irq = tmio_mmc_enable_sdio_irq, |
906 | }; | 822 | }; |
907 | 823 | ||
908 | static void tmio_mmc_init_ocr(struct tmio_mmc_host *host) | 824 | int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, |
909 | { | ||
910 | struct tmio_mmc_data *pdata = host->pdata; | ||
911 | struct mmc_host *mmc = host->mmc; | ||
912 | |||
913 | mmc_regulator_get_supply(mmc); | ||
914 | |||
915 | if (!mmc->ocr_avail) | ||
916 | mmc->ocr_avail = pdata->ocr_mask ? : MMC_VDD_32_33 | MMC_VDD_33_34; | ||
917 | else if (pdata->ocr_mask) | ||
918 | dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n"); | ||
919 | } | ||
920 | |||
921 | int tmio_mmc_host_probe(struct tmio_mmc_host **host, | ||
922 | struct platform_device *pdev, | 825 | struct platform_device *pdev, |
923 | struct tmio_mmc_data *pdata) | 826 | struct tmio_mmc_data *pdata) |
924 | { | 827 | { |
@@ -957,61 +860,29 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host, | |||
957 | 860 | ||
958 | mmc->ops = &tmio_mmc_ops; | 861 | mmc->ops = &tmio_mmc_ops; |
959 | mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities; | 862 | mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities; |
960 | mmc->caps2 = pdata->capabilities2; | 863 | mmc->f_max = pdata->hclk; |
864 | mmc->f_min = mmc->f_max / 512; | ||
961 | mmc->max_segs = 32; | 865 | mmc->max_segs = 32; |
962 | mmc->max_blk_size = 512; | 866 | mmc->max_blk_size = 512; |
963 | mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) * | 867 | mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) * |
964 | mmc->max_segs; | 868 | mmc->max_segs; |
965 | mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; | 869 | mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; |
966 | mmc->max_seg_size = mmc->max_req_size; | 870 | mmc->max_seg_size = mmc->max_req_size; |
967 | tmio_mmc_init_ocr(_host); | 871 | if (pdata->ocr_mask) |
968 | 872 | mmc->ocr_avail = pdata->ocr_mask; | |
969 | _host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD || | 873 | else |
970 | mmc->caps & MMC_CAP_NEEDS_POLL || | 874 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; |
971 | mmc->caps & MMC_CAP_NONREMOVABLE); | ||
972 | 875 | ||
973 | _host->power = false; | 876 | pdata->power = false; |
974 | pm_runtime_enable(&pdev->dev); | 877 | pm_runtime_enable(&pdev->dev); |
975 | ret = pm_runtime_resume(&pdev->dev); | 878 | ret = pm_runtime_resume(&pdev->dev); |
976 | if (ret < 0) | 879 | if (ret < 0) |
977 | goto pm_disable; | 880 | goto pm_disable; |
978 | 881 | ||
979 | if (tmio_mmc_clk_update(mmc) < 0) { | ||
980 | mmc->f_max = pdata->hclk; | ||
981 | mmc->f_min = mmc->f_max / 512; | ||
982 | } | ||
983 | |||
984 | /* | ||
985 | * There are 4 different scenarios for the card detection: | ||
986 | * 1) an external gpio irq handles the cd (best for power savings) | ||
987 | * 2) internal sdhi irq handles the cd | ||
988 | * 3) a worker thread polls the sdhi - indicated by MMC_CAP_NEEDS_POLL | ||
989 | * 4) the medium is non-removable - indicated by MMC_CAP_NONREMOVABLE | ||
990 | * | ||
991 | * While we increment the runtime PM counter for all scenarios when | ||
992 | * the mmc core activates us by calling an appropriate set_ios(), we | ||
993 | * must additionally ensure that in case 2) the tmio mmc hardware stays | ||
994 | * powered on during runtime for the card detection to work. | ||
995 | */ | ||
996 | if (_host->native_hotplug) | ||
997 | pm_runtime_get_noresume(&pdev->dev); | ||
998 | |||
999 | tmio_mmc_clk_stop(_host); | 882 | tmio_mmc_clk_stop(_host); |
1000 | tmio_mmc_reset(_host); | 883 | tmio_mmc_reset(_host); |
1001 | 884 | ||
1002 | _host->sdcard_irq_mask = sd_ctrl_read32(_host, CTL_IRQ_MASK); | ||
1003 | tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL); | 885 | tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL); |
1004 | |||
1005 | /* Unmask the IRQs we want to know about */ | ||
1006 | if (!_host->chan_rx) | ||
1007 | irq_mask |= TMIO_MASK_READOP; | ||
1008 | if (!_host->chan_tx) | ||
1009 | irq_mask |= TMIO_MASK_WRITEOP; | ||
1010 | if (!_host->native_hotplug) | ||
1011 | irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT); | ||
1012 | |||
1013 | _host->sdcard_irq_mask &= ~irq_mask; | ||
1014 | |||
1015 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) | 886 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) |
1016 | tmio_mmc_enable_sdio_irq(mmc, 0); | 887 | tmio_mmc_enable_sdio_irq(mmc, 0); |
1017 | 888 | ||
@@ -1025,23 +896,21 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host, | |||
1025 | /* See if we also get DMA */ | 896 | /* See if we also get DMA */ |
1026 | tmio_mmc_request_dma(_host, pdata); | 897 | tmio_mmc_request_dma(_host, pdata); |
1027 | 898 | ||
1028 | ret = mmc_add_host(mmc); | 899 | /* We have to keep the device powered for its card detection to work */ |
1029 | if (pdata->clk_disable) | 900 | if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD)) { |
1030 | pdata->clk_disable(pdev); | 901 | pdata->power = true; |
1031 | if (ret < 0) { | 902 | pm_runtime_get_noresume(&pdev->dev); |
1032 | tmio_mmc_host_remove(_host); | ||
1033 | return ret; | ||
1034 | } | 903 | } |
1035 | 904 | ||
1036 | dev_pm_qos_expose_latency_limit(&pdev->dev, 100); | 905 | mmc_add_host(mmc); |
1037 | 906 | ||
1038 | if (pdata->flags & TMIO_MMC_USE_GPIO_CD) { | 907 | /* Unmask the IRQs we want to know about */ |
1039 | ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio); | 908 | if (!_host->chan_rx) |
1040 | if (ret < 0) { | 909 | irq_mask |= TMIO_MASK_READOP; |
1041 | tmio_mmc_host_remove(_host); | 910 | if (!_host->chan_tx) |
1042 | return ret; | 911 | irq_mask |= TMIO_MASK_WRITEOP; |
1043 | } | 912 | |
1044 | } | 913 | tmio_mmc_enable_mmc_irqs(_host, irq_mask); |
1045 | 914 | ||
1046 | *host = _host; | 915 | *host = _host; |
1047 | 916 | ||
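The comment removed above spells out the four card-detection scenarios, and this hunk also drops the code that acts on them: native_hotplug is true only when no GPIO, polling or non-removable capability covers card detect, and in that case the probe pins the device with pm_runtime_get_noresume() so the controller stays powered and its insert/remove interrupts keep firing. A condensed sketch of that decision, using the left-hand side's field and flag (they do not exist in the patched tree):

#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/pm_runtime.h>

#include "tmio_mmc.h"

/* Decide whether the controller itself must signal hotplug and, if so,
 * keep it powered so the card insert/remove interrupts can fire. */
static void example_setup_hotplug(struct tmio_mmc_host *host,
                                  struct tmio_mmc_data *pdata)
{
        struct mmc_host *mmc = host->mmc;

        host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
                                 mmc->caps & MMC_CAP_NEEDS_POLL ||
                                 mmc->caps & MMC_CAP_NONREMOVABLE);

        if (host->native_hotplug)
                pm_runtime_get_noresume(&host->pdev->dev);
}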
@@ -1060,22 +929,18 @@ EXPORT_SYMBOL(tmio_mmc_host_probe); | |||
1060 | void tmio_mmc_host_remove(struct tmio_mmc_host *host) | 929 | void tmio_mmc_host_remove(struct tmio_mmc_host *host) |
1061 | { | 930 | { |
1062 | struct platform_device *pdev = host->pdev; | 931 | struct platform_device *pdev = host->pdev; |
1063 | struct tmio_mmc_data *pdata = host->pdata; | ||
1064 | struct mmc_host *mmc = host->mmc; | ||
1065 | 932 | ||
1066 | if (pdata->flags & TMIO_MMC_USE_GPIO_CD) | 933 | /* |
1067 | /* | 934 | * We don't have to manipulate pdata->power here: if there is a card in |
1068 | * This means we can miss a card-eject, but this is anyway | 935 | * the slot, the runtime PM is active and our .runtime_resume() will not |
1069 | * possible, because of delayed processing of hotplug events. | 936 | * be run. If there is no card in the slot and the platform can suspend |
1070 | */ | 937 | * the controller, the runtime PM is suspended and pdata->power == false, |
1071 | mmc_gpio_free_cd(mmc); | 938 | * so, our .runtime_resume() will not try to detect a card in the slot. |
1072 | 939 | */ | |
1073 | if (!host->native_hotplug) | 940 | if (host->pdata->flags & TMIO_MMC_HAS_COLD_CD) |
1074 | pm_runtime_get_sync(&pdev->dev); | 941 | pm_runtime_get_sync(&pdev->dev); |
1075 | 942 | ||
1076 | dev_pm_qos_hide_latency_limit(&pdev->dev); | 943 | mmc_remove_host(host->mmc); |
1077 | |||
1078 | mmc_remove_host(mmc); | ||
1079 | cancel_work_sync(&host->done); | 944 | cancel_work_sync(&host->done); |
1080 | cancel_delayed_work_sync(&host->delayed_reset_work); | 945 | cancel_delayed_work_sync(&host->delayed_reset_work); |
1081 | tmio_mmc_release_dma(host); | 946 | tmio_mmc_release_dma(host); |
@@ -1084,7 +949,7 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host) | |||
1084 | pm_runtime_disable(&pdev->dev); | 949 | pm_runtime_disable(&pdev->dev); |
1085 | 950 | ||
1086 | iounmap(host->ctl); | 951 | iounmap(host->ctl); |
1087 | mmc_free_host(mmc); | 952 | mmc_free_host(host->mmc); |
1088 | } | 953 | } |
1089 | EXPORT_SYMBOL(tmio_mmc_host_remove); | 954 | EXPORT_SYMBOL(tmio_mmc_host_remove); |
1090 | 955 | ||
@@ -1098,6 +963,8 @@ int tmio_mmc_host_suspend(struct device *dev) | |||
1098 | if (!ret) | 963 | if (!ret) |
1099 | tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL); | 964 | tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL); |
1100 | 965 | ||
966 | host->pm_error = pm_runtime_put_sync(dev); | ||
967 | |||
1101 | return ret; | 968 | return ret; |
1102 | } | 969 | } |
1103 | EXPORT_SYMBOL(tmio_mmc_host_suspend); | 970 | EXPORT_SYMBOL(tmio_mmc_host_suspend); |
@@ -1107,10 +974,20 @@ int tmio_mmc_host_resume(struct device *dev) | |||
1107 | struct mmc_host *mmc = dev_get_drvdata(dev); | 974 | struct mmc_host *mmc = dev_get_drvdata(dev); |
1108 | struct tmio_mmc_host *host = mmc_priv(mmc); | 975 | struct tmio_mmc_host *host = mmc_priv(mmc); |
1109 | 976 | ||
1110 | tmio_mmc_reset(host); | ||
1111 | tmio_mmc_enable_dma(host, true); | ||
1112 | |||
1113 | /* The MMC core will perform the complete set up */ | 977 | /* The MMC core will perform the complete set up */ |
978 | host->pdata->power = false; | ||
979 | |||
980 | host->pm_global = true; | ||
981 | if (!host->pm_error) | ||
982 | pm_runtime_get_sync(dev); | ||
983 | |||
984 | if (host->pm_global) { | ||
985 | /* Runtime PM resume callback didn't run */ | ||
986 | tmio_mmc_reset(host); | ||
987 | tmio_mmc_enable_dma(host, true); | ||
988 | host->pm_global = false; | ||
989 | } | ||
990 | |||
1114 | return mmc_resume_host(mmc); | 991 | return mmc_resume_host(mmc); |
1115 | } | 992 | } |
1116 | EXPORT_SYMBOL(tmio_mmc_host_resume); | 993 | EXPORT_SYMBOL(tmio_mmc_host_resume); |
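The patched suspend/resume pair additionally balances a runtime-PM reference (pm_runtime_put_sync() in suspend, pm_runtime_get_sync() in resume, tracked via pm_error and pm_global), while the left-hand side simply resets the controller and re-enables DMA before handing the card back to the core. A sketch of that simpler left-hand flow, written as if it lived in this file so the static helpers such as tmio_mmc_reset() are in scope; mmc_suspend_host()/mmc_resume_host() were still part of the core API in this kernel generation:

#include <linux/device.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>

#include "tmio_mmc.h"

static int example_host_suspend(struct device *dev)
{
        struct mmc_host *mmc = dev_get_drvdata(dev);
        int ret = mmc_suspend_host(mmc);

        if (!ret)
                tmio_mmc_disable_mmc_irqs(mmc_priv(mmc), TMIO_MASK_ALL);

        return ret;
}

static int example_host_resume(struct device *dev)
{
        struct mmc_host *mmc = dev_get_drvdata(dev);
        struct tmio_mmc_host *host = mmc_priv(mmc);

        tmio_mmc_reset(host);
        tmio_mmc_enable_dma(host, true);

        /* The MMC core performs the complete card set-up from here. */
        return mmc_resume_host(mmc);
}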
@@ -1127,10 +1004,19 @@ int tmio_mmc_host_runtime_resume(struct device *dev) | |||
1127 | { | 1004 | { |
1128 | struct mmc_host *mmc = dev_get_drvdata(dev); | 1005 | struct mmc_host *mmc = dev_get_drvdata(dev); |
1129 | struct tmio_mmc_host *host = mmc_priv(mmc); | 1006 | struct tmio_mmc_host *host = mmc_priv(mmc); |
1007 | struct tmio_mmc_data *pdata = host->pdata; | ||
1130 | 1008 | ||
1131 | tmio_mmc_reset(host); | 1009 | tmio_mmc_reset(host); |
1132 | tmio_mmc_enable_dma(host, true); | 1010 | tmio_mmc_enable_dma(host, true); |
1133 | 1011 | ||
1012 | if (pdata->power) { | ||
1013 | /* Only entered after a card-insert interrupt */ | ||
1014 | if (!mmc->card) | ||
1015 | tmio_mmc_set_ios(mmc, &mmc->ios); | ||
1016 | mmc_detect_change(mmc, msecs_to_jiffies(100)); | ||
1017 | } | ||
1018 | host->pm_global = false; | ||
1019 | |||
1134 | return 0; | 1020 | return 0; |
1135 | } | 1021 | } |
1136 | EXPORT_SYMBOL(tmio_mmc_host_runtime_resume); | 1022 | EXPORT_SYMBOL(tmio_mmc_host_runtime_resume); |
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c index c0105a2e269..f08f944ac53 100644 --- a/drivers/mmc/host/ushc.c +++ b/drivers/mmc/host/ushc.c | |||
@@ -562,7 +562,17 @@ static struct usb_driver ushc_driver = { | |||
562 | .disconnect = ushc_disconnect, | 562 | .disconnect = ushc_disconnect, |
563 | }; | 563 | }; |
564 | 564 | ||
565 | module_usb_driver(ushc_driver); | 565 | static int __init ushc_init(void) |
566 | { | ||
567 | return usb_register(&ushc_driver); | ||
568 | } | ||
569 | module_init(ushc_init); | ||
570 | |||
571 | static void __exit ushc_exit(void) | ||
572 | { | ||
573 | usb_deregister(&ushc_driver); | ||
574 | } | ||
575 | module_exit(ushc_exit); | ||
566 | 576 | ||
567 | MODULE_DESCRIPTION("USB SD Host Controller driver"); | 577 | MODULE_DESCRIPTION("USB SD Host Controller driver"); |
568 | MODULE_AUTHOR("David Vrabel <david.vrabel@csr.com>"); | 578 | MODULE_AUTHOR("David Vrabel <david.vrabel@csr.com>"); |
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index 4f84586c6e9..4dfe2c02ea9 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c | |||
@@ -9,7 +9,6 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
12 | #include <linux/module.h> | ||
13 | #include <linux/dma-mapping.h> | 12 | #include <linux/dma-mapping.h> |
14 | #include <linux/highmem.h> | 13 | #include <linux/highmem.h> |
15 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
@@ -1082,7 +1081,7 @@ static void via_init_mmc_host(struct via_crdr_mmc_host *host) | |||
1082 | msleep(1); | 1081 | msleep(1); |
1083 | } | 1082 | } |
1084 | 1083 | ||
1085 | static int via_sd_probe(struct pci_dev *pcidev, | 1084 | static int __devinit via_sd_probe(struct pci_dev *pcidev, |
1086 | const struct pci_device_id *id) | 1085 | const struct pci_device_id *id) |
1087 | { | 1086 | { |
1088 | struct mmc_host *mmc; | 1087 | struct mmc_host *mmc; |
@@ -1176,7 +1175,7 @@ disable: | |||
1176 | return ret; | 1175 | return ret; |
1177 | } | 1176 | } |
1178 | 1177 | ||
1179 | static void via_sd_remove(struct pci_dev *pcidev) | 1178 | static void __devexit via_sd_remove(struct pci_dev *pcidev) |
1180 | { | 1179 | { |
1181 | struct via_crdr_mmc_host *sdhost = pci_get_drvdata(pcidev); | 1180 | struct via_crdr_mmc_host *sdhost = pci_get_drvdata(pcidev); |
1182 | unsigned long flags; | 1181 | unsigned long flags; |
@@ -1192,7 +1191,7 @@ static void via_sd_remove(struct pci_dev *pcidev) | |||
1192 | mmiowb(); | 1191 | mmiowb(); |
1193 | 1192 | ||
1194 | if (sdhost->mrq) { | 1193 | if (sdhost->mrq) { |
1195 | pr_err("%s: Controller removed during " | 1194 | printk(KERN_ERR "%s: Controller removed during " |
1196 | "transfer\n", mmc_hostname(sdhost->mmc)); | 1195 | "transfer\n", mmc_hostname(sdhost->mmc)); |
1197 | 1196 | ||
1198 | /* make sure all DMA is stopped */ | 1197 | /* make sure all DMA is stopped */ |
@@ -1332,12 +1331,26 @@ static struct pci_driver via_sd_driver = { | |||
1332 | .name = DRV_NAME, | 1331 | .name = DRV_NAME, |
1333 | .id_table = via_ids, | 1332 | .id_table = via_ids, |
1334 | .probe = via_sd_probe, | 1333 | .probe = via_sd_probe, |
1335 | .remove = via_sd_remove, | 1334 | .remove = __devexit_p(via_sd_remove), |
1336 | .suspend = via_sd_suspend, | 1335 | .suspend = via_sd_suspend, |
1337 | .resume = via_sd_resume, | 1336 | .resume = via_sd_resume, |
1338 | }; | 1337 | }; |
1339 | 1338 | ||
1340 | module_pci_driver(via_sd_driver); | 1339 | static int __init via_sd_drv_init(void) |
1340 | { | ||
1341 | pr_info(DRV_NAME ": VIA SD/MMC Card Reader driver " | ||
1342 | "(C) 2008 VIA Technologies, Inc.\n"); | ||
1343 | |||
1344 | return pci_register_driver(&via_sd_driver); | ||
1345 | } | ||
1346 | |||
1347 | static void __exit via_sd_drv_exit(void) | ||
1348 | { | ||
1349 | pci_unregister_driver(&via_sd_driver); | ||
1350 | } | ||
1351 | |||
1352 | module_init(via_sd_drv_init); | ||
1353 | module_exit(via_sd_drv_exit); | ||
1341 | 1354 | ||
1342 | MODULE_LICENSE("GPL"); | 1355 | MODULE_LICENSE("GPL"); |
1343 | MODULE_AUTHOR("VIA Technologies Inc."); | 1356 | MODULE_AUTHOR("VIA Technologies Inc."); |
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c index cb9f361c03a..2ec978bc32b 100644 --- a/drivers/mmc/host/vub300.c +++ b/drivers/mmc/host/vub300.c | |||
@@ -223,25 +223,25 @@ enum SD_RESPONSE_TYPE { | |||
223 | #define FUN(c) (0x000007 & (c->arg>>28)) | 223 | #define FUN(c) (0x000007 & (c->arg>>28)) |
224 | #define REG(c) (0x01FFFF & (c->arg>>9)) | 224 | #define REG(c) (0x01FFFF & (c->arg>>9)) |
225 | 225 | ||
226 | static bool limit_speed_to_24_MHz; | 226 | static int limit_speed_to_24_MHz; |
227 | module_param(limit_speed_to_24_MHz, bool, 0644); | 227 | module_param(limit_speed_to_24_MHz, bool, 0644); |
228 | MODULE_PARM_DESC(limit_speed_to_24_MHz, "Limit Max SDIO Clock Speed to 24 MHz"); | 228 | MODULE_PARM_DESC(limit_speed_to_24_MHz, "Limit Max SDIO Clock Speed to 24 MHz"); |
229 | 229 | ||
230 | static bool pad_input_to_usb_pkt; | 230 | static int pad_input_to_usb_pkt; |
231 | module_param(pad_input_to_usb_pkt, bool, 0644); | 231 | module_param(pad_input_to_usb_pkt, bool, 0644); |
232 | MODULE_PARM_DESC(pad_input_to_usb_pkt, | 232 | MODULE_PARM_DESC(pad_input_to_usb_pkt, |
233 | "Pad USB data input transfers to whole USB Packet"); | 233 | "Pad USB data input transfers to whole USB Packet"); |
234 | 234 | ||
235 | static bool disable_offload_processing; | 235 | static int disable_offload_processing; |
236 | module_param(disable_offload_processing, bool, 0644); | 236 | module_param(disable_offload_processing, bool, 0644); |
237 | MODULE_PARM_DESC(disable_offload_processing, "Disable Offload Processing"); | 237 | MODULE_PARM_DESC(disable_offload_processing, "Disable Offload Processing"); |
238 | 238 | ||
239 | static bool force_1_bit_data_xfers; | 239 | static int force_1_bit_data_xfers; |
240 | module_param(force_1_bit_data_xfers, bool, 0644); | 240 | module_param(force_1_bit_data_xfers, bool, 0644); |
241 | MODULE_PARM_DESC(force_1_bit_data_xfers, | 241 | MODULE_PARM_DESC(force_1_bit_data_xfers, |
242 | "Force SDIO Data Transfers to 1-bit Mode"); | 242 | "Force SDIO Data Transfers to 1-bit Mode"); |
243 | 243 | ||
244 | static bool force_polling_for_irqs; | 244 | static int force_polling_for_irqs; |
245 | module_param(force_polling_for_irqs, bool, 0644); | 245 | module_param(force_polling_for_irqs, bool, 0644); |
246 | MODULE_PARM_DESC(force_polling_for_irqs, "Force Polling for SDIO interrupts"); | 246 | MODULE_PARM_DESC(force_polling_for_irqs, "Force Polling for SDIO interrupts"); |
247 | 247 | ||
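Newer kernels require the variable behind module_param(..., bool, ...) to really be a bool; older trees still accepted a plain int, which is why the patched side flips these declarations back. A self-contained sketch of the stricter form with an invented parameter (example_force_polling is illustrative, not one of vub300's parameters):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static bool example_force_polling;      /* must be 'bool' for a bool parameter */
module_param(example_force_polling, bool, 0644);
MODULE_PARM_DESC(example_force_polling, "Force polling instead of IRQs");

static int __init example_param_init(void)
{
        pr_info("example_force_polling=%d\n", example_force_polling);
        return 0;
}
module_init(example_param_init);

static void __exit example_param_exit(void)
{
}
module_exit(example_param_exit);

MODULE_LICENSE("GPL");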
@@ -806,7 +806,7 @@ static void command_res_completed(struct urb *urb) | |||
806 | * we suspect a buggy USB host controller | 806 | * we suspect a buggy USB host controller |
807 | */ | 807 | */ |
808 | } else if (!vub300->data) { | 808 | } else if (!vub300->data) { |
809 | /* this means that the command (typically CMD52) succeeded */ | 809 | /* this means that the command (typically CMD52) succeeded */ |
810 | } else if (vub300->resp.common.header_type != 0x02) { | 810 | } else if (vub300->resp.common.header_type != 0x02) { |
811 | /* | 811 | /* |
812 | * this is an error response from the VUB300 chip | 812 | * this is an error response from the VUB300 chip |
@@ -2358,11 +2358,10 @@ error5: | |||
2358 | * which is contained at the end of struct mmc | 2358 | * which is contained at the end of struct mmc |
2359 | */ | 2359 | */ |
2360 | error4: | 2360 | error4: |
2361 | usb_free_urb(command_res_urb); | ||
2362 | error1: | ||
2363 | usb_free_urb(command_out_urb); | 2361 | usb_free_urb(command_out_urb); |
2362 | error1: | ||
2363 | usb_free_urb(command_res_urb); | ||
2364 | error0: | 2364 | error0: |
2365 | usb_put_dev(udev); | ||
2366 | return retval; | 2365 | return retval; |
2367 | } | 2366 | } |
2368 | 2367 | ||
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index e954b775887..62e5a4d171e 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c | |||
@@ -194,7 +194,7 @@ static void wbsd_reset(struct wbsd_host *host) | |||
194 | { | 194 | { |
195 | u8 setup; | 195 | u8 setup; |
196 | 196 | ||
197 | pr_err("%s: Resetting chip\n", mmc_hostname(host->mmc)); | 197 | printk(KERN_ERR "%s: Resetting chip\n", mmc_hostname(host->mmc)); |
198 | 198 | ||
199 | /* | 199 | /* |
200 | * Soft reset of chip (SD/MMC part). | 200 | * Soft reset of chip (SD/MMC part). |
@@ -721,7 +721,7 @@ static void wbsd_finish_data(struct wbsd_host *host, struct mmc_data *data) | |||
721 | * Any leftover data? | 721 | * Any leftover data? |
722 | */ | 722 | */ |
723 | if (count) { | 723 | if (count) { |
724 | pr_err("%s: Incomplete DMA transfer. " | 724 | printk(KERN_ERR "%s: Incomplete DMA transfer. " |
725 | "%d bytes left.\n", | 725 | "%d bytes left.\n", |
726 | mmc_hostname(host->mmc), count); | 726 | mmc_hostname(host->mmc), count); |
727 | 727 | ||
@@ -803,7 +803,7 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
803 | 803 | ||
804 | default: | 804 | default: |
805 | #ifdef CONFIG_MMC_DEBUG | 805 | #ifdef CONFIG_MMC_DEBUG |
806 | pr_warning("%s: Data command %d is not " | 806 | printk(KERN_WARNING "%s: Data command %d is not " |
807 | "supported by this controller.\n", | 807 | "supported by this controller.\n", |
808 | mmc_hostname(host->mmc), cmd->opcode); | 808 | mmc_hostname(host->mmc), cmd->opcode); |
809 | #endif | 809 | #endif |
@@ -1029,7 +1029,7 @@ static void wbsd_tasklet_card(unsigned long param) | |||
1029 | host->flags &= ~WBSD_FCARD_PRESENT; | 1029 | host->flags &= ~WBSD_FCARD_PRESENT; |
1030 | 1030 | ||
1031 | if (host->mrq) { | 1031 | if (host->mrq) { |
1032 | pr_err("%s: Card removed during transfer!\n", | 1032 | printk(KERN_ERR "%s: Card removed during transfer!\n", |
1033 | mmc_hostname(host->mmc)); | 1033 | mmc_hostname(host->mmc)); |
1034 | wbsd_reset(host); | 1034 | wbsd_reset(host); |
1035 | 1035 | ||
@@ -1196,7 +1196,7 @@ static irqreturn_t wbsd_irq(int irq, void *dev_id) | |||
1196 | * Allocate/free MMC structure. | 1196 | * Allocate/free MMC structure. |
1197 | */ | 1197 | */ |
1198 | 1198 | ||
1199 | static int wbsd_alloc_mmc(struct device *dev) | 1199 | static int __devinit wbsd_alloc_mmc(struct device *dev) |
1200 | { | 1200 | { |
1201 | struct mmc_host *mmc; | 1201 | struct mmc_host *mmc; |
1202 | struct wbsd_host *host; | 1202 | struct wbsd_host *host; |
@@ -1288,7 +1288,7 @@ static void wbsd_free_mmc(struct device *dev) | |||
1288 | * Scan for known chip id:s | 1288 | * Scan for known chip id:s |
1289 | */ | 1289 | */ |
1290 | 1290 | ||
1291 | static int wbsd_scan(struct wbsd_host *host) | 1291 | static int __devinit wbsd_scan(struct wbsd_host *host) |
1292 | { | 1292 | { |
1293 | int i, j, k; | 1293 | int i, j, k; |
1294 | int id; | 1294 | int id; |
@@ -1344,7 +1344,7 @@ static int wbsd_scan(struct wbsd_host *host) | |||
1344 | * Allocate/free io port ranges | 1344 | * Allocate/free io port ranges |
1345 | */ | 1345 | */ |
1346 | 1346 | ||
1347 | static int wbsd_request_region(struct wbsd_host *host, int base) | 1347 | static int __devinit wbsd_request_region(struct wbsd_host *host, int base) |
1348 | { | 1348 | { |
1349 | if (base & 0x7) | 1349 | if (base & 0x7) |
1350 | return -EINVAL; | 1350 | return -EINVAL; |
@@ -1374,7 +1374,7 @@ static void wbsd_release_regions(struct wbsd_host *host) | |||
1374 | * Allocate/free DMA port and buffer | 1374 | * Allocate/free DMA port and buffer |
1375 | */ | 1375 | */ |
1376 | 1376 | ||
1377 | static void wbsd_request_dma(struct wbsd_host *host, int dma) | 1377 | static void __devinit wbsd_request_dma(struct wbsd_host *host, int dma) |
1378 | { | 1378 | { |
1379 | if (dma < 0) | 1379 | if (dma < 0) |
1380 | return; | 1380 | return; |
@@ -1429,7 +1429,7 @@ free: | |||
1429 | free_dma(dma); | 1429 | free_dma(dma); |
1430 | 1430 | ||
1431 | err: | 1431 | err: |
1432 | pr_warning(DRIVER_NAME ": Unable to allocate DMA %d. " | 1432 | printk(KERN_WARNING DRIVER_NAME ": Unable to allocate DMA %d. " |
1433 | "Falling back on FIFO.\n", dma); | 1433 | "Falling back on FIFO.\n", dma); |
1434 | } | 1434 | } |
1435 | 1435 | ||
@@ -1452,7 +1452,7 @@ static void wbsd_release_dma(struct wbsd_host *host) | |||
1452 | * Allocate/free IRQ. | 1452 | * Allocate/free IRQ. |
1453 | */ | 1453 | */ |
1454 | 1454 | ||
1455 | static int wbsd_request_irq(struct wbsd_host *host, int irq) | 1455 | static int __devinit wbsd_request_irq(struct wbsd_host *host, int irq) |
1456 | { | 1456 | { |
1457 | int ret; | 1457 | int ret; |
1458 | 1458 | ||
@@ -1502,7 +1502,7 @@ static void wbsd_release_irq(struct wbsd_host *host) | |||
1502 | * Allocate all resources for the host. | 1502 | * Allocate all resources for the host. |
1503 | */ | 1503 | */ |
1504 | 1504 | ||
1505 | static int wbsd_request_resources(struct wbsd_host *host, | 1505 | static int __devinit wbsd_request_resources(struct wbsd_host *host, |
1506 | int base, int irq, int dma) | 1506 | int base, int irq, int dma) |
1507 | { | 1507 | { |
1508 | int ret; | 1508 | int ret; |
@@ -1644,7 +1644,7 @@ static void wbsd_chip_poweroff(struct wbsd_host *host) | |||
1644 | * * | 1644 | * * |
1645 | \*****************************************************************************/ | 1645 | \*****************************************************************************/ |
1646 | 1646 | ||
1647 | static int wbsd_init(struct device *dev, int base, int irq, int dma, | 1647 | static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma, |
1648 | int pnp) | 1648 | int pnp) |
1649 | { | 1649 | { |
1650 | struct wbsd_host *host = NULL; | 1650 | struct wbsd_host *host = NULL; |
@@ -1664,7 +1664,7 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma, | |||
1664 | ret = wbsd_scan(host); | 1664 | ret = wbsd_scan(host); |
1665 | if (ret) { | 1665 | if (ret) { |
1666 | if (pnp && (ret == -ENODEV)) { | 1666 | if (pnp && (ret == -ENODEV)) { |
1667 | pr_warning(DRIVER_NAME | 1667 | printk(KERN_WARNING DRIVER_NAME |
1668 | ": Unable to confirm device presence. You may " | 1668 | ": Unable to confirm device presence. You may " |
1669 | "experience lock-ups.\n"); | 1669 | "experience lock-ups.\n"); |
1670 | } else { | 1670 | } else { |
@@ -1688,7 +1688,7 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma, | |||
1688 | */ | 1688 | */ |
1689 | if (pnp) { | 1689 | if (pnp) { |
1690 | if ((host->config != 0) && !wbsd_chip_validate(host)) { | 1690 | if ((host->config != 0) && !wbsd_chip_validate(host)) { |
1691 | pr_warning(DRIVER_NAME | 1691 | printk(KERN_WARNING DRIVER_NAME |
1692 | ": PnP active but chip not configured! " | 1692 | ": PnP active but chip not configured! " |
1693 | "You probably have a buggy BIOS. " | 1693 | "You probably have a buggy BIOS. " |
1694 | "Configuring chip manually.\n"); | 1694 | "Configuring chip manually.\n"); |
@@ -1720,7 +1720,7 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma, | |||
1720 | 1720 | ||
1721 | mmc_add_host(mmc); | 1721 | mmc_add_host(mmc); |
1722 | 1722 | ||
1723 | pr_info("%s: W83L51xD", mmc_hostname(mmc)); | 1723 | printk(KERN_INFO "%s: W83L51xD", mmc_hostname(mmc)); |
1724 | if (host->chip_id != 0) | 1724 | if (host->chip_id != 0) |
1725 | printk(" id %x", (int)host->chip_id); | 1725 | printk(" id %x", (int)host->chip_id); |
1726 | printk(" at 0x%x irq %d", (int)host->base, (int)host->irq); | 1726 | printk(" at 0x%x irq %d", (int)host->base, (int)host->irq); |
@@ -1735,7 +1735,7 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma, | |||
1735 | return 0; | 1735 | return 0; |
1736 | } | 1736 | } |
1737 | 1737 | ||
1738 | static void wbsd_shutdown(struct device *dev, int pnp) | 1738 | static void __devexit wbsd_shutdown(struct device *dev, int pnp) |
1739 | { | 1739 | { |
1740 | struct mmc_host *mmc = dev_get_drvdata(dev); | 1740 | struct mmc_host *mmc = dev_get_drvdata(dev); |
1741 | struct wbsd_host *host; | 1741 | struct wbsd_host *host; |
@@ -1762,13 +1762,13 @@ static void wbsd_shutdown(struct device *dev, int pnp) | |||
1762 | * Non-PnP | 1762 | * Non-PnP |
1763 | */ | 1763 | */ |
1764 | 1764 | ||
1765 | static int wbsd_probe(struct platform_device *dev) | 1765 | static int __devinit wbsd_probe(struct platform_device *dev) |
1766 | { | 1766 | { |
1767 | /* Use the module parameters for resources */ | 1767 | /* Use the module parameters for resources */ |
1768 | return wbsd_init(&dev->dev, param_io, param_irq, param_dma, 0); | 1768 | return wbsd_init(&dev->dev, param_io, param_irq, param_dma, 0); |
1769 | } | 1769 | } |
1770 | 1770 | ||
1771 | static int wbsd_remove(struct platform_device *dev) | 1771 | static int __devexit wbsd_remove(struct platform_device *dev) |
1772 | { | 1772 | { |
1773 | wbsd_shutdown(&dev->dev, 0); | 1773 | wbsd_shutdown(&dev->dev, 0); |
1774 | 1774 | ||
@@ -1781,7 +1781,7 @@ static int wbsd_remove(struct platform_device *dev) | |||
1781 | 1781 | ||
1782 | #ifdef CONFIG_PNP | 1782 | #ifdef CONFIG_PNP |
1783 | 1783 | ||
1784 | static int | 1784 | static int __devinit |
1785 | wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id) | 1785 | wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id) |
1786 | { | 1786 | { |
1787 | int io, irq, dma; | 1787 | int io, irq, dma; |
@@ -1801,7 +1801,7 @@ wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id) | |||
1801 | return wbsd_init(&pnpdev->dev, io, irq, dma, 1); | 1801 | return wbsd_init(&pnpdev->dev, io, irq, dma, 1); |
1802 | } | 1802 | } |
1803 | 1803 | ||
1804 | static void wbsd_pnp_remove(struct pnp_dev *dev) | 1804 | static void __devexit wbsd_pnp_remove(struct pnp_dev *dev) |
1805 | { | 1805 | { |
1806 | wbsd_shutdown(&dev->dev, 1); | 1806 | wbsd_shutdown(&dev->dev, 1); |
1807 | } | 1807 | } |
@@ -1909,7 +1909,7 @@ static int wbsd_pnp_resume(struct pnp_dev *pnp_dev) | |||
1909 | */ | 1909 | */ |
1910 | if (host->config != 0) { | 1910 | if (host->config != 0) { |
1911 | if (!wbsd_chip_validate(host)) { | 1911 | if (!wbsd_chip_validate(host)) { |
1912 | pr_warning(DRIVER_NAME | 1912 | printk(KERN_WARNING DRIVER_NAME |
1913 | ": PnP active but chip not configured! " | 1913 | ": PnP active but chip not configured! " |
1914 | "You probably have a buggy BIOS. " | 1914 | "You probably have a buggy BIOS. " |
1915 | "Configuring chip manually.\n"); | 1915 | "Configuring chip manually.\n"); |
@@ -1941,7 +1941,7 @@ static struct platform_device *wbsd_device; | |||
1941 | 1941 | ||
1942 | static struct platform_driver wbsd_driver = { | 1942 | static struct platform_driver wbsd_driver = { |
1943 | .probe = wbsd_probe, | 1943 | .probe = wbsd_probe, |
1944 | .remove = wbsd_remove, | 1944 | .remove = __devexit_p(wbsd_remove), |
1945 | 1945 | ||
1946 | .suspend = wbsd_platform_suspend, | 1946 | .suspend = wbsd_platform_suspend, |
1947 | .resume = wbsd_platform_resume, | 1947 | .resume = wbsd_platform_resume, |
@@ -1957,7 +1957,7 @@ static struct pnp_driver wbsd_pnp_driver = { | |||
1957 | .name = DRIVER_NAME, | 1957 | .name = DRIVER_NAME, |
1958 | .id_table = pnp_dev_table, | 1958 | .id_table = pnp_dev_table, |
1959 | .probe = wbsd_pnp_probe, | 1959 | .probe = wbsd_pnp_probe, |
1960 | .remove = wbsd_pnp_remove, | 1960 | .remove = __devexit_p(wbsd_pnp_remove), |
1961 | 1961 | ||
1962 | .suspend = wbsd_pnp_suspend, | 1962 | .suspend = wbsd_pnp_suspend, |
1963 | .resume = wbsd_pnp_resume, | 1963 | .resume = wbsd_pnp_resume, |
@@ -1973,9 +1973,9 @@ static int __init wbsd_drv_init(void) | |||
1973 | { | 1973 | { |
1974 | int result; | 1974 | int result; |
1975 | 1975 | ||
1976 | pr_info(DRIVER_NAME | 1976 | printk(KERN_INFO DRIVER_NAME |
1977 | ": Winbond W83L51xD SD/MMC card interface driver\n"); | 1977 | ": Winbond W83L51xD SD/MMC card interface driver\n"); |
1978 | pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); | 1978 | printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); |
1979 | 1979 | ||
1980 | #ifdef CONFIG_PNP | 1980 | #ifdef CONFIG_PNP |
1981 | 1981 | ||
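These wbsd hunks swap the pr_warning()/pr_info() helpers back to raw printk() calls with explicit KERN_* levels; the message and log level are the same either way. A minimal sketch of the equivalence (the helper name below is invented for illustration, and the string literal stands in for the driver's DRIVER_NAME prefix):

#include <linux/printk.h>

/* Illustration only: both calls emit the same message at KERN_INFO level,
 * since pr_info(fmt, ...) expands to printk(KERN_INFO pr_fmt(fmt), ...). */
static void wbsd_log_sketch(void)
{
	printk(KERN_INFO "wbsd: Winbond W83L51xD SD/MMC card interface driver\n");
	pr_info("wbsd: Winbond W83L51xD SD/MMC card interface driver\n");
}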
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c deleted file mode 100644 index 154f0e8e931..00000000000 --- a/drivers/mmc/host/wmt-sdmmc.c +++ /dev/null | |||
@@ -1,1029 +0,0 @@ | |||
1 | /* | ||
2 | * WM8505/WM8650 SD/MMC Host Controller | ||
3 | * | ||
4 | * Copyright (C) 2010 Tony Prisk | ||
5 | * Copyright (C) 2008 WonderMedia Technologies, Inc. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation | ||
10 | */ | ||
11 | |||
12 | #include <linux/init.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/platform_device.h> | ||
15 | #include <linux/ioport.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/dma-mapping.h> | ||
18 | #include <linux/delay.h> | ||
19 | #include <linux/io.h> | ||
20 | #include <linux/irq.h> | ||
21 | #include <linux/clk.h> | ||
22 | #include <linux/gpio.h> | ||
23 | |||
24 | #include <linux/of.h> | ||
25 | #include <linux/of_address.h> | ||
26 | #include <linux/of_irq.h> | ||
27 | #include <linux/of_device.h> | ||
28 | |||
29 | #include <linux/mmc/host.h> | ||
30 | #include <linux/mmc/mmc.h> | ||
31 | #include <linux/mmc/sd.h> | ||
32 | |||
33 | #include <asm/byteorder.h> | ||
34 | |||
35 | |||
36 | #define DRIVER_NAME "wmt-sdhc" | ||
37 | |||
38 | |||
39 | /* MMC/SD controller registers */ | ||
40 | #define SDMMC_CTLR 0x00 | ||
41 | #define SDMMC_CMD 0x01 | ||
42 | #define SDMMC_RSPTYPE 0x02 | ||
43 | #define SDMMC_ARG 0x04 | ||
44 | #define SDMMC_BUSMODE 0x08 | ||
45 | #define SDMMC_BLKLEN 0x0C | ||
46 | #define SDMMC_BLKCNT 0x0E | ||
47 | #define SDMMC_RSP 0x10 | ||
48 | #define SDMMC_CBCR 0x20 | ||
49 | #define SDMMC_INTMASK0 0x24 | ||
50 | #define SDMMC_INTMASK1 0x25 | ||
51 | #define SDMMC_STS0 0x28 | ||
52 | #define SDMMC_STS1 0x29 | ||
53 | #define SDMMC_STS2 0x2A | ||
54 | #define SDMMC_STS3 0x2B | ||
55 | #define SDMMC_RSPTIMEOUT 0x2C | ||
56 | #define SDMMC_CLK 0x30 /* VT8500 only */ | ||
57 | #define SDMMC_EXTCTRL 0x34 | ||
58 | #define SDMMC_SBLKLEN 0x38 | ||
59 | #define SDMMC_DMATIMEOUT 0x3C | ||
60 | |||
61 | |||
62 | /* SDMMC_CTLR bit fields */ | ||
63 | #define CTLR_CMD_START 0x01 | ||
64 | #define CTLR_CMD_WRITE 0x04 | ||
65 | #define CTLR_FIFO_RESET 0x08 | ||
66 | |||
67 | /* SDMMC_BUSMODE bit fields */ | ||
68 | #define BM_SPI_MODE 0x01 | ||
69 | #define BM_FOURBIT_MODE 0x02 | ||
70 | #define BM_EIGHTBIT_MODE 0x04 | ||
71 | #define BM_SD_OFF 0x10 | ||
72 | #define BM_SPI_CS 0x20 | ||
73 | #define BM_SD_POWER 0x40 | ||
74 | #define BM_SOFT_RESET 0x80 | ||
75 | #define BM_ONEBIT_MASK 0xFD | ||
76 | |||
77 | /* SDMMC_BLKLEN bit fields */ | ||
78 | #define BLKL_CRCERR_ABORT 0x0800 | ||
79 | #define BLKL_CD_POL_HIGH 0x1000 | ||
80 | #define BLKL_GPI_CD 0x2000 | ||
81 | #define BLKL_DATA3_CD 0x4000 | ||
82 | #define BLKL_INT_ENABLE 0x8000 | ||
83 | |||
84 | /* SDMMC_INTMASK0 bit fields */ | ||
85 | #define INT0_MBLK_TRAN_DONE_INT_EN 0x10 | ||
86 | #define INT0_BLK_TRAN_DONE_INT_EN 0x20 | ||
87 | #define INT0_CD_INT_EN 0x40 | ||
88 | #define INT0_DI_INT_EN 0x80 | ||
89 | |||
90 | /* SDMMC_INTMASK1 bit fields */ | ||
91 | #define INT1_CMD_RES_TRAN_DONE_INT_EN 0x02 | ||
92 | #define INT1_CMD_RES_TOUT_INT_EN 0x04 | ||
93 | #define INT1_MBLK_AUTO_STOP_INT_EN 0x08 | ||
94 | #define INT1_DATA_TOUT_INT_EN 0x10 | ||
95 | #define INT1_RESCRC_ERR_INT_EN 0x20 | ||
96 | #define INT1_RCRC_ERR_INT_EN 0x40 | ||
97 | #define INT1_WCRC_ERR_INT_EN 0x80 | ||
98 | |||
99 | /* SDMMC_STS0 bit fields */ | ||
100 | #define STS0_WRITE_PROTECT 0x02 | ||
101 | #define STS0_CD_DATA3 0x04 | ||
102 | #define STS0_CD_GPI 0x08 | ||
103 | #define STS0_MBLK_DONE 0x10 | ||
104 | #define STS0_BLK_DONE 0x20 | ||
105 | #define STS0_CARD_DETECT 0x40 | ||
106 | #define STS0_DEVICE_INS 0x80 | ||
107 | |||
108 | /* SDMMC_STS1 bit fields */ | ||
109 | #define STS1_SDIO_INT 0x01 | ||
110 | #define STS1_CMDRSP_DONE 0x02 | ||
111 | #define STS1_RSP_TIMEOUT 0x04 | ||
112 | #define STS1_AUTOSTOP_DONE 0x08 | ||
113 | #define STS1_DATA_TIMEOUT 0x10 | ||
114 | #define STS1_RSP_CRC_ERR 0x20 | ||
115 | #define STS1_RCRC_ERR 0x40 | ||
116 | #define STS1_WCRC_ERR 0x80 | ||
117 | |||
118 | /* SDMMC_STS2 bit fields */ | ||
119 | #define STS2_CMD_RES_BUSY 0x10 | ||
120 | #define STS2_DATARSP_BUSY 0x20 | ||
121 | #define STS2_DIS_FORCECLK 0x80 | ||
122 | |||
123 | |||
124 | /* MMC/SD DMA Controller Registers */ | ||
125 | #define SDDMA_GCR 0x100 | ||
126 | #define SDDMA_IER 0x104 | ||
127 | #define SDDMA_ISR 0x108 | ||
128 | #define SDDMA_DESPR 0x10C | ||
129 | #define SDDMA_RBR 0x110 | ||
130 | #define SDDMA_DAR 0x114 | ||
131 | #define SDDMA_BAR 0x118 | ||
132 | #define SDDMA_CPR 0x11C | ||
133 | #define SDDMA_CCR 0x120 | ||
134 | |||
135 | |||
136 | /* SDDMA_GCR bit fields */ | ||
137 | #define DMA_GCR_DMA_EN 0x00000001 | ||
138 | #define DMA_GCR_SOFT_RESET 0x00000100 | ||
139 | |||
140 | /* SDDMA_IER bit fields */ | ||
141 | #define DMA_IER_INT_EN 0x00000001 | ||
142 | |||
143 | /* SDDMA_ISR bit fields */ | ||
144 | #define DMA_ISR_INT_STS 0x00000001 | ||
145 | |||
146 | /* SDDMA_RBR bit fields */ | ||
147 | #define DMA_RBR_FORMAT 0x40000000 | ||
148 | #define DMA_RBR_END 0x80000000 | ||
149 | |||
150 | /* SDDMA_CCR bit fields */ | ||
151 | #define DMA_CCR_RUN 0x00000080 | ||
152 | #define DMA_CCR_IF_TO_PERIPHERAL 0x00000000 | ||
153 | #define DMA_CCR_PERIPHERAL_TO_IF 0x00400000 | ||
154 | |||
155 | /* SDDMA_CCR event status */ | ||
156 | #define DMA_CCR_EVT_NO_STATUS 0x00000000 | ||
157 | #define DMA_CCR_EVT_UNDERRUN 0x00000001 | ||
158 | #define DMA_CCR_EVT_OVERRUN 0x00000002 | ||
159 | #define DMA_CCR_EVT_DESP_READ 0x00000003 | ||
160 | #define DMA_CCR_EVT_DATA_RW 0x00000004 | ||
161 | #define DMA_CCR_EVT_EARLY_END 0x00000005 | ||
162 | #define DMA_CCR_EVT_SUCCESS 0x0000000F | ||
163 | |||
164 | #define PDMA_READ 0x00 | ||
165 | #define PDMA_WRITE 0x01 | ||
166 | |||
167 | #define WMT_SD_POWER_OFF 0 | ||
168 | #define WMT_SD_POWER_ON 1 | ||
169 | |||
170 | struct wmt_dma_descriptor { | ||
171 | u32 flags; | ||
172 | u32 data_buffer_addr; | ||
173 | u32 branch_addr; | ||
174 | u32 reserved1; | ||
175 | }; | ||
176 | |||
177 | struct wmt_mci_caps { | ||
178 | unsigned int f_min; | ||
179 | unsigned int f_max; | ||
180 | u32 ocr_avail; | ||
181 | u32 caps; | ||
182 | u32 max_seg_size; | ||
183 | u32 max_segs; | ||
184 | u32 max_blk_size; | ||
185 | }; | ||
186 | |||
187 | struct wmt_mci_priv { | ||
188 | struct mmc_host *mmc; | ||
189 | void __iomem *sdmmc_base; | ||
190 | |||
191 | int irq_regular; | ||
192 | int irq_dma; | ||
193 | |||
194 | void *dma_desc_buffer; | ||
195 | dma_addr_t dma_desc_device_addr; | ||
196 | |||
197 | struct completion cmdcomp; | ||
198 | struct completion datacomp; | ||
199 | |||
200 | struct completion *comp_cmd; | ||
201 | struct completion *comp_dma; | ||
202 | |||
203 | struct mmc_request *req; | ||
204 | struct mmc_command *cmd; | ||
205 | |||
206 | struct clk *clk_sdmmc; | ||
207 | struct device *dev; | ||
208 | |||
209 | u8 power_inverted; | ||
210 | u8 cd_inverted; | ||
211 | }; | ||
212 | |||
213 | static void wmt_set_sd_power(struct wmt_mci_priv *priv, int enable) | ||
214 | { | ||
215 | u32 reg_tmp; | ||
216 | if (enable) { | ||
217 | if (priv->power_inverted) { | ||
218 | reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); | ||
219 | writeb(reg_tmp | BM_SD_OFF, | ||
220 | priv->sdmmc_base + SDMMC_BUSMODE); | ||
221 | } else { | ||
222 | reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); | ||
223 | writeb(reg_tmp & (~BM_SD_OFF), | ||
224 | priv->sdmmc_base + SDMMC_BUSMODE); | ||
225 | } | ||
226 | } else { | ||
227 | if (priv->power_inverted) { | ||
228 | reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); | ||
229 | writeb(reg_tmp & (~BM_SD_OFF), | ||
230 | priv->sdmmc_base + SDMMC_BUSMODE); | ||
231 | } else { | ||
232 | reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); | ||
233 | writeb(reg_tmp | BM_SD_OFF, | ||
234 | priv->sdmmc_base + SDMMC_BUSMODE); | ||
235 | } | ||
236 | } | ||
237 | } | ||
238 | |||
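wmt_set_sd_power() above spells out four read-modify-write cases, but logically the BM_SD_OFF bit ends up set exactly when enable and power_inverted agree. A condensed equivalent, sketched with the driver's own identifiers (the _sketch name is not part of the driver):

/* Condensed form of the logic above: BM_SD_OFF is set when 'enable' and
 * 'power_inverted' have the same truth value, and cleared otherwise. */
static void wmt_set_sd_power_sketch(struct wmt_mci_priv *priv, int enable)
{
	u32 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);

	if (!!enable == !!priv->power_inverted)
		reg_tmp |= BM_SD_OFF;
	else
		reg_tmp &= ~BM_SD_OFF;

	writeb(reg_tmp, priv->sdmmc_base + SDMMC_BUSMODE);
}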
239 | static void wmt_mci_read_response(struct mmc_host *mmc) | ||
240 | { | ||
241 | struct wmt_mci_priv *priv; | ||
242 | int idx1, idx2; | ||
243 | u8 tmp_resp; | ||
244 | u32 response; | ||
245 | |||
246 | priv = mmc_priv(mmc); | ||
247 | |||
248 | for (idx1 = 0; idx1 < 4; idx1++) { | ||
249 | response = 0; | ||
250 | for (idx2 = 0; idx2 < 4; idx2++) { | ||
251 | if ((idx1 == 3) && (idx2 == 3)) | ||
252 | tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP); | ||
253 | else | ||
254 | tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP + | ||
255 | (idx1*4) + idx2 + 1); | ||
256 | response |= (tmp_resp << (idx2 * 8)); | ||
257 | } | ||
258 | priv->cmd->resp[idx1] = cpu_to_be32(response); | ||
259 | } | ||
260 | } | ||
261 | |||
262 | static void wmt_mci_start_command(struct wmt_mci_priv *priv) | ||
263 | { | ||
264 | u32 reg_tmp; | ||
265 | |||
266 | reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR); | ||
267 | writeb(reg_tmp | CTLR_CMD_START, priv->sdmmc_base + SDMMC_CTLR); | ||
268 | } | ||
269 | |||
270 | static int wmt_mci_send_command(struct mmc_host *mmc, u8 command, u8 cmdtype, | ||
271 | u32 arg, u8 rsptype) | ||
272 | { | ||
273 | struct wmt_mci_priv *priv; | ||
274 | u32 reg_tmp; | ||
275 | |||
276 | priv = mmc_priv(mmc); | ||
277 | |||
278 | /* write command, arg, resptype registers */ | ||
279 | writeb(command, priv->sdmmc_base + SDMMC_CMD); | ||
280 | writel(arg, priv->sdmmc_base + SDMMC_ARG); | ||
281 | writeb(rsptype, priv->sdmmc_base + SDMMC_RSPTYPE); | ||
282 | |||
283 | /* reset response FIFO */ | ||
284 | reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR); | ||
285 | writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR); | ||
286 | |||
287 | /* ensure clock enabled - VT3465 */ | ||
288 | wmt_set_sd_power(priv, WMT_SD_POWER_ON); | ||
289 | |||
290 | /* clear status bits */ | ||
291 | writeb(0xFF, priv->sdmmc_base + SDMMC_STS0); | ||
292 | writeb(0xFF, priv->sdmmc_base + SDMMC_STS1); | ||
293 | writeb(0xFF, priv->sdmmc_base + SDMMC_STS2); | ||
294 | writeb(0xFF, priv->sdmmc_base + SDMMC_STS3); | ||
295 | |||
296 | /* set command type */ | ||
297 | reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR); | ||
298 | writeb((reg_tmp & 0x0F) | (cmdtype << 4), | ||
299 | priv->sdmmc_base + SDMMC_CTLR); | ||
300 | |||
301 | return 0; | ||
302 | } | ||
303 | |||
304 | static void wmt_mci_disable_dma(struct wmt_mci_priv *priv) | ||
305 | { | ||
306 | writel(DMA_ISR_INT_STS, priv->sdmmc_base + SDDMA_ISR); | ||
307 | writel(0, priv->sdmmc_base + SDDMA_IER); | ||
308 | } | ||
309 | |||
310 | static void wmt_complete_data_request(struct wmt_mci_priv *priv) | ||
311 | { | ||
312 | struct mmc_request *req; | ||
313 | req = priv->req; | ||
314 | |||
315 | req->data->bytes_xfered = req->data->blksz * req->data->blocks; | ||
316 | |||
317 | /* unmap the DMA pages used for write data */ | ||
318 | if (req->data->flags & MMC_DATA_WRITE) | ||
319 | dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg, | ||
320 | req->data->sg_len, DMA_TO_DEVICE); | ||
321 | else | ||
322 | dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg, | ||
323 | req->data->sg_len, DMA_FROM_DEVICE); | ||
324 | |||
325 | /* Check if the DMA ISR returned a data error */ | ||
326 | if ((req->cmd->error) || (req->data->error)) | ||
327 | mmc_request_done(priv->mmc, req); | ||
328 | else { | ||
329 | wmt_mci_read_response(priv->mmc); | ||
330 | if (!req->data->stop) { | ||
331 | /* single-block read/write requests end here */ | ||
332 | mmc_request_done(priv->mmc, req); | ||
333 | } else { | ||
334 | /* | ||
335 | * we change the priv->cmd variable so the response is | ||
336 | * stored in the stop struct rather than the original | ||
337 | * calling command struct | ||
338 | */ | ||
339 | priv->comp_cmd = &priv->cmdcomp; | ||
340 | init_completion(priv->comp_cmd); | ||
341 | priv->cmd = req->data->stop; | ||
342 | wmt_mci_send_command(priv->mmc, req->data->stop->opcode, | ||
343 | 7, req->data->stop->arg, 9); | ||
344 | wmt_mci_start_command(priv); | ||
345 | } | ||
346 | } | ||
347 | } | ||
348 | |||
349 | static irqreturn_t wmt_mci_dma_isr(int irq_num, void *data) | ||
350 | { | ||
351 | struct mmc_host *mmc; | ||
352 | struct wmt_mci_priv *priv; | ||
353 | |||
354 | int status; | ||
355 | |||
356 | priv = (struct wmt_mci_priv *)data; | ||
357 | mmc = priv->mmc; | ||
358 | |||
359 | status = readl(priv->sdmmc_base + SDDMA_CCR) & 0x0F; | ||
360 | |||
361 | if (status != DMA_CCR_EVT_SUCCESS) { | ||
362 | dev_err(priv->dev, "DMA Error: Status = %d\n", status); | ||
363 | priv->req->data->error = -ETIMEDOUT; | ||
364 | complete(priv->comp_dma); | ||
365 | return IRQ_HANDLED; | ||
366 | } | ||
367 | |||
368 | priv->req->data->error = 0; | ||
369 | |||
370 | wmt_mci_disable_dma(priv); | ||
371 | |||
372 | complete(priv->comp_dma); | ||
373 | |||
374 | if (priv->comp_cmd) { | ||
375 | if (completion_done(priv->comp_cmd)) { | ||
376 | /* | ||
377 | * if the command (regular) interrupt has already | ||
378 | * completed, finish off the request; otherwise we wait | ||
379 | * for the command interrupt and finish from there. | ||
380 | */ | ||
381 | wmt_complete_data_request(priv); | ||
382 | } | ||
383 | } | ||
384 | |||
385 | return IRQ_HANDLED; | ||
386 | } | ||
387 | |||
388 | static irqreturn_t wmt_mci_regular_isr(int irq_num, void *data) | ||
389 | { | ||
390 | struct wmt_mci_priv *priv; | ||
391 | u32 status0; | ||
392 | u32 status1; | ||
393 | u32 status2; | ||
394 | u32 reg_tmp; | ||
395 | int cmd_done; | ||
396 | |||
397 | priv = (struct wmt_mci_priv *)data; | ||
398 | cmd_done = 0; | ||
399 | status0 = readb(priv->sdmmc_base + SDMMC_STS0); | ||
400 | status1 = readb(priv->sdmmc_base + SDMMC_STS1); | ||
401 | status2 = readb(priv->sdmmc_base + SDMMC_STS2); | ||
402 | |||
403 | /* Check for card insertion */ | ||
404 | reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0); | ||
405 | if ((reg_tmp & INT0_DI_INT_EN) && (status0 & STS0_DEVICE_INS)) { | ||
406 | mmc_detect_change(priv->mmc, 0); | ||
407 | if (priv->cmd) | ||
408 | priv->cmd->error = -ETIMEDOUT; | ||
409 | if (priv->comp_cmd) | ||
410 | complete(priv->comp_cmd); | ||
411 | if (priv->comp_dma) { | ||
412 | wmt_mci_disable_dma(priv); | ||
413 | complete(priv->comp_dma); | ||
414 | } | ||
415 | writeb(STS0_DEVICE_INS, priv->sdmmc_base + SDMMC_STS0); | ||
416 | return IRQ_HANDLED; | ||
417 | } | ||
418 | |||
419 | if ((!priv->req->data) || | ||
420 | ((priv->req->data->stop) && (priv->cmd == priv->req->data->stop))) { | ||
421 | /* handle non-data & stop_transmission requests */ | ||
422 | if (status1 & STS1_CMDRSP_DONE) { | ||
423 | priv->cmd->error = 0; | ||
424 | cmd_done = 1; | ||
425 | } else if ((status1 & STS1_RSP_TIMEOUT) || | ||
426 | (status1 & STS1_DATA_TIMEOUT)) { | ||
427 | priv->cmd->error = -ETIMEDOUT; | ||
428 | cmd_done = 1; | ||
429 | } | ||
430 | |||
431 | if (cmd_done) { | ||
432 | priv->comp_cmd = NULL; | ||
433 | |||
434 | if (!priv->cmd->error) | ||
435 | wmt_mci_read_response(priv->mmc); | ||
436 | |||
437 | priv->cmd = NULL; | ||
438 | |||
439 | mmc_request_done(priv->mmc, priv->req); | ||
440 | } | ||
441 | } else { | ||
442 | /* handle data requests */ | ||
443 | if (status1 & STS1_CMDRSP_DONE) { | ||
444 | if (priv->cmd) | ||
445 | priv->cmd->error = 0; | ||
446 | if (priv->comp_cmd) | ||
447 | complete(priv->comp_cmd); | ||
448 | } | ||
449 | |||
450 | if ((status1 & STS1_RSP_TIMEOUT) || | ||
451 | (status1 & STS1_DATA_TIMEOUT)) { | ||
452 | if (priv->cmd) | ||
453 | priv->cmd->error = -ETIMEDOUT; | ||
454 | if (priv->comp_cmd) | ||
455 | complete(priv->comp_cmd); | ||
456 | if (priv->comp_dma) { | ||
457 | wmt_mci_disable_dma(priv); | ||
458 | complete(priv->comp_dma); | ||
459 | } | ||
460 | } | ||
461 | |||
462 | if (priv->comp_dma) { | ||
463 | /* | ||
464 | * If the dma interrupt has already completed, finish | ||
465 | * off the request; otherwise we wait for the DMA | ||
466 | * interrupt and finish from there. | ||
467 | */ | ||
468 | if (completion_done(priv->comp_dma)) | ||
469 | wmt_complete_data_request(priv); | ||
470 | } | ||
471 | } | ||
472 | |||
473 | writeb(status0, priv->sdmmc_base + SDMMC_STS0); | ||
474 | writeb(status1, priv->sdmmc_base + SDMMC_STS1); | ||
475 | writeb(status2, priv->sdmmc_base + SDMMC_STS2); | ||
476 | |||
477 | return IRQ_HANDLED; | ||
478 | } | ||
479 | |||
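The command and DMA interrupt handlers above coordinate through the two completions in wmt_mci_priv: each handler signals its own completion, and whichever handler observes that the other side has already finished calls wmt_complete_data_request(). A sketch of that pattern, assuming the driver's own types (the helper name is invented for illustration):

#include <linux/completion.h>
#include <linux/types.h>

/* Illustration only: each ISR signals its own completion, and the one that
 * runs last (it sees the other completion already done) finishes the
 * data request. */
static void wmt_isr_handshake_sketch(struct wmt_mci_priv *priv, bool from_dma)
{
	struct completion *mine  = from_dma ? priv->comp_dma : priv->comp_cmd;
	struct completion *other = from_dma ? priv->comp_cmd : priv->comp_dma;

	if (mine)
		complete(mine);

	if (other && completion_done(other))
		wmt_complete_data_request(priv);
}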
480 | static void wmt_reset_hardware(struct mmc_host *mmc) | ||
481 | { | ||
482 | struct wmt_mci_priv *priv; | ||
483 | u32 reg_tmp; | ||
484 | |||
485 | priv = mmc_priv(mmc); | ||
486 | |||
487 | /* reset controller */ | ||
488 | reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); | ||
489 | writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE); | ||
490 | |||
491 | /* reset response FIFO */ | ||
492 | reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR); | ||
493 | writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR); | ||
494 | |||
495 | /* enable GPI pin to detect card */ | ||
496 | writew(BLKL_INT_ENABLE | BLKL_GPI_CD, priv->sdmmc_base + SDMMC_BLKLEN); | ||
497 | |||
498 | /* clear interrupt status */ | ||
499 | writeb(0xFF, priv->sdmmc_base + SDMMC_STS0); | ||
500 | writeb(0xFF, priv->sdmmc_base + SDMMC_STS1); | ||
501 | |||
502 | /* setup interrupts */ | ||
503 | writeb(INT0_CD_INT_EN | INT0_DI_INT_EN, priv->sdmmc_base + | ||
504 | SDMMC_INTMASK0); | ||
505 | writeb(INT1_DATA_TOUT_INT_EN | INT1_CMD_RES_TRAN_DONE_INT_EN | | ||
506 | INT1_CMD_RES_TOUT_INT_EN, priv->sdmmc_base + SDMMC_INTMASK1); | ||
507 | |||
508 | /* set the DMA timeout */ | ||
509 | writew(8191, priv->sdmmc_base + SDMMC_DMATIMEOUT); | ||
510 | |||
511 | /* auto clock freezing enable */ | ||
512 | reg_tmp = readb(priv->sdmmc_base + SDMMC_STS2); | ||
513 | writeb(reg_tmp | STS2_DIS_FORCECLK, priv->sdmmc_base + SDMMC_STS2); | ||
514 | |||
515 | /* set a default clock speed of 400Khz */ | ||
516 | clk_set_rate(priv->clk_sdmmc, 400000); | ||
517 | } | ||
518 | |||
519 | static int wmt_dma_init(struct mmc_host *mmc) | ||
520 | { | ||
521 | struct wmt_mci_priv *priv; | ||
522 | |||
523 | priv = mmc_priv(mmc); | ||
524 | |||
525 | writel(DMA_GCR_SOFT_RESET, priv->sdmmc_base + SDDMA_GCR); | ||
526 | writel(DMA_GCR_DMA_EN, priv->sdmmc_base + SDDMA_GCR); | ||
527 | if ((readl(priv->sdmmc_base + SDDMA_GCR) & DMA_GCR_DMA_EN) != 0) | ||
528 | return 0; | ||
529 | else | ||
530 | return 1; | ||
531 | } | ||
532 | |||
533 | static void wmt_dma_init_descriptor(struct wmt_dma_descriptor *desc, | ||
534 | u16 req_count, u32 buffer_addr, u32 branch_addr, int end) | ||
535 | { | ||
536 | desc->flags = 0x40000000 | req_count; | ||
537 | if (end) | ||
538 | desc->flags |= 0x80000000; | ||
539 | desc->data_buffer_addr = buffer_addr; | ||
540 | desc->branch_addr = branch_addr; | ||
541 | } | ||
542 | |||
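The literals 0x40000000 and 0x80000000 used here are the DMA_RBR_FORMAT and DMA_RBR_END bits defined near the top of the file, so the same initialisation can be written with the named flags; a sketch for reference (not the driver's code):

/* Same descriptor setup as above, using the named flag bits
 * (DMA_RBR_FORMAT == 0x40000000, DMA_RBR_END == 0x80000000). */
static void wmt_dma_init_descriptor_sketch(struct wmt_dma_descriptor *desc,
		u16 req_count, u32 buffer_addr, u32 branch_addr, int end)
{
	desc->flags = DMA_RBR_FORMAT | req_count;
	if (end)
		desc->flags |= DMA_RBR_END;
	desc->data_buffer_addr = buffer_addr;
	desc->branch_addr = branch_addr;
}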
543 | static void wmt_dma_config(struct mmc_host *mmc, u32 descaddr, u8 dir) | ||
544 | { | ||
545 | struct wmt_mci_priv *priv; | ||
546 | u32 reg_tmp; | ||
547 | |||
548 | priv = mmc_priv(mmc); | ||
549 | |||
550 | /* Enable DMA Interrupts */ | ||
551 | writel(DMA_IER_INT_EN, priv->sdmmc_base + SDDMA_IER); | ||
552 | |||
553 | /* Write DMA Descriptor Pointer Register */ | ||
554 | writel(descaddr, priv->sdmmc_base + SDDMA_DESPR); | ||
555 | |||
556 | writel(0x00, priv->sdmmc_base + SDDMA_CCR); | ||
557 | |||
558 | if (dir == PDMA_WRITE) { | ||
559 | reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR); | ||
560 | writel(reg_tmp & DMA_CCR_IF_TO_PERIPHERAL, priv->sdmmc_base + | ||
561 | SDDMA_CCR); | ||
562 | } else { | ||
563 | reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR); | ||
564 | writel(reg_tmp | DMA_CCR_PERIPHERAL_TO_IF, priv->sdmmc_base + | ||
565 | SDDMA_CCR); | ||
566 | } | ||
567 | } | ||
568 | |||
569 | static void wmt_dma_start(struct wmt_mci_priv *priv) | ||
570 | { | ||
571 | u32 reg_tmp; | ||
572 | |||
573 | reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR); | ||
574 | writel(reg_tmp | DMA_CCR_RUN, priv->sdmmc_base + SDDMA_CCR); | ||
575 | } | ||
576 | |||
577 | static void wmt_mci_request(struct mmc_host *mmc, struct mmc_request *req) | ||
578 | { | ||
579 | struct wmt_mci_priv *priv; | ||
580 | struct wmt_dma_descriptor *desc; | ||
581 | u8 command; | ||
582 | u8 cmdtype; | ||
583 | u32 arg; | ||
584 | u8 rsptype; | ||
585 | u32 reg_tmp; | ||
586 | |||
587 | struct scatterlist *sg; | ||
588 | int i; | ||
589 | int sg_cnt; | ||
590 | int offset; | ||
591 | u32 dma_address; | ||
592 | int desc_cnt; | ||
593 | |||
594 | priv = mmc_priv(mmc); | ||
595 | priv->req = req; | ||
596 | |||
597 | /* | ||
598 | * Track the active command in priv->cmd so its response lands in the | ||
599 | * right resp[] array; on multi-block requests it is later re-pointed | ||
600 | * at the stop command so the stop response is stored there. | ||
601 | */ | ||
602 | priv->cmd = req->cmd; | ||
603 | |||
604 | command = req->cmd->opcode; | ||
605 | arg = req->cmd->arg; | ||
606 | rsptype = mmc_resp_type(req->cmd); | ||
607 | cmdtype = 0; | ||
608 | |||
609 | /* rsptype=7 only valid for SPI commands - should be =2 for SD */ | ||
610 | if (rsptype == 7) | ||
611 | rsptype = 2; | ||
612 | /* rsptype=21 is R1B, convert for controller */ | ||
613 | if (rsptype == 21) | ||
614 | rsptype = 9; | ||
615 | |||
616 | if (!req->data) { | ||
617 | wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype); | ||
618 | wmt_mci_start_command(priv); | ||
619 | /* completion is now handled in the regular_isr() */ | ||
620 | } | ||
621 | if (req->data) { | ||
622 | priv->comp_cmd = &priv->cmdcomp; | ||
623 | init_completion(priv->comp_cmd); | ||
624 | |||
625 | wmt_dma_init(mmc); | ||
626 | |||
627 | /* set controller data length */ | ||
628 | reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN); | ||
629 | writew((reg_tmp & 0xF800) | (req->data->blksz - 1), | ||
630 | priv->sdmmc_base + SDMMC_BLKLEN); | ||
631 | |||
632 | /* set controller block count */ | ||
633 | writew(req->data->blocks, priv->sdmmc_base + SDMMC_BLKCNT); | ||
634 | |||
635 | desc = (struct wmt_dma_descriptor *)priv->dma_desc_buffer; | ||
636 | |||
637 | if (req->data->flags & MMC_DATA_WRITE) { | ||
638 | sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg, | ||
639 | req->data->sg_len, DMA_TO_DEVICE); | ||
640 | cmdtype = 1; | ||
641 | if (req->data->blocks > 1) | ||
642 | cmdtype = 3; | ||
643 | } else { | ||
644 | sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg, | ||
645 | req->data->sg_len, DMA_FROM_DEVICE); | ||
646 | cmdtype = 2; | ||
647 | if (req->data->blocks > 1) | ||
648 | cmdtype = 4; | ||
649 | } | ||
650 | |||
651 | dma_address = priv->dma_desc_device_addr + 16; | ||
652 | desc_cnt = 0; | ||
653 | |||
654 | for_each_sg(req->data->sg, sg, sg_cnt, i) { | ||
655 | offset = 0; | ||
656 | while (offset < sg_dma_len(sg)) { | ||
657 | wmt_dma_init_descriptor(desc, req->data->blksz, | ||
658 | sg_dma_address(sg)+offset, | ||
659 | dma_address, 0); | ||
660 | desc++; | ||
661 | desc_cnt++; | ||
662 | offset += req->data->blksz; | ||
663 | dma_address += 16; | ||
664 | if (desc_cnt == req->data->blocks) | ||
665 | break; | ||
666 | } | ||
667 | } | ||
668 | desc--; | ||
669 | desc->flags |= 0x80000000; | ||
670 | |||
671 | if (req->data->flags & MMC_DATA_WRITE) | ||
672 | wmt_dma_config(mmc, priv->dma_desc_device_addr, | ||
673 | PDMA_WRITE); | ||
674 | else | ||
675 | wmt_dma_config(mmc, priv->dma_desc_device_addr, | ||
676 | PDMA_READ); | ||
677 | |||
678 | wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype); | ||
679 | |||
680 | priv->comp_dma = &priv->datacomp; | ||
681 | init_completion(priv->comp_dma); | ||
682 | |||
683 | wmt_dma_start(priv); | ||
684 | wmt_mci_start_command(priv); | ||
685 | } | ||
686 | } | ||
687 | |||
688 | static void wmt_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | ||
689 | { | ||
690 | struct wmt_mci_priv *priv; | ||
691 | u32 reg_tmp; | ||
692 | |||
693 | priv = mmc_priv(mmc); | ||
694 | |||
695 | if (ios->power_mode == MMC_POWER_UP) { | ||
696 | wmt_reset_hardware(mmc); | ||
697 | |||
698 | wmt_set_sd_power(priv, WMT_SD_POWER_ON); | ||
699 | } | ||
700 | if (ios->power_mode == MMC_POWER_OFF) | ||
701 | wmt_set_sd_power(priv, WMT_SD_POWER_OFF); | ||
702 | |||
703 | if (ios->clock != 0) | ||
704 | clk_set_rate(priv->clk_sdmmc, ios->clock); | ||
705 | |||
706 | switch (ios->bus_width) { | ||
707 | case MMC_BUS_WIDTH_8: | ||
708 | reg_tmp = readb(priv->sdmmc_base + SDMMC_EXTCTRL); | ||
709 | writeb(reg_tmp | 0x04, priv->sdmmc_base + SDMMC_EXTCTRL); | ||
710 | break; | ||
711 | case MMC_BUS_WIDTH_4: | ||
712 | reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); | ||
713 | writeb(reg_tmp | BM_FOURBIT_MODE, priv->sdmmc_base + | ||
714 | SDMMC_BUSMODE); | ||
715 | |||
716 | reg_tmp = readb(priv->sdmmc_base + SDMMC_EXTCTRL); | ||
717 | writeb(reg_tmp & 0xFB, priv->sdmmc_base + SDMMC_EXTCTRL); | ||
718 | break; | ||
719 | case MMC_BUS_WIDTH_1: | ||
720 | reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); | ||
721 | writeb(reg_tmp & BM_ONEBIT_MASK, priv->sdmmc_base + | ||
722 | SDMMC_BUSMODE); | ||
723 | |||
724 | reg_tmp = readb(priv->sdmmc_base + SDMMC_EXTCTRL); | ||
725 | writeb(reg_tmp & 0xFB, priv->sdmmc_base + SDMMC_EXTCTRL); | ||
726 | break; | ||
727 | } | ||
728 | } | ||
729 | |||
730 | static int wmt_mci_get_ro(struct mmc_host *mmc) | ||
731 | { | ||
732 | struct wmt_mci_priv *priv = mmc_priv(mmc); | ||
733 | |||
734 | return !(readb(priv->sdmmc_base + SDMMC_STS0) & STS0_WRITE_PROTECT); | ||
735 | } | ||
736 | |||
737 | static int wmt_mci_get_cd(struct mmc_host *mmc) | ||
738 | { | ||
739 | struct wmt_mci_priv *priv = mmc_priv(mmc); | ||
740 | u32 cd = (readb(priv->sdmmc_base + SDMMC_STS0) & STS0_CD_GPI) >> 3; | ||
741 | |||
742 | return !(cd ^ priv->cd_inverted); | ||
743 | } | ||
744 | |||
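wmt_mci_get_cd() above isolates bit 3 of STS0 (STS0_CD_GPI) and reports a card as present when that bit is clear, unless the "cd-inverted" devicetree property flips the sense. An equivalent sketch (helper name invented for illustration):

/* Equivalent card-detect logic: presence is the inverse of STS0_CD_GPI
 * by default, and the plain bit value when cd_inverted is set. */
static int wmt_mci_get_cd_sketch(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv = mmc_priv(mmc);
	int gpi = !!(readb(priv->sdmmc_base + SDMMC_STS0) & STS0_CD_GPI);

	return priv->cd_inverted ? gpi : !gpi;
}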
745 | static struct mmc_host_ops wmt_mci_ops = { | ||
746 | .request = wmt_mci_request, | ||
747 | .set_ios = wmt_mci_set_ios, | ||
748 | .get_ro = wmt_mci_get_ro, | ||
749 | .get_cd = wmt_mci_get_cd, | ||
750 | }; | ||
751 | |||
752 | /* Controller capabilities */ | ||
753 | static struct wmt_mci_caps wm8505_caps = { | ||
754 | .f_min = 390425, | ||
755 | .f_max = 50000000, | ||
756 | .ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34, | ||
757 | .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED | | ||
758 | MMC_CAP_SD_HIGHSPEED, | ||
759 | .max_seg_size = 65024, | ||
760 | .max_segs = 128, | ||
761 | .max_blk_size = 2048, | ||
762 | }; | ||
763 | |||
764 | static struct of_device_id wmt_mci_dt_ids[] = { | ||
765 | { .compatible = "wm,wm8505-sdhc", .data = &wm8505_caps }, | ||
766 | { /* Sentinel */ }, | ||
767 | }; | ||
768 | |||
769 | static int wmt_mci_probe(struct platform_device *pdev) | ||
770 | { | ||
771 | struct mmc_host *mmc; | ||
772 | struct wmt_mci_priv *priv; | ||
773 | struct device_node *np = pdev->dev.of_node; | ||
774 | const struct of_device_id *of_id = | ||
775 | of_match_device(wmt_mci_dt_ids, &pdev->dev); | ||
776 | const struct wmt_mci_caps *wmt_caps = of_id->data; | ||
777 | int ret; | ||
778 | int regular_irq, dma_irq; | ||
779 | |||
780 | if (!of_id || !of_id->data) { | ||
781 | dev_err(&pdev->dev, "Controller capabilities data missing\n"); | ||
782 | return -EFAULT; | ||
783 | } | ||
784 | |||
785 | if (!np) { | ||
786 | dev_err(&pdev->dev, "Missing SDMMC description in devicetree\n"); | ||
787 | return -EFAULT; | ||
788 | } | ||
789 | |||
790 | regular_irq = irq_of_parse_and_map(np, 0); | ||
791 | dma_irq = irq_of_parse_and_map(np, 1); | ||
792 | |||
793 | if (!regular_irq || !dma_irq) { | ||
794 | dev_err(&pdev->dev, "Getting IRQs failed!\n"); | ||
795 | ret = -ENXIO; | ||
796 | goto fail1; | ||
797 | } | ||
798 | |||
799 | mmc = mmc_alloc_host(sizeof(struct wmt_mci_priv), &pdev->dev); | ||
800 | if (!mmc) { | ||
801 | dev_err(&pdev->dev, "Failed to allocate mmc_host\n"); | ||
802 | ret = -ENOMEM; | ||
803 | goto fail1; | ||
804 | } | ||
805 | |||
806 | mmc->ops = &wmt_mci_ops; | ||
807 | mmc->f_min = wmt_caps->f_min; | ||
808 | mmc->f_max = wmt_caps->f_max; | ||
809 | mmc->ocr_avail = wmt_caps->ocr_avail; | ||
810 | mmc->caps = wmt_caps->caps; | ||
811 | |||
812 | mmc->max_seg_size = wmt_caps->max_seg_size; | ||
813 | mmc->max_segs = wmt_caps->max_segs; | ||
814 | mmc->max_blk_size = wmt_caps->max_blk_size; | ||
815 | |||
816 | mmc->max_req_size = (16*512*mmc->max_segs); | ||
817 | mmc->max_blk_count = mmc->max_req_size / 512; | ||
818 | |||
819 | priv = mmc_priv(mmc); | ||
820 | priv->mmc = mmc; | ||
821 | priv->dev = &pdev->dev; | ||
822 | |||
823 | priv->power_inverted = 0; | ||
824 | priv->cd_inverted = 0; | ||
825 | |||
826 | if (of_get_property(np, "sdon-inverted", NULL)) | ||
827 | priv->power_inverted = 1; | ||
828 | if (of_get_property(np, "cd-inverted", NULL)) | ||
829 | priv->cd_inverted = 1; | ||
830 | |||
831 | priv->sdmmc_base = of_iomap(np, 0); | ||
832 | if (!priv->sdmmc_base) { | ||
833 | dev_err(&pdev->dev, "Failed to map IO space\n"); | ||
834 | ret = -ENOMEM; | ||
835 | goto fail2; | ||
836 | } | ||
837 | |||
838 | priv->irq_regular = regular_irq; | ||
839 | priv->irq_dma = dma_irq; | ||
840 | |||
841 | ret = request_irq(regular_irq, wmt_mci_regular_isr, 0, "sdmmc", priv); | ||
842 | if (ret) { | ||
843 | dev_err(&pdev->dev, "Register regular IRQ fail\n"); | ||
844 | goto fail3; | ||
845 | } | ||
846 | |||
847 | ret = request_irq(dma_irq, wmt_mci_dma_isr, 32, "sdmmc", priv); | ||
848 | if (ret) { | ||
849 | dev_err(&pdev->dev, "Register DMA IRQ fail\n"); | ||
850 | goto fail4; | ||
851 | } | ||
852 | |||
853 | /* alloc some DMA buffers for descriptors/transfers */ | ||
854 | priv->dma_desc_buffer = dma_alloc_coherent(&pdev->dev, | ||
855 | mmc->max_blk_count * 16, | ||
856 | &priv->dma_desc_device_addr, | ||
857 | 208); | ||
858 | if (!priv->dma_desc_buffer) { | ||
859 | dev_err(&pdev->dev, "DMA alloc fail\n"); | ||
860 | ret = -EPERM; | ||
861 | goto fail5; | ||
862 | } | ||
863 | |||
864 | platform_set_drvdata(pdev, mmc); | ||
865 | |||
866 | priv->clk_sdmmc = of_clk_get(np, 0); | ||
867 | if (IS_ERR(priv->clk_sdmmc)) { | ||
868 | dev_err(&pdev->dev, "Error getting clock\n"); | ||
869 | ret = PTR_ERR(priv->clk_sdmmc); | ||
870 | goto fail5; | ||
871 | } | ||
872 | |||
873 | clk_prepare_enable(priv->clk_sdmmc); | ||
874 | |||
875 | /* configure the controller to a known 'ready' state */ | ||
876 | wmt_reset_hardware(mmc); | ||
877 | |||
878 | mmc_add_host(mmc); | ||
879 | |||
880 | dev_info(&pdev->dev, "WMT SDHC Controller initialized\n"); | ||
881 | |||
882 | return 0; | ||
883 | fail5: | ||
884 | free_irq(dma_irq, priv); | ||
885 | fail4: | ||
886 | free_irq(regular_irq, priv); | ||
887 | fail3: | ||
888 | iounmap(priv->sdmmc_base); | ||
889 | fail2: | ||
890 | mmc_free_host(mmc); | ||
891 | fail1: | ||
892 | return ret; | ||
893 | } | ||
894 | |||
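Two details of the probe path above are worth spelling out: the descriptor buffer is sized as max_blk_count * 16 because each transferred block consumes one struct wmt_dma_descriptor (four u32 fields, 16 bytes), and the bare 208 passed as the gfp argument to dma_alloc_coherent() appears to be the numeric value of GFP_KERNEL (0xD0) on kernels of this vintage rather than the named constant. A compile-time check for the first point might look like this (sketch only, not part of the driver):

#include <linux/bug.h>

/* Illustration only: the "max_blk_count * 16" sizing above assumes the
 * DMA descriptor layout is exactly 16 bytes. */
static inline void wmt_desc_size_check_sketch(void)
{
	BUILD_BUG_ON(sizeof(struct wmt_dma_descriptor) != 16);
}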
895 | static int wmt_mci_remove(struct platform_device *pdev) | ||
896 | { | ||
897 | struct mmc_host *mmc; | ||
898 | struct wmt_mci_priv *priv; | ||
899 | struct resource *res; | ||
900 | u32 reg_tmp; | ||
901 | |||
902 | mmc = platform_get_drvdata(pdev); | ||
903 | priv = mmc_priv(mmc); | ||
904 | |||
905 | /* reset SD controller */ | ||
906 | reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); | ||
907 | writel(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE); | ||
908 | reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN); | ||
909 | writew(reg_tmp & ~(0xA000), priv->sdmmc_base + SDMMC_BLKLEN); | ||
910 | writeb(0xFF, priv->sdmmc_base + SDMMC_STS0); | ||
911 | writeb(0xFF, priv->sdmmc_base + SDMMC_STS1); | ||
912 | |||
913 | /* release the dma buffers */ | ||
914 | dma_free_coherent(&pdev->dev, priv->mmc->max_blk_count * 16, | ||
915 | priv->dma_desc_buffer, priv->dma_desc_device_addr); | ||
916 | |||
917 | mmc_remove_host(mmc); | ||
918 | |||
919 | free_irq(priv->irq_regular, priv); | ||
920 | free_irq(priv->irq_dma, priv); | ||
921 | |||
922 | iounmap(priv->sdmmc_base); | ||
923 | |||
924 | clk_disable_unprepare(priv->clk_sdmmc); | ||
925 | clk_put(priv->clk_sdmmc); | ||
926 | |||
927 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
928 | release_mem_region(res->start, res->end - res->start + 1); | ||
929 | |||
930 | mmc_free_host(mmc); | ||
931 | |||
932 | platform_set_drvdata(pdev, NULL); | ||
933 | |||
934 | dev_info(&pdev->dev, "WMT MCI device removed\n"); | ||
935 | |||
936 | return 0; | ||
937 | } | ||
938 | |||
939 | #ifdef CONFIG_PM | ||
940 | static int wmt_mci_suspend(struct device *dev) | ||
941 | { | ||
942 | u32 reg_tmp; | ||
943 | struct platform_device *pdev = to_platform_device(dev); | ||
944 | struct mmc_host *mmc = platform_get_drvdata(pdev); | ||
945 | struct wmt_mci_priv *priv; | ||
946 | int ret; | ||
947 | |||
948 | if (!mmc) | ||
949 | return 0; | ||
950 | |||
951 | priv = mmc_priv(mmc); | ||
952 | ret = mmc_suspend_host(mmc); | ||
953 | |||
954 | if (!ret) { | ||
955 | reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); | ||
956 | writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + | ||
957 | SDMMC_BUSMODE); | ||
958 | |||
959 | reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN); | ||
960 | writew(reg_tmp & 0x5FFF, priv->sdmmc_base + SDMMC_BLKLEN); | ||
961 | |||
962 | writeb(0xFF, priv->sdmmc_base + SDMMC_STS0); | ||
963 | writeb(0xFF, priv->sdmmc_base + SDMMC_STS1); | ||
964 | |||
965 | clk_disable(priv->clk_sdmmc); | ||
966 | } | ||
967 | return ret; | ||
968 | } | ||
969 | |||
970 | static int wmt_mci_resume(struct device *dev) | ||
971 | { | ||
972 | u32 reg_tmp; | ||
973 | struct platform_device *pdev = to_platform_device(dev); | ||
974 | struct mmc_host *mmc = platform_get_drvdata(pdev); | ||
975 | struct wmt_mci_priv *priv; | ||
976 | int ret = 0; | ||
977 | |||
978 | if (mmc) { | ||
979 | priv = mmc_priv(mmc); | ||
980 | clk_enable(priv->clk_sdmmc); | ||
981 | |||
982 | reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); | ||
983 | writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + | ||
984 | SDMMC_BUSMODE); | ||
985 | |||
986 | reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN); | ||
987 | writew(reg_tmp | (BLKL_GPI_CD | BLKL_INT_ENABLE), | ||
988 | priv->sdmmc_base + SDMMC_BLKLEN); | ||
989 | |||
990 | reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0); | ||
991 | writeb(reg_tmp | INT0_DI_INT_EN, priv->sdmmc_base + | ||
992 | SDMMC_INTMASK0); | ||
993 | |||
994 | ret = mmc_resume_host(mmc); | ||
995 | } | ||
996 | |||
997 | return ret; | ||
998 | } | ||
999 | |||
1000 | static const struct dev_pm_ops wmt_mci_pm = { | ||
1001 | .suspend = wmt_mci_suspend, | ||
1002 | .resume = wmt_mci_resume, | ||
1003 | }; | ||
1004 | |||
1005 | #define wmt_mci_pm_ops (&wmt_mci_pm) | ||
1006 | |||
1007 | #else /* !CONFIG_PM */ | ||
1008 | |||
1009 | #define wmt_mci_pm_ops NULL | ||
1010 | |||
1011 | #endif | ||
1012 | |||
1013 | static struct platform_driver wmt_mci_driver = { | ||
1014 | .probe = wmt_mci_probe, | ||
1015 | .remove = __exit_p(wmt_mci_remove), | ||
1016 | .driver = { | ||
1017 | .name = DRIVER_NAME, | ||
1018 | .owner = THIS_MODULE, | ||
1019 | .pm = wmt_mci_pm_ops, | ||
1020 | .of_match_table = wmt_mci_dt_ids, | ||
1021 | }, | ||
1022 | }; | ||
1023 | |||
1024 | module_platform_driver(wmt_mci_driver); | ||
1025 | |||
1026 | MODULE_DESCRIPTION("Wondermedia MMC/SD Driver"); | ||
1027 | MODULE_AUTHOR("Tony Prisk"); | ||
1028 | MODULE_LICENSE("GPL v2"); | ||
1029 | MODULE_DEVICE_TABLE(of, wmt_mci_dt_ids); | ||