author    Jason Cooper <jason@lakedaemon.net>  2013-01-31 12:39:01 -0500
committer Jason Cooper <jason@lakedaemon.net>  2013-01-31 12:39:01 -0500
commit    04ee16bdf486061147c608410c2c9181352aaa3a
tree      2e33b82635f20f5899248468f3a11d576886c7da /drivers
parent    222922189c0b3c6e3ba117ca6e964278c1e0ccc8
parent    d2938758caae523768f60208dbc8a487be3f0bd5
Merge commit 'd293875' into mvebu/boards
Pulled in mmc/mmc-next up to: d293875 mmc: mvsdio: add pinctrl integration
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/mmc/card/block.c             30
-rw-r--r--  drivers/mmc/card/queue.c             32
-rw-r--r--  drivers/mmc/card/queue.h              3
-rw-r--r--  drivers/mmc/core/bus.c                1
-rw-r--r--  drivers/mmc/core/core.c             121
-rw-r--r--  drivers/mmc/core/core.h               1
-rw-r--r--  drivers/mmc/core/sdio.c              33
-rw-r--r--  drivers/mmc/core/slot-gpio.c         57
-rw-r--r--  drivers/mmc/host/Kconfig             11
-rw-r--r--  drivers/mmc/host/Makefile             1
-rw-r--r--  drivers/mmc/host/mvsdio.c           131
-rw-r--r--  drivers/mmc/host/sdhci-bcm2835.c    227
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c   59
-rw-r--r--  drivers/mmc/host/sdhci-pxav3.c       12
-rw-r--r--  drivers/mmc/host/sdhci.c            117
-rw-r--r--  drivers/mmc/host/sh_mmcif.c         280
-rw-r--r--  drivers/mmc/host/tmio_mmc_pio.c       8
17 files changed, 795 insertions, 329 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 21056b9ef0a0..f79b4688e471 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -113,17 +113,6 @@ struct mmc_blk_data {
 
 static DEFINE_MUTEX(open_lock);
 
-enum mmc_blk_status {
-	MMC_BLK_SUCCESS = 0,
-	MMC_BLK_PARTIAL,
-	MMC_BLK_CMD_ERR,
-	MMC_BLK_RETRY,
-	MMC_BLK_ABORT,
-	MMC_BLK_DATA_ERR,
-	MMC_BLK_ECC_ERR,
-	MMC_BLK_NOMEDIUM,
-};
-
 module_param(perdev_minors, int, 0444);
 MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
 
@@ -1364,8 +1353,11 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 	} else
 		areq = NULL;
 	areq = mmc_start_req(card->host, areq, (int *) &status);
-	if (!areq)
+	if (!areq) {
+		if (status == MMC_BLK_NEW_REQUEST)
+			mq->flags |= MMC_QUEUE_NEW_REQUEST;
 		return 0;
+	}
 
 	mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
 	brq = &mq_rq->brq;
@@ -1438,6 +1430,10 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 			break;
 		case MMC_BLK_NOMEDIUM:
 			goto cmd_abort;
+		default:
+			pr_err("%s: Unhandled return value (%d)",
+					req->rq_disk->disk_name, status);
+			goto cmd_abort;
 		}
 
 		if (ret) {
@@ -1472,6 +1468,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	int ret;
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
+	struct mmc_host *host = card->host;
+	unsigned long flags;
 
 	if (req && !mq->mqrq_prev->req)
 		/* claim host only for the first request */
@@ -1486,6 +1484,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		goto out;
 	}
 
+	mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
 	if (req && req->cmd_flags & REQ_DISCARD) {
 		/* complete ongoing async transfer before issuing discard */
 		if (card->host->areq)
@@ -1501,11 +1500,16 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		mmc_blk_issue_rw_rq(mq, NULL);
 		ret = mmc_blk_issue_flush(mq, req);
 	} else {
+		if (!req && host->areq) {
+			spin_lock_irqsave(&host->context_info.lock, flags);
+			host->context_info.is_waiting_last_req = true;
+			spin_unlock_irqrestore(&host->context_info.lock, flags);
+		}
 		ret = mmc_blk_issue_rw_rq(mq, req);
 	}
 
 out:
-	if (!req)
+	if (!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST))
 		/* release host only when there are no more requests */
 		mmc_release_host(card->host);
 	return ret;
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index fadf52eb5d70..5e0971016ac5 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -22,7 +22,8 @@
 
 #define MMC_QUEUE_BOUNCESZ	65536
 
-#define MMC_QUEUE_SUSPENDED	(1 << 0)
+
+#define MMC_REQ_SPECIAL_MASK	(REQ_DISCARD | REQ_FLUSH)
 
 /*
  * Prepare a MMC request. This just filters out odd stuff.
@@ -58,6 +59,7 @@ static int mmc_queue_thread(void *d)
 	do {
 		struct request *req = NULL;
 		struct mmc_queue_req *tmp;
+		unsigned int cmd_flags = 0;
 
 		spin_lock_irq(q->queue_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -67,12 +69,23 @@ static int mmc_queue_thread(void *d)
 
 		if (req || mq->mqrq_prev->req) {
 			set_current_state(TASK_RUNNING);
+			cmd_flags = req ? req->cmd_flags : 0;
 			mq->issue_fn(mq, req);
+			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
+				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+				continue; /* fetch again */
+			}
 
 			/*
 			 * Current request becomes previous request
 			 * and vice versa.
+			 * In case of special requests, current request
+			 * has been finished. Do not assign it to previous
+			 * request.
 			 */
+			if (cmd_flags & MMC_REQ_SPECIAL_MASK)
+				mq->mqrq_cur->req = NULL;
+
 			mq->mqrq_prev->brq.mrq.data = NULL;
 			mq->mqrq_prev->req = NULL;
 			tmp = mq->mqrq_prev;
@@ -103,6 +116,8 @@ static void mmc_request_fn(struct request_queue *q)
 {
 	struct mmc_queue *mq = q->queuedata;
 	struct request *req;
+	unsigned long flags;
+	struct mmc_context_info *cntx;
 
 	if (!mq) {
 		while ((req = blk_fetch_request(q)) != NULL) {
@@ -112,7 +127,20 @@ static void mmc_request_fn(struct request_queue *q)
 		return;
 	}
 
-	if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
+	cntx = &mq->card->host->context_info;
+	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
+		/*
+		 * New MMC request arrived when MMC thread may be
+		 * blocked on the previous request to be complete
+		 * with no current request fetched
+		 */
+		spin_lock_irqsave(&cntx->lock, flags);
+		if (cntx->is_waiting_last_req) {
+			cntx->is_new_req = true;
+			wake_up_interruptible(&cntx->wait);
+		}
+		spin_unlock_irqrestore(&cntx->lock, flags);
+	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
 		wake_up_process(mq->thread);
 }
 
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index d2a1eb4b9f9f..e20c27b2b8b4 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -27,6 +27,9 @@ struct mmc_queue {
 	struct task_struct	*thread;
 	struct semaphore	thread_sem;
 	unsigned int		flags;
+#define MMC_QUEUE_SUSPENDED	(1 << 0)
+#define MMC_QUEUE_NEW_REQUEST	(1 << 1)
+
 	int			(*issue_fn)(struct mmc_queue *, struct request *);
 	void			*data;
 	struct request_queue	*queue;
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 420cb6753c1e..e219c97a02a4 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -321,6 +321,7 @@ int mmc_add_card(struct mmc_card *card)
 #ifdef CONFIG_DEBUG_FS
 	mmc_add_card_debugfs(card);
 #endif
+	mmc_init_context_info(card->host);
 
 	ret = device_add(&card->dev);
 	if (ret)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index aaed7687cf09..8b3a1222e665 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -319,11 +319,44 @@ out:
 }
 EXPORT_SYMBOL(mmc_start_bkops);
 
+/*
+ * mmc_wait_data_done() - done callback for data request
+ * @mrq: done data request
+ *
+ * Wakes up mmc context, passed as a callback to host controller driver
+ */
+static void mmc_wait_data_done(struct mmc_request *mrq)
+{
+	mrq->host->context_info.is_done_rcv = true;
+	wake_up_interruptible(&mrq->host->context_info.wait);
+}
+
 static void mmc_wait_done(struct mmc_request *mrq)
 {
 	complete(&mrq->completion);
 }
 
+/*
+ *__mmc_start_data_req() - starts data request
+ * @host: MMC host to start the request
+ * @mrq: data request to start
+ *
+ * Sets the done callback to be called when request is completed by the card.
+ * Starts data mmc request execution
+ */
+static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+	mrq->done = mmc_wait_data_done;
+	mrq->host = host;
+	if (mmc_card_removed(host->card)) {
+		mrq->cmd->error = -ENOMEDIUM;
+		return -ENOMEDIUM;
+	}
+	mmc_start_request(host, mrq);
+
+	return 0;
+}
+
 static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
 {
 	init_completion(&mrq->completion);
@@ -337,6 +370,62 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
 	return 0;
 }
 
+/*
+ * mmc_wait_for_data_req_done() - wait for request completed
+ * @host: MMC host to prepare the command.
+ * @mrq: MMC request to wait for
+ *
+ * Blocks MMC context till host controller will ack end of data request
+ * execution or new request notification arrives from the block layer.
+ * Handles command retries.
+ *
+ * Returns enum mmc_blk_status after checking errors.
+ */
+static int mmc_wait_for_data_req_done(struct mmc_host *host,
+				      struct mmc_request *mrq,
+				      struct mmc_async_req *next_req)
+{
+	struct mmc_command *cmd;
+	struct mmc_context_info *context_info = &host->context_info;
+	int err;
+	unsigned long flags;
+
+	while (1) {
+		wait_event_interruptible(context_info->wait,
+				(context_info->is_done_rcv ||
+				 context_info->is_new_req));
+		spin_lock_irqsave(&context_info->lock, flags);
+		context_info->is_waiting_last_req = false;
+		spin_unlock_irqrestore(&context_info->lock, flags);
+		if (context_info->is_done_rcv) {
+			context_info->is_done_rcv = false;
+			context_info->is_new_req = false;
+			cmd = mrq->cmd;
+			if (!cmd->error || !cmd->retries ||
+			    mmc_card_removed(host->card)) {
+				err = host->areq->err_check(host->card,
+							    host->areq);
+				break; /* return err */
+			} else {
+				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
+					mmc_hostname(host),
+					cmd->opcode, cmd->error);
+				cmd->retries--;
+				cmd->error = 0;
+				host->ops->request(host, mrq);
+				continue; /* wait for done/new event again */
+			}
+		} else if (context_info->is_new_req) {
+			context_info->is_new_req = false;
+			if (!next_req) {
+				err = MMC_BLK_NEW_REQUEST;
+				break; /* return err */
+			}
+		}
+	}
+	return err;
+}
+
 static void mmc_wait_for_req_done(struct mmc_host *host,
 				  struct mmc_request *mrq)
 {
@@ -426,8 +515,17 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
 		mmc_pre_req(host, areq->mrq, !host->areq);
 
 	if (host->areq) {
-		mmc_wait_for_req_done(host, host->areq->mrq);
-		err = host->areq->err_check(host->card, host->areq);
+		err = mmc_wait_for_data_req_done(host, host->areq->mrq,
+						 areq);
+		if (err == MMC_BLK_NEW_REQUEST) {
+			if (error)
+				*error = err;
+			/*
+			 * The previous request was not completed,
+			 * nothing to return
+			 */
+			return NULL;
+		}
 		/*
 		 * Check BKOPS urgency for each R1 response
 		 */
@@ -439,7 +537,7 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
 	}
 
 	if (!err && areq)
-		start_err = __mmc_start_req(host, areq->mrq);
+		start_err = __mmc_start_data_req(host, areq->mrq);
 
 	if (host->areq)
 		mmc_post_req(host, host->areq->mrq, 0);
@@ -2581,6 +2679,23 @@ int mmc_pm_notify(struct notifier_block *notify_block,
 }
 #endif
 
+/**
+ * mmc_init_context_info() - init synchronization context
+ * @host: mmc host
+ *
+ * Init struct context_info needed to implement asynchronous
+ * request mechanism, used by mmc core, host driver and mmc requests
+ * supplier.
+ */
+void mmc_init_context_info(struct mmc_host *host)
+{
+	spin_lock_init(&host->context_info.lock);
+	host->context_info.is_new_req = false;
+	host->context_info.is_done_rcv = false;
+	host->context_info.is_waiting_last_req = false;
+	init_waitqueue_head(&host->context_info.wait);
+}
+
 static int __init mmc_init(void)
 {
 	int ret;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 3bdafbca354f..0272b3284b5e 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -76,5 +76,6 @@ void mmc_remove_host_debugfs(struct mmc_host *host);
 void mmc_add_card_debugfs(struct mmc_card *card);
 void mmc_remove_card_debugfs(struct mmc_card *card);
 
+void mmc_init_context_info(struct mmc_host *host);
 #endif
 
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 2273ce6b6c1a..3a64933466b8 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -157,10 +157,7 @@ static int sdio_read_cccr(struct mmc_card *card, u32 ocr)
 	if (ret)
 		goto out;
 
-	if (card->host->caps &
-		(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
-		 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
-		 MMC_CAP_UHS_DDR50)) {
+	if (mmc_host_uhs(card->host)) {
 		if (data & SDIO_UHS_DDR50)
 			card->sw_caps.sd3_bus_mode
 				|= SD_MODE_UHS_DDR50;
@@ -478,8 +475,7 @@ static int sdio_set_bus_speed_mode(struct mmc_card *card)
 	 * If the host doesn't support any of the UHS-I modes, fallback on
 	 * default speed.
 	 */
-	if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
-	    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
+	if (!mmc_host_uhs(card->host))
 		return 0;
 
 	bus_speed = SDIO_SPEED_SDR12;
@@ -489,23 +485,27 @@ static int sdio_set_bus_speed_mode(struct mmc_card *card)
 		bus_speed = SDIO_SPEED_SDR104;
 		timing = MMC_TIMING_UHS_SDR104;
 		card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+		card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
 	} else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
 		   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
 		bus_speed = SDIO_SPEED_DDR50;
 		timing = MMC_TIMING_UHS_DDR50;
 		card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+		card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
 	} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
 		    MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
 		    SD_MODE_UHS_SDR50)) {
 		bus_speed = SDIO_SPEED_SDR50;
 		timing = MMC_TIMING_UHS_SDR50;
 		card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+		card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
 	} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
 		    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
 		   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
 		bus_speed = SDIO_SPEED_SDR25;
 		timing = MMC_TIMING_UHS_SDR25;
 		card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+		card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
 	} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
 		    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
 		    MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
@@ -513,6 +513,7 @@ static int sdio_set_bus_speed_mode(struct mmc_card *card)
 		bus_speed = SDIO_SPEED_SDR12;
 		timing = MMC_TIMING_UHS_SDR12;
 		card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+		card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
 	}
 
 	err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
@@ -645,11 +646,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
 	 * systems that claim 1.8v signalling in fact do not support
 	 * it.
 	 */
-	if ((ocr & R4_18V_PRESENT) &&
-	    (host->caps &
-		(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
-		 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
-		 MMC_CAP_UHS_DDR50))) {
+	if ((ocr & R4_18V_PRESENT) && mmc_host_uhs(host)) {
 		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
 					true);
 		if (err) {
@@ -937,10 +934,12 @@ static int mmc_sdio_resume(struct mmc_host *host)
 	mmc_claim_host(host);
 
 	/* No need to reinitialize powered-resumed nonremovable cards */
-	if (mmc_card_is_removable(host) || !mmc_card_keep_power(host))
+	if (mmc_card_is_removable(host) || !mmc_card_keep_power(host)) {
+		sdio_reset(host);
+		mmc_go_idle(host);
 		err = mmc_sdio_init_card(host, host->ocr, host->card,
 					mmc_card_keep_power(host));
-	else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
+	} else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
 		/* We may have switched to 1-bit mode during suspend */
 		err = sdio_enable_4bit_bus(host->card);
 		if (err > 0) {
@@ -1020,6 +1019,10 @@ static int mmc_sdio_power_restore(struct mmc_host *host)
 		goto out;
 	}
 
+	if (mmc_host_uhs(host))
+		/* to query card if 1.8V signalling is supported */
+		host->ocr |= R4_18V_PRESENT;
+
 	ret = mmc_sdio_init_card(host, host->ocr, host->card,
 				mmc_card_keep_power(host));
 	if (!ret && host->sdio_irqs)
@@ -1085,6 +1088,10 @@ int mmc_attach_sdio(struct mmc_host *host)
 	/*
 	 * Detect and init the card.
 	 */
+	if (mmc_host_uhs(host))
+		/* to query card if 1.8V signalling is supported */
+		host->ocr |= R4_18V_PRESENT;
+
 	err = mmc_sdio_init_card(host, host->ocr, NULL, 0);
 	if (err) {
 		if (err == -EAGAIN) {
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 16a1c0b6f264..324235105519 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -92,6 +92,20 @@ int mmc_gpio_get_cd(struct mmc_host *host)
 }
 EXPORT_SYMBOL(mmc_gpio_get_cd);
 
+/**
+ * mmc_gpio_request_ro - request a gpio for write-protection
+ * @host: mmc host
+ * @gpio: gpio number requested
+ *
+ * As devm_* managed functions are used in mmc_gpio_request_ro(), client
+ * drivers do not need to explicitly call mmc_gpio_free_ro() for freeing up,
+ * if the requesting and freeing are only needed at probing and unbinding time
+ * for once. However, if client drivers do something special like runtime
+ * switching for write-protection, they are responsible for calling
+ * mmc_gpio_request_ro() and mmc_gpio_free_ro() as a pair on their own.
+ *
+ * Returns zero on success, else an error.
+ */
 int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio)
 {
 	struct mmc_gpio *ctx;
@@ -106,7 +120,8 @@ int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio)
 
 	ctx = host->slot.handler_priv;
 
-	ret = gpio_request_one(gpio, GPIOF_DIR_IN, ctx->ro_label);
+	ret = devm_gpio_request_one(&host->class_dev, gpio, GPIOF_DIR_IN,
+				    ctx->ro_label);
 	if (ret < 0)
 		return ret;
 
@@ -116,6 +131,20 @@ int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio)
 }
 EXPORT_SYMBOL(mmc_gpio_request_ro);
 
+/**
+ * mmc_gpio_request_cd - request a gpio for card-detection
+ * @host: mmc host
+ * @gpio: gpio number requested
+ *
+ * As devm_* managed functions are used in mmc_gpio_request_cd(), client
+ * drivers do not need to explicitly call mmc_gpio_free_cd() for freeing up,
+ * if the requesting and freeing are only needed at probing and unbinding time
+ * for once. However, if client drivers do something special like runtime
+ * switching for card-detection, they are responsible for calling
+ * mmc_gpio_request_cd() and mmc_gpio_free_cd() as a pair on their own.
+ *
+ * Returns zero on success, else an error.
+ */
 int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
 {
 	struct mmc_gpio *ctx;
@@ -128,7 +157,8 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
 
 	ctx = host->slot.handler_priv;
 
-	ret = gpio_request_one(gpio, GPIOF_DIR_IN, ctx->cd_label);
+	ret = devm_gpio_request_one(&host->class_dev, gpio, GPIOF_DIR_IN,
+				    ctx->cd_label);
 	if (ret < 0)
 		/*
 		 * don't bother freeing memory. It might still get used by other
@@ -146,7 +176,8 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
 		irq = -EINVAL;
 
 	if (irq >= 0) {
-		ret = request_threaded_irq(irq, NULL, mmc_gpio_cd_irqt,
+		ret = devm_request_threaded_irq(&host->class_dev, irq,
+			NULL, mmc_gpio_cd_irqt,
 			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
 			ctx->cd_label, host);
 		if (ret < 0)
@@ -164,6 +195,13 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
 }
 EXPORT_SYMBOL(mmc_gpio_request_cd);
 
+/**
+ * mmc_gpio_free_ro - free the write-protection gpio
+ * @host: mmc host
+ *
+ * It's provided only for cases that client drivers need to manually free
+ * up the write-protection gpio requested by mmc_gpio_request_ro().
+ */
 void mmc_gpio_free_ro(struct mmc_host *host)
 {
 	struct mmc_gpio *ctx = host->slot.handler_priv;
@@ -175,10 +213,17 @@ void mmc_gpio_free_ro(struct mmc_host *host)
 	gpio = ctx->ro_gpio;
 	ctx->ro_gpio = -EINVAL;
 
-	gpio_free(gpio);
+	devm_gpio_free(&host->class_dev, gpio);
 }
 EXPORT_SYMBOL(mmc_gpio_free_ro);
 
+/**
+ * mmc_gpio_free_cd - free the card-detection gpio
+ * @host: mmc host
+ *
+ * It's provided only for cases that client drivers need to manually free
+ * up the card-detection gpio requested by mmc_gpio_request_cd().
+ */
 void mmc_gpio_free_cd(struct mmc_host *host)
 {
 	struct mmc_gpio *ctx = host->slot.handler_priv;
@@ -188,13 +233,13 @@ void mmc_gpio_free_cd(struct mmc_host *host)
 		return;
 
 	if (host->slot.cd_irq >= 0) {
-		free_irq(host->slot.cd_irq, host);
+		devm_free_irq(&host->class_dev, host->slot.cd_irq, host);
 		host->slot.cd_irq = -EINVAL;
 	}
 
 	gpio = ctx->cd_gpio;
 	ctx->cd_gpio = -EINVAL;
 
-	gpio_free(gpio);
+	devm_gpio_free(&host->class_dev, gpio);
 }
 EXPORT_SYMBOL(mmc_gpio_free_cd);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 8d13c6594520..66a54aa68e25 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -241,6 +241,17 @@ config MMC_SDHCI_S3C_DMA
 
 	  YMMV.
 
+config MMC_SDHCI_BCM2835
+	tristate "SDHCI platform support for the BCM2835 SD/MMC Controller"
+	depends on ARCH_BCM2835
+	depends on MMC_SDHCI_PLTFM
+	select MMC_SDHCI_IO_ACCESSORS
+	help
+	  This selects the BCM2835 SD/MMC controller. If you have a BCM2835
+	  platform with SD or MMC devices, say Y or M here.
+
+	  If unsure, say N.
+
 config MMC_OMAP
 	tristate "TI OMAP Multimedia Card Interface support"
 	depends on ARCH_OMAP
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index e4e218c930bd..d5ea072207ec 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_MMC_SDHCI_DOVE)		+= sdhci-dove.o
 obj-$(CONFIG_MMC_SDHCI_TEGRA)		+= sdhci-tegra.o
 obj-$(CONFIG_MMC_SDHCI_OF_ESDHC)	+= sdhci-of-esdhc.o
 obj-$(CONFIG_MMC_SDHCI_OF_HLWD)		+= sdhci-of-hlwd.o
+obj-$(CONFIG_MMC_SDHCI_BCM2835)		+= sdhci-bcm2835.o
 
 ifeq ($(CONFIG_CB710_DEBUG),y)
 	CFLAGS-cb710-mmc	+= -DDEBUG
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index f8dd36102949..145cdaf000d1 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -21,7 +21,11 @@
 #include <linux/irq.h>
 #include <linux/clk.h>
 #include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
 #include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/pinctrl/consumer.h>
 
 #include <asm/sizes.h>
 #include <asm/unaligned.h>
@@ -51,8 +55,6 @@ struct mvsd_host {
 	struct mmc_host *mmc;
 	struct device *dev;
 	struct clk *clk;
-	int gpio_card_detect;
-	int gpio_write_protect;
 };
 
 #define mvsd_write(offs, val)	writel(val, iobase + (offs))
@@ -538,13 +540,6 @@ static void mvsd_timeout_timer(unsigned long data)
 		mmc_request_done(host->mmc, mrq);
 }
 
-static irqreturn_t mvsd_card_detect_irq(int irq, void *dev)
-{
-	struct mvsd_host *host = dev;
-	mmc_detect_change(host->mmc, msecs_to_jiffies(100));
-	return IRQ_HANDLED;
-}
-
 static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable)
 {
 	struct mvsd_host *host = mmc_priv(mmc);
@@ -564,20 +559,6 @@ static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable)
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
-static int mvsd_get_ro(struct mmc_host *mmc)
-{
-	struct mvsd_host *host = mmc_priv(mmc);
-
-	if (host->gpio_write_protect)
-		return gpio_get_value(host->gpio_write_protect);
-
-	/*
-	 * Board doesn't support read only detection; let the mmc core
-	 * decide what to do.
-	 */
-	return -ENOSYS;
-}
-
 static void mvsd_power_up(struct mvsd_host *host)
 {
 	void __iomem *iobase = host->base;
@@ -674,7 +655,7 @@ static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
 static const struct mmc_host_ops mvsd_ops = {
 	.request		= mvsd_request,
-	.get_ro			= mvsd_get_ro,
+	.get_ro			= mmc_gpio_get_ro,
 	.set_ios		= mvsd_set_ios,
 	.enable_sdio_irq	= mvsd_enable_sdio_irq,
 };
@@ -703,17 +684,18 @@ mv_conf_mbus_windows(struct mvsd_host *host,
 
 static int __init mvsd_probe(struct platform_device *pdev)
 {
+	struct device_node *np = pdev->dev.of_node;
 	struct mmc_host *mmc = NULL;
 	struct mvsd_host *host = NULL;
-	const struct mvsdio_platform_data *mvsd_data;
 	const struct mbus_dram_target_info *dram;
 	struct resource *r;
 	int ret, irq;
+	int gpio_card_detect, gpio_write_protect;
+	struct pinctrl *pinctrl;
 
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	irq = platform_get_irq(pdev, 0);
-	mvsd_data = pdev->dev.platform_data;
-	if (!r || irq < 0 || !mvsd_data)
+	if (!r || irq < 0)
 		return -ENXIO;
 
 	mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
@@ -725,8 +707,43 @@ static int __init mvsd_probe(struct platform_device *pdev)
 	host = mmc_priv(mmc);
 	host->mmc = mmc;
 	host->dev = &pdev->dev;
-	host->base_clock = mvsd_data->clock / 2;
-	host->clk = ERR_PTR(-EINVAL);
+
+	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+	if (IS_ERR(pinctrl))
+		dev_warn(&pdev->dev, "no pins associated\n");
+
+	/*
+	 * Some non-DT platforms do not pass a clock, and the clock
+	 * frequency is passed through platform_data. On DT platforms,
+	 * a clock must always be passed, even if there is no gatable
+	 * clock associated to the SDIO interface (it can simply be a
+	 * fixed rate clock).
+	 */
+	host->clk = devm_clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(host->clk))
+		clk_prepare_enable(host->clk);
+
+	if (np) {
+		if (IS_ERR(host->clk)) {
+			dev_err(&pdev->dev, "DT platforms must have a clock associated\n");
+			ret = -EINVAL;
+			goto out;
+		}
+
+		host->base_clock = clk_get_rate(host->clk) / 2;
+		gpio_card_detect = of_get_named_gpio(np, "cd-gpios", 0);
+		gpio_write_protect = of_get_named_gpio(np, "wp-gpios", 0);
+	} else {
+		const struct mvsdio_platform_data *mvsd_data;
+		mvsd_data = pdev->dev.platform_data;
+		if (!mvsd_data) {
+			ret = -ENXIO;
+			goto out;
+		}
+		host->base_clock = mvsd_data->clock / 2;
+		gpio_card_detect = mvsd_data->gpio_card_detect;
+		gpio_write_protect = mvsd_data->gpio_write_protect;
+	}
 
 	mmc->ops = &mvsd_ops;
 
@@ -765,43 +782,14 @@ static int __init mvsd_probe(struct platform_device *pdev)
 		goto out;
 	}
 
-	/* Not all platforms can gate the clock, so it is not
-	   an error if the clock does not exists. */
-	host->clk = devm_clk_get(&pdev->dev, NULL);
-	if (!IS_ERR(host->clk))
-		clk_prepare_enable(host->clk);
-
-	if (mvsd_data->gpio_card_detect) {
-		ret = devm_gpio_request_one(&pdev->dev,
-					    mvsd_data->gpio_card_detect,
-					    GPIOF_IN, DRIVER_NAME " cd");
-		if (ret == 0) {
-			irq = gpio_to_irq(mvsd_data->gpio_card_detect);
-			ret = devm_request_irq(&pdev->dev, irq,
-					       mvsd_card_detect_irq,
-					       IRQ_TYPE_EDGE_RISING |
-					       IRQ_TYPE_EDGE_FALLING,
-					       DRIVER_NAME " cd", host);
-			if (ret == 0)
-				host->gpio_card_detect =
-					mvsd_data->gpio_card_detect;
-			else
-				devm_gpio_free(&pdev->dev,
-					       mvsd_data->gpio_card_detect);
-		}
-	}
-	if (!host->gpio_card_detect)
+	if (gpio_is_valid(gpio_card_detect)) {
+		ret = mmc_gpio_request_cd(mmc, gpio_card_detect);
+		if (ret)
+			goto out;
+	} else
 		mmc->caps |= MMC_CAP_NEEDS_POLL;
 
-	if (mvsd_data->gpio_write_protect) {
-		ret = devm_gpio_request_one(&pdev->dev,
-					    mvsd_data->gpio_write_protect,
-					    GPIOF_IN, DRIVER_NAME " wp");
-		if (ret == 0) {
-			host->gpio_write_protect =
-				mvsd_data->gpio_write_protect;
-		}
-	}
+	mmc_gpio_request_ro(mmc, gpio_write_protect);
 
 	setup_timer(&host->timer, mvsd_timeout_timer, (unsigned long)host);
 	platform_set_drvdata(pdev, mmc);
@@ -811,15 +799,17 @@ static int __init mvsd_probe(struct platform_device *pdev)
 
 	pr_notice("%s: %s driver initialized, ",
 		  mmc_hostname(mmc), DRIVER_NAME);
-	if (host->gpio_card_detect)
+	if (!(mmc->caps & MMC_CAP_NEEDS_POLL))
 		printk("using GPIO %d for card detection\n",
-		       host->gpio_card_detect);
+		       gpio_card_detect);
 	else
 		printk("lacking card detect (fall back to polling)\n");
 	return 0;
 
 out:
 	if (mmc) {
+		mmc_gpio_free_cd(mmc);
+		mmc_gpio_free_ro(mmc);
 		if (!IS_ERR(host->clk))
 			clk_disable_unprepare(host->clk);
 		mmc_free_host(mmc);
@@ -834,6 +824,8 @@ static int __exit mvsd_remove(struct platform_device *pdev)
 
 	struct mvsd_host *host = mmc_priv(mmc);
 
+	mmc_gpio_free_cd(mmc);
+	mmc_gpio_free_ro(mmc);
 	mmc_remove_host(mmc);
 	del_timer_sync(&host->timer);
 	mvsd_power_down(host);
@@ -873,12 +865,19 @@ static int mvsd_resume(struct platform_device *dev)
 #define mvsd_resume	NULL
 #endif
 
+static const struct of_device_id mvsdio_dt_ids[] = {
+	{ .compatible = "marvell,orion-sdio" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mvsdio_dt_ids);
+
 static struct platform_driver mvsd_driver = {
 	.remove		= __exit_p(mvsd_remove),
 	.suspend	= mvsd_suspend,
 	.resume		= mvsd_resume,
 	.driver		= {
 		.name	= DRIVER_NAME,
+		.of_match_table = mvsdio_dt_ids,
 	},
 };
 
diff --git a/drivers/mmc/host/sdhci-bcm2835.c b/drivers/mmc/host/sdhci-bcm2835.c
new file mode 100644
index 000000000000..453825fcc5cf
--- /dev/null
+++ b/drivers/mmc/host/sdhci-bcm2835.c
@@ -0,0 +1,227 @@
+/*
+ * BCM2835 SDHCI
+ * Copyright (C) 2012 Stephen Warren
+ * Based on U-Boot's MMC driver for the BCM2835 by Oleksandr Tymoshenko & me
+ * Portions of the code there were obviously based on the Linux kernel at:
+ * git://github.com/raspberrypi/linux.git rpi-3.6.y
+ * commit f5b930b "Main bcm2708 linux port" signed-off-by Dom Cobley.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mmc/host.h>
+#include "sdhci-pltfm.h"
+
+/*
+ * 400KHz is max freq for card ID etc. Use that as min card clock. We need to
+ * know the min to enable static calculation of max BCM2835_SDHCI_WRITE_DELAY.
+ */
+#define MIN_FREQ 400000
+
+/*
+ * The Arasan has a bugette whereby it may lose the content of successive
+ * writes to registers that are within two SD-card clock cycles of each other
+ * (a clock domain crossing problem). It seems, however, that the data
+ * register does not have this problem, which is just as well - otherwise we'd
+ * have to nobble the DMA engine too.
+ *
+ * This should probably be dynamically calculated based on the actual card
+ * frequency. However, this is the longest we'll have to wait, and doesn't
+ * seem to slow access down too much, so the added complexity doesn't seem
+ * worth it for now.
+ *
+ * 1/MIN_FREQ is (max) time per tick of eMMC clock.
+ * 2/MIN_FREQ is time for two ticks.
+ * Multiply by 1000000 to get uS per two ticks.
+ * *1000000 for uSecs.
+ * +1 for hack rounding.
+ */
+#define BCM2835_SDHCI_WRITE_DELAY	(((2 * 1000000) / MIN_FREQ) + 1)
+
+struct bcm2835_sdhci {
+	struct clk *clk;
+	u32 shadow;
+};
+
+static void bcm2835_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
+{
+	writel(val, host->ioaddr + reg);
+
+	udelay(BCM2835_SDHCI_WRITE_DELAY);
+}
+
+static inline u32 bcm2835_sdhci_readl(struct sdhci_host *host, int reg)
+{
+	u32 val = readl(host->ioaddr + reg);
+
+	if (reg == SDHCI_CAPABILITIES)
+		val |= SDHCI_CAN_VDD_330;
+
+	return val;
+}
+
+static void bcm2835_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct bcm2835_sdhci *bcm2835_host = pltfm_host->priv;
+	u32 oldval = (reg == SDHCI_COMMAND) ? bcm2835_host->shadow :
+		bcm2835_sdhci_readl(host, reg & ~3);
+	u32 word_num = (reg >> 1) & 1;
+	u32 word_shift = word_num * 16;
+	u32 mask = 0xffff << word_shift;
+	u32 newval = (oldval & ~mask) | (val << word_shift);
+
+	if (reg == SDHCI_TRANSFER_MODE)
+		bcm2835_host->shadow = newval;
+	else
+		bcm2835_sdhci_writel(host, newval, reg & ~3);
+}
+
+static u16 bcm2835_sdhci_readw(struct sdhci_host *host, int reg)
+{
+	u32 val = bcm2835_sdhci_readl(host, (reg & ~3));
+	u32 word_num = (reg >> 1) & 1;
+	u32 word_shift = word_num * 16;
+	u32 word = (val >> word_shift) & 0xffff;
+
+	return word;
+}
+
+static void bcm2835_sdhci_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+	u32 oldval = bcm2835_sdhci_readl(host, reg & ~3);
+	u32 byte_num = reg & 3;
+	u32 byte_shift = byte_num * 8;
+	u32 mask = 0xff << byte_shift;
+	u32 newval = (oldval & ~mask) | (val << byte_shift);
+
+	bcm2835_sdhci_writel(host, newval, reg & ~3);
+}
+
+static u8 bcm2835_sdhci_readb(struct sdhci_host *host, int reg)
+{
+	u32 val = bcm2835_sdhci_readl(host, (reg & ~3));
+	u32 byte_num = reg & 3;
+	u32 byte_shift = byte_num * 8;
+	u32 byte = (val >> byte_shift) & 0xff;
+
+	return byte;
+}
+
+static unsigned int bcm2835_sdhci_get_max_clock(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct bcm2835_sdhci *bcm2835_host = pltfm_host->priv;
+
+	return clk_get_rate(bcm2835_host->clk);
+}
+
+unsigned int bcm2835_sdhci_get_min_clock(struct sdhci_host *host)
+{
+	return MIN_FREQ;
+}
+
+unsigned int bcm2835_sdhci_get_timeout_clock(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct bcm2835_sdhci *bcm2835_host = pltfm_host->priv;
+
+	return clk_get_rate(bcm2835_host->clk);
+}
+
+static struct sdhci_ops bcm2835_sdhci_ops = {
+	.write_l = bcm2835_sdhci_writel,
+	.write_w = bcm2835_sdhci_writew,
+	.write_b = bcm2835_sdhci_writeb,
+	.read_l = bcm2835_sdhci_readl,
+	.read_w = bcm2835_sdhci_readw,
+	.read_b = bcm2835_sdhci_readb,
+	.get_max_clock = bcm2835_sdhci_get_max_clock,
+	.get_min_clock = bcm2835_sdhci_get_min_clock,
+	.get_timeout_clock = bcm2835_sdhci_get_timeout_clock,
+};
+
+static struct sdhci_pltfm_data bcm2835_sdhci_pdata = {
+	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION,
+	.ops = &bcm2835_sdhci_ops,
+};
+
+static int bcm2835_sdhci_probe(struct platform_device *pdev)
+{
+	struct sdhci_host *host;
+	struct bcm2835_sdhci *bcm2835_host;
+	struct sdhci_pltfm_host *pltfm_host;
+	int ret;
+
+	host = sdhci_pltfm_init(pdev, &bcm2835_sdhci_pdata);
+	if (IS_ERR(host))
+		return PTR_ERR(host);
+
+	bcm2835_host = devm_kzalloc(&pdev->dev, sizeof(*bcm2835_host),
+					GFP_KERNEL);
+	if (!bcm2835_host) {
+		dev_err(mmc_dev(host->mmc),
+			"failed to allocate bcm2835_sdhci\n");
+		return -ENOMEM;
+	}
+
+	pltfm_host = sdhci_priv(host);
+	pltfm_host->priv = bcm2835_host;
+
+	bcm2835_host->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(bcm2835_host->clk)) {
+		ret = PTR_ERR(bcm2835_host->clk);
+		goto err;
+	}
+
+	return sdhci_add_host(host);
+
+err:
+	sdhci_pltfm_free(pdev);
+	return ret;
+}
+
+static int bcm2835_sdhci_remove(struct platform_device *pdev)
+{
+	struct sdhci_host *host = platform_get_drvdata(pdev);
+	int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
+
+	sdhci_remove_host(host, dead);
+	sdhci_pltfm_free(pdev);
+
+	return 0;
+}
+
+static const struct of_device_id bcm2835_sdhci_of_match[] = {
+	{ .compatible = "brcm,bcm2835-sdhci" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, bcm2835_sdhci_of_match);
+
+static struct platform_driver bcm2835_sdhci_driver = {
+	.driver = {
+		.name = "sdhci-bcm2835",
+		.owner = THIS_MODULE,
+		.of_match_table = bcm2835_sdhci_of_match,
+		.pm = SDHCI_PLTFM_PMOPS,
+	},
+	.probe = bcm2835_sdhci_probe,
+	.remove = bcm2835_sdhci_remove,
+};
+module_platform_driver(bcm2835_sdhci_driver);
+
+MODULE_DESCRIPTION("BCM2835 SDHCI driver");
+MODULE_AUTHOR("Stephen Warren");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index e07df812ff1e..ae68bc965ab5 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -21,6 +21,7 @@
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
 #include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
@@ -147,19 +148,8 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
 	struct pltfm_imx_data *imx_data = pltfm_host->priv;
 	struct esdhc_platform_data *boarddata = &imx_data->boarddata;
 
-	/* fake CARD_PRESENT flag */
 	u32 val = readl(host->ioaddr + reg);
 
-	if (unlikely((reg == SDHCI_PRESENT_STATE)
-			&& gpio_is_valid(boarddata->cd_gpio))) {
-		if (gpio_get_value(boarddata->cd_gpio))
-			/* no card, if a valid gpio says so... */
-			val &= ~SDHCI_CARD_PRESENT;
-		else
-			/* ... in all other cases assume card is present */
-			val |= SDHCI_CARD_PRESENT;
-	}
-
 	if (unlikely(reg == SDHCI_CAPABILITIES)) {
 		/* In FSL esdhc IC module, only bit20 is used to indicate the
 		 * ADMA2 capability of esdhc, but this bit is messed up on
@@ -192,13 +182,6 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
 	u32 data;
 
 	if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
-		if (boarddata->cd_type == ESDHC_CD_GPIO)
-			/*
-			 * These interrupts won't work with a custom
-			 * card_detect gpio (only applied to mx25/35)
-			 */
-			val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
-
 		if (val & SDHCI_INT_CARD_INT) {
 			/*
 			 * Clear and then set D3CD bit to avoid missing the
@@ -362,8 +345,7 @@ static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
 
 	switch (boarddata->wp_type) {
 	case ESDHC_WP_GPIO:
-		if (gpio_is_valid(boarddata->wp_gpio))
-			return gpio_get_value(boarddata->wp_gpio);
+		return mmc_gpio_get_ro(host->mmc);
 	case ESDHC_WP_CONTROLLER:
 		return !(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
			       SDHCI_WRITE_PROTECT);
@@ -394,14 +376,6 @@ static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
 	.ops = &sdhci_esdhc_ops,
 };
 
-static irqreturn_t cd_irq(int irq, void *data)
-{
-	struct sdhci_host *sdhost = (struct sdhci_host *)data;
-
-	tasklet_schedule(&sdhost->card_tasklet);
-	return IRQ_HANDLED;
-};
-
 #ifdef CONFIG_OF
 static int
 sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
@@ -527,37 +501,22 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
 
 	/* write_protect */
 	if (boarddata->wp_type == ESDHC_WP_GPIO) {
-		err = devm_gpio_request_one(&pdev->dev, boarddata->wp_gpio,
-					    GPIOF_IN, "ESDHC_WP");
+		err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
 		if (err) {
-			dev_warn(mmc_dev(host->mmc),
-				 "no write-protect pin available!\n");
-			boarddata->wp_gpio = -EINVAL;
+			dev_err(mmc_dev(host->mmc),
+				"failed to request write-protect gpio!\n");
+			goto disable_clk;
 		}
-	} else {
-		boarddata->wp_gpio = -EINVAL;
+		host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
 	}
 
 	/* card_detect */
-	if (boarddata->cd_type != ESDHC_CD_GPIO)
-		boarddata->cd_gpio = -EINVAL;
-
 	switch (boarddata->cd_type) {
 	case ESDHC_CD_GPIO:
-		err = devm_gpio_request_one(&pdev->dev, boarddata->cd_gpio,
-					    GPIOF_IN, "ESDHC_CD");
+		err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio);
 		if (err) {
 			dev_err(mmc_dev(host->mmc),
-				"no card-detect pin available!\n");
-			goto disable_clk;
-		}
-
-		err = devm_request_irq(&pdev->dev,
-			gpio_to_irq(boarddata->cd_gpio), cd_irq,
-			IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
-			mmc_hostname(host->mmc), host);
-		if (err) {
-			dev_err(mmc_dev(host->mmc), "request irq error\n");
+				"failed to request card-detect gpio!\n");
 			goto disable_clk;
 		}
 		/* fall through */
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index fad0966427fd..3d20c10fc571 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -311,12 +311,18 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, host);
 
+	if (pdata->pm_caps & MMC_PM_KEEP_POWER) {
+		device_init_wakeup(&pdev->dev, 1);
+		host->mmc->pm_flags |= MMC_PM_WAKE_SDIO_IRQ;
+	} else {
+		device_init_wakeup(&pdev->dev, 0);
+	}
+
 	return 0;
 
 err_add_host:
 	clk_disable_unprepare(clk);
 	clk_put(clk);
-	mmc_gpio_free_cd(host->mmc);
 err_cd_req:
 err_clk_get:
 	sdhci_pltfm_free(pdev);
@@ -329,16 +335,12 @@ static int sdhci_pxav3_remove(struct platform_device *pdev)
 	struct sdhci_host *host = platform_get_drvdata(pdev);
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 	struct sdhci_pxa *pxa = pltfm_host->priv;
-	struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
 
 	sdhci_remove_host(host, 1);
 
 	clk_disable_unprepare(pltfm_host->clk);
 	clk_put(pltfm_host->clk);
 
-	if (gpio_is_valid(pdata->ext_cd_gpio))
-		mmc_gpio_free_cd(host->mmc);
-
 	sdhci_pltfm_free(pdev);
 	kfree(pxa);
 
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6f0bfc0c8c9c..336ab06aeb2f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1189,6 +1189,15 @@ out:
1189 host->clock = clock; 1189 host->clock = clock;
1190} 1190}
1191 1191
1192static inline void sdhci_update_clock(struct sdhci_host *host)
1193{
1194 unsigned int clock;
1195
1196 clock = host->clock;
1197 host->clock = 0;
1198 sdhci_set_clock(host, clock);
1199}
1200
1192static int sdhci_set_power(struct sdhci_host *host, unsigned short power) 1201static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
1193{ 1202{
1194 u8 pwr = 0; 1203 u8 pwr = 0;
@@ -1258,7 +1267,7 @@ static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
1258static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) 1267static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1259{ 1268{
1260 struct sdhci_host *host; 1269 struct sdhci_host *host;
1261 bool present; 1270 int present;
1262 unsigned long flags; 1271 unsigned long flags;
1263 u32 tuning_opcode; 1272 u32 tuning_opcode;
1264 1273
@@ -1287,18 +1296,21 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1287 1296
1288 host->mrq = mrq; 1297 host->mrq = mrq;
1289 1298
1290 /* If polling, assume that the card is always present. */ 1299 /*
1291 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 1300 * Firstly check card presence from cd-gpio. The return could
1292 present = true; 1301 * be one of the following possibilities:
1293 else 1302 * negative: cd-gpio is not available
1294 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 1303 * zero: cd-gpio is used, and card is removed
1295 SDHCI_CARD_PRESENT; 1304 * one: cd-gpio is used, and card is present
1296 1305 */
1297 /* If we're using a cd-gpio, testing the presence bit might fail. */ 1306 present = mmc_gpio_get_cd(host->mmc);
1298 if (!present) { 1307 if (present < 0) {
1299 int ret = mmc_gpio_get_cd(host->mmc); 1308 /* If polling, assume that the card is always present. */
1300 if (ret > 0) 1309 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
1301 present = true; 1310 present = 1;
1311 else
1312 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
1313 SDHCI_CARD_PRESENT;
1302 } 1314 }
1303 1315
1304 if (!present || host->flags & SDHCI_DEVICE_DEAD) { 1316 if (!present || host->flags & SDHCI_DEVICE_DEAD) {
@@ -1415,7 +1427,6 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
1415 1427
1416 if (host->version >= SDHCI_SPEC_300) { 1428 if (host->version >= SDHCI_SPEC_300) {
1417 u16 clk, ctrl_2; 1429 u16 clk, ctrl_2;
1418 unsigned int clock;
1419 1430
1420 /* In case of UHS-I modes, set High Speed Enable */ 1431 /* In case of UHS-I modes, set High Speed Enable */
1421 if ((ios->timing == MMC_TIMING_MMC_HS200) || 1432 if ((ios->timing == MMC_TIMING_MMC_HS200) ||
@@ -1455,9 +1466,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
1455 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 1466 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1456 1467
1457 /* Re-enable SD Clock */ 1468 /* Re-enable SD Clock */
1458 clock = host->clock; 1469 sdhci_update_clock(host);
1459 host->clock = 0;
1460 sdhci_set_clock(host, clock);
1461 } 1470 }
1462 1471
1463 1472
@@ -1488,9 +1497,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
1488 } 1497 }
1489 1498
1490 /* Re-enable SD Clock */ 1499 /* Re-enable SD Clock */
1491 clock = host->clock; 1500 sdhci_update_clock(host);
1492 host->clock = 0;
1493 sdhci_set_clock(host, clock);
1494 } else 1501 } else
1495 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 1502 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1496 1503
@@ -2080,14 +2087,9 @@ static void sdhci_tasklet_finish(unsigned long param)
2080 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { 2087 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
2081 2088
2082 /* Some controllers need this kick or reset won't work here */ 2089 /* Some controllers need this kick or reset won't work here */
2083 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) { 2090 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2084 unsigned int clock;
2085
2086 /* This is to force an update */ 2091 /* This is to force an update */
2087 clock = host->clock; 2092 sdhci_update_clock(host);
2088 host->clock = 0;
2089 sdhci_set_clock(host, clock);
2090 }
2091 2093
2092 /* Spec says we should do both at the same time, but Ricoh 2094 /* Spec says we should do both at the same time, but Ricoh
2093 controllers do not like that. */ 2095 controllers do not like that. */
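All three call sites above replace the open-coded "save host->clock, zero it, call sdhci_set_clock()" sequence with sdhci_update_clock(). The helper itself is not visible in this excerpt (it appears to be added earlier in the same patch); judging from the code it replaces, it presumably amounts to the following sketch, which assumes the driver-internal struct sdhci_host and sdhci_set_clock():

/* Hedged reconstruction based on the removed three-line idiom above. */
static void example_update_clock(struct sdhci_host *host)
{
	unsigned int clock = host->clock;

	/* Zeroing host->clock first forces sdhci_set_clock() to reprogram */
	host->clock = 0;
	sdhci_set_clock(host, clock);
}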
@@ -2455,6 +2457,32 @@ out:
2455\*****************************************************************************/ 2457\*****************************************************************************/
2456 2458
2457#ifdef CONFIG_PM 2459#ifdef CONFIG_PM
2460void sdhci_enable_irq_wakeups(struct sdhci_host *host)
2461{
2462 u8 val;
2463 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2464 | SDHCI_WAKE_ON_INT;
2465
2466 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
 2467	val |= mask;
2468 /* Avoid fake wake up */
2469 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2470 val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
2471 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2472}
2473EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
2474
2475void sdhci_disable_irq_wakeups(struct sdhci_host *host)
2476{
2477 u8 val;
2478 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2479 | SDHCI_WAKE_ON_INT;
2480
2481 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2482 val &= ~mask;
2483 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2484}
2485EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups);
2458 2486
2459int sdhci_suspend_host(struct sdhci_host *host) 2487int sdhci_suspend_host(struct sdhci_host *host)
2460{ 2488{
@@ -2484,8 +2512,13 @@ int sdhci_suspend_host(struct sdhci_host *host)
2484 return ret; 2512 return ret;
2485 } 2513 }
2486 2514
2487 free_irq(host->irq, host); 2515 if (!device_may_wakeup(mmc_dev(host->mmc))) {
2488 2516 sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
2517 free_irq(host->irq, host);
2518 } else {
2519 sdhci_enable_irq_wakeups(host);
2520 enable_irq_wake(host->irq);
2521 }
2489 return ret; 2522 return ret;
2490} 2523}
2491 2524
@@ -2500,10 +2533,15 @@ int sdhci_resume_host(struct sdhci_host *host)
2500 host->ops->enable_dma(host); 2533 host->ops->enable_dma(host);
2501 } 2534 }
2502 2535
2503 ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, 2536 if (!device_may_wakeup(mmc_dev(host->mmc))) {
2504 mmc_hostname(host->mmc), host); 2537 ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
2505 if (ret) 2538 mmc_hostname(host->mmc), host);
2506 return ret; 2539 if (ret)
2540 return ret;
2541 } else {
2542 sdhci_disable_irq_wakeups(host);
2543 disable_irq_wake(host->irq);
2544 }
2507 2545
2508 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) && 2546 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
2509 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { 2547 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
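With the suspend and resume changes above, whether the interrupt line is freed or armed as a wake source hinges entirely on device_may_wakeup(). A hypothetical platform-glue fragment showing how a host driver would opt in (the example_* name is made up; device_init_wakeup() and device_may_wakeup() are the standard driver-model calls):

#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>

static int example_sdhci_glue_probe(struct platform_device *pdev)
{
	/* ... map registers, create and add the sdhci host as usual ... */

	/*
	 * Marking the device wakeup-capable makes device_may_wakeup()
	 * return true, so sdhci_suspend_host() keeps the IRQ and calls
	 * sdhci_enable_irq_wakeups() and enable_irq_wake() instead of
	 * masking and freeing the interrupt.
	 */
	device_init_wakeup(&pdev->dev, true);
	return 0;
}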
@@ -2531,17 +2569,6 @@ int sdhci_resume_host(struct sdhci_host *host)
2531} 2569}
2532 2570
2533EXPORT_SYMBOL_GPL(sdhci_resume_host); 2571EXPORT_SYMBOL_GPL(sdhci_resume_host);
2534
2535void sdhci_enable_irq_wakeups(struct sdhci_host *host)
2536{
2537 u8 val;
2538 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2539 val |= SDHCI_WAKE_ON_INT;
2540 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2541}
2542
2543EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
2544
2545#endif /* CONFIG_PM */ 2572#endif /* CONFIG_PM */
2546 2573
2547#ifdef CONFIG_PM_RUNTIME 2574#ifdef CONFIG_PM_RUNTIME
@@ -3139,6 +3166,7 @@ int sdhci_add_host(struct sdhci_host *host)
3139#ifdef SDHCI_USE_LEDS_CLASS 3166#ifdef SDHCI_USE_LEDS_CLASS
3140reset: 3167reset:
3141 sdhci_reset(host, SDHCI_RESET_ALL); 3168 sdhci_reset(host, SDHCI_RESET_ALL);
3169 sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
3142 free_irq(host->irq, host); 3170 free_irq(host->irq, host);
3143#endif 3171#endif
3144untasklet: 3172untasklet:
@@ -3181,6 +3209,7 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
3181 if (!dead) 3209 if (!dead)
3182 sdhci_reset(host, SDHCI_RESET_ALL); 3210 sdhci_reset(host, SDHCI_RESET_ALL);
3183 3211
3212 sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
3184 free_irq(host->irq, host); 3213 free_irq(host->irq, host);
3185 3214
3186 del_timer_sync(&host->timer); 3215 del_timer_sync(&host->timer);
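Both teardown paths above now call sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK) before free_irq(), so the controller is quiesced before the interrupt line is released. sdhci_mask_irqs() is a driver-internal helper that is not shown in this excerpt; it presumably drops the given bits from the interrupt enable registers, roughly as in this sketch:

/* Hedged sketch, assuming the usual SDHCI enable-register layout. */
static void example_mask_irqs(struct sdhci_host *host, u32 irqs)
{
	u32 ier = sdhci_readl(host, SDHCI_INT_ENABLE);

	ier &= ~irqs;
	/* Stop both status reporting and IRQ-line signalling for these bits */
	sdhci_writel(host, ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
}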
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 9a4c151067dd..0189efcb9e12 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -56,6 +56,7 @@
56#include <linux/mmc/sh_mmcif.h> 56#include <linux/mmc/sh_mmcif.h>
57#include <linux/mmc/slot-gpio.h> 57#include <linux/mmc/slot-gpio.h>
58#include <linux/mod_devicetable.h> 58#include <linux/mod_devicetable.h>
59#include <linux/mutex.h>
59#include <linux/pagemap.h> 60#include <linux/pagemap.h>
60#include <linux/platform_device.h> 61#include <linux/platform_device.h>
61#include <linux/pm_qos.h> 62#include <linux/pm_qos.h>
@@ -88,6 +89,7 @@
 88#define CMD_SET_TBIT (1 << 7) /* 1: transmission bit "Low" */ 89#define CMD_SET_TBIT (1 << 7) /* 1: transmission bit "Low" */
89#define CMD_SET_OPDM (1 << 6) /* 1: open/drain */ 90#define CMD_SET_OPDM (1 << 6) /* 1: open/drain */
90#define CMD_SET_CCSH (1 << 5) 91#define CMD_SET_CCSH (1 << 5)
92#define CMD_SET_DARS (1 << 2) /* Dual Data Rate */
91#define CMD_SET_DATW_1 ((0 << 1) | (0 << 0)) /* 1bit */ 93#define CMD_SET_DATW_1 ((0 << 1) | (0 << 0)) /* 1bit */
92#define CMD_SET_DATW_4 ((0 << 1) | (1 << 0)) /* 4bit */ 94#define CMD_SET_DATW_4 ((0 << 1) | (1 << 0)) /* 4bit */
93#define CMD_SET_DATW_8 ((1 << 1) | (0 << 0)) /* 8bit */ 95#define CMD_SET_DATW_8 ((1 << 1) | (0 << 0)) /* 8bit */
@@ -127,6 +129,10 @@
127 INT_CCSTO | INT_CRCSTO | INT_WDATTO | \ 129 INT_CCSTO | INT_CRCSTO | INT_WDATTO | \
128 INT_RDATTO | INT_RBSYTO | INT_RSPTO) 130 INT_RDATTO | INT_RBSYTO | INT_RSPTO)
129 131
132#define INT_ALL (INT_RBSYE | INT_CRSPE | INT_BUFREN | \
133 INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
134 INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)
135
130/* CE_INT_MASK */ 136/* CE_INT_MASK */
131#define MASK_ALL 0x00000000 137#define MASK_ALL 0x00000000
132#define MASK_MCCSDE (1 << 29) 138#define MASK_MCCSDE (1 << 29)
@@ -158,6 +164,11 @@
158 MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \ 164 MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \
159 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO) 165 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)
160 166
167#define MASK_CLEAN (INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE | \
168 MASK_MBUFREN | MASK_MBUFWEN | \
169 MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE | \
170 MASK_MCMD12RBE | MASK_MCMD12CRE)
171
161/* CE_HOST_STS1 */ 172/* CE_HOST_STS1 */
162#define STS1_CMDSEQ (1 << 31) 173#define STS1_CMDSEQ (1 << 31)
163 174
@@ -195,6 +206,7 @@ enum mmcif_state {
195 STATE_IDLE, 206 STATE_IDLE,
196 STATE_REQUEST, 207 STATE_REQUEST,
197 STATE_IOS, 208 STATE_IOS,
209 STATE_TIMEOUT,
198}; 210};
199 211
200enum mmcif_wait_for { 212enum mmcif_wait_for {
@@ -216,6 +228,7 @@ struct sh_mmcif_host {
216 struct clk *hclk; 228 struct clk *hclk;
217 unsigned int clk; 229 unsigned int clk;
218 int bus_width; 230 int bus_width;
231 unsigned char timing;
219 bool sd_error; 232 bool sd_error;
220 bool dying; 233 bool dying;
221 long timeout; 234 long timeout;
@@ -230,6 +243,7 @@ struct sh_mmcif_host {
230 int sg_blkidx; 243 int sg_blkidx;
231 bool power; 244 bool power;
232 bool card_present; 245 bool card_present;
246 struct mutex thread_lock;
233 247
234 /* DMA support */ 248 /* DMA support */
235 struct dma_chan *chan_rx; 249 struct dma_chan *chan_rx;
@@ -253,23 +267,14 @@ static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
253static void mmcif_dma_complete(void *arg) 267static void mmcif_dma_complete(void *arg)
254{ 268{
255 struct sh_mmcif_host *host = arg; 269 struct sh_mmcif_host *host = arg;
256 struct mmc_data *data = host->mrq->data; 270 struct mmc_request *mrq = host->mrq;
257 271
258 dev_dbg(&host->pd->dev, "Command completed\n"); 272 dev_dbg(&host->pd->dev, "Command completed\n");
259 273
260 if (WARN(!data, "%s: NULL data in DMA completion!\n", 274 if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
261 dev_name(&host->pd->dev))) 275 dev_name(&host->pd->dev)))
262 return; 276 return;
263 277
264 if (data->flags & MMC_DATA_READ)
265 dma_unmap_sg(host->chan_rx->device->dev,
266 data->sg, data->sg_len,
267 DMA_FROM_DEVICE);
268 else
269 dma_unmap_sg(host->chan_tx->device->dev,
270 data->sg, data->sg_len,
271 DMA_TO_DEVICE);
272
273 complete(&host->dma_complete); 278 complete(&host->dma_complete);
274} 279}
275 280
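The unmap dropped from mmcif_dma_complete() here reappears further down, after the IRQ thread's wait_for_completion_interruptible_timeout() call, so the DMA callback now does nothing but signal completion. As a hedged, standalone illustration of that general dmaengine pattern (the example_* names are illustrative; only the library calls are real):

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static void example_dma_done(void *arg)
{
	/* Keep the callback minimal: just wake the waiting thread. */
	complete(arg);
}

static int example_dma_read(struct dma_chan *chan, struct scatterlist *sg,
			    unsigned int sg_len, unsigned long timeout)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct dma_async_tx_descriptor *desc;
	int ret = 0, mapped;

	mapped = dma_map_sg(chan->device->dev, sg, sg_len, DMA_FROM_DEVICE);
	if (mapped <= 0)
		return -EIO;

	desc = dmaengine_prep_slave_sg(chan, sg, mapped, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EIO;
		goto unmap;
	}

	desc->callback = example_dma_done;
	desc->callback_param = &done;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	if (!wait_for_completion_timeout(&done, timeout)) {
		/* Woken by the timeout, not the callback: abort the transfer */
		dmaengine_terminate_all(chan);
		ret = -ETIMEDOUT;
	}

unmap:
	/* Unmapping happens in the waiter, never in the DMA callback */
	dma_unmap_sg(chan->device->dev, sg, sg_len, DMA_FROM_DEVICE);
	return ret;
}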
@@ -423,8 +428,6 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
423 if (ret < 0) 428 if (ret < 0)
424 goto ecfgrx; 429 goto ecfgrx;
425 430
426 init_completion(&host->dma_complete);
427
428 return; 431 return;
429 432
430ecfgrx: 433ecfgrx:
@@ -520,13 +523,16 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
520 } 523 }
521 524
522 if (state2 & STS2_CRC_ERR) { 525 if (state2 & STS2_CRC_ERR) {
523 dev_dbg(&host->pd->dev, ": CRC error\n"); 526 dev_err(&host->pd->dev, " CRC error: state %u, wait %u\n",
527 host->state, host->wait_for);
524 ret = -EIO; 528 ret = -EIO;
525 } else if (state2 & STS2_TIMEOUT_ERR) { 529 } else if (state2 & STS2_TIMEOUT_ERR) {
526 dev_dbg(&host->pd->dev, ": Timeout\n"); 530 dev_err(&host->pd->dev, " Timeout: state %u, wait %u\n",
531 host->state, host->wait_for);
527 ret = -ETIMEDOUT; 532 ret = -ETIMEDOUT;
528 } else { 533 } else {
529 dev_dbg(&host->pd->dev, ": End/Index error\n"); 534 dev_dbg(&host->pd->dev, " End/Index error: state %u, wait %u\n",
535 host->state, host->wait_for);
530 ret = -EIO; 536 ret = -EIO;
531 } 537 }
532 return ret; 538 return ret;
@@ -549,10 +555,7 @@ static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
549 host->pio_ptr = p; 555 host->pio_ptr = p;
550 } 556 }
551 557
552 if (host->sg_idx == data->sg_len) 558 return host->sg_idx != data->sg_len;
553 return false;
554
555 return true;
556} 559}
557 560
558static void sh_mmcif_single_read(struct sh_mmcif_host *host, 561static void sh_mmcif_single_read(struct sh_mmcif_host *host,
@@ -562,7 +565,6 @@ static void sh_mmcif_single_read(struct sh_mmcif_host *host,
562 BLOCK_SIZE_MASK) + 3; 565 BLOCK_SIZE_MASK) + 3;
563 566
564 host->wait_for = MMCIF_WAIT_FOR_READ; 567 host->wait_for = MMCIF_WAIT_FOR_READ;
565 schedule_delayed_work(&host->timeout_work, host->timeout);
566 568
567 /* buf read enable */ 569 /* buf read enable */
568 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); 570 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
@@ -576,6 +578,7 @@ static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
576 578
577 if (host->sd_error) { 579 if (host->sd_error) {
578 data->error = sh_mmcif_error_manage(host); 580 data->error = sh_mmcif_error_manage(host);
581 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
579 return false; 582 return false;
580 } 583 }
581 584
@@ -604,7 +607,7 @@ static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
604 host->sg_idx = 0; 607 host->sg_idx = 0;
605 host->sg_blkidx = 0; 608 host->sg_blkidx = 0;
606 host->pio_ptr = sg_virt(data->sg); 609 host->pio_ptr = sg_virt(data->sg);
607 schedule_delayed_work(&host->timeout_work, host->timeout); 610
608 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); 611 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
609} 612}
610 613
@@ -616,6 +619,7 @@ static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
616 619
617 if (host->sd_error) { 620 if (host->sd_error) {
618 data->error = sh_mmcif_error_manage(host); 621 data->error = sh_mmcif_error_manage(host);
622 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
619 return false; 623 return false;
620 } 624 }
621 625
@@ -627,7 +631,6 @@ static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
627 if (!sh_mmcif_next_block(host, p)) 631 if (!sh_mmcif_next_block(host, p))
628 return false; 632 return false;
629 633
630 schedule_delayed_work(&host->timeout_work, host->timeout);
631 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); 634 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
632 635
633 return true; 636 return true;
@@ -640,7 +643,6 @@ static void sh_mmcif_single_write(struct sh_mmcif_host *host,
640 BLOCK_SIZE_MASK) + 3; 643 BLOCK_SIZE_MASK) + 3;
641 644
642 host->wait_for = MMCIF_WAIT_FOR_WRITE; 645 host->wait_for = MMCIF_WAIT_FOR_WRITE;
643 schedule_delayed_work(&host->timeout_work, host->timeout);
644 646
645 /* buf write enable */ 647 /* buf write enable */
646 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); 648 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
@@ -654,6 +656,7 @@ static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
654 656
655 if (host->sd_error) { 657 if (host->sd_error) {
656 data->error = sh_mmcif_error_manage(host); 658 data->error = sh_mmcif_error_manage(host);
659 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
657 return false; 660 return false;
658 } 661 }
659 662
@@ -682,7 +685,7 @@ static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
682 host->sg_idx = 0; 685 host->sg_idx = 0;
683 host->sg_blkidx = 0; 686 host->sg_blkidx = 0;
684 host->pio_ptr = sg_virt(data->sg); 687 host->pio_ptr = sg_virt(data->sg);
685 schedule_delayed_work(&host->timeout_work, host->timeout); 688
686 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); 689 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
687} 690}
688 691
@@ -694,6 +697,7 @@ static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
694 697
695 if (host->sd_error) { 698 if (host->sd_error) {
696 data->error = sh_mmcif_error_manage(host); 699 data->error = sh_mmcif_error_manage(host);
700 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
697 return false; 701 return false;
698 } 702 }
699 703
@@ -705,7 +709,6 @@ static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
705 if (!sh_mmcif_next_block(host, p)) 709 if (!sh_mmcif_next_block(host, p))
706 return false; 710 return false;
707 711
708 schedule_delayed_work(&host->timeout_work, host->timeout);
709 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); 712 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
710 713
711 return true; 714 return true;
@@ -756,6 +759,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
756 } 759 }
757 switch (opc) { 760 switch (opc) {
758 /* RBSY */ 761 /* RBSY */
762 case MMC_SLEEP_AWAKE:
759 case MMC_SWITCH: 763 case MMC_SWITCH:
760 case MMC_STOP_TRANSMISSION: 764 case MMC_STOP_TRANSMISSION:
761 case MMC_SET_WRITE_PROT: 765 case MMC_SET_WRITE_PROT:
@@ -781,6 +785,17 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
781 dev_err(&host->pd->dev, "Unsupported bus width.\n"); 785 dev_err(&host->pd->dev, "Unsupported bus width.\n");
782 break; 786 break;
783 } 787 }
788 switch (host->timing) {
789 case MMC_TIMING_UHS_DDR50:
790 /*
 791		 * MMC core will only set this timing if the host
792 * advertises the MMC_CAP_UHS_DDR50 capability. MMCIF
793 * implementations with this capability, e.g. sh73a0,
794 * will have to set it in their platform data.
795 */
796 tmp |= CMD_SET_DARS;
797 break;
798 }
784 } 799 }
785 /* DWEN */ 800 /* DWEN */
786 if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) 801 if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
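The new CMD_SET_DARS handling above only matters if the MMC core ever selects MMC_TIMING_UHS_DDR50, which, per the comment, requires the platform to advertise MMC_CAP_UHS_DDR50; sh_mmcif_probe() ORs pd->caps into mmc->caps further down in this patch. A hypothetical board-file fragment (only the .caps field is taken from this driver's platform data; the extra 8-bit cap is illustrative):

#include <linux/mmc/host.h>
#include <linux/mmc/sh_mmcif.h>

static struct sh_mmcif_plat_data example_mmcif_pdata = {
	/* Advertise DDR so the core may pick MMC_TIMING_UHS_DDR50,
	 * which sh_mmcif_set_cmd() then turns into CMD_SET_DARS. */
	.caps	= MMC_CAP_UHS_DDR50 | MMC_CAP_8_BIT_DATA,
};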
@@ -824,7 +839,7 @@ static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
824 sh_mmcif_single_read(host, mrq); 839 sh_mmcif_single_read(host, mrq);
825 return 0; 840 return 0;
826 default: 841 default:
827 dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc); 842 dev_err(&host->pd->dev, "Unsupported CMD%d\n", opc);
828 return -EINVAL; 843 return -EINVAL;
829 } 844 }
830} 845}
@@ -838,6 +853,7 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
838 853
839 switch (opc) { 854 switch (opc) {
840 /* response busy check */ 855 /* response busy check */
856 case MMC_SLEEP_AWAKE:
841 case MMC_SWITCH: 857 case MMC_SWITCH:
842 case MMC_STOP_TRANSMISSION: 858 case MMC_STOP_TRANSMISSION:
843 case MMC_SET_WRITE_PROT: 859 case MMC_SET_WRITE_PROT:
@@ -885,7 +901,6 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
885 } 901 }
886 902
887 host->wait_for = MMCIF_WAIT_FOR_STOP; 903 host->wait_for = MMCIF_WAIT_FOR_STOP;
888 schedule_delayed_work(&host->timeout_work, host->timeout);
889} 904}
890 905
891static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) 906static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -895,6 +910,7 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
895 910
896 spin_lock_irqsave(&host->lock, flags); 911 spin_lock_irqsave(&host->lock, flags);
897 if (host->state != STATE_IDLE) { 912 if (host->state != STATE_IDLE) {
913 dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
898 spin_unlock_irqrestore(&host->lock, flags); 914 spin_unlock_irqrestore(&host->lock, flags);
899 mrq->cmd->error = -EAGAIN; 915 mrq->cmd->error = -EAGAIN;
900 mmc_request_done(mmc, mrq); 916 mmc_request_done(mmc, mrq);
@@ -911,6 +927,7 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
911 if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR) 927 if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR)
912 break; 928 break;
913 case MMC_APP_CMD: 929 case MMC_APP_CMD:
930 case SD_IO_RW_DIRECT:
914 host->state = STATE_IDLE; 931 host->state = STATE_IDLE;
915 mrq->cmd->error = -ETIMEDOUT; 932 mrq->cmd->error = -ETIMEDOUT;
916 mmc_request_done(mmc, mrq); 933 mmc_request_done(mmc, mrq);
@@ -957,6 +974,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
957 974
958 spin_lock_irqsave(&host->lock, flags); 975 spin_lock_irqsave(&host->lock, flags);
959 if (host->state != STATE_IDLE) { 976 if (host->state != STATE_IDLE) {
977 dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
960 spin_unlock_irqrestore(&host->lock, flags); 978 spin_unlock_irqrestore(&host->lock, flags);
961 return; 979 return;
962 } 980 }
@@ -981,7 +999,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
981 } 999 }
982 } 1000 }
983 if (host->power) { 1001 if (host->power) {
984 pm_runtime_put(&host->pd->dev); 1002 pm_runtime_put_sync(&host->pd->dev);
985 clk_disable(host->hclk); 1003 clk_disable(host->hclk);
986 host->power = false; 1004 host->power = false;
987 if (ios->power_mode == MMC_POWER_OFF) 1005 if (ios->power_mode == MMC_POWER_OFF)
@@ -1001,6 +1019,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1001 sh_mmcif_clock_control(host, ios->clock); 1019 sh_mmcif_clock_control(host, ios->clock);
1002 } 1020 }
1003 1021
1022 host->timing = ios->timing;
1004 host->bus_width = ios->bus_width; 1023 host->bus_width = ios->bus_width;
1005 host->state = STATE_IDLE; 1024 host->state = STATE_IDLE;
1006} 1025}
@@ -1038,14 +1057,14 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
1038 case MMC_SELECT_CARD: 1057 case MMC_SELECT_CARD:
1039 case MMC_APP_CMD: 1058 case MMC_APP_CMD:
1040 cmd->error = -ETIMEDOUT; 1059 cmd->error = -ETIMEDOUT;
1041 host->sd_error = false;
1042 break; 1060 break;
1043 default: 1061 default:
1044 cmd->error = sh_mmcif_error_manage(host); 1062 cmd->error = sh_mmcif_error_manage(host);
1045 dev_dbg(&host->pd->dev, "Cmd(d'%d) error %d\n",
1046 cmd->opcode, cmd->error);
1047 break; 1063 break;
1048 } 1064 }
1065 dev_dbg(&host->pd->dev, "CMD%d error %d\n",
1066 cmd->opcode, cmd->error);
1067 host->sd_error = false;
1049 return false; 1068 return false;
1050 } 1069 }
1051 if (!(cmd->flags & MMC_RSP_PRESENT)) { 1070 if (!(cmd->flags & MMC_RSP_PRESENT)) {
@@ -1058,6 +1077,12 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
1058 if (!data) 1077 if (!data)
1059 return false; 1078 return false;
1060 1079
1080 /*
1081 * Completion can be signalled from DMA callback and error, so, have to
1082 * reset here, before setting .dma_active
1083 */
1084 init_completion(&host->dma_complete);
1085
1061 if (data->flags & MMC_DATA_READ) { 1086 if (data->flags & MMC_DATA_READ) {
1062 if (host->chan_rx) 1087 if (host->chan_rx)
1063 sh_mmcif_start_dma_rx(host); 1088 sh_mmcif_start_dma_rx(host);
@@ -1068,34 +1093,47 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
1068 1093
1069 if (!host->dma_active) { 1094 if (!host->dma_active) {
1070 data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode); 1095 data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
1071 if (!data->error) 1096 return !data->error;
1072 return true;
1073 return false;
1074 } 1097 }
1075 1098
1076 /* Running in the IRQ thread, can sleep */ 1099 /* Running in the IRQ thread, can sleep */
1077 time = wait_for_completion_interruptible_timeout(&host->dma_complete, 1100 time = wait_for_completion_interruptible_timeout(&host->dma_complete,
1078 host->timeout); 1101 host->timeout);
1102
1103 if (data->flags & MMC_DATA_READ)
1104 dma_unmap_sg(host->chan_rx->device->dev,
1105 data->sg, data->sg_len,
1106 DMA_FROM_DEVICE);
1107 else
1108 dma_unmap_sg(host->chan_tx->device->dev,
1109 data->sg, data->sg_len,
1110 DMA_TO_DEVICE);
1111
1079 if (host->sd_error) { 1112 if (host->sd_error) {
1080 dev_err(host->mmc->parent, 1113 dev_err(host->mmc->parent,
1081 "Error IRQ while waiting for DMA completion!\n"); 1114 "Error IRQ while waiting for DMA completion!\n");
1082 /* Woken up by an error IRQ: abort DMA */ 1115 /* Woken up by an error IRQ: abort DMA */
1083 if (data->flags & MMC_DATA_READ)
1084 dmaengine_terminate_all(host->chan_rx);
1085 else
1086 dmaengine_terminate_all(host->chan_tx);
1087 data->error = sh_mmcif_error_manage(host); 1116 data->error = sh_mmcif_error_manage(host);
1088 } else if (!time) { 1117 } else if (!time) {
1118 dev_err(host->mmc->parent, "DMA timeout!\n");
1089 data->error = -ETIMEDOUT; 1119 data->error = -ETIMEDOUT;
1090 } else if (time < 0) { 1120 } else if (time < 0) {
1121 dev_err(host->mmc->parent,
1122 "wait_for_completion_...() error %ld!\n", time);
1091 data->error = time; 1123 data->error = time;
1092 } 1124 }
1093 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, 1125 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
1094 BUF_ACC_DMAREN | BUF_ACC_DMAWEN); 1126 BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
1095 host->dma_active = false; 1127 host->dma_active = false;
1096 1128
1097 if (data->error) 1129 if (data->error) {
1098 data->bytes_xfered = 0; 1130 data->bytes_xfered = 0;
1131 /* Abort DMA */
1132 if (data->flags & MMC_DATA_READ)
1133 dmaengine_terminate_all(host->chan_rx);
1134 else
1135 dmaengine_terminate_all(host->chan_tx);
1136 }
1099 1137
1100 return false; 1138 return false;
1101} 1139}
@@ -1103,10 +1141,21 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
1103static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) 1141static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
1104{ 1142{
1105 struct sh_mmcif_host *host = dev_id; 1143 struct sh_mmcif_host *host = dev_id;
1106 struct mmc_request *mrq = host->mrq; 1144 struct mmc_request *mrq;
1145 bool wait = false;
1107 1146
1108 cancel_delayed_work_sync(&host->timeout_work); 1147 cancel_delayed_work_sync(&host->timeout_work);
1109 1148
1149 mutex_lock(&host->thread_lock);
1150
1151 mrq = host->mrq;
1152 if (!mrq) {
1153 dev_dbg(&host->pd->dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
1154 host->state, host->wait_for);
1155 mutex_unlock(&host->thread_lock);
1156 return IRQ_HANDLED;
1157 }
1158
1110 /* 1159 /*
1111 * All handlers return true, if processing continues, and false, if the 1160 * All handlers return true, if processing continues, and false, if the
1112 * request has to be completed - successfully or not 1161 * request has to be completed - successfully or not
@@ -1114,35 +1163,32 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
1114 switch (host->wait_for) { 1163 switch (host->wait_for) {
1115 case MMCIF_WAIT_FOR_REQUEST: 1164 case MMCIF_WAIT_FOR_REQUEST:
1116 /* We're too late, the timeout has already kicked in */ 1165 /* We're too late, the timeout has already kicked in */
1166 mutex_unlock(&host->thread_lock);
1117 return IRQ_HANDLED; 1167 return IRQ_HANDLED;
1118 case MMCIF_WAIT_FOR_CMD: 1168 case MMCIF_WAIT_FOR_CMD:
1119 if (sh_mmcif_end_cmd(host)) 1169 /* Wait for data? */
1120 /* Wait for data */ 1170 wait = sh_mmcif_end_cmd(host);
1121 return IRQ_HANDLED;
1122 break; 1171 break;
1123 case MMCIF_WAIT_FOR_MREAD: 1172 case MMCIF_WAIT_FOR_MREAD:
1124 if (sh_mmcif_mread_block(host)) 1173 /* Wait for more data? */
1125 /* Wait for more data */ 1174 wait = sh_mmcif_mread_block(host);
1126 return IRQ_HANDLED;
1127 break; 1175 break;
1128 case MMCIF_WAIT_FOR_READ: 1176 case MMCIF_WAIT_FOR_READ:
1129 if (sh_mmcif_read_block(host)) 1177 /* Wait for data end? */
1130 /* Wait for data end */ 1178 wait = sh_mmcif_read_block(host);
1131 return IRQ_HANDLED;
1132 break; 1179 break;
1133 case MMCIF_WAIT_FOR_MWRITE: 1180 case MMCIF_WAIT_FOR_MWRITE:
1134 if (sh_mmcif_mwrite_block(host)) 1181 /* Wait data to write? */
1135 /* Wait data to write */ 1182 wait = sh_mmcif_mwrite_block(host);
1136 return IRQ_HANDLED;
1137 break; 1183 break;
1138 case MMCIF_WAIT_FOR_WRITE: 1184 case MMCIF_WAIT_FOR_WRITE:
1139 if (sh_mmcif_write_block(host)) 1185 /* Wait for data end? */
1140 /* Wait for data end */ 1186 wait = sh_mmcif_write_block(host);
1141 return IRQ_HANDLED;
1142 break; 1187 break;
1143 case MMCIF_WAIT_FOR_STOP: 1188 case MMCIF_WAIT_FOR_STOP:
1144 if (host->sd_error) { 1189 if (host->sd_error) {
1145 mrq->stop->error = sh_mmcif_error_manage(host); 1190 mrq->stop->error = sh_mmcif_error_manage(host);
1191 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->stop->error);
1146 break; 1192 break;
1147 } 1193 }
1148 sh_mmcif_get_cmd12response(host, mrq->stop); 1194 sh_mmcif_get_cmd12response(host, mrq->stop);
@@ -1150,13 +1196,22 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
1150 break; 1196 break;
1151 case MMCIF_WAIT_FOR_READ_END: 1197 case MMCIF_WAIT_FOR_READ_END:
1152 case MMCIF_WAIT_FOR_WRITE_END: 1198 case MMCIF_WAIT_FOR_WRITE_END:
1153 if (host->sd_error) 1199 if (host->sd_error) {
1154 mrq->data->error = sh_mmcif_error_manage(host); 1200 mrq->data->error = sh_mmcif_error_manage(host);
1201 dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->data->error);
1202 }
1155 break; 1203 break;
1156 default: 1204 default:
1157 BUG(); 1205 BUG();
1158 } 1206 }
1159 1207
1208 if (wait) {
1209 schedule_delayed_work(&host->timeout_work, host->timeout);
1210 /* Wait for more data */
1211 mutex_unlock(&host->thread_lock);
1212 return IRQ_HANDLED;
1213 }
1214
1160 if (host->wait_for != MMCIF_WAIT_FOR_STOP) { 1215 if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
1161 struct mmc_data *data = mrq->data; 1216 struct mmc_data *data = mrq->data;
1162 if (!mrq->cmd->error && data && !data->error) 1217 if (!mrq->cmd->error && data && !data->error)
@@ -1165,8 +1220,11 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
1165 1220
1166 if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) { 1221 if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
1167 sh_mmcif_stop_cmd(host, mrq); 1222 sh_mmcif_stop_cmd(host, mrq);
1168 if (!mrq->stop->error) 1223 if (!mrq->stop->error) {
1224 schedule_delayed_work(&host->timeout_work, host->timeout);
1225 mutex_unlock(&host->thread_lock);
1169 return IRQ_HANDLED; 1226 return IRQ_HANDLED;
1227 }
1170 } 1228 }
1171 } 1229 }
1172 1230
@@ -1175,6 +1233,8 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
1175 host->mrq = NULL; 1233 host->mrq = NULL;
1176 mmc_request_done(host->mmc, mrq); 1234 mmc_request_done(host->mmc, mrq);
1177 1235
1236 mutex_unlock(&host->thread_lock);
1237
1178 return IRQ_HANDLED; 1238 return IRQ_HANDLED;
1179} 1239}
1180 1240
@@ -1182,56 +1242,22 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
1182{ 1242{
1183 struct sh_mmcif_host *host = dev_id; 1243 struct sh_mmcif_host *host = dev_id;
1184 u32 state; 1244 u32 state;
1185 int err = 0;
1186 1245
1187 state = sh_mmcif_readl(host->addr, MMCIF_CE_INT); 1246 state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
1247 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
1248 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);
1188 1249
1189 if (state & INT_ERR_STS) { 1250 if (state & ~MASK_CLEAN)
1190 /* error interrupts - process first */ 1251 dev_dbg(&host->pd->dev, "IRQ state = 0x%08x incompletely cleared\n",
1191 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state); 1252 state);
1192 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); 1253
1193 err = 1; 1254 if (state & INT_ERR_STS || state & ~INT_ALL) {
1194 } else if (state & INT_RBSYE) {
1195 sh_mmcif_writel(host->addr, MMCIF_CE_INT,
1196 ~(INT_RBSYE | INT_CRSPE));
1197 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
1198 } else if (state & INT_CRSPE) {
1199 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_CRSPE);
1200 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE);
1201 } else if (state & INT_BUFREN) {
1202 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFREN);
1203 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
1204 } else if (state & INT_BUFWEN) {
1205 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFWEN);
1206 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
1207 } else if (state & INT_CMD12DRE) {
1208 sh_mmcif_writel(host->addr, MMCIF_CE_INT,
1209 ~(INT_CMD12DRE | INT_CMD12RBE |
1210 INT_CMD12CRE | INT_BUFRE));
1211 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
1212 } else if (state & INT_BUFRE) {
1213 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE);
1214 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
1215 } else if (state & INT_DTRANE) {
1216 sh_mmcif_writel(host->addr, MMCIF_CE_INT,
1217 ~(INT_CMD12DRE | INT_CMD12RBE |
1218 INT_CMD12CRE | INT_DTRANE));
1219 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
1220 } else if (state & INT_CMD12RBE) {
1221 sh_mmcif_writel(host->addr, MMCIF_CE_INT,
1222 ~(INT_CMD12RBE | INT_CMD12CRE));
1223 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
1224 } else {
1225 dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state);
1226 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
1227 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
1228 err = 1;
1229 }
1230 if (err) {
1231 host->sd_error = true; 1255 host->sd_error = true;
1232 dev_dbg(&host->pd->dev, "int err state = %08x\n", state); 1256 dev_dbg(&host->pd->dev, "int err state = 0x%08x\n", state);
1233 } 1257 }
1234 if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) { 1258 if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
1259 if (!host->mrq)
1260 dev_dbg(&host->pd->dev, "NULL IRQ state = 0x%08x\n", state);
1235 if (!host->dma_active) 1261 if (!host->dma_active)
1236 return IRQ_WAKE_THREAD; 1262 return IRQ_WAKE_THREAD;
1237 else if (host->sd_error) 1263 else if (host->sd_error)
@@ -1248,11 +1274,24 @@ static void mmcif_timeout_work(struct work_struct *work)
1248 struct delayed_work *d = container_of(work, struct delayed_work, work); 1274 struct delayed_work *d = container_of(work, struct delayed_work, work);
1249 struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work); 1275 struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
1250 struct mmc_request *mrq = host->mrq; 1276 struct mmc_request *mrq = host->mrq;
1277 unsigned long flags;
1251 1278
1252 if (host->dying) 1279 if (host->dying)
1253 /* Don't run after mmc_remove_host() */ 1280 /* Don't run after mmc_remove_host() */
1254 return; 1281 return;
1255 1282
1283 dev_err(&host->pd->dev, "Timeout waiting for %u on CMD%u\n",
1284 host->wait_for, mrq->cmd->opcode);
1285
1286 spin_lock_irqsave(&host->lock, flags);
1287 if (host->state == STATE_IDLE) {
1288 spin_unlock_irqrestore(&host->lock, flags);
1289 return;
1290 }
1291
1292 host->state = STATE_TIMEOUT;
1293 spin_unlock_irqrestore(&host->lock, flags);
1294
1256 /* 1295 /*
1257 * Handle races with cancel_delayed_work(), unless 1296 * Handle races with cancel_delayed_work(), unless
1258 * cancel_delayed_work_sync() is used 1297 * cancel_delayed_work_sync() is used
@@ -1306,10 +1345,11 @@ static int sh_mmcif_probe(struct platform_device *pdev)
1306 struct sh_mmcif_plat_data *pd = pdev->dev.platform_data; 1345 struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
1307 struct resource *res; 1346 struct resource *res;
1308 void __iomem *reg; 1347 void __iomem *reg;
1348 const char *name;
1309 1349
1310 irq[0] = platform_get_irq(pdev, 0); 1350 irq[0] = platform_get_irq(pdev, 0);
1311 irq[1] = platform_get_irq(pdev, 1); 1351 irq[1] = platform_get_irq(pdev, 1);
1312 if (irq[0] < 0 || irq[1] < 0) { 1352 if (irq[0] < 0) {
1313 dev_err(&pdev->dev, "Get irq error\n"); 1353 dev_err(&pdev->dev, "Get irq error\n");
1314 return -ENXIO; 1354 return -ENXIO;
1315 } 1355 }
@@ -1332,7 +1372,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
1332 host = mmc_priv(mmc); 1372 host = mmc_priv(mmc);
1333 host->mmc = mmc; 1373 host->mmc = mmc;
1334 host->addr = reg; 1374 host->addr = reg;
1335 host->timeout = 1000; 1375 host->timeout = msecs_to_jiffies(1000);
1336 1376
1337 host->pd = pdev; 1377 host->pd = pdev;
1338 1378
@@ -1341,7 +1381,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
1341 mmc->ops = &sh_mmcif_ops; 1381 mmc->ops = &sh_mmcif_ops;
1342 sh_mmcif_init_ocr(host); 1382 sh_mmcif_init_ocr(host);
1343 1383
1344 mmc->caps = MMC_CAP_MMC_HIGHSPEED; 1384 mmc->caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
1345 if (pd && pd->caps) 1385 if (pd && pd->caps)
1346 mmc->caps |= pd->caps; 1386 mmc->caps |= pd->caps;
1347 mmc->max_segs = 32; 1387 mmc->max_segs = 32;
@@ -1374,15 +1414,19 @@ static int sh_mmcif_probe(struct platform_device *pdev)
1374 sh_mmcif_sync_reset(host); 1414 sh_mmcif_sync_reset(host);
1375 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); 1415 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1376 1416
1377 ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host); 1417 name = irq[1] < 0 ? dev_name(&pdev->dev) : "sh_mmc:error";
1418 ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, name, host);
1378 if (ret) { 1419 if (ret) {
1379 dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n"); 1420 dev_err(&pdev->dev, "request_irq error (%s)\n", name);
1380 goto ereqirq0; 1421 goto ereqirq0;
1381 } 1422 }
1382 ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host); 1423 if (irq[1] >= 0) {
1383 if (ret) { 1424 ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt,
1384 dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n"); 1425 0, "sh_mmc:int", host);
1385 goto ereqirq1; 1426 if (ret) {
1427 dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
1428 goto ereqirq1;
1429 }
1386 } 1430 }
1387 1431
1388 if (pd && pd->use_cd_gpio) { 1432 if (pd && pd->use_cd_gpio) {
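The probe rework above makes the second interrupt optional: only irq[0] is required, the request name falls back to dev_name() when there is no separate error/int pair, and irq[1] is requested (and, in the later hunks, freed) only when it exists. A hypothetical board-file resource table for such a single-IRQ MMCIF instance (the address and IRQ number are made up):

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct resource example_mmcif_resources[] = {
	DEFINE_RES_MEM(0xe6bd0000, 0x100),	/* register window (made up) */
	DEFINE_RES_IRQ(123),			/* single combined MMCIF IRQ */
};

static struct platform_device example_mmcif_device = {
	.name		= "sh_mmcif",
	.id		= 0,
	.resource	= example_mmcif_resources,
	.num_resources	= ARRAY_SIZE(example_mmcif_resources),
};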
@@ -1391,6 +1435,8 @@ static int sh_mmcif_probe(struct platform_device *pdev)
1391 goto erqcd; 1435 goto erqcd;
1392 } 1436 }
1393 1437
1438 mutex_init(&host->thread_lock);
1439
1394 clk_disable(host->hclk); 1440 clk_disable(host->hclk);
1395 ret = mmc_add_host(mmc); 1441 ret = mmc_add_host(mmc);
1396 if (ret < 0) 1442 if (ret < 0)
@@ -1404,10 +1450,9 @@ static int sh_mmcif_probe(struct platform_device *pdev)
1404 return ret; 1450 return ret;
1405 1451
1406emmcaddh: 1452emmcaddh:
1407 if (pd && pd->use_cd_gpio)
1408 mmc_gpio_free_cd(mmc);
1409erqcd: 1453erqcd:
1410 free_irq(irq[1], host); 1454 if (irq[1] >= 0)
1455 free_irq(irq[1], host);
1411ereqirq1: 1456ereqirq1:
1412 free_irq(irq[0], host); 1457 free_irq(irq[0], host);
1413ereqirq0: 1458ereqirq0:
@@ -1427,7 +1472,6 @@ ealloch:
1427static int sh_mmcif_remove(struct platform_device *pdev) 1472static int sh_mmcif_remove(struct platform_device *pdev)
1428{ 1473{
1429 struct sh_mmcif_host *host = platform_get_drvdata(pdev); 1474 struct sh_mmcif_host *host = platform_get_drvdata(pdev);
1430 struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
1431 int irq[2]; 1475 int irq[2];
1432 1476
1433 host->dying = true; 1477 host->dying = true;
@@ -1436,9 +1480,6 @@ static int sh_mmcif_remove(struct platform_device *pdev)
1436 1480
1437 dev_pm_qos_hide_latency_limit(&pdev->dev); 1481 dev_pm_qos_hide_latency_limit(&pdev->dev);
1438 1482
1439 if (pd && pd->use_cd_gpio)
1440 mmc_gpio_free_cd(host->mmc);
1441
1442 mmc_remove_host(host->mmc); 1483 mmc_remove_host(host->mmc);
1443 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); 1484 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1444 1485
@@ -1456,7 +1497,8 @@ static int sh_mmcif_remove(struct platform_device *pdev)
1456 irq[1] = platform_get_irq(pdev, 1); 1497 irq[1] = platform_get_irq(pdev, 1);
1457 1498
1458 free_irq(irq[0], host); 1499 free_irq(irq[0], host);
1459 free_irq(irq[1], host); 1500 if (irq[1] >= 0)
1501 free_irq(irq[1], host);
1460 1502
1461 platform_set_drvdata(pdev, NULL); 1503 platform_set_drvdata(pdev, NULL);
1462 1504
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 50bf495a988b..0f992e9ffc73 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -1060,16 +1060,8 @@ EXPORT_SYMBOL(tmio_mmc_host_probe);
1060void tmio_mmc_host_remove(struct tmio_mmc_host *host) 1060void tmio_mmc_host_remove(struct tmio_mmc_host *host)
1061{ 1061{
1062 struct platform_device *pdev = host->pdev; 1062 struct platform_device *pdev = host->pdev;
1063 struct tmio_mmc_data *pdata = host->pdata;
1064 struct mmc_host *mmc = host->mmc; 1063 struct mmc_host *mmc = host->mmc;
1065 1064
1066 if (pdata->flags & TMIO_MMC_USE_GPIO_CD)
1067 /*
1068 * This means we can miss a card-eject, but this is anyway
1069 * possible, because of delayed processing of hotplug events.
1070 */
1071 mmc_gpio_free_cd(mmc);
1072
1073 if (!host->native_hotplug) 1065 if (!host->native_hotplug)
1074 pm_runtime_get_sync(&pdev->dev); 1066 pm_runtime_get_sync(&pdev->dev);
1075 1067