Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/card/block.c | 8
-rw-r--r--  drivers/mmc/card/queue.c | 18
-rw-r--r--  drivers/mmc/card/sdio_uart.c | 303
-rw-r--r--  drivers/mmc/core/Kconfig | 4
-rw-r--r--  drivers/mmc/core/core.c | 16
-rw-r--r--  drivers/mmc/core/core.h | 2
-rw-r--r--  drivers/mmc/core/mmc.c | 25
-rw-r--r--  drivers/mmc/core/sd.c | 21
-rw-r--r--  drivers/mmc/core/sdio.c | 5
-rw-r--r--  drivers/mmc/core/sdio_bus.c | 7
-rw-r--r--  drivers/mmc/core/sdio_cis.c | 167
-rw-r--r--  drivers/mmc/host/Kconfig | 64
-rw-r--r--  drivers/mmc/host/Makefile | 8
-rw-r--r--  drivers/mmc/host/atmel-mci.c | 141
-rw-r--r--  drivers/mmc/host/bfin_sdh.c | 639
-rw-r--r--  drivers/mmc/host/davinci_mmc.c | 1349
-rw-r--r--  drivers/mmc/host/mmci.c | 41
-rw-r--r--  drivers/mmc/host/msm_sdcc.c | 5
-rw-r--r--  drivers/mmc/host/mxcmmc.c | 10
-rw-r--r--  drivers/mmc/host/of_mmc_spi.c | 2
-rw-r--r--  drivers/mmc/host/omap.c | 10
-rw-r--r--  drivers/mmc/host/pxamci.c | 2
-rw-r--r--  drivers/mmc/host/s3cmci.c | 13
-rw-r--r--  drivers/mmc/host/sdhci-of-core.c (renamed from drivers/mmc/host/sdhci-of.c) | 143
-rw-r--r--  drivers/mmc/host/sdhci-of-esdhc.c | 143
-rw-r--r--  drivers/mmc/host/sdhci-of-hlwd.c | 65
-rw-r--r--  drivers/mmc/host/sdhci-of.h | 42
-rw-r--r--  drivers/mmc/host/sdhci-pci.c | 75
-rw-r--r--  drivers/mmc/host/sdhci.h | 4
-rw-r--r--  drivers/mmc/host/tmio_mmc.c | 61
-rw-r--r--  drivers/mmc/host/tmio_mmc.h | 46
31 files changed, 2929 insertions(+), 510 deletions(-)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 85f0e8cd875b..1f552c6e7579 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -85,7 +85,14 @@ static void mmc_blk_put(struct mmc_blk_data *md)
 	mutex_lock(&open_lock);
 	md->usage--;
 	if (md->usage == 0) {
+		int devmaj = MAJOR(disk_devt(md->disk));
 		int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;
+
+		if (!devmaj)
+			devidx = md->disk->first_minor >> MMC_SHIFT;
+
+		blk_cleanup_queue(md->queue.queue);
+
 		__clear_bit(devidx, dev_use);
 
 		put_disk(md->disk);
@@ -613,6 +620,7 @@ static int mmc_blk_probe(struct mmc_card *card)
 	return 0;
 
  out:
+	mmc_cleanup_queue(&md->queue);
 	mmc_blk_put(md);
 
 	return err;
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 49e582356c65..c5a7a855f4b1 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -90,9 +90,10 @@ static void mmc_request(struct request_queue *q)
 	struct request *req;
 
 	if (!mq) {
-		printk(KERN_ERR "MMC: killing requests for dead queue\n");
-		while ((req = blk_fetch_request(q)) != NULL)
+		while ((req = blk_fetch_request(q)) != NULL) {
+			req->cmd_flags |= REQ_QUIET;
 			__blk_end_request_all(req, -EIO);
+		}
 		return;
 	}
 
@@ -223,17 +224,18 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	struct request_queue *q = mq->queue;
 	unsigned long flags;
 
-	/* Mark that we should start throwing out stragglers */
-	spin_lock_irqsave(q->queue_lock, flags);
-	q->queuedata = NULL;
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
 	/* Make sure the queue isn't suspended, as that will deadlock */
 	mmc_queue_resume(mq);
 
 	/* Then terminate our worker thread */
 	kthread_stop(mq->thread);
 
+	/* Empty the queue */
+	spin_lock_irqsave(q->queue_lock, flags);
+	q->queuedata = NULL;
+	blk_start_queue(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
 	if (mq->bounce_sg)
 		kfree(mq->bounce_sg);
 	mq->bounce_sg = NULL;
@@ -245,8 +247,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	kfree(mq->bounce_buf);
 	mq->bounce_buf = NULL;
 
-	blk_cleanup_queue(mq->queue);
-
 	mq->card = NULL;
 }
 EXPORT_SYMBOL(mmc_cleanup_queue);
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index b8e7c5ae981e..f53755533e7e 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/sched.h>
 #include <linux/mutex.h>
 #include <linux/seq_file.h>
 #include <linux/serial_reg.h>
@@ -73,11 +74,10 @@ struct uart_icount {
 };
 
 struct sdio_uart_port {
+	struct tty_port port;
 	struct kref kref;
 	struct tty_struct *tty;
 	unsigned int index;
-	unsigned int opened;
-	struct mutex open_lock;
 	struct sdio_func *func;
 	struct mutex func_lock;
 	struct task_struct *in_sdio_uart_irq;
@@ -87,6 +87,7 @@ struct sdio_uart_port {
 	struct uart_icount icount;
 	unsigned int uartclk;
 	unsigned int mctrl;
+	unsigned int rx_mctrl;
 	unsigned int read_status_mask;
 	unsigned int ignore_status_mask;
 	unsigned char x_char;
@@ -102,7 +103,6 @@ static int sdio_uart_add_port(struct sdio_uart_port *port)
 	int index, ret = -EBUSY;
 
 	kref_init(&port->kref);
-	mutex_init(&port->open_lock);
 	mutex_init(&port->func_lock);
 	spin_lock_init(&port->write_lock);
 
@@ -151,6 +151,7 @@ static void sdio_uart_port_put(struct sdio_uart_port *port)
 static void sdio_uart_port_remove(struct sdio_uart_port *port)
 {
 	struct sdio_func *func;
+	struct tty_struct *tty;
 
 	BUG_ON(sdio_uart_table[port->index] != port);
 
@@ -165,15 +166,19 @@ static void sdio_uart_port_remove(struct sdio_uart_port *port)
 	 * give up on that port ASAP.
 	 * Beware: the lock ordering is critical.
 	 */
-	mutex_lock(&port->open_lock);
+	mutex_lock(&port->port.mutex);
 	mutex_lock(&port->func_lock);
 	func = port->func;
 	sdio_claim_host(func);
 	port->func = NULL;
 	mutex_unlock(&port->func_lock);
-	if (port->opened)
-		tty_hangup(port->tty);
-	mutex_unlock(&port->open_lock);
+	tty = tty_port_tty_get(&port->port);
+	/* tty_hangup is async so is this safe as is ?? */
+	if (tty) {
+		tty_hangup(tty);
+		tty_kref_put(tty);
+	}
+	mutex_unlock(&port->port.mutex);
 	sdio_release_irq(func);
 	sdio_disable_func(func);
 	sdio_release_host(func);
@@ -217,6 +222,8 @@ static unsigned int sdio_uart_get_mctrl(struct sdio_uart_port *port)
 	unsigned char status;
 	unsigned int ret;
 
+	/* FIXME: What stops this losing the delta bits and breaking
+	   sdio_uart_check_modem_status ? */
 	status = sdio_in(port, UART_MSR);
 
 	ret = 0;
@@ -391,7 +398,7 @@ static void sdio_uart_stop_rx(struct sdio_uart_port *port)
 static void sdio_uart_receive_chars(struct sdio_uart_port *port,
 				    unsigned int *status)
 {
-	struct tty_struct *tty = port->tty;
+	struct tty_struct *tty = tty_port_tty_get(&port->port);
 	unsigned int ch, flag;
 	int max_count = 256;
 
@@ -428,24 +435,30 @@ static void sdio_uart_receive_chars(struct sdio_uart_port *port,
 		}
 
 		if ((*status & port->ignore_status_mask & ~UART_LSR_OE) == 0)
-			tty_insert_flip_char(tty, ch, flag);
+			if (tty)
+				tty_insert_flip_char(tty, ch, flag);
 
 		/*
 		 * Overrun is special. Since it's reported immediately,
 		 * it doesn't affect the current character.
 		 */
 		if (*status & ~port->ignore_status_mask & UART_LSR_OE)
-			tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+			if (tty)
+				tty_insert_flip_char(tty, 0, TTY_OVERRUN);
 
 		*status = sdio_in(port, UART_LSR);
 	} while ((*status & UART_LSR_DR) && (max_count-- > 0));
-	tty_flip_buffer_push(tty);
+	if (tty) {
+		tty_flip_buffer_push(tty);
+		tty_kref_put(tty);
+	}
 }
 
 static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
 {
 	struct circ_buf *xmit = &port->xmit;
 	int count;
+	struct tty_struct *tty;
 
 	if (port->x_char) {
 		sdio_out(port, UART_TX, port->x_char);
@@ -453,8 +466,13 @@ static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
 		port->x_char = 0;
 		return;
 	}
-	if (circ_empty(xmit) || port->tty->stopped || port->tty->hw_stopped) {
+
+	tty = tty_port_tty_get(&port->port);
+
+	if (tty == NULL || circ_empty(xmit) ||
+			tty->stopped || tty->hw_stopped) {
 		sdio_uart_stop_tx(port);
+		tty_kref_put(tty);
 		return;
 	}
 
@@ -468,15 +486,17 @@ static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
 	} while (--count > 0);
 
 	if (circ_chars_pending(xmit) < WAKEUP_CHARS)
-		tty_wakeup(port->tty);
+		tty_wakeup(tty);
 
 	if (circ_empty(xmit))
 		sdio_uart_stop_tx(port);
+	tty_kref_put(tty);
 }
 
 static void sdio_uart_check_modem_status(struct sdio_uart_port *port)
 {
 	int status;
+	struct tty_struct *tty;
 
 	status = sdio_in(port, UART_MSR);
 
@@ -487,25 +507,39 @@ static void sdio_uart_check_modem_status(struct sdio_uart_port *port)
 		port->icount.rng++;
 	if (status & UART_MSR_DDSR)
 		port->icount.dsr++;
-	if (status & UART_MSR_DDCD)
+	if (status & UART_MSR_DDCD) {
 		port->icount.dcd++;
+		/* DCD raise - wake for open */
+		if (status & UART_MSR_DCD)
+			wake_up_interruptible(&port->port.open_wait);
+		else {
+			/* DCD drop - hang up if tty attached */
+			tty = tty_port_tty_get(&port->port);
+			if (tty) {
+				tty_hangup(tty);
+				tty_kref_put(tty);
+			}
+		}
+	}
 	if (status & UART_MSR_DCTS) {
 		port->icount.cts++;
-		if (port->tty->termios->c_cflag & CRTSCTS) {
+		tty = tty_port_tty_get(&port->port);
+		if (tty && (tty->termios->c_cflag & CRTSCTS)) {
 			int cts = (status & UART_MSR_CTS);
-			if (port->tty->hw_stopped) {
+			if (tty->hw_stopped) {
 				if (cts) {
-					port->tty->hw_stopped = 0;
+					tty->hw_stopped = 0;
 					sdio_uart_start_tx(port);
-					tty_wakeup(port->tty);
+					tty_wakeup(tty);
 				}
 			} else {
 				if (!cts) {
-					port->tty->hw_stopped = 1;
+					tty->hw_stopped = 1;
 					sdio_uart_stop_tx(port);
 				}
 			}
 		}
+		tty_kref_put(tty);
 	}
 }
 
@@ -542,8 +576,62 @@ static void sdio_uart_irq(struct sdio_func *func)
 	port->in_sdio_uart_irq = NULL;
 }
 
-static int sdio_uart_startup(struct sdio_uart_port *port)
+static int uart_carrier_raised(struct tty_port *tport)
+{
+	struct sdio_uart_port *port =
+		container_of(tport, struct sdio_uart_port, port);
+	unsigned int ret = sdio_uart_claim_func(port);
+	if (ret)	/* Missing hardware shoudn't block for carrier */
+		return 1;
+	ret = sdio_uart_get_mctrl(port);
+	sdio_uart_release_func(port);
+	if (ret & TIOCM_CAR)
+		return 1;
+	return 0;
+}
+
+/**
+ * uart_dtr_rts - port helper to set uart signals
+ * @tport: tty port to be updated
+ * @onoff: set to turn on DTR/RTS
+ *
+ * Called by the tty port helpers when the modem signals need to be
+ * adjusted during an open, close and hangup.
+ */
+
+static void uart_dtr_rts(struct tty_port *tport, int onoff)
+{
+	struct sdio_uart_port *port =
+		container_of(tport, struct sdio_uart_port, port);
+	int ret = sdio_uart_claim_func(port);
+	if (ret)
+		return;
+	if (onoff == 0)
+		sdio_uart_clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+	else
+		sdio_uart_set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+	sdio_uart_release_func(port);
+}
+
+/**
+ * sdio_uart_activate - start up hardware
+ * @tport: tty port to activate
+ * @tty: tty bound to this port
+ *
+ * Activate a tty port. The port locking guarantees us this will be
+ * run exactly once per set of opens, and if successful will see the
+ * shutdown method run exactly once to match. Start up and shutdown are
+ * protected from each other by the internal locking and will not run
+ * at the same time even during a hangup event.
+ *
+ * If we successfully start up the port we take an extra kref as we
+ * will keep it around until shutdown when the kref is dropped.
+ */
+
+static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
 {
+	struct sdio_uart_port *port =
+		container_of(tport, struct sdio_uart_port, port);
 	unsigned long page;
 	int ret;
 
@@ -551,7 +639,7 @@ static int sdio_uart_startup(struct sdio_uart_port *port)
 	 * Set the TTY IO error marker - we will only clear this
 	 * once we have successfully opened the port.
 	 */
-	set_bit(TTY_IO_ERROR, &port->tty->flags);
+	set_bit(TTY_IO_ERROR, &tty->flags);
 
 	/* Initialise and allocate the transmit buffer. */
 	page = __get_free_page(GFP_KERNEL);
@@ -592,19 +680,19 @@ static int sdio_uart_startup(struct sdio_uart_port *port)
 	 */
 	sdio_out(port, UART_LCR, UART_LCR_WLEN8);
 
-	port->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE | UART_IER_UUE;
+	port->ier = UART_IER_RLSI|UART_IER_RDI|UART_IER_RTOIE|UART_IER_UUE;
 	port->mctrl = TIOCM_OUT2;
 
-	sdio_uart_change_speed(port, port->tty->termios, NULL);
+	sdio_uart_change_speed(port, tty->termios, NULL);
 
-	if (port->tty->termios->c_cflag & CBAUD)
+	if (tty->termios->c_cflag & CBAUD)
 		sdio_uart_set_mctrl(port, TIOCM_RTS | TIOCM_DTR);
 
-	if (port->tty->termios->c_cflag & CRTSCTS)
+	if (tty->termios->c_cflag & CRTSCTS)
 		if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS))
-			port->tty->hw_stopped = 1;
+			tty->hw_stopped = 1;
 
-	clear_bit(TTY_IO_ERROR, &port->tty->flags);
+	clear_bit(TTY_IO_ERROR, &tty->flags);
 
 	/* Kick the IRQ handler once while we're still holding the host lock */
 	sdio_uart_irq(port->func);
@@ -621,8 +709,20 @@ err1:
 	return ret;
 }
 
-static void sdio_uart_shutdown(struct sdio_uart_port *port)
+/**
+ * sdio_uart_shutdown - stop hardware
+ * @tport: tty port to shut down
+ *
+ * Deactivate a tty port. The port locking guarantees us this will be
+ * run only if a successful matching activate already ran. The two are
+ * protected from each other by the internal locking and will not run
+ * at the same time even during a hangup event.
+ */
+
+static void sdio_uart_shutdown(struct tty_port *tport)
 {
+	struct sdio_uart_port *port =
+		container_of(tport, struct sdio_uart_port, port);
 	int ret;
 
 	ret = sdio_uart_claim_func(port);
@@ -631,12 +731,6 @@ static void sdio_uart_shutdown(struct sdio_uart_port *port)
 
 	sdio_uart_stop_rx(port);
 
-	/* TODO: wait here for TX FIFO to drain */
-
-	/* Turn off DTR and RTS early. */
-	if (port->tty->termios->c_cflag & HUPCL)
-		sdio_uart_clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
-
 	/* Disable interrupts from this port */
 	sdio_release_irq(port->func);
 	port->ier = 0;
@@ -661,77 +755,70 @@ skip:
 	free_page((unsigned long)port->xmit.buf);
 }
 
-static int sdio_uart_open(struct tty_struct *tty, struct file *filp)
+/**
+ * sdio_uart_install - install method
+ * @driver: the driver in use (sdio_uart in our case)
+ * @tty: the tty being bound
+ *
+ * Look up and bind the tty and the driver together. Initialize
+ * any needed private data (in our case the termios)
+ */
+
+static int sdio_uart_install(struct tty_driver *driver, struct tty_struct *tty)
 {
-	struct sdio_uart_port *port;
-	int ret;
+	int idx = tty->index;
+	struct sdio_uart_port *port = sdio_uart_port_get(idx);
+	int ret = tty_init_termios(tty);
+
+	if (ret == 0) {
+		tty_driver_kref_get(driver);
+		tty->count++;
+		/* This is the ref sdio_uart_port get provided */
+		tty->driver_data = port;
+		driver->ttys[idx] = tty;
+	} else
+		sdio_uart_port_put(port);
+	return ret;
+}
 
-	port = sdio_uart_port_get(tty->index);
-	if (!port)
-		return -ENODEV;
+/**
+ * sdio_uart_cleanup - called on the last tty kref drop
+ * @tty: the tty being destroyed
+ *
+ * Called asynchronously when the last reference to the tty is dropped.
+ * We cannot destroy the tty->driver_data port kref until this point
+ */
 
-	mutex_lock(&port->open_lock);
+static void sdio_uart_cleanup(struct tty_struct *tty)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	tty->driver_data = NULL;	/* Bug trap */
+	sdio_uart_port_put(port);
+}
 
-	/*
-	 * Make sure not to mess up with a dead port
-	 * which has not been closed yet.
-	 */
-	if (tty->driver_data && tty->driver_data != port) {
-		mutex_unlock(&port->open_lock);
-		sdio_uart_port_put(port);
-		return -EBUSY;
-	}
+/*
+ * Open/close/hangup is now entirely boilerplate
+ */
 
-	if (!port->opened) {
-		tty->driver_data = port;
-		port->tty = tty;
-		ret = sdio_uart_startup(port);
-		if (ret) {
-			tty->driver_data = NULL;
-			port->tty = NULL;
-			mutex_unlock(&port->open_lock);
-			sdio_uart_port_put(port);
-			return ret;
-		}
-	}
-	port->opened++;
-	mutex_unlock(&port->open_lock);
-	return 0;
+static int sdio_uart_open(struct tty_struct *tty, struct file *filp)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	return tty_port_open(&port->port, tty, filp);
 }
 
 static void sdio_uart_close(struct tty_struct *tty, struct file * filp)
 {
 	struct sdio_uart_port *port = tty->driver_data;
+	tty_port_close(&port->port, tty, filp);
+}
 
-	if (!port)
-		return;
-
-	mutex_lock(&port->open_lock);
-	BUG_ON(!port->opened);
-
-	/*
-	 * This is messy. The tty layer calls us even when open()
-	 * returned an error. Ignore this close request if tty->count
-	 * is larger than port->count.
-	 */
-	if (tty->count > port->opened) {
-		mutex_unlock(&port->open_lock);
-		return;
-	}
-
-	if (--port->opened == 0) {
-		tty->closing = 1;
-		sdio_uart_shutdown(port);
-		tty_ldisc_flush(tty);
-		port->tty = NULL;
-		tty->driver_data = NULL;
-		tty->closing = 0;
-	}
-	mutex_unlock(&port->open_lock);
-	sdio_uart_port_put(port);
+static void sdio_uart_hangup(struct tty_struct *tty)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	tty_port_hangup(&port->port);
 }
 
-static int sdio_uart_write(struct tty_struct * tty, const unsigned char *buf,
+static int sdio_uart_write(struct tty_struct *tty, const unsigned char *buf,
 			   int count)
 {
 	struct sdio_uart_port *port = tty->driver_data;
@@ -756,7 +843,7 @@ static int sdio_uart_write(struct tty_struct * tty, const unsigned char *buf,
 	}
 	spin_unlock(&port->write_lock);
 
-	if ( !(port->ier & UART_IER_THRI)) {
+	if (!(port->ier & UART_IER_THRI)) {
 		int err = sdio_uart_claim_func(port);
 		if (!err) {
 			sdio_uart_start_tx(port);
@@ -843,17 +930,12 @@ static void sdio_uart_unthrottle(struct tty_struct *tty)
 	sdio_uart_release_func(port);
 }
 
-static void sdio_uart_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+static void sdio_uart_set_termios(struct tty_struct *tty,
+				  struct ktermios *old_termios)
 {
 	struct sdio_uart_port *port = tty->driver_data;
 	unsigned int cflag = tty->termios->c_cflag;
 
-#define RELEVANT_IFLAG(iflag) ((iflag) & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
-
-	if ((cflag ^ old_termios->c_cflag) == 0 &&
-	    RELEVANT_IFLAG(tty->termios->c_iflag ^ old_termios->c_iflag) == 0)
-		return;
-
 	if (sdio_uart_claim_func(port) != 0)
 		return;
 
@@ -928,7 +1010,7 @@ static int sdio_uart_tiocmset(struct tty_struct *tty, struct file *file,
 	int result;
 
 	result = sdio_uart_claim_func(port);
-	if(!result) {
+	if (!result) {
 		sdio_uart_update_mctrl(port, set, clear);
 		sdio_uart_release_func(port);
 	}
@@ -946,7 +1028,7 @@ static int sdio_uart_proc_show(struct seq_file *m, void *v)
 		struct sdio_uart_port *port = sdio_uart_port_get(i);
 		if (port) {
 			seq_printf(m, "%d: uart:SDIO", i);
-			if(capable(CAP_SYS_ADMIN)) {
+			if (capable(CAP_SYS_ADMIN)) {
 				seq_printf(m, " tx:%d rx:%d",
 					port->icount.tx, port->icount.rx);
 				if (port->icount.frame)
@@ -994,6 +1076,13 @@ static const struct file_operations sdio_uart_proc_fops = {
 	.release = single_release,
 };
 
+static const struct tty_port_operations sdio_uart_port_ops = {
+	.dtr_rts = uart_dtr_rts,
+	.carrier_raised = uart_carrier_raised,
+	.shutdown = sdio_uart_shutdown,
+	.activate = sdio_uart_activate,
+};
+
 static const struct tty_operations sdio_uart_ops = {
 	.open = sdio_uart_open,
 	.close = sdio_uart_close,
@@ -1004,9 +1093,12 @@ static const struct tty_operations sdio_uart_ops = {
 	.throttle = sdio_uart_throttle,
 	.unthrottle = sdio_uart_unthrottle,
 	.set_termios = sdio_uart_set_termios,
+	.hangup = sdio_uart_hangup,
 	.break_ctl = sdio_uart_break_ctl,
 	.tiocmget = sdio_uart_tiocmget,
 	.tiocmset = sdio_uart_tiocmset,
+	.install = sdio_uart_install,
+	.cleanup = sdio_uart_cleanup,
 	.proc_fops = &sdio_uart_proc_fops,
 };
 
@@ -1043,7 +1135,7 @@ static int sdio_uart_probe(struct sdio_func *func,
 	}
 	if (!tpl) {
 		printk(KERN_WARNING
-	"%s: can't find tuple 0x91 subtuple 0 (SUBTPL_SIOREG) for GPS class\n",
+			"%s: can't find tuple 0x91 subtuple 0 (SUBTPL_SIOREG) for GPS class\n",
 			sdio_func_id(func));
 		kfree(port);
 		return -EINVAL;
@@ -1068,13 +1160,16 @@ static int sdio_uart_probe(struct sdio_func *func,
 
 	port->func = func;
 	sdio_set_drvdata(func, port);
+	tty_port_init(&port->port);
+	port->port.ops = &sdio_uart_port_ops;
 
 	ret = sdio_uart_add_port(port);
 	if (ret) {
 		kfree(port);
 	} else {
 		struct device *dev;
-		dev = tty_register_device(sdio_uart_tty_driver, port->index, &func->dev);
+		dev = tty_register_device(sdio_uart_tty_driver,
+				port->index, &func->dev);
 		if (IS_ERR(dev)) {
 			sdio_uart_port_remove(port);
 			ret = PTR_ERR(dev);
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index ab37a6d9d32a..bb22ffd76ef8 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -3,7 +3,7 @@
 #
 
 config MMC_UNSAFE_RESUME
-	bool "Allow unsafe resume (DANGEROUS)"
+	bool "Assume MMC/SD cards are non-removable (DANGEROUS)"
 	help
 	  If you say Y here, the MMC layer will assume that all cards
 	  stayed in their respective slots during the suspend. The
@@ -14,3 +14,5 @@ config MMC_UNSAFE_RESUME
 	  This option is usually just for embedded systems which use
 	  a MMC/SD card for rootfs. Most people should say N here.
 
+	  This option sets a default which can be overridden by the
+	  module parameter "removable=0" or "removable=1".
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 7dab2e5f4bc9..30acd5265821 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -48,6 +48,22 @@ int use_spi_crc = 1;
 module_param(use_spi_crc, bool, 0);
 
 /*
+ * We normally treat cards as removed during suspend if they are not
+ * known to be on a non-removable bus, to avoid the risk of writing
+ * back data to a different card after resume. Allow this to be
+ * overridden if necessary.
+ */
+#ifdef CONFIG_MMC_UNSAFE_RESUME
+int mmc_assume_removable;
+#else
+int mmc_assume_removable = 1;
+#endif
+module_param_named(removable, mmc_assume_removable, bool, 0644);
+MODULE_PARM_DESC(
+	removable,
+	"MMC/SD cards are removable and may be removed during suspend");
+
+/*
  * Internal function. Schedule delayed work in the MMC work queue.
  */
 static int mmc_schedule_delayed_work(struct delayed_work *work,
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 67ae6abc4230..a811c52a1659 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -54,7 +54,9 @@ int mmc_attach_mmc(struct mmc_host *host, u32 ocr);
 int mmc_attach_sd(struct mmc_host *host, u32 ocr);
 int mmc_attach_sdio(struct mmc_host *host, u32 ocr);
 
+/* Module parameters */
 extern int use_spi_crc;
+extern int mmc_assume_removable;
 
 /* Debugfs information for hosts and cards */
 void mmc_add_host_debugfs(struct mmc_host *host);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index bfefce365ae7..0eac6c814904 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -207,7 +207,7 @@ static int mmc_read_ext_csd(struct mmc_card *card)
 	}
 
 	card->ext_csd.rev = ext_csd[EXT_CSD_REV];
-	if (card->ext_csd.rev > 3) {
+	if (card->ext_csd.rev > 5) {
 		printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
 			"version %d\n", mmc_hostname(card->host),
 			card->ext_csd.rev);
@@ -602,25 +602,6 @@ static int mmc_awake(struct mmc_host *host)
 	return err;
 }
 
-#ifdef CONFIG_MMC_UNSAFE_RESUME
-
-static const struct mmc_bus_ops mmc_ops = {
-	.awake = mmc_awake,
-	.sleep = mmc_sleep,
-	.remove = mmc_remove,
-	.detect = mmc_detect,
-	.suspend = mmc_suspend,
-	.resume = mmc_resume,
-	.power_restore = mmc_power_restore,
-};
-
-static void mmc_attach_bus_ops(struct mmc_host *host)
-{
-	mmc_attach_bus(host, &mmc_ops);
-}
-
-#else
-
 static const struct mmc_bus_ops mmc_ops = {
 	.awake = mmc_awake,
 	.sleep = mmc_sleep,
@@ -645,15 +626,13 @@ static void mmc_attach_bus_ops(struct mmc_host *host)
 {
 	const struct mmc_bus_ops *bus_ops;
 
-	if (host->caps & MMC_CAP_NONREMOVABLE)
+	if (host->caps & MMC_CAP_NONREMOVABLE || !mmc_assume_removable)
 		bus_ops = &mmc_ops_unsafe;
 	else
 		bus_ops = &mmc_ops;
 	mmc_attach_bus(host, bus_ops);
 }
 
-#endif
-
 /*
  * Starting point for MMC card init.
  */
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 10b2a4d20f5a..fdd414eded09 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -606,23 +606,6 @@ static void mmc_sd_power_restore(struct mmc_host *host)
 	mmc_release_host(host);
 }
 
-#ifdef CONFIG_MMC_UNSAFE_RESUME
-
-static const struct mmc_bus_ops mmc_sd_ops = {
-	.remove = mmc_sd_remove,
-	.detect = mmc_sd_detect,
-	.suspend = mmc_sd_suspend,
-	.resume = mmc_sd_resume,
-	.power_restore = mmc_sd_power_restore,
-};
-
-static void mmc_sd_attach_bus_ops(struct mmc_host *host)
-{
-	mmc_attach_bus(host, &mmc_sd_ops);
-}
-
-#else
-
 static const struct mmc_bus_ops mmc_sd_ops = {
 	.remove = mmc_sd_remove,
 	.detect = mmc_sd_detect,
@@ -643,15 +626,13 @@ static void mmc_sd_attach_bus_ops(struct mmc_host *host)
 {
 	const struct mmc_bus_ops *bus_ops;
 
-	if (host->caps & MMC_CAP_NONREMOVABLE)
+	if (host->caps & MMC_CAP_NONREMOVABLE || !mmc_assume_removable)
 		bus_ops = &mmc_sd_ops_unsafe;
 	else
 		bus_ops = &mmc_sd_ops;
 	mmc_attach_bus(host, bus_ops);
 }
 
-#endif
-
 /*
  * Starting point for SD card init.
 */
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index cdb845b68ab5..06b64085a355 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -516,7 +516,8 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
 	 * The number of functions on the card is encoded inside
 	 * the ocr.
 	 */
-	card->sdio_funcs = funcs = (ocr & 0x70000000) >> 28;
+	funcs = (ocr & 0x70000000) >> 28;
+	card->sdio_funcs = 0;
 
 	/*
 	 * If needed, disconnect card detection pull-up resistor.
@@ -528,7 +529,7 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
 	/*
 	 * Initialize (but don't add) all present functions.
 	 */
-	for (i = 0;i < funcs;i++) {
+	for (i = 0; i < funcs; i++, card->sdio_funcs++) {
 		err = sdio_init_func(host->card, i + 1);
 		if (err)
 			goto remove;
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index d37464e296a5..9e060c87e64d 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -248,12 +248,15 @@ int sdio_add_func(struct sdio_func *func)
 /*
  * Unregister a SDIO function with the driver model, and
  * (eventually) free it.
+ * This function can be called through error paths where sdio_add_func() was
+ * never executed (because a failure occurred at an earlier point).
  */
 void sdio_remove_func(struct sdio_func *func)
 {
-	if (sdio_func_present(func))
-		device_del(&func->dev);
+	if (!sdio_func_present(func))
+		return;
 
+	device_del(&func->dev);
 	put_device(&func->dev);
 }
 
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index f85dcd536508..9538389783c1 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -97,26 +97,56 @@ static const unsigned char speed_val[16] =
 static const unsigned int speed_unit[8] =
 	{ 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 };
 
-/* FUNCE tuples with these types get passed to SDIO drivers */
-static const unsigned char funce_type_whitelist[] = {
-	4 /* CISTPL_FUNCE_LAN_NODE_ID used in Broadcom cards */
+
+typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *,
+			  const unsigned char *, unsigned);
+
+struct cis_tpl {
+	unsigned char code;
+	unsigned char min_size;
+	tpl_parse_t *parse;
 };
 
-static int cistpl_funce_whitelisted(unsigned char type)
+static int cis_tpl_parse(struct mmc_card *card, struct sdio_func *func,
+			 const char *tpl_descr,
+			 const struct cis_tpl *tpl, int tpl_count,
+			 unsigned char code,
+			 const unsigned char *buf, unsigned size)
 {
-	int i;
+	int i, ret;
 
-	for (i = 0; i < ARRAY_SIZE(funce_type_whitelist); i++) {
-		if (funce_type_whitelist[i] == type)
-			return 1;
+	/* look for a matching code in the table */
+	for (i = 0; i < tpl_count; i++, tpl++) {
+		if (tpl->code == code)
+			break;
 	}
-	return 0;
+	if (i < tpl_count) {
+		if (size >= tpl->min_size) {
+			if (tpl->parse)
+				ret = tpl->parse(card, func, buf, size);
+			else
+				ret = -EILSEQ;	/* known tuple, not parsed */
+		} else {
+			/* invalid tuple */
+			ret = -EINVAL;
+		}
+		if (ret && ret != -EILSEQ && ret != -ENOENT) {
+			printk(KERN_ERR "%s: bad %s tuple 0x%02x (%u bytes)\n",
+			       mmc_hostname(card->host), tpl_descr, code, size);
+		}
+	} else {
+		/* unknown tuple */
+		ret = -ENOENT;
+	}
+
+	return ret;
 }
 
-static int cistpl_funce_common(struct mmc_card *card,
+static int cistpl_funce_common(struct mmc_card *card, struct sdio_func *func,
 			       const unsigned char *buf, unsigned size)
 {
-	if (size < 0x04 || buf[0] != 0)
+	/* Only valid for the common CIS (function 0) */
+	if (func)
 		return -EINVAL;
 
 	/* TPLFE_FN0_BLK_SIZE */
@@ -129,20 +159,24 @@ static int cistpl_funce_common(struct mmc_card *card,
 	return 0;
 }
 
-static int cistpl_funce_func(struct sdio_func *func,
+static int cistpl_funce_func(struct mmc_card *card, struct sdio_func *func,
 			     const unsigned char *buf, unsigned size)
 {
 	unsigned vsn;
 	unsigned min_size;
 
-	/* let SDIO drivers take care of whitelisted FUNCE tuples */
-	if (cistpl_funce_whitelisted(buf[0]))
-		return -EILSEQ;
+	/* Only valid for the individual function's CIS (1-7) */
+	if (!func)
+		return -EINVAL;
 
+	/*
+	 * This tuple has a different length depending on the SDIO spec
+	 * version.
+	 */
 	vsn = func->card->cccr.sdio_vsn;
 	min_size = (vsn == SDIO_SDIO_REV_1_00) ? 28 : 42;
 
-	if (size < min_size || buf[0] != 1)
+	if (size < min_size)
 		return -EINVAL;
 
 	/* TPLFE_MAX_BLK_SIZE */
@@ -157,39 +191,32 @@ static int cistpl_funce_func(struct sdio_func *func,
 	return 0;
 }
 
+/*
+ * Known TPLFE_TYPEs table for CISTPL_FUNCE tuples.
+ *
+ * Note that, unlike PCMCIA, CISTPL_FUNCE tuples are not parsed depending
+ * on the TPLFID_FUNCTION value of the previous CISTPL_FUNCID as on SDIO
+ * TPLFID_FUNCTION is always hardcoded to 0x0C.
+ */
+static const struct cis_tpl cis_tpl_funce_list[] = {
+	{ 0x00, 4, cistpl_funce_common },
+	{ 0x01, 0, cistpl_funce_func },
+	{ 0x04, 1+1+6, /* CISTPL_FUNCE_LAN_NODE_ID */ },
+};
+
 static int cistpl_funce(struct mmc_card *card, struct sdio_func *func,
 			const unsigned char *buf, unsigned size)
 {
-	int ret;
-
-	/*
-	 * There should be two versions of the CISTPL_FUNCE tuple,
-	 * one for the common CIS (function 0) and a version used by
-	 * the individual function's CIS (1-7). Yet, the later has a
-	 * different length depending on the SDIO spec version.
-	 */
-	if (func)
-		ret = cistpl_funce_func(func, buf, size);
-	else
-		ret = cistpl_funce_common(card, buf, size);
-
-	if (ret && ret != -EILSEQ) {
-		printk(KERN_ERR "%s: bad CISTPL_FUNCE size %u "
-			"type %u\n", mmc_hostname(card->host), size, buf[0]);
-	}
+	if (size < 1)
+		return -EINVAL;
 
-	return ret;
+	return cis_tpl_parse(card, func, "CISTPL_FUNCE",
+			     cis_tpl_funce_list,
+			     ARRAY_SIZE(cis_tpl_funce_list),
+			     buf[0], buf, size);
 }
 
-typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *,
-			  const unsigned char *, unsigned);
-
-struct cis_tpl {
-	unsigned char code;
-	unsigned char min_size;
-	tpl_parse_t *parse;
-};
-
+/* Known TPL_CODEs table for CIS tuples */
 static const struct cis_tpl cis_tpl_list[] = {
 	{ 0x15, 3, cistpl_vers_1 },
 	{ 0x20, 4, cistpl_manfid },
@@ -268,46 +295,38 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
 			break;
 		}
 
-		for (i = 0; i < ARRAY_SIZE(cis_tpl_list); i++)
-			if (cis_tpl_list[i].code == tpl_code)
-				break;
-		if (i < ARRAY_SIZE(cis_tpl_list)) {
-			const struct cis_tpl *tpl = cis_tpl_list + i;
-			if (tpl_link < tpl->min_size) {
-				printk(KERN_ERR
-				       "%s: bad CIS tuple 0x%02x"
-				       " (length = %u, expected >= %u)\n",
-				       mmc_hostname(card->host),
-				       tpl_code, tpl_link, tpl->min_size);
-				ret = -EINVAL;
-			} else if (tpl->parse) {
-				ret = tpl->parse(card, func,
-						 this->data, tpl_link);
-			}
+		/* Try to parse the CIS tuple */
+		ret = cis_tpl_parse(card, func, "CIS",
+				    cis_tpl_list, ARRAY_SIZE(cis_tpl_list),
+				    tpl_code, this->data, tpl_link);
+		if (ret == -EILSEQ || ret == -ENOENT) {
 			/*
-			 * We don't need the tuple anymore if it was
-			 * successfully parsed by the SDIO core or if it is
-			 * not going to be parsed by SDIO drivers.
+			 * The tuple is unknown or known but not parsed.
+			 * Queue the tuple for the function driver.
 			 */
-			if (!ret || ret != -EILSEQ)
-				kfree(this);
-		} else {
-			/* unknown tuple */
-			ret = -EILSEQ;
-		}
-
-		if (ret == -EILSEQ) {
-			/* this tuple is unknown to the core or whitelisted */
 			this->next = NULL;
 			this->code = tpl_code;
 			this->size = tpl_link;
 			*prev = this;
 			prev = &this->next;
-			printk(KERN_DEBUG
-			       "%s: queuing CIS tuple 0x%02x length %u\n",
-			       mmc_hostname(card->host), tpl_code, tpl_link);
+
+			if (ret == -ENOENT) {
+				/* warn about unknown tuples */
+				printk(KERN_WARNING "%s: queuing unknown"
+				       " CIS tuple 0x%02x (%u bytes)\n",
+				       mmc_hostname(card->host),
+				       tpl_code, tpl_link);
+			}
+
 			/* keep on analyzing tuples */
 			ret = 0;
+		} else {
+			/*
+			 * We don't need the tuple anymore if it was
+			 * successfully parsed by the SDIO core or if it is
+			 * not going to be queued for a driver.
+			 */
+			kfree(this);
 		}
 
 		ptr += tpl_link;
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index e04b751680d0..ce1d28884e29 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -44,6 +44,19 @@ config MMC_SDHCI_IO_ACCESSORS
 	  This is silent Kconfig symbol that is selected by the drivers that
 	  need to overwrite SDHCI IO memory accessors.
 
+config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
+	bool
+	select MMC_SDHCI_IO_ACCESSORS
+	help
+	  This option is selected by drivers running on big endian hosts
+	  and performing I/O to a SDHCI controller through a bus that
+	  implements a hardware byte swapper using a 32-bit datum.
+	  This endian mapping mode is called "data invariance" and
+	  has the effect of scrambling the addresses and formats of data
+	  accessed in sizes other than the datum size.
+
+	  This is the case for the Freescale eSDHC and Nintendo Wii SDHCI.
+
 config MMC_SDHCI_PCI
 	tristate "SDHCI support on PCI bus"
 	depends on MMC_SDHCI && PCI
@@ -75,11 +88,29 @@ config MMC_RICOH_MMC
 config MMC_SDHCI_OF
 	tristate "SDHCI support on OpenFirmware platforms"
 	depends on MMC_SDHCI && PPC_OF
-	select MMC_SDHCI_IO_ACCESSORS
 	help
 	  This selects the OF support for Secure Digital Host Controller
-	  Interfaces. So far, only the Freescale eSDHC controller is known
-	  to exist on OF platforms.
+	  Interfaces.
+
+	  If unsure, say N.
+
+config MMC_SDHCI_OF_ESDHC
+	bool "SDHCI OF support for the Freescale eSDHC controller"
+	depends on MMC_SDHCI_OF
+	select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
+	help
+	  This selects the Freescale eSDHC controller support.
+
+	  If unsure, say N.
+
+config MMC_SDHCI_OF_HLWD
+	bool "SDHCI OF support for the Nintendo Wii SDHCI controllers"
+	depends on MMC_SDHCI_OF
+	select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
+	help
+	  This selects the Secure Digital Host Controller Interface (SDHCI)
+	  found in the "Hollywood" chipset of the Nintendo Wii video game
+	  console.
 
 	  If unsure, say N.
 
@@ -251,6 +282,14 @@ config MMC_MVSDIO
 	  To compile this driver as a module, choose M here: the
 	  module will be called mvsdio.
 
+config MMC_DAVINCI
+	tristate "TI DAVINCI Multimedia Card Interface support"
+	depends on ARCH_DAVINCI
+	help
+	  This selects the TI DAVINCI Multimedia card Interface.
+	  If you have an DAVINCI board with a Multimedia Card slot,
+	  say Y or M here. If unsure, say N.
+
 config MMC_SPI
 	tristate "MMC/SD/SDIO over SPI"
 	depends on SPI_MASTER && !HIGHMEM && HAS_DMA
@@ -357,3 +396,22 @@ config MMC_VIA_SDMMC
 	  If you have a controller with this interface, say Y or M here.
 
 	  If unsure, say N.
+
+config SDH_BFIN
+	tristate "Blackfin Secure Digital Host support"
+	depends on MMC && ((BF54x && !BF544) || (BF51x && !BF512))
+	help
+	  If you say yes here you will get support for the Blackfin on-chip
+	  Secure Digital Host interface. This includes support for MMC and
+	  SD cards.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called bfin_sdh.
+
+	  If unsure, say N.
+
+config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
+	bool "Blackfin EZkit Missing SDH_CMD Pull Up Resistor Workaround"
+	depends on SDH_BFIN
+	help
+	  If you say yes here SD-Cards may work on the EZkit.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index abcb0400e06d..3d253dd4240f 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_MMC_MXC) += mxcmmc.o
 obj-$(CONFIG_MMC_SDHCI) += sdhci.o
 obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
 obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o
-obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
 obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
 obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
 obj-$(CONFIG_MMC_WBSD) += wbsd.o
@@ -25,6 +24,7 @@ obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o
 obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o
 obj-$(CONFIG_MMC_MSM7X00A) += msm_sdcc.o
 obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o
+obj-$(CONFIG_MMC_DAVINCI) += davinci_mmc.o
 obj-$(CONFIG_MMC_SPI) += mmc_spi.o
 ifeq ($(CONFIG_OF),y)
 obj-$(CONFIG_MMC_SPI) += of_mmc_spi.o
@@ -34,6 +34,12 @@ obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
 obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
 obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
 obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
+obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
+
+obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
+sdhci-of-y := sdhci-of-core.o
+sdhci-of-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
+sdhci-of-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
 
 ifeq ($(CONFIG_CB710_DEBUG),y)
 	CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index fc25586b7ee1..8072128e933b 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -25,6 +25,8 @@
 #include <linux/stat.h>
 
 #include <linux/mmc/host.h>
+
+#include <mach/atmel-mci.h>
 #include <linux/atmel-mci.h>
 
 #include <asm/io.h>
@@ -92,6 +94,7 @@ struct atmel_mci_dma {
 * @need_clock_update: Update the clock rate before the next request.
 * @need_reset: Reset controller before next request.
 * @mode_reg: Value of the MR register.
+ * @cfg_reg: Value of the CFG register.
 * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
 *	rate and timeout calculations.
 * @mapbase: Physical address of the MMIO registers.
@@ -155,6 +158,7 @@ struct atmel_mci {
 	bool need_clock_update;
 	bool need_reset;
 	u32 mode_reg;
+	u32 cfg_reg;
 	unsigned long bus_hz;
 	unsigned long mapbase;
 	struct clk *mck;
@@ -223,6 +227,19 @@ static bool mci_has_rwproof(void)
 }
 
 /*
+ * The new MCI2 module isn't 100% compatible with the old MCI module,
+ * and it has a few nice features which we want to use...
+ */
+static inline bool atmci_is_mci2(void)
+{
+	if (cpu_is_at91sam9g45())
+		return true;
+
+	return false;
+}
+
+
+/*
  * The debugfs stuff below is mostly optimized away when
  * CONFIG_DEBUG_FS is not set.
 */
@@ -357,12 +374,33 @@ static int atmci_regs_show(struct seq_file *s, void *v)
 			buf[MCI_BLKR / 4],
 			buf[MCI_BLKR / 4] & 0xffff,
 			(buf[MCI_BLKR / 4] >> 16) & 0xffff);
+	if (atmci_is_mci2())
+		seq_printf(s, "CSTOR:\t0x%08x\n", buf[MCI_CSTOR / 4]);
 
 	/* Don't read RSPR and RDR; it will consume the data there */
 
 	atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]);
 	atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]);
 
+	if (atmci_is_mci2()) {
+		u32 val;
+
+		val = buf[MCI_DMA / 4];
+		seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n",
+				val, val & 3,
+				((val >> 4) & 3) ?
+					1 << (((val >> 4) & 3) + 1) : 1,
+				val & MCI_DMAEN ? " DMAEN" : "");
+
+		val = buf[MCI_CFG / 4];
+		seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",
+				val,
+				val & MCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "",
+				val & MCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "",
+				val & MCI_CFG_HSMODE ? " HSMODE" : "",
+				val & MCI_CFG_LSYNC ? " LSYNC" : "");
+	}
+
 	kfree(buf);
 
 	return 0;
@@ -557,6 +595,10 @@ static void atmci_dma_complete(void *arg)
 
 	dev_vdbg(&host->pdev->dev, "DMA complete\n");
 
+	if (atmci_is_mci2())
+		/* Disable DMA hardware handshaking on MCI */
+		mci_writel(host, DMA, mci_readl(host, DMA) & ~MCI_DMAEN);
+
 	atmci_dma_cleanup(host);
 
 	/*
@@ -592,7 +634,7 @@ static void atmci_dma_complete(void *arg)
 }
 
 static int
-atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
+atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
 {
 	struct dma_chan *chan;
 	struct dma_async_tx_descriptor *desc;
@@ -624,6 +666,9 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
 	if (!chan)
 		return -ENODEV;
 
+	if (atmci_is_mci2())
+		mci_writel(host, DMA, MCI_DMA_CHKSIZE(3) | MCI_DMAEN);
+
 	if (data->flags & MMC_DATA_READ)
 		direction = DMA_FROM_DEVICE;
 	else
@@ -641,10 +686,6 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
 	host->dma.data_desc = desc;
 	desc->callback = atmci_dma_complete;
 	desc->callback_param = host;
-	desc->tx_submit(desc);
-
-	/* Go! */
-	chan->device->device_issue_pending(chan);
 
 	return 0;
 unmap_exit:
@@ -652,13 +693,26 @@ unmap_exit:
 	return -ENOMEM;
 }
 
+static void atmci_submit_data(struct atmel_mci *host)
+{
+	struct dma_chan *chan = host->data_chan;
+	struct dma_async_tx_descriptor *desc = host->dma.data_desc;
+
+	if (chan) {
+		desc->tx_submit(desc);
+		chan->device->device_issue_pending(chan);
+	}
+}
+
 #else /* CONFIG_MMC_ATMELMCI_DMA */
 
-static int atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
+static int atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
 {
 	return -ENOSYS;
 }
 
+static void atmci_submit_data(struct atmel_mci *host) {}
+
 static void atmci_stop_dma(struct atmel_mci *host)
 {
 	/* Data transfer was stopped by the interrupt handler */
@@ -672,7 +726,7 @@ static void atmci_stop_dma(struct atmel_mci *host)
 * Returns a mask of interrupt flags to be enabled after the whole
 * request has been prepared.
 */
-static u32 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
+static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
 {
 	u32 iflags;
 
@@ -683,7 +737,7 @@ static u32 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
 	host->data = data;
 
 	iflags = ATMCI_DATA_ERROR_FLAGS;
-	if (atmci_submit_data_dma(host, data)) {
+	if (atmci_prepare_data_dma(host, data)) {
 		host->data_chan = NULL;
 
 		/*
@@ -729,6 +783,8 @@ static void atmci_start_request(struct atmel_mci *host,
 		mci_writel(host, CR, MCI_CR_SWRST);
 		mci_writel(host, CR, MCI_CR_MCIEN);
 		mci_writel(host, MR, host->mode_reg);
+		if (atmci_is_mci2())
+			mci_writel(host, CFG, host->cfg_reg);
 		host->need_reset = false;
 	}
 	mci_writel(host, SDCR, slot->sdc_reg);
@@ -744,6 +800,7 @@ static void atmci_start_request(struct atmel_mci *host,
744 while (!(mci_readl(host, SR) & MCI_CMDRDY)) 800 while (!(mci_readl(host, SR) & MCI_CMDRDY))
745 cpu_relax(); 801 cpu_relax();
746 } 802 }
803 iflags = 0;
747 data = mrq->data; 804 data = mrq->data;
748 if (data) { 805 if (data) {
749 atmci_set_timeout(host, slot, data); 806 atmci_set_timeout(host, slot, data);
@@ -753,15 +810,17 @@ static void atmci_start_request(struct atmel_mci *host,
753 | MCI_BLKLEN(data->blksz)); 810 | MCI_BLKLEN(data->blksz));
754 dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n", 811 dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
755 MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz)); 812 MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz));
813
814 iflags |= atmci_prepare_data(host, data);
756 } 815 }
757 816
758 iflags = MCI_CMDRDY; 817 iflags |= MCI_CMDRDY;
759 cmd = mrq->cmd; 818 cmd = mrq->cmd;
760 cmdflags = atmci_prepare_command(slot->mmc, cmd); 819 cmdflags = atmci_prepare_command(slot->mmc, cmd);
761 atmci_start_command(host, cmd, cmdflags); 820 atmci_start_command(host, cmd, cmdflags);
762 821
763 if (data) 822 if (data)
764 iflags |= atmci_submit_data(host, data); 823 atmci_submit_data(host);
765 824
766 if (mrq->stop) { 825 if (mrq->stop) {
767 host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop); 826 host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
@@ -857,6 +916,8 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
857 clk_enable(host->mck); 916 clk_enable(host->mck);
858 mci_writel(host, CR, MCI_CR_SWRST); 917 mci_writel(host, CR, MCI_CR_SWRST);
859 mci_writel(host, CR, MCI_CR_MCIEN); 918 mci_writel(host, CR, MCI_CR_MCIEN);
919 if (atmci_is_mci2())
920 mci_writel(host, CFG, host->cfg_reg);
860 } 921 }
861 922
862 /* 923 /*
@@ -1095,6 +1156,8 @@ static void atmci_detect_change(unsigned long data)
1095 mci_writel(host, CR, MCI_CR_SWRST); 1156 mci_writel(host, CR, MCI_CR_SWRST);
1096 mci_writel(host, CR, MCI_CR_MCIEN); 1157 mci_writel(host, CR, MCI_CR_MCIEN);
1097 mci_writel(host, MR, host->mode_reg); 1158 mci_writel(host, MR, host->mode_reg);
1159 if (atmci_is_mci2())
1160 mci_writel(host, CFG, host->cfg_reg);
1098 1161
1099 host->data = NULL; 1162 host->data = NULL;
1100 host->cmd = NULL; 1163 host->cmd = NULL;
@@ -1584,14 +1647,47 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
1584#ifdef CONFIG_MMC_ATMELMCI_DMA 1647#ifdef CONFIG_MMC_ATMELMCI_DMA
1585static bool filter(struct dma_chan *chan, void *slave) 1648static bool filter(struct dma_chan *chan, void *slave)
1586{ 1649{
1587 struct dw_dma_slave *dws = slave; 1650 struct mci_dma_data *sl = slave;
1588 1651
1589 if (dws->dma_dev == chan->device->dev) { 1652 if (sl && find_slave_dev(sl) == chan->device->dev) {
1590 chan->private = dws; 1653 chan->private = slave_data_ptr(sl);
1591 return true; 1654 return true;
1592 } else 1655 } else {
1593 return false; 1656 return false;
1657 }
1594} 1658}
1659
1660static void atmci_configure_dma(struct atmel_mci *host)
1661{
1662 struct mci_platform_data *pdata;
1663
1664 if (host == NULL)
1665 return;
1666
1667 pdata = host->pdev->dev.platform_data;
1668
1669 if (pdata && find_slave_dev(pdata->dma_slave)) {
1670 dma_cap_mask_t mask;
1671
1672 setup_dma_addr(pdata->dma_slave,
1673 host->mapbase + MCI_TDR,
1674 host->mapbase + MCI_RDR);
1675
1676 /* Try to grab a DMA channel */
1677 dma_cap_zero(mask);
1678 dma_cap_set(DMA_SLAVE, mask);
1679 host->dma.chan =
1680 dma_request_channel(mask, filter, pdata->dma_slave);
1681 }
1682 if (!host->dma.chan)
1683 dev_notice(&host->pdev->dev, "DMA not available, using PIO\n");
1684 else
1685 dev_info(&host->pdev->dev,
1686 "Using %s for DMA transfers\n",
1687 dma_chan_name(host->dma.chan));
1688}
1689#else
1690static void atmci_configure_dma(struct atmel_mci *host) {}
1595#endif 1691#endif
1596 1692
1597static int __init atmci_probe(struct platform_device *pdev) 1693static int __init atmci_probe(struct platform_device *pdev)
@@ -1645,22 +1741,7 @@ static int __init atmci_probe(struct platform_device *pdev)
1645 if (ret) 1741 if (ret)
1646 goto err_request_irq; 1742 goto err_request_irq;
1647 1743
1648#ifdef CONFIG_MMC_ATMELMCI_DMA 1744 atmci_configure_dma(host);
1649 if (pdata->dma_slave.dma_dev) {
1650 struct dw_dma_slave *dws = &pdata->dma_slave;
1651 dma_cap_mask_t mask;
1652
1653 dws->tx_reg = regs->start + MCI_TDR;
1654 dws->rx_reg = regs->start + MCI_RDR;
1655
1656 /* Try to grab a DMA channel */
1657 dma_cap_zero(mask);
1658 dma_cap_set(DMA_SLAVE, mask);
1659 host->dma.chan = dma_request_channel(mask, filter, dws);
1660 }
1661 if (!host->dma.chan)
1662 dev_notice(&pdev->dev, "DMA not available, using PIO\n");
1663#endif /* CONFIG_MMC_ATMELMCI_DMA */
1664 1745
1665 platform_set_drvdata(pdev, host); 1746 platform_set_drvdata(pdev, host);
1666 1747
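The atmel-mci hunks above split DMA handling into two phases: atmci_prepare_data_dma() now only maps the scatterlist and builds the descriptor, and the new atmci_submit_data() hands it to the engine (tx_submit() plus device_issue_pending()) only after atmci_start_command() has put the command on the bus. The following is a minimal standalone C sketch of that ordering; every name in it is an illustrative stand-in rather than a driver symbol.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the prepare/submit split: the descriptor is built first,
 * but only handed to the DMA engine once the command has been started. */
struct toy_desc {
	bool prepared;
	bool submitted;
};

static int prepare_data(struct toy_desc *d)
{
	d->prepared = true;		/* map the sg list, build the descriptor */
	printf("descriptor prepared\n");
	return 0;
}

static void start_command(void)
{
	printf("command started\n");	/* the CMDRDY interrupt follows later */
}

static void submit_data(struct toy_desc *d)
{
	if (d->prepared) {
		d->submitted = true;	/* tx_submit() + issue_pending() */
		printf("descriptor submitted\n");
	}
}

int main(void)
{
	struct toy_desc d = { false, false };

	prepare_data(&d);	/* mirrors atmci_prepare_data() */
	start_command();	/* mirrors atmci_start_command() */
	submit_data(&d);	/* mirrors atmci_submit_data() */
	return 0;
}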
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
new file mode 100644
index 000000000000..3343a57355cc
--- /dev/null
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -0,0 +1,639 @@
1/*
2 * bfin_sdh.c - Analog Devices Blackfin SDH Controller
3 *
4 * Copyright (C) 2007-2009 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#define DRIVER_NAME "bfin-sdh"
10
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/ioport.h>
14#include <linux/platform_device.h>
15#include <linux/delay.h>
16#include <linux/interrupt.h>
17#include <linux/dma-mapping.h>
18#include <linux/mmc/host.h>
19#include <linux/proc_fs.h>
20
21#include <asm/cacheflush.h>
22#include <asm/dma.h>
23#include <asm/portmux.h>
24#include <asm/bfin_sdh.h>
25
26#if defined(CONFIG_BF51x)
27#define bfin_read_SDH_PWR_CTL bfin_read_RSI_PWR_CTL
28#define bfin_write_SDH_PWR_CTL bfin_write_RSI_PWR_CTL
29#define bfin_read_SDH_CLK_CTL bfin_read_RSI_CLK_CTL
30#define bfin_write_SDH_CLK_CTL bfin_write_RSI_CLK_CTL
31#define bfin_write_SDH_ARGUMENT bfin_write_RSI_ARGUMENT
32#define bfin_write_SDH_COMMAND bfin_write_RSI_COMMAND
33#define bfin_write_SDH_DATA_TIMER bfin_write_RSI_DATA_TIMER
34#define bfin_read_SDH_RESPONSE0 bfin_read_RSI_RESPONSE0
35#define bfin_read_SDH_RESPONSE1 bfin_read_RSI_RESPONSE1
36#define bfin_read_SDH_RESPONSE2 bfin_read_RSI_RESPONSE2
37#define bfin_read_SDH_RESPONSE3 bfin_read_RSI_RESPONSE3
38#define bfin_write_SDH_DATA_LGTH bfin_write_RSI_DATA_LGTH
39#define bfin_read_SDH_DATA_CTL bfin_read_RSI_DATA_CTL
40#define bfin_write_SDH_DATA_CTL bfin_write_RSI_DATA_CTL
41#define bfin_read_SDH_DATA_CNT bfin_read_RSI_DATA_CNT
42#define bfin_write_SDH_STATUS_CLR bfin_write_RSI_STATUS_CLR
43#define bfin_read_SDH_E_STATUS bfin_read_RSI_E_STATUS
44#define bfin_write_SDH_E_STATUS bfin_write_RSI_E_STATUS
45#define bfin_read_SDH_STATUS bfin_read_RSI_STATUS
46#define bfin_write_SDH_MASK0 bfin_write_RSI_MASK0
47#define bfin_read_SDH_CFG bfin_read_RSI_CFG
48#define bfin_write_SDH_CFG bfin_write_RSI_CFG
49#endif
50
51struct dma_desc_array {
52 unsigned long start_addr;
53 unsigned short cfg;
54 unsigned short x_count;
55 short x_modify;
56} __packed;
57
58struct sdh_host {
59 struct mmc_host *mmc;
60 spinlock_t lock;
61 struct resource *res;
62 void __iomem *base;
63 int irq;
64 int stat_irq;
65 int dma_ch;
66 int dma_dir;
67 struct dma_desc_array *sg_cpu;
68 dma_addr_t sg_dma;
69 int dma_len;
70
71 unsigned int imask;
72 unsigned int power_mode;
73 unsigned int clk_div;
74
75 struct mmc_request *mrq;
76 struct mmc_command *cmd;
77 struct mmc_data *data;
78};
79
80static struct bfin_sd_host *get_sdh_data(struct platform_device *pdev)
81{
82 return pdev->dev.platform_data;
83}
84
85static void sdh_stop_clock(struct sdh_host *host)
86{
87 bfin_write_SDH_CLK_CTL(bfin_read_SDH_CLK_CTL() & ~CLK_E);
88 SSYNC();
89}
90
91static void sdh_enable_stat_irq(struct sdh_host *host, unsigned int mask)
92{
93 unsigned long flags;
94
95 spin_lock_irqsave(&host->lock, flags);
96 host->imask |= mask;
97 bfin_write_SDH_MASK0(mask);
98 SSYNC();
99 spin_unlock_irqrestore(&host->lock, flags);
100}
101
102static void sdh_disable_stat_irq(struct sdh_host *host, unsigned int mask)
103{
104 unsigned long flags;
105
106 spin_lock_irqsave(&host->lock, flags);
107 host->imask &= ~mask;
108 bfin_write_SDH_MASK0(host->imask);
109 SSYNC();
110 spin_unlock_irqrestore(&host->lock, flags);
111}
112
113static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)
114{
115 unsigned int length;
116 unsigned int data_ctl;
117 unsigned int dma_cfg;
118 struct scatterlist *sg;
119
120 dev_dbg(mmc_dev(host->mmc), "%s enter flags: 0x%x\n", __func__, data->flags);
121 host->data = data;
122 data_ctl = 0;
123 dma_cfg = 0;
124
125 length = data->blksz * data->blocks;
126 bfin_write_SDH_DATA_LGTH(length);
127
128 if (data->flags & MMC_DATA_STREAM)
129 data_ctl |= DTX_MODE;
130
131 if (data->flags & MMC_DATA_READ)
132 data_ctl |= DTX_DIR;
133 /* Only supports power-of-2 block size */
134 if (data->blksz & (data->blksz - 1))
135 return -EINVAL;
136 data_ctl |= ((ffs(data->blksz) - 1) << 4);
137
138 bfin_write_SDH_DATA_CTL(data_ctl);
139
140 bfin_write_SDH_DATA_TIMER(0xFFFF);
141 SSYNC();
142
143 if (data->flags & MMC_DATA_READ) {
144 host->dma_dir = DMA_FROM_DEVICE;
145 dma_cfg |= WNR;
146 } else
147 host->dma_dir = DMA_TO_DEVICE;
148
149 sdh_enable_stat_irq(host, (DAT_CRC_FAIL | DAT_TIME_OUT | DAT_END));
150 host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir);
151#if defined(CONFIG_BF54x)
152 dma_cfg |= DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_32 | DMAEN;
153 {
154 int i;
155 for_each_sg(data->sg, sg, host->dma_len, i) {
156 host->sg_cpu[i].start_addr = sg_dma_address(sg);
157 host->sg_cpu[i].cfg = dma_cfg;
158 host->sg_cpu[i].x_count = sg_dma_len(sg) / 4;
159 host->sg_cpu[i].x_modify = 4;
160 dev_dbg(mmc_dev(host->mmc), "%d: start_addr:0x%lx, "
161 "cfg:0x%x, x_count:0x%x, x_modify:0x%x\n",
162 i, host->sg_cpu[i].start_addr,
163 host->sg_cpu[i].cfg, host->sg_cpu[i].x_count,
164 host->sg_cpu[i].x_modify);
165 }
166 }
167 flush_dcache_range((unsigned int)host->sg_cpu,
168 (unsigned int)host->sg_cpu +
169 host->dma_len * sizeof(struct dma_desc_array));
170 /* Set the last descriptor to stop mode */
171 host->sg_cpu[host->dma_len - 1].cfg &= ~(DMAFLOW | NDSIZE);
172 host->sg_cpu[host->dma_len - 1].cfg |= DI_EN;
173
174 set_dma_curr_desc_addr(host->dma_ch, (unsigned long *)host->sg_dma);
175 set_dma_x_count(host->dma_ch, 0);
176 set_dma_x_modify(host->dma_ch, 0);
177 set_dma_config(host->dma_ch, dma_cfg);
178#elif defined(CONFIG_BF51x)
179 /* RSI DMA doesn't work in array mode */
180 dma_cfg |= WDSIZE_32 | DMAEN;
181 set_dma_start_addr(host->dma_ch, sg_dma_address(&data->sg[0]));
182 set_dma_x_count(host->dma_ch, length / 4);
183 set_dma_x_modify(host->dma_ch, 4);
184 set_dma_config(host->dma_ch, dma_cfg);
185#endif
186 bfin_write_SDH_DATA_CTL(bfin_read_SDH_DATA_CTL() | DTX_DMA_E | DTX_E);
187
188 SSYNC();
189
190 dev_dbg(mmc_dev(host->mmc), "%s exit\n", __func__);
191 return 0;
192}
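On BF54x, sdh_setup_data() above flattens the scatterlist into an array of struct dma_desc_array entries, one per segment, each transferring 32-bit words (x_modify = 4, x_count = length / 4), with the last entry switched to stop mode plus interrupt enable. Below is a rough userspace sketch of that layout; the addresses, lengths and flag values are made up for illustration only.

#include <stdio.h>

/* Mirrors the shape of struct dma_desc_array; the flag values are
 * placeholders standing in for DMAFLOW_ARRAY | NDSIZE_5 and DI_EN. */
struct toy_desc {
	unsigned long start_addr;
	unsigned short cfg;
	unsigned short x_count;
	short x_modify;
};

#define TOY_FLOW_ARRAY	0x4000
#define TOY_DI_EN	0x0080

int main(void)
{
	/* assumed scatterlist: two segments of 512 and 1024 bytes */
	unsigned long addr[] = { 0x10000000UL, 0x10001000UL };
	unsigned int len[] = { 512, 1024 };
	struct toy_desc desc[2];
	int i, n = 2;

	for (i = 0; i < n; i++) {
		desc[i].start_addr = addr[i];
		desc[i].cfg = TOY_FLOW_ARRAY;
		desc[i].x_count = len[i] / 4;	/* 32-bit units */
		desc[i].x_modify = 4;
	}
	/* last descriptor: stop mode + interrupt, as in sdh_setup_data() */
	desc[n - 1].cfg &= ~TOY_FLOW_ARRAY;
	desc[n - 1].cfg |= TOY_DI_EN;

	for (i = 0; i < n; i++)
		printf("desc %d: addr 0x%lx, x_count %u, x_modify %d, cfg 0x%x\n",
		       i, desc[i].start_addr, (unsigned)desc[i].x_count,
		       (int)desc[i].x_modify, (unsigned)desc[i].cfg);
	return 0;
}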
193
194static void sdh_start_cmd(struct sdh_host *host, struct mmc_command *cmd)
195{
196 unsigned int sdh_cmd;
197 unsigned int stat_mask;
198
199 dev_dbg(mmc_dev(host->mmc), "%s enter cmd: 0x%p\n", __func__, cmd);
200 WARN_ON(host->cmd != NULL);
201 host->cmd = cmd;
202
203 sdh_cmd = 0;
204 stat_mask = 0;
205
206 sdh_cmd |= cmd->opcode;
207
208 if (cmd->flags & MMC_RSP_PRESENT) {
209 sdh_cmd |= CMD_RSP;
210 stat_mask |= CMD_RESP_END;
211 } else {
212 stat_mask |= CMD_SENT;
213 }
214
215 if (cmd->flags & MMC_RSP_136)
216 sdh_cmd |= CMD_L_RSP;
217
218 stat_mask |= CMD_CRC_FAIL | CMD_TIME_OUT;
219
220 sdh_enable_stat_irq(host, stat_mask);
221
222 bfin_write_SDH_ARGUMENT(cmd->arg);
223 bfin_write_SDH_COMMAND(sdh_cmd | CMD_E);
224 bfin_write_SDH_CLK_CTL(bfin_read_SDH_CLK_CTL() | CLK_E);
225 SSYNC();
226}
227
228static void sdh_finish_request(struct sdh_host *host, struct mmc_request *mrq)
229{
230 dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__);
231 host->mrq = NULL;
232 host->cmd = NULL;
233 host->data = NULL;
234 mmc_request_done(host->mmc, mrq);
235}
236
237static int sdh_cmd_done(struct sdh_host *host, unsigned int stat)
238{
239 struct mmc_command *cmd = host->cmd;
240 int ret = 0;
241
242 dev_dbg(mmc_dev(host->mmc), "%s enter cmd: %p\n", __func__, cmd);
243 if (!cmd)
244 return 0;
245
246 host->cmd = NULL;
247
248 if (cmd->flags & MMC_RSP_PRESENT) {
249 cmd->resp[0] = bfin_read_SDH_RESPONSE0();
250 if (cmd->flags & MMC_RSP_136) {
251 cmd->resp[1] = bfin_read_SDH_RESPONSE1();
252 cmd->resp[2] = bfin_read_SDH_RESPONSE2();
253 cmd->resp[3] = bfin_read_SDH_RESPONSE3();
254 }
255 }
256 if (stat & CMD_TIME_OUT)
257 cmd->error = -ETIMEDOUT;
258 else if (stat & CMD_CRC_FAIL && cmd->flags & MMC_RSP_CRC)
259 cmd->error = -EILSEQ;
260
261 sdh_disable_stat_irq(host, (CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL));
262
263 if (host->data && !cmd->error) {
264 if (host->data->flags & MMC_DATA_WRITE) {
265 ret = sdh_setup_data(host, host->data);
266 if (ret)
267 return 0;
268 }
269
270 sdh_enable_stat_irq(host, DAT_END | RX_OVERRUN | TX_UNDERRUN | DAT_TIME_OUT);
271 } else
272 sdh_finish_request(host, host->mrq);
273
274 return 1;
275}
276
277static int sdh_data_done(struct sdh_host *host, unsigned int stat)
278{
279 struct mmc_data *data = host->data;
280
281 dev_dbg(mmc_dev(host->mmc), "%s enter stat: 0x%x\n", __func__, stat);
282 if (!data)
283 return 0;
284
285 disable_dma(host->dma_ch);
286 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
287 host->dma_dir);
288
289 if (stat & DAT_TIME_OUT)
290 data->error = -ETIMEDOUT;
291 else if (stat & DAT_CRC_FAIL)
292 data->error = -EILSEQ;
293 else if (stat & (RX_OVERRUN | TX_UNDERRUN))
294 data->error = -EIO;
295
296 if (!data->error)
297 data->bytes_xfered = data->blocks * data->blksz;
298 else
299 data->bytes_xfered = 0;
300
301 sdh_disable_stat_irq(host, DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN);
302 bfin_write_SDH_STATUS_CLR(DAT_END_STAT | DAT_TIMEOUT_STAT | \
303 DAT_CRC_FAIL_STAT | DAT_BLK_END_STAT | RX_OVERRUN | TX_UNDERRUN);
304 bfin_write_SDH_DATA_CTL(0);
305 SSYNC();
306
307 host->data = NULL;
308 if (host->mrq->stop) {
309 sdh_stop_clock(host);
310 sdh_start_cmd(host, host->mrq->stop);
311 } else {
312 sdh_finish_request(host, host->mrq);
313 }
314
315 return 1;
316}
317
318static void sdh_request(struct mmc_host *mmc, struct mmc_request *mrq)
319{
320 struct sdh_host *host = mmc_priv(mmc);
321 int ret = 0;
322
323 dev_dbg(mmc_dev(host->mmc), "%s enter, mrp:%p, cmd:%p\n", __func__, mrq, mrq->cmd);
324 WARN_ON(host->mrq != NULL);
325
326 host->mrq = mrq;
327 host->data = mrq->data;
328
329 if (mrq->data && mrq->data->flags & MMC_DATA_READ) {
330 ret = sdh_setup_data(host, mrq->data);
331 if (ret)
332 return;
333 }
334
335 sdh_start_cmd(host, mrq->cmd);
336}
337
338static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
339{
340 struct sdh_host *host;
341 unsigned long flags;
342 u16 clk_ctl = 0;
343 u16 pwr_ctl = 0;
344 u16 cfg;
345 host = mmc_priv(mmc);
346
347 spin_lock_irqsave(&host->lock, flags);
348 if (ios->clock) {
349 unsigned long sys_clk, ios_clk;
350 unsigned char clk_div;
351 ios_clk = 2 * ios->clock;
352 sys_clk = get_sclk();
353 clk_div = sys_clk / ios_clk;
354 if (sys_clk % ios_clk == 0)
355 clk_div -= 1;
356 clk_div = min_t(unsigned char, clk_div, 0xFF);
357 clk_ctl |= clk_div;
358 clk_ctl |= CLK_E;
359 host->clk_div = clk_div;
360 } else
361 sdh_stop_clock(host);
362
363 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
364#ifdef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
365 pwr_ctl |= ROD_CTL;
366#else
367 pwr_ctl |= SD_CMD_OD | ROD_CTL;
368#endif
369
370 if (ios->bus_width == MMC_BUS_WIDTH_4) {
371 cfg = bfin_read_SDH_CFG();
372 cfg &= ~PD_SDDAT3;
373 cfg |= PUP_SDDAT3;
374 /* Enable 4 bit SDIO */
375 cfg |= (SD4E | MWE);
376 bfin_write_SDH_CFG(cfg);
377 clk_ctl |= WIDE_BUS;
378 } else {
379 cfg = bfin_read_SDH_CFG();
380 cfg |= MWE;
381 bfin_write_SDH_CFG(cfg);
382 }
383
384 bfin_write_SDH_CLK_CTL(clk_ctl);
385
386 host->power_mode = ios->power_mode;
387 if (ios->power_mode == MMC_POWER_ON)
388 pwr_ctl |= PWR_ON;
389
390 bfin_write_SDH_PWR_CTL(pwr_ctl);
391 SSYNC();
392
393 spin_unlock_irqrestore(&host->lock, flags);
394
395 dev_dbg(mmc_dev(host->mmc), "SDH: clk_div = 0x%x actual clock:%ld expected clock:%d\n",
396 host->clk_div,
397 host->clk_div ? get_sclk() / (2 * (host->clk_div + 1)) : 0,
398 ios->clock);
399}
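The divider arithmetic in sdh_set_ios() is easy to check in isolation: the card clock ends up as sys_clk / (2 * (clk_div + 1)), so the divider is sys_clk / (2 * ios->clock), minus one when that division is exact. A small standalone check follows; the 133 MHz system clock is an assumed example value, the driver reads the real rate with get_sclk().

#include <stdio.h>

/* Divider arithmetic as in sdh_set_ios() above; example inputs only. */
static unsigned long sdh_clk_div(unsigned long sys_clk, unsigned long want)
{
	unsigned long ios_clk = 2 * want;
	unsigned long div = sys_clk / ios_clk;

	if (sys_clk % ios_clk == 0)
		div -= 1;
	if (div > 0xFF)			/* CLK_CTL divider field is 8 bits */
		div = 0xFF;
	return div;
}

int main(void)
{
	unsigned long sys_clk = 133000000;	/* assumed SCLK */
	unsigned long want = 25000000;		/* requested card clock */
	unsigned long div = sdh_clk_div(sys_clk, want);

	/* prints: clk_div = 2, actual clock = 22166666 Hz */
	printf("clk_div = %lu, actual clock = %lu Hz\n",
	       div, sys_clk / (2 * (div + 1)));
	return 0;
}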
400
401static const struct mmc_host_ops sdh_ops = {
402 .request = sdh_request,
403 .set_ios = sdh_set_ios,
404};
405
406static irqreturn_t sdh_dma_irq(int irq, void *devid)
407{
408 struct sdh_host *host = devid;
409
410 dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04x\n", __func__,
411 get_dma_curr_irqstat(host->dma_ch));
412 clear_dma_irqstat(host->dma_ch);
413 SSYNC();
414
415 return IRQ_HANDLED;
416}
417
418static irqreturn_t sdh_stat_irq(int irq, void *devid)
419{
420 struct sdh_host *host = devid;
421 unsigned int status;
422 int handled = 0;
423
424 dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__);
425 status = bfin_read_SDH_E_STATUS();
426 if (status & SD_CARD_DET) {
427 mmc_detect_change(host->mmc, 0);
428 bfin_write_SDH_E_STATUS(SD_CARD_DET);
429 }
430 status = bfin_read_SDH_STATUS();
431 if (status & (CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL)) {
432 handled |= sdh_cmd_done(host, status);
433 bfin_write_SDH_STATUS_CLR(CMD_SENT_STAT | CMD_RESP_END_STAT | \
434 CMD_TIMEOUT_STAT | CMD_CRC_FAIL_STAT);
435 SSYNC();
436 }
437
438 status = bfin_read_SDH_STATUS();
439 if (status & (DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN))
440 handled |= sdh_data_done(host, status);
441
442 dev_dbg(mmc_dev(host->mmc), "%s exit\n\n", __func__);
443
444 return IRQ_RETVAL(handled);
445}
446
447static int __devinit sdh_probe(struct platform_device *pdev)
448{
449 struct mmc_host *mmc;
450 struct sdh_host *host;
451 struct bfin_sd_host *drv_data = get_sdh_data(pdev);
452 int ret;
453
454 if (!drv_data) {
455 dev_err(&pdev->dev, "missing platform driver data\n");
456 ret = -EINVAL;
457 goto out;
458 }
459
460 mmc = mmc_alloc_host(sizeof(*mmc), &pdev->dev);
461 if (!mmc) {
462 ret = -ENOMEM;
463 goto out;
464 }
465
466 mmc->ops = &sdh_ops;
467 mmc->max_phys_segs = 32;
468 mmc->max_seg_size = 1 << 16;
469 mmc->max_blk_size = 1 << 11;
470 mmc->max_blk_count = 1 << 11;
471 mmc->max_req_size = PAGE_SIZE;
472 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
473 mmc->f_max = get_sclk();
474 mmc->f_min = mmc->f_max >> 9;
475 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_NEEDS_POLL;
476 host = mmc_priv(mmc);
477 host->mmc = mmc;
478
479 spin_lock_init(&host->lock);
480 host->irq = drv_data->irq_int0;
481 host->dma_ch = drv_data->dma_chan;
482
483 ret = request_dma(host->dma_ch, DRIVER_NAME "DMA");
484 if (ret) {
485 dev_err(&pdev->dev, "unable to request DMA channel\n");
486 goto out1;
487 }
488
489 ret = set_dma_callback(host->dma_ch, sdh_dma_irq, host);
490 if (ret) {
491 dev_err(&pdev->dev, "unable to request DMA irq\n");
492 goto out2;
493 }
494
495 host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
496 if (host->sg_cpu == NULL) {
497 ret = -ENOMEM;
498 goto out2;
499 }
500
501 platform_set_drvdata(pdev, mmc);
502 mmc_add_host(mmc);
503
504 ret = request_irq(host->irq, sdh_stat_irq, 0, "SDH Status IRQ", host);
505 if (ret) {
506 dev_err(&pdev->dev, "unable to request status irq\n");
507 goto out3;
508 }
509
510 ret = peripheral_request_list(drv_data->pin_req, DRIVER_NAME);
511 if (ret) {
512 dev_err(&pdev->dev, "unable to request peripheral pins\n");
513 goto out4;
514 }
515#if defined(CONFIG_BF54x)
516 /* Secure Digital Host shares DMA with Nand controller */
517 bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1);
518#endif
519
520 bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN);
521 SSYNC();
522
523 /* Disable the card-insert detection pin; MMC_CAP_NEEDS_POLL is set
524 * above, so the mmc stack will do the detection by polling.
525 */
526 bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3));
527 SSYNC();
528
529 return 0;
530
531out4:
532 free_irq(host->irq, host);
533out3:
534 mmc_remove_host(mmc);
535 dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
536out2:
537 free_dma(host->dma_ch);
538out1:
539 mmc_free_host(mmc);
540 out:
541 return ret;
542}
543
544static int __devexit sdh_remove(struct platform_device *pdev)
545{
546 struct mmc_host *mmc = platform_get_drvdata(pdev);
547
548 platform_set_drvdata(pdev, NULL);
549
550 if (mmc) {
551 struct sdh_host *host = mmc_priv(mmc);
552
553 mmc_remove_host(mmc);
554
555 sdh_stop_clock(host);
556 free_irq(host->irq, host);
557 free_dma(host->dma_ch);
558 dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
559
560 mmc_free_host(mmc);
561 }
562
563 return 0;
564}
565
566#ifdef CONFIG_PM
567static int sdh_suspend(struct platform_device *dev, pm_message_t state)
568{
569 struct mmc_host *mmc = platform_get_drvdata(dev);
570 struct bfin_sd_host *drv_data = get_sdh_data(dev);
571 int ret = 0;
572
573 if (mmc)
574 ret = mmc_suspend_host(mmc, state);
575
576 bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON);
577 peripheral_free_list(drv_data->pin_req);
578
579 return ret;
580}
581
582static int sdh_resume(struct platform_device *dev)
583{
584 struct mmc_host *mmc = platform_get_drvdata(dev);
585 struct bfin_sd_host *drv_data = get_sdh_data(dev);
586 int ret = 0;
587
588 ret = peripheral_request_list(drv_data->pin_req, DRIVER_NAME);
589 if (ret) {
590 dev_err(&dev->dev, "unable to request peripheral pins\n");
591 return ret;
592 }
593
594 bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() | PWR_ON);
595#if defined(CONFIG_BF54x)
596 /* Secure Digital Host shares DMA with Nand controller */
597 bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1);
598#endif
599 bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN);
600 SSYNC();
601
602 bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3));
603 SSYNC();
604
605 if (mmc)
606 ret = mmc_resume_host(mmc);
607
608 return ret;
609}
610#else
611# define sdh_suspend NULL
612# define sdh_resume NULL
613#endif
614
615static struct platform_driver sdh_driver = {
616 .probe = sdh_probe,
617 .remove = __devexit_p(sdh_remove),
618 .suspend = sdh_suspend,
619 .resume = sdh_resume,
620 .driver = {
621 .name = DRIVER_NAME,
622 },
623};
624
625static int __init sdh_init(void)
626{
627 return platform_driver_register(&sdh_driver);
628}
629module_init(sdh_init);
630
631static void __exit sdh_exit(void)
632{
633 platform_driver_unregister(&sdh_driver);
634}
635module_exit(sdh_exit);
636
637MODULE_DESCRIPTION("Blackfin Secure Digital Host Driver");
638MODULE_AUTHOR("Cliff Cai, Roy Huang");
639MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
new file mode 100644
index 000000000000..dd45e7c3517e
--- /dev/null
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -0,0 +1,1349 @@
1/*
2 * davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver
3 *
4 * Copyright (C) 2006 Texas Instruments.
5 * Original author: Purushotam Kumar
6 * Copyright (C) 2009 David Brownell
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/module.h>
24#include <linux/ioport.h>
25#include <linux/platform_device.h>
26#include <linux/clk.h>
27#include <linux/err.h>
28#include <linux/cpufreq.h>
29#include <linux/mmc/host.h>
30#include <linux/io.h>
31#include <linux/irq.h>
32#include <linux/delay.h>
33#include <linux/dma-mapping.h>
34#include <linux/mmc/mmc.h>
35
36#include <mach/mmc.h>
37#include <mach/edma.h>
38
39/*
40 * Register Definitions
41 */
42#define DAVINCI_MMCCTL 0x00 /* Control Register */
43#define DAVINCI_MMCCLK 0x04 /* Memory Clock Control Register */
44#define DAVINCI_MMCST0 0x08 /* Status Register 0 */
45#define DAVINCI_MMCST1 0x0C /* Status Register 1 */
46#define DAVINCI_MMCIM 0x10 /* Interrupt Mask Register */
47#define DAVINCI_MMCTOR 0x14 /* Response Time-Out Register */
48#define DAVINCI_MMCTOD 0x18 /* Data Read Time-Out Register */
49#define DAVINCI_MMCBLEN 0x1C /* Block Length Register */
50#define DAVINCI_MMCNBLK 0x20 /* Number of Blocks Register */
51#define DAVINCI_MMCNBLC 0x24 /* Number of Blocks Counter Register */
52#define DAVINCI_MMCDRR 0x28 /* Data Receive Register */
53#define DAVINCI_MMCDXR 0x2C /* Data Transmit Register */
54#define DAVINCI_MMCCMD 0x30 /* Command Register */
55#define DAVINCI_MMCARGHL 0x34 /* Argument Register */
56#define DAVINCI_MMCRSP01 0x38 /* Response Register 0 and 1 */
57#define DAVINCI_MMCRSP23 0x3C /* Response Register 2 and 3 */
58#define DAVINCI_MMCRSP45 0x40 /* Response Register 4 and 5 */
59#define DAVINCI_MMCRSP67 0x44 /* Response Register 6 and 7 */
60#define DAVINCI_MMCDRSP 0x48 /* Data Response Register */
61#define DAVINCI_MMCETOK 0x4C
62#define DAVINCI_MMCCIDX 0x50 /* Command Index Register */
63#define DAVINCI_MMCCKC 0x54
64#define DAVINCI_MMCTORC 0x58
65#define DAVINCI_MMCTODC 0x5C
66#define DAVINCI_MMCBLNC 0x60
67#define DAVINCI_SDIOCTL 0x64
68#define DAVINCI_SDIOST0 0x68
69#define DAVINCI_SDIOEN 0x6C
70#define DAVINCI_SDIOST 0x70
71#define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */
72
73/* DAVINCI_MMCCTL definitions */
74#define MMCCTL_DATRST (1 << 0)
75#define MMCCTL_CMDRST (1 << 1)
76#define MMCCTL_WIDTH_4_BIT (1 << 2)
77#define MMCCTL_DATEG_DISABLED (0 << 6)
78#define MMCCTL_DATEG_RISING (1 << 6)
79#define MMCCTL_DATEG_FALLING (2 << 6)
80#define MMCCTL_DATEG_BOTH (3 << 6)
81#define MMCCTL_PERMDR_LE (0 << 9)
82#define MMCCTL_PERMDR_BE (1 << 9)
83#define MMCCTL_PERMDX_LE (0 << 10)
84#define MMCCTL_PERMDX_BE (1 << 10)
85
86/* DAVINCI_MMCCLK definitions */
87#define MMCCLK_CLKEN (1 << 8)
88#define MMCCLK_CLKRT_MASK (0xFF << 0)
89
90/* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */
91#define MMCST0_DATDNE BIT(0) /* data done */
92#define MMCST0_BSYDNE BIT(1) /* busy done */
93#define MMCST0_RSPDNE BIT(2) /* command done */
94#define MMCST0_TOUTRD BIT(3) /* data read timeout */
95#define MMCST0_TOUTRS BIT(4) /* command response timeout */
96#define MMCST0_CRCWR BIT(5) /* data write CRC error */
97#define MMCST0_CRCRD BIT(6) /* data read CRC error */
98#define MMCST0_CRCRS BIT(7) /* command response CRC error */
99#define MMCST0_DXRDY BIT(9) /* data transmit ready (fifo empty) */
100#define MMCST0_DRRDY BIT(10) /* data receive ready (data in fifo)*/
101#define MMCST0_DATED BIT(11) /* DAT3 edge detect */
102#define MMCST0_TRNDNE BIT(12) /* transfer done */
103
104/* DAVINCI_MMCST1 definitions */
105#define MMCST1_BUSY (1 << 0)
106
107/* DAVINCI_MMCCMD definitions */
108#define MMCCMD_CMD_MASK (0x3F << 0)
109#define MMCCMD_PPLEN (1 << 7)
110#define MMCCMD_BSYEXP (1 << 8)
111#define MMCCMD_RSPFMT_MASK (3 << 9)
112#define MMCCMD_RSPFMT_NONE (0 << 9)
113#define MMCCMD_RSPFMT_R1456 (1 << 9)
114#define MMCCMD_RSPFMT_R2 (2 << 9)
115#define MMCCMD_RSPFMT_R3 (3 << 9)
116#define MMCCMD_DTRW (1 << 11)
117#define MMCCMD_STRMTP (1 << 12)
118#define MMCCMD_WDATX (1 << 13)
119#define MMCCMD_INITCK (1 << 14)
120#define MMCCMD_DCLR (1 << 15)
121#define MMCCMD_DMATRIG (1 << 16)
122
123/* DAVINCI_MMCFIFOCTL definitions */
124#define MMCFIFOCTL_FIFORST (1 << 0)
125#define MMCFIFOCTL_FIFODIR_WR (1 << 1)
126#define MMCFIFOCTL_FIFODIR_RD (0 << 1)
127#define MMCFIFOCTL_FIFOLEV (1 << 2) /* 0 = 128 bits, 1 = 256 bits */
128#define MMCFIFOCTL_ACCWD_4 (0 << 3) /* access width of 4 bytes */
129#define MMCFIFOCTL_ACCWD_3 (1 << 3) /* access width of 3 bytes */
130#define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */
131#define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */
132
133
134/* MMCSD Init clock in Hz in opendrain mode */
135#define MMCSD_INIT_CLOCK 200000
136
137/*
138 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
139 * and we handle up to NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
140 * for drivers with max_hw_segs == 1, making the segments bigger (64KB)
141 * than the page or two that's otherwise typical. NR_SG == 16 gives at
142 * least the same throughput boost, using EDMA transfer linkage instead
143 * of spending CPU time copying pages.
144 */
145#define MAX_CCNT ((1 << 16) - 1)
146
147#define NR_SG 16
148
149static unsigned rw_threshold = 32;
150module_param(rw_threshold, uint, S_IRUGO);
151MODULE_PARM_DESC(rw_threshold,
152 "Read/Write threshold. Default = 32");
153
154static unsigned __initdata use_dma = 1;
155module_param(use_dma, uint, 0);
156MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1");
157
158struct mmc_davinci_host {
159 struct mmc_command *cmd;
160 struct mmc_data *data;
161 struct mmc_host *mmc;
162 struct clk *clk;
163 unsigned int mmc_input_clk;
164 void __iomem *base;
165 struct resource *mem_res;
166 int irq;
167 unsigned char bus_mode;
168
169#define DAVINCI_MMC_DATADIR_NONE 0
170#define DAVINCI_MMC_DATADIR_READ 1
171#define DAVINCI_MMC_DATADIR_WRITE 2
172 unsigned char data_dir;
173
174 /* buffer is used during PIO of one scatterlist segment, and
175 * is updated along with buffer_bytes_left. bytes_left applies
176 * to all N blocks of the PIO transfer.
177 */
178 u8 *buffer;
179 u32 buffer_bytes_left;
180 u32 bytes_left;
181
182 u32 rxdma, txdma;
183 bool use_dma;
184 bool do_dma;
185
186 /* Scatterlist DMA uses one or more parameter RAM entries:
187 * the main one (associated with rxdma or txdma) plus zero or
188 * more links. The entries for a given transfer differ only
189 * by memory buffer (address, length) and link field.
190 */
191 struct edmacc_param tx_template;
192 struct edmacc_param rx_template;
193 unsigned n_link;
194 u32 links[NR_SG - 1];
195
196 /* For PIO we walk scatterlists one segment at a time. */
197 unsigned int sg_len;
198 struct scatterlist *sg;
199
200 /* Version of the MMC/SD controller */
201 u8 version;
202 /* for ns in one cycle calculation */
203 unsigned ns_in_one_cycle;
204#ifdef CONFIG_CPU_FREQ
205 struct notifier_block freq_transition;
206#endif
207};
208
209
210/* PIO only */
211static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
212{
213 host->buffer_bytes_left = sg_dma_len(host->sg);
214 host->buffer = sg_virt(host->sg);
215 if (host->buffer_bytes_left > host->bytes_left)
216 host->buffer_bytes_left = host->bytes_left;
217}
218
219static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
220 unsigned int n)
221{
222 u8 *p;
223 unsigned int i;
224
225 if (host->buffer_bytes_left == 0) {
226 host->sg = sg_next(host->data->sg);
227 mmc_davinci_sg_to_buf(host);
228 }
229
230 p = host->buffer;
231 if (n > host->buffer_bytes_left)
232 n = host->buffer_bytes_left;
233 host->buffer_bytes_left -= n;
234 host->bytes_left -= n;
235
236 /* NOTE: we never transfer more than rw_threshold bytes
237 * to/from the fifo here; there's no I/O overlap.
238 * This also assumes that the access width (i.e. ACCWD) is 4 bytes.
239 */
240 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
241 for (i = 0; i < (n >> 2); i++) {
242 writel(*((u32 *)p), host->base + DAVINCI_MMCDXR);
243 p = p + 4;
244 }
245 if (n & 3) {
246 iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3));
247 p = p + (n & 3);
248 }
249 } else {
250 for (i = 0; i < (n >> 2); i++) {
251 *((u32 *)p) = readl(host->base + DAVINCI_MMCDRR);
252 p = p + 4;
253 }
254 if (n & 3) {
255 ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3));
256 p = p + (n & 3);
257 }
258 }
259 host->buffer = p;
260}
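In other words, davinci_fifo_data_trans() moves n bytes as n >> 2 word accesses plus n & 3 trailing byte accesses: the default rw_threshold of 32 is exactly eight 32-bit accesses, while an odd tail such as n = 30 would be seven words followed by two single bytes.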
261
262static void mmc_davinci_start_command(struct mmc_davinci_host *host,
263 struct mmc_command *cmd)
264{
265 u32 cmd_reg = 0;
266 u32 im_val;
267
268 dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n",
269 cmd->opcode, cmd->arg,
270 ({ char *s;
271 switch (mmc_resp_type(cmd)) {
272 case MMC_RSP_R1:
273 s = ", R1/R5/R6/R7 response";
274 break;
275 case MMC_RSP_R1B:
276 s = ", R1b response";
277 break;
278 case MMC_RSP_R2:
279 s = ", R2 response";
280 break;
281 case MMC_RSP_R3:
282 s = ", R3/R4 response";
283 break;
284 default:
285 s = ", (R? response)";
286 break;
287 }; s; }));
288 host->cmd = cmd;
289
290 switch (mmc_resp_type(cmd)) {
291 case MMC_RSP_R1B:
292 /* There's some spec confusion about when R1B is
293 * allowed, but if the card doesn't issue a BUSY
294 * then it's harmless for us to allow it.
295 */
296 cmd_reg |= MMCCMD_BSYEXP;
297 /* FALLTHROUGH */
298 case MMC_RSP_R1: /* 48 bits, CRC */
299 cmd_reg |= MMCCMD_RSPFMT_R1456;
300 break;
301 case MMC_RSP_R2: /* 136 bits, CRC */
302 cmd_reg |= MMCCMD_RSPFMT_R2;
303 break;
304 case MMC_RSP_R3: /* 48 bits, no CRC */
305 cmd_reg |= MMCCMD_RSPFMT_R3;
306 break;
307 default:
308 cmd_reg |= MMCCMD_RSPFMT_NONE;
309 dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n",
310 mmc_resp_type(cmd));
311 break;
312 }
313
314 /* Set command index */
315 cmd_reg |= cmd->opcode;
316
317 /* Enable EDMA transfer triggers */
318 if (host->do_dma)
319 cmd_reg |= MMCCMD_DMATRIG;
320
321 if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL &&
322 host->data_dir == DAVINCI_MMC_DATADIR_READ)
323 cmd_reg |= MMCCMD_DMATRIG;
324
325 /* Setting whether command involves data transfer or not */
326 if (cmd->data)
327 cmd_reg |= MMCCMD_WDATX;
328
329 /* Setting whether stream or block transfer */
330 if (cmd->flags & MMC_DATA_STREAM)
331 cmd_reg |= MMCCMD_STRMTP;
332
333 /* Setting whether data read or write */
334 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
335 cmd_reg |= MMCCMD_DTRW;
336
337 if (host->bus_mode == MMC_BUSMODE_PUSHPULL)
338 cmd_reg |= MMCCMD_PPLEN;
339
340 /* set Command timeout */
341 writel(0x1FFF, host->base + DAVINCI_MMCTOR);
342
343 /* Enable interrupt (calculate here, defer until FIFO is stuffed). */
344 im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS;
345 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
346 im_val |= MMCST0_DATDNE | MMCST0_CRCWR;
347
348 if (!host->do_dma)
349 im_val |= MMCST0_DXRDY;
350 } else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) {
351 im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD;
352
353 if (!host->do_dma)
354 im_val |= MMCST0_DRRDY;
355 }
356
357 /*
358 * Before non-DMA WRITE commands the controller needs priming:
359 * FIFO should be populated with 32 bytes i.e. whatever is the FIFO size
360 */
361 if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE))
362 davinci_fifo_data_trans(host, rw_threshold);
363
364 writel(cmd->arg, host->base + DAVINCI_MMCARGHL);
365 writel(cmd_reg, host->base + DAVINCI_MMCCMD);
366 writel(im_val, host->base + DAVINCI_MMCIM);
367}
368
369/*----------------------------------------------------------------------*/
370
371/* DMA infrastructure */
372
373static void davinci_abort_dma(struct mmc_davinci_host *host)
374{
375 int sync_dev;
376
377 if (host->data_dir == DAVINCI_MMC_DATADIR_READ)
378 sync_dev = host->rxdma;
379 else
380 sync_dev = host->txdma;
381
382 edma_stop(sync_dev);
383 edma_clean_channel(sync_dev);
384}
385
386static void
387mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data);
388
389static void mmc_davinci_dma_cb(unsigned channel, u16 ch_status, void *data)
390{
391 if (DMA_COMPLETE != ch_status) {
392 struct mmc_davinci_host *host = data;
393
394 /* Currently means: DMA Event Missed, or "null" transfer
395 * request was seen. In the future, TC errors (like bad
396 * addresses) might be presented too.
397 */
398 dev_warn(mmc_dev(host->mmc), "DMA %s error\n",
399 (host->data->flags & MMC_DATA_WRITE)
400 ? "write" : "read");
401 host->data->error = -EIO;
402 mmc_davinci_xfer_done(host, host->data);
403 }
404}
405
406/* Set up tx or rx template, to be modified and updated later */
407static void __init mmc_davinci_dma_setup(struct mmc_davinci_host *host,
408 bool tx, struct edmacc_param *template)
409{
410 unsigned sync_dev;
411 const u16 acnt = 4;
412 const u16 bcnt = rw_threshold >> 2;
413 const u16 ccnt = 0;
414 u32 src_port = 0;
415 u32 dst_port = 0;
416 s16 src_bidx, dst_bidx;
417 s16 src_cidx, dst_cidx;
418
419 /*
420 * A-B Sync transfer: each DMA request is for one "frame" of
421 * rw_threshold bytes, broken into "acnt"-size chunks repeated
422 * "bcnt" times. Each segment needs "ccnt" such frames; since
423 * we tell the block layer our mmc->max_seg_size limit, we can
424 * trust (later) that it's within bounds.
425 *
426 * The FIFOs are read/written in 4-byte chunks (acnt == 4) and
427 * EDMA will optimize memory operations to use larger bursts.
428 */
429 if (tx) {
430 sync_dev = host->txdma;
431
432 /* src_prt, ccnt, and link to be set up later */
433 src_bidx = acnt;
434 src_cidx = acnt * bcnt;
435
436 dst_port = host->mem_res->start + DAVINCI_MMCDXR;
437 dst_bidx = 0;
438 dst_cidx = 0;
439 } else {
440 sync_dev = host->rxdma;
441
442 src_port = host->mem_res->start + DAVINCI_MMCDRR;
443 src_bidx = 0;
444 src_cidx = 0;
445
446 /* dst_prt, ccnt, and link to be set up later */
447 dst_bidx = acnt;
448 dst_cidx = acnt * bcnt;
449 }
450
451 /*
452 * We can't use FIFO mode for the FIFOs because MMC FIFO addresses
453 * are not 256-bit (32-byte) aligned. So we use INCR, and the W8BIT
454 * parameter is ignored.
455 */
456 edma_set_src(sync_dev, src_port, INCR, W8BIT);
457 edma_set_dest(sync_dev, dst_port, INCR, W8BIT);
458
459 edma_set_src_index(sync_dev, src_bidx, src_cidx);
460 edma_set_dest_index(sync_dev, dst_bidx, dst_cidx);
461
462 edma_set_transfer_params(sync_dev, acnt, bcnt, ccnt, 8, ABSYNC);
463
464 edma_read_slot(sync_dev, template);
465
466 /* don't bother with irqs or chaining */
467 template->opt |= EDMA_CHAN_SLOT(sync_dev) << 12;
468}
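Plugging in the default rw_threshold of 32: acnt = 4 and bcnt = 32 >> 2 = 8, so each A-B synchronized EDMA frame moves one 32-byte FIFO's worth of data. The per-segment ccnt is filled in later, when mmc_davinci_send_dma_request() sets it to count >> (ffs(rw_threshold) - 1); a 64 KiB scatterlist segment therefore becomes 65536 / 32 = 2048 frames.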
469
470static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
471 struct mmc_data *data)
472{
473 struct edmacc_param *template;
474 int channel, slot;
475 unsigned link;
476 struct scatterlist *sg;
477 unsigned sg_len;
478 unsigned bytes_left = host->bytes_left;
479 const unsigned shift = ffs(rw_threshold) - 1;
480
481 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
482 template = &host->tx_template;
483 channel = host->txdma;
484 } else {
485 template = &host->rx_template;
486 channel = host->rxdma;
487 }
488
489 /* We know sg_len and ccnt will never be out of range because
490 * we told the mmc layer which in turn tells the block layer
491 * to ensure that it only hands us one scatterlist segment
492 * per EDMA PARAM entry. Update the PARAM
493 * entries needed for each segment of this scatterlist.
494 */
495 for (slot = channel, link = 0, sg = data->sg, sg_len = host->sg_len;
496 sg_len-- != 0 && bytes_left;
497 sg = sg_next(sg), slot = host->links[link++]) {
498 u32 buf = sg_dma_address(sg);
499 unsigned count = sg_dma_len(sg);
500
501 template->link_bcntrld = sg_len
502 ? (EDMA_CHAN_SLOT(host->links[link]) << 5)
503 : 0xffff;
504
505 if (count > bytes_left)
506 count = bytes_left;
507 bytes_left -= count;
508
509 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
510 template->src = buf;
511 else
512 template->dst = buf;
513 template->ccnt = count >> shift;
514
515 edma_write_slot(slot, template);
516 }
517
518 if (host->version == MMC_CTLR_VERSION_2)
519 edma_clear_event(channel);
520
521 edma_start(channel);
522}
523
524static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
525 struct mmc_data *data)
526{
527 int i;
528 int mask = rw_threshold - 1;
529
530 host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
531 ((data->flags & MMC_DATA_WRITE)
532 ? DMA_TO_DEVICE
533 : DMA_FROM_DEVICE));
534
535 /* no individual DMA segment should need a partial FIFO */
536 for (i = 0; i < host->sg_len; i++) {
537 if (sg_dma_len(data->sg + i) & mask) {
538 dma_unmap_sg(mmc_dev(host->mmc),
539 data->sg, data->sg_len,
540 (data->flags & MMC_DATA_WRITE)
541 ? DMA_TO_DEVICE
542 : DMA_FROM_DEVICE);
543 return -1;
544 }
545 }
546
547 host->do_dma = 1;
548 mmc_davinci_send_dma_request(host, data);
549
550 return 0;
551}
552
553static void __init_or_module
554davinci_release_dma_channels(struct mmc_davinci_host *host)
555{
556 unsigned i;
557
558 if (!host->use_dma)
559 return;
560
561 for (i = 0; i < host->n_link; i++)
562 edma_free_slot(host->links[i]);
563
564 edma_free_channel(host->txdma);
565 edma_free_channel(host->rxdma);
566}
567
568static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
569{
570 int r, i;
571
572 /* Acquire master DMA write channel */
573 r = edma_alloc_channel(host->txdma, mmc_davinci_dma_cb, host,
574 EVENTQ_DEFAULT);
575 if (r < 0) {
576 dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n",
577 "tx", r);
578 return r;
579 }
580 mmc_davinci_dma_setup(host, true, &host->tx_template);
581
582 /* Acquire master DMA read channel */
583 r = edma_alloc_channel(host->rxdma, mmc_davinci_dma_cb, host,
584 EVENTQ_DEFAULT);
585 if (r < 0) {
586 dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n",
587 "rx", r);
588 goto free_master_write;
589 }
590 mmc_davinci_dma_setup(host, false, &host->rx_template);
591
592 /* Allocate parameter RAM slots, which will later be bound to a
593 * channel as needed to handle a scatterlist.
594 */
595 for (i = 0; i < ARRAY_SIZE(host->links); i++) {
596 r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY);
597 if (r < 0) {
598 dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n",
599 r);
600 break;
601 }
602 host->links[i] = r;
603 }
604 host->n_link = i;
605
606 return 0;
607
608free_master_write:
609 edma_free_channel(host->txdma);
610
611 return r;
612}
613
614/*----------------------------------------------------------------------*/
615
616static void
617mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
618{
619 int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0;
620 int timeout;
621 struct mmc_data *data = req->data;
622
623 if (host->version == MMC_CTLR_VERSION_2)
624 fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0;
625
626 host->data = data;
627 if (data == NULL) {
628 host->data_dir = DAVINCI_MMC_DATADIR_NONE;
629 writel(0, host->base + DAVINCI_MMCBLEN);
630 writel(0, host->base + DAVINCI_MMCNBLK);
631 return;
632 }
633
634 dev_dbg(mmc_dev(host->mmc), "%s %s, %d blocks of %d bytes\n",
635 (data->flags & MMC_DATA_STREAM) ? "stream" : "block",
636 (data->flags & MMC_DATA_WRITE) ? "write" : "read",
637 data->blocks, data->blksz);
638 dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n",
639 data->timeout_clks, data->timeout_ns);
640 timeout = data->timeout_clks +
641 (data->timeout_ns / host->ns_in_one_cycle);
642 if (timeout > 0xffff)
643 timeout = 0xffff;
644
645 writel(timeout, host->base + DAVINCI_MMCTOD);
646 writel(data->blocks, host->base + DAVINCI_MMCNBLK);
647 writel(data->blksz, host->base + DAVINCI_MMCBLEN);
648
649 /* Configure the FIFO */
650 switch (data->flags & MMC_DATA_WRITE) {
651 case MMC_DATA_WRITE:
652 host->data_dir = DAVINCI_MMC_DATADIR_WRITE;
653 writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST,
654 host->base + DAVINCI_MMCFIFOCTL);
655 writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR,
656 host->base + DAVINCI_MMCFIFOCTL);
657 break;
658
659 default:
660 host->data_dir = DAVINCI_MMC_DATADIR_READ;
661 writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST,
662 host->base + DAVINCI_MMCFIFOCTL);
663 writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD,
664 host->base + DAVINCI_MMCFIFOCTL);
665 break;
666 }
667
668 host->buffer = NULL;
669 host->bytes_left = data->blocks * data->blksz;
670
671 /* For now we try to use DMA whenever we won't need partial FIFO
672 * reads or writes, either for the whole transfer (as tested here)
673 * or for any individual scatterlist segment (tested when we call
674 * start_dma_transfer).
675 *
676 * While we *could* change that, unusual block sizes are rarely
677 * used. The occasional fallback to PIO shouldn't hurt.
678 */
679 if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0
680 && mmc_davinci_start_dma_transfer(host, data) == 0) {
681 /* zero this to ensure we take no PIO paths */
682 host->bytes_left = 0;
683 } else {
684 /* Revert to CPU Copy */
685 host->sg_len = data->sg_len;
686 host->sg = host->data->sg;
687 mmc_davinci_sg_to_buf(host);
688 }
689}
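With the default 32-byte threshold this means ordinary 512-byte block transfers take the DMA path (512 is a multiple of 32, and block-aligned segments are too), while any request whose total size, or any single scatterlist segment, is not a multiple of rw_threshold quietly falls back to PIO.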
690
691static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req)
692{
693 struct mmc_davinci_host *host = mmc_priv(mmc);
694 unsigned long timeout = jiffies + msecs_to_jiffies(900);
695 u32 mmcst1 = 0;
696
697 /* Card may still be sending BUSY after a previous operation,
698 * typically some kind of write. If so, we can't proceed yet.
699 */
700 while (time_before(jiffies, timeout)) {
701 mmcst1 = readl(host->base + DAVINCI_MMCST1);
702 if (!(mmcst1 & MMCST1_BUSY))
703 break;
704 cpu_relax();
705 }
706 if (mmcst1 & MMCST1_BUSY) {
707 dev_err(mmc_dev(host->mmc), "still BUSY? bad ... \n");
708 req->cmd->error = -ETIMEDOUT;
709 mmc_request_done(mmc, req);
710 return;
711 }
712
713 host->do_dma = 0;
714 mmc_davinci_prepare_data(host, req);
715 mmc_davinci_start_command(host, req->cmd);
716}
717
718static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host,
719 unsigned int mmc_req_freq)
720{
721 unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0;
722
723 mmc_pclk = host->mmc_input_clk;
724 if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq))
725 mmc_push_pull_divisor = ((unsigned int)mmc_pclk
726 / (2 * mmc_req_freq)) - 1;
727 else
728 mmc_push_pull_divisor = 0;
729
730 mmc_freq = (unsigned int)mmc_pclk
731 / (2 * (mmc_push_pull_divisor + 1));
732
733 if (mmc_freq > mmc_req_freq)
734 mmc_push_pull_divisor = mmc_push_pull_divisor + 1;
735 /* Convert ns to clock cycles */
736 if (mmc_req_freq <= 400000)
737 host->ns_in_one_cycle = (1000000) / (((mmc_pclk
738 / (2 * (mmc_push_pull_divisor + 1)))/1000));
739 else
740 host->ns_in_one_cycle = (1000000) / (((mmc_pclk
741 / (2 * (mmc_push_pull_divisor + 1)))/1000000));
742
743 return mmc_push_pull_divisor;
744}
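The divisor logic above can be exercised on its own. In the sketch below the 75 MHz module clock is an assumed example (the driver reads the real rate with clk_get_rate() at probe time), and the ns_in_one_cycle bookkeeping is left out.

#include <stdio.h>

/* Push-pull divisor arithmetic mirroring calculate_freq_for_card(). */
static unsigned int push_pull_divisor(unsigned int pclk, unsigned int req)
{
	unsigned int div, freq;

	div = (req && pclk > 2 * req) ? pclk / (2 * req) - 1 : 0;
	freq = pclk / (2 * (div + 1));
	if (freq > req)			/* never exceed the requested rate */
		div++;
	return div;
}

int main(void)
{
	unsigned int pclk = 75000000;	/* assumed MMCSD input clock */
	unsigned int req = 25000000;	/* requested card clock */
	unsigned int div = push_pull_divisor(pclk, req);

	/* prints: divisor 1 -> card clock 18750000 Hz */
	printf("divisor %u -> card clock %u Hz\n",
	       div, pclk / (2 * (div + 1)));
	return 0;
}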
745
746static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios)
747{
748 struct mmc_davinci_host *host = mmc_priv(mmc);
749 unsigned int open_drain_freq = 0, mmc_pclk = host->mmc_input_clk;
750 unsigned int mmc_push_pull_freq = 0;
751
752 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
753 u32 temp;
754
755 /* Ignore the init clock value that was passed in; using a fixed
756 * MMCSD_INIT_CLOCK improves interoperability with different cards.
757 */
758 open_drain_freq = ((unsigned int)mmc_pclk
759 / (2 * MMCSD_INIT_CLOCK)) - 1;
760
761 if (open_drain_freq > 0xFF)
762 open_drain_freq = 0xFF;
763
764 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
765 temp |= open_drain_freq;
766 writel(temp, host->base + DAVINCI_MMCCLK);
767
768 /* Convert ns to clock cycles */
769 host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000);
770 } else {
771 u32 temp;
772 mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock);
773
774 if (mmc_push_pull_freq > 0xFF)
775 mmc_push_pull_freq = 0xFF;
776
777 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN;
778 writel(temp, host->base + DAVINCI_MMCCLK);
779
780 udelay(10);
781
782 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
783 temp |= mmc_push_pull_freq;
784 writel(temp, host->base + DAVINCI_MMCCLK);
785
786 writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);
787
788 udelay(10);
789 }
790}
791
792static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
793{
794 unsigned int mmc_pclk = 0;
795 struct mmc_davinci_host *host = mmc_priv(mmc);
796
797 mmc_pclk = host->mmc_input_clk;
798 dev_dbg(mmc_dev(host->mmc),
799 "clock %dHz busmode %d powermode %d Vdd %04x\n",
800 ios->clock, ios->bus_mode, ios->power_mode,
801 ios->vdd);
802 if (ios->bus_width == MMC_BUS_WIDTH_4) {
803 dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n");
804 writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_WIDTH_4_BIT,
805 host->base + DAVINCI_MMCCTL);
806 } else {
807 dev_dbg(mmc_dev(host->mmc), "Disabling 4 bit mode\n");
808 writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_WIDTH_4_BIT,
809 host->base + DAVINCI_MMCCTL);
810 }
811
812 calculate_clk_divider(mmc, ios);
813
814 host->bus_mode = ios->bus_mode;
815 if (ios->power_mode == MMC_POWER_UP) {
816 unsigned long timeout = jiffies + msecs_to_jiffies(50);
817 bool lose = true;
818
819 /* Send clock cycles, poll completion */
820 writel(0, host->base + DAVINCI_MMCARGHL);
821 writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD);
822 while (time_before(jiffies, timeout)) {
823 u32 tmp = readl(host->base + DAVINCI_MMCST0);
824
825 if (tmp & MMCST0_RSPDNE) {
826 lose = false;
827 break;
828 }
829 cpu_relax();
830 }
831 if (lose)
832 dev_warn(mmc_dev(host->mmc), "powerup timeout\n");
833 }
834
835 /* FIXME on power OFF, reset things ... */
836}
837
838static void
839mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
840{
841 host->data = NULL;
842
843 if (host->do_dma) {
844 davinci_abort_dma(host);
845
846 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
847 (data->flags & MMC_DATA_WRITE)
848 ? DMA_TO_DEVICE
849 : DMA_FROM_DEVICE);
850 host->do_dma = false;
851 }
852 host->data_dir = DAVINCI_MMC_DATADIR_NONE;
853
854 if (!data->stop || (host->cmd && host->cmd->error)) {
855 mmc_request_done(host->mmc, data->mrq);
856 writel(0, host->base + DAVINCI_MMCIM);
857 } else
858 mmc_davinci_start_command(host, data->stop);
859}
860
861static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
862 struct mmc_command *cmd)
863{
864 host->cmd = NULL;
865
866 if (cmd->flags & MMC_RSP_PRESENT) {
867 if (cmd->flags & MMC_RSP_136) {
868 /* response type 2 */
869 cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01);
870 cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23);
871 cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45);
872 cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
873 } else {
874 /* response types 1, 1b, 3, 4, 5, 6 */
875 cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
876 }
877 }
878
879 if (host->data == NULL || cmd->error) {
880 if (cmd->error == -ETIMEDOUT)
881 cmd->mrq->cmd->retries = 0;
882 mmc_request_done(host->mmc, cmd->mrq);
883 writel(0, host->base + DAVINCI_MMCIM);
884 }
885}
886
887static void
888davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
889{
890 u32 temp;
891
892 /* reset command and data state machines */
893 temp = readl(host->base + DAVINCI_MMCCTL);
894 writel(temp | MMCCTL_CMDRST | MMCCTL_DATRST,
895 host->base + DAVINCI_MMCCTL);
896
897 temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
898 udelay(10);
899 writel(temp, host->base + DAVINCI_MMCCTL);
900}
901
902static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
903{
904 struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
905 unsigned int status, qstatus;
906 int end_command = 0;
907 int end_transfer = 0;
908 struct mmc_data *data = host->data;
909
910 if (host->cmd == NULL && host->data == NULL) {
911 status = readl(host->base + DAVINCI_MMCST0);
912 dev_dbg(mmc_dev(host->mmc),
913 "Spurious interrupt 0x%04x\n", status);
914 /* Disable the interrupt from mmcsd */
915 writel(0, host->base + DAVINCI_MMCIM);
916 return IRQ_NONE;
917 }
918
919 status = readl(host->base + DAVINCI_MMCST0);
920 qstatus = status;
921
922 /* Handle the FIFO first when using PIO for data.
923 * bytes_left will decrease to zero as I/O progresses, and status will
924 * read as zero over the iterations, because this controller's status
925 * register (MMCST0) reports each status bit only once and is cleared
926 * by the read. So this is not an unbounded loop, even in the
927 * non-DMA case.
928 */
929 while (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) {
930 davinci_fifo_data_trans(host, rw_threshold);
931 status = readl(host->base + DAVINCI_MMCST0);
932 if (!status)
933 break;
934 qstatus |= status;
935 }
936
937 if (qstatus & MMCST0_DATDNE) {
938 /* All blocks sent/received, and CRC checks passed */
939 if (data != NULL) {
940 if ((host->do_dma == 0) && (host->bytes_left > 0)) {
941 /* if datasize < rw_threshold
942 * no RX ints are generated
943 */
944 davinci_fifo_data_trans(host, host->bytes_left);
945 }
946 end_transfer = 1;
947 data->bytes_xfered = data->blocks * data->blksz;
948 } else {
949 dev_err(mmc_dev(host->mmc),
950 "DATDNE with no host->data\n");
951 }
952 }
953
954 if (qstatus & MMCST0_TOUTRD) {
955 /* Read data timeout */
956 data->error = -ETIMEDOUT;
957 end_transfer = 1;
958
959 dev_dbg(mmc_dev(host->mmc),
960 "read data timeout, status %x\n",
961 qstatus);
962
963 davinci_abort_data(host, data);
964 }
965
966 if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) {
967 /* Data CRC error */
968 data->error = -EILSEQ;
969 end_transfer = 1;
970
971 /* NOTE: this controller uses CRCWR to report both CRC
972 * errors and timeouts (on writes). MMCDRSP values are
973 * only weakly documented, but 0x9f was clearly a timeout
974 * case and the two three-bit patterns in various SD specs
975 * (101, 010) aren't part of it ...
976 */
977 if (qstatus & MMCST0_CRCWR) {
978 u32 temp = readb(host->base + DAVINCI_MMCDRSP);
979
980 if (temp == 0x9f)
981 data->error = -ETIMEDOUT;
982 }
983 dev_dbg(mmc_dev(host->mmc), "data %s %s error\n",
984 (qstatus & MMCST0_CRCWR) ? "write" : "read",
985 (data->error == -ETIMEDOUT) ? "timeout" : "CRC");
986
987 davinci_abort_data(host, data);
988 }
989
990 if (qstatus & MMCST0_TOUTRS) {
991 /* Command timeout */
992 if (host->cmd) {
993 dev_dbg(mmc_dev(host->mmc),
994 "CMD%d timeout, status %x\n",
995 host->cmd->opcode, qstatus);
996 host->cmd->error = -ETIMEDOUT;
997 if (data) {
998 end_transfer = 1;
999 davinci_abort_data(host, data);
1000 } else
1001 end_command = 1;
1002 }
1003 }
1004
1005 if (qstatus & MMCST0_CRCRS) {
1006 /* Command CRC error */
1007 dev_dbg(mmc_dev(host->mmc), "Command CRC error\n");
1008 if (host->cmd) {
1009 host->cmd->error = -EILSEQ;
1010 end_command = 1;
1011 }
1012 }
1013
1014 if (qstatus & MMCST0_RSPDNE) {
1015 /* End of command phase */
1016 end_command = (int) host->cmd;
1017 }
1018
1019 if (end_command)
1020 mmc_davinci_cmd_done(host, host->cmd);
1021 if (end_transfer)
1022 mmc_davinci_xfer_done(host, data);
1023 return IRQ_HANDLED;
1024}
1025
1026static int mmc_davinci_get_cd(struct mmc_host *mmc)
1027{
1028 struct platform_device *pdev = to_platform_device(mmc->parent);
1029 struct davinci_mmc_config *config = pdev->dev.platform_data;
1030
1031 if (!config || !config->get_cd)
1032 return -ENOSYS;
1033 return config->get_cd(pdev->id);
1034}
1035
1036static int mmc_davinci_get_ro(struct mmc_host *mmc)
1037{
1038 struct platform_device *pdev = to_platform_device(mmc->parent);
1039 struct davinci_mmc_config *config = pdev->dev.platform_data;
1040
1041 if (!config || !config->get_ro)
1042 return -ENOSYS;
1043 return config->get_ro(pdev->id);
1044}
1045
1046static struct mmc_host_ops mmc_davinci_ops = {
1047 .request = mmc_davinci_request,
1048 .set_ios = mmc_davinci_set_ios,
1049 .get_cd = mmc_davinci_get_cd,
1050 .get_ro = mmc_davinci_get_ro,
1051};
1052
1053/*----------------------------------------------------------------------*/
1054
1055#ifdef CONFIG_CPU_FREQ
1056static int mmc_davinci_cpufreq_transition(struct notifier_block *nb,
1057 unsigned long val, void *data)
1058{
1059 struct mmc_davinci_host *host;
1060 unsigned int mmc_pclk;
1061 struct mmc_host *mmc;
1062 unsigned long flags;
1063
1064 host = container_of(nb, struct mmc_davinci_host, freq_transition);
1065 mmc = host->mmc;
1066 mmc_pclk = clk_get_rate(host->clk);
1067
1068 if (val == CPUFREQ_POSTCHANGE) {
1069 spin_lock_irqsave(&mmc->lock, flags);
1070 host->mmc_input_clk = mmc_pclk;
1071 calculate_clk_divider(mmc, &mmc->ios);
1072 spin_unlock_irqrestore(&mmc->lock, flags);
1073 }
1074
1075 return 0;
1076}
1077
1078static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
1079{
1080 host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition;
1081
1082 return cpufreq_register_notifier(&host->freq_transition,
1083 CPUFREQ_TRANSITION_NOTIFIER);
1084}
1085
1086static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
1087{
1088 cpufreq_unregister_notifier(&host->freq_transition,
1089 CPUFREQ_TRANSITION_NOTIFIER);
1090}
1091#else
1092static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
1093{
1094 return 0;
1095}
1096
1097static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
1098{
1099}
1100#endif
1101static void __init init_mmcsd_host(struct mmc_davinci_host *host)
1102{
1103 /* DAT line portion is disabled and in reset state */
1104 writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_DATRST,
1105 host->base + DAVINCI_MMCCTL);
1106
1107 /* CMD line portion is disabled and in reset state */
1108 writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_CMDRST,
1109 host->base + DAVINCI_MMCCTL);
1110
1111 udelay(10);
1112
1113 writel(0, host->base + DAVINCI_MMCCLK);
1114 writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);
1115
1116 writel(0x1FFF, host->base + DAVINCI_MMCTOR);
1117 writel(0xFFFF, host->base + DAVINCI_MMCTOD);
1118
1119 writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_DATRST,
1120 host->base + DAVINCI_MMCCTL);
1121 writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_CMDRST,
1122 host->base + DAVINCI_MMCCTL);
1123
1124 udelay(10);
1125}
1126
1127static int __init davinci_mmcsd_probe(struct platform_device *pdev)
1128{
1129 struct davinci_mmc_config *pdata = pdev->dev.platform_data;
1130 struct mmc_davinci_host *host = NULL;
1131 struct mmc_host *mmc = NULL;
1132 struct resource *r, *mem = NULL;
1133 int ret = 0, irq = 0;
1134 size_t mem_size;
1135
1136 /* REVISIT: when we're fully converted, fail if pdata is NULL */
1137
1138 ret = -ENODEV;
1139 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1140 irq = platform_get_irq(pdev, 0);
1141 if (!r || irq == NO_IRQ)
1142 goto out;
1143
1144 ret = -EBUSY;
1145 mem_size = resource_size(r);
1146 mem = request_mem_region(r->start, mem_size, pdev->name);
1147 if (!mem)
1148 goto out;
1149
1150 ret = -ENOMEM;
1151 mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev);
1152 if (!mmc)
1153 goto out;
1154
1155 host = mmc_priv(mmc);
1156 host->mmc = mmc; /* Important */
1157
1158 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1159 if (!r)
1160 goto out;
1161 host->rxdma = r->start;
1162
1163 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1164 if (!r)
1165 goto out;
1166 host->txdma = r->start;
1167
1168 host->mem_res = mem;
1169 host->base = ioremap(mem->start, mem_size);
1170 if (!host->base)
1171 goto out;
1172
1173 ret = -ENXIO;
1174 host->clk = clk_get(&pdev->dev, "MMCSDCLK");
1175 if (IS_ERR(host->clk)) {
1176 ret = PTR_ERR(host->clk);
1177 goto out;
1178 }
1179 clk_enable(host->clk);
1180 host->mmc_input_clk = clk_get_rate(host->clk);
1181
1182 init_mmcsd_host(host);
1183
1184 host->use_dma = use_dma;
1185 host->irq = irq;
1186
1187 if (host->use_dma && davinci_acquire_dma_channels(host) != 0)
1188 host->use_dma = 0;
1189
1190 /* REVISIT: someday, support IRQ-driven card detection. */
1191 mmc->caps |= MMC_CAP_NEEDS_POLL;
1192
1193 if (!pdata || pdata->wires == 4 || pdata->wires == 0)
1194 mmc->caps |= MMC_CAP_4_BIT_DATA;
1195
1196 host->version = pdata->version;
1197
1198 mmc->ops = &mmc_davinci_ops;
1199 mmc->f_min = 312500;
1200 mmc->f_max = 25000000;
1201 if (pdata && pdata->max_freq)
1202 mmc->f_max = pdata->max_freq;
1203 if (pdata && pdata->caps)
1204 mmc->caps |= pdata->caps;
1205 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1206
1207 /* With no iommu coalescing pages, each phys_seg is a hw_seg.
1208 * Each hw_seg uses one EDMA parameter RAM slot, always one
1209 * channel and then usually some linked slots.
1210 */
1211 mmc->max_hw_segs = 1 + host->n_link;
1212 mmc->max_phys_segs = mmc->max_hw_segs;
1213
1214 /* EDMA limit per hw segment (one or two MBytes) */
1215 mmc->max_seg_size = MAX_CCNT * rw_threshold;
1216
1217 /* MMC/SD controller limits for multiblock requests */
1218 mmc->max_blk_size = 4095; /* BLEN is 12 bits */
1219 mmc->max_blk_count = 65535; /* NBLK is 16 bits */
1220 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1221
1222 dev_dbg(mmc_dev(host->mmc), "max_phys_segs=%d\n", mmc->max_phys_segs);
1223 dev_dbg(mmc_dev(host->mmc), "max_hw_segs=%d\n", mmc->max_hw_segs);
1224 dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size);
1225 dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size);
1226 dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size);
1227
1228 platform_set_drvdata(pdev, host);
1229
1230 ret = mmc_davinci_cpufreq_register(host);
1231 if (ret) {
1232 dev_err(&pdev->dev, "failed to register cpufreq\n");
1233 goto cpu_freq_fail;
1234 }
1235
1236 ret = mmc_add_host(mmc);
1237 if (ret < 0)
1238 goto out;
1239
1240 ret = request_irq(irq, mmc_davinci_irq, 0, mmc_hostname(mmc), host);
1241 if (ret)
1242 goto out;
1243
1244 rename_region(mem, mmc_hostname(mmc));
1245
1246 dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
1247 host->use_dma ? "DMA" : "PIO",
1248 (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);
1249
1250 return 0;
1251
1252out:
1253 mmc_davinci_cpufreq_deregister(host);
1254cpu_freq_fail:
1255 if (host) {
1256 davinci_release_dma_channels(host);
1257
1258 if (host->clk) {
1259 clk_disable(host->clk);
1260 clk_put(host->clk);
1261 }
1262
1263 if (host->base)
1264 iounmap(host->base);
1265 }
1266
1267 if (mmc)
1268 mmc_free_host(mmc);
1269
1270 if (mem)
1271 release_resource(mem);
1272
1273 dev_dbg(&pdev->dev, "probe err %d\n", ret);
1274
1275 return ret;
1276}
1277
1278static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
1279{
1280 struct mmc_davinci_host *host = platform_get_drvdata(pdev);
1281
1282 platform_set_drvdata(pdev, NULL);
1283 if (host) {
1284 mmc_davinci_cpufreq_deregister(host);
1285
1286 mmc_remove_host(host->mmc);
1287 free_irq(host->irq, host);
1288
1289 davinci_release_dma_channels(host);
1290
1291 clk_disable(host->clk);
1292 clk_put(host->clk);
1293
1294 iounmap(host->base);
1295
1296 release_resource(host->mem_res);
1297
1298 mmc_free_host(host->mmc);
1299 }
1300
1301 return 0;
1302}
1303
1304#ifdef CONFIG_PM
1305static int davinci_mmcsd_suspend(struct platform_device *pdev, pm_message_t msg)
1306{
1307 struct mmc_davinci_host *host = platform_get_drvdata(pdev);
1308
1309 return mmc_suspend_host(host->mmc, msg);
1310}
1311
1312static int davinci_mmcsd_resume(struct platform_device *pdev)
1313{
1314 struct mmc_davinci_host *host = platform_get_drvdata(pdev);
1315
1316 return mmc_resume_host(host->mmc);
1317}
1318#else
1319#define davinci_mmcsd_suspend NULL
1320#define davinci_mmcsd_resume NULL
1321#endif
1322
1323static struct platform_driver davinci_mmcsd_driver = {
1324 .driver = {
1325 .name = "davinci_mmc",
1326 .owner = THIS_MODULE,
1327 },
1328 .remove = __exit_p(davinci_mmcsd_remove),
1329 .suspend = davinci_mmcsd_suspend,
1330 .resume = davinci_mmcsd_resume,
1331};
1332
1333static int __init davinci_mmcsd_init(void)
1334{
1335 return platform_driver_probe(&davinci_mmcsd_driver,
1336 davinci_mmcsd_probe);
1337}
1338module_init(davinci_mmcsd_init);
1339
1340static void __exit davinci_mmcsd_exit(void)
1341{
1342 platform_driver_unregister(&davinci_mmcsd_driver);
1343}
1344module_exit(davinci_mmcsd_exit);
1345
1346MODULE_AUTHOR("Texas Instruments India");
1347MODULE_LICENSE("GPL");
1348MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller");
1349
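The multiblock limits set in davinci_mmcsd_probe() above follow directly from the controller's field widths: BLEN is 12 bits and NBLK is 16 bits, so the largest request is their product. A minimal standalone C sketch of the same arithmetic, with no kernel dependencies:

#include <stdio.h>

int main(void)
{
	/* Field widths quoted in davinci_mmcsd_probe(): BLEN is 12 bits,
	 * NBLK is 16 bits. */
	unsigned int max_blk_size  = (1u << 12) - 1;	/* 4095 */
	unsigned int max_blk_count = (1u << 16) - 1;	/* 65535 */
	unsigned int max_req_size  = max_blk_size * max_blk_count;

	printf("max_blk_size=%u max_blk_count=%u max_req_size=%u\n",
	       max_blk_size, max_blk_count, max_req_size);
	return 0;
}
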
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 90d168ad03b6..84c103a7ee13 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -2,6 +2,7 @@
2 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver 2 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
3 * 3 *
4 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. 4 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
5 * Copyright (C) 2010 ST-Ericsson AB.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -34,9 +35,6 @@
34 35
35#define DRIVER_NAME "mmci-pl18x" 36#define DRIVER_NAME "mmci-pl18x"
36 37
37#define DBG(host,fmt,args...) \
38 pr_debug("%s: %s: " fmt, mmc_hostname(host->mmc), __func__ , args)
39
40static unsigned int fmax = 515633; 38static unsigned int fmax = 515633;
41 39
42/* 40/*
@@ -105,8 +103,8 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
105 void __iomem *base; 103 void __iomem *base;
106 int blksz_bits; 104 int blksz_bits;
107 105
108 DBG(host, "blksz %04x blks %04x flags %08x\n", 106 dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
109 data->blksz, data->blocks, data->flags); 107 data->blksz, data->blocks, data->flags);
110 108
111 host->data = data; 109 host->data = data;
112 host->size = data->blksz; 110 host->size = data->blksz;
@@ -155,7 +153,7 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
155{ 153{
156 void __iomem *base = host->base; 154 void __iomem *base = host->base;
157 155
158 DBG(host, "op %02x arg %08x flags %08x\n", 156 dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
159 cmd->opcode, cmd->arg, cmd->flags); 157 cmd->opcode, cmd->arg, cmd->flags);
160 158
161 if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) { 159 if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
@@ -184,8 +182,20 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
184{ 182{
185 if (status & MCI_DATABLOCKEND) { 183 if (status & MCI_DATABLOCKEND) {
186 host->data_xfered += data->blksz; 184 host->data_xfered += data->blksz;
185#ifdef CONFIG_ARCH_U300
186 /*
187 * On the U300 some signal or other is
188 * badly routed so that a data write does
189 * not properly terminate with a MCI_DATAEND
190 * status flag. This quirk will make writes
191 * work again.
192 */
193 if (data->flags & MMC_DATA_WRITE)
194 status |= MCI_DATAEND;
195#endif
187 } 196 }
188 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) { 197 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
198 dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
189 if (status & MCI_DATACRCFAIL) 199 if (status & MCI_DATACRCFAIL)
190 data->error = -EILSEQ; 200 data->error = -EILSEQ;
191 else if (status & MCI_DATATIMEOUT) 201 else if (status & MCI_DATATIMEOUT)
@@ -307,7 +317,7 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
307 317
308 status = readl(base + MMCISTATUS); 318 status = readl(base + MMCISTATUS);
309 319
310 DBG(host, "irq1 %08x\n", status); 320 dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);
311 321
312 do { 322 do {
313 unsigned long flags; 323 unsigned long flags;
@@ -401,7 +411,7 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
401 status &= readl(host->base + MMCIMASK0); 411 status &= readl(host->base + MMCIMASK0);
402 writel(status, host->base + MMCICLEAR); 412 writel(status, host->base + MMCICLEAR);
403 413
404 DBG(host, "irq0 %08x\n", status); 414 dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
405 415
406 data = host->data; 416 data = host->data;
407 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN| 417 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
@@ -428,8 +438,8 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
428 WARN_ON(host->mrq != NULL); 438 WARN_ON(host->mrq != NULL);
429 439
430 if (mrq->data && !is_power_of_2(mrq->data->blksz)) { 440 if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
431 printk(KERN_ERR "%s: Unsupported block size (%d bytes)\n", 441 dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
432 mmc_hostname(mmc), mrq->data->blksz); 442 mrq->data->blksz);
433 mrq->cmd->error = -EINVAL; 443 mrq->cmd->error = -EINVAL;
434 mmc_request_done(mmc, mrq); 444 mmc_request_done(mmc, mrq);
435 return; 445 return;
@@ -582,8 +592,8 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
582 592
583 host->hw_designer = amba_manf(dev); 593 host->hw_designer = amba_manf(dev);
584 host->hw_revision = amba_rev(dev); 594 host->hw_revision = amba_rev(dev);
585 DBG(host, "designer ID = 0x%02x\n", host->hw_designer); 595 dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
586 DBG(host, "revision = 0x%01x\n", host->hw_revision); 596 dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);
587 597
588 host->clk = clk_get(&dev->dev, NULL); 598 host->clk = clk_get(&dev->dev, NULL);
589 if (IS_ERR(host->clk)) { 599 if (IS_ERR(host->clk)) {
@@ -608,7 +618,8 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
608 if (ret < 0) 618 if (ret < 0)
609 goto clk_disable; 619 goto clk_disable;
610 host->mclk = clk_get_rate(host->clk); 620 host->mclk = clk_get_rate(host->clk);
611 DBG(host, "eventual mclk rate: %u Hz\n", host->mclk); 621 dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
622 host->mclk);
612 } 623 }
613 host->base = ioremap(dev->res.start, resource_size(&dev->res)); 624 host->base = ioremap(dev->res.start, resource_size(&dev->res));
614 if (!host->base) { 625 if (!host->base) {
@@ -619,6 +630,8 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
619 mmc->ops = &mmci_ops; 630 mmc->ops = &mmci_ops;
620 mmc->f_min = (host->mclk + 511) / 512; 631 mmc->f_min = (host->mclk + 511) / 512;
621 mmc->f_max = min(host->mclk, fmax); 632 mmc->f_max = min(host->mclk, fmax);
633 dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
634
622#ifdef CONFIG_REGULATOR 635#ifdef CONFIG_REGULATOR
623 /* If we're using the regulator framework, try to fetch a regulator */ 636 /* If we're using the regulator framework, try to fetch a regulator */
624 host->vcc = regulator_get(&dev->dev, "vmmc"); 637 host->vcc = regulator_get(&dev->dev, "vmmc");
@@ -712,7 +725,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
712 725
713 mmc_add_host(mmc); 726 mmc_add_host(mmc);
714 727
715 printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n", 728 dev_info(&dev->dev, "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
716 mmc_hostname(mmc), amba_rev(dev), amba_config(dev), 729 mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
717 (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]); 730 (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);
718 731
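The mmci probe code above derives the host frequency window from the bus clock: f_min is the integer ceiling of mclk/512 and f_max is the smaller of mclk and the module-level fmax (515633). A small standalone sketch of that calculation; the 24 MHz mclk is illustrative, not taken from any particular board:

#include <stdio.h>

static unsigned int fmax = 515633;	/* module default in mmci.c */

int main(void)
{
	unsigned int mclk = 24000000;	/* illustrative bus clock */

	/* Same expressions as mmci_probe(): ceiling division by 512,
	 * then cap at fmax. */
	unsigned int f_min = (mclk + 511) / 512;
	unsigned int f_max = mclk < fmax ? mclk : fmax;

	printf("mclk=%u Hz -> f_min=%u Hz, f_max=%u Hz\n", mclk, f_min, f_max);
	return 0;
}
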
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index dba4600bcdb4..b31946e0b4ca 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -38,10 +38,9 @@
38#include <asm/div64.h> 38#include <asm/div64.h>
39#include <asm/sizes.h> 39#include <asm/sizes.h>
40 40
41#include <asm/mach/mmc.h> 41#include <mach/mmc.h>
42#include <mach/msm_iomap.h> 42#include <mach/msm_iomap.h>
43#include <mach/dma.h> 43#include <mach/dma.h>
44#include <mach/htc_pwrsink.h>
45 44
46#include "msm_sdcc.h" 45#include "msm_sdcc.h"
47 46
@@ -775,13 +774,11 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
775 774
776 switch (ios->power_mode) { 775 switch (ios->power_mode) {
777 case MMC_POWER_OFF: 776 case MMC_POWER_OFF:
778 htc_pwrsink_set(PWRSINK_SDCARD, 0);
779 break; 777 break;
780 case MMC_POWER_UP: 778 case MMC_POWER_UP:
781 pwr |= MCI_PWR_UP; 779 pwr |= MCI_PWR_UP;
782 break; 780 break;
783 case MMC_POWER_ON: 781 case MMC_POWER_ON:
784 htc_pwrsink_set(PWRSINK_SDCARD, 100);
785 pwr |= MCI_PWR_ON; 782 pwr |= MCI_PWR_ON;
786 break; 783 break;
787 } 784 }
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 88671529c45d..60a2b69e54f5 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -679,17 +679,17 @@ static int mxcmci_probe(struct platform_device *pdev)
679{ 679{
680 struct mmc_host *mmc; 680 struct mmc_host *mmc;
681 struct mxcmci_host *host = NULL; 681 struct mxcmci_host *host = NULL;
682 struct resource *r; 682 struct resource *iores, *r;
683 int ret = 0, irq; 683 int ret = 0, irq;
684 684
685 printk(KERN_INFO "i.MX SDHC driver\n"); 685 printk(KERN_INFO "i.MX SDHC driver\n");
686 686
687 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 687 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
688 irq = platform_get_irq(pdev, 0); 688 irq = platform_get_irq(pdev, 0);
689 if (!r || irq < 0) 689 if (!iores || irq < 0)
690 return -EINVAL; 690 return -EINVAL;
691 691
692 r = request_mem_region(r->start, resource_size(r), pdev->name); 692 r = request_mem_region(iores->start, resource_size(iores), pdev->name);
693 if (!r) 693 if (!r)
694 return -EBUSY; 694 return -EBUSY;
695 695
@@ -809,7 +809,7 @@ out_iounmap:
809out_free: 809out_free:
810 mmc_free_host(mmc); 810 mmc_free_host(mmc);
811out_release_mem: 811out_release_mem:
812 release_mem_region(host->res->start, resource_size(host->res)); 812 release_mem_region(iores->start, resource_size(iores));
813 return ret; 813 return ret;
814} 814}
815 815
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index 0c44d560bf1a..0c7a63c1f12f 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -22,6 +22,8 @@
22#include <linux/mmc/core.h> 22#include <linux/mmc/core.h>
23#include <linux/mmc/host.h> 23#include <linux/mmc/host.h>
24 24
25MODULE_LICENSE("GPL");
26
25enum { 27enum {
26 CD_GPIO = 0, 28 CD_GPIO = 0,
27 WP_GPIO, 29 WP_GPIO,
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 5f970e253e50..c6d7e8ecadbf 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1459,8 +1459,10 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1459 goto err_ioremap; 1459 goto err_ioremap;
1460 1460
1461 host->iclk = clk_get(&pdev->dev, "ick"); 1461 host->iclk = clk_get(&pdev->dev, "ick");
1462 if (IS_ERR(host->iclk)) 1462 if (IS_ERR(host->iclk)) {
1463 ret = PTR_ERR(host->iclk);
1463 goto err_free_mmc_host; 1464 goto err_free_mmc_host;
1465 }
1464 clk_enable(host->iclk); 1466 clk_enable(host->iclk);
1465 1467
1466 host->fclk = clk_get(&pdev->dev, "fck"); 1468 host->fclk = clk_get(&pdev->dev, "fck");
@@ -1500,10 +1502,8 @@ err_free_irq:
1500err_free_fclk: 1502err_free_fclk:
1501 clk_put(host->fclk); 1503 clk_put(host->fclk);
1502err_free_iclk: 1504err_free_iclk:
1503 if (host->iclk != NULL) { 1505 clk_disable(host->iclk);
1504 clk_disable(host->iclk); 1506 clk_put(host->iclk);
1505 clk_put(host->iclk);
1506 }
1507err_free_mmc_host: 1507err_free_mmc_host:
1508 iounmap(host->virt_base); 1508 iounmap(host->virt_base);
1509err_ioremap: 1509err_ioremap:
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index bb47ff465c04..0d783f3e79ed 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -828,7 +828,7 @@ static int pxamci_resume(struct device *dev)
828 return ret; 828 return ret;
829} 829}
830 830
831static struct dev_pm_ops pxamci_pm_ops = { 831static const struct dev_pm_ops pxamci_pm_ops = {
832 .suspend = pxamci_suspend, 832 .suspend = pxamci_suspend,
833 .resume = pxamci_resume, 833 .resume = pxamci_resume,
834}; 834};
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 941a4d35ef8d..d96e1abf2d64 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -820,7 +820,7 @@ fail_request:
820static void finalize_request(struct s3cmci_host *host) 820static void finalize_request(struct s3cmci_host *host)
821{ 821{
822 struct mmc_request *mrq = host->mrq; 822 struct mmc_request *mrq = host->mrq;
823 struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd; 823 struct mmc_command *cmd;
824 int debug_as_failure = 0; 824 int debug_as_failure = 0;
825 825
826 if (host->complete_what != COMPLETION_FINALIZE) 826 if (host->complete_what != COMPLETION_FINALIZE)
@@ -828,6 +828,7 @@ static void finalize_request(struct s3cmci_host *host)
828 828
829 if (!mrq) 829 if (!mrq)
830 return; 830 return;
831 cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;
831 832
832 if (cmd->data && (cmd->error == 0) && 833 if (cmd->data && (cmd->error == 0) &&
833 (cmd->data->error == 0)) { 834 (cmd->data->error == 0)) {
@@ -1302,10 +1303,8 @@ static int s3cmci_get_ro(struct mmc_host *mmc)
1302 if (pdata->no_wprotect) 1303 if (pdata->no_wprotect)
1303 return 0; 1304 return 0;
1304 1305
1305 ret = s3c2410_gpio_getpin(pdata->gpio_wprotect); 1306 ret = gpio_get_value(pdata->gpio_wprotect) ? 1 : 0;
1306 1307 ret ^= pdata->wprotect_invert;
1307 if (pdata->wprotect_invert)
1308 ret = !ret;
1309 1308
1310 return ret; 1309 return ret;
1311} 1310}
@@ -1654,7 +1653,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev)
1654 goto probe_free_irq; 1653 goto probe_free_irq;
1655 } 1654 }
1656 1655
1657 host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect); 1656 host->irq_cd = gpio_to_irq(host->pdata->gpio_detect);
1658 1657
1659 if (host->irq_cd >= 0) { 1658 if (host->irq_cd >= 0) {
1660 if (request_irq(host->irq_cd, s3cmci_irq_cd, 1659 if (request_irq(host->irq_cd, s3cmci_irq_cd,
@@ -1892,7 +1891,7 @@ static int s3cmci_resume(struct device *dev)
1892 return mmc_resume_host(mmc); 1891 return mmc_resume_host(mmc);
1893} 1892}
1894 1893
1895static struct dev_pm_ops s3cmci_pm = { 1894static const struct dev_pm_ops s3cmci_pm = {
1896 .suspend = s3cmci_suspend, 1895 .suspend = s3cmci_suspend,
1897 .resume = s3cmci_resume, 1896 .resume = s3cmci_resume,
1898}; 1897};
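The reworked s3cmci_get_ro() above normalizes the GPIO level to 0/1 and applies wprotect_invert with an XOR instead of a conditional negation. A tiny standalone table of the four possible cases, computed the same way:

#include <stdio.h>

int main(void)
{
	for (int level = 0; level <= 1; level++) {
		for (int invert = 0; invert <= 1; invert++) {
			/* gpio_get_value() result folded to 0/1, then
			 * flipped when the line is active-low. */
			int ro = (level ? 1 : 0) ^ invert;

			printf("gpio=%d invert=%d -> read-only=%d\n",
			       level, invert, ro);
		}
	}
	return 0;
}
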
diff --git a/drivers/mmc/host/sdhci-of.c b/drivers/mmc/host/sdhci-of-core.c
index 01ab916c2802..55e33135edb4 100644
--- a/drivers/mmc/host/sdhci-of.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -22,62 +22,37 @@
22#include <linux/of_platform.h> 22#include <linux/of_platform.h>
23#include <linux/mmc/host.h> 23#include <linux/mmc/host.h>
24#include <asm/machdep.h> 24#include <asm/machdep.h>
25#include "sdhci-of.h"
25#include "sdhci.h" 26#include "sdhci.h"
26 27
27struct sdhci_of_data { 28#ifdef CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
28 unsigned int quirks;
29 struct sdhci_ops ops;
30};
31
32struct sdhci_of_host {
33 unsigned int clock;
34 u16 xfer_mode_shadow;
35};
36 29
37/* 30/*
38 * Ops and quirks for the Freescale eSDHC controller. 31 * These accessors are designed for big endian hosts doing I/O to
32 * little endian controllers incorporating a 32-bit hardware byte swapper.
39 */ 33 */
40 34
41#define ESDHC_DMA_SYSCTL 0x40c 35u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg)
42#define ESDHC_DMA_SNOOP 0x00000040
43
44#define ESDHC_SYSTEM_CONTROL 0x2c
45#define ESDHC_CLOCK_MASK 0x0000fff0
46#define ESDHC_PREDIV_SHIFT 8
47#define ESDHC_DIVIDER_SHIFT 4
48#define ESDHC_CLOCK_PEREN 0x00000004
49#define ESDHC_CLOCK_HCKEN 0x00000002
50#define ESDHC_CLOCK_IPGEN 0x00000001
51
52#define ESDHC_HOST_CONTROL_RES 0x05
53
54static u32 esdhc_readl(struct sdhci_host *host, int reg)
55{ 36{
56 return in_be32(host->ioaddr + reg); 37 return in_be32(host->ioaddr + reg);
57} 38}
58 39
59static u16 esdhc_readw(struct sdhci_host *host, int reg) 40u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg)
60{ 41{
61 u16 ret; 42 return in_be16(host->ioaddr + (reg ^ 0x2));
62
63 if (unlikely(reg == SDHCI_HOST_VERSION))
64 ret = in_be16(host->ioaddr + reg);
65 else
66 ret = in_be16(host->ioaddr + (reg ^ 0x2));
67 return ret;
68} 43}
69 44
70static u8 esdhc_readb(struct sdhci_host *host, int reg) 45u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg)
71{ 46{
72 return in_8(host->ioaddr + (reg ^ 0x3)); 47 return in_8(host->ioaddr + (reg ^ 0x3));
73} 48}
74 49
75static void esdhc_writel(struct sdhci_host *host, u32 val, int reg) 50void sdhci_be32bs_writel(struct sdhci_host *host, u32 val, int reg)
76{ 51{
77 out_be32(host->ioaddr + reg, val); 52 out_be32(host->ioaddr + reg, val);
78} 53}
79 54
80static void esdhc_writew(struct sdhci_host *host, u16 val, int reg) 55void sdhci_be32bs_writew(struct sdhci_host *host, u16 val, int reg)
81{ 56{
82 struct sdhci_of_host *of_host = sdhci_priv(host); 57 struct sdhci_of_host *of_host = sdhci_priv(host);
83 int base = reg & ~0x3; 58 int base = reg & ~0x3;
@@ -92,106 +67,21 @@ static void esdhc_writew(struct sdhci_host *host, u16 val, int reg)
92 of_host->xfer_mode_shadow = val; 67 of_host->xfer_mode_shadow = val;
93 return; 68 return;
94 case SDHCI_COMMAND: 69 case SDHCI_COMMAND:
95 esdhc_writel(host, val << 16 | of_host->xfer_mode_shadow, 70 sdhci_be32bs_writel(host, val << 16 | of_host->xfer_mode_shadow,
96 SDHCI_TRANSFER_MODE); 71 SDHCI_TRANSFER_MODE);
97 return; 72 return;
98 case SDHCI_BLOCK_SIZE:
99 /*
100 * Two last DMA bits are reserved, and first one is used for
101 * non-standard blksz of 4096 bytes that we don't support
102 * yet. So clear the DMA boundary bits.
103 */
104 val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
105 /* fall through */
106 } 73 }
107 clrsetbits_be32(host->ioaddr + base, 0xffff << shift, val << shift); 74 clrsetbits_be32(host->ioaddr + base, 0xffff << shift, val << shift);
108} 75}
109 76
110static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg) 77void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg)
111{ 78{
112 int base = reg & ~0x3; 79 int base = reg & ~0x3;
113 int shift = (reg & 0x3) * 8; 80 int shift = (reg & 0x3) * 8;
114 81
115 /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */
116 if (reg == SDHCI_HOST_CONTROL)
117 val &= ~ESDHC_HOST_CONTROL_RES;
118
119 clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift); 82 clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift);
120} 83}
121 84#endif /* CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER */
122static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
123{
124 int pre_div = 2;
125 int div = 1;
126
127 clrbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
128 ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
129
130 if (clock == 0)
131 goto out;
132
133 while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
134 pre_div *= 2;
135
136 while (host->max_clk / pre_div / div > clock && div < 16)
137 div++;
138
139 dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
140 clock, host->max_clk / pre_div / div);
141
142 pre_div >>= 1;
143 div--;
144
145 setbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
146 ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN |
147 div << ESDHC_DIVIDER_SHIFT | pre_div << ESDHC_PREDIV_SHIFT);
148 mdelay(100);
149out:
150 host->clock = clock;
151}
152
153static int esdhc_enable_dma(struct sdhci_host *host)
154{
155 setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP);
156 return 0;
157}
158
159static unsigned int esdhc_get_max_clock(struct sdhci_host *host)
160{
161 struct sdhci_of_host *of_host = sdhci_priv(host);
162
163 return of_host->clock;
164}
165
166static unsigned int esdhc_get_min_clock(struct sdhci_host *host)
167{
168 struct sdhci_of_host *of_host = sdhci_priv(host);
169
170 return of_host->clock / 256 / 16;
171}
172
173static struct sdhci_of_data sdhci_esdhc = {
174 .quirks = SDHCI_QUIRK_FORCE_BLK_SZ_2048 |
175 SDHCI_QUIRK_BROKEN_CARD_DETECTION |
176 SDHCI_QUIRK_NO_BUSY_IRQ |
177 SDHCI_QUIRK_NONSTANDARD_CLOCK |
178 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
179 SDHCI_QUIRK_PIO_NEEDS_DELAY |
180 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET |
181 SDHCI_QUIRK_NO_CARD_NO_RESET,
182 .ops = {
183 .readl = esdhc_readl,
184 .readw = esdhc_readw,
185 .readb = esdhc_readb,
186 .writel = esdhc_writel,
187 .writew = esdhc_writew,
188 .writeb = esdhc_writeb,
189 .set_clock = esdhc_set_clock,
190 .enable_dma = esdhc_enable_dma,
191 .get_max_clock = esdhc_get_max_clock,
192 .get_min_clock = esdhc_get_min_clock,
193 },
194};
195 85
196#ifdef CONFIG_PM 86#ifdef CONFIG_PM
197 87
@@ -301,9 +191,14 @@ static int __devexit sdhci_of_remove(struct of_device *ofdev)
301} 191}
302 192
303static const struct of_device_id sdhci_of_match[] = { 193static const struct of_device_id sdhci_of_match[] = {
194#ifdef CONFIG_MMC_SDHCI_OF_ESDHC
304 { .compatible = "fsl,mpc8379-esdhc", .data = &sdhci_esdhc, }, 195 { .compatible = "fsl,mpc8379-esdhc", .data = &sdhci_esdhc, },
305 { .compatible = "fsl,mpc8536-esdhc", .data = &sdhci_esdhc, }, 196 { .compatible = "fsl,mpc8536-esdhc", .data = &sdhci_esdhc, },
306 { .compatible = "fsl,esdhc", .data = &sdhci_esdhc, }, 197 { .compatible = "fsl,esdhc", .data = &sdhci_esdhc, },
198#endif
199#ifdef CONFIG_MMC_SDHCI_OF_HLWD
200 { .compatible = "nintendo,hollywood-sdhci", .data = &sdhci_hlwd, },
201#endif
307 { .compatible = "generic-sdhci", }, 202 { .compatible = "generic-sdhci", },
308 {}, 203 {},
309}; 204};
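The sdhci_be32bs_* accessors above drive a little-endian SDHCI block sitting behind a 32-bit byte swapper on a big-endian bus: 32-bit accesses need no address adjustment, while 16-bit and 8-bit accesses XOR the register offset with 0x2 and 0x3, respectively, to reach the correct byte lanes. A standalone sketch that simulates the swapper with a plain byte array; the register contents are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Device-side register file, laid out as the little-endian controller
 * sees it. */
static uint8_t dev[8];

/* The 32-bit byte swapper: CPU bus byte b maps to device byte b ^ 3
 * within each aligned 4-byte word. */
static uint8_t bus_read8(unsigned int off)
{
	return dev[off ^ 3];
}

/* Big-endian CPU primitives built on top of the swapped bus. */
static uint32_t in_be32(unsigned int off)
{
	return (uint32_t)bus_read8(off) << 24 |
	       (uint32_t)bus_read8(off + 1) << 16 |
	       (uint32_t)bus_read8(off + 2) << 8 |
	       bus_read8(off + 3);
}

static uint16_t in_be16(unsigned int off)
{
	return (uint16_t)(bus_read8(off) << 8 | bus_read8(off + 1));
}

static uint8_t in_8(unsigned int off)
{
	return bus_read8(off);
}

int main(void)
{
	/* A 32-bit register 0x11223344 at offset 0 and a 16-bit register
	 * 0xbeef at offset 4, both stored little-endian. */
	dev[0] = 0x44; dev[1] = 0x33; dev[2] = 0x22; dev[3] = 0x11;
	dev[4] = 0xef; dev[5] = 0xbe;

	printf("readl(0) = 0x%08x\n", (unsigned int)in_be32(0));
	printf("readw(4) = 0x%04x\n", (unsigned int)in_be16(4 ^ 0x2));
	printf("readb(4) = 0x%02x\n", (unsigned int)in_8(4 ^ 0x3));
	return 0;
}
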
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
new file mode 100644
index 000000000000..d5b11a17e648
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -0,0 +1,143 @@
1/*
2 * Freescale eSDHC controller driver.
3 *
4 * Copyright (c) 2007 Freescale Semiconductor, Inc.
5 * Copyright (c) 2009 MontaVista Software, Inc.
6 *
7 * Authors: Xiaobo Xie <X.Xie@freescale.com>
8 * Anton Vorontsov <avorontsov@ru.mvista.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
14 */
15
16#include <linux/io.h>
17#include <linux/delay.h>
18#include <linux/mmc/host.h>
19#include "sdhci-of.h"
20#include "sdhci.h"
21
22/*
23 * Ops and quirks for the Freescale eSDHC controller.
24 */
25
26#define ESDHC_DMA_SYSCTL 0x40c
27#define ESDHC_DMA_SNOOP 0x00000040
28
29#define ESDHC_SYSTEM_CONTROL 0x2c
30#define ESDHC_CLOCK_MASK 0x0000fff0
31#define ESDHC_PREDIV_SHIFT 8
32#define ESDHC_DIVIDER_SHIFT 4
33#define ESDHC_CLOCK_PEREN 0x00000004
34#define ESDHC_CLOCK_HCKEN 0x00000002
35#define ESDHC_CLOCK_IPGEN 0x00000001
36
37#define ESDHC_HOST_CONTROL_RES 0x05
38
39static u16 esdhc_readw(struct sdhci_host *host, int reg)
40{
41 u16 ret;
42
43 if (unlikely(reg == SDHCI_HOST_VERSION))
44 ret = in_be16(host->ioaddr + reg);
45 else
46 ret = sdhci_be32bs_readw(host, reg);
47 return ret;
48}
49
50static void esdhc_writew(struct sdhci_host *host, u16 val, int reg)
51{
52 if (reg == SDHCI_BLOCK_SIZE) {
53 /*
54 * Two last DMA bits are reserved, and first one is used for
55 * non-standard blksz of 4096 bytes that we don't support
56 * yet. So clear the DMA boundary bits.
57 */
58 val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
59 }
60 sdhci_be32bs_writew(host, val, reg);
61}
62
63static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
64{
65 /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */
66 if (reg == SDHCI_HOST_CONTROL)
67 val &= ~ESDHC_HOST_CONTROL_RES;
68 sdhci_be32bs_writeb(host, val, reg);
69}
70
71static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
72{
73 int pre_div = 2;
74 int div = 1;
75
76 clrbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
77 ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
78
79 if (clock == 0)
80 goto out;
81
82 while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
83 pre_div *= 2;
84
85 while (host->max_clk / pre_div / div > clock && div < 16)
86 div++;
87
88 dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
89 clock, host->max_clk / pre_div / div);
90
91 pre_div >>= 1;
92 div--;
93
94 setbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
95 ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN |
96 div << ESDHC_DIVIDER_SHIFT | pre_div << ESDHC_PREDIV_SHIFT);
97 mdelay(100);
98out:
99 host->clock = clock;
100}
101
102static int esdhc_enable_dma(struct sdhci_host *host)
103{
104 setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP);
105 return 0;
106}
107
108static unsigned int esdhc_get_max_clock(struct sdhci_host *host)
109{
110 struct sdhci_of_host *of_host = sdhci_priv(host);
111
112 return of_host->clock;
113}
114
115static unsigned int esdhc_get_min_clock(struct sdhci_host *host)
116{
117 struct sdhci_of_host *of_host = sdhci_priv(host);
118
119 return of_host->clock / 256 / 16;
120}
121
122struct sdhci_of_data sdhci_esdhc = {
123 .quirks = SDHCI_QUIRK_FORCE_BLK_SZ_2048 |
124 SDHCI_QUIRK_BROKEN_CARD_DETECTION |
125 SDHCI_QUIRK_NO_BUSY_IRQ |
126 SDHCI_QUIRK_NONSTANDARD_CLOCK |
127 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
128 SDHCI_QUIRK_PIO_NEEDS_DELAY |
129 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET |
130 SDHCI_QUIRK_NO_CARD_NO_RESET,
131 .ops = {
132 .readl = sdhci_be32bs_readl,
133 .readw = esdhc_readw,
134 .readb = sdhci_be32bs_readb,
135 .writel = sdhci_be32bs_writel,
136 .writew = esdhc_writew,
137 .writeb = esdhc_writeb,
138 .set_clock = esdhc_set_clock,
139 .enable_dma = esdhc_enable_dma,
140 .get_max_clock = esdhc_get_max_clock,
141 .get_min_clock = esdhc_get_min_clock,
142 },
143};
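esdhc_set_clock() above searches for a power-of-two prescaler (up to 256) and a linear divider (1..16) that bring max_clk at or below the requested rate, then programs pre_div >> 1 and div - 1 into ESDHC_SYSTEM_CONTROL. A standalone sketch of just the divider search; the 133 MHz reference clock is an illustrative value:

#include <stdio.h>

/* Mirror of the loops in esdhc_set_clock(). */
static unsigned int esdhc_pick_divs(unsigned int max_clk, unsigned int clock,
				    int *pre_div_out, int *div_out)
{
	int pre_div = 2;
	int div = 1;

	while (max_clk / pre_div / 16 > clock && pre_div < 256)
		pre_div *= 2;

	while (max_clk / pre_div / div > clock && div < 16)
		div++;

	*pre_div_out = pre_div;
	*div_out = div;
	return max_clk / pre_div / div;
}

int main(void)
{
	int pre_div, div;
	unsigned int actual = esdhc_pick_divs(133000000, 25000000,
					      &pre_div, &div);

	/* The hardware fields would then be written as pre_div >> 1 and
	 * div - 1. */
	printf("pre_div=%d div=%d actual=%u Hz\n", pre_div, div, actual);
	return 0;
}
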
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
new file mode 100644
index 000000000000..35117f3ed757
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -0,0 +1,65 @@
1/*
2 * drivers/mmc/host/sdhci-of-hlwd.c
3 *
4 * Nintendo Wii Secure Digital Host Controller Interface.
5 * Copyright (C) 2009 The GameCube Linux Team
6 * Copyright (C) 2009 Albert Herranz
7 *
8 * Based on sdhci-of-esdhc.c
9 *
10 * Copyright (c) 2007 Freescale Semiconductor, Inc.
11 * Copyright (c) 2009 MontaVista Software, Inc.
12 *
13 * Authors: Xiaobo Xie <X.Xie@freescale.com>
14 * Anton Vorontsov <avorontsov@ru.mvista.com>
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or (at
19 * your option) any later version.
20 */
21
22#include <linux/delay.h>
23#include <linux/mmc/host.h>
24#include "sdhci-of.h"
25#include "sdhci.h"
26
27/*
28 * Ops and quirks for the Nintendo Wii SDHCI controllers.
29 */
30
31/*
32 * We need a small delay after each write, or things go horribly wrong.
33 */
34#define SDHCI_HLWD_WRITE_DELAY 5 /* usecs */
35
36static void sdhci_hlwd_writel(struct sdhci_host *host, u32 val, int reg)
37{
38 sdhci_be32bs_writel(host, val, reg);
39 udelay(SDHCI_HLWD_WRITE_DELAY);
40}
41
42static void sdhci_hlwd_writew(struct sdhci_host *host, u16 val, int reg)
43{
44 sdhci_be32bs_writew(host, val, reg);
45 udelay(SDHCI_HLWD_WRITE_DELAY);
46}
47
48static void sdhci_hlwd_writeb(struct sdhci_host *host, u8 val, int reg)
49{
50 sdhci_be32bs_writeb(host, val, reg);
51 udelay(SDHCI_HLWD_WRITE_DELAY);
52}
53
54struct sdhci_of_data sdhci_hlwd = {
55 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
56 SDHCI_QUIRK_32BIT_DMA_SIZE,
57 .ops = {
58 .readl = sdhci_be32bs_readl,
59 .readw = sdhci_be32bs_readw,
60 .readb = sdhci_be32bs_readb,
61 .writel = sdhci_hlwd_writel,
62 .writew = sdhci_hlwd_writew,
63 .writeb = sdhci_hlwd_writeb,
64 },
65};
diff --git a/drivers/mmc/host/sdhci-of.h b/drivers/mmc/host/sdhci-of.h
new file mode 100644
index 000000000000..ad09ad9915d8
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of.h
@@ -0,0 +1,42 @@
1/*
2 * OpenFirmware bindings for Secure Digital Host Controller Interface.
3 *
4 * Copyright (c) 2007 Freescale Semiconductor, Inc.
5 * Copyright (c) 2009 MontaVista Software, Inc.
6 *
7 * Authors: Xiaobo Xie <X.Xie@freescale.com>
8 * Anton Vorontsov <avorontsov@ru.mvista.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
14 */
15
16#ifndef __SDHCI_OF_H
17#define __SDHCI_OF_H
18
19#include <linux/types.h>
20#include "sdhci.h"
21
22struct sdhci_of_data {
23 unsigned int quirks;
24 struct sdhci_ops ops;
25};
26
27struct sdhci_of_host {
28 unsigned int clock;
29 u16 xfer_mode_shadow;
30};
31
32extern u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg);
33extern u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg);
34extern u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg);
35extern void sdhci_be32bs_writel(struct sdhci_host *host, u32 val, int reg);
36extern void sdhci_be32bs_writew(struct sdhci_host *host, u16 val, int reg);
37extern void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg);
38
39extern struct sdhci_of_data sdhci_esdhc;
40extern struct sdhci_of_data sdhci_hlwd;
41
42#endif /* __SDHCI_OF_H */
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index e0356644d1aa..5c3a1767770a 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -285,6 +285,73 @@ static const struct sdhci_pci_fixes sdhci_jmicron = {
285 .resume = jmicron_resume, 285 .resume = jmicron_resume,
286}; 286};
287 287
288/* SysKonnect CardBus2SDIO extra registers */
289#define SYSKT_CTRL 0x200
290#define SYSKT_RDFIFO_STAT 0x204
291#define SYSKT_WRFIFO_STAT 0x208
292#define SYSKT_POWER_DATA 0x20c
293#define SYSKT_POWER_330 0xef
294#define SYSKT_POWER_300 0xf8
295#define SYSKT_POWER_184 0xcc
296#define SYSKT_POWER_CMD 0x20d
297#define SYSKT_POWER_START (1 << 7)
298#define SYSKT_POWER_STATUS 0x20e
299#define SYSKT_POWER_STATUS_OK (1 << 0)
300#define SYSKT_BOARD_REV 0x210
301#define SYSKT_CHIP_REV 0x211
302#define SYSKT_CONF_DATA 0x212
303#define SYSKT_CONF_DATA_1V8 (1 << 2)
304#define SYSKT_CONF_DATA_2V5 (1 << 1)
305#define SYSKT_CONF_DATA_3V3 (1 << 0)
306
307static int syskt_probe(struct sdhci_pci_chip *chip)
308{
309 if ((chip->pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
310 chip->pdev->class &= ~0x0000FF;
311 chip->pdev->class |= PCI_SDHCI_IFDMA;
312 }
313 return 0;
314}
315
316static int syskt_probe_slot(struct sdhci_pci_slot *slot)
317{
318 int tm, ps;
319
320 u8 board_rev = readb(slot->host->ioaddr + SYSKT_BOARD_REV);
321 u8 chip_rev = readb(slot->host->ioaddr + SYSKT_CHIP_REV);
322 dev_info(&slot->chip->pdev->dev, "SysKonnect CardBus2SDIO, "
323 "board rev %d.%d, chip rev %d.%d\n",
324 board_rev >> 4, board_rev & 0xf,
325 chip_rev >> 4, chip_rev & 0xf);
326 if (chip_rev >= 0x20)
327 slot->host->quirks |= SDHCI_QUIRK_FORCE_DMA;
328
329 writeb(SYSKT_POWER_330, slot->host->ioaddr + SYSKT_POWER_DATA);
330 writeb(SYSKT_POWER_START, slot->host->ioaddr + SYSKT_POWER_CMD);
331 udelay(50);
332 tm = 10; /* Wait max 1 ms */
333 do {
334 ps = readw(slot->host->ioaddr + SYSKT_POWER_STATUS);
335 if (ps & SYSKT_POWER_STATUS_OK)
336 break;
337 udelay(100);
338 } while (--tm);
339 if (!tm) {
340 dev_err(&slot->chip->pdev->dev,
341 "power regulator never stabilized");
342 writeb(0, slot->host->ioaddr + SYSKT_POWER_CMD);
343 return -ENODEV;
344 }
345
346 return 0;
347}
348
349static const struct sdhci_pci_fixes sdhci_syskt = {
350 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER,
351 .probe = syskt_probe,
352 .probe_slot = syskt_probe_slot,
353};
354
288static int via_probe(struct sdhci_pci_chip *chip) 355static int via_probe(struct sdhci_pci_chip *chip)
289{ 356{
290 if (chip->pdev->revision == 0x10) 357 if (chip->pdev->revision == 0x10)
@@ -363,6 +430,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
363 }, 430 },
364 431
365 { 432 {
433 .vendor = PCI_VENDOR_ID_SYSKONNECT,
434 .device = 0x8000,
435 .subvendor = PCI_ANY_ID,
436 .subdevice = PCI_ANY_ID,
437 .driver_data = (kernel_ulong_t)&sdhci_syskt,
438 },
439
440 {
366 .vendor = PCI_VENDOR_ID_VIA, 441 .vendor = PCI_VENDOR_ID_VIA,
367 .device = 0x95d0, 442 .device = 0x95d0,
368 .subvendor = PCI_ANY_ID, 443 .subvendor = PCI_ANY_ID,
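syskt_probe_slot() above polls SYSKT_POWER_STATUS every 100 microseconds for at most 1 ms before declaring the regulator dead. A standalone sketch of that bounded poll loop, with the register read replaced by a stand-in that reports power-good after a few polls:

#include <stdio.h>

#define SYSKT_POWER_STATUS_OK	(1 << 0)

/* Stand-in for readw(SYSKT_POWER_STATUS); purely illustrative. */
static unsigned int fake_power_status(void)
{
	static int polls;

	return ++polls >= 4 ? SYSKT_POWER_STATUS_OK : 0;
}

int main(void)
{
	int tm = 10;	/* wait at most 10 * 100 us = 1 ms */
	unsigned int ps;

	do {
		ps = fake_power_status();
		if (ps & SYSKT_POWER_STATUS_OK)
			break;
		/* the driver calls udelay(100) here */
	} while (--tm);

	if (!tm)
		printf("power regulator never stabilized\n");
	else
		printf("power good with %d poll budget left\n", tm);
	return 0;
}
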
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index ce5f1d73dc04..842f46f94284 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -8,6 +8,8 @@
8 * the Free Software Foundation; either version 2 of the License, or (at 8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version. 9 * your option) any later version.
10 */ 10 */
11#ifndef __SDHCI_H
12#define __SDHCI_H
11 13
12#include <linux/scatterlist.h> 14#include <linux/scatterlist.h>
13#include <linux/compiler.h> 15#include <linux/compiler.h>
@@ -408,3 +410,5 @@ extern void sdhci_remove_host(struct sdhci_host *host, int dead);
408extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state); 410extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state);
409extern int sdhci_resume_host(struct sdhci_host *host); 411extern int sdhci_resume_host(struct sdhci_host *host);
410#endif 412#endif
413
414#endif /* __SDHCI_H */
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 91991b460c45..e22c3fa3516a 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -46,7 +46,9 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
46 clk |= 0x100; 46 clk |= 0x100;
47 } 47 }
48 48
49 sd_config_write8(host, CNF_SD_CLK_MODE, clk >> 22); 49 if (host->set_clk_div)
50 host->set_clk_div(host->pdev, (clk>>22) & 1);
51
50 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff); 52 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
51} 53}
52 54
@@ -427,12 +429,13 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
427 /* Power sequence - OFF -> ON -> UP */ 429 /* Power sequence - OFF -> ON -> UP */
428 switch (ios->power_mode) { 430 switch (ios->power_mode) {
429 case MMC_POWER_OFF: /* power down SD bus */ 431 case MMC_POWER_OFF: /* power down SD bus */
430 sd_config_write8(host, CNF_PWR_CTL_2, 0x00); 432 if (host->set_pwr)
433 host->set_pwr(host->pdev, 0);
431 tmio_mmc_clk_stop(host); 434 tmio_mmc_clk_stop(host);
432 break; 435 break;
433 case MMC_POWER_ON: /* power up SD bus */ 436 case MMC_POWER_ON: /* power up SD bus */
434 437 if (host->set_pwr)
435 sd_config_write8(host, CNF_PWR_CTL_2, 0x02); 438 host->set_pwr(host->pdev, 1);
436 break; 439 break;
437 case MMC_POWER_UP: /* start bus clock */ 440 case MMC_POWER_UP: /* start bus clock */
438 tmio_mmc_clk_start(host); 441 tmio_mmc_clk_start(host);
@@ -485,21 +488,15 @@ static int tmio_mmc_resume(struct platform_device *dev)
485{ 488{
486 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 489 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
487 struct mmc_host *mmc = platform_get_drvdata(dev); 490 struct mmc_host *mmc = platform_get_drvdata(dev);
488 struct tmio_mmc_host *host = mmc_priv(mmc);
489 int ret = 0; 491 int ret = 0;
490 492
491 /* Tell the MFD core we are ready to be enabled */ 493 /* Tell the MFD core we are ready to be enabled */
492 if (cell->enable) { 494 if (cell->resume) {
493 ret = cell->enable(dev); 495 ret = cell->resume(dev);
494 if (ret) 496 if (ret)
495 goto out; 497 goto out;
496 } 498 }
497 499
498 /* Enable the MMC/SD Control registers */
499 sd_config_write16(host, CNF_CMD, SDCREN);
500 sd_config_write32(host, CNF_CTL_BASE,
501 (dev->resource[0].start >> host->bus_shift) & 0xfffe);
502
503 mmc_resume_host(mmc); 500 mmc_resume_host(mmc);
504 501
505out: 502out:
@@ -514,17 +511,16 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
514{ 511{
515 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 512 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
516 struct tmio_mmc_data *pdata; 513 struct tmio_mmc_data *pdata;
517 struct resource *res_ctl, *res_cnf; 514 struct resource *res_ctl;
518 struct tmio_mmc_host *host; 515 struct tmio_mmc_host *host;
519 struct mmc_host *mmc; 516 struct mmc_host *mmc;
520 int ret = -EINVAL; 517 int ret = -EINVAL;
521 518
522 if (dev->num_resources != 3) 519 if (dev->num_resources != 2)
523 goto out; 520 goto out;
524 521
525 res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0); 522 res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0);
526 res_cnf = platform_get_resource(dev, IORESOURCE_MEM, 1); 523 if (!res_ctl)
527 if (!res_ctl || !res_cnf)
528 goto out; 524 goto out;
529 525
530 pdata = cell->driver_data; 526 pdata = cell->driver_data;
@@ -539,8 +535,12 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
539 535
540 host = mmc_priv(mmc); 536 host = mmc_priv(mmc);
541 host->mmc = mmc; 537 host->mmc = mmc;
538 host->pdev = dev;
542 platform_set_drvdata(dev, mmc); 539 platform_set_drvdata(dev, mmc);
543 540
541 host->set_pwr = pdata->set_pwr;
542 host->set_clk_div = pdata->set_clk_div;
543
544 /* SD control register space size is 0x200, 0x400 for bus_shift=1 */ 544 /* SD control register space size is 0x200, 0x400 for bus_shift=1 */
545 host->bus_shift = resource_size(res_ctl) >> 10; 545 host->bus_shift = resource_size(res_ctl) >> 10;
546 546
@@ -548,10 +548,6 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
548 if (!host->ctl) 548 if (!host->ctl)
549 goto host_free; 549 goto host_free;
550 550
551 host->cnf = ioremap(res_cnf->start, resource_size(res_cnf));
552 if (!host->cnf)
553 goto unmap_ctl;
554
555 mmc->ops = &tmio_mmc_ops; 551 mmc->ops = &tmio_mmc_ops;
556 mmc->caps = MMC_CAP_4_BIT_DATA; 552 mmc->caps = MMC_CAP_4_BIT_DATA;
557 mmc->f_max = pdata->hclk; 553 mmc->f_max = pdata->hclk;
@@ -562,23 +558,9 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
562 if (cell->enable) { 558 if (cell->enable) {
563 ret = cell->enable(dev); 559 ret = cell->enable(dev);
564 if (ret) 560 if (ret)
565 goto unmap_cnf; 561 goto unmap_ctl;
566 } 562 }
567 563
568 /* Enable the MMC/SD Control registers */
569 sd_config_write16(host, CNF_CMD, SDCREN);
570 sd_config_write32(host, CNF_CTL_BASE,
571 (dev->resource[0].start >> host->bus_shift) & 0xfffe);
572
573 /* Disable SD power during suspend */
574 sd_config_write8(host, CNF_PWR_CTL_3, 0x01);
575
576 /* The below is required but why? FIXME */
577 sd_config_write8(host, CNF_STOP_CLK_CTL, 0x1f);
578
579 /* Power down SD bus*/
580 sd_config_write8(host, CNF_PWR_CTL_2, 0x00);
581
582 tmio_mmc_clk_stop(host); 564 tmio_mmc_clk_stop(host);
583 reset(host); 565 reset(host);
584 566
@@ -586,14 +568,14 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
586 if (ret >= 0) 568 if (ret >= 0)
587 host->irq = ret; 569 host->irq = ret;
588 else 570 else
589 goto unmap_cnf; 571 goto unmap_ctl;
590 572
591 disable_mmc_irqs(host, TMIO_MASK_ALL); 573 disable_mmc_irqs(host, TMIO_MASK_ALL);
592 574
593 ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED | 575 ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
594 IRQF_TRIGGER_FALLING, "tmio-mmc", host); 576 IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
595 if (ret) 577 if (ret)
596 goto unmap_cnf; 578 goto unmap_ctl;
597 579
598 mmc_add_host(mmc); 580 mmc_add_host(mmc);
599 581
@@ -605,8 +587,6 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
605 587
606 return 0; 588 return 0;
607 589
608unmap_cnf:
609 iounmap(host->cnf);
610unmap_ctl: 590unmap_ctl:
611 iounmap(host->ctl); 591 iounmap(host->ctl);
612host_free: 592host_free:
@@ -626,7 +606,6 @@ static int __devexit tmio_mmc_remove(struct platform_device *dev)
626 mmc_remove_host(mmc); 606 mmc_remove_host(mmc);
627 free_irq(host->irq, host); 607 free_irq(host->irq, host);
628 iounmap(host->ctl); 608 iounmap(host->ctl);
629 iounmap(host->cnf);
630 mmc_free_host(mmc); 609 mmc_free_host(mmc);
631 } 610 }
632 611
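The TMIO driver now delegates SD power and clock-divider control to callbacks supplied through platform data instead of writing CNF_* registers itself, and every call site checks for a NULL hook first. A standalone sketch of that callback shape; the board_* functions and the void * argument are placeholders, the real hooks take a struct platform_device *:

#include <stdio.h>

struct board_mmc_pdata {
	void (*set_pwr)(void *pdev, int state);
	void (*set_clk_div)(void *pdev, int state);
};

static void board_set_pwr(void *pdev, int state)
{
	(void)pdev;
	printf("board: SD power %s\n", state ? "on" : "off");
}

static void board_set_clk_div(void *pdev, int state)
{
	(void)pdev;
	printf("board: clock divider bit = %d\n", state);
}

int main(void)
{
	struct board_mmc_pdata pdata = {
		.set_pwr = board_set_pwr,
		.set_clk_div = board_set_clk_div,
	};

	/* What tmio_mmc_set_ios()/tmio_mmc_set_clock() do, guarded so
	 * boards that leave the hooks unset still work. */
	if (pdata.set_pwr)
		pdata.set_pwr(NULL, 1);
	if (pdata.set_clk_div)
		pdata.set_clk_div(NULL, 1);
	return 0;
}
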
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 9fa998594974..692dc23363b9 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -11,26 +11,6 @@
11 11
12#include <linux/highmem.h> 12#include <linux/highmem.h>
13 13
14#define CNF_CMD 0x04
15#define CNF_CTL_BASE 0x10
16#define CNF_INT_PIN 0x3d
17#define CNF_STOP_CLK_CTL 0x40
18#define CNF_GCLK_CTL 0x41
19#define CNF_SD_CLK_MODE 0x42
20#define CNF_PIN_STATUS 0x44
21#define CNF_PWR_CTL_1 0x48
22#define CNF_PWR_CTL_2 0x49
23#define CNF_PWR_CTL_3 0x4a
24#define CNF_CARD_DETECT_MODE 0x4c
25#define CNF_SD_SLOT 0x50
26#define CNF_EXT_GCLK_CTL_1 0xf0
27#define CNF_EXT_GCLK_CTL_2 0xf1
28#define CNF_EXT_GCLK_CTL_3 0xf9
29#define CNF_SD_LED_EN_1 0xfa
30#define CNF_SD_LED_EN_2 0xfe
31
32#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/
33
34#define CTL_SD_CMD 0x00 14#define CTL_SD_CMD 0x00
35#define CTL_ARG_REG 0x04 15#define CTL_ARG_REG 0x04
36#define CTL_STOP_INTERNAL_ACTION 0x08 16#define CTL_STOP_INTERNAL_ACTION 0x08
@@ -110,7 +90,6 @@
110 90
111 91
112struct tmio_mmc_host { 92struct tmio_mmc_host {
113 void __iomem *cnf;
114 void __iomem *ctl; 93 void __iomem *ctl;
115 unsigned long bus_shift; 94 unsigned long bus_shift;
116 struct mmc_command *cmd; 95 struct mmc_command *cmd;
@@ -119,10 +98,16 @@ struct tmio_mmc_host {
119 struct mmc_host *mmc; 98 struct mmc_host *mmc;
120 int irq; 99 int irq;
121 100
101 /* Callbacks for clock / power control */
102 void (*set_pwr)(struct platform_device *host, int state);
103 void (*set_clk_div)(struct platform_device *host, int state);
104
122 /* pio related stuff */ 105 /* pio related stuff */
123 struct scatterlist *sg_ptr; 106 struct scatterlist *sg_ptr;
124 unsigned int sg_len; 107 unsigned int sg_len;
125 unsigned int sg_off; 108 unsigned int sg_off;
109
110 struct platform_device *pdev;
126}; 111};
127 112
128#include <linux/io.h> 113#include <linux/io.h>
@@ -163,25 +148,6 @@ static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr,
163 writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); 148 writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
164} 149}
165 150
166static inline void sd_config_write8(struct tmio_mmc_host *host, int addr,
167 u8 val)
168{
169 writeb(val, host->cnf + (addr << host->bus_shift));
170}
171
172static inline void sd_config_write16(struct tmio_mmc_host *host, int addr,
173 u16 val)
174{
175 writew(val, host->cnf + (addr << host->bus_shift));
176}
177
178static inline void sd_config_write32(struct tmio_mmc_host *host, int addr,
179 u32 val)
180{
181 writew(val, host->cnf + (addr << host->bus_shift));
182 writew(val >> 16, host->cnf + ((addr + 2) << host->bus_shift));
183}
184
185#include <linux/scatterlist.h> 151#include <linux/scatterlist.h>
186#include <linux/blkdev.h> 152#include <linux/blkdev.h>
187 153
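The sd_ctrl_* accessors above space registers by addr << bus_shift, and the probe code derives bus_shift from the size of the control window (a 0x200-byte window gives shift 0, a 0x400-byte window gives shift 1). A standalone sketch of that addressing, using CTL_ARG_REG (0x04) from the header above:

#include <stdio.h>

#define CTL_ARG_REG	0x04

/* Same derivation as tmio_mmc_probe(): bus_shift = resource_size >> 10. */
static unsigned long bus_shift_from_size(unsigned long res_size)
{
	return res_size >> 10;
}

int main(void)
{
	unsigned long sizes[] = { 0x200, 0x400 };

	for (int i = 0; i < 2; i++) {
		unsigned long shift = bus_shift_from_size(sizes[i]);

		printf("window 0x%03lx: bus_shift=%lu, ARG_REG at byte offset 0x%02lx\n",
		       sizes[i], shift, (unsigned long)CTL_ARG_REG << shift);
	}
	return 0;
}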