path: root/drivers/mmc/at91_mci.c
author     Pierre Ossman <drzeus@drzeus.cx>  2007-02-11 13:57:36 -0500
committer  Pierre Ossman <drzeus@drzeus.cx>  2007-05-01 07:04:17 -0400
commit     1c6a0718f0bfdab0d9b7da5f7b74f38a0058c03a (patch)
tree       5e7f2a26d5d1782d87c596b40f874c6c0b8b8e1a /drivers/mmc/at91_mci.c
parent     98ac2162699f7e9880683cb954891817f20b607c (diff)
mmc: Move host and card drivers to subdirs
Clean up the drivers/mmc directory by moving card and host drivers into subdirectories.

Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
Diffstat (limited to 'drivers/mmc/at91_mci.c')
-rw-r--r--  drivers/mmc/at91_mci.c  1001
1 files changed, 0 insertions, 1001 deletions
diff --git a/drivers/mmc/at91_mci.c b/drivers/mmc/at91_mci.c
deleted file mode 100644
index e37943c314cb..000000000000
--- a/drivers/mmc/at91_mci.c
+++ /dev/null
@@ -1,1001 +0,0 @@
1/*
2 * linux/drivers/mmc/at91_mci.c - ATMEL AT91 MCI Driver
3 *
4 * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
5 *
6 * Copyright (C) 2006 Malcolm Noyes
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13/*
14 This is the AT91 MCI driver. It has been tested with both MMC cards
15 and SD cards. Boards with a write-protect pin are supported.
16 The CCAT91SBC001 board does not support SD cards.
17
18 The three entry points are at91_mci_request, at91_mci_set_ios
19 and at91_mci_get_ro.
20
21 SET IOS
22 This configures the device, putting it into the required mode and
23 clock speed.
24
25 MCI REQUEST
26 MCI request processes the commands sent in the mmc_request structure. A
27 request can consist of a command plus a stop command in the case of
28 multiple-block transfers.
29
30 There are three main types of request: commands, reads and writes.
31
32 Commands are straightforward. The command is submitted to the controller and
33 the request function returns. When the controller generates an interrupt to indicate
34 the command has finished, the response to the command is read and mmc_request_done
35 is called to end the request.
36
37 Reads and writes work in a similar manner to normal commands, but use the PDC (DMA)
38 controller to manage the transfers.
39
40 A read is done from the controller directly into the scatterlist passed in with the request.
41 Due to a bug in the AT91RM9200 controller, when a read is completed, all the words are
42 byte-swapped in the scatterlist buffers. AT91SAM926x controllers are not affected by this bug.
43
44 The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY
45
46 A write is slightly different in that the bytes to write are first copied from the
47 scatterlist into a DMA memory buffer (in case the source buffer is read-only). The
48 entire write is then performed from this single DMA buffer.
49
50 The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY
51
52 GET RO
53 Gets the status of the write protect pin, if available.
54*/
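
/*
 * Rough call flow, as a reading aid for the code below (an illustrative
 * summary only, error handling omitted):
 *
 *   at91_mci_request()
 *     -> at91mci_process_next()          sends mrq->cmd
 *        -> at91mci_process_command()
 *           -> at91_mci_send_command()   programs ARGR/CMDR and the PDC
 *   at91_mci_irq()                       on completion or error
 *     -> at91mci_completed_command()     reads RSPR, sets cmd->error
 *        -> at91mci_process_next()       sends mrq->stop if present,
 *                                        otherwise calls mmc_request_done()
 */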
55
56#include <linux/module.h>
57#include <linux/moduleparam.h>
58#include <linux/init.h>
59#include <linux/ioport.h>
60#include <linux/platform_device.h>
61#include <linux/interrupt.h>
62#include <linux/blkdev.h>
63#include <linux/delay.h>
64#include <linux/err.h>
65#include <linux/dma-mapping.h>
66#include <linux/clk.h>
67#include <linux/atmel_pdc.h>
68
69#include <linux/mmc/host.h>
70
71#include <asm/io.h>
72#include <asm/irq.h>
73#include <asm/mach/mmc.h>
74#include <asm/arch/board.h>
75#include <asm/arch/cpu.h>
76#include <asm/arch/gpio.h>
77#include <asm/arch/at91_mci.h>
78
79#define DRIVER_NAME "at91_mci"
80
81#undef SUPPORT_4WIRE
82
83#define FL_SENT_COMMAND (1 << 0)
84#define FL_SENT_STOP (1 << 1)
85
86#define AT91_MCI_ERRORS (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \
87 | AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \
88 | AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)
89
90#define at91_mci_read(host, reg) __raw_readl((host)->baseaddr + (reg))
91#define at91_mci_write(host, reg, val) __raw_writel((val), (host)->baseaddr + (reg))
92
93
94/*
95 * Low level type for this driver
96 */
97struct at91mci_host
98{
99 struct mmc_host *mmc;
100 struct mmc_command *cmd;
101 struct mmc_request *request;
102
103 void __iomem *baseaddr;
104 int irq;
105
106 struct at91_mmc_data *board;
107 int present;
108
109 struct clk *mci_clk;
110
111 /*
112 * Flag indicating when the command has been sent. This is used to
113 * work out whether or not to send the stop
114 */
115 unsigned int flags;
116 /* flag for current bus settings */
117 u32 bus_mode;
118
119 /* DMA buffer used for transmitting */
120 unsigned int* buffer;
121 dma_addr_t physical_address;
122 unsigned int total_length;
123
124 /* Latest in the scatterlist that has been enabled for transfer, but not freed */
125 int in_use_index;
126
127 /* Latest in the scatterlist that has been enabled for transfer */
128 int transfer_index;
129};
130
131/*
132 * Copy from sg to a dma block - used for transfers
133 */
134static inline void at91mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
135{
136 unsigned int len, i, size;
137 unsigned *dmabuf = host->buffer;
138
139 size = host->total_length;
140 len = data->sg_len;
141
142 /*
143 * Loop through all the entries. The requested size may be
144 * smaller than the total scatterlist length, so make sure that
145 * we do not transfer too much.
146 */
147 for (i = 0; i < len; i++) {
148 struct scatterlist *sg;
149 int amount;
150 unsigned int *sgbuffer;
151
152 sg = &data->sg[i];
153
154 sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
155 amount = min(size, sg->length);
156 size -= amount;
157
158 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
159 int index;
160
161 for (index = 0; index < (amount / 4); index++)
162 *dmabuf++ = swab32(sgbuffer[index]);
163 }
164 else
165 memcpy(dmabuf, sgbuffer, amount);
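		/*
		 * Note, as an observation on the code as written: unlike the
		 * errata branch above, this path never advances dmabuf, so if
		 * the scatterlist had more than one entry each memcpy() would
		 * land at the start of the DMA buffer again.  A fix would
		 * presumably advance dmabuf by 'amount' bytes here.
		 */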
166
167 kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
168
169 if (size == 0)
170 break;
171 }
172
173 /*
174 * Check that we didn't get a request to transfer
175 * more data than can fit into the SG list.
176 */
177 BUG_ON(size != 0);
178}
179
180/*
181 * Prepare a dma read
182 */
183static void at91mci_pre_dma_read(struct at91mci_host *host)
184{
185 int i;
186 struct scatterlist *sg;
187 struct mmc_command *cmd;
188 struct mmc_data *data;
189
190 pr_debug("pre dma read\n");
191
192 cmd = host->cmd;
193 if (!cmd) {
194 pr_debug("no command\n");
195 return;
196 }
197
198 data = cmd->data;
199 if (!data) {
200 pr_debug("no data\n");
201 return;
202 }
203
204 for (i = 0; i < 2; i++) {
205 /* nothing left to transfer */
206 if (host->transfer_index >= data->sg_len) {
207 pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
208 break;
209 }
210
211 /* Check to see if this needs filling */
212 if (i == 0) {
213 if (at91_mci_read(host, ATMEL_PDC_RCR) != 0) {
214 pr_debug("Transfer active in current\n");
215 continue;
216 }
217 }
218 else {
219 if (at91_mci_read(host, ATMEL_PDC_RNCR) != 0) {
220 pr_debug("Transfer active in next\n");
221 continue;
222 }
223 }
224
225 /* Setup the next transfer */
226 pr_debug("Using transfer index %d\n", host->transfer_index);
227
228 sg = &data->sg[host->transfer_index++];
229 pr_debug("sg = %p\n", sg);
230
231 sg->dma_address = dma_map_page(NULL, sg->page, sg->offset, sg->length, DMA_FROM_DEVICE);
232
233 pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);
234
235 if (i == 0) {
236 at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
237 at91_mci_write(host, ATMEL_PDC_RCR, sg->length / 4);
238 }
239 else {
240 at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
241 at91_mci_write(host, ATMEL_PDC_RNCR, sg->length / 4);
242 }
243 }
244
245 pr_debug("pre dma read done\n");
246}
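
/*
 * A note on the PDC setup above (a summary of the code, not datasheet
 * wording): the receive PDC has two register pairs, RPR/RCR for the
 * buffer currently being filled and RNPR/RNCR for the one queued next,
 * so up to two scatterlist entries can be outstanding at a time.  The
 * counter registers are loaded with sg->length / 4 because the
 * transfers are performed in 32-bit words.
 */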
247
248/*
249 * Handle after a dma read
250 */
251static void at91mci_post_dma_read(struct at91mci_host *host)
252{
253 struct mmc_command *cmd;
254 struct mmc_data *data;
255
256 pr_debug("post dma read\n");
257
258 cmd = host->cmd;
259 if (!cmd) {
260 pr_debug("no command\n");
261 return;
262 }
263
264 data = cmd->data;
265 if (!data) {
266 pr_debug("no data\n");
267 return;
268 }
269
270 while (host->in_use_index < host->transfer_index) {
271 unsigned int *buffer;
272
273 struct scatterlist *sg;
274
275 pr_debug("finishing index %d\n", host->in_use_index);
276
277 sg = &data->sg[host->in_use_index++];
278
279 pr_debug("Unmapping page %08X\n", sg->dma_address);
280
281 dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);
282
283 /* Swap the contents of the buffer */
284 buffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
285 pr_debug("buffer = %p, length = %d\n", buffer, sg->length);
286
287 data->bytes_xfered += sg->length;
288
289 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
290 int index;
291
292 for (index = 0; index < (sg->length / 4); index++)
293 buffer[index] = swab32(buffer[index]);
294 }
295
296 kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
297 flush_dcache_page(sg->page);
298 }
299
300 /* Is there another transfer to trigger? */
301 if (host->transfer_index < data->sg_len)
302 at91mci_pre_dma_read(host);
303 else {
304 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
305 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
306 }
307
308 pr_debug("post dma read done\n");
309}
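
/*
 * Taken together with at91mci_sg_to_dma() above: on the AT91RM9200 the
 * words are swapped once before a write and swapped back once after a
 * read, so despite the controller errata the data handed to and from
 * the MMC layer ends up in the expected byte order.
 */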
310
311/*
312 * Handle transmitted data
313 */
314static void at91_mci_handle_transmitted(struct at91mci_host *host)
315{
316 struct mmc_command *cmd;
317 struct mmc_data *data;
318
319 pr_debug("Handling the transmit\n");
320
321 /* Disable the transfer */
322 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
323
324 /* Now wait for cmd ready */
325 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);
326 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
327
328 cmd = host->cmd;
329 if (!cmd) return;
330
331 data = cmd->data;
332 if (!data) return;
333
334 data->bytes_xfered = host->total_length;
335}
336
337/*
338 * Enable the controller
339 */
340static void at91_mci_enable(struct at91mci_host *host)
341{
342 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
343 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
344 at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
345 at91_mci_write(host, AT91_MCI_MR, AT91_MCI_PDCMODE | 0x34a);
346
347 /* use Slot A or B (only one at a time) */
348 at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
349}
350
351/*
352 * Disable the controller
353 */
354static void at91_mci_disable(struct at91mci_host *host)
355{
356 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
357}
358
359/*
360 * Send a command
361 * return the interrupts to enable
362 */
363static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
364{
365 unsigned int cmdr, mr;
366 unsigned int block_length;
367 struct mmc_data *data = cmd->data;
368
369 unsigned int blocks;
370 unsigned int ier = 0;
371
372 host->cmd = cmd;
373
374 /* Not sure if this is needed */
375#if 0
376 if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
377 pr_debug("Clearing timeout\n");
378 at91_mci_write(host, AT91_MCI_ARGR, 0);
379 at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
380 while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
381 /* spin */
382 pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
383 }
384 }
385#endif
386 cmdr = cmd->opcode;
387
388 if (mmc_resp_type(cmd) == MMC_RSP_NONE)
389 cmdr |= AT91_MCI_RSPTYP_NONE;
390 else {
391 /* if a response is expected then allow maximum response latency */
392 cmdr |= AT91_MCI_MAXLAT;
393 /* set 136 bit response for R2, 48 bit response otherwise */
394 if (mmc_resp_type(cmd) == MMC_RSP_R2)
395 cmdr |= AT91_MCI_RSPTYP_136;
396 else
397 cmdr |= AT91_MCI_RSPTYP_48;
398 }
399
400 if (data) {
401 block_length = data->blksz;
402 blocks = data->blocks;
403
404 /* always set data start - also set direction flag for read */
405 if (data->flags & MMC_DATA_READ)
406 cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
407 else if (data->flags & MMC_DATA_WRITE)
408 cmdr |= AT91_MCI_TRCMD_START;
409
410 if (data->flags & MMC_DATA_STREAM)
411 cmdr |= AT91_MCI_TRTYP_STREAM;
412 if (data->flags & MMC_DATA_MULTI)
413 cmdr |= AT91_MCI_TRTYP_MULTIPLE;
414 }
415 else {
416 block_length = 0;
417 blocks = 0;
418 }
419
420 if (cmd->opcode == MMC_STOP_TRANSMISSION)
421 cmdr |= AT91_MCI_TRCMD_STOP;
422
423 if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
424 cmdr |= AT91_MCI_OPDCMD;
425
426 /*
427 * Set the arguments and send the command
428 */
429 pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
430 cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));
431
432 if (!data) {
433 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS);
434 at91_mci_write(host, ATMEL_PDC_RPR, 0);
435 at91_mci_write(host, ATMEL_PDC_RCR, 0);
436 at91_mci_write(host, ATMEL_PDC_RNPR, 0);
437 at91_mci_write(host, ATMEL_PDC_RNCR, 0);
438 at91_mci_write(host, ATMEL_PDC_TPR, 0);
439 at91_mci_write(host, ATMEL_PDC_TCR, 0);
440 at91_mci_write(host, ATMEL_PDC_TNPR, 0);
441 at91_mci_write(host, ATMEL_PDC_TNCR, 0);
442
443 at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
444 at91_mci_write(host, AT91_MCI_CMDR, cmdr);
445 return AT91_MCI_CMDRDY;
446 }
447
448 mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff; /* zero block length and PDC mode */
449 at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE);
450
451 /*
452 * Disable the PDC controller
453 */
454 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
455
456 if (cmdr & AT91_MCI_TRCMD_START) {
457 data->bytes_xfered = 0;
458 host->transfer_index = 0;
459 host->in_use_index = 0;
460 if (cmdr & AT91_MCI_TRDIR) {
461 /*
462 * Handle a read
463 */
464 host->buffer = NULL;
465 host->total_length = 0;
466
467 at91mci_pre_dma_read(host);
468 ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
469 }
470 else {
471 /*
472 * Handle a write
473 */
474 host->total_length = block_length * blocks;
475 host->buffer = dma_alloc_coherent(NULL,
476 host->total_length,
477 &host->physical_address, GFP_KERNEL);
478
479 at91mci_sg_to_dma(host, data);
480
481 pr_debug("Transmitting %d bytes\n", host->total_length);
482
483 at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
484 at91_mci_write(host, ATMEL_PDC_TCR, host->total_length / 4);
485 ier = AT91_MCI_TXBUFE;
486 }
487 }
488
489 /*
490 * Send the command and then enable the PDC - not the other way round as
491 * the data sheet says
492 */
493
494 at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
495 at91_mci_write(host, AT91_MCI_CMDR, cmdr);
496
497 if (cmdr & AT91_MCI_TRCMD_START) {
498 if (cmdr & AT91_MCI_TRDIR)
499 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
500 else
501 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
502 }
503 return ier;
504}
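
/*
 * In short, the interrupt mask returned above (the caller ORs in the
 * error bits) is:
 *   plain command:  AT91_MCI_CMDRDY
 *   read:           AT91_MCI_ENDRX, with RXBUFF and CMDRDY enabled later
 *                   from the interrupt handler
 *   write:          AT91_MCI_TXBUFE, with NOTBUSY and CMDRDY following
 *                   in the same way
 */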
505
506/*
507 * Send a command and enable the interrupts needed to complete it
508 */
509static void at91mci_process_command(struct at91mci_host *host, struct mmc_command *cmd)
510{
511 unsigned int ier;
512
513 ier = at91_mci_send_command(host, cmd);
514
515 pr_debug("setting ier to %08X\n", ier);
516
517 /* Enable the error interrupts plus the completion interrupt this command needs */
518 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
519}
520
521/*
522 * Process the next step in the request
523 */
524static void at91mci_process_next(struct at91mci_host *host)
525{
526 if (!(host->flags & FL_SENT_COMMAND)) {
527 host->flags |= FL_SENT_COMMAND;
528 at91mci_process_command(host, host->request->cmd);
529 }
530 else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
531 host->flags |= FL_SENT_STOP;
532 at91mci_process_command(host, host->request->stop);
533 }
534 else
535 mmc_request_done(host->mmc, host->request);
536}
537
538/*
539 * Handle a command that has been completed
540 */
541static void at91mci_completed_command(struct at91mci_host *host)
542{
543 struct mmc_command *cmd = host->cmd;
544 unsigned int status;
545
546 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
547
548 cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
549 cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
550 cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
551 cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));
552
553 if (host->buffer) {
554 dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address);
555 host->buffer = NULL;
556 }
557
558 status = at91_mci_read(host, AT91_MCI_SR);
559
560 pr_debug("Status = %08X [%08X %08X %08X %08X]\n",
561 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
562
563 if (status & (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE |
564 AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE |
565 AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)) {
566 if ((status & AT91_MCI_RCRCE) &&
567 ((cmd->opcode == MMC_SEND_OP_COND) || (cmd->opcode == SD_APP_OP_COND))) {
568 cmd->error = MMC_ERR_NONE;
569 }
570 else {
571 if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE))
572 cmd->error = MMC_ERR_TIMEOUT;
573 else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE))
574 cmd->error = MMC_ERR_BADCRC;
575 else if (status & (AT91_MCI_OVRE | AT91_MCI_UNRE))
576 cmd->error = MMC_ERR_FIFO;
577 else
578 cmd->error = MMC_ERR_FAILED;
579
580 pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n",
581 cmd->error, cmd->opcode, cmd->retries);
582 }
583 }
584 else
585 cmd->error = MMC_ERR_NONE;
586
587 at91mci_process_next(host);
588}
589
590/*
591 * Handle an MMC request
592 */
593static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
594{
595 struct at91mci_host *host = mmc_priv(mmc);
596 host->request = mrq;
597 host->flags = 0;
598
599 at91mci_process_next(host);
600}
601
602/*
603 * Set the IOS
604 */
605static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
606{
607 int clkdiv;
608 struct at91mci_host *host = mmc_priv(mmc);
609 unsigned long at91_master_clock = clk_get_rate(host->mci_clk);
610
611 host->bus_mode = ios->bus_mode;
612
613 if (ios->clock == 0) {
614 /* Disable the MCI controller */
615 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
616 clkdiv = 0;
617 }
618 else {
619 /* Enable the MCI controller */
620 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
621
622 if ((at91_master_clock % (ios->clock * 2)) == 0)
623 clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
624 else
625 clkdiv = (at91_master_clock / ios->clock) / 2;
626
627 pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
628 at91_master_clock / (2 * (clkdiv + 1)));
629 }
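	/*
	 * Worked example, purely illustrative and assuming a 60 MHz master
	 * clock: for ios->clock = 400 kHz the division is exact, so
	 * clkdiv = (60000000 / 400000) / 2 - 1 = 74 and
	 * MCCK = 60 MHz / (2 * (74 + 1)) = 400 kHz.  When the division is
	 * not exact, the else branch truncates towards a larger divider,
	 * so MCCK never exceeds the requested clock.
	 */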
630 if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
631 pr_debug("MMC: Setting controller bus width to 4\n");
632 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
633 }
634 else {
635 pr_debug("MMC: Setting controller bus width to 1\n");
636 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
637 }
638
639 /* Set the clock divider */
640 at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);
641
642 /* maybe switch power to the card */
643 if (host->board->vcc_pin) {
644 switch (ios->power_mode) {
645 case MMC_POWER_OFF:
646 at91_set_gpio_value(host->board->vcc_pin, 0);
647 break;
648 case MMC_POWER_UP:
649 case MMC_POWER_ON:
650 at91_set_gpio_value(host->board->vcc_pin, 1);
651 break;
652 }
653 }
654}
655
656/*
657 * Handle an interrupt
658 */
659static irqreturn_t at91_mci_irq(int irq, void *devid)
660{
661 struct at91mci_host *host = devid;
662 int completed = 0;
663 unsigned int int_status, int_mask;
664
665 int_status = at91_mci_read(host, AT91_MCI_SR);
666 int_mask = at91_mci_read(host, AT91_MCI_IMR);
667
668 pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
669 int_status & int_mask);
670
671 int_status = int_status & int_mask;
672
673 if (int_status & AT91_MCI_ERRORS) {
674 completed = 1;
675
676 if (int_status & AT91_MCI_UNRE)
677 pr_debug("MMC: Underrun error\n");
678 if (int_status & AT91_MCI_OVRE)
679 pr_debug("MMC: Overrun error\n");
680 if (int_status & AT91_MCI_DTOE)
681 pr_debug("MMC: Data timeout\n");
682 if (int_status & AT91_MCI_DCRCE)
683 pr_debug("MMC: CRC error in data\n");
684 if (int_status & AT91_MCI_RTOE)
685 pr_debug("MMC: Response timeout\n");
686 if (int_status & AT91_MCI_RENDE)
687 pr_debug("MMC: Response end bit error\n");
688 if (int_status & AT91_MCI_RCRCE)
689 pr_debug("MMC: Response CRC error\n");
690 if (int_status & AT91_MCI_RDIRE)
691 pr_debug("MMC: Response direction error\n");
692 if (int_status & AT91_MCI_RINDE)
693 pr_debug("MMC: Response index error\n");
694 } else {
695 /* Only continue processing if no errors */
696
697 if (int_status & AT91_MCI_TXBUFE) {
698 pr_debug("TX buffer empty\n");
699 at91_mci_handle_transmitted(host);
700 }
701
702 if (int_status & AT91_MCI_RXBUFF) {
703 pr_debug("RX buffer full\n");
704 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_CMDRDY);
705 }
706
707 if (int_status & AT91_MCI_ENDTX)
708 pr_debug("Transmit has ended\n");
709
710 if (int_status & AT91_MCI_ENDRX) {
711 pr_debug("Receive has ended\n");
712 at91mci_post_dma_read(host);
713 }
714
715 if (int_status & AT91_MCI_NOTBUSY) {
716 pr_debug("Card is ready\n");
717 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_CMDRDY);
718 }
719
720 if (int_status & AT91_MCI_DTIP)
721 pr_debug("Data transfer in progress\n");
722
723 if (int_status & AT91_MCI_BLKE)
724 pr_debug("Block transfer has ended\n");
725
726 if (int_status & AT91_MCI_TXRDY)
727 pr_debug("Ready to transmit\n");
728
729 if (int_status & AT91_MCI_RXRDY)
730 pr_debug("Ready to receive\n");
731
732 if (int_status & AT91_MCI_CMDRDY) {
733 pr_debug("Command ready\n");
734 completed = 1;
735 }
736 }
737
738 if (completed) {
739 pr_debug("Completed command\n");
740 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
741 at91mci_completed_command(host);
742 } else
743 at91_mci_write(host, AT91_MCI_IDR, int_status);
744
745 return IRQ_HANDLED;
746}
747
748static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
749{
750 struct at91mci_host *host = _host;
751 int present = !at91_get_gpio_value(irq);
752
753 /*
754 * we expect this irq on both insert and remove,
755 * and use a short delay to debounce.
756 */
757 if (present != host->present) {
758 host->present = present;
759 pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
760 present ? "insert" : "remove");
761 if (!present) {
762 pr_debug("****** Resetting SD-card bus width ******\n");
763 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
764 }
765 mmc_detect_change(host->mmc, msecs_to_jiffies(100));
766 }
767 return IRQ_HANDLED;
768}
769
770static int at91_mci_get_ro(struct mmc_host *mmc)
771{
772 int read_only = 0;
773 struct at91mci_host *host = mmc_priv(mmc);
774
775 if (host->board->wp_pin) {
776 read_only = at91_get_gpio_value(host->board->wp_pin);
777 printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc),
778 (read_only ? "read-only" : "read-write") );
779 }
780 else {
781 printk(KERN_WARNING "%s: host does not support reading read-only "
782 "switch. Assuming write-enabled.\n", mmc_hostname(mmc));
783 }
784 return read_only;
785}
786
787static const struct mmc_host_ops at91_mci_ops = {
788 .request = at91_mci_request,
789 .set_ios = at91_mci_set_ios,
790 .get_ro = at91_mci_get_ro,
791};
792
793/*
794 * Probe for the device
795 */
796static int __init at91_mci_probe(struct platform_device *pdev)
797{
798 struct mmc_host *mmc;
799 struct at91mci_host *host;
800 struct resource *res;
801 int ret;
802
803 pr_debug("Probe MCI devices\n");
804
805 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
806 if (!res)
807 return -ENXIO;
808
809 if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
810 return -EBUSY;
811
812 mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
813 if (!mmc) {
814 pr_debug("Failed to allocate mmc host\n");
815 release_mem_region(res->start, res->end - res->start + 1);
816 return -ENOMEM;
817 }
818
819 mmc->ops = &at91_mci_ops;
820 mmc->f_min = 375000;
821 mmc->f_max = 25000000;
822 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
823 mmc->caps = MMC_CAP_BYTEBLOCK;
824
825 mmc->max_blk_size = 4095;
826 mmc->max_blk_count = mmc->max_req_size;
827
828 host = mmc_priv(mmc);
829 host->mmc = mmc;
830 host->buffer = NULL;
831 host->bus_mode = 0;
832 host->board = pdev->dev.platform_data;
833 if (host->board->wire4) {
834#ifdef SUPPORT_4WIRE
835 mmc->caps |= MMC_CAP_4_BIT_DATA;
836#else
837 printk("AT91 MMC: 4 wire bus mode not supported by this driver - using 1 wire\n");
838#endif
839 }
840
841 /*
842 * Get Clock
843 */
844 host->mci_clk = clk_get(&pdev->dev, "mci_clk");
845 if (IS_ERR(host->mci_clk)) {
846 printk(KERN_ERR "AT91 MMC: no clock defined.\n");
847 mmc_free_host(mmc);
848 release_mem_region(res->start, res->end - res->start + 1);
849 return -ENODEV;
850 }
851
852 /*
853 * Map I/O region
854 */
855 host->baseaddr = ioremap(res->start, res->end - res->start + 1);
856 if (!host->baseaddr) {
857 clk_put(host->mci_clk);
858 mmc_free_host(mmc);
859 release_mem_region(res->start, res->end - res->start + 1);
860 return -ENOMEM;
861 }
862
863 /*
864 * Reset hardware
865 */
866 clk_enable(host->mci_clk); /* Enable the peripheral clock */
867 at91_mci_disable(host);
868 at91_mci_enable(host);
869
870 /*
871 * Allocate the MCI interrupt
872 */
873 host->irq = platform_get_irq(pdev, 0);
874 ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED, DRIVER_NAME, host);
875 if (ret) {
876 printk(KERN_ERR "AT91 MMC: Failed to request MCI interrupt\n");
877 clk_disable(host->mci_clk);
878 clk_put(host->mci_clk);
879 mmc_free_host(mmc);
880 iounmap(host->baseaddr);
881 release_mem_region(res->start, res->end - res->start + 1);
882 return ret;
883 }
884
885 platform_set_drvdata(pdev, mmc);
886
887 /*
888 * Add host to MMC layer
889 */
890 if (host->board->det_pin)
891 host->present = !at91_get_gpio_value(host->board->det_pin);
892 else
893 host->present = -1;
894
895 mmc_add_host(mmc);
896
897 /*
898 * monitor card insertion/removal if we can
899 */
900 if (host->board->det_pin) {
901 ret = request_irq(host->board->det_pin, at91_mmc_det_irq,
902 0, DRIVER_NAME, host);
903 if (ret)
904 printk(KERN_ERR "AT91 MMC: Couldn't allocate MMC detect irq\n");
905 }
906
907 pr_debug("Added MCI driver\n");
908
909 return 0;
910}
911
912/*
913 * Remove a device
914 */
915static int __exit at91_mci_remove(struct platform_device *pdev)
916{
917 struct mmc_host *mmc = platform_get_drvdata(pdev);
918 struct at91mci_host *host;
919 struct resource *res;
920
921 if (!mmc)
922 return -1;
923
924 host = mmc_priv(mmc);
925
926 if (host->present != -1) {
927 free_irq(host->board->det_pin, host);
928 cancel_delayed_work(&host->mmc->detect);
929 }
930
931 at91_mci_disable(host);
932 mmc_remove_host(mmc);
933 free_irq(host->irq, host);
934
935 clk_disable(host->mci_clk); /* Disable the peripheral clock */
936 clk_put(host->mci_clk);
937
938 iounmap(host->baseaddr);
939 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
940 release_mem_region(res->start, res->end - res->start + 1);
941
942 mmc_free_host(mmc);
943 platform_set_drvdata(pdev, NULL);
944 pr_debug("MCI Removed\n");
945
946 return 0;
947}
948
949#ifdef CONFIG_PM
950static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
951{
952 struct mmc_host *mmc = platform_get_drvdata(pdev);
953 int ret = 0;
954
955 if (mmc)
956 ret = mmc_suspend_host(mmc, state);
957
958 return ret;
959}
960
961static int at91_mci_resume(struct platform_device *pdev)
962{
963 struct mmc_host *mmc = platform_get_drvdata(pdev);
964 int ret = 0;
965
966 if (mmc)
967 ret = mmc_resume_host(mmc);
968
969 return ret;
970}
971#else
972#define at91_mci_suspend NULL
973#define at91_mci_resume NULL
974#endif
975
976static struct platform_driver at91_mci_driver = {
977 .remove = __exit_p(at91_mci_remove),
978 .suspend = at91_mci_suspend,
979 .resume = at91_mci_resume,
980 .driver = {
981 .name = DRIVER_NAME,
982 .owner = THIS_MODULE,
983 },
984};
985
986static int __init at91_mci_init(void)
987{
988 return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
989}
990
991static void __exit at91_mci_exit(void)
992{
993 platform_driver_unregister(&at91_mci_driver);
994}
995
996module_init(at91_mci_init);
997module_exit(at91_mci_exit);
998
999MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
1000MODULE_AUTHOR("Nick Randell");
1001MODULE_LICENSE("GPL");