Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/mmc/Kconfig    |   18
-rw-r--r-- | drivers/mmc/Makefile   |    2
-rw-r--r-- | drivers/mmc/at91_mci.c |  988
-rw-r--r-- | drivers/mmc/imxmmc.c   | 1096
-rw-r--r-- | drivers/mmc/imxmmc.h   |   67
5 files changed, 2171 insertions, 0 deletions
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index 7cc162e8978b..003b077c2324 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -91,4 +91,22 @@ config MMC_AU1X
91 | 91 | ||
92 | If unsure, say N. | 92 | If unsure, say N. |
93 | 93 | ||
94 | config MMC_AT91RM9200 | ||
95 | tristate "AT91RM9200 SD/MMC Card Interface support" | ||
96 | depends on ARCH_AT91RM9200 && MMC | ||
97 | help | ||
98 | This selects the AT91RM9200 MCI controller. | ||
99 | |||
100 | If unsure, say N. | ||
101 | |||
102 | config MMC_IMX | ||
103 | tristate "Motorola i.MX Multimedia Card Interface support" | ||
104 | depends on ARCH_IMX && MMC | ||
105 | help | ||
106 | This selects the Motorola i.MX Multimedia Card Interface. | ||
107 | If you have an i.MX platform with a Multimedia Card slot, | ||
108 | say Y or M here. | ||
109 | |||
110 | If unsure, say N. | ||
111 | |||
94 | endmenu | 112 | endmenu |
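With these Kconfig entries and the Makefile hooks below in place, a board configuration can build the new drivers as modules with a .config fragment along these lines (illustrative only; the option names are the ones defined above):

CONFIG_MMC_AT91RM9200=m
CONFIG_MMC_IMX=m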
diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile
index c7c34aadfc92..d2957e35cc6f 100644
--- a/drivers/mmc/Makefile
+++ b/drivers/mmc/Makefile
@@ -17,10 +17,12 @@ obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
17 | # | 17 | # |
18 | obj-$(CONFIG_MMC_ARMMMCI) += mmci.o | 18 | obj-$(CONFIG_MMC_ARMMMCI) += mmci.o |
19 | obj-$(CONFIG_MMC_PXA) += pxamci.o | 19 | obj-$(CONFIG_MMC_PXA) += pxamci.o |
20 | obj-$(CONFIG_MMC_IMX) += imxmmc.o | ||
20 | obj-$(CONFIG_MMC_SDHCI) += sdhci.o | 21 | obj-$(CONFIG_MMC_SDHCI) += sdhci.o |
21 | obj-$(CONFIG_MMC_WBSD) += wbsd.o | 22 | obj-$(CONFIG_MMC_WBSD) += wbsd.o |
22 | obj-$(CONFIG_MMC_AU1X) += au1xmmc.o | 23 | obj-$(CONFIG_MMC_AU1X) += au1xmmc.o |
23 | obj-$(CONFIG_MMC_OMAP) += omap.o | 24 | obj-$(CONFIG_MMC_OMAP) += omap.o |
25 | obj-$(CONFIG_MMC_AT91RM9200) += at91_mci.o | ||
24 | 26 | ||
25 | mmc_core-y := mmc.o mmc_queue.o mmc_sysfs.o | 27 | mmc_core-y := mmc.o mmc_queue.o mmc_sysfs.o |
26 | 28 | ||
diff --git a/drivers/mmc/at91_mci.c b/drivers/mmc/at91_mci.c
new file mode 100644
index 000000000000..6061c2d101a0
--- /dev/null
+++ b/drivers/mmc/at91_mci.c
@@ -0,0 +1,988 @@
1 | /* | ||
2 | * linux/drivers/mmc/at91_mci.c - ATMEL AT91RM9200 MCI Driver | ||
3 | * | ||
4 | * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved | ||
5 | * | ||
6 | * Copyright (C) 2006 Malcolm Noyes | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | /* | ||
14 | This is the AT91RM9200 MCI driver that has been tested with both MMC cards | ||
15 | and SD cards. Boards that support write protect are now supported. | ||
16 | The CCAT91SBC001 board does not support SD cards. | ||
17 | |||
18 | The three entry points are at91_mci_request, at91_mci_set_ios | ||
19 | and at91_mci_get_ro. | ||
20 | |||
21 | SET IOS | ||
22 | This configures the device to put it into the correct mode and clock speed | ||
23 | required. | ||
24 | |||
25 | MCI REQUEST | ||
26 | MCI request processes the commands sent in the mmc_request structure. This | ||
27 | can consist of a processing command and a stop command in the case of | ||
28 | multiple block transfers. | ||
29 | |||
30 | There are three main types of request, commands, reads and writes. | ||
31 | |||
32 | Commands are straightforward. The command is submitted to the controller and | ||
33 | the request function returns. When the controller generates an interrupt to indicate | ||
34 | the command has finished, the response to the command is read and mmc_request_done | ||
35 | is called to end the request. | ||
36 | |||
37 | Reads and writes work in a similar manner to normal commands but involve the PDC (DMA) | ||
38 | controller to manage the transfers. | ||
39 | |||
40 | A read is done from the controller directly to the scatterlist passed in from the request. | ||
41 | Due to a bug in the controller, when a read is completed, all the words are byte | ||
42 | swapped in the scatterlist buffers. | ||
43 | |||
44 | The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY | ||
45 | |||
46 | A write is slightly different in that the bytes to write are first copied from the | ||
47 | scatterlist into a DMA memory buffer (in case the source buffer is read-only). The | ||
48 | entire write is then performed from this single DMA memory buffer. | ||
49 | |||
50 | The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY | ||
51 | |||
52 | GET RO | ||
53 | Gets the status of the write protect pin, if available. | ||
54 | */ | ||
55 | |||
56 | #include <linux/config.h> | ||
57 | #include <linux/module.h> | ||
58 | #include <linux/moduleparam.h> | ||
59 | #include <linux/init.h> | ||
60 | #include <linux/ioport.h> | ||
61 | #include <linux/platform_device.h> | ||
62 | #include <linux/interrupt.h> | ||
63 | #include <linux/blkdev.h> | ||
64 | #include <linux/delay.h> | ||
65 | #include <linux/err.h> | ||
66 | #include <linux/dma-mapping.h> | ||
67 | #include <linux/clk.h> | ||
68 | |||
69 | #include <linux/mmc/host.h> | ||
70 | #include <linux/mmc/protocol.h> | ||
71 | |||
72 | #include <asm/io.h> | ||
73 | #include <asm/irq.h> | ||
74 | #include <asm/mach/mmc.h> | ||
75 | #include <asm/arch/board.h> | ||
76 | #include <asm/arch/gpio.h> | ||
77 | #include <asm/arch/at91rm9200_mci.h> | ||
78 | #include <asm/arch/at91rm9200_pdc.h> | ||
79 | |||
80 | #define DRIVER_NAME "at91_mci" | ||
81 | |||
82 | #undef SUPPORT_4WIRE | ||
83 | |||
84 | #ifdef CONFIG_MMC_DEBUG | ||
85 | #define DBG(fmt...) \ | ||
86 | printk(fmt) | ||
87 | #else | ||
88 | #define DBG(fmt...) do { } while (0) | ||
89 | #endif | ||
90 | |||
91 | static struct clk *mci_clk; | ||
92 | |||
93 | #define FL_SENT_COMMAND (1 << 0) | ||
94 | #define FL_SENT_STOP (1 << 1) | ||
95 | |||
96 | |||
97 | |||
98 | /* | ||
99 | * Read from a MCI register. | ||
100 | */ | ||
101 | static inline unsigned long at91_mci_read(unsigned int reg) | ||
102 | { | ||
103 | void __iomem *mci_base = (void __iomem *)AT91_VA_BASE_MCI; | ||
104 | |||
105 | return __raw_readl(mci_base + reg); | ||
106 | } | ||
107 | |||
108 | /* | ||
109 | * Write to a MCI register. | ||
110 | */ | ||
111 | static inline void at91_mci_write(unsigned int reg, unsigned long value) | ||
112 | { | ||
113 | void __iomem *mci_base = (void __iomem *)AT91_VA_BASE_MCI; | ||
114 | |||
115 | __raw_writel(value, mci_base + reg); | ||
116 | } | ||
117 | |||
118 | /* | ||
119 | * Low level type for this driver | ||
120 | */ | ||
121 | struct at91mci_host | ||
122 | { | ||
123 | struct mmc_host *mmc; | ||
124 | struct mmc_command *cmd; | ||
125 | struct mmc_request *request; | ||
126 | |||
127 | struct at91_mmc_data *board; | ||
128 | int present; | ||
129 | |||
130 | /* | ||
131 | * Flag indicating when the command has been sent. This is used to | ||
132 | * work out whether or not to send the stop | ||
133 | */ | ||
134 | unsigned int flags; | ||
135 | /* flag for current bus settings */ | ||
136 | u32 bus_mode; | ||
137 | |||
138 | /* DMA buffer used for transmitting */ | ||
139 | unsigned int* buffer; | ||
140 | dma_addr_t physical_address; | ||
141 | unsigned int total_length; | ||
142 | |||
143 | /* Latest in the scatterlist that has been enabled for transfer, but not freed */ | ||
144 | int in_use_index; | ||
145 | |||
146 | /* Latest in the scatterlist that has been enabled for transfer */ | ||
147 | int transfer_index; | ||
148 | }; | ||
149 | |||
150 | /* | ||
151 | * Copy from sg to a dma block - used for transfers | ||
152 | */ | ||
153 | static inline void at91mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data) | ||
154 | { | ||
155 | unsigned int len, i, size; | ||
156 | unsigned *dmabuf = host->buffer; | ||
157 | |||
158 | size = host->total_length; | ||
159 | len = data->sg_len; | ||
160 | |||
161 | /* | ||
162 | * Just loop through all entries. Size might not | ||
163 | * be the entire list though so make sure that | ||
164 | * we do not transfer too much. | ||
165 | */ | ||
166 | for (i = 0; i < len; i++) { | ||
167 | struct scatterlist *sg; | ||
168 | int amount; | ||
169 | int index; | ||
170 | unsigned int *sgbuffer; | ||
171 | |||
172 | sg = &data->sg[i]; | ||
173 | |||
174 | sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset; | ||
175 | amount = min(size, sg->length); | ||
176 | size -= amount; | ||
177 | amount /= 4; | ||
178 | |||
179 | for (index = 0; index < amount; index++) | ||
180 | *dmabuf++ = swab32(sgbuffer[index]); | ||
181 | |||
182 | kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ); | ||
183 | |||
184 | if (size == 0) | ||
185 | break; | ||
186 | } | ||
187 | |||
188 | /* | ||
189 | * Check that we didn't get a request to transfer | ||
190 | * more data than can fit into the SG list. | ||
191 | */ | ||
192 | BUG_ON(size != 0); | ||
193 | } | ||
194 | |||
195 | /* | ||
196 | * Prepare a dma read | ||
197 | */ | ||
198 | static void at91mci_pre_dma_read(struct at91mci_host *host) | ||
199 | { | ||
200 | int i; | ||
201 | struct scatterlist *sg; | ||
202 | struct mmc_command *cmd; | ||
203 | struct mmc_data *data; | ||
204 | |||
205 | DBG("pre dma read\n"); | ||
206 | |||
207 | cmd = host->cmd; | ||
208 | if (!cmd) { | ||
209 | DBG("no command\n"); | ||
210 | return; | ||
211 | } | ||
212 | |||
213 | data = cmd->data; | ||
214 | if (!data) { | ||
215 | DBG("no data\n"); | ||
216 | return; | ||
217 | } | ||
218 | |||
219 | for (i = 0; i < 2; i++) { | ||
220 | /* nothing left to transfer */ | ||
221 | if (host->transfer_index >= data->sg_len) { | ||
222 | DBG("Nothing left to transfer (index = %d)\n", host->transfer_index); | ||
223 | break; | ||
224 | } | ||
225 | |||
226 | /* Check to see if this needs filling */ | ||
227 | if (i == 0) { | ||
228 | if (at91_mci_read(AT91_PDC_RCR) != 0) { | ||
229 | DBG("Transfer active in current\n"); | ||
230 | continue; | ||
231 | } | ||
232 | } | ||
233 | else { | ||
234 | if (at91_mci_read(AT91_PDC_RNCR) != 0) { | ||
235 | DBG("Transfer active in next\n"); | ||
236 | continue; | ||
237 | } | ||
238 | } | ||
239 | |||
240 | /* Setup the next transfer */ | ||
241 | DBG("Using transfer index %d\n", host->transfer_index); | ||
242 | |||
243 | sg = &data->sg[host->transfer_index++]; | ||
244 | DBG("sg = %p\n", sg); | ||
245 | |||
246 | sg->dma_address = dma_map_page(NULL, sg->page, sg->offset, sg->length, DMA_FROM_DEVICE); | ||
247 | |||
248 | DBG("dma address = %08X, length = %d\n", sg->dma_address, sg->length); | ||
249 | |||
250 | if (i == 0) { | ||
251 | at91_mci_write(AT91_PDC_RPR, sg->dma_address); | ||
252 | at91_mci_write(AT91_PDC_RCR, sg->length / 4); | ||
253 | } | ||
254 | else { | ||
255 | at91_mci_write(AT91_PDC_RNPR, sg->dma_address); | ||
256 | at91_mci_write(AT91_PDC_RNCR, sg->length / 4); | ||
257 | } | ||
258 | } | ||
259 | |||
260 | DBG("pre dma read done\n"); | ||
261 | } | ||
262 | |||
263 | /* | ||
264 | * Handle after a dma read | ||
265 | */ | ||
266 | static void at91mci_post_dma_read(struct at91mci_host *host) | ||
267 | { | ||
268 | struct mmc_command *cmd; | ||
269 | struct mmc_data *data; | ||
270 | |||
271 | DBG("post dma read\n"); | ||
272 | |||
273 | cmd = host->cmd; | ||
274 | if (!cmd) { | ||
275 | DBG("no command\n"); | ||
276 | return; | ||
277 | } | ||
278 | |||
279 | data = cmd->data; | ||
280 | if (!data) { | ||
281 | DBG("no data\n"); | ||
282 | return; | ||
283 | } | ||
284 | |||
285 | while (host->in_use_index < host->transfer_index) { | ||
286 | unsigned int *buffer; | ||
287 | int index; | ||
288 | int len; | ||
289 | |||
290 | struct scatterlist *sg; | ||
291 | |||
292 | DBG("finishing index %d\n", host->in_use_index); | ||
293 | |||
294 | sg = &data->sg[host->in_use_index++]; | ||
295 | |||
296 | DBG("Unmapping page %08X\n", sg->dma_address); | ||
297 | |||
298 | dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE); | ||
299 | |||
300 | /* Swap the contents of the buffer */ | ||
301 | buffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset; | ||
302 | DBG("buffer = %p, length = %d\n", buffer, sg->length); | ||
303 | |||
304 | data->bytes_xfered += sg->length; | ||
305 | |||
306 | len = sg->length / 4; | ||
307 | |||
308 | for (index = 0; index < len; index++) { | ||
309 | buffer[index] = swab32(buffer[index]); | ||
310 | } | ||
311 | kunmap_atomic(buffer, KM_BIO_SRC_IRQ); | ||
312 | flush_dcache_page(sg->page); | ||
313 | } | ||
314 | |||
315 | /* Is there another transfer to trigger? */ | ||
316 | if (host->transfer_index < data->sg_len) | ||
317 | at91mci_pre_dma_read(host); | ||
318 | else { | ||
319 | at91_mci_write(AT91_MCI_IER, AT91_MCI_RXBUFF); | ||
320 | at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS); | ||
321 | } | ||
322 | |||
323 | DBG("post dma read done\n"); | ||
324 | } | ||
325 | |||
326 | /* | ||
327 | * Handle transmitted data | ||
328 | */ | ||
329 | static void at91_mci_handle_transmitted(struct at91mci_host *host) | ||
330 | { | ||
331 | struct mmc_command *cmd; | ||
332 | struct mmc_data *data; | ||
333 | |||
334 | DBG("Handling the transmit\n"); | ||
335 | |||
336 | /* Disable the transfer */ | ||
337 | at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS); | ||
338 | |||
339 | /* Now wait for cmd ready */ | ||
340 | at91_mci_write(AT91_MCI_IDR, AT91_MCI_TXBUFE); | ||
341 | at91_mci_write(AT91_MCI_IER, AT91_MCI_NOTBUSY); | ||
342 | |||
343 | cmd = host->cmd; | ||
344 | if (!cmd) return; | ||
345 | |||
346 | data = cmd->data; | ||
347 | if (!data) return; | ||
348 | |||
349 | data->bytes_xfered = host->total_length; | ||
350 | } | ||
351 | |||
352 | /* | ||
353 | * Enable the controller | ||
354 | */ | ||
355 | static void at91_mci_enable(void) | ||
356 | { | ||
357 | at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIEN); | ||
358 | at91_mci_write(AT91_MCI_IDR, 0xFFFFFFFF); | ||
359 | at91_mci_write(AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC); | ||
360 | at91_mci_write(AT91_MCI_MR, 0x834A); | ||
361 | at91_mci_write(AT91_MCI_SDCR, 0x0); | ||
362 | } | ||
363 | |||
364 | /* | ||
365 | * Disable the controller | ||
366 | */ | ||
367 | static void at91_mci_disable(void) | ||
368 | { | ||
369 | at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST); | ||
370 | } | ||
371 | |||
372 | /* | ||
373 | * Send a command | ||
374 | * return the interrupts to enable | ||
375 | */ | ||
376 | static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd) | ||
377 | { | ||
378 | unsigned int cmdr, mr; | ||
379 | unsigned int block_length; | ||
380 | struct mmc_data *data = cmd->data; | ||
381 | |||
382 | unsigned int blocks; | ||
383 | unsigned int ier = 0; | ||
384 | |||
385 | host->cmd = cmd; | ||
386 | |||
387 | /* Not sure if this is needed */ | ||
388 | #if 0 | ||
389 | if ((at91_mci_read(AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) { | ||
390 | DBG("Clearing timeout\n"); | ||
391 | at91_mci_write(AT91_MCI_ARGR, 0); | ||
392 | at91_mci_write(AT91_MCI_CMDR, AT91_MCI_OPDCMD); | ||
393 | while (!(at91_mci_read(AT91_MCI_SR) & AT91_MCI_CMDRDY)) { | ||
394 | /* spin */ | ||
395 | DBG("Clearing: SR = %08X\n", at91_mci_read(AT91_MCI_SR)); | ||
396 | } | ||
397 | } | ||
398 | #endif | ||
399 | cmdr = cmd->opcode; | ||
400 | |||
401 | if (mmc_resp_type(cmd) == MMC_RSP_NONE) | ||
402 | cmdr |= AT91_MCI_RSPTYP_NONE; | ||
403 | else { | ||
404 | /* if a response is expected then allow maximum response latency */ | ||
405 | cmdr |= AT91_MCI_MAXLAT; | ||
406 | /* set 136 bit response for R2, 48 bit response otherwise */ | ||
407 | if (mmc_resp_type(cmd) == MMC_RSP_R2) | ||
408 | cmdr |= AT91_MCI_RSPTYP_136; | ||
409 | else | ||
410 | cmdr |= AT91_MCI_RSPTYP_48; | ||
411 | } | ||
412 | |||
413 | if (data) { | ||
414 | block_length = 1 << data->blksz_bits; | ||
415 | blocks = data->blocks; | ||
416 | |||
417 | /* always set data start - also set direction flag for read */ | ||
418 | if (data->flags & MMC_DATA_READ) | ||
419 | cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START); | ||
420 | else if (data->flags & MMC_DATA_WRITE) | ||
421 | cmdr |= AT91_MCI_TRCMD_START; | ||
422 | |||
423 | if (data->flags & MMC_DATA_STREAM) | ||
424 | cmdr |= AT91_MCI_TRTYP_STREAM; | ||
425 | if (data->flags & MMC_DATA_MULTI) | ||
426 | cmdr |= AT91_MCI_TRTYP_MULTIPLE; | ||
427 | } | ||
428 | else { | ||
429 | block_length = 0; | ||
430 | blocks = 0; | ||
431 | } | ||
432 | |||
433 | if (cmd->opcode == MMC_STOP_TRANSMISSION) | ||
434 | cmdr |= AT91_MCI_TRCMD_STOP; | ||
435 | |||
436 | if (host->bus_mode == MMC_BUSMODE_OPENDRAIN) | ||
437 | cmdr |= AT91_MCI_OPDCMD; | ||
438 | |||
439 | /* | ||
440 | * Set the arguments and send the command | ||
441 | */ | ||
442 | DBG("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08lX)\n", | ||
443 | cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(AT91_MCI_MR)); | ||
444 | |||
445 | if (!data) { | ||
446 | at91_mci_write(AT91_PDC_PTCR, AT91_PDC_TXTDIS | AT91_PDC_RXTDIS); | ||
447 | at91_mci_write(AT91_PDC_RPR, 0); | ||
448 | at91_mci_write(AT91_PDC_RCR, 0); | ||
449 | at91_mci_write(AT91_PDC_RNPR, 0); | ||
450 | at91_mci_write(AT91_PDC_RNCR, 0); | ||
451 | at91_mci_write(AT91_PDC_TPR, 0); | ||
452 | at91_mci_write(AT91_PDC_TCR, 0); | ||
453 | at91_mci_write(AT91_PDC_TNPR, 0); | ||
454 | at91_mci_write(AT91_PDC_TNCR, 0); | ||
455 | |||
456 | at91_mci_write(AT91_MCI_ARGR, cmd->arg); | ||
457 | at91_mci_write(AT91_MCI_CMDR, cmdr); | ||
458 | return AT91_MCI_CMDRDY; | ||
459 | } | ||
460 | |||
461 | mr = at91_mci_read(AT91_MCI_MR) & 0x7fff; /* zero block length and PDC mode */ | ||
462 | at91_mci_write(AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE); | ||
463 | |||
464 | /* | ||
465 | * Disable the PDC controller | ||
466 | */ | ||
467 | at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS); | ||
468 | |||
469 | if (cmdr & AT91_MCI_TRCMD_START) { | ||
470 | data->bytes_xfered = 0; | ||
471 | host->transfer_index = 0; | ||
472 | host->in_use_index = 0; | ||
473 | if (cmdr & AT91_MCI_TRDIR) { | ||
474 | /* | ||
475 | * Handle a read | ||
476 | */ | ||
477 | host->buffer = NULL; | ||
478 | host->total_length = 0; | ||
479 | |||
480 | at91mci_pre_dma_read(host); | ||
481 | ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */; | ||
482 | } | ||
483 | else { | ||
484 | /* | ||
485 | * Handle a write | ||
486 | */ | ||
487 | host->total_length = block_length * blocks; | ||
488 | host->buffer = dma_alloc_coherent(NULL, | ||
489 | host->total_length, | ||
490 | &host->physical_address, GFP_KERNEL); | ||
491 | |||
492 | at91mci_sg_to_dma(host, data); | ||
493 | |||
494 | DBG("Transmitting %d bytes\n", host->total_length); | ||
495 | |||
496 | at91_mci_write(AT91_PDC_TPR, host->physical_address); | ||
497 | at91_mci_write(AT91_PDC_TCR, host->total_length / 4); | ||
498 | ier = AT91_MCI_TXBUFE; | ||
499 | } | ||
500 | } | ||
501 | |||
502 | /* | ||
503 | * Send the command and then enable the PDC - not the other way round as | ||
504 | * the data sheet says | ||
505 | */ | ||
506 | |||
507 | at91_mci_write(AT91_MCI_ARGR, cmd->arg); | ||
508 | at91_mci_write(AT91_MCI_CMDR, cmdr); | ||
509 | |||
510 | if (cmdr & AT91_MCI_TRCMD_START) { | ||
511 | if (cmdr & AT91_MCI_TRDIR) | ||
512 | at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTEN); | ||
513 | else | ||
514 | at91_mci_write(AT91_PDC_PTCR, AT91_PDC_TXTEN); | ||
515 | } | ||
516 | return ier; | ||
517 | } | ||
518 | |||
519 | /* | ||
520 | * Wait for a command to complete | ||
521 | */ | ||
522 | static void at91mci_process_command(struct at91mci_host *host, struct mmc_command *cmd) | ||
523 | { | ||
524 | unsigned int ier; | ||
525 | |||
526 | ier = at91_mci_send_command(host, cmd); | ||
527 | |||
528 | DBG("setting ier to %08X\n", ier); | ||
529 | |||
530 | /* Stop on errors or the required value */ | ||
531 | at91_mci_write(AT91_MCI_IER, 0xffff0000 | ier); | ||
532 | } | ||
533 | |||
534 | /* | ||
535 | * Process the next step in the request | ||
536 | */ | ||
537 | static void at91mci_process_next(struct at91mci_host *host) | ||
538 | { | ||
539 | if (!(host->flags & FL_SENT_COMMAND)) { | ||
540 | host->flags |= FL_SENT_COMMAND; | ||
541 | at91mci_process_command(host, host->request->cmd); | ||
542 | } | ||
543 | else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) { | ||
544 | host->flags |= FL_SENT_STOP; | ||
545 | at91mci_process_command(host, host->request->stop); | ||
546 | } | ||
547 | else | ||
548 | mmc_request_done(host->mmc, host->request); | ||
549 | } | ||
550 | |||
551 | /* | ||
552 | * Handle a command that has been completed | ||
553 | */ | ||
554 | static void at91mci_completed_command(struct at91mci_host *host) | ||
555 | { | ||
556 | struct mmc_command *cmd = host->cmd; | ||
557 | unsigned int status; | ||
558 | |||
559 | at91_mci_write(AT91_MCI_IDR, 0xffffffff); | ||
560 | |||
561 | cmd->resp[0] = at91_mci_read(AT91_MCI_RSPR(0)); | ||
562 | cmd->resp[1] = at91_mci_read(AT91_MCI_RSPR(1)); | ||
563 | cmd->resp[2] = at91_mci_read(AT91_MCI_RSPR(2)); | ||
564 | cmd->resp[3] = at91_mci_read(AT91_MCI_RSPR(3)); | ||
565 | |||
566 | if (host->buffer) { | ||
567 | dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address); | ||
568 | host->buffer = NULL; | ||
569 | } | ||
570 | |||
571 | status = at91_mci_read(AT91_MCI_SR); | ||
572 | |||
573 | DBG("Status = %08X [%08X %08X %08X %08X]\n", | ||
574 | status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]); | ||
575 | |||
576 | if (status & (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE | | ||
577 | AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE | | ||
578 | AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)) { | ||
579 | if ((status & AT91_MCI_RCRCE) && | ||
580 | ((cmd->opcode == MMC_SEND_OP_COND) || (cmd->opcode == SD_APP_OP_COND))) { | ||
581 | cmd->error = MMC_ERR_NONE; | ||
582 | } | ||
583 | else { | ||
584 | if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE)) | ||
585 | cmd->error = MMC_ERR_TIMEOUT; | ||
586 | else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE)) | ||
587 | cmd->error = MMC_ERR_BADCRC; | ||
588 | else if (status & (AT91_MCI_OVRE | AT91_MCI_UNRE)) | ||
589 | cmd->error = MMC_ERR_FIFO; | ||
590 | else | ||
591 | cmd->error = MMC_ERR_FAILED; | ||
592 | |||
593 | DBG("Error detected and set to %d (cmd = %d, retries = %d)\n", | ||
594 | cmd->error, cmd->opcode, cmd->retries); | ||
595 | } | ||
596 | } | ||
597 | else | ||
598 | cmd->error = MMC_ERR_NONE; | ||
599 | |||
600 | at91mci_process_next(host); | ||
601 | } | ||
602 | |||
603 | /* | ||
604 | * Handle an MMC request | ||
605 | */ | ||
606 | static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) | ||
607 | { | ||
608 | struct at91mci_host *host = mmc_priv(mmc); | ||
609 | host->request = mrq; | ||
610 | host->flags = 0; | ||
611 | |||
612 | at91mci_process_next(host); | ||
613 | } | ||
614 | |||
615 | /* | ||
616 | * Set the IOS | ||
617 | */ | ||
618 | static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | ||
619 | { | ||
620 | int clkdiv; | ||
621 | struct at91mci_host *host = mmc_priv(mmc); | ||
622 | unsigned long at91_master_clock = clk_get_rate(mci_clk); | ||
623 | |||
624 | DBG("Clock %uHz, busmode %u, powermode %u, Vdd %u\n", | ||
625 | ios->clock, ios->bus_mode, ios->power_mode, ios->vdd); | ||
626 | |||
627 | if (host) | ||
628 | host->bus_mode = ios->bus_mode; | ||
629 | else | ||
630 | printk("MMC: No host for bus_mode\n"); | ||
631 | |||
632 | if (ios->clock == 0) { | ||
633 | /* Disable the MCI controller */ | ||
634 | at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIDIS); | ||
635 | clkdiv = 0; | ||
636 | } | ||
637 | else { | ||
638 | /* Enable the MCI controller */ | ||
639 | at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIEN); | ||
640 | |||
641 | if ((at91_master_clock % (ios->clock * 2)) == 0) | ||
642 | clkdiv = ((at91_master_clock / ios->clock) / 2) - 1; | ||
643 | else | ||
644 | clkdiv = (at91_master_clock / ios->clock) / 2; | ||
645 | |||
646 | DBG("clkdiv = %d. mcck = %ld\n", clkdiv, | ||
647 | at91_master_clock / (2 * (clkdiv + 1))); | ||
648 | } | ||
649 | if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) { | ||
650 | DBG("MMC: Setting controller bus width to 4\n"); | ||
651 | at91_mci_write(AT91_MCI_SDCR, at91_mci_read(AT91_MCI_SDCR) | AT91_MCI_SDCBUS); | ||
652 | } | ||
653 | else { | ||
654 | DBG("MMC: Setting controller bus width to 1\n"); | ||
655 | at91_mci_write(AT91_MCI_SDCR, at91_mci_read(AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS); | ||
656 | } | ||
657 | |||
658 | /* Set the clock divider */ | ||
659 | at91_mci_write(AT91_MCI_MR, (at91_mci_read(AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv); | ||
660 | |||
661 | /* maybe switch power to the card */ | ||
662 | if (host && host->board->vcc_pin) { | ||
663 | switch (ios->power_mode) { | ||
664 | case MMC_POWER_OFF: | ||
665 | at91_set_gpio_output(host->board->vcc_pin, 0); | ||
666 | break; | ||
667 | case MMC_POWER_UP: | ||
668 | case MMC_POWER_ON: | ||
669 | at91_set_gpio_output(host->board->vcc_pin, 1); | ||
670 | break; | ||
671 | } | ||
672 | } | ||
673 | } | ||
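/*
 * Worked example of the divider arithmetic above (illustrative only; it
 * assumes a 60 MHz master clock, a value not taken from this patch):
 * for ios->clock = 25000000, 60000000 % (2 * 25000000) != 0, so
 * clkdiv = (60000000 / 25000000) / 2 = 1 and the resulting MCI clock is
 * 60000000 / (2 * (clkdiv + 1)) = 15 MHz.
 */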
674 | |||
675 | /* | ||
676 | * Handle an interrupt | ||
677 | */ | ||
678 | static irqreturn_t at91_mci_irq(int irq, void *devid, struct pt_regs *regs) | ||
679 | { | ||
680 | struct at91mci_host *host = devid; | ||
681 | int completed = 0; | ||
682 | |||
683 | unsigned int int_status; | ||
684 | |||
685 | if (host == NULL) | ||
686 | return IRQ_HANDLED; | ||
687 | |||
688 | int_status = at91_mci_read(AT91_MCI_SR); | ||
689 | DBG("MCI irq: status = %08X, %08lX, %08lX\n", int_status, at91_mci_read(AT91_MCI_IMR), | ||
690 | int_status & at91_mci_read(AT91_MCI_IMR)); | ||
691 | |||
692 | if ((int_status & at91_mci_read(AT91_MCI_IMR)) & 0xffff0000) | ||
693 | completed = 1; | ||
694 | |||
695 | int_status &= at91_mci_read(AT91_MCI_IMR); | ||
696 | |||
697 | if (int_status & AT91_MCI_UNRE) | ||
698 | DBG("MMC: Underrun error\n"); | ||
699 | if (int_status & AT91_MCI_OVRE) | ||
700 | DBG("MMC: Overrun error\n"); | ||
701 | if (int_status & AT91_MCI_DTOE) | ||
702 | DBG("MMC: Data timeout\n"); | ||
703 | if (int_status & AT91_MCI_DCRCE) | ||
704 | DBG("MMC: CRC error in data\n"); | ||
705 | if (int_status & AT91_MCI_RTOE) | ||
706 | DBG("MMC: Response timeout\n"); | ||
707 | if (int_status & AT91_MCI_RENDE) | ||
708 | DBG("MMC: Response end bit error\n"); | ||
709 | if (int_status & AT91_MCI_RCRCE) | ||
710 | DBG("MMC: Response CRC error\n"); | ||
711 | if (int_status & AT91_MCI_RDIRE) | ||
712 | DBG("MMC: Response direction error\n"); | ||
713 | if (int_status & AT91_MCI_RINDE) | ||
714 | DBG("MMC: Response index error\n"); | ||
715 | |||
716 | /* Only continue processing if no errors */ | ||
717 | if (!completed) { | ||
718 | if (int_status & AT91_MCI_TXBUFE) { | ||
719 | DBG("TX buffer empty\n"); | ||
720 | at91_mci_handle_transmitted(host); | ||
721 | } | ||
722 | |||
723 | if (int_status & AT91_MCI_RXBUFF) { | ||
724 | DBG("RX buffer full\n"); | ||
725 | at91_mci_write(AT91_MCI_IER, AT91_MCI_CMDRDY); | ||
726 | } | ||
727 | |||
728 | if (int_status & AT91_MCI_ENDTX) { | ||
729 | DBG("Transmit has ended\n"); | ||
730 | } | ||
731 | |||
732 | if (int_status & AT91_MCI_ENDRX) { | ||
733 | DBG("Receive has ended\n"); | ||
734 | at91mci_post_dma_read(host); | ||
735 | } | ||
736 | |||
737 | if (int_status & AT91_MCI_NOTBUSY) { | ||
738 | DBG("Card is ready\n"); | ||
739 | at91_mci_write(AT91_MCI_IER, AT91_MCI_CMDRDY); | ||
740 | } | ||
741 | |||
742 | if (int_status & AT91_MCI_DTIP) { | ||
743 | DBG("Data transfer in progress\n"); | ||
744 | } | ||
745 | |||
746 | if (int_status & AT91_MCI_BLKE) { | ||
747 | DBG("Block transfer has ended\n"); | ||
748 | } | ||
749 | |||
750 | if (int_status & AT91_MCI_TXRDY) { | ||
751 | DBG("Ready to transmit\n"); | ||
752 | } | ||
753 | |||
754 | if (int_status & AT91_MCI_RXRDY) { | ||
755 | DBG("Ready to receive\n"); | ||
756 | } | ||
757 | |||
758 | if (int_status & AT91_MCI_CMDRDY) { | ||
759 | DBG("Command ready\n"); | ||
760 | completed = 1; | ||
761 | } | ||
762 | } | ||
763 | at91_mci_write(AT91_MCI_IDR, int_status); | ||
764 | |||
765 | if (completed) { | ||
766 | DBG("Completed command\n"); | ||
767 | at91_mci_write(AT91_MCI_IDR, 0xffffffff); | ||
768 | at91mci_completed_command(host); | ||
769 | } | ||
770 | |||
771 | return IRQ_HANDLED; | ||
772 | } | ||
773 | |||
774 | static irqreturn_t at91_mmc_det_irq(int irq, void *_host, struct pt_regs *regs) | ||
775 | { | ||
776 | struct at91mci_host *host = _host; | ||
777 | int present = !at91_get_gpio_value(irq); | ||
778 | |||
779 | /* | ||
780 | * we expect this irq on both insert and remove, | ||
781 | * and use a short delay to debounce. | ||
782 | */ | ||
783 | if (present != host->present) { | ||
784 | host->present = present; | ||
785 | DBG("%s: card %s\n", mmc_hostname(host->mmc), | ||
786 | present ? "insert" : "remove"); | ||
787 | if (!present) { | ||
788 | DBG("****** Resetting SD-card bus width ******\n"); | ||
789 | at91_mci_write(AT91_MCI_SDCR, 0); | ||
790 | } | ||
791 | mmc_detect_change(host->mmc, msecs_to_jiffies(100)); | ||
792 | } | ||
793 | return IRQ_HANDLED; | ||
794 | } | ||
795 | |||
796 | int at91_mci_get_ro(struct mmc_host *mmc) | ||
797 | { | ||
798 | int read_only = 0; | ||
799 | struct at91mci_host *host = mmc_priv(mmc); | ||
800 | |||
801 | if (host->board->wp_pin) { | ||
802 | read_only = at91_get_gpio_value(host->board->wp_pin); | ||
803 | printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc), | ||
804 | (read_only ? "read-only" : "read-write") ); | ||
805 | } | ||
806 | else { | ||
807 | printk(KERN_WARNING "%s: host does not support reading read-only " | ||
808 | "switch. Assuming write-enable.\n", mmc_hostname(mmc)); | ||
809 | } | ||
810 | return read_only; | ||
811 | } | ||
812 | |||
813 | static struct mmc_host_ops at91_mci_ops = { | ||
814 | .request = at91_mci_request, | ||
815 | .set_ios = at91_mci_set_ios, | ||
816 | .get_ro = at91_mci_get_ro, | ||
817 | }; | ||
818 | |||
819 | /* | ||
820 | * Probe for the device | ||
821 | */ | ||
822 | static int at91_mci_probe(struct platform_device *pdev) | ||
823 | { | ||
824 | struct mmc_host *mmc; | ||
825 | struct at91mci_host *host; | ||
826 | int ret; | ||
827 | |||
828 | DBG("Probe MCI devices\n"); | ||
829 | at91_mci_disable(); | ||
830 | at91_mci_enable(); | ||
831 | |||
832 | mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev); | ||
833 | if (!mmc) { | ||
834 | DBG("Failed to allocate mmc host\n"); | ||
835 | return -ENOMEM; | ||
836 | } | ||
837 | |||
838 | mmc->ops = &at91_mci_ops; | ||
839 | mmc->f_min = 375000; | ||
840 | mmc->f_max = 25000000; | ||
841 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; | ||
842 | |||
843 | host = mmc_priv(mmc); | ||
844 | host->mmc = mmc; | ||
845 | host->buffer = NULL; | ||
846 | host->bus_mode = 0; | ||
847 | host->board = pdev->dev.platform_data; | ||
848 | if (host->board->wire4) { | ||
849 | #ifdef SUPPORT_4WIRE | ||
850 | mmc->caps |= MMC_CAP_4_BIT_DATA; | ||
851 | #else | ||
852 | printk("MMC: 4 wire bus mode not supported by this driver - using 1 wire\n"); | ||
853 | #endif | ||
854 | } | ||
855 | |||
856 | /* | ||
857 | * Get Clock | ||
858 | */ | ||
859 | mci_clk = clk_get(&pdev->dev, "mci_clk"); | ||
860 | if (!mci_clk) { | ||
861 | printk(KERN_ERR "AT91 MMC: no clock defined.\n"); | ||
862 | return -ENODEV; | ||
863 | } | ||
864 | clk_enable(mci_clk); /* Enable the peripheral clock */ | ||
865 | |||
866 | /* | ||
867 | * Allocate the MCI interrupt | ||
868 | */ | ||
869 | ret = request_irq(AT91_ID_MCI, at91_mci_irq, SA_SHIRQ, DRIVER_NAME, host); | ||
870 | if (ret) { | ||
871 | DBG("Failed to request MCI interrupt\n"); | ||
872 | return ret; | ||
873 | } | ||
874 | |||
875 | platform_set_drvdata(pdev, mmc); | ||
876 | |||
877 | /* | ||
878 | * Add host to MMC layer | ||
879 | */ | ||
880 | if (host->board->det_pin) | ||
881 | host->present = !at91_get_gpio_value(host->board->det_pin); | ||
882 | else | ||
883 | host->present = -1; | ||
884 | |||
885 | mmc_add_host(mmc); | ||
886 | |||
887 | /* | ||
888 | * monitor card insertion/removal if we can | ||
889 | */ | ||
890 | if (host->board->det_pin) { | ||
891 | ret = request_irq(host->board->det_pin, at91_mmc_det_irq, | ||
892 | SA_SAMPLE_RANDOM, DRIVER_NAME, host); | ||
893 | if (ret) | ||
894 | DBG("couldn't allocate MMC detect irq\n"); | ||
895 | } | ||
896 | |||
897 | DBG(KERN_INFO "Added MCI driver\n"); | ||
898 | |||
899 | return 0; | ||
900 | } | ||
901 | |||
902 | /* | ||
903 | * Remove a device | ||
904 | */ | ||
905 | static int at91_mci_remove(struct platform_device *pdev) | ||
906 | { | ||
907 | struct mmc_host *mmc = platform_get_drvdata(pdev); | ||
908 | struct at91mci_host *host; | ||
909 | |||
910 | if (!mmc) | ||
911 | return -1; | ||
912 | |||
913 | host = mmc_priv(mmc); | ||
914 | |||
915 | if (host->present != -1) { | ||
916 | free_irq(host->board->det_pin, host); | ||
917 | cancel_delayed_work(&host->mmc->detect); | ||
918 | } | ||
919 | |||
920 | mmc_remove_host(mmc); | ||
921 | at91_mci_disable(); | ||
922 | free_irq(AT91_ID_MCI, host); | ||
923 | mmc_free_host(mmc); | ||
924 | |||
925 | clk_disable(mci_clk); /* Disable the peripheral clock */ | ||
926 | clk_put(mci_clk); | ||
927 | |||
928 | platform_set_drvdata(pdev, NULL); | ||
929 | |||
930 | DBG("Removed\n"); | ||
931 | |||
932 | return 0; | ||
933 | } | ||
934 | |||
935 | #ifdef CONFIG_PM | ||
936 | static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state) | ||
937 | { | ||
938 | struct mmc_host *mmc = platform_get_drvdata(pdev); | ||
939 | int ret = 0; | ||
940 | |||
941 | if (mmc) | ||
942 | ret = mmc_suspend_host(mmc, state); | ||
943 | |||
944 | return ret; | ||
945 | } | ||
946 | |||
947 | static int at91_mci_resume(struct platform_device *pdev) | ||
948 | { | ||
949 | struct mmc_host *mmc = platform_get_drvdata(pdev); | ||
950 | int ret = 0; | ||
951 | |||
952 | if (mmc) | ||
953 | ret = mmc_resume_host(mmc); | ||
954 | |||
955 | return ret; | ||
956 | } | ||
957 | #else | ||
958 | #define at91_mci_suspend NULL | ||
959 | #define at91_mci_resume NULL | ||
960 | #endif | ||
961 | |||
962 | static struct platform_driver at91_mci_driver = { | ||
963 | .probe = at91_mci_probe, | ||
964 | .remove = at91_mci_remove, | ||
965 | .suspend = at91_mci_suspend, | ||
966 | .resume = at91_mci_resume, | ||
967 | .driver = { | ||
968 | .name = DRIVER_NAME, | ||
969 | .owner = THIS_MODULE, | ||
970 | }, | ||
971 | }; | ||
972 | |||
973 | static int __init at91_mci_init(void) | ||
974 | { | ||
975 | return platform_driver_register(&at91_mci_driver); | ||
976 | } | ||
977 | |||
978 | static void __exit at91_mci_exit(void) | ||
979 | { | ||
980 | platform_driver_unregister(&at91_mci_driver); | ||
981 | } | ||
982 | |||
983 | module_init(at91_mci_init); | ||
984 | module_exit(at91_mci_exit); | ||
985 | |||
986 | MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver"); | ||
987 | MODULE_AUTHOR("Nick Randell"); | ||
988 | MODULE_LICENSE("GPL"); | ||
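For context, a minimal sketch of the board-side platform data this driver consumes (the field names are the ones dereferenced in at91_mci_probe() and at91_mci_set_ios() above; the values are hypothetical placeholders, not taken from any real board file):

static struct at91_mmc_data board_mmc_data = {
	.det_pin = 0,	/* card-detect GPIO; 0 = not wired, so no detect IRQ is requested */
	.wp_pin  = 0,	/* write-protect GPIO; 0 = not wired, get_ro assumes write-enabled */
	.vcc_pin = 0,	/* card-power GPIO; 0 = not wired, no power switching */
	.wire4   = 1,	/* board routes all four data lines (needs SUPPORT_4WIRE) */
};

The board code would register a platform device named "at91_mci" (DRIVER_NAME) carrying this structure as its platform_data; at91_mci_probe() picks it up via pdev->dev.platform_data.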
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c
new file mode 100644
index 000000000000..ffb7f55d3467
--- /dev/null
+++ b/drivers/mmc/imxmmc.c
@@ -0,0 +1,1096 @@
1 | /* | ||
2 | * linux/drivers/mmc/imxmmc.c - Motorola i.MX MMCI driver | ||
3 | * | ||
4 | * Copyright (C) 2004 Sascha Hauer, Pengutronix <sascha@saschahauer.de> | ||
5 | * Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com> | ||
6 | * | ||
7 | * derived from pxamci.c by Russell King | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * 2005-04-17 Pavel Pisa <pisa@cmp.felk.cvut.cz> | ||
14 | * Changed to conform to the redesigned i.MX scatter gather DMA interface | ||
15 | * | ||
16 | * 2005-11-04 Pavel Pisa <pisa@cmp.felk.cvut.cz> | ||
17 | * Updated for 2.6.14 kernel | ||
18 | * | ||
19 | * 2005-12-13 Jay Monkman <jtm@smoothsmoothie.com> | ||
20 | * Found and corrected problems in the write path | ||
21 | * | ||
22 | * 2005-12-30 Pavel Pisa <pisa@cmp.felk.cvut.cz> | ||
23 | * The event handling rewritten the right way in softirq. | ||
24 | * Added many ugly hacks and delays to overcome SDHC | ||
25 | * deficiencies | ||
26 | * | ||
27 | */ | ||
28 | #include <linux/config.h> | ||
29 | |||
30 | #ifdef CONFIG_MMC_DEBUG | ||
31 | #define DEBUG | ||
32 | #else | ||
33 | #undef DEBUG | ||
34 | #endif | ||
35 | |||
36 | #include <linux/module.h> | ||
37 | #include <linux/init.h> | ||
38 | #include <linux/ioport.h> | ||
39 | #include <linux/platform_device.h> | ||
40 | #include <linux/interrupt.h> | ||
41 | #include <linux/blkdev.h> | ||
42 | #include <linux/dma-mapping.h> | ||
43 | #include <linux/mmc/host.h> | ||
44 | #include <linux/mmc/card.h> | ||
45 | #include <linux/mmc/protocol.h> | ||
46 | #include <linux/delay.h> | ||
47 | |||
48 | #include <asm/dma.h> | ||
49 | #include <asm/io.h> | ||
50 | #include <asm/irq.h> | ||
51 | #include <asm/sizes.h> | ||
52 | #include <asm/arch/mmc.h> | ||
53 | #include <asm/arch/imx-dma.h> | ||
54 | |||
55 | #include "imxmmc.h" | ||
56 | |||
57 | #define DRIVER_NAME "imx-mmc" | ||
58 | |||
59 | #define IMXMCI_INT_MASK_DEFAULT (INT_MASK_BUF_READY | INT_MASK_DATA_TRAN | \ | ||
60 | INT_MASK_WRITE_OP_DONE | INT_MASK_END_CMD_RES | \ | ||
61 | INT_MASK_AUTO_CARD_DETECT | INT_MASK_DAT0_EN | INT_MASK_SDIO) | ||
62 | |||
63 | struct imxmci_host { | ||
64 | struct mmc_host *mmc; | ||
65 | spinlock_t lock; | ||
66 | struct resource *res; | ||
67 | int irq; | ||
68 | imx_dmach_t dma; | ||
69 | unsigned int clkrt; | ||
70 | unsigned int cmdat; | ||
71 | volatile unsigned int imask; | ||
72 | unsigned int power_mode; | ||
73 | unsigned int present; | ||
74 | struct imxmmc_platform_data *pdata; | ||
75 | |||
76 | struct mmc_request *req; | ||
77 | struct mmc_command *cmd; | ||
78 | struct mmc_data *data; | ||
79 | |||
80 | struct timer_list timer; | ||
81 | struct tasklet_struct tasklet; | ||
82 | unsigned int status_reg; | ||
83 | unsigned long pending_events; | ||
84 | /* Next two fields are there for CPU driven transfers to overcome SDHC deficiencies */ | ||
85 | u16 *data_ptr; | ||
86 | unsigned int data_cnt; | ||
87 | atomic_t stuck_timeout; | ||
88 | |||
89 | unsigned int dma_nents; | ||
90 | unsigned int dma_size; | ||
91 | unsigned int dma_dir; | ||
92 | int dma_allocated; | ||
93 | |||
94 | unsigned char actual_bus_width; | ||
95 | }; | ||
96 | |||
97 | #define IMXMCI_PEND_IRQ_b 0 | ||
98 | #define IMXMCI_PEND_DMA_END_b 1 | ||
99 | #define IMXMCI_PEND_DMA_ERR_b 2 | ||
100 | #define IMXMCI_PEND_WAIT_RESP_b 3 | ||
101 | #define IMXMCI_PEND_DMA_DATA_b 4 | ||
102 | #define IMXMCI_PEND_CPU_DATA_b 5 | ||
103 | #define IMXMCI_PEND_CARD_XCHG_b 6 | ||
104 | #define IMXMCI_PEND_SET_INIT_b 7 | ||
105 | |||
106 | #define IMXMCI_PEND_IRQ_m (1 << IMXMCI_PEND_IRQ_b) | ||
107 | #define IMXMCI_PEND_DMA_END_m (1 << IMXMCI_PEND_DMA_END_b) | ||
108 | #define IMXMCI_PEND_DMA_ERR_m (1 << IMXMCI_PEND_DMA_ERR_b) | ||
109 | #define IMXMCI_PEND_WAIT_RESP_m (1 << IMXMCI_PEND_WAIT_RESP_b) | ||
110 | #define IMXMCI_PEND_DMA_DATA_m (1 << IMXMCI_PEND_DMA_DATA_b) | ||
111 | #define IMXMCI_PEND_CPU_DATA_m (1 << IMXMCI_PEND_CPU_DATA_b) | ||
112 | #define IMXMCI_PEND_CARD_XCHG_m (1 << IMXMCI_PEND_CARD_XCHG_b) | ||
113 | #define IMXMCI_PEND_SET_INIT_m (1 << IMXMCI_PEND_SET_INIT_b) | ||
114 | |||
115 | static void imxmci_stop_clock(struct imxmci_host *host) | ||
116 | { | ||
117 | int i = 0; | ||
118 | MMC_STR_STP_CLK &= ~STR_STP_CLK_START_CLK; | ||
119 | while(i < 0x1000) { | ||
120 | if(!(i & 0x7f)) | ||
121 | MMC_STR_STP_CLK |= STR_STP_CLK_STOP_CLK; | ||
122 | |||
123 | if(!(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)) { | ||
124 | /* Check twice before cut */ | ||
125 | if(!(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)) | ||
126 | return; | ||
127 | } | ||
128 | |||
129 | i++; | ||
130 | } | ||
131 | dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n"); | ||
132 | } | ||
133 | |||
134 | static void imxmci_start_clock(struct imxmci_host *host) | ||
135 | { | ||
136 | int i = 0; | ||
137 | MMC_STR_STP_CLK &= ~STR_STP_CLK_STOP_CLK; | ||
138 | while(i < 0x1000) { | ||
139 | if(!(i & 0x7f)) | ||
140 | MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK; | ||
141 | |||
142 | if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN) { | ||
143 | /* Check twice before cut */ | ||
144 | if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN) | ||
145 | return; | ||
146 | } | ||
147 | |||
148 | i++; | ||
149 | } | ||
150 | dev_dbg(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n"); | ||
151 | } | ||
152 | |||
153 | static void imxmci_softreset(void) | ||
154 | { | ||
155 | /* reset sequence */ | ||
156 | MMC_STR_STP_CLK = 0x8; | ||
157 | MMC_STR_STP_CLK = 0xD; | ||
158 | MMC_STR_STP_CLK = 0x5; | ||
159 | MMC_STR_STP_CLK = 0x5; | ||
160 | MMC_STR_STP_CLK = 0x5; | ||
161 | MMC_STR_STP_CLK = 0x5; | ||
162 | MMC_STR_STP_CLK = 0x5; | ||
163 | MMC_STR_STP_CLK = 0x5; | ||
164 | MMC_STR_STP_CLK = 0x5; | ||
165 | MMC_STR_STP_CLK = 0x5; | ||
166 | |||
167 | MMC_RES_TO = 0xff; | ||
168 | MMC_BLK_LEN = 512; | ||
169 | MMC_NOB = 1; | ||
170 | } | ||
171 | |||
172 | static int imxmci_busy_wait_for_status(struct imxmci_host *host, | ||
173 | unsigned int *pstat, unsigned int stat_mask, | ||
174 | int timeout, const char *where) | ||
175 | { | ||
176 | int loops=0; | ||
177 | while(!(*pstat & stat_mask)) { | ||
178 | loops+=2; | ||
179 | if(loops >= timeout) { | ||
180 | dev_dbg(mmc_dev(host->mmc), "busy wait timeout in %s, STATUS = 0x%x (0x%x)\n", | ||
181 | where, *pstat, stat_mask); | ||
182 | return -1; | ||
183 | } | ||
184 | udelay(2); | ||
185 | *pstat |= MMC_STATUS; | ||
186 | } | ||
187 | if(!loops) | ||
188 | return 0; | ||
189 | |||
190 | dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n", | ||
191 | loops, where, *pstat, stat_mask); | ||
192 | return loops; | ||
193 | } | ||
194 | |||
195 | static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data) | ||
196 | { | ||
197 | unsigned int nob = data->blocks; | ||
198 | unsigned int blksz = 1 << data->blksz_bits; | ||
199 | unsigned int datasz = nob * blksz; | ||
200 | int i; | ||
201 | |||
202 | if (data->flags & MMC_DATA_STREAM) | ||
203 | nob = 0xffff; | ||
204 | |||
205 | host->data = data; | ||
206 | data->bytes_xfered = 0; | ||
207 | |||
208 | MMC_NOB = nob; | ||
209 | MMC_BLK_LEN = blksz; | ||
210 | |||
211 | /* | ||
212 | * DMA cannot be used for small block sizes, we have to use CPU driven transfers otherwise. | ||
213 | * We are in big trouble for non-512 byte transfers according to the note in paragraph | ||
214 | * 20.6.7 of the User Manual anyway, but we need to be able to transfer the SCR at least. | ||
215 | * The situation is even more complex in reality. The SDHC is not able to handle | ||
216 | * partial FIFO fills and reads well. The length has to be rounded up to a burst size multiple. | ||
217 | * This is required for SCR read at least. | ||
218 | */ | ||
219 | if (datasz < 64) { | ||
220 | host->dma_size = datasz; | ||
221 | if (data->flags & MMC_DATA_READ) { | ||
222 | host->dma_dir = DMA_FROM_DEVICE; | ||
223 | |||
224 | /* Hack to enable read SCR */ | ||
225 | if(datasz < 16) { | ||
226 | MMC_NOB = 1; | ||
227 | MMC_BLK_LEN = 16; | ||
228 | } | ||
229 | } else { | ||
230 | host->dma_dir = DMA_TO_DEVICE; | ||
231 | } | ||
232 | |||
233 | /* Convert back to virtual address */ | ||
234 | host->data_ptr = (u16*)(page_address(data->sg->page) + data->sg->offset); | ||
235 | host->data_cnt = 0; | ||
236 | |||
237 | clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events); | ||
238 | set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events); | ||
239 | |||
240 | return; | ||
241 | } | ||
242 | |||
243 | if (data->flags & MMC_DATA_READ) { | ||
244 | host->dma_dir = DMA_FROM_DEVICE; | ||
245 | host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, | ||
246 | data->sg_len, host->dma_dir); | ||
247 | |||
248 | imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz, | ||
249 | host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_READ); | ||
250 | |||
251 | /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/ | ||
252 | CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN; | ||
253 | } else { | ||
254 | host->dma_dir = DMA_TO_DEVICE; | ||
255 | |||
256 | host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, | ||
257 | data->sg_len, host->dma_dir); | ||
258 | |||
259 | imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz, | ||
260 | host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_WRITE); | ||
261 | |||
262 | /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/ | ||
263 | CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN; | ||
264 | } | ||
265 | |||
266 | #if 1 /* This code is there only for consistency checking and can be disabled in future */ | ||
267 | host->dma_size = 0; | ||
268 | for(i=0; i<host->dma_nents; i++) | ||
269 | host->dma_size+=data->sg[i].length; | ||
270 | |||
271 | if (datasz > host->dma_size) { | ||
272 | dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dm_size\n", | ||
273 | datasz, host->dma_size); | ||
274 | } | ||
275 | #endif | ||
276 | |||
277 | host->dma_size = datasz; | ||
278 | |||
279 | wmb(); | ||
280 | |||
281 | if(host->actual_bus_width == MMC_BUS_WIDTH_4) | ||
282 | BLR(host->dma) = 0; /* burst 64 byte read / 64 bytes write */ | ||
283 | else | ||
284 | BLR(host->dma) = 16; /* burst 16 byte read / 16 bytes write */ | ||
285 | |||
286 | RSSR(host->dma) = DMA_REQ_SDHC; | ||
287 | |||
288 | set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events); | ||
289 | clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events); | ||
290 | |||
291 | /* start DMA engine for read, write is delayed after initial response */ | ||
292 | if (host->dma_dir == DMA_FROM_DEVICE) { | ||
293 | imx_dma_enable(host->dma); | ||
294 | } | ||
295 | } | ||
296 | |||
297 | static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, unsigned int cmdat) | ||
298 | { | ||
299 | unsigned long flags; | ||
300 | u32 imask; | ||
301 | |||
302 | WARN_ON(host->cmd != NULL); | ||
303 | host->cmd = cmd; | ||
304 | |||
305 | if (cmd->flags & MMC_RSP_BUSY) | ||
306 | cmdat |= CMD_DAT_CONT_BUSY; | ||
307 | |||
308 | switch (mmc_resp_type(cmd)) { | ||
309 | case MMC_RSP_R1: /* short CRC, OPCODE */ | ||
310 | case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */ | ||
311 | cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R1; | ||
312 | break; | ||
313 | case MMC_RSP_R2: /* long 136 bit + CRC */ | ||
314 | cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R2; | ||
315 | break; | ||
316 | case MMC_RSP_R3: /* short */ | ||
317 | cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R3; | ||
318 | break; | ||
319 | case MMC_RSP_R6: /* short CRC */ | ||
320 | cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R6; | ||
321 | break; | ||
322 | default: | ||
323 | break; | ||
324 | } | ||
325 | |||
326 | if ( test_and_clear_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events) ) | ||
327 | cmdat |= CMD_DAT_CONT_INIT; /* This command needs init */ | ||
328 | |||
329 | if ( host->actual_bus_width == MMC_BUS_WIDTH_4 ) | ||
330 | cmdat |= CMD_DAT_CONT_BUS_WIDTH_4; | ||
331 | |||
332 | MMC_CMD = cmd->opcode; | ||
333 | MMC_ARGH = cmd->arg >> 16; | ||
334 | MMC_ARGL = cmd->arg & 0xffff; | ||
335 | MMC_CMD_DAT_CONT = cmdat; | ||
336 | |||
337 | atomic_set(&host->stuck_timeout, 0); | ||
338 | set_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events); | ||
339 | |||
340 | |||
341 | imask = IMXMCI_INT_MASK_DEFAULT; | ||
342 | imask &= ~INT_MASK_END_CMD_RES; | ||
343 | if ( cmdat & CMD_DAT_CONT_DATA_ENABLE ) { | ||
344 | /*imask &= ~INT_MASK_BUF_READY;*/ | ||
345 | imask &= ~INT_MASK_DATA_TRAN; | ||
346 | if ( cmdat & CMD_DAT_CONT_WRITE ) | ||
347 | imask &= ~INT_MASK_WRITE_OP_DONE; | ||
348 | if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) | ||
349 | imask &= ~INT_MASK_BUF_READY; | ||
350 | } | ||
351 | |||
352 | spin_lock_irqsave(&host->lock, flags); | ||
353 | host->imask = imask; | ||
354 | MMC_INT_MASK = host->imask; | ||
355 | spin_unlock_irqrestore(&host->lock, flags); | ||
356 | |||
357 | dev_dbg(mmc_dev(host->mmc), "CMD%02d (0x%02x) mask set to 0x%04x\n", | ||
358 | cmd->opcode, cmd->opcode, imask); | ||
359 | |||
360 | imxmci_start_clock(host); | ||
361 | } | ||
362 | |||
363 | static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *req) | ||
364 | { | ||
365 | unsigned long flags; | ||
366 | |||
367 | spin_lock_irqsave(&host->lock, flags); | ||
368 | |||
369 | host->pending_events &= ~(IMXMCI_PEND_WAIT_RESP_m | IMXMCI_PEND_DMA_END_m | | ||
370 | IMXMCI_PEND_DMA_DATA_m | IMXMCI_PEND_CPU_DATA_m); | ||
371 | |||
372 | host->imask = IMXMCI_INT_MASK_DEFAULT; | ||
373 | MMC_INT_MASK = host->imask; | ||
374 | |||
375 | spin_unlock_irqrestore(&host->lock, flags); | ||
376 | |||
377 | host->req = NULL; | ||
378 | host->cmd = NULL; | ||
379 | host->data = NULL; | ||
380 | mmc_request_done(host->mmc, req); | ||
381 | } | ||
382 | |||
383 | static int imxmci_finish_data(struct imxmci_host *host, unsigned int stat) | ||
384 | { | ||
385 | struct mmc_data *data = host->data; | ||
386 | int data_error; | ||
387 | |||
388 | if(test_and_clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)){ | ||
389 | imx_dma_disable(host->dma); | ||
390 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents, | ||
391 | host->dma_dir); | ||
392 | } | ||
393 | |||
394 | if ( stat & STATUS_ERR_MASK ) { | ||
395 | dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",stat); | ||
396 | if(stat & (STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR)) | ||
397 | data->error = MMC_ERR_BADCRC; | ||
398 | else if(stat & STATUS_TIME_OUT_READ) | ||
399 | data->error = MMC_ERR_TIMEOUT; | ||
400 | else | ||
401 | data->error = MMC_ERR_FAILED; | ||
402 | } else { | ||
403 | data->bytes_xfered = host->dma_size; | ||
404 | } | ||
405 | |||
406 | data_error = data->error; | ||
407 | |||
408 | host->data = NULL; | ||
409 | |||
410 | return data_error; | ||
411 | } | ||
412 | |||
413 | static int imxmci_cmd_done(struct imxmci_host *host, unsigned int stat) | ||
414 | { | ||
415 | struct mmc_command *cmd = host->cmd; | ||
416 | int i; | ||
417 | u32 a,b,c; | ||
418 | struct mmc_data *data = host->data; | ||
419 | |||
420 | if (!cmd) | ||
421 | return 0; | ||
422 | |||
423 | host->cmd = NULL; | ||
424 | |||
425 | if (stat & STATUS_TIME_OUT_RESP) { | ||
426 | dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n"); | ||
427 | cmd->error = MMC_ERR_TIMEOUT; | ||
428 | } else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) { | ||
429 | dev_dbg(mmc_dev(host->mmc), "cmd crc error\n"); | ||
430 | cmd->error = MMC_ERR_BADCRC; | ||
431 | } | ||
432 | |||
433 | if(cmd->flags & MMC_RSP_PRESENT) { | ||
434 | if(cmd->flags & MMC_RSP_136) { | ||
435 | for (i = 0; i < 4; i++) { | ||
436 | u32 a = MMC_RES_FIFO & 0xffff; | ||
437 | u32 b = MMC_RES_FIFO & 0xffff; | ||
438 | cmd->resp[i] = a<<16 | b; | ||
439 | } | ||
440 | } else { | ||
441 | a = MMC_RES_FIFO & 0xffff; | ||
442 | b = MMC_RES_FIFO & 0xffff; | ||
443 | c = MMC_RES_FIFO & 0xffff; | ||
444 | cmd->resp[0] = a<<24 | b<<8 | c>>8; | ||
445 | } | ||
446 | } | ||
447 | |||
448 | dev_dbg(mmc_dev(host->mmc), "RESP 0x%08x, 0x%08x, 0x%08x, 0x%08x, error %d\n", | ||
449 | cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3], cmd->error); | ||
450 | |||
451 | if (data && (cmd->error == MMC_ERR_NONE) && !(stat & STATUS_ERR_MASK)) { | ||
452 | if (host->req->data->flags & MMC_DATA_WRITE) { | ||
453 | |||
454 | /* Wait for FIFO to be empty before starting DMA write */ | ||
455 | |||
456 | stat = MMC_STATUS; | ||
457 | if(imxmci_busy_wait_for_status(host, &stat, | ||
458 | STATUS_APPL_BUFF_FE, | ||
459 | 40, "imxmci_cmd_done DMA WR") < 0) { | ||
460 | cmd->error = MMC_ERR_FIFO; | ||
461 | imxmci_finish_data(host, stat); | ||
462 | if(host->req) | ||
463 | imxmci_finish_request(host, host->req); | ||
464 | dev_warn(mmc_dev(host->mmc), "STATUS = 0x%04x\n", | ||
465 | stat); | ||
466 | return 0; | ||
467 | } | ||
468 | |||
469 | if(test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) { | ||
470 | imx_dma_enable(host->dma); | ||
471 | } | ||
472 | } | ||
473 | } else { | ||
474 | struct mmc_request *req; | ||
475 | imxmci_stop_clock(host); | ||
476 | req = host->req; | ||
477 | |||
478 | if(data) | ||
479 | imxmci_finish_data(host, stat); | ||
480 | |||
481 | if( req ) { | ||
482 | imxmci_finish_request(host, req); | ||
483 | } else { | ||
484 | dev_warn(mmc_dev(host->mmc), "imxmci_cmd_done: no request to finish\n"); | ||
485 | } | ||
486 | } | ||
487 | |||
488 | return 1; | ||
489 | } | ||
490 | |||
491 | static int imxmci_data_done(struct imxmci_host *host, unsigned int stat) | ||
492 | { | ||
493 | struct mmc_data *data = host->data; | ||
494 | int data_error; | ||
495 | |||
496 | if (!data) | ||
497 | return 0; | ||
498 | |||
499 | data_error = imxmci_finish_data(host, stat); | ||
500 | |||
501 | if (host->req->stop && (data_error == MMC_ERR_NONE)) { | ||
502 | imxmci_stop_clock(host); | ||
503 | imxmci_start_cmd(host, host->req->stop, 0); | ||
504 | } else { | ||
505 | struct mmc_request *req; | ||
506 | req = host->req; | ||
507 | if( req ) { | ||
508 | imxmci_finish_request(host, req); | ||
509 | } else { | ||
510 | dev_warn(mmc_dev(host->mmc), "imxmci_data_done: no request to finish\n"); | ||
511 | } | ||
512 | } | ||
513 | |||
514 | return 1; | ||
515 | } | ||
516 | |||
517 | static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat) | ||
518 | { | ||
519 | int i; | ||
520 | int burst_len; | ||
521 | int flush_len; | ||
522 | int trans_done = 0; | ||
523 | unsigned int stat = *pstat; | ||
524 | |||
525 | if(host->actual_bus_width == MMC_BUS_WIDTH_4) | ||
526 | burst_len = 16; | ||
527 | else | ||
528 | burst_len = 64; | ||
529 | |||
530 | /* This is unfortunately required */ | ||
531 | dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n", | ||
532 | stat); | ||
533 | |||
534 | if(host->dma_dir == DMA_FROM_DEVICE) { | ||
535 | imxmci_busy_wait_for_status(host, &stat, | ||
536 | STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE, | ||
537 | 20, "imxmci_cpu_driven_data read"); | ||
538 | |||
539 | while((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) && | ||
540 | (host->data_cnt < host->dma_size)) { | ||
541 | if(burst_len >= host->dma_size - host->data_cnt) { | ||
542 | flush_len = burst_len; | ||
543 | burst_len = host->dma_size - host->data_cnt; | ||
544 | flush_len -= burst_len; | ||
545 | host->data_cnt = host->dma_size; | ||
546 | trans_done = 1; | ||
547 | } else { | ||
548 | flush_len = 0; | ||
549 | host->data_cnt += burst_len; | ||
550 | } | ||
551 | |||
552 | for(i = burst_len; i>=2 ; i-=2) { | ||
553 | *(host->data_ptr++) = MMC_BUFFER_ACCESS; | ||
554 | udelay(20); /* required for clocks < 8MHz*/ | ||
555 | } | ||
556 | |||
557 | if(i == 1) | ||
558 | *(u8*)(host->data_ptr) = MMC_BUFFER_ACCESS; | ||
559 | |||
560 | stat = MMC_STATUS; | ||
561 | |||
562 | /* Flush extra bytes from FIFO */ | ||
563 | while(flush_len >= 2){ | ||
564 | flush_len -= 2; | ||
565 | i = MMC_BUFFER_ACCESS; | ||
566 | stat = MMC_STATUS; | ||
567 | stat &= ~STATUS_CRC_READ_ERR; /* Stupid but required there */ | ||
568 | } | ||
569 | |||
570 | dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read burst %d STATUS = 0x%x\n", | ||
571 | burst_len, stat); | ||
572 | } | ||
573 | } else { | ||
574 | imxmci_busy_wait_for_status(host, &stat, | ||
575 | STATUS_APPL_BUFF_FE, | ||
576 | 20, "imxmci_cpu_driven_data write"); | ||
577 | |||
578 | while((stat & STATUS_APPL_BUFF_FE) && | ||
579 | (host->data_cnt < host->dma_size)) { | ||
580 | if(burst_len >= host->dma_size - host->data_cnt) { | ||
581 | burst_len = host->dma_size - host->data_cnt; | ||
582 | host->data_cnt = host->dma_size; | ||
583 | trans_done = 1; | ||
584 | } else { | ||
585 | host->data_cnt += burst_len; | ||
586 | } | ||
587 | |||
588 | for(i = burst_len; i>0 ; i-=2) | ||
589 | MMC_BUFFER_ACCESS = *(host->data_ptr++); | ||
590 | |||
591 | stat = MMC_STATUS; | ||
592 | |||
593 | dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data write burst %d STATUS = 0x%x\n", | ||
594 | burst_len, stat); | ||
595 | } | ||
596 | } | ||
597 | |||
598 | *pstat = stat; | ||
599 | |||
600 | return trans_done; | ||
601 | } | ||
602 | |||
603 | static void imxmci_dma_irq(int dma, void *devid, struct pt_regs *regs) | ||
604 | { | ||
605 | struct imxmci_host *host = devid; | ||
606 | uint32_t stat = MMC_STATUS; | ||
607 | |||
608 | atomic_set(&host->stuck_timeout, 0); | ||
609 | host->status_reg = stat; | ||
610 | set_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events); | ||
611 | tasklet_schedule(&host->tasklet); | ||
612 | } | ||
613 | |||
614 | static irqreturn_t imxmci_irq(int irq, void *devid, struct pt_regs *regs) | ||
615 | { | ||
616 | struct imxmci_host *host = devid; | ||
617 | uint32_t stat = MMC_STATUS; | ||
618 | int handled = 1; | ||
619 | |||
620 | MMC_INT_MASK = host->imask | INT_MASK_SDIO | INT_MASK_AUTO_CARD_DETECT; | ||
621 | |||
622 | atomic_set(&host->stuck_timeout, 0); | ||
623 | host->status_reg = stat; | ||
624 | set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events); | ||
625 | tasklet_schedule(&host->tasklet); | ||
626 | |||
627 | return IRQ_RETVAL(handled); | ||
628 | } | ||
629 | |||
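/*
 * Deferred-work handler: the MCI interrupt and the DMA completion handler
 * only latch the status register, set a bit in host->pending_events and
 * schedule this tasklet. Command/response completion, CPU-driven (PIO)
 * data, DMA end and card-change events are processed here; a transfer
 * reported as stuck by the status-poll timer (stuck_timeout > 4) is
 * recovered by forcing timeout/CRC error status.
 */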
630 | static void imxmci_tasklet_fnc(unsigned long data) | ||
631 | { | ||
632 | struct imxmci_host *host = (struct imxmci_host *)data; | ||
633 | u32 stat; | ||
634 | unsigned int data_dir_mask = 0; /* STATUS_WR_CRC_ERROR_CODE_MASK */ | ||
635 | int timeout = 0; | ||
636 | |||
637 | if(atomic_read(&host->stuck_timeout) > 4) { | ||
638 | char *what; | ||
639 | timeout = 1; | ||
640 | stat = MMC_STATUS; | ||
641 | host->status_reg = stat; | ||
642 | if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) { | ||
643 | if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) | ||
644 | what = "RESP+DMA"; | ||
645 | else | ||
646 | what = "RESP"; | ||
647 | } else if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) { | ||
648 | if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events)) | ||
649 | what = "DATA"; | ||
650 | else | ||
651 | what = "DMA"; | ||
652 | } else { | ||
653 | what = "???"; | ||
654 | } | ||
655 | |||
656 | dev_err(mmc_dev(host->mmc), "%s TIMEOUT, hardware stuck STATUS = 0x%04x IMASK = 0x%04x\n", | ||
657 | what, stat, MMC_INT_MASK); | ||
658 | dev_err(mmc_dev(host->mmc), "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n", | ||
659 | MMC_CMD_DAT_CONT, MMC_BLK_LEN, MMC_NOB, CCR(host->dma)); | ||
660 | dev_err(mmc_dev(host->mmc), "CMD%d, bus %d-bit, dma_size = 0x%x\n", | ||
661 | host->cmd?host->cmd->opcode:0, 1<<host->actual_bus_width, host->dma_size); | ||
662 | } | ||
663 | |||
664 | if(!host->present || timeout) | ||
665 | host->status_reg = STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ | | ||
666 | STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR; | ||
667 | |||
668 | if(test_bit(IMXMCI_PEND_IRQ_b, &host->pending_events) || timeout) { | ||
669 | clear_bit(IMXMCI_PEND_IRQ_b, &host->pending_events); | ||
670 | |||
671 | stat = MMC_STATUS; | ||
672 | /* | ||
673 | * Not required in theory, but there is a chance of missing a flag that | ||
674 | * clears automatically on the mask write; the original Freescale code keeps | ||
675 | * the status from IRQ time, so we do the same | ||
676 | */ | ||
677 | stat |= host->status_reg; | ||
678 | |||
679 | if(test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) { | ||
680 | imxmci_busy_wait_for_status(host, &stat, | ||
681 | STATUS_END_CMD_RESP | STATUS_ERR_MASK, | ||
682 | 20, "imxmci_tasklet_fnc resp (ERRATUM #4)"); | ||
683 | } | ||
684 | |||
685 | if(stat & (STATUS_END_CMD_RESP | STATUS_ERR_MASK)) { | ||
686 | if(test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) | ||
687 | imxmci_cmd_done(host, stat); | ||
688 | if(host->data && (stat & STATUS_ERR_MASK)) | ||
689 | imxmci_data_done(host, stat); | ||
690 | } | ||
691 | |||
692 | if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) { | ||
693 | stat |= MMC_STATUS; | ||
694 | if(imxmci_cpu_driven_data(host, &stat)){ | ||
695 | if(test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) | ||
696 | imxmci_cmd_done(host, stat); | ||
697 | atomic_clear_mask(IMXMCI_PEND_IRQ_m|IMXMCI_PEND_CPU_DATA_m, | ||
698 | &host->pending_events); | ||
699 | imxmci_data_done(host, stat); | ||
700 | } | ||
701 | } | ||
702 | } | ||
703 | |||
704 | if(test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events) && | ||
705 | !test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) { | ||
706 | |||
707 | stat = MMC_STATUS; | ||
708 | /* Same as above */ | ||
709 | stat |= host->status_reg; | ||
710 | |||
711 | if(host->dma_dir == DMA_TO_DEVICE) { | ||
712 | data_dir_mask = STATUS_WRITE_OP_DONE; | ||
713 | } else { | ||
714 | data_dir_mask = STATUS_DATA_TRANS_DONE; | ||
715 | } | ||
716 | |||
717 | imxmci_busy_wait_for_status(host, &stat, | ||
718 | data_dir_mask, | ||
719 | 50, "imxmci_tasklet_fnc data"); | ||
720 | |||
721 | if(stat & data_dir_mask) { | ||
722 | clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events); | ||
723 | imxmci_data_done(host, stat); | ||
724 | } | ||
725 | } | ||
726 | |||
727 | if(test_and_clear_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events)) { | ||
728 | |||
729 | if(host->cmd) | ||
730 | imxmci_cmd_done(host, STATUS_TIME_OUT_RESP); | ||
731 | |||
732 | if(host->data) | ||
733 | imxmci_data_done(host, STATUS_TIME_OUT_READ | | ||
734 | STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR); | ||
735 | |||
736 | if(host->req) | ||
737 | imxmci_finish_request(host, host->req); | ||
738 | |||
739 | mmc_detect_change(host->mmc, msecs_to_jiffies(100)); | ||
740 | |||
741 | } | ||
742 | } | ||
743 | |||
744 | static void imxmci_request(struct mmc_host *mmc, struct mmc_request *req) | ||
745 | { | ||
746 | struct imxmci_host *host = mmc_priv(mmc); | ||
747 | unsigned int cmdat; | ||
748 | |||
749 | WARN_ON(host->req != NULL); | ||
750 | |||
751 | host->req = req; | ||
752 | |||
753 | cmdat = 0; | ||
754 | |||
755 | if (req->data) { | ||
756 | imxmci_setup_data(host, req->data); | ||
757 | |||
758 | cmdat |= CMD_DAT_CONT_DATA_ENABLE; | ||
759 | |||
760 | if (req->data->flags & MMC_DATA_WRITE) | ||
761 | cmdat |= CMD_DAT_CONT_WRITE; | ||
762 | |||
763 | if (req->data->flags & MMC_DATA_STREAM) { | ||
764 | cmdat |= CMD_DAT_CONT_STREAM_BLOCK; | ||
765 | } | ||
766 | } | ||
767 | |||
768 | imxmci_start_cmd(host, req->cmd, cmdat); | ||
769 | } | ||
770 | |||
771 | #define CLK_RATE 19200000 | ||
772 | |||
773 | static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | ||
774 | { | ||
775 | struct imxmci_host *host = mmc_priv(mmc); | ||
776 | int prescaler; | ||
777 | |||
778 | dev_dbg(mmc_dev(host->mmc), "clock %u power %u vdd %u width %u\n", | ||
779 | ios->clock, ios->power_mode, ios->vdd, | ||
780 | (ios->bus_width==MMC_BUS_WIDTH_4)?4:1); | ||
781 | |||
782 | if( ios->bus_width==MMC_BUS_WIDTH_4 ) { | ||
783 | host->actual_bus_width = MMC_BUS_WIDTH_4; | ||
784 | imx_gpio_mode(PB11_PF_SD_DAT3); | ||
785 | } else { | ||
786 | host->actual_bus_width = MMC_BUS_WIDTH_1; | ||
787 | imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11); | ||
788 | } | ||
789 | |||
790 | if ( host->power_mode != ios->power_mode ) { | ||
791 | switch (ios->power_mode) { | ||
792 | case MMC_POWER_OFF: | ||
793 | break; | ||
794 | case MMC_POWER_UP: | ||
795 | set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events); | ||
796 | break; | ||
797 | case MMC_POWER_ON: | ||
798 | break; | ||
799 | } | ||
800 | host->power_mode = ios->power_mode; | ||
801 | } | ||
802 | |||
803 | if ( ios->clock ) { | ||
804 | unsigned int clk; | ||
805 | |||
806 | /* With PERCLK2 at 96 MHz the prescaler is 5, | ||
807 | * giving 96 MHz / 5 = 19.2 MHz | ||
808 | */ | ||
809 | clk=imx_get_perclk2(); | ||
810 | prescaler=(clk+(CLK_RATE*7)/8)/CLK_RATE; | ||
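/*
 * For example, with PERCLK2 = 96000000 Hz this evaluates to
 * (96000000 + (19200000 * 7) / 8) / 19200000 = 5 (integer division),
 * which the switch below maps to the register prescaler value 5.
 */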
811 | switch(prescaler) { | ||
812 | case 0: | ||
813 | case 1: prescaler = 0; | ||
814 | break; | ||
815 | case 2: prescaler = 1; | ||
816 | break; | ||
817 | case 3: prescaler = 2; | ||
818 | break; | ||
819 | case 4: prescaler = 4; | ||
820 | break; | ||
821 | default: | ||
822 | case 5: prescaler = 5; | ||
823 | break; | ||
824 | } | ||
825 | |||
826 | dev_dbg(mmc_dev(host->mmc), "PERCLK2 %d Hz -> prescaler %d\n", | ||
827 | clk, prescaler); | ||
828 | |||
829 | for(clk=0; clk<8; clk++) { | ||
830 | int x; | ||
831 | x = CLK_RATE / (1<<clk); | ||
832 | if( x <= ios->clock) | ||
833 | break; | ||
834 | } | ||
835 | |||
836 | MMC_STR_STP_CLK |= STR_STP_CLK_ENABLE; /* enable controller */ | ||
837 | |||
838 | imxmci_stop_clock(host); | ||
839 | MMC_CLK_RATE = (prescaler<<3) | clk; | ||
840 | imxmci_start_clock(host); | ||
841 | |||
842 | dev_dbg(mmc_dev(host->mmc), "MMC_CLK_RATE: 0x%08x\n", MMC_CLK_RATE); | ||
843 | } else { | ||
844 | imxmci_stop_clock(host); | ||
845 | } | ||
846 | } | ||
847 | |||
848 | static struct mmc_host_ops imxmci_ops = { | ||
849 | .request = imxmci_request, | ||
850 | .set_ios = imxmci_set_ios, | ||
851 | }; | ||
852 | |||
853 | static struct resource *platform_device_resource(struct platform_device *dev, unsigned int mask, int nr) | ||
854 | { | ||
855 | int i; | ||
856 | |||
857 | for (i = 0; i < dev->num_resources; i++) | ||
858 | if (dev->resource[i].flags == mask && nr-- == 0) | ||
859 | return &dev->resource[i]; | ||
860 | return NULL; | ||
861 | } | ||
862 | |||
863 | static int platform_device_irq(struct platform_device *dev, int nr) | ||
864 | { | ||
865 | int i; | ||
866 | |||
867 | for (i = 0; i < dev->num_resources; i++) | ||
868 | if (dev->resource[i].flags == IORESOURCE_IRQ && nr-- == 0) | ||
869 | return dev->resource[i].start; | ||
870 | return NO_IRQ; | ||
871 | } | ||
872 | |||
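/*
 * Periodic status poll, rescheduled every HZ/2 jiffies: detects card
 * insertion and removal through the platform card_present() hook and acts
 * as a watchdog for stuck transfers by bumping stuck_timeout while a
 * response or DMA transfer is still pending.
 */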
873 | static void imxmci_check_status(unsigned long data) | ||
874 | { | ||
875 | struct imxmci_host *host = (struct imxmci_host *)data; | ||
876 | |||
877 | if( host->pdata->card_present() != host->present ) { | ||
878 | host->present ^= 1; | ||
879 | dev_info(mmc_dev(host->mmc), "card %s\n", | ||
880 | host->present ? "inserted" : "removed"); | ||
881 | |||
882 | set_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events); | ||
883 | tasklet_schedule(&host->tasklet); | ||
884 | } | ||
885 | |||
886 | if(test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events) || | ||
887 | test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) { | ||
888 | atomic_inc(&host->stuck_timeout); | ||
889 | if(atomic_read(&host->stuck_timeout) > 4) | ||
890 | tasklet_schedule(&host->tasklet); | ||
891 | } else { | ||
892 | atomic_set(&host->stuck_timeout, 0); | ||
893 | |||
894 | } | ||
895 | |||
896 | mod_timer(&host->timer, jiffies + (HZ>>1)); | ||
897 | } | ||
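/*
 * Illustrative board-support sketch. The driver only relies on platform
 * data providing a card_present() hook (called without arguments above);
 * the real structure is defined in the i.MX arch headers and may carry
 * more fields. The names and values below are examples, not part of this
 * patch, and the device name must match the driver's DRIVER_NAME.
 */
#if 0	/* example only, belongs in the board support file */
#include <linux/platform_device.h>

static int mx1ads_mmc_card_present(void)
{
	/* e.g. return the level of a card-detect GPIO; always present here */
	return 1;
}

static struct imxmmc_platform_data mx1ads_mmc_pdata = {
	.card_present	= mx1ads_mmc_card_present,
};

static struct platform_device mx1ads_mmc_device = {
	.name	= "imx-mmc",	/* assumed to match DRIVER_NAME */
	.id	= 0,
	.dev	= {
		.platform_data	= &mx1ads_mmc_pdata,
	},
	/* .resource / .num_resources: MMC register window and IRQ */
};
#endif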
898 | |||
899 | static int imxmci_probe(struct platform_device *pdev) | ||
900 | { | ||
901 | struct mmc_host *mmc; | ||
902 | struct imxmci_host *host = NULL; | ||
903 | struct resource *r; | ||
904 | int ret = 0, irq; | ||
905 | |||
906 | printk(KERN_INFO "i.MX mmc driver\n"); | ||
907 | |||
908 | r = platform_device_resource(pdev, IORESOURCE_MEM, 0); | ||
909 | irq = platform_device_irq(pdev, 0); | ||
910 | if (!r || irq == NO_IRQ) | ||
911 | return -ENXIO; | ||
912 | |||
913 | r = request_mem_region(r->start, 0x100, "IMXMCI"); | ||
914 | if (!r) | ||
915 | return -EBUSY; | ||
916 | |||
917 | mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev); | ||
918 | if (!mmc) { | ||
919 | ret = -ENOMEM; | ||
920 | goto out; | ||
921 | } | ||
922 | |||
923 | mmc->ops = &imxmci_ops; | ||
924 | mmc->f_min = 150000; | ||
925 | mmc->f_max = CLK_RATE/2; | ||
926 | mmc->ocr_avail = MMC_VDD_32_33; | ||
927 | mmc->caps |= MMC_CAP_4_BIT_DATA; | ||
928 | |||
929 | /* MMC core transfer sizes tunable parameters */ | ||
930 | mmc->max_hw_segs = 64; | ||
931 | mmc->max_phys_segs = 64; | ||
932 | mmc->max_sectors = 64; /* default 1 << (PAGE_CACHE_SHIFT - 9) */ | ||
933 | mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */ | ||
934 | |||
935 | host = mmc_priv(mmc); | ||
936 | host->mmc = mmc; | ||
937 | host->dma_allocated = 0; | ||
938 | host->pdata = pdev->dev.platform_data; | ||
939 | |||
940 | spin_lock_init(&host->lock); | ||
941 | host->res = r; | ||
942 | host->irq = irq; | ||
943 | |||
944 | imx_gpio_mode(PB8_PF_SD_DAT0); | ||
945 | imx_gpio_mode(PB9_PF_SD_DAT1); | ||
946 | imx_gpio_mode(PB10_PF_SD_DAT2); | ||
947 | /* Configured as GPIO with pull-up to ensure right MMC card mode */ | ||
948 | /* Switched to PB11_PF_SD_DAT3 if 4 bit bus is configured */ | ||
949 | imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11); | ||
950 | /* imx_gpio_mode(PB11_PF_SD_DAT3); */ | ||
951 | imx_gpio_mode(PB12_PF_SD_CLK); | ||
952 | imx_gpio_mode(PB13_PF_SD_CMD); | ||
953 | |||
954 | imxmci_softreset(); | ||
955 | |||
956 | if ( MMC_REV_NO != 0x390 ) { | ||
957 | dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n", MMC_REV_NO); | ||
958 | ret = -ENODEV; | ||
959 | goto out; | ||
960 | } | ||
961 | |||
962 | MMC_READ_TO = 0x2db4; /* recommended in data sheet */ | ||
963 | |||
964 | host->imask = IMXMCI_INT_MASK_DEFAULT; | ||
965 | MMC_INT_MASK = host->imask; | ||
966 | |||
967 | |||
968 | if(imx_dma_request_by_prio(&host->dma, DRIVER_NAME, DMA_PRIO_LOW)<0){ | ||
969 | dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n"); | ||
970 | ret = -EBUSY; | ||
971 | goto out; | ||
972 | } | ||
973 | host->dma_allocated=1; | ||
974 | imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host); | ||
975 | |||
976 | tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host); | ||
977 | host->status_reg=0; | ||
978 | host->pending_events=0; | ||
979 | |||
980 | ret = request_irq(host->irq, imxmci_irq, 0, DRIVER_NAME, host); | ||
981 | if (ret) | ||
982 | goto out; | ||
983 | |||
984 | host->present = host->pdata->card_present(); | ||
985 | init_timer(&host->timer); | ||
986 | host->timer.data = (unsigned long)host; | ||
987 | host->timer.function = imxmci_check_status; | ||
988 | host->timer.expires = jiffies + (HZ>>1); | ||
989 | add_timer(&host->timer); | ||
990 | |||
991 | platform_set_drvdata(pdev, mmc); | ||
992 | |||
993 | mmc_add_host(mmc); | ||
994 | |||
995 | return 0; | ||
996 | |||
997 | out: | ||
998 | if (host) { | ||
999 | if(host->dma_allocated){ | ||
1000 | imx_dma_free(host->dma); | ||
1001 | host->dma_allocated=0; | ||
1002 | } | ||
1003 | } | ||
1004 | if (mmc) | ||
1005 | mmc_free_host(mmc); | ||
1006 | release_resource(r); | ||
1007 | return ret; | ||
1008 | } | ||
1009 | |||
1010 | static int imxmci_remove(struct platform_device *pdev) | ||
1011 | { | ||
1012 | struct mmc_host *mmc = platform_get_drvdata(pdev); | ||
1013 | |||
1014 | platform_set_drvdata(pdev, NULL); | ||
1015 | |||
1016 | if (mmc) { | ||
1017 | struct imxmci_host *host = mmc_priv(mmc); | ||
1018 | |||
1019 | tasklet_disable(&host->tasklet); | ||
1020 | |||
1021 | del_timer_sync(&host->timer); | ||
1022 | mmc_remove_host(mmc); | ||
1023 | |||
1024 | free_irq(host->irq, host); | ||
1025 | if(host->dma_allocated){ | ||
1026 | imx_dma_free(host->dma); | ||
1027 | host->dma_allocated=0; | ||
1028 | } | ||
1029 | |||
1030 | tasklet_kill(&host->tasklet); | ||
1031 | |||
1032 | release_resource(host->res); | ||
1033 | |||
1034 | mmc_free_host(mmc); | ||
1035 | } | ||
1036 | return 0; | ||
1037 | } | ||
1038 | |||
1039 | #ifdef CONFIG_PM | ||
1040 | static int imxmci_suspend(struct platform_device *dev, pm_message_t state) | ||
1041 | { | ||
1042 | struct mmc_host *mmc = platform_get_drvdata(dev); | ||
1043 | int ret = 0; | ||
1044 | |||
1045 | if (mmc) | ||
1046 | ret = mmc_suspend_host(mmc, state); | ||
1047 | |||
1048 | return ret; | ||
1049 | } | ||
1050 | |||
1051 | static int imxmci_resume(struct platform_device *dev) | ||
1052 | { | ||
1053 | struct mmc_host *mmc = platform_get_drvdata(dev); | ||
1054 | struct imxmci_host *host; | ||
1055 | int ret = 0; | ||
1056 | |||
1057 | if (mmc) { | ||
1058 | host = mmc_priv(mmc); | ||
1059 | if(host) | ||
1060 | set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events); | ||
1061 | ret = mmc_resume_host(mmc); | ||
1062 | } | ||
1063 | |||
1064 | return ret; | ||
1065 | } | ||
1066 | #else | ||
1067 | #define imxmci_suspend NULL | ||
1068 | #define imxmci_resume NULL | ||
1069 | #endif /* CONFIG_PM */ | ||
1070 | |||
1071 | static struct platform_driver imxmci_driver = { | ||
1072 | .probe = imxmci_probe, | ||
1073 | .remove = imxmci_remove, | ||
1074 | .suspend = imxmci_suspend, | ||
1075 | .resume = imxmci_resume, | ||
1076 | .driver = { | ||
1077 | .name = DRIVER_NAME, | ||
1078 | } | ||
1079 | }; | ||
1080 | |||
1081 | static int __init imxmci_init(void) | ||
1082 | { | ||
1083 | return platform_driver_register(&imxmci_driver); | ||
1084 | } | ||
1085 | |||
1086 | static void __exit imxmci_exit(void) | ||
1087 | { | ||
1088 | platform_driver_unregister(&imxmci_driver); | ||
1089 | } | ||
1090 | |||
1091 | module_init(imxmci_init); | ||
1092 | module_exit(imxmci_exit); | ||
1093 | |||
1094 | MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver"); | ||
1095 | MODULE_AUTHOR("Sascha Hauer, Pengutronix"); | ||
1096 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/mmc/imxmmc.h b/drivers/mmc/imxmmc.h new file mode 100644 index 000000000000..e5339e334dbb --- /dev/null +++ b/drivers/mmc/imxmmc.h | |||
@@ -0,0 +1,67 @@ | |||
1 | |||
2 | # define __REG16(x) (*((volatile u16 *)IO_ADDRESS(x))) | ||
3 | |||
4 | #define MMC_STR_STP_CLK __REG16(IMX_MMC_BASE + 0x00) | ||
5 | #define MMC_STATUS __REG16(IMX_MMC_BASE + 0x04) | ||
6 | #define MMC_CLK_RATE __REG16(IMX_MMC_BASE + 0x08) | ||
7 | #define MMC_CMD_DAT_CONT __REG16(IMX_MMC_BASE + 0x0C) | ||
8 | #define MMC_RES_TO __REG16(IMX_MMC_BASE + 0x10) | ||
9 | #define MMC_READ_TO __REG16(IMX_MMC_BASE + 0x14) | ||
10 | #define MMC_BLK_LEN __REG16(IMX_MMC_BASE + 0x18) | ||
11 | #define MMC_NOB __REG16(IMX_MMC_BASE + 0x1C) | ||
12 | #define MMC_REV_NO __REG16(IMX_MMC_BASE + 0x20) | ||
13 | #define MMC_INT_MASK __REG16(IMX_MMC_BASE + 0x24) | ||
14 | #define MMC_CMD __REG16(IMX_MMC_BASE + 0x28) | ||
15 | #define MMC_ARGH __REG16(IMX_MMC_BASE + 0x2C) | ||
16 | #define MMC_ARGL __REG16(IMX_MMC_BASE + 0x30) | ||
17 | #define MMC_RES_FIFO __REG16(IMX_MMC_BASE + 0x34) | ||
18 | #define MMC_BUFFER_ACCESS __REG16(IMX_MMC_BASE + 0x38) | ||
19 | #define MMC_BUFFER_ACCESS_OFS 0x38 | ||
20 | |||
21 | |||
22 | #define STR_STP_CLK_ENDIAN (1<<5) | ||
23 | #define STR_STP_CLK_RESET (1<<3) | ||
24 | #define STR_STP_CLK_ENABLE (1<<2) | ||
25 | #define STR_STP_CLK_START_CLK (1<<1) | ||
26 | #define STR_STP_CLK_STOP_CLK (1<<0) | ||
27 | #define STATUS_CARD_PRESENCE (1<<15) | ||
28 | #define STATUS_SDIO_INT_ACTIVE (1<<14) | ||
29 | #define STATUS_END_CMD_RESP (1<<13) | ||
30 | #define STATUS_WRITE_OP_DONE (1<<12) | ||
31 | #define STATUS_DATA_TRANS_DONE (1<<11) | ||
32 | #define STATUS_WR_CRC_ERROR_CODE_MASK (3<<10) | ||
33 | #define STATUS_CARD_BUS_CLK_RUN (1<<8) | ||
34 | #define STATUS_APPL_BUFF_FF (1<<7) | ||
35 | #define STATUS_APPL_BUFF_FE (1<<6) | ||
36 | #define STATUS_RESP_CRC_ERR (1<<5) | ||
37 | #define STATUS_CRC_READ_ERR (1<<3) | ||
38 | #define STATUS_CRC_WRITE_ERR (1<<2) | ||
39 | #define STATUS_TIME_OUT_RESP (1<<1) | ||
40 | #define STATUS_TIME_OUT_READ (1<<0) | ||
41 | #define STATUS_ERR_MASK 0x2f | ||
42 | #define CLK_RATE_PRESCALER(x) ((x) & 0x7) | ||
43 | #define CLK_RATE_CLK_RATE(x) (((x) & 0x7) << 3) | ||
44 | #define CMD_DAT_CONT_CMD_RESP_LONG_OFF (1<<12) | ||
45 | #define CMD_DAT_CONT_STOP_READWAIT (1<<11) | ||
46 | #define CMD_DAT_CONT_START_READWAIT (1<<10) | ||
47 | #define CMD_DAT_CONT_BUS_WIDTH_1 (0<<8) | ||
48 | #define CMD_DAT_CONT_BUS_WIDTH_4 (2<<8) | ||
49 | #define CMD_DAT_CONT_INIT (1<<7) | ||
50 | #define CMD_DAT_CONT_BUSY (1<<6) | ||
51 | #define CMD_DAT_CONT_STREAM_BLOCK (1<<5) | ||
52 | #define CMD_DAT_CONT_WRITE (1<<4) | ||
53 | #define CMD_DAT_CONT_DATA_ENABLE (1<<3) | ||
54 | #define CMD_DAT_CONT_RESPONSE_FORMAT_R1 (1) | ||
55 | #define CMD_DAT_CONT_RESPONSE_FORMAT_R2 (2) | ||
56 | #define CMD_DAT_CONT_RESPONSE_FORMAT_R3 (3) | ||
57 | #define CMD_DAT_CONT_RESPONSE_FORMAT_R4 (4) | ||
58 | #define CMD_DAT_CONT_RESPONSE_FORMAT_R5 (5) | ||
59 | #define CMD_DAT_CONT_RESPONSE_FORMAT_R6 (6) | ||
60 | #define INT_MASK_AUTO_CARD_DETECT (1<<6) | ||
61 | #define INT_MASK_DAT0_EN (1<<5) | ||
62 | #define INT_MASK_SDIO (1<<4) | ||
63 | #define INT_MASK_BUF_READY (1<<3) | ||
64 | #define INT_MASK_END_CMD_RES (1<<2) | ||
65 | #define INT_MASK_WRITE_OP_DONE (1<<1) | ||
66 | #define INT_MASK_DATA_TRAN (1<<0) | ||
67 | #define INT_ALL (0x7f) | ||