Diffstat (limited to 'drivers/mmc/host/imxmmc.c')
-rw-r--r-- | drivers/mmc/host/imxmmc.c | 1169
1 files changed, 1169 insertions, 0 deletions
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
new file mode 100644
index 00000000000..881f7ba545a
--- /dev/null
+++ b/drivers/mmc/host/imxmmc.c
@@ -0,0 +1,1169 @@
1 | /* | ||
2 | * linux/drivers/mmc/host/imxmmc.c - Motorola i.MX MMCI driver | ||
3 | * | ||
4 | * Copyright (C) 2004 Sascha Hauer, Pengutronix <sascha@saschahauer.de> | ||
5 | * Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com> | ||
6 | * | ||
7 | * derived from pxamci.c by Russell King | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/ioport.h> | ||
18 | #include <linux/platform_device.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/blkdev.h> | ||
21 | #include <linux/dma-mapping.h> | ||
22 | #include <linux/mmc/host.h> | ||
23 | #include <linux/mmc/card.h> | ||
24 | #include <linux/delay.h> | ||
25 | #include <linux/clk.h> | ||
26 | #include <linux/io.h> | ||
27 | |||
28 | #include <asm/dma.h> | ||
29 | #include <asm/irq.h> | ||
30 | #include <asm/sizes.h> | ||
31 | #include <mach/mmc.h> | ||
32 | #include <mach/imx-dma.h> | ||
33 | |||
34 | #include "imxmmc.h" | ||
35 | |||
36 | #define DRIVER_NAME "imx-mmc" | ||
37 | |||
38 | #define IMXMCI_INT_MASK_DEFAULT (INT_MASK_BUF_READY | INT_MASK_DATA_TRAN | \ | ||
39 | INT_MASK_WRITE_OP_DONE | INT_MASK_END_CMD_RES | \ | ||
40 | INT_MASK_AUTO_CARD_DETECT | INT_MASK_DAT0_EN | INT_MASK_SDIO) | ||
41 | |||
42 | struct imxmci_host { | ||
43 | struct mmc_host *mmc; | ||
44 | spinlock_t lock; | ||
45 | struct resource *res; | ||
46 | void __iomem *base; | ||
47 | int irq; | ||
48 | imx_dmach_t dma; | ||
49 | volatile unsigned int imask; | ||
50 | unsigned int power_mode; | ||
51 | unsigned int present; | ||
52 | struct imxmmc_platform_data *pdata; | ||
53 | |||
54 | struct mmc_request *req; | ||
55 | struct mmc_command *cmd; | ||
56 | struct mmc_data *data; | ||
57 | |||
58 | struct timer_list timer; | ||
59 | struct tasklet_struct tasklet; | ||
60 | unsigned int status_reg; | ||
61 | unsigned long pending_events; | ||
62 | /* The next two fields are for CPU-driven transfers, to work around SDHC deficiencies */ | ||
63 | u16 *data_ptr; | ||
64 | unsigned int data_cnt; | ||
65 | atomic_t stuck_timeout; | ||
66 | |||
67 | unsigned int dma_nents; | ||
68 | unsigned int dma_size; | ||
69 | unsigned int dma_dir; | ||
70 | int dma_allocated; | ||
71 | |||
72 | unsigned char actual_bus_width; | ||
73 | |||
74 | int prev_cmd_code; | ||
75 | |||
76 | struct clk *clk; | ||
77 | }; | ||
78 | |||
79 | #define IMXMCI_PEND_IRQ_b 0 | ||
80 | #define IMXMCI_PEND_DMA_END_b 1 | ||
81 | #define IMXMCI_PEND_DMA_ERR_b 2 | ||
82 | #define IMXMCI_PEND_WAIT_RESP_b 3 | ||
83 | #define IMXMCI_PEND_DMA_DATA_b 4 | ||
84 | #define IMXMCI_PEND_CPU_DATA_b 5 | ||
85 | #define IMXMCI_PEND_CARD_XCHG_b 6 | ||
86 | #define IMXMCI_PEND_SET_INIT_b 7 | ||
87 | #define IMXMCI_PEND_STARTED_b 8 | ||
88 | |||
89 | #define IMXMCI_PEND_IRQ_m (1 << IMXMCI_PEND_IRQ_b) | ||
90 | #define IMXMCI_PEND_DMA_END_m (1 << IMXMCI_PEND_DMA_END_b) | ||
91 | #define IMXMCI_PEND_DMA_ERR_m (1 << IMXMCI_PEND_DMA_ERR_b) | ||
92 | #define IMXMCI_PEND_WAIT_RESP_m (1 << IMXMCI_PEND_WAIT_RESP_b) | ||
93 | #define IMXMCI_PEND_DMA_DATA_m (1 << IMXMCI_PEND_DMA_DATA_b) | ||
94 | #define IMXMCI_PEND_CPU_DATA_m (1 << IMXMCI_PEND_CPU_DATA_b) | ||
95 | #define IMXMCI_PEND_CARD_XCHG_m (1 << IMXMCI_PEND_CARD_XCHG_b) | ||
96 | #define IMXMCI_PEND_SET_INIT_m (1 << IMXMCI_PEND_SET_INIT_b) | ||
97 | #define IMXMCI_PEND_STARTED_m (1 << IMXMCI_PEND_STARTED_b) | ||
98 | |||
99 | static void imxmci_stop_clock(struct imxmci_host *host) | ||
100 | { | ||
101 | int i = 0; | ||
102 | u16 reg; | ||
103 | |||
104 | reg = readw(host->base + MMC_REG_STR_STP_CLK); | ||
105 | writew(reg & ~STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK); | ||
106 | while (i < 0x1000) { | ||
107 | if (!(i & 0x7f)) { | ||
108 | reg = readw(host->base + MMC_REG_STR_STP_CLK); | ||
109 | writew(reg | STR_STP_CLK_STOP_CLK, | ||
110 | host->base + MMC_REG_STR_STP_CLK); | ||
111 | } | ||
112 | |||
113 | reg = readw(host->base + MMC_REG_STATUS); | ||
114 | if (!(reg & STATUS_CARD_BUS_CLK_RUN)) { | ||
115 | /* Check twice before cut */ | ||
116 | reg = readw(host->base + MMC_REG_STATUS); | ||
117 | if (!(reg & STATUS_CARD_BUS_CLK_RUN)) | ||
118 | return; | ||
119 | } | ||
120 | |||
121 | i++; | ||
122 | } | ||
123 | dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n"); | ||
124 | } | ||
125 | |||
126 | static int imxmci_start_clock(struct imxmci_host *host) | ||
127 | { | ||
128 | unsigned int trials = 0; | ||
129 | unsigned int delay_limit = 128; | ||
130 | unsigned long flags; | ||
131 | u16 reg; | ||
132 | |||
133 | reg = readw(host->base + MMC_REG_STR_STP_CLK); | ||
134 | writew(reg & ~STR_STP_CLK_STOP_CLK, host->base + MMC_REG_STR_STP_CLK); | ||
135 | |||
136 | clear_bit(IMXMCI_PEND_STARTED_b, &host->pending_events); | ||
137 | |||
138 | /* | ||
139 | * Command the start of the clock; this usually succeeds in fewer | ||
140 | * than 6 delay loops, but during card detection (low clock rate) | ||
141 | * it can take up to 5000 delay loops and sometimes fails on the first attempt | ||
142 | */ | ||
143 | reg = readw(host->base + MMC_REG_STR_STP_CLK); | ||
144 | writew(reg | STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK); | ||
145 | |||
146 | do { | ||
147 | unsigned int delay = delay_limit; | ||
148 | |||
149 | while (delay--) { | ||
150 | reg = readw(host->base + MMC_REG_STATUS); | ||
151 | if (reg & STATUS_CARD_BUS_CLK_RUN) { | ||
152 | /* Check twice before cut */ | ||
153 | reg = readw(host->base + MMC_REG_STATUS); | ||
154 | if (reg & STATUS_CARD_BUS_CLK_RUN) | ||
155 | return 0; | ||
156 | } | ||
157 | |||
158 | if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) | ||
159 | return 0; | ||
160 | } | ||
161 | |||
162 | local_irq_save(flags); | ||
163 | /* | ||
164 | * Ensure that the request is not doubled under any possible circumstances. | ||
165 | * It is possible that the clock-running state is missed, because some other | ||
166 | * IRQ or scheduling delays this function's execution and the clock has | ||
167 | * already been stopped by other means (response processing, SDHC HW) | ||
168 | */ | ||
169 | if (!test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) { | ||
170 | reg = readw(host->base + MMC_REG_STR_STP_CLK); | ||
171 | writew(reg | STR_STP_CLK_START_CLK, | ||
172 | host->base + MMC_REG_STR_STP_CLK); | ||
173 | } | ||
174 | local_irq_restore(flags); | ||
175 | |||
176 | } while (++trials < 256); | ||
177 | |||
178 | dev_err(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n"); | ||
179 | |||
180 | return -1; | ||
181 | } | ||
182 | |||
183 | static void imxmci_softreset(struct imxmci_host *host) | ||
184 | { | ||
185 | int i; | ||
186 | |||
187 | /* reset sequence */ | ||
188 | writew(0x08, host->base + MMC_REG_STR_STP_CLK); | ||
189 | writew(0x0D, host->base + MMC_REG_STR_STP_CLK); | ||
190 | |||
191 | for (i = 0; i < 8; i++) | ||
192 | writew(0x05, host->base + MMC_REG_STR_STP_CLK); | ||
193 | |||
194 | writew(0xff, host->base + MMC_REG_RES_TO); | ||
195 | writew(512, host->base + MMC_REG_BLK_LEN); | ||
196 | writew(1, host->base + MMC_REG_NOB); | ||
197 | } | ||
198 | |||
199 | static int imxmci_busy_wait_for_status(struct imxmci_host *host, | ||
200 | unsigned int *pstat, unsigned int stat_mask, | ||
201 | int timeout, const char *where) | ||
202 | { | ||
203 | int loops = 0; | ||
204 | |||
205 | while (!(*pstat & stat_mask)) { | ||
206 | loops += 2; | ||
207 | if (loops >= timeout) { | ||
208 | dev_dbg(mmc_dev(host->mmc), "busy wait timeout in %s, STATUS = 0x%x (0x%x)\n", | ||
209 | where, *pstat, stat_mask); | ||
210 | return -1; | ||
211 | } | ||
212 | udelay(2); | ||
213 | *pstat |= readw(host->base + MMC_REG_STATUS); | ||
214 | } | ||
215 | if (!loops) | ||
216 | return 0; | ||
217 | |||
218 | /* The busy-wait is expected here for clocks < 8 MHz due to SDHC hardware flaws */ | ||
219 | if (!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock >= 8000000)) | ||
220 | dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n", | ||
221 | loops, where, *pstat, stat_mask); | ||
222 | return loops; | ||
223 | } | ||
224 | |||
225 | static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data) | ||
226 | { | ||
227 | unsigned int nob = data->blocks; | ||
228 | unsigned int blksz = data->blksz; | ||
229 | unsigned int datasz = nob * blksz; | ||
230 | int i; | ||
231 | |||
232 | if (data->flags & MMC_DATA_STREAM) | ||
233 | nob = 0xffff; | ||
234 | |||
235 | host->data = data; | ||
236 | data->bytes_xfered = 0; | ||
237 | |||
238 | writew(nob, host->base + MMC_REG_NOB); | ||
239 | writew(blksz, host->base + MMC_REG_BLK_LEN); | ||
240 | |||
241 | /* | ||
242 | * DMA cannot be used for small block sizes; we have to use CPU-driven transfers instead. | ||
243 | * We are in big trouble for non-512-byte transfers according to the note in paragraph | ||
244 | * 20.6.7 of the User Manual anyway, but we need to be able to transfer the SCR at least. | ||
245 | * The situation is even more complex in reality. The SDHC is not able to handle | ||
246 | * partial FIFO fills and reads well. The length has to be rounded up to a multiple of the burst size. | ||
247 | * This is required at least for the SCR read. | ||
248 | */ | ||
249 | if (datasz < 512) { | ||
250 | host->dma_size = datasz; | ||
251 | if (data->flags & MMC_DATA_READ) { | ||
252 | host->dma_dir = DMA_FROM_DEVICE; | ||
253 | |||
254 | /* Hack to enable read SCR */ | ||
255 | writew(1, host->base + MMC_REG_NOB); | ||
256 | writew(512, host->base + MMC_REG_BLK_LEN); | ||
257 | } else { | ||
258 | host->dma_dir = DMA_TO_DEVICE; | ||
259 | } | ||
260 | |||
261 | /* Convert back to virtual address */ | ||
262 | host->data_ptr = (u16 *)sg_virt(data->sg); | ||
263 | host->data_cnt = 0; | ||
264 | |||
265 | clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events); | ||
266 | set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events); | ||
267 | |||
268 | return; | ||
269 | } | ||
270 | |||
271 | if (data->flags & MMC_DATA_READ) { | ||
272 | host->dma_dir = DMA_FROM_DEVICE; | ||
273 | host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, | ||
274 | data->sg_len, host->dma_dir); | ||
275 | |||
276 | imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz, | ||
277 | host->res->start + MMC_REG_BUFFER_ACCESS, | ||
278 | DMA_MODE_READ); | ||
279 | |||
280 | /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/ | ||
281 | CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN; | ||
282 | } else { | ||
283 | host->dma_dir = DMA_TO_DEVICE; | ||
284 | |||
285 | host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, | ||
286 | data->sg_len, host->dma_dir); | ||
287 | |||
288 | imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz, | ||
289 | host->res->start + MMC_REG_BUFFER_ACCESS, | ||
290 | DMA_MODE_WRITE); | ||
291 | |||
292 | /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/ | ||
293 | CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN; | ||
294 | } | ||
295 | |||
296 | #if 1 /* This code is here only for consistency checking and can be disabled in the future */ | ||
297 | host->dma_size = 0; | ||
298 | for (i = 0; i < host->dma_nents; i++) | ||
299 | host->dma_size += data->sg[i].length; | ||
300 | |||
301 | if (datasz > host->dma_size) { | ||
302 | dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dma_size\n", | ||
303 | datasz, host->dma_size); | ||
304 | } | ||
305 | #endif | ||
306 | |||
307 | host->dma_size = datasz; | ||
308 | |||
309 | wmb(); | ||
310 | |||
311 | set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events); | ||
312 | clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events); | ||
313 | |||
314 | /* start the DMA engine for reads; writes are delayed until after the initial response */ | ||
315 | if (host->dma_dir == DMA_FROM_DEVICE) | ||
316 | imx_dma_enable(host->dma); | ||
317 | } | ||
318 | |||
319 | static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, unsigned int cmdat) | ||
320 | { | ||
321 | unsigned long flags; | ||
322 | u32 imask; | ||
323 | |||
324 | WARN_ON(host->cmd != NULL); | ||
325 | host->cmd = cmd; | ||
326 | |||
327 | /* Ensure that the clock is stopped, else command programming and start fail */ | ||
328 | imxmci_stop_clock(host); | ||
329 | |||
330 | if (cmd->flags & MMC_RSP_BUSY) | ||
331 | cmdat |= CMD_DAT_CONT_BUSY; | ||
332 | |||
333 | switch (mmc_resp_type(cmd)) { | ||
334 | case MMC_RSP_R1: /* short CRC, OPCODE */ | ||
335 | case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */ | ||
336 | cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R1; | ||
337 | break; | ||
338 | case MMC_RSP_R2: /* long 136 bit + CRC */ | ||
339 | cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R2; | ||
340 | break; | ||
341 | case MMC_RSP_R3: /* short */ | ||
342 | cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R3; | ||
343 | break; | ||
344 | default: | ||
345 | break; | ||
346 | } | ||
347 | |||
348 | if (test_and_clear_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events)) | ||
349 | cmdat |= CMD_DAT_CONT_INIT; /* This command needs init */ | ||
350 | |||
351 | if (host->actual_bus_width == MMC_BUS_WIDTH_4) | ||
352 | cmdat |= CMD_DAT_CONT_BUS_WIDTH_4; | ||
353 | |||
354 | writew(cmd->opcode, host->base + MMC_REG_CMD); | ||
355 | writew(cmd->arg >> 16, host->base + MMC_REG_ARGH); | ||
356 | writew(cmd->arg & 0xffff, host->base + MMC_REG_ARGL); | ||
357 | writew(cmdat, host->base + MMC_REG_CMD_DAT_CONT); | ||
358 | |||
359 | atomic_set(&host->stuck_timeout, 0); | ||
360 | set_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events); | ||
361 | |||
362 | |||
363 | imask = IMXMCI_INT_MASK_DEFAULT; | ||
364 | imask &= ~INT_MASK_END_CMD_RES; | ||
365 | if (cmdat & CMD_DAT_CONT_DATA_ENABLE) { | ||
366 | /* imask &= ~INT_MASK_BUF_READY; */ | ||
367 | imask &= ~INT_MASK_DATA_TRAN; | ||
368 | if (cmdat & CMD_DAT_CONT_WRITE) | ||
369 | imask &= ~INT_MASK_WRITE_OP_DONE; | ||
370 | if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) | ||
371 | imask &= ~INT_MASK_BUF_READY; | ||
372 | } | ||
373 | |||
374 | spin_lock_irqsave(&host->lock, flags); | ||
375 | host->imask = imask; | ||
376 | writew(host->imask, host->base + MMC_REG_INT_MASK); | ||
377 | spin_unlock_irqrestore(&host->lock, flags); | ||
378 | |||
379 | dev_dbg(mmc_dev(host->mmc), "CMD%02d (0x%02x) mask set to 0x%04x\n", | ||
380 | cmd->opcode, cmd->opcode, imask); | ||
381 | |||
382 | imxmci_start_clock(host); | ||
383 | } | ||
384 | |||
385 | static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *req) | ||
386 | { | ||
387 | unsigned long flags; | ||
388 | |||
389 | spin_lock_irqsave(&host->lock, flags); | ||
390 | |||
391 | host->pending_events &= ~(IMXMCI_PEND_WAIT_RESP_m | IMXMCI_PEND_DMA_END_m | | ||
392 | IMXMCI_PEND_DMA_DATA_m | IMXMCI_PEND_CPU_DATA_m); | ||
393 | |||
394 | host->imask = IMXMCI_INT_MASK_DEFAULT; | ||
395 | writew(host->imask, host->base + MMC_REG_INT_MASK); | ||
396 | |||
397 | spin_unlock_irqrestore(&host->lock, flags); | ||
398 | |||
399 | if (req && req->cmd) | ||
400 | host->prev_cmd_code = req->cmd->opcode; | ||
401 | |||
402 | host->req = NULL; | ||
403 | host->cmd = NULL; | ||
404 | host->data = NULL; | ||
405 | mmc_request_done(host->mmc, req); | ||
406 | } | ||
407 | |||
408 | static int imxmci_finish_data(struct imxmci_host *host, unsigned int stat) | ||
409 | { | ||
410 | struct mmc_data *data = host->data; | ||
411 | int data_error; | ||
412 | |||
413 | if (test_and_clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) { | ||
414 | imx_dma_disable(host->dma); | ||
415 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents, | ||
416 | host->dma_dir); | ||
417 | } | ||
418 | |||
419 | if (stat & STATUS_ERR_MASK) { | ||
420 | dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", stat); | ||
421 | if (stat & (STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR)) | ||
422 | data->error = -EILSEQ; | ||
423 | else if (stat & STATUS_TIME_OUT_READ) | ||
424 | data->error = -ETIMEDOUT; | ||
425 | else | ||
426 | data->error = -EIO; | ||
427 | } else { | ||
428 | data->bytes_xfered = host->dma_size; | ||
429 | } | ||
430 | |||
431 | data_error = data->error; | ||
432 | |||
433 | host->data = NULL; | ||
434 | |||
435 | return data_error; | ||
436 | } | ||
437 | |||
438 | static int imxmci_cmd_done(struct imxmci_host *host, unsigned int stat) | ||
439 | { | ||
440 | struct mmc_command *cmd = host->cmd; | ||
441 | int i; | ||
442 | u32 a, b, c; | ||
443 | struct mmc_data *data = host->data; | ||
444 | |||
445 | if (!cmd) | ||
446 | return 0; | ||
447 | |||
448 | host->cmd = NULL; | ||
449 | |||
450 | if (stat & STATUS_TIME_OUT_RESP) { | ||
451 | dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n"); | ||
452 | cmd->error = -ETIMEDOUT; | ||
453 | } else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) { | ||
454 | dev_dbg(mmc_dev(host->mmc), "cmd crc error\n"); | ||
455 | cmd->error = -EILSEQ; | ||
456 | } | ||
457 | |||
458 | if (cmd->flags & MMC_RSP_PRESENT) { | ||
459 | if (cmd->flags & MMC_RSP_136) { | ||
460 | for (i = 0; i < 4; i++) { | ||
461 | a = readw(host->base + MMC_REG_RES_FIFO); | ||
462 | b = readw(host->base + MMC_REG_RES_FIFO); | ||
463 | cmd->resp[i] = a << 16 | b; | ||
464 | } | ||
465 | } else { | ||
466 | a = readw(host->base + MMC_REG_RES_FIFO); | ||
467 | b = readw(host->base + MMC_REG_RES_FIFO); | ||
468 | c = readw(host->base + MMC_REG_RES_FIFO); | ||
469 | cmd->resp[0] = a << 24 | b << 8 | c >> 8; | ||
470 | } | ||
471 | } | ||
472 | |||
473 | dev_dbg(mmc_dev(host->mmc), "RESP 0x%08x, 0x%08x, 0x%08x, 0x%08x, error %d\n", | ||
474 | cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3], cmd->error); | ||
475 | |||
476 | if (data && !cmd->error && !(stat & STATUS_ERR_MASK)) { | ||
477 | if (host->req->data->flags & MMC_DATA_WRITE) { | ||
478 | |||
479 | /* Wait for FIFO to be empty before starting DMA write */ | ||
480 | |||
481 | stat = readw(host->base + MMC_REG_STATUS); | ||
482 | if (imxmci_busy_wait_for_status(host, &stat, | ||
483 | STATUS_APPL_BUFF_FE, | ||
484 | 40, "imxmci_cmd_done DMA WR") < 0) { | ||
485 | cmd->error = -EIO; | ||
486 | imxmci_finish_data(host, stat); | ||
487 | if (host->req) | ||
488 | imxmci_finish_request(host, host->req); | ||
489 | dev_warn(mmc_dev(host->mmc), "STATUS = 0x%04x\n", | ||
490 | stat); | ||
491 | return 0; | ||
492 | } | ||
493 | |||
494 | if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) | ||
495 | imx_dma_enable(host->dma); | ||
496 | } | ||
497 | } else { | ||
498 | struct mmc_request *req; | ||
499 | imxmci_stop_clock(host); | ||
500 | req = host->req; | ||
501 | |||
502 | if (data) | ||
503 | imxmci_finish_data(host, stat); | ||
504 | |||
505 | if (req) | ||
506 | imxmci_finish_request(host, req); | ||
507 | else | ||
508 | dev_warn(mmc_dev(host->mmc), "imxmci_cmd_done: no request to finish\n"); | ||
509 | } | ||
510 | |||
511 | return 1; | ||
512 | } | ||
513 | |||
514 | static int imxmci_data_done(struct imxmci_host *host, unsigned int stat) | ||
515 | { | ||
516 | struct mmc_data *data = host->data; | ||
517 | int data_error; | ||
518 | |||
519 | if (!data) | ||
520 | return 0; | ||
521 | |||
522 | data_error = imxmci_finish_data(host, stat); | ||
523 | |||
524 | if (host->req->stop) { | ||
525 | imxmci_stop_clock(host); | ||
526 | imxmci_start_cmd(host, host->req->stop, 0); | ||
527 | } else { | ||
528 | struct mmc_request *req; | ||
529 | req = host->req; | ||
530 | if (req) | ||
531 | imxmci_finish_request(host, req); | ||
532 | else | ||
533 | dev_warn(mmc_dev(host->mmc), "imxmci_data_done: no request to finish\n"); | ||
534 | } | ||
535 | |||
536 | return 1; | ||
537 | } | ||
538 | |||
539 | static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat) | ||
540 | { | ||
541 | int i; | ||
542 | int burst_len; | ||
543 | int trans_done = 0; | ||
544 | unsigned int stat = *pstat; | ||
545 | |||
546 | if (host->actual_bus_width != MMC_BUS_WIDTH_4) | ||
547 | burst_len = 16; | ||
548 | else | ||
549 | burst_len = 64; | ||
550 | |||
551 | /* This is unfortunately required */ | ||
552 | dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n", | ||
553 | stat); | ||
554 | |||
555 | udelay(20); /* required for clocks < 8MHz*/ | ||
556 | |||
557 | if (host->dma_dir == DMA_FROM_DEVICE) { | ||
558 | imxmci_busy_wait_for_status(host, &stat, | ||
559 | STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE | | ||
560 | STATUS_TIME_OUT_READ, | ||
561 | 50, "imxmci_cpu_driven_data read"); | ||
562 | |||
563 | while ((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) && | ||
564 | !(stat & STATUS_TIME_OUT_READ) && | ||
565 | (host->data_cnt < 512)) { | ||
566 | |||
567 | udelay(20); /* required for clocks < 8MHz*/ | ||
568 | |||
569 | for (i = burst_len; i >= 2 ; i -= 2) { | ||
570 | u16 data; | ||
571 | data = readw(host->base + MMC_REG_BUFFER_ACCESS); | ||
572 | udelay(10); /* required for clocks < 8MHz*/ | ||
573 | if (host->data_cnt+2 <= host->dma_size) { | ||
574 | *(host->data_ptr++) = data; | ||
575 | } else { | ||
576 | if (host->data_cnt < host->dma_size) | ||
577 | *(u8 *)(host->data_ptr) = data; | ||
578 | } | ||
579 | host->data_cnt += 2; | ||
580 | } | ||
581 | |||
582 | stat = readw(host->base + MMC_REG_STATUS); | ||
583 | |||
584 | dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n", | ||
585 | host->data_cnt, burst_len, stat); | ||
586 | } | ||
587 | |||
588 | if ((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512)) | ||
589 | trans_done = 1; | ||
590 | |||
591 | if (host->dma_size & 0x1ff) | ||
592 | stat &= ~STATUS_CRC_READ_ERR; | ||
593 | |||
594 | if (stat & STATUS_TIME_OUT_READ) { | ||
595 | dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read timeout STATUS = 0x%x\n", | ||
596 | stat); | ||
597 | trans_done = -1; | ||
598 | } | ||
599 | |||
600 | } else { | ||
601 | imxmci_busy_wait_for_status(host, &stat, | ||
602 | STATUS_APPL_BUFF_FE, | ||
603 | 20, "imxmci_cpu_driven_data write"); | ||
604 | |||
605 | while ((stat & STATUS_APPL_BUFF_FE) && | ||
606 | (host->data_cnt < host->dma_size)) { | ||
607 | if (burst_len >= host->dma_size - host->data_cnt) { | ||
608 | burst_len = host->dma_size - host->data_cnt; | ||
609 | host->data_cnt = host->dma_size; | ||
610 | trans_done = 1; | ||
611 | } else { | ||
612 | host->data_cnt += burst_len; | ||
613 | } | ||
614 | |||
615 | for (i = burst_len; i > 0 ; i -= 2) | ||
616 | writew(*(host->data_ptr++), host->base + MMC_REG_BUFFER_ACCESS); | ||
617 | |||
618 | stat = readw(host->base + MMC_REG_STATUS); | ||
619 | |||
620 | dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data write burst %d STATUS = 0x%x\n", | ||
621 | burst_len, stat); | ||
622 | } | ||
623 | } | ||
624 | |||
625 | *pstat = stat; | ||
626 | |||
627 | return trans_done; | ||
628 | } | ||
629 | |||
630 | static void imxmci_dma_irq(int dma, void *devid) | ||
631 | { | ||
632 | struct imxmci_host *host = devid; | ||
633 | u32 stat = readw(host->base + MMC_REG_STATUS); | ||
634 | |||
635 | atomic_set(&host->stuck_timeout, 0); | ||
636 | host->status_reg = stat; | ||
637 | set_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events); | ||
638 | tasklet_schedule(&host->tasklet); | ||
639 | } | ||
640 | |||
641 | static irqreturn_t imxmci_irq(int irq, void *devid) | ||
642 | { | ||
643 | struct imxmci_host *host = devid; | ||
644 | u32 stat = readw(host->base + MMC_REG_STATUS); | ||
645 | int handled = 1; | ||
646 | |||
647 | writew(host->imask | INT_MASK_SDIO | INT_MASK_AUTO_CARD_DETECT, | ||
648 | host->base + MMC_REG_INT_MASK); | ||
649 | |||
650 | atomic_set(&host->stuck_timeout, 0); | ||
651 | host->status_reg = stat; | ||
652 | set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events); | ||
653 | set_bit(IMXMCI_PEND_STARTED_b, &host->pending_events); | ||
654 | tasklet_schedule(&host->tasklet); | ||
655 | |||
656 | return IRQ_RETVAL(handled); | ||
657 | } | ||
658 | |||
659 | static void imxmci_tasklet_fnc(unsigned long data) | ||
660 | { | ||
661 | struct imxmci_host *host = (struct imxmci_host *)data; | ||
662 | u32 stat; | ||
663 | unsigned int data_dir_mask = 0; /* STATUS_WR_CRC_ERROR_CODE_MASK */ | ||
664 | int timeout = 0; | ||
665 | |||
666 | if (atomic_read(&host->stuck_timeout) > 4) { | ||
667 | char *what; | ||
668 | timeout = 1; | ||
669 | stat = readw(host->base + MMC_REG_STATUS); | ||
670 | host->status_reg = stat; | ||
671 | if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) | ||
672 | if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) | ||
673 | what = "RESP+DMA"; | ||
674 | else | ||
675 | what = "RESP"; | ||
676 | else | ||
677 | if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) | ||
678 | if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events)) | ||
679 | what = "DATA"; | ||
680 | else | ||
681 | what = "DMA"; | ||
682 | else | ||
683 | what = "???"; | ||
684 | |||
685 | dev_err(mmc_dev(host->mmc), | ||
686 | "%s TIMEOUT, hardware stucked STATUS = 0x%04x IMASK = 0x%04x\n", | ||
687 | what, stat, | ||
688 | readw(host->base + MMC_REG_INT_MASK)); | ||
689 | dev_err(mmc_dev(host->mmc), | ||
690 | "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n", | ||
691 | readw(host->base + MMC_REG_CMD_DAT_CONT), | ||
692 | readw(host->base + MMC_REG_BLK_LEN), | ||
693 | readw(host->base + MMC_REG_NOB), | ||
694 | CCR(host->dma)); | ||
695 | dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n", | ||
696 | host->cmd ? host->cmd->opcode : 0, | ||
697 | host->prev_cmd_code, | ||
698 | 1 << host->actual_bus_width, host->dma_size); | ||
699 | } | ||
700 | |||
701 | if (!host->present || timeout) | ||
702 | host->status_reg = STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ | | ||
703 | STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR; | ||
704 | |||
705 | if (test_bit(IMXMCI_PEND_IRQ_b, &host->pending_events) || timeout) { | ||
706 | clear_bit(IMXMCI_PEND_IRQ_b, &host->pending_events); | ||
707 | |||
708 | stat = readw(host->base + MMC_REG_STATUS); | ||
709 | /* | ||
710 | * This is not required in theory, but there is a chance of missing some flag | ||
711 | * that is cleared automatically by the mask write; the original Freescale code keeps | ||
712 | * stat from IRQ time, so do I | ||
713 | */ | ||
714 | stat |= host->status_reg; | ||
715 | |||
716 | if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) | ||
717 | stat &= ~STATUS_CRC_READ_ERR; | ||
718 | |||
719 | if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) { | ||
720 | imxmci_busy_wait_for_status(host, &stat, | ||
721 | STATUS_END_CMD_RESP | STATUS_ERR_MASK, | ||
722 | 20, "imxmci_tasklet_fnc resp (ERRATUM #4)"); | ||
723 | } | ||
724 | |||
725 | if (stat & (STATUS_END_CMD_RESP | STATUS_ERR_MASK)) { | ||
726 | if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) | ||
727 | imxmci_cmd_done(host, stat); | ||
728 | if (host->data && (stat & STATUS_ERR_MASK)) | ||
729 | imxmci_data_done(host, stat); | ||
730 | } | ||
731 | |||
732 | if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) { | ||
733 | stat |= readw(host->base + MMC_REG_STATUS); | ||
734 | if (imxmci_cpu_driven_data(host, &stat)) { | ||
735 | if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) | ||
736 | imxmci_cmd_done(host, stat); | ||
737 | atomic_clear_mask(IMXMCI_PEND_IRQ_m|IMXMCI_PEND_CPU_DATA_m, | ||
738 | &host->pending_events); | ||
739 | imxmci_data_done(host, stat); | ||
740 | } | ||
741 | } | ||
742 | } | ||
743 | |||
744 | if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events) && | ||
745 | !test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) { | ||
746 | |||
747 | stat = readw(host->base + MMC_REG_STATUS); | ||
748 | /* Same as above */ | ||
749 | stat |= host->status_reg; | ||
750 | |||
751 | if (host->dma_dir == DMA_TO_DEVICE) | ||
752 | data_dir_mask = STATUS_WRITE_OP_DONE; | ||
753 | else | ||
754 | data_dir_mask = STATUS_DATA_TRANS_DONE; | ||
755 | |||
756 | if (stat & data_dir_mask) { | ||
757 | clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events); | ||
758 | imxmci_data_done(host, stat); | ||
759 | } | ||
760 | } | ||
761 | |||
762 | if (test_and_clear_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events)) { | ||
763 | |||
764 | if (host->cmd) | ||
765 | imxmci_cmd_done(host, STATUS_TIME_OUT_RESP); | ||
766 | |||
767 | if (host->data) | ||
768 | imxmci_data_done(host, STATUS_TIME_OUT_READ | | ||
769 | STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR); | ||
770 | |||
771 | if (host->req) | ||
772 | imxmci_finish_request(host, host->req); | ||
773 | |||
774 | mmc_detect_change(host->mmc, msecs_to_jiffies(100)); | ||
775 | |||
776 | } | ||
777 | } | ||
778 | |||
779 | static void imxmci_request(struct mmc_host *mmc, struct mmc_request *req) | ||
780 | { | ||
781 | struct imxmci_host *host = mmc_priv(mmc); | ||
782 | unsigned int cmdat; | ||
783 | |||
784 | WARN_ON(host->req != NULL); | ||
785 | |||
786 | host->req = req; | ||
787 | |||
788 | cmdat = 0; | ||
789 | |||
790 | if (req->data) { | ||
791 | imxmci_setup_data(host, req->data); | ||
792 | |||
793 | cmdat |= CMD_DAT_CONT_DATA_ENABLE; | ||
794 | |||
795 | if (req->data->flags & MMC_DATA_WRITE) | ||
796 | cmdat |= CMD_DAT_CONT_WRITE; | ||
797 | |||
798 | if (req->data->flags & MMC_DATA_STREAM) | ||
799 | cmdat |= CMD_DAT_CONT_STREAM_BLOCK; | ||
800 | } | ||
801 | |||
802 | imxmci_start_cmd(host, req->cmd, cmdat); | ||
803 | } | ||
804 | |||
805 | #define CLK_RATE 19200000 | ||
806 | |||
807 | static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | ||
808 | { | ||
809 | struct imxmci_host *host = mmc_priv(mmc); | ||
810 | int prescaler; | ||
811 | |||
812 | if (ios->bus_width == MMC_BUS_WIDTH_4) { | ||
813 | host->actual_bus_width = MMC_BUS_WIDTH_4; | ||
814 | imx_gpio_mode(PB11_PF_SD_DAT3); | ||
815 | BLR(host->dma) = 0; /* burst 64 byte read/write */ | ||
816 | } else { | ||
817 | host->actual_bus_width = MMC_BUS_WIDTH_1; | ||
818 | imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11); | ||
819 | BLR(host->dma) = 16; /* burst 16 byte read/write */ | ||
820 | } | ||
821 | |||
822 | if (host->power_mode != ios->power_mode) { | ||
823 | switch (ios->power_mode) { | ||
824 | case MMC_POWER_OFF: | ||
825 | break; | ||
826 | case MMC_POWER_UP: | ||
827 | set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events); | ||
828 | break; | ||
829 | case MMC_POWER_ON: | ||
830 | break; | ||
831 | } | ||
832 | host->power_mode = ios->power_mode; | ||
833 | } | ||
834 | |||
835 | if (ios->clock) { | ||
836 | unsigned int clk; | ||
837 | u16 reg; | ||
838 | |||
839 | /* The prescaler is 5 for PERCLK2 equal to 96 MHz, | ||
840 | * so that 96 MHz / 5 = 19.2 MHz | ||
841 | */ | ||
842 | clk = clk_get_rate(host->clk); | ||
843 | prescaler = (clk + (CLK_RATE * 7) / 8) / CLK_RATE; | ||
844 | switch (prescaler) { | ||
845 | case 0: | ||
846 | case 1: prescaler = 0; | ||
847 | break; | ||
848 | case 2: prescaler = 1; | ||
849 | break; | ||
850 | case 3: prescaler = 2; | ||
851 | break; | ||
852 | case 4: prescaler = 4; | ||
853 | break; | ||
854 | default: | ||
855 | case 5: prescaler = 5; | ||
856 | break; | ||
857 | } | ||
858 | |||
859 | dev_dbg(mmc_dev(host->mmc), "PERCLK2 %d MHz -> prescaler %d\n", | ||
860 | clk, prescaler); | ||
861 | |||
862 | for (clk = 0; clk < 8; clk++) { | ||
863 | int x; | ||
864 | x = CLK_RATE / (1 << clk); | ||
865 | if (x <= ios->clock) | ||
866 | break; | ||
867 | } | ||
868 | |||
869 | /* enable controller */ | ||
870 | reg = readw(host->base + MMC_REG_STR_STP_CLK); | ||
871 | writew(reg | STR_STP_CLK_ENABLE, | ||
872 | host->base + MMC_REG_STR_STP_CLK); | ||
873 | |||
874 | imxmci_stop_clock(host); | ||
875 | writew((prescaler << 3) | clk, host->base + MMC_REG_CLK_RATE); | ||
876 | /* | ||
877 | * As I understand it, the clock should not be started here, because it would | ||
878 | * start the SDHC sequencer and send the last or a random command to the card | ||
879 | */ | ||
880 | /* imxmci_start_clock(host); */ | ||
881 | |||
882 | dev_dbg(mmc_dev(host->mmc), | ||
883 | "MMC_CLK_RATE: 0x%08x\n", | ||
884 | readw(host->base + MMC_REG_CLK_RATE)); | ||
885 | } else { | ||
886 | imxmci_stop_clock(host); | ||
887 | } | ||
888 | } | ||
889 | |||
890 | static int imxmci_get_ro(struct mmc_host *mmc) | ||
891 | { | ||
892 | struct imxmci_host *host = mmc_priv(mmc); | ||
893 | |||
894 | if (host->pdata && host->pdata->get_ro) | ||
895 | return !!host->pdata->get_ro(mmc_dev(mmc)); | ||
896 | /* | ||
897 | * Board doesn't support read only detection; let the mmc core | ||
898 | * decide what to do. | ||
899 | */ | ||
900 | return -ENOSYS; | ||
901 | } | ||
902 | |||
903 | |||
904 | static const struct mmc_host_ops imxmci_ops = { | ||
905 | .request = imxmci_request, | ||
906 | .set_ios = imxmci_set_ios, | ||
907 | .get_ro = imxmci_get_ro, | ||
908 | }; | ||
909 | |||
910 | static void imxmci_check_status(unsigned long data) | ||
911 | { | ||
912 | struct imxmci_host *host = (struct imxmci_host *)data; | ||
913 | |||
914 | if (host->pdata && host->pdata->card_present && | ||
915 | host->pdata->card_present(mmc_dev(host->mmc)) != host->present) { | ||
916 | host->present ^= 1; | ||
917 | dev_info(mmc_dev(host->mmc), "card %s\n", | ||
918 | host->present ? "inserted" : "removed"); | ||
919 | |||
920 | set_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events); | ||
921 | tasklet_schedule(&host->tasklet); | ||
922 | } | ||
923 | |||
924 | if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events) || | ||
925 | test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) { | ||
926 | atomic_inc(&host->stuck_timeout); | ||
927 | if (atomic_read(&host->stuck_timeout) > 4) | ||
928 | tasklet_schedule(&host->tasklet); | ||
929 | } else { | ||
930 | atomic_set(&host->stuck_timeout, 0); | ||
931 | |||
932 | } | ||
933 | |||
934 | mod_timer(&host->timer, jiffies + (HZ>>1)); | ||
935 | } | ||
936 | |||
937 | static int __init imxmci_probe(struct platform_device *pdev) | ||
938 | { | ||
939 | struct mmc_host *mmc; | ||
940 | struct imxmci_host *host = NULL; | ||
941 | struct resource *r; | ||
942 | int ret = 0, irq; | ||
943 | u16 rev_no; | ||
944 | |||
945 | printk(KERN_INFO "i.MX mmc driver\n"); | ||
946 | |||
947 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
948 | irq = platform_get_irq(pdev, 0); | ||
949 | if (!r || irq < 0) | ||
950 | return -ENXIO; | ||
951 | |||
952 | r = request_mem_region(r->start, resource_size(r), pdev->name); | ||
953 | if (!r) | ||
954 | return -EBUSY; | ||
955 | |||
956 | mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev); | ||
957 | if (!mmc) { | ||
958 | ret = -ENOMEM; | ||
959 | goto out; | ||
960 | } | ||
961 | |||
962 | mmc->ops = &imxmci_ops; | ||
963 | mmc->f_min = 150000; | ||
964 | mmc->f_max = CLK_RATE/2; | ||
965 | mmc->ocr_avail = MMC_VDD_32_33; | ||
966 | mmc->caps = MMC_CAP_4_BIT_DATA; | ||
967 | |||
968 | /* MMC core transfer sizes tunable parameters */ | ||
969 | mmc->max_segs = 64; | ||
970 | mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */ | ||
971 | mmc->max_req_size = 64*512; /* default PAGE_CACHE_SIZE */ | ||
972 | mmc->max_blk_size = 2048; | ||
973 | mmc->max_blk_count = 65535; | ||
974 | |||
975 | host = mmc_priv(mmc); | ||
976 | host->base = ioremap(r->start, resource_size(r)); | ||
977 | if (!host->base) { | ||
978 | ret = -ENOMEM; | ||
979 | goto out; | ||
980 | } | ||
981 | |||
982 | host->mmc = mmc; | ||
983 | host->dma_allocated = 0; | ||
984 | host->pdata = pdev->dev.platform_data; | ||
985 | if (!host->pdata) | ||
986 | dev_warn(&pdev->dev, "No platform data provided!\n"); | ||
987 | |||
988 | spin_lock_init(&host->lock); | ||
989 | host->res = r; | ||
990 | host->irq = irq; | ||
991 | |||
992 | host->clk = clk_get(&pdev->dev, "perclk2"); | ||
993 | if (IS_ERR(host->clk)) { | ||
994 | ret = PTR_ERR(host->clk); | ||
995 | goto out; | ||
996 | } | ||
997 | clk_enable(host->clk); | ||
998 | |||
999 | imx_gpio_mode(PB8_PF_SD_DAT0); | ||
1000 | imx_gpio_mode(PB9_PF_SD_DAT1); | ||
1001 | imx_gpio_mode(PB10_PF_SD_DAT2); | ||
1002 | /* Configured as GPIO with pull-up to ensure the right MMC card mode */ | ||
1003 | /* Switched to PB11_PF_SD_DAT3 if a 4-bit bus is configured */ | ||
1004 | imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11); | ||
1005 | /* imx_gpio_mode(PB11_PF_SD_DAT3); */ | ||
1006 | imx_gpio_mode(PB12_PF_SD_CLK); | ||
1007 | imx_gpio_mode(PB13_PF_SD_CMD); | ||
1008 | |||
1009 | imxmci_softreset(host); | ||
1010 | |||
1011 | rev_no = readw(host->base + MMC_REG_REV_NO); | ||
1012 | if (rev_no != 0x390) { | ||
1013 | dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n", | ||
1014 | readw(host->base + MMC_REG_REV_NO)); | ||
1015 | goto out; | ||
1016 | } | ||
1017 | |||
1018 | /* recommended in data sheet */ | ||
1019 | writew(0x2db4, host->base + MMC_REG_READ_TO); | ||
1020 | |||
1021 | host->imask = IMXMCI_INT_MASK_DEFAULT; | ||
1022 | writew(host->imask, host->base + MMC_REG_INT_MASK); | ||
1023 | |||
1024 | host->dma = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_LOW); | ||
1025 | if (host->dma < 0) { | ||
1026 | dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n"); | ||
1027 | ret = -EBUSY; | ||
1028 | goto out; | ||
1029 | } | ||
1030 | host->dma_allocated = 1; | ||
1031 | imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host); | ||
1032 | RSSR(host->dma) = DMA_REQ_SDHC; | ||
1033 | |||
1034 | tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host); | ||
1035 | host->status_reg = 0; | ||
1036 | host->pending_events = 0; | ||
1037 | |||
1038 | ret = request_irq(host->irq, imxmci_irq, 0, DRIVER_NAME, host); | ||
1039 | if (ret) | ||
1040 | goto out; | ||
1041 | |||
1042 | if (host->pdata && host->pdata->card_present) | ||
1043 | host->present = host->pdata->card_present(mmc_dev(mmc)); | ||
1044 | else /* if there is no way to detect, assume that the card is present */ | ||
1045 | host->present = 1; | ||
1046 | |||
1047 | init_timer(&host->timer); | ||
1048 | host->timer.data = (unsigned long)host; | ||
1049 | host->timer.function = imxmci_check_status; | ||
1050 | add_timer(&host->timer); | ||
1051 | mod_timer(&host->timer, jiffies + (HZ >> 1)); | ||
1052 | |||
1053 | platform_set_drvdata(pdev, mmc); | ||
1054 | |||
1055 | mmc_add_host(mmc); | ||
1056 | |||
1057 | return 0; | ||
1058 | |||
1059 | out: | ||
1060 | if (host) { | ||
1061 | if (host->dma_allocated) { | ||
1062 | imx_dma_free(host->dma); | ||
1063 | host->dma_allocated = 0; | ||
1064 | } | ||
1065 | if (host->clk) { | ||
1066 | clk_disable(host->clk); | ||
1067 | clk_put(host->clk); | ||
1068 | } | ||
1069 | if (host->base) | ||
1070 | iounmap(host->base); | ||
1071 | } | ||
1072 | if (mmc) | ||
1073 | mmc_free_host(mmc); | ||
1074 | release_mem_region(r->start, resource_size(r)); | ||
1075 | return ret; | ||
1076 | } | ||
1077 | |||
1078 | static int __exit imxmci_remove(struct platform_device *pdev) | ||
1079 | { | ||
1080 | struct mmc_host *mmc = platform_get_drvdata(pdev); | ||
1081 | |||
1082 | platform_set_drvdata(pdev, NULL); | ||
1083 | |||
1084 | if (mmc) { | ||
1085 | struct imxmci_host *host = mmc_priv(mmc); | ||
1086 | |||
1087 | tasklet_disable(&host->tasklet); | ||
1088 | |||
1089 | del_timer_sync(&host->timer); | ||
1090 | mmc_remove_host(mmc); | ||
1091 | |||
1092 | free_irq(host->irq, host); | ||
1093 | iounmap(host->base); | ||
1094 | if (host->dma_allocated) { | ||
1095 | imx_dma_free(host->dma); | ||
1096 | host->dma_allocated = 0; | ||
1097 | } | ||
1098 | |||
1099 | tasklet_kill(&host->tasklet); | ||
1100 | |||
1101 | clk_disable(host->clk); | ||
1102 | clk_put(host->clk); | ||
1103 | |||
1104 | release_mem_region(host->res->start, resource_size(host->res)); | ||
1105 | |||
1106 | mmc_free_host(mmc); | ||
1107 | } | ||
1108 | return 0; | ||
1109 | } | ||
1110 | |||
1111 | #ifdef CONFIG_PM | ||
1112 | static int imxmci_suspend(struct platform_device *dev, pm_message_t state) | ||
1113 | { | ||
1114 | struct mmc_host *mmc = platform_get_drvdata(dev); | ||
1115 | int ret = 0; | ||
1116 | |||
1117 | if (mmc) | ||
1118 | ret = mmc_suspend_host(mmc); | ||
1119 | |||
1120 | return ret; | ||
1121 | } | ||
1122 | |||
1123 | static int imxmci_resume(struct platform_device *dev) | ||
1124 | { | ||
1125 | struct mmc_host *mmc = platform_get_drvdata(dev); | ||
1126 | struct imxmci_host *host; | ||
1127 | int ret = 0; | ||
1128 | |||
1129 | if (mmc) { | ||
1130 | host = mmc_priv(mmc); | ||
1131 | if (host) | ||
1132 | set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events); | ||
1133 | ret = mmc_resume_host(mmc); | ||
1134 | } | ||
1135 | |||
1136 | return ret; | ||
1137 | } | ||
1138 | #else | ||
1139 | #define imxmci_suspend NULL | ||
1140 | #define imxmci_resume NULL | ||
1141 | #endif /* CONFIG_PM */ | ||
1142 | |||
1143 | static struct platform_driver imxmci_driver = { | ||
1144 | .remove = __exit_p(imxmci_remove), | ||
1145 | .suspend = imxmci_suspend, | ||
1146 | .resume = imxmci_resume, | ||
1147 | .driver = { | ||
1148 | .name = DRIVER_NAME, | ||
1149 | .owner = THIS_MODULE, | ||
1150 | } | ||
1151 | }; | ||
1152 | |||
1153 | static int __init imxmci_init(void) | ||
1154 | { | ||
1155 | return platform_driver_probe(&imxmci_driver, imxmci_probe); | ||
1156 | } | ||
1157 | |||
1158 | static void __exit imxmci_exit(void) | ||
1159 | { | ||
1160 | platform_driver_unregister(&imxmci_driver); | ||
1161 | } | ||
1162 | |||
1163 | module_init(imxmci_init); | ||
1164 | module_exit(imxmci_exit); | ||
1165 | |||
1166 | MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver"); | ||
1167 | MODULE_AUTHOR("Sascha Hauer, Pengutronix"); | ||
1168 | MODULE_LICENSE("GPL"); | ||
1169 | MODULE_ALIAS("platform:imx-mmc"); | ||