Diffstat (limited to 'drivers/mmc/host/imxmmc.c')
| -rw-r--r-- | drivers/mmc/host/imxmmc.c | 1137 |
1 files changed, 1137 insertions, 0 deletions
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
new file mode 100644
index 000000000000..7ee2045acbef
--- /dev/null
+++ b/drivers/mmc/host/imxmmc.c
| @@ -0,0 +1,1137 @@ | |||
| 1 | /* | ||
| 2 | * linux/drivers/mmc/host/imxmmc.c - Motorola i.MX MMCI driver | ||
| 3 | * | ||
| 4 | * Copyright (C) 2004 Sascha Hauer, Pengutronix <sascha@saschahauer.de> | ||
| 5 | * Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com> | ||
| 6 | * | ||
| 7 | * derived from pxamci.c by Russell King | ||
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify | ||
| 10 | * it under the terms of the GNU General Public License version 2 as | ||
| 11 | * published by the Free Software Foundation. | ||
| 12 | * | ||
| 13 | * 2005-04-17 Pavel Pisa <pisa@cmp.felk.cvut.cz> | ||
| 14 | * Changed to conform to the redesigned i.MX scatter-gather DMA interface | ||
| 15 | * | ||
| 16 | * 2005-11-04 Pavel Pisa <pisa@cmp.felk.cvut.cz> | ||
| 17 | * Updated for 2.6.14 kernel | ||
| 18 | * | ||
| 19 | * 2005-12-13 Jay Monkman <jtm@smoothsmoothie.com> | ||
| 20 | * Found and corrected problems in the write path | ||
| 21 | * | ||
| 22 | * 2005-12-30 Pavel Pisa <pisa@cmp.felk.cvut.cz> | ||
| 23 | * The event handling rewritten the right way in softirq. | ||
| 24 | * Added many ugly hacks and delays to overcome SDHC | ||
| 25 | * deficiencies | ||
| 26 | * | ||
| 27 | */ | ||
| 28 | |||
| 29 | #ifdef CONFIG_MMC_DEBUG | ||
| 30 | #define DEBUG | ||
| 31 | #else | ||
| 32 | #undef DEBUG | ||
| 33 | #endif | ||
| 34 | |||
| 35 | #include <linux/module.h> | ||
| 36 | #include <linux/init.h> | ||
| 37 | #include <linux/ioport.h> | ||
| 38 | #include <linux/platform_device.h> | ||
| 39 | #include <linux/interrupt.h> | ||
| 40 | #include <linux/blkdev.h> | ||
| 41 | #include <linux/dma-mapping.h> | ||
| 42 | #include <linux/mmc/host.h> | ||
| 43 | #include <linux/mmc/card.h> | ||
| 44 | #include <linux/delay.h> | ||
| 45 | |||
| 46 | #include <asm/dma.h> | ||
| 47 | #include <asm/io.h> | ||
| 48 | #include <asm/irq.h> | ||
| 49 | #include <asm/sizes.h> | ||
| 50 | #include <asm/arch/mmc.h> | ||
| 51 | #include <asm/arch/imx-dma.h> | ||
| 52 | |||
| 53 | #include "imxmmc.h" | ||
| 54 | |||
| 55 | #define DRIVER_NAME "imx-mmc" | ||
| 56 | |||
| 57 | #define IMXMCI_INT_MASK_DEFAULT (INT_MASK_BUF_READY | INT_MASK_DATA_TRAN | \ | ||
| 58 | INT_MASK_WRITE_OP_DONE | INT_MASK_END_CMD_RES | \ | ||
| 59 | INT_MASK_AUTO_CARD_DETECT | INT_MASK_DAT0_EN | INT_MASK_SDIO) | ||
| 60 | |||
| 61 | struct imxmci_host { | ||
| 62 | struct mmc_host *mmc; | ||
| 63 | spinlock_t lock; | ||
| 64 | struct resource *res; | ||
| 65 | int irq; | ||
| 66 | imx_dmach_t dma; | ||
| 67 | unsigned int clkrt; | ||
| 68 | unsigned int cmdat; | ||
| 69 | volatile unsigned int imask; | ||
| 70 | unsigned int power_mode; | ||
| 71 | unsigned int present; | ||
| 72 | struct imxmmc_platform_data *pdata; | ||
| 73 | |||
| 74 | struct mmc_request *req; | ||
| 75 | struct mmc_command *cmd; | ||
| 76 | struct mmc_data *data; | ||
| 77 | |||
| 78 | struct timer_list timer; | ||
| 79 | struct tasklet_struct tasklet; | ||
| 80 | unsigned int status_reg; | ||
| 81 | unsigned long pending_events; | ||
| 82 | /* The next two fields are used for CPU-driven transfers to overcome SDHC deficiencies */ | ||
| 83 | u16 *data_ptr; | ||
| 84 | unsigned int data_cnt; | ||
| 85 | atomic_t stuck_timeout; | ||
| 86 | |||
| 87 | unsigned int dma_nents; | ||
| 88 | unsigned int dma_size; | ||
| 89 | unsigned int dma_dir; | ||
| 90 | int dma_allocated; | ||
| 91 | |||
| 92 | unsigned char actual_bus_width; | ||
| 93 | |||
| 94 | int prev_cmd_code; | ||
| 95 | }; | ||
| 96 | |||
| 97 | #define IMXMCI_PEND_IRQ_b 0 | ||
| 98 | #define IMXMCI_PEND_DMA_END_b 1 | ||
| 99 | #define IMXMCI_PEND_DMA_ERR_b 2 | ||
| 100 | #define IMXMCI_PEND_WAIT_RESP_b 3 | ||
| 101 | #define IMXMCI_PEND_DMA_DATA_b 4 | ||
| 102 | #define IMXMCI_PEND_CPU_DATA_b 5 | ||
| 103 | #define IMXMCI_PEND_CARD_XCHG_b 6 | ||
| 104 | #define IMXMCI_PEND_SET_INIT_b 7 | ||
| 105 | #define IMXMCI_PEND_STARTED_b 8 | ||
| 106 | |||
| 107 | #define IMXMCI_PEND_IRQ_m (1 << IMXMCI_PEND_IRQ_b) | ||
| 108 | #define IMXMCI_PEND_DMA_END_m (1 << IMXMCI_PEND_DMA_END_b) | ||
| 109 | #define IMXMCI_PEND_DMA_ERR_m (1 << IMXMCI_PEND_DMA_ERR_b) | ||
| 110 | #define IMXMCI_PEND_WAIT_RESP_m (1 << IMXMCI_PEND_WAIT_RESP_b) | ||
| 111 | #define IMXMCI_PEND_DMA_DATA_m (1 << IMXMCI_PEND_DMA_DATA_b) | ||
| 112 | #define IMXMCI_PEND_CPU_DATA_m (1 << IMXMCI_PEND_CPU_DATA_b) | ||
| 113 | #define IMXMCI_PEND_CARD_XCHG_m (1 << IMXMCI_PEND_CARD_XCHG_b) | ||
| 114 | #define IMXMCI_PEND_SET_INIT_m (1 << IMXMCI_PEND_SET_INIT_b) | ||
| 115 | #define IMXMCI_PEND_STARTED_m (1 << IMXMCI_PEND_STARTED_b) | ||
| 116 | |||
| 117 | static void imxmci_stop_clock(struct imxmci_host *host) | ||
| 118 | { | ||
| 119 | int i = 0; | ||
| 120 | MMC_STR_STP_CLK &= ~STR_STP_CLK_START_CLK; | ||
| 121 | while(i < 0x1000) { | ||
| 122 | if(!(i & 0x7f)) | ||
| 123 | MMC_STR_STP_CLK |= STR_STP_CLK_STOP_CLK; | ||
| 124 | |||
| 125 | if(!(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)) { | ||
| 126 | /* Check twice before cut */ | ||
| 127 | if(!(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)) | ||
| 128 | return; | ||
| 129 | } | ||
| 130 | |||
| 131 | i++; | ||
| 132 | } | ||
| 133 | dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n"); | ||
| 134 | } | ||
| 135 | |||
| 136 | static int imxmci_start_clock(struct imxmci_host *host) | ||
| 137 | { | ||
| 138 | unsigned int trials = 0; | ||
| 139 | unsigned int delay_limit = 128; | ||
| 140 | unsigned long flags; | ||
| 141 | |||
| 142 | MMC_STR_STP_CLK &= ~STR_STP_CLK_STOP_CLK; | ||
| 143 | |||
| 144 | clear_bit(IMXMCI_PEND_STARTED_b, &host->pending_events); | ||
| 145 | |||
| 146 | /* | ||
| 147 | * Command the start of the clock; this usually succeeds in fewer | ||
| 148 | * than 6 delay loops, but during card detection (low clock rate) | ||
| 149 | * it can take up to 5000 delay loops and sometimes fails on the first attempt | ||
| 150 | */ | ||
| 151 | MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK; | ||
| 152 | |||
| 153 | do { | ||
| 154 | unsigned int delay = delay_limit; | ||
| 155 | |||
| 156 | while(delay--){ | ||
| 157 | if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN) | ||
| 158 | /* Check twice before cut */ | ||
| 159 | if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN) | ||
| 160 | return 0; | ||
| 161 | |||
| 162 | if(test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) | ||
| 163 | return 0; | ||
| 164 | } | ||
| 165 | |||
| 166 | local_irq_save(flags); | ||
| 167 | /* | ||
| 168 | * Ensure that the request is not issued twice under any circumstances. | ||
| 169 | * It is possible that the clock-running state is missed, because some other | ||
| 170 | * IRQ or the scheduler delays this function's execution and the clock has | ||
| 171 | * already been stopped by other means (response processing, SDHC HW) | ||
| 172 | */ | ||
| 173 | if(!test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) | ||
| 174 | MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK; | ||
| 175 | local_irq_restore(flags); | ||
| 176 | |||
| 177 | } while(++trials<256); | ||
| 178 | |||
| 179 | dev_err(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n"); | ||
| 180 | |||
| 181 | return -1; | ||
| 182 | } | ||
| 183 | |||
| 184 | static void imxmci_softreset(void) | ||
| 185 | { | ||
| 186 | /* reset sequence */ | ||
| 187 | MMC_STR_STP_CLK = 0x8; | ||
| 188 | MMC_STR_STP_CLK = 0xD; | ||
| 189 | MMC_STR_STP_CLK = 0x5; | ||
| 190 | MMC_STR_STP_CLK = 0x5; | ||
| 191 | MMC_STR_STP_CLK = 0x5; | ||
| 192 | MMC_STR_STP_CLK = 0x5; | ||
| 193 | MMC_STR_STP_CLK = 0x5; | ||
| 194 | MMC_STR_STP_CLK = 0x5; | ||
| 195 | MMC_STR_STP_CLK = 0x5; | ||
| 196 | MMC_STR_STP_CLK = 0x5; | ||
| 197 | |||
| 198 | MMC_RES_TO = 0xff; | ||
| 199 | MMC_BLK_LEN = 512; | ||
| 200 | MMC_NOB = 1; | ||
| 201 | } | ||
| 202 | |||
| 203 | static int imxmci_busy_wait_for_status(struct imxmci_host *host, | ||
| 204 | unsigned int *pstat, unsigned int stat_mask, | ||
| 205 | int timeout, const char *where) | ||
| 206 | { | ||
| 207 | int loops=0; | ||
| 208 | while(!(*pstat & stat_mask)) { | ||
| 209 | loops+=2; | ||
| 210 | if(loops >= timeout) { | ||
| 211 | dev_dbg(mmc_dev(host->mmc), "busy wait timeout in %s, STATUS = 0x%x (0x%x)\n", | ||
| 212 | where, *pstat, stat_mask); | ||
| 213 | return -1; | ||
| 214 | } | ||
| 215 | udelay(2); | ||
| 216 | *pstat |= MMC_STATUS; | ||
| 217 | } | ||
| 218 | if(!loops) | ||
| 219 | return 0; | ||
| 220 | |||
| 221 | /* The busy-wait is expected here for clocks below 8 MHz due to SDHC hardware flaws */ | ||
| 222 | if(!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock>=8000000)) | ||
| 223 | dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n", | ||
| 224 | loops, where, *pstat, stat_mask); | ||
| 225 | return loops; | ||
| 226 | } | ||
| 227 | |||
| 228 | static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data) | ||
| 229 | { | ||
| 230 | unsigned int nob = data->blocks; | ||
| 231 | unsigned int blksz = data->blksz; | ||
| 232 | unsigned int datasz = nob * blksz; | ||
| 233 | int i; | ||
| 234 | |||
| 235 | if (data->flags & MMC_DATA_STREAM) | ||
| 236 | nob = 0xffff; | ||
| 237 | |||
| 238 | host->data = data; | ||
| 239 | data->bytes_xfered = 0; | ||
| 240 | |||
| 241 | MMC_NOB = nob; | ||
| 242 | MMC_BLK_LEN = blksz; | ||
| 243 | |||
| 244 | /* | ||
| 245 | * DMA cannot be used for small block sizes; we have to use CPU-driven transfers instead. | ||
| 246 | * We are in big trouble for non-512-byte transfers according to the note in paragraph | ||
| 247 | * 20.6.7 of the User Manual anyway, but we need to be able to transfer the SCR at least. | ||
| 248 | * The situation is even more complex in reality. The SDHC is not able to handle | ||
| 249 | * partial FIFO fills and reads well. The length has to be rounded up to a burst-size multiple. | ||
| 250 | * This is required for the SCR read at least. | ||
| 251 | */ | ||
| 252 | if (datasz < 512) { | ||
| 253 | host->dma_size = datasz; | ||
| 254 | if (data->flags & MMC_DATA_READ) { | ||
| 255 | host->dma_dir = DMA_FROM_DEVICE; | ||
| 256 | |||
| 257 | /* Hack to enable read SCR */ | ||
| 258 | MMC_NOB = 1; | ||
| 259 | MMC_BLK_LEN = 512; | ||
| 260 | } else { | ||
| 261 | host->dma_dir = DMA_TO_DEVICE; | ||
| 262 | } | ||
| 263 | |||
| 264 | /* Convert back to virtual address */ | ||
| 265 | host->data_ptr = (u16*)(page_address(data->sg->page) + data->sg->offset); | ||
| 266 | host->data_cnt = 0; | ||
| 267 | |||
| 268 | clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events); | ||
| 269 | set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events); | ||
| 270 | |||
| 271 | return; | ||
| 272 | } | ||
| 273 | |||
| 274 | if (data->flags & MMC_DATA_READ) { | ||
| 275 | host->dma_dir = DMA_FROM_DEVICE; | ||
| 276 | host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, | ||
| 277 | data->sg_len, host->dma_dir); | ||
| 278 | |||
| 279 | imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz, | ||
| 280 | host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_READ); | ||
| 281 | |||
| 282 | /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/ | ||
| 283 | CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN; | ||
| 284 | } else { | ||
| 285 | host->dma_dir = DMA_TO_DEVICE; | ||
| 286 | |||
| 287 | host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, | ||
| 288 | data->sg_len, host->dma_dir); | ||
| 289 | |||
| 290 | imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz, | ||
| 291 | host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_WRITE); | ||
| 292 | |||
| 293 | /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/ | ||
| 294 | CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN; | ||
| 295 | } | ||
| 296 | |||
| 297 | #if 1 /* This code is here only for consistency checking and can be disabled in the future */ | ||
| 298 | host->dma_size = 0; | ||
| 299 | for(i=0; i<host->dma_nents; i++) | ||
| 300 | host->dma_size+=data->sg[i].length; | ||
| 301 | |||
| 302 | if (datasz > host->dma_size) { | ||
| 303 | dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dma_size\n", | ||
| 304 | datasz, host->dma_size); | ||
| 305 | } | ||
| 306 | #endif | ||
| 307 | |||
| 308 | host->dma_size = datasz; | ||
| 309 | |||
| 310 | wmb(); | ||
| 311 | |||
| 312 | if(host->actual_bus_width == MMC_BUS_WIDTH_4) | ||
| 313 | BLR(host->dma) = 0; /* burst 64 byte read / 64 bytes write */ | ||
| 314 | else | ||
| 315 | BLR(host->dma) = 16; /* burst 16 byte read / 16 bytes write */ | ||
| 316 | |||
| 317 | RSSR(host->dma) = DMA_REQ_SDHC; | ||
| 318 | |||
| 319 | set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events); | ||
| 320 | clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events); | ||
| 321 | |||
| 322 | /* start the DMA engine for read; for write it is delayed until after the initial response */ | ||
| 323 | if (host->dma_dir == DMA_FROM_DEVICE) { | ||
| 324 | imx_dma_enable(host->dma); | ||
| 325 | } | ||
| 326 | } | ||
| 327 | |||
| 328 | static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, unsigned int cmdat) | ||
| 329 | { | ||
| 330 | unsigned long flags; | ||
| 331 | u32 imask; | ||
| 332 | |||
| 333 | WARN_ON(host->cmd != NULL); | ||
| 334 | host->cmd = cmd; | ||
| 335 | |||
| 336 | /* Ensure that the clock is stopped, else command programming and start fail */ | ||
| 337 | imxmci_stop_clock(host); | ||
| 338 | |||
| 339 | if (cmd->flags & MMC_RSP_BUSY) | ||
| 340 | cmdat |= CMD_DAT_CONT_BUSY; | ||
| 341 | |||
| 342 | switch (mmc_resp_type(cmd)) { | ||
| 343 | case MMC_RSP_R1: /* short CRC, OPCODE */ | ||
| 344 | case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */ | ||
| 345 | cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R1; | ||
| 346 | break; | ||
| 347 | case MMC_RSP_R2: /* long 136 bit + CRC */ | ||
| 348 | cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R2; | ||
| 349 | break; | ||
| 350 | case MMC_RSP_R3: /* short */ | ||
| 351 | cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R3; | ||
| 352 | break; | ||
| 353 | default: | ||
| 354 | break; | ||
| 355 | } | ||
| 356 | |||
| 357 | if ( test_and_clear_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events) ) | ||
| 358 | cmdat |= CMD_DAT_CONT_INIT; /* This command needs init */ | ||
| 359 | |||
| 360 | if ( host->actual_bus_width == MMC_BUS_WIDTH_4 ) | ||
| 361 | cmdat |= CMD_DAT_CONT_BUS_WIDTH_4; | ||
| 362 | |||
| 363 | MMC_CMD = cmd->opcode; | ||
| 364 | MMC_ARGH = cmd->arg >> 16; | ||
| 365 | MMC_ARGL = cmd->arg & 0xffff; | ||
| 366 | MMC_CMD_DAT_CONT = cmdat; | ||
| 367 | |||
| 368 | atomic_set(&host->stuck_timeout, 0); | ||
| 369 | set_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events); | ||
| 370 | |||
| 371 | |||
| 372 | imask = IMXMCI_INT_MASK_DEFAULT; | ||
| 373 | imask &= ~INT_MASK_END_CMD_RES; | ||
| 374 | if ( cmdat & CMD_DAT_CONT_DATA_ENABLE ) { | ||
| 375 | /*imask &= ~INT_MASK_BUF_READY;*/ | ||
| 376 | imask &= ~INT_MASK_DATA_TRAN; | ||
| 377 | if ( cmdat & CMD_DAT_CONT_WRITE ) | ||
| 378 | imask &= ~INT_MASK_WRITE_OP_DONE; | ||
| 379 | if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) | ||
| 380 | imask &= ~INT_MASK_BUF_READY; | ||
| 381 | } | ||
| 382 | |||
| 383 | spin_lock_irqsave(&host->lock, flags); | ||
| 384 | host->imask = imask; | ||
| 385 | MMC_INT_MASK = host->imask; | ||
| 386 | spin_unlock_irqrestore(&host->lock, flags); | ||
| 387 | |||
| 388 | dev_dbg(mmc_dev(host->mmc), "CMD%02d (0x%02x) mask set to 0x%04x\n", | ||
| 389 | cmd->opcode, cmd->opcode, imask); | ||
| 390 | |||
| 391 | imxmci_start_clock(host); | ||
| 392 | } | ||
| 393 | |||
| 394 | static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *req) | ||
| 395 | { | ||
| 396 | unsigned long flags; | ||
| 397 | |||
| 398 | spin_lock_irqsave(&host->lock, flags); | ||
| 399 | |||
| 400 | host->pending_events &= ~(IMXMCI_PEND_WAIT_RESP_m | IMXMCI_PEND_DMA_END_m | | ||
| 401 | IMXMCI_PEND_DMA_DATA_m | IMXMCI_PEND_CPU_DATA_m); | ||
| 402 | |||
| 403 | host->imask = IMXMCI_INT_MASK_DEFAULT; | ||
| 404 | MMC_INT_MASK = host->imask; | ||
| 405 | |||
| 406 | spin_unlock_irqrestore(&host->lock, flags); | ||
| 407 | |||
| 408 | if(req && req->cmd) | ||
| 409 | host->prev_cmd_code = req->cmd->opcode; | ||
| 410 | |||
| 411 | host->req = NULL; | ||
| 412 | host->cmd = NULL; | ||
| 413 | host->data = NULL; | ||
| 414 | mmc_request_done(host->mmc, req); | ||
| 415 | } | ||
| 416 | |||
| 417 | static int imxmci_finish_data(struct imxmci_host *host, unsigned int stat) | ||
| 418 | { | ||
| 419 | struct mmc_data *data = host->data; | ||
| 420 | int data_error; | ||
| 421 | |||
| 422 | if(test_and_clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)){ | ||
| 423 | imx_dma_disable(host->dma); | ||
| 424 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents, | ||
| 425 | host->dma_dir); | ||
| 426 | } | ||
| 427 | |||
| 428 | if ( stat & STATUS_ERR_MASK ) { | ||
| 429 | dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",stat); | ||
| 430 | if(stat & (STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR)) | ||
| 431 | data->error = MMC_ERR_BADCRC; | ||
| 432 | else if(stat & STATUS_TIME_OUT_READ) | ||
| 433 | data->error = MMC_ERR_TIMEOUT; | ||
| 434 | else | ||
| 435 | data->error = MMC_ERR_FAILED; | ||
| 436 | } else { | ||
| 437 | data->bytes_xfered = host->dma_size; | ||
| 438 | } | ||
| 439 | |||
| 440 | data_error = data->error; | ||
| 441 | |||
| 442 | host->data = NULL; | ||
| 443 | |||
| 444 | return data_error; | ||
| 445 | } | ||
| 446 | |||
| 447 | static int imxmci_cmd_done(struct imxmci_host *host, unsigned int stat) | ||
| 448 | { | ||
| 449 | struct mmc_command *cmd = host->cmd; | ||
| 450 | int i; | ||
| 451 | u32 a,b,c; | ||
| 452 | struct mmc_data *data = host->data; | ||
| 453 | |||
| 454 | if (!cmd) | ||
| 455 | return 0; | ||
| 456 | |||
| 457 | host->cmd = NULL; | ||
| 458 | |||
| 459 | if (stat & STATUS_TIME_OUT_RESP) { | ||
| 460 | dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n"); | ||
| 461 | cmd->error = MMC_ERR_TIMEOUT; | ||
| 462 | } else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) { | ||
| 463 | dev_dbg(mmc_dev(host->mmc), "cmd crc error\n"); | ||
| 464 | cmd->error = MMC_ERR_BADCRC; | ||
| 465 | } | ||
| 466 | |||
| 467 | if(cmd->flags & MMC_RSP_PRESENT) { | ||
| 468 | if(cmd->flags & MMC_RSP_136) { | ||
| 469 | for (i = 0; i < 4; i++) { | ||
| 470 | u32 a = MMC_RES_FIFO & 0xffff; | ||
| 471 | u32 b = MMC_RES_FIFO & 0xffff; | ||
| 472 | cmd->resp[i] = a<<16 | b; | ||
| 473 | } | ||
| 474 | } else { | ||
| 475 | a = MMC_RES_FIFO & 0xffff; | ||
| 476 | b = MMC_RES_FIFO & 0xffff; | ||
| 477 | c = MMC_RES_FIFO & 0xffff; | ||
| 478 | cmd->resp[0] = a<<24 | b<<8 | c>>8; | ||
| 479 | } | ||
| 480 | } | ||
| 481 | |||
| 482 | dev_dbg(mmc_dev(host->mmc), "RESP 0x%08x, 0x%08x, 0x%08x, 0x%08x, error %d\n", | ||
| 483 | cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3], cmd->error); | ||
| 484 | |||
| 485 | if (data && (cmd->error == MMC_ERR_NONE) && !(stat & STATUS_ERR_MASK)) { | ||
| 486 | if (host->req->data->flags & MMC_DATA_WRITE) { | ||
| 487 | |||
| 488 | /* Wait for FIFO to be empty before starting DMA write */ | ||
| 489 | |||
| 490 | stat = MMC_STATUS; | ||
| 491 | if(imxmci_busy_wait_for_status(host, &stat, | ||
| 492 | STATUS_APPL_BUFF_FE, | ||
| 493 | 40, "imxmci_cmd_done DMA WR") < 0) { | ||
| 494 | cmd->error = MMC_ERR_FIFO; | ||
| 495 | imxmci_finish_data(host, stat); | ||
| 496 | if(host->req) | ||
| 497 | imxmci_finish_request(host, host->req); | ||
| 498 | dev_warn(mmc_dev(host->mmc), "STATUS = 0x%04x\n", | ||
| 499 | stat); | ||
| 500 | return 0; | ||
| 501 | } | ||
| 502 | |||
| 503 | if(test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) { | ||
| 504 | imx_dma_enable(host->dma); | ||
| 505 | } | ||
| 506 | } | ||
| 507 | } else { | ||
| 508 | struct mmc_request *req; | ||
| 509 | imxmci_stop_clock(host); | ||
| 510 | req = host->req; | ||
| 511 | |||
| 512 | if(data) | ||
| 513 | imxmci_finish_data(host, stat); | ||
| 514 | |||
| 515 | if( req ) { | ||
| 516 | imxmci_finish_request(host, req); | ||
| 517 | } else { | ||
| 518 | dev_warn(mmc_dev(host->mmc), "imxmci_cmd_done: no request to finish\n"); | ||
| 519 | } | ||
| 520 | } | ||
| 521 | |||
| 522 | return 1; | ||
| 523 | } | ||
| 524 | |||
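As an aside, the response unpacking in imxmci_cmd_done() is easy to misread, so here is a minimal, standalone sketch (not part of the driver) of how the three 16-bit MMC_RES_FIFO reads for a short 48-bit response collapse into the 32-bit resp[0]; the word values are hypothetical and only illustrate the a<<24 | b<<8 | c>>8 packing.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical R1 response words, as MMC_RES_FIFO would return them:
	 * a = start/dir bits + command index (e.g. CMD17) + status[31:24],
	 * b = status[23:8], c = status[7:0] + a placeholder CRC7/end byte. */
	uint32_t a = 0x5100, b = 0x0009, c = 0x0012;

	/* Same packing as imxmci_cmd_done(): keep only the 32-bit argument field. */
	uint32_t resp0 = a << 24 | b << 8 | c >> 8;

	printf("resp[0] = 0x%08x\n", resp0);	/* prints 0x00000900 */
	return 0;
}

The shift by 24 discards the start/direction bits and command index carried in the upper byte of the first FIFO word, and c >> 8 drops the CRC/end byte, leaving just the card status.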
| 525 | static int imxmci_data_done(struct imxmci_host *host, unsigned int stat) | ||
| 526 | { | ||
| 527 | struct mmc_data *data = host->data; | ||
| 528 | int data_error; | ||
| 529 | |||
| 530 | if (!data) | ||
| 531 | return 0; | ||
| 532 | |||
| 533 | data_error = imxmci_finish_data(host, stat); | ||
| 534 | |||
| 535 | if (host->req->stop) { | ||
| 536 | imxmci_stop_clock(host); | ||
| 537 | imxmci_start_cmd(host, host->req->stop, 0); | ||
| 538 | } else { | ||
| 539 | struct mmc_request *req; | ||
| 540 | req = host->req; | ||
| 541 | if( req ) { | ||
| 542 | imxmci_finish_request(host, req); | ||
| 543 | } else { | ||
| 544 | dev_warn(mmc_dev(host->mmc), "imxmci_data_done: no request to finish\n"); | ||
| 545 | } | ||
| 546 | } | ||
| 547 | |||
| 548 | return 1; | ||
| 549 | } | ||
| 550 | |||
| 551 | static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat) | ||
| 552 | { | ||
| 553 | int i; | ||
| 554 | int burst_len; | ||
| 555 | int trans_done = 0; | ||
| 556 | unsigned int stat = *pstat; | ||
| 557 | |||
| 558 | if(host->actual_bus_width != MMC_BUS_WIDTH_4) | ||
| 559 | burst_len = 16; | ||
| 560 | else | ||
| 561 | burst_len = 64; | ||
| 562 | |||
| 563 | /* This is unfortunately required */ | ||
| 564 | dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n", | ||
| 565 | stat); | ||
| 566 | |||
| 567 | udelay(20); /* required for clocks < 8MHz*/ | ||
| 568 | |||
| 569 | if(host->dma_dir == DMA_FROM_DEVICE) { | ||
| 570 | imxmci_busy_wait_for_status(host, &stat, | ||
| 571 | STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE | | ||
| 572 | STATUS_TIME_OUT_READ, | ||
| 573 | 50, "imxmci_cpu_driven_data read"); | ||
| 574 | |||
| 575 | while((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) && | ||
| 576 | !(stat & STATUS_TIME_OUT_READ) && | ||
| 577 | (host->data_cnt < 512)) { | ||
| 578 | |||
| 579 | udelay(20); /* required for clocks < 8MHz*/ | ||
| 580 | |||
| 581 | for(i = burst_len; i>=2 ; i-=2) { | ||
| 582 | u16 data; | ||
| 583 | data = MMC_BUFFER_ACCESS; | ||
| 584 | udelay(10); /* required for clocks < 8MHz*/ | ||
| 585 | if(host->data_cnt+2 <= host->dma_size) { | ||
| 586 | *(host->data_ptr++) = data; | ||
| 587 | } else { | ||
| 588 | if(host->data_cnt < host->dma_size) | ||
| 589 | *(u8*)(host->data_ptr) = data; | ||
| 590 | } | ||
| 591 | host->data_cnt += 2; | ||
| 592 | } | ||
| 593 | |||
| 594 | stat = MMC_STATUS; | ||
| 595 | |||
| 596 | dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n", | ||
| 597 | host->data_cnt, burst_len, stat); | ||
| 598 | } | ||
| 599 | |||
| 600 | if((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512)) | ||
| 601 | trans_done = 1; | ||
| 602 | |||
| 603 | if(host->dma_size & 0x1ff) | ||
| 604 | stat &= ~STATUS_CRC_READ_ERR; | ||
| 605 | |||
| 606 | if(stat & STATUS_TIME_OUT_READ) { | ||
| 607 | dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read timeout STATUS = 0x%x\n", | ||
| 608 | stat); | ||
| 609 | trans_done = -1; | ||
| 610 | } | ||
| 611 | |||
| 612 | } else { | ||
| 613 | imxmci_busy_wait_for_status(host, &stat, | ||
| 614 | STATUS_APPL_BUFF_FE, | ||
| 615 | 20, "imxmci_cpu_driven_data write"); | ||
| 616 | |||
| 617 | while((stat & STATUS_APPL_BUFF_FE) && | ||
| 618 | (host->data_cnt < host->dma_size)) { | ||
| 619 | if(burst_len >= host->dma_size - host->data_cnt) { | ||
| 620 | burst_len = host->dma_size - host->data_cnt; | ||
| 621 | host->data_cnt = host->dma_size; | ||
| 622 | trans_done = 1; | ||
| 623 | } else { | ||
| 624 | host->data_cnt += burst_len; | ||
| 625 | } | ||
| 626 | |||
| 627 | for(i = burst_len; i>0 ; i-=2) | ||
| 628 | MMC_BUFFER_ACCESS = *(host->data_ptr++); | ||
| 629 | |||
| 630 | stat = MMC_STATUS; | ||
| 631 | |||
| 632 | dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data write burst %d STATUS = 0x%x\n", | ||
| 633 | burst_len, stat); | ||
| 634 | } | ||
| 635 | } | ||
| 636 | |||
| 637 | *pstat = stat; | ||
| 638 | |||
| 639 | return trans_done; | ||
| 640 | } | ||
| 641 | |||
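To make the small-transfer workaround concrete: for something like an 8-byte SCR read, imxmci_setup_data() still programs the controller for a full 512-byte block (the MMC_NOB/MMC_BLK_LEN hack), and the CPU read loop above drains all of it while storing only dma_size bytes. A minimal standalone sketch of that pattern follows; the fake FIFO stands in for MMC_BUFFER_ACCESS and all names here are hypothetical.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for MMC_BUFFER_ACCESS: returns successive 16-bit FIFO words. */
static uint16_t fake_fifo_read(void)
{
	static uint16_t n;
	return n++;
}

int main(void)
{
	uint16_t buf[4];			/* 8-byte SCR destination */
	unsigned int dma_size = sizeof(buf);
	unsigned int data_cnt = 0;
	uint16_t *data_ptr = buf;

	/* Drain the whole 512-byte block the controller was programmed for,
	 * but keep only the first dma_size bytes, as the read loop above does. */
	while (data_cnt < 512) {
		uint16_t data = fake_fifo_read();

		if (data_cnt + 2 <= dma_size)
			*data_ptr++ = data;
		data_cnt += 2;
	}

	printf("stored %u of %u bytes\n", dma_size, data_cnt);
	return 0;
}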
| 642 | static void imxmci_dma_irq(int dma, void *devid) | ||
| 643 | { | ||
| 644 | struct imxmci_host *host = devid; | ||
| 645 | uint32_t stat = MMC_STATUS; | ||
| 646 | |||
| 647 | atomic_set(&host->stuck_timeout, 0); | ||
| 648 | host->status_reg = stat; | ||
| 649 | set_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events); | ||
| 650 | tasklet_schedule(&host->tasklet); | ||
| 651 | } | ||
| 652 | |||
| 653 | static irqreturn_t imxmci_irq(int irq, void *devid) | ||
| 654 | { | ||
| 655 | struct imxmci_host *host = devid; | ||
| 656 | uint32_t stat = MMC_STATUS; | ||
| 657 | int handled = 1; | ||
| 658 | |||
| 659 | MMC_INT_MASK = host->imask | INT_MASK_SDIO | INT_MASK_AUTO_CARD_DETECT; | ||
| 660 | |||
| 661 | atomic_set(&host->stuck_timeout, 0); | ||
| 662 | host->status_reg = stat; | ||
| 663 | set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events); | ||
| 664 | set_bit(IMXMCI_PEND_STARTED_b, &host->pending_events); | ||
| 665 | tasklet_schedule(&host->tasklet); | ||
| 666 | |||
| 667 | return IRQ_RETVAL(handled); | ||
| 668 | } | ||
| 669 | |||
| 670 | static void imxmci_tasklet_fnc(unsigned long data) | ||
| 671 | { | ||
| 672 | struct imxmci_host *host = (struct imxmci_host *)data; | ||
| 673 | u32 stat; | ||
| 674 | unsigned int data_dir_mask = 0; /* STATUS_WR_CRC_ERROR_CODE_MASK */ | ||
| 675 | int timeout = 0; | ||
| 676 | |||
| 677 | if(atomic_read(&host->stuck_timeout) > 4) { | ||
| 678 | char *what; | ||
| 679 | timeout = 1; | ||
| 680 | stat = MMC_STATUS; | ||
| 681 | host->status_reg = stat; | ||
| 682 | if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) | ||
| 683 | if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) | ||
| 684 | what = "RESP+DMA"; | ||
| 685 | else | ||
| 686 | what = "RESP"; | ||
| 687 | else | ||
| 688 | if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) | ||
| 689 | if(test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events)) | ||
| 690 | what = "DATA"; | ||
| 691 | else | ||
| 692 | what = "DMA"; | ||
| 693 | else | ||
| 694 | what = "???"; | ||
| 695 | |||
| 696 | dev_err(mmc_dev(host->mmc), "%s TIMEOUT, hardware stuck STATUS = 0x%04x IMASK = 0x%04x\n", | ||
| 697 | what, stat, MMC_INT_MASK); | ||
| 698 | dev_err(mmc_dev(host->mmc), "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n", | ||
| 699 | MMC_CMD_DAT_CONT, MMC_BLK_LEN, MMC_NOB, CCR(host->dma)); | ||
| 700 | dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n", | ||
| 701 | host->cmd?host->cmd->opcode:0, host->prev_cmd_code, 1<<host->actual_bus_width, host->dma_size); | ||
| 702 | } | ||
| 703 | |||
| 704 | if(!host->present || timeout) | ||
| 705 | host->status_reg = STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ | | ||
| 706 | STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR; | ||
| 707 | |||
| 708 | if(test_bit(IMXMCI_PEND_IRQ_b, &host->pending_events) || timeout) { | ||
| 709 | clear_bit(IMXMCI_PEND_IRQ_b, &host->pending_events); | ||
| 710 | |||
| 711 | stat = MMC_STATUS; | ||
| 712 | /* | ||
| 713 | * This is not required in theory, but there is a chance of missing some flag | ||
| 714 | * which is cleared automatically by the mask write; Freescale's original code keeps | ||
| 715 | * stat from IRQ time, so do we | ||
| 716 | */ | ||
| 717 | stat |= host->status_reg; | ||
| 718 | |||
| 719 | if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) | ||
| 720 | stat &= ~STATUS_CRC_READ_ERR; | ||
| 721 | |||
| 722 | if(test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) { | ||
| 723 | imxmci_busy_wait_for_status(host, &stat, | ||
| 724 | STATUS_END_CMD_RESP | STATUS_ERR_MASK, | ||
| 725 | 20, "imxmci_tasklet_fnc resp (ERRATUM #4)"); | ||
| 726 | } | ||
| 727 | |||
| 728 | if(stat & (STATUS_END_CMD_RESP | STATUS_ERR_MASK)) { | ||
| 729 | if(test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) | ||
| 730 | imxmci_cmd_done(host, stat); | ||
| 731 | if(host->data && (stat & STATUS_ERR_MASK)) | ||
| 732 | imxmci_data_done(host, stat); | ||
| 733 | } | ||
| 734 | |||
| 735 | if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) { | ||
| 736 | stat |= MMC_STATUS; | ||
| 737 | if(imxmci_cpu_driven_data(host, &stat)){ | ||
| 738 | if(test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) | ||
| 739 | imxmci_cmd_done(host, stat); | ||
| 740 | atomic_clear_mask(IMXMCI_PEND_IRQ_m|IMXMCI_PEND_CPU_DATA_m, | ||
| 741 | &host->pending_events); | ||
| 742 | imxmci_data_done(host, stat); | ||
| 743 | } | ||
| 744 | } | ||
| 745 | } | ||
| 746 | |||
| 747 | if(test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events) && | ||
| 748 | !test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) { | ||
| 749 | |||
| 750 | stat = MMC_STATUS; | ||
| 751 | /* Same as above */ | ||
| 752 | stat |= host->status_reg; | ||
| 753 | |||
| 754 | if(host->dma_dir == DMA_TO_DEVICE) { | ||
| 755 | data_dir_mask = STATUS_WRITE_OP_DONE; | ||
| 756 | } else { | ||
| 757 | data_dir_mask = STATUS_DATA_TRANS_DONE; | ||
| 758 | } | ||
| 759 | |||
| 760 | if(stat & data_dir_mask) { | ||
| 761 | clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events); | ||
| 762 | imxmci_data_done(host, stat); | ||
| 763 | } | ||
| 764 | } | ||
| 765 | |||
| 766 | if(test_and_clear_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events)) { | ||
| 767 | |||
| 768 | if(host->cmd) | ||
| 769 | imxmci_cmd_done(host, STATUS_TIME_OUT_RESP); | ||
| 770 | |||
| 771 | if(host->data) | ||
| 772 | imxmci_data_done(host, STATUS_TIME_OUT_READ | | ||
| 773 | STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR); | ||
| 774 | |||
| 775 | if(host->req) | ||
| 776 | imxmci_finish_request(host, host->req); | ||
| 777 | |||
| 778 | mmc_detect_change(host->mmc, msecs_to_jiffies(100)); | ||
| 779 | |||
| 780 | } | ||
| 781 | } | ||
| 782 | |||
| 783 | static void imxmci_request(struct mmc_host *mmc, struct mmc_request *req) | ||
| 784 | { | ||
| 785 | struct imxmci_host *host = mmc_priv(mmc); | ||
| 786 | unsigned int cmdat; | ||
| 787 | |||
| 788 | WARN_ON(host->req != NULL); | ||
| 789 | |||
| 790 | host->req = req; | ||
| 791 | |||
| 792 | cmdat = 0; | ||
| 793 | |||
| 794 | if (req->data) { | ||
| 795 | imxmci_setup_data(host, req->data); | ||
| 796 | |||
| 797 | cmdat |= CMD_DAT_CONT_DATA_ENABLE; | ||
| 798 | |||
| 799 | if (req->data->flags & MMC_DATA_WRITE) | ||
| 800 | cmdat |= CMD_DAT_CONT_WRITE; | ||
| 801 | |||
| 802 | if (req->data->flags & MMC_DATA_STREAM) { | ||
| 803 | cmdat |= CMD_DAT_CONT_STREAM_BLOCK; | ||
| 804 | } | ||
| 805 | } | ||
| 806 | |||
| 807 | imxmci_start_cmd(host, req->cmd, cmdat); | ||
| 808 | } | ||
| 809 | |||
| 810 | #define CLK_RATE 19200000 | ||
| 811 | |||
| 812 | static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | ||
| 813 | { | ||
| 814 | struct imxmci_host *host = mmc_priv(mmc); | ||
| 815 | int prescaler; | ||
| 816 | |||
| 817 | if( ios->bus_width==MMC_BUS_WIDTH_4 ) { | ||
| 818 | host->actual_bus_width = MMC_BUS_WIDTH_4; | ||
| 819 | imx_gpio_mode(PB11_PF_SD_DAT3); | ||
| 820 | }else{ | ||
| 821 | host->actual_bus_width = MMC_BUS_WIDTH_1; | ||
| 822 | imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11); | ||
| 823 | } | ||
| 824 | |||
| 825 | if ( host->power_mode != ios->power_mode ) { | ||
| 826 | switch (ios->power_mode) { | ||
| 827 | case MMC_POWER_OFF: | ||
| 828 | break; | ||
| 829 | case MMC_POWER_UP: | ||
| 830 | set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events); | ||
| 831 | break; | ||
| 832 | case MMC_POWER_ON: | ||
| 833 | break; | ||
| 834 | } | ||
| 835 | host->power_mode = ios->power_mode; | ||
| 836 | } | ||
| 837 | |||
| 838 | if ( ios->clock ) { | ||
| 839 | unsigned int clk; | ||
| 840 | |||
| 841 | /* The prescaler is 5 for PERCLK2 equal to 96 MHz, | ||
| 842 | * so 96 MHz / 5 = 19.2 MHz | ||
| 843 | */ | ||
| 844 | clk=imx_get_perclk2(); | ||
| 845 | prescaler=(clk+(CLK_RATE*7)/8)/CLK_RATE; | ||
| 846 | switch(prescaler) { | ||
| 847 | case 0: | ||
| 848 | case 1: prescaler = 0; | ||
| 849 | break; | ||
| 850 | case 2: prescaler = 1; | ||
| 851 | break; | ||
| 852 | case 3: prescaler = 2; | ||
| 853 | break; | ||
| 854 | case 4: prescaler = 4; | ||
| 855 | break; | ||
| 856 | default: | ||
| 857 | case 5: prescaler = 5; | ||
| 858 | break; | ||
| 859 | } | ||
| 860 | |||
| 861 | dev_dbg(mmc_dev(host->mmc), "PERCLK2 %d MHz -> prescaler %d\n", | ||
| 862 | clk, prescaler); | ||
| 863 | |||
| 864 | for(clk=0; clk<8; clk++) { | ||
| 865 | int x; | ||
| 866 | x = CLK_RATE / (1<<clk); | ||
| 867 | if( x <= ios->clock) | ||
| 868 | break; | ||
| 869 | } | ||
| 870 | |||
| 871 | MMC_STR_STP_CLK |= STR_STP_CLK_ENABLE; /* enable controller */ | ||
| 872 | |||
| 873 | imxmci_stop_clock(host); | ||
| 874 | MMC_CLK_RATE = (prescaler<<3) | clk; | ||
| 875 | /* | ||
| 876 | * As far as I understand, the clock should not be started here, because it would | ||
| 877 | * start the SDHC sequencer and send the last or a random command to the card | ||
| 878 | */ | ||
| 879 | /*imxmci_start_clock(host);*/ | ||
| 880 | |||
| 881 | dev_dbg(mmc_dev(host->mmc), "MMC_CLK_RATE: 0x%08x\n", MMC_CLK_RATE); | ||
| 882 | } else { | ||
| 883 | imxmci_stop_clock(host); | ||
| 884 | } | ||
| 885 | } | ||
| 886 | |||
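A minimal standalone sketch (not part of the driver) of the clock arithmetic in imxmci_set_ios(), assuming PERCLK2 = 96 MHz as in the comment: the (CLK_RATE*7)/8 term biases the integer division so 96 MHz maps to prescaler 5 (the 19.2 MHz base rate), and the loop then picks the first power-of-two divider whose rate does not exceed the requested clock (150 kHz here, the driver's f_min).

#include <stdio.h>

#define CLK_RATE 19200000

int main(void)
{
	unsigned int perclk2 = 96000000;	/* hypothetical PERCLK2, from the comment */
	unsigned int target = 150000;		/* example request: mmc->f_min from probe */
	int prescaler, clk;

	/* Same rounded division as imxmci_set_ios(). */
	prescaler = (perclk2 + (CLK_RATE * 7) / 8) / CLK_RATE;	/* -> 5 */

	/* First power-of-two divider whose rate does not exceed the target. */
	for (clk = 0; clk < 8; clk++)
		if (CLK_RATE / (1 << clk) <= target)
			break;

	printf("prescaler %d, divider 1<<%d -> %d Hz\n",
	       prescaler, clk, CLK_RATE / (1 << clk));	/* 5, 1<<7 -> 150000 Hz */
	return 0;
}

Note that the driver then remaps the raw prescaler value through its switch statement before writing MMC_CLK_RATE; the sketch only shows the rounding and divider selection.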
| 887 | static const struct mmc_host_ops imxmci_ops = { | ||
| 888 | .request = imxmci_request, | ||
| 889 | .set_ios = imxmci_set_ios, | ||
| 890 | }; | ||
| 891 | |||
| 892 | static struct resource *platform_device_resource(struct platform_device *dev, unsigned int mask, int nr) | ||
| 893 | { | ||
| 894 | int i; | ||
| 895 | |||
| 896 | for (i = 0; i < dev->num_resources; i++) | ||
| 897 | if (dev->resource[i].flags == mask && nr-- == 0) | ||
| 898 | return &dev->resource[i]; | ||
| 899 | return NULL; | ||
| 900 | } | ||
| 901 | |||
| 902 | static int platform_device_irq(struct platform_device *dev, int nr) | ||
| 903 | { | ||
| 904 | int i; | ||
| 905 | |||
| 906 | for (i = 0; i < dev->num_resources; i++) | ||
| 907 | if (dev->resource[i].flags == IORESOURCE_IRQ && nr-- == 0) | ||
| 908 | return dev->resource[i].start; | ||
| 909 | return NO_IRQ; | ||
| 910 | } | ||
| 911 | |||
| 912 | static void imxmci_check_status(unsigned long data) | ||
| 913 | { | ||
| 914 | struct imxmci_host *host = (struct imxmci_host *)data; | ||
| 915 | |||
| 916 | if( host->pdata->card_present() != host->present ) { | ||
| 917 | host->present ^= 1; | ||
| 918 | dev_info(mmc_dev(host->mmc), "card %s\n", | ||
| 919 | host->present ? "inserted" : "removed"); | ||
| 920 | |||
| 921 | set_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events); | ||
| 922 | tasklet_schedule(&host->tasklet); | ||
| 923 | } | ||
| 924 | |||
| 925 | if(test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events) || | ||
| 926 | test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) { | ||
| 927 | atomic_inc(&host->stuck_timeout); | ||
| 928 | if(atomic_read(&host->stuck_timeout) > 4) | ||
| 929 | tasklet_schedule(&host->tasklet); | ||
| 930 | } else { | ||
| 931 | atomic_set(&host->stuck_timeout, 0); | ||
| 932 | |||
| 933 | } | ||
| 934 | |||
| 935 | mod_timer(&host->timer, jiffies + (HZ>>1)); | ||
| 936 | } | ||
| 937 | |||
| 938 | static int imxmci_probe(struct platform_device *pdev) | ||
| 939 | { | ||
| 940 | struct mmc_host *mmc; | ||
| 941 | struct imxmci_host *host = NULL; | ||
| 942 | struct resource *r; | ||
| 943 | int ret = 0, irq; | ||
| 944 | |||
| 945 | printk(KERN_INFO "i.MX mmc driver\n"); | ||
| 946 | |||
| 947 | r = platform_device_resource(pdev, IORESOURCE_MEM, 0); | ||
| 948 | irq = platform_device_irq(pdev, 0); | ||
| 949 | if (!r || irq == NO_IRQ) | ||
| 950 | return -ENXIO; | ||
| 951 | |||
| 952 | r = request_mem_region(r->start, 0x100, "IMXMCI"); | ||
| 953 | if (!r) | ||
| 954 | return -EBUSY; | ||
| 955 | |||
| 956 | mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev); | ||
| 957 | if (!mmc) { | ||
| 958 | ret = -ENOMEM; | ||
| 959 | goto out; | ||
| 960 | } | ||
| 961 | |||
| 962 | mmc->ops = &imxmci_ops; | ||
| 963 | mmc->f_min = 150000; | ||
| 964 | mmc->f_max = CLK_RATE/2; | ||
| 965 | mmc->ocr_avail = MMC_VDD_32_33; | ||
| 966 | mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_BYTEBLOCK; | ||
| 967 | |||
| 968 | /* MMC core transfer sizes tunable parameters */ | ||
| 969 | mmc->max_hw_segs = 64; | ||
| 970 | mmc->max_phys_segs = 64; | ||
| 971 | mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */ | ||
| 972 | mmc->max_req_size = 64*512; /* default PAGE_CACHE_SIZE */ | ||
| 973 | mmc->max_blk_size = 2048; | ||
| 974 | mmc->max_blk_count = 65535; | ||
| 975 | |||
| 976 | host = mmc_priv(mmc); | ||
| 977 | host->mmc = mmc; | ||
| 978 | host->dma_allocated = 0; | ||
| 979 | host->pdata = pdev->dev.platform_data; | ||
| 980 | |||
| 981 | spin_lock_init(&host->lock); | ||
| 982 | host->res = r; | ||
| 983 | host->irq = irq; | ||
| 984 | |||
| 985 | imx_gpio_mode(PB8_PF_SD_DAT0); | ||
| 986 | imx_gpio_mode(PB9_PF_SD_DAT1); | ||
| 987 | imx_gpio_mode(PB10_PF_SD_DAT2); | ||
| 988 | /* Configured as GPIO with pull-up to ensure right MMC card mode */ | ||
| 989 | /* Switched to PB11_PF_SD_DAT3 if 4 bit bus is configured */ | ||
| 990 | imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11); | ||
| 991 | /* imx_gpio_mode(PB11_PF_SD_DAT3); */ | ||
| 992 | imx_gpio_mode(PB12_PF_SD_CLK); | ||
| 993 | imx_gpio_mode(PB13_PF_SD_CMD); | ||
| 994 | |||
| 995 | imxmci_softreset(); | ||
| 996 | |||
| 997 | if ( MMC_REV_NO != 0x390 ) { | ||
| 998 | dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n", | ||
| 999 | MMC_REV_NO); | ||
| 1000 | goto out; | ||
| 1001 | } | ||
| 1002 | |||
| 1003 | MMC_READ_TO = 0x2db4; /* recommended in data sheet */ | ||
| 1004 | |||
| 1005 | host->imask = IMXMCI_INT_MASK_DEFAULT; | ||
| 1006 | MMC_INT_MASK = host->imask; | ||
| 1007 | |||
| 1008 | |||
| 1009 | if(imx_dma_request_by_prio(&host->dma, DRIVER_NAME, DMA_PRIO_LOW)<0){ | ||
| 1010 | dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n"); | ||
| 1011 | ret = -EBUSY; | ||
| 1012 | goto out; | ||
| 1013 | } | ||
| 1014 | host->dma_allocated=1; | ||
| 1015 | imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host); | ||
| 1016 | |||
| 1017 | tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host); | ||
| 1018 | host->status_reg=0; | ||
| 1019 | host->pending_events=0; | ||
| 1020 | |||
| 1021 | ret = request_irq(host->irq, imxmci_irq, 0, DRIVER_NAME, host); | ||
| 1022 | if (ret) | ||
| 1023 | goto out; | ||
| 1024 | |||
| 1025 | host->present = host->pdata->card_present(); | ||
| 1026 | init_timer(&host->timer); | ||
| 1027 | host->timer.data = (unsigned long)host; | ||
| 1028 | host->timer.function = imxmci_check_status; | ||
| 1029 | add_timer(&host->timer); | ||
| 1030 | mod_timer(&host->timer, jiffies + (HZ>>1)); | ||
| 1031 | |||
| 1032 | platform_set_drvdata(pdev, mmc); | ||
| 1033 | |||
| 1034 | mmc_add_host(mmc); | ||
| 1035 | |||
| 1036 | return 0; | ||
| 1037 | |||
| 1038 | out: | ||
| 1039 | if (host) { | ||
| 1040 | if(host->dma_allocated){ | ||
| 1041 | imx_dma_free(host->dma); | ||
| 1042 | host->dma_allocated=0; | ||
| 1043 | } | ||
| 1044 | } | ||
| 1045 | if (mmc) | ||
| 1046 | mmc_free_host(mmc); | ||
| 1047 | release_resource(r); | ||
| 1048 | return ret; | ||
| 1049 | } | ||
| 1050 | |||
| 1051 | static int imxmci_remove(struct platform_device *pdev) | ||
| 1052 | { | ||
| 1053 | struct mmc_host *mmc = platform_get_drvdata(pdev); | ||
| 1054 | |||
| 1055 | platform_set_drvdata(pdev, NULL); | ||
| 1056 | |||
| 1057 | if (mmc) { | ||
| 1058 | struct imxmci_host *host = mmc_priv(mmc); | ||
| 1059 | |||
| 1060 | tasklet_disable(&host->tasklet); | ||
| 1061 | |||
| 1062 | del_timer_sync(&host->timer); | ||
| 1063 | mmc_remove_host(mmc); | ||
| 1064 | |||
| 1065 | free_irq(host->irq, host); | ||
| 1066 | if(host->dma_allocated){ | ||
| 1067 | imx_dma_free(host->dma); | ||
| 1068 | host->dma_allocated=0; | ||
| 1069 | } | ||
| 1070 | |||
| 1071 | tasklet_kill(&host->tasklet); | ||
| 1072 | |||
| 1073 | release_resource(host->res); | ||
| 1074 | |||
| 1075 | mmc_free_host(mmc); | ||
| 1076 | } | ||
| 1077 | return 0; | ||
| 1078 | } | ||
| 1079 | |||
| 1080 | #ifdef CONFIG_PM | ||
| 1081 | static int imxmci_suspend(struct platform_device *dev, pm_message_t state) | ||
| 1082 | { | ||
| 1083 | struct mmc_host *mmc = platform_get_drvdata(dev); | ||
| 1084 | int ret = 0; | ||
| 1085 | |||
| 1086 | if (mmc) | ||
| 1087 | ret = mmc_suspend_host(mmc, state); | ||
| 1088 | |||
| 1089 | return ret; | ||
| 1090 | } | ||
| 1091 | |||
| 1092 | static int imxmci_resume(struct platform_device *dev) | ||
| 1093 | { | ||
| 1094 | struct mmc_host *mmc = platform_get_drvdata(dev); | ||
| 1095 | struct imxmci_host *host; | ||
| 1096 | int ret = 0; | ||
| 1097 | |||
| 1098 | if (mmc) { | ||
| 1099 | host = mmc_priv(mmc); | ||
| 1100 | if(host) | ||
| 1101 | set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events); | ||
| 1102 | ret = mmc_resume_host(mmc); | ||
| 1103 | } | ||
| 1104 | |||
| 1105 | return ret; | ||
| 1106 | } | ||
| 1107 | #else | ||
| 1108 | #define imxmci_suspend NULL | ||
| 1109 | #define imxmci_resume NULL | ||
| 1110 | #endif /* CONFIG_PM */ | ||
| 1111 | |||
| 1112 | static struct platform_driver imxmci_driver = { | ||
| 1113 | .probe = imxmci_probe, | ||
| 1114 | .remove = imxmci_remove, | ||
| 1115 | .suspend = imxmci_suspend, | ||
| 1116 | .resume = imxmci_resume, | ||
| 1117 | .driver = { | ||
| 1118 | .name = DRIVER_NAME, | ||
| 1119 | } | ||
| 1120 | }; | ||
| 1121 | |||
| 1122 | static int __init imxmci_init(void) | ||
| 1123 | { | ||
| 1124 | return platform_driver_register(&imxmci_driver); | ||
| 1125 | } | ||
| 1126 | |||
| 1127 | static void __exit imxmci_exit(void) | ||
| 1128 | { | ||
| 1129 | platform_driver_unregister(&imxmci_driver); | ||
| 1130 | } | ||
| 1131 | |||
| 1132 | module_init(imxmci_init); | ||
| 1133 | module_exit(imxmci_exit); | ||
| 1134 | |||
| 1135 | MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver"); | ||
| 1136 | MODULE_AUTHOR("Sascha Hauer, Pengutronix"); | ||
| 1137 | MODULE_LICENSE("GPL"); | ||
