author     Dmitry Torokhov <dtor@insightbb.com>    2007-02-10 01:26:32 -0500
committer  Dmitry Torokhov <dtor@insightbb.com>    2007-02-10 01:26:32 -0500
commit     b22364c8eec89e6b0c081a237f3b6348df87796f
tree       233a923281fb640106465d076997ff511efb6edf /drivers/mmc
parent     2c8dc071517ec2843869024dc82be2e246f41064
parent     66efc5a7e3061c3597ac43a8bb1026488d57e66b

Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6

Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/Kconfig      |   2
-rw-r--r--  drivers/mmc/at91_mci.c   | 360
-rw-r--r--  drivers/mmc/au1xmmc.c    |  15
-rw-r--r--  drivers/mmc/imxmmc.c     |   7
-rw-r--r--  drivers/mmc/mmc.c        | 182
-rw-r--r--  drivers/mmc/mmc_block.c  |  15
-rw-r--r--  drivers/mmc/mmc_queue.c  |   6
-rw-r--r--  drivers/mmc/mmc_sysfs.c  |   2
-rw-r--r--  drivers/mmc/mmci.c       |  19
-rw-r--r--  drivers/mmc/omap.c       |  27
-rw-r--r--  drivers/mmc/pxamci.c     |  16
-rw-r--r--  drivers/mmc/sdhci.c      |  95
-rw-r--r--  drivers/mmc/sdhci.h      |   2
-rw-r--r--  drivers/mmc/tifm_sd.c    | 492
-rw-r--r--  drivers/mmc/wbsd.c       | 102
-rw-r--r--  drivers/mmc/wbsd.h       |   1

16 files changed, 802 insertions(+), 541 deletions(-)
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index 4224686fdf2a..12af9c718764 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -111,7 +111,7 @@ config MMC_IMX | |||
111 | 111 | ||
112 | config MMC_TIFM_SD | 112 | config MMC_TIFM_SD |
113 | tristate "TI Flash Media MMC/SD Interface support (EXPERIMENTAL)" | 113 | tristate "TI Flash Media MMC/SD Interface support (EXPERIMENTAL)" |
114 | depends on MMC && EXPERIMENTAL | 114 | depends on MMC && EXPERIMENTAL && PCI |
115 | select TIFM_CORE | 115 | select TIFM_CORE |
116 | help | 116 | help |
117 | Say Y here if you want to be able to access MMC/SD cards with | 117 | Say Y here if you want to be able to access MMC/SD cards with |
diff --git a/drivers/mmc/at91_mci.c b/drivers/mmc/at91_mci.c
index 4633dbc9a90f..2ce50f38e3c7 100644
--- a/drivers/mmc/at91_mci.c
+++ b/drivers/mmc/at91_mci.c
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * linux/drivers/mmc/at91_mci.c - ATMEL AT91RM9200 MCI Driver | 2 | * linux/drivers/mmc/at91_mci.c - ATMEL AT91 MCI Driver |
3 | * | 3 | * |
4 | * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved | 4 | * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved |
5 | * | 5 | * |
@@ -11,7 +11,7 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | /* | 13 | /* |
14 | This is the AT91RM9200 MCI driver that has been tested with both MMC cards | 14 | This is the AT91 MCI driver that has been tested with both MMC cards |
15 | and SD-cards. Boards that support write protect are now supported. | 15 | and SD-cards. Boards that support write protect are now supported. |
16 | The CCAT91SBC001 board does not support SD cards. | 16 | The CCAT91SBC001 board does not support SD cards. |
17 | 17 | ||
@@ -38,8 +38,8 @@ | |||
38 | controller to manage the transfers. | 38 | controller to manage the transfers. |
39 | 39 | ||
40 | A read is done from the controller directly to the scatterlist passed in from the request. | 40 | A read is done from the controller directly to the scatterlist passed in from the request. |
41 | Due to a bug in the controller, when a read is completed, all the words are byte | 41 | Due to a bug in the AT91RM9200 controller, when a read is completed, all the words are byte |
42 | swapped in the scatterlist buffers. | 42 | swapped in the scatterlist buffers. AT91SAM926x are not affected by this bug. |
43 | 43 | ||
44 | The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY | 44 | The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY |
45 | 45 | ||
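For reference, a minimal user-space sketch (not part of the patch) of the read fixup described above: on AT91RM9200 every received word must be byte-swapped, while AT91SAM926x data can be copied verbatim. The names copy_read_data and is_rm9200 are illustrative stand-ins for the kernel's cpu_is_at91rm9200()/swab32() logic shown in the hunks below.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative swab32(), mirroring the kernel helper used by the patch. */
static uint32_t swab32(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
	       ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
}

/*
 * Copy 'len' bytes of received data, undoing the AT91RM9200 word byte-swap.
 * On AT91SAM926x (is_rm9200 == 0) the data is copied unchanged.
 */
static void copy_read_data(uint32_t *dst, const uint32_t *src,
			   size_t len, int is_rm9200)
{
	if (is_rm9200) {
		size_t i;

		for (i = 0; i < len / 4; i++)
			dst[i] = swab32(src[i]);	/* errata workaround */
	} else {
		memcpy(dst, src, len);			/* no fixup needed */
	}
}

int main(void)
{
	uint32_t raw[2] = { 0x44332211u, 0x88776655u };	/* as delivered by the RM9200 */
	uint32_t out[2];

	copy_read_data(out, raw, sizeof(raw), 1);
	printf("%08x %08x\n", (unsigned)out[0], (unsigned)out[1]);	/* 11223344 55667788 */
	return 0;
}
```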
@@ -72,6 +72,7 @@ | |||
72 | #include <asm/irq.h> | 72 | #include <asm/irq.h> |
73 | #include <asm/mach/mmc.h> | 73 | #include <asm/mach/mmc.h> |
74 | #include <asm/arch/board.h> | 74 | #include <asm/arch/board.h> |
75 | #include <asm/arch/cpu.h> | ||
75 | #include <asm/arch/gpio.h> | 76 | #include <asm/arch/gpio.h> |
76 | #include <asm/arch/at91_mci.h> | 77 | #include <asm/arch/at91_mci.h> |
77 | #include <asm/arch/at91_pdc.h> | 78 | #include <asm/arch/at91_pdc.h> |
@@ -80,34 +81,18 @@ | |||
80 | 81 | ||
81 | #undef SUPPORT_4WIRE | 82 | #undef SUPPORT_4WIRE |
82 | 83 | ||
83 | static struct clk *mci_clk; | 84 | #define FL_SENT_COMMAND (1 << 0) |
85 | #define FL_SENT_STOP (1 << 1) | ||
84 | 86 | ||
85 | #define FL_SENT_COMMAND (1 << 0) | 87 | #define AT91_MCI_ERRORS (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \ |
86 | #define FL_SENT_STOP (1 << 1) | 88 | | AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \ |
89 | | AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE) | ||
87 | 90 | ||
91 | #define at91_mci_read(host, reg) __raw_readl((host)->baseaddr + (reg)) | ||
92 | #define at91_mci_write(host, reg, val) __raw_writel((val), (host)->baseaddr + (reg)) | ||
88 | 93 | ||
89 | 94 | ||
90 | /* | 95 | /* |
91 | * Read from a MCI register. | ||
92 | */ | ||
93 | static inline unsigned long at91_mci_read(unsigned int reg) | ||
94 | { | ||
95 | void __iomem *mci_base = (void __iomem *)AT91_VA_BASE_MCI; | ||
96 | |||
97 | return __raw_readl(mci_base + reg); | ||
98 | } | ||
99 | |||
100 | /* | ||
101 | * Write to a MCI register. | ||
102 | */ | ||
103 | static inline void at91_mci_write(unsigned int reg, unsigned long value) | ||
104 | { | ||
105 | void __iomem *mci_base = (void __iomem *)AT91_VA_BASE_MCI; | ||
106 | |||
107 | __raw_writel(value, mci_base + reg); | ||
108 | } | ||
109 | |||
110 | /* | ||
111 | * Low level type for this driver | 96 | * Low level type for this driver |
112 | */ | 97 | */ |
113 | struct at91mci_host | 98 | struct at91mci_host |
@@ -116,9 +101,14 @@ struct at91mci_host | |||
116 | struct mmc_command *cmd; | 101 | struct mmc_command *cmd; |
117 | struct mmc_request *request; | 102 | struct mmc_request *request; |
118 | 103 | ||
104 | void __iomem *baseaddr; | ||
105 | int irq; | ||
106 | |||
119 | struct at91_mmc_data *board; | 107 | struct at91_mmc_data *board; |
120 | int present; | 108 | int present; |
121 | 109 | ||
110 | struct clk *mci_clk; | ||
111 | |||
122 | /* | 112 | /* |
123 | * Flag indicating when the command has been sent. This is used to | 113 | * Flag indicating when the command has been sent. This is used to |
124 | * work out whether or not to send the stop | 114 | * work out whether or not to send the stop |
@@ -158,7 +148,6 @@ static inline void at91mci_sg_to_dma(struct at91mci_host *host, struct mmc_data | |||
158 | for (i = 0; i < len; i++) { | 148 | for (i = 0; i < len; i++) { |
159 | struct scatterlist *sg; | 149 | struct scatterlist *sg; |
160 | int amount; | 150 | int amount; |
161 | int index; | ||
162 | unsigned int *sgbuffer; | 151 | unsigned int *sgbuffer; |
163 | 152 | ||
164 | sg = &data->sg[i]; | 153 | sg = &data->sg[i]; |
@@ -166,10 +155,15 @@ static inline void at91mci_sg_to_dma(struct at91mci_host *host, struct mmc_data | |||
166 | sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset; | 155 | sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset; |
167 | amount = min(size, sg->length); | 156 | amount = min(size, sg->length); |
168 | size -= amount; | 157 | size -= amount; |
169 | amount /= 4; | ||
170 | 158 | ||
171 | for (index = 0; index < amount; index++) | 159 | if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */ |
172 | *dmabuf++ = swab32(sgbuffer[index]); | 160 | int index; |
161 | |||
162 | for (index = 0; index < (amount / 4); index++) | ||
163 | *dmabuf++ = swab32(sgbuffer[index]); | ||
164 | } | ||
165 | else | ||
166 | memcpy(dmabuf, sgbuffer, amount); | ||
173 | 167 | ||
174 | kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ); | 168 | kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ); |
175 | 169 | ||
@@ -217,13 +211,13 @@ static void at91mci_pre_dma_read(struct at91mci_host *host) | |||
217 | 211 | ||
218 | /* Check to see if this needs filling */ | 212 | /* Check to see if this needs filling */ |
219 | if (i == 0) { | 213 | if (i == 0) { |
220 | if (at91_mci_read(AT91_PDC_RCR) != 0) { | 214 | if (at91_mci_read(host, AT91_PDC_RCR) != 0) { |
221 | pr_debug("Transfer active in current\n"); | 215 | pr_debug("Transfer active in current\n"); |
222 | continue; | 216 | continue; |
223 | } | 217 | } |
224 | } | 218 | } |
225 | else { | 219 | else { |
226 | if (at91_mci_read(AT91_PDC_RNCR) != 0) { | 220 | if (at91_mci_read(host, AT91_PDC_RNCR) != 0) { |
227 | pr_debug("Transfer active in next\n"); | 221 | pr_debug("Transfer active in next\n"); |
228 | continue; | 222 | continue; |
229 | } | 223 | } |
@@ -240,12 +234,12 @@ static void at91mci_pre_dma_read(struct at91mci_host *host) | |||
240 | pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length); | 234 | pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length); |
241 | 235 | ||
242 | if (i == 0) { | 236 | if (i == 0) { |
243 | at91_mci_write(AT91_PDC_RPR, sg->dma_address); | 237 | at91_mci_write(host, AT91_PDC_RPR, sg->dma_address); |
244 | at91_mci_write(AT91_PDC_RCR, sg->length / 4); | 238 | at91_mci_write(host, AT91_PDC_RCR, sg->length / 4); |
245 | } | 239 | } |
246 | else { | 240 | else { |
247 | at91_mci_write(AT91_PDC_RNPR, sg->dma_address); | 241 | at91_mci_write(host, AT91_PDC_RNPR, sg->dma_address); |
248 | at91_mci_write(AT91_PDC_RNCR, sg->length / 4); | 242 | at91_mci_write(host, AT91_PDC_RNCR, sg->length / 4); |
249 | } | 243 | } |
250 | } | 244 | } |
251 | 245 | ||
@@ -276,8 +270,6 @@ static void at91mci_post_dma_read(struct at91mci_host *host) | |||
276 | 270 | ||
277 | while (host->in_use_index < host->transfer_index) { | 271 | while (host->in_use_index < host->transfer_index) { |
278 | unsigned int *buffer; | 272 | unsigned int *buffer; |
279 | int index; | ||
280 | int len; | ||
281 | 273 | ||
282 | struct scatterlist *sg; | 274 | struct scatterlist *sg; |
283 | 275 | ||
@@ -295,11 +287,13 @@ static void at91mci_post_dma_read(struct at91mci_host *host) | |||
295 | 287 | ||
296 | data->bytes_xfered += sg->length; | 288 | data->bytes_xfered += sg->length; |
297 | 289 | ||
298 | len = sg->length / 4; | 290 | if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */ |
291 | int index; | ||
299 | 292 | ||
300 | for (index = 0; index < len; index++) { | 293 | for (index = 0; index < (sg->length / 4); index++) |
301 | buffer[index] = swab32(buffer[index]); | 294 | buffer[index] = swab32(buffer[index]); |
302 | } | 295 | } |
296 | |||
303 | kunmap_atomic(buffer, KM_BIO_SRC_IRQ); | 297 | kunmap_atomic(buffer, KM_BIO_SRC_IRQ); |
304 | flush_dcache_page(sg->page); | 298 | flush_dcache_page(sg->page); |
305 | } | 299 | } |
@@ -308,8 +302,8 @@ static void at91mci_post_dma_read(struct at91mci_host *host) | |||
308 | if (host->transfer_index < data->sg_len) | 302 | if (host->transfer_index < data->sg_len) |
309 | at91mci_pre_dma_read(host); | 303 | at91mci_pre_dma_read(host); |
310 | else { | 304 | else { |
311 | at91_mci_write(AT91_MCI_IER, AT91_MCI_RXBUFF); | 305 | at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF); |
312 | at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS); | 306 | at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS); |
313 | } | 307 | } |
314 | 308 | ||
315 | pr_debug("post dma read done\n"); | 309 | pr_debug("post dma read done\n"); |
@@ -326,11 +320,11 @@ static void at91_mci_handle_transmitted(struct at91mci_host *host) | |||
326 | pr_debug("Handling the transmit\n"); | 320 | pr_debug("Handling the transmit\n"); |
327 | 321 | ||
328 | /* Disable the transfer */ | 322 | /* Disable the transfer */ |
329 | at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS); | 323 | at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS); |
330 | 324 | ||
331 | /* Now wait for cmd ready */ | 325 | /* Now wait for cmd ready */ |
332 | at91_mci_write(AT91_MCI_IDR, AT91_MCI_TXBUFE); | 326 | at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE); |
333 | at91_mci_write(AT91_MCI_IER, AT91_MCI_NOTBUSY); | 327 | at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY); |
334 | 328 | ||
335 | cmd = host->cmd; | 329 | cmd = host->cmd; |
336 | if (!cmd) return; | 330 | if (!cmd) return; |
@@ -344,21 +338,23 @@ static void at91_mci_handle_transmitted(struct at91mci_host *host) | |||
344 | /* | 338 | /* |
345 | * Enable the controller | 339 | * Enable the controller |
346 | */ | 340 | */ |
347 | static void at91_mci_enable(void) | 341 | static void at91_mci_enable(struct at91mci_host *host) |
348 | { | 342 | { |
349 | at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIEN); | 343 | at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN); |
350 | at91_mci_write(AT91_MCI_IDR, 0xFFFFFFFF); | 344 | at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); |
351 | at91_mci_write(AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC); | 345 | at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC); |
352 | at91_mci_write(AT91_MCI_MR, 0x834A); | 346 | at91_mci_write(host, AT91_MCI_MR, AT91_MCI_PDCMODE | 0x34a); |
353 | at91_mci_write(AT91_MCI_SDCR, 0x0); | 347 | |
348 | /* use Slot A or B (only one at same time) */ | ||
349 | at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b); | ||
354 | } | 350 | } |
355 | 351 | ||
356 | /* | 352 | /* |
357 | * Disable the controller | 353 | * Disable the controller |
358 | */ | 354 | */ |
359 | static void at91_mci_disable(void) | 355 | static void at91_mci_disable(struct at91mci_host *host) |
360 | { | 356 | { |
361 | at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST); | 357 | at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST); |
362 | } | 358 | } |
363 | 359 | ||
364 | /* | 360 | /* |
@@ -378,13 +374,13 @@ static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_ | |||
378 | 374 | ||
379 | /* Not sure if this is needed */ | 375 | /* Not sure if this is needed */ |
380 | #if 0 | 376 | #if 0 |
381 | if ((at91_mci_read(AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) { | 377 | if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) { |
382 | pr_debug("Clearing timeout\n"); | 378 | pr_debug("Clearing timeout\n"); |
383 | at91_mci_write(AT91_MCI_ARGR, 0); | 379 | at91_mci_write(host, AT91_MCI_ARGR, 0); |
384 | at91_mci_write(AT91_MCI_CMDR, AT91_MCI_OPDCMD); | 380 | at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD); |
385 | while (!(at91_mci_read(AT91_MCI_SR) & AT91_MCI_CMDRDY)) { | 381 | while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) { |
386 | /* spin */ | 382 | /* spin */ |
387 | pr_debug("Clearing: SR = %08X\n", at91_mci_read(AT91_MCI_SR)); | 383 | pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR)); |
388 | } | 384 | } |
389 | } | 385 | } |
390 | #endif | 386 | #endif |
@@ -431,32 +427,32 @@ static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_ | |||
431 | /* | 427 | /* |
432 | * Set the arguments and send the command | 428 | * Set the arguments and send the command |
433 | */ | 429 | */ |
434 | pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08lX)\n", | 430 | pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n", |
435 | cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(AT91_MCI_MR)); | 431 | cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR)); |
436 | 432 | ||
437 | if (!data) { | 433 | if (!data) { |
438 | at91_mci_write(AT91_PDC_PTCR, AT91_PDC_TXTDIS | AT91_PDC_RXTDIS); | 434 | at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_TXTDIS | AT91_PDC_RXTDIS); |
439 | at91_mci_write(AT91_PDC_RPR, 0); | 435 | at91_mci_write(host, AT91_PDC_RPR, 0); |
440 | at91_mci_write(AT91_PDC_RCR, 0); | 436 | at91_mci_write(host, AT91_PDC_RCR, 0); |
441 | at91_mci_write(AT91_PDC_RNPR, 0); | 437 | at91_mci_write(host, AT91_PDC_RNPR, 0); |
442 | at91_mci_write(AT91_PDC_RNCR, 0); | 438 | at91_mci_write(host, AT91_PDC_RNCR, 0); |
443 | at91_mci_write(AT91_PDC_TPR, 0); | 439 | at91_mci_write(host, AT91_PDC_TPR, 0); |
444 | at91_mci_write(AT91_PDC_TCR, 0); | 440 | at91_mci_write(host, AT91_PDC_TCR, 0); |
445 | at91_mci_write(AT91_PDC_TNPR, 0); | 441 | at91_mci_write(host, AT91_PDC_TNPR, 0); |
446 | at91_mci_write(AT91_PDC_TNCR, 0); | 442 | at91_mci_write(host, AT91_PDC_TNCR, 0); |
447 | 443 | ||
448 | at91_mci_write(AT91_MCI_ARGR, cmd->arg); | 444 | at91_mci_write(host, AT91_MCI_ARGR, cmd->arg); |
449 | at91_mci_write(AT91_MCI_CMDR, cmdr); | 445 | at91_mci_write(host, AT91_MCI_CMDR, cmdr); |
450 | return AT91_MCI_CMDRDY; | 446 | return AT91_MCI_CMDRDY; |
451 | } | 447 | } |
452 | 448 | ||
453 | mr = at91_mci_read(AT91_MCI_MR) & 0x7fff; /* zero block length and PDC mode */ | 449 | mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff; /* zero block length and PDC mode */ |
454 | at91_mci_write(AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE); | 450 | at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE); |
455 | 451 | ||
456 | /* | 452 | /* |
457 | * Disable the PDC controller | 453 | * Disable the PDC controller |
458 | */ | 454 | */ |
459 | at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS); | 455 | at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS); |
460 | 456 | ||
461 | if (cmdr & AT91_MCI_TRCMD_START) { | 457 | if (cmdr & AT91_MCI_TRCMD_START) { |
462 | data->bytes_xfered = 0; | 458 | data->bytes_xfered = 0; |
@@ -485,8 +481,8 @@ static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_ | |||
485 | 481 | ||
486 | pr_debug("Transmitting %d bytes\n", host->total_length); | 482 | pr_debug("Transmitting %d bytes\n", host->total_length); |
487 | 483 | ||
488 | at91_mci_write(AT91_PDC_TPR, host->physical_address); | 484 | at91_mci_write(host, AT91_PDC_TPR, host->physical_address); |
489 | at91_mci_write(AT91_PDC_TCR, host->total_length / 4); | 485 | at91_mci_write(host, AT91_PDC_TCR, host->total_length / 4); |
490 | ier = AT91_MCI_TXBUFE; | 486 | ier = AT91_MCI_TXBUFE; |
491 | } | 487 | } |
492 | } | 488 | } |
@@ -496,14 +492,14 @@ static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_ | |||
496 | * the data sheet says | 492 | * the data sheet says |
497 | */ | 493 | */ |
498 | 494 | ||
499 | at91_mci_write(AT91_MCI_ARGR, cmd->arg); | 495 | at91_mci_write(host, AT91_MCI_ARGR, cmd->arg); |
500 | at91_mci_write(AT91_MCI_CMDR, cmdr); | 496 | at91_mci_write(host, AT91_MCI_CMDR, cmdr); |
501 | 497 | ||
502 | if (cmdr & AT91_MCI_TRCMD_START) { | 498 | if (cmdr & AT91_MCI_TRCMD_START) { |
503 | if (cmdr & AT91_MCI_TRDIR) | 499 | if (cmdr & AT91_MCI_TRDIR) |
504 | at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTEN); | 500 | at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTEN); |
505 | else | 501 | else |
506 | at91_mci_write(AT91_PDC_PTCR, AT91_PDC_TXTEN); | 502 | at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_TXTEN); |
507 | } | 503 | } |
508 | return ier; | 504 | return ier; |
509 | } | 505 | } |
@@ -520,7 +516,7 @@ static void at91mci_process_command(struct at91mci_host *host, struct mmc_comman | |||
520 | pr_debug("setting ier to %08X\n", ier); | 516 | pr_debug("setting ier to %08X\n", ier); |
521 | 517 | ||
522 | /* Stop on errors or the required value */ | 518 | /* Stop on errors or the required value */ |
523 | at91_mci_write(AT91_MCI_IER, 0xffff0000 | ier); | 519 | at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier); |
524 | } | 520 | } |
525 | 521 | ||
526 | /* | 522 | /* |
@@ -548,19 +544,19 @@ static void at91mci_completed_command(struct at91mci_host *host) | |||
548 | struct mmc_command *cmd = host->cmd; | 544 | struct mmc_command *cmd = host->cmd; |
549 | unsigned int status; | 545 | unsigned int status; |
550 | 546 | ||
551 | at91_mci_write(AT91_MCI_IDR, 0xffffffff); | 547 | at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); |
552 | 548 | ||
553 | cmd->resp[0] = at91_mci_read(AT91_MCI_RSPR(0)); | 549 | cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0)); |
554 | cmd->resp[1] = at91_mci_read(AT91_MCI_RSPR(1)); | 550 | cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1)); |
555 | cmd->resp[2] = at91_mci_read(AT91_MCI_RSPR(2)); | 551 | cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2)); |
556 | cmd->resp[3] = at91_mci_read(AT91_MCI_RSPR(3)); | 552 | cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3)); |
557 | 553 | ||
558 | if (host->buffer) { | 554 | if (host->buffer) { |
559 | dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address); | 555 | dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address); |
560 | host->buffer = NULL; | 556 | host->buffer = NULL; |
561 | } | 557 | } |
562 | 558 | ||
563 | status = at91_mci_read(AT91_MCI_SR); | 559 | status = at91_mci_read(host, AT91_MCI_SR); |
564 | 560 | ||
565 | pr_debug("Status = %08X [%08X %08X %08X %08X]\n", | 561 | pr_debug("Status = %08X [%08X %08X %08X %08X]\n", |
566 | status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]); | 562 | status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]); |
@@ -611,18 +607,18 @@ static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
611 | { | 607 | { |
612 | int clkdiv; | 608 | int clkdiv; |
613 | struct at91mci_host *host = mmc_priv(mmc); | 609 | struct at91mci_host *host = mmc_priv(mmc); |
614 | unsigned long at91_master_clock = clk_get_rate(mci_clk); | 610 | unsigned long at91_master_clock = clk_get_rate(host->mci_clk); |
615 | 611 | ||
616 | host->bus_mode = ios->bus_mode; | 612 | host->bus_mode = ios->bus_mode; |
617 | 613 | ||
618 | if (ios->clock == 0) { | 614 | if (ios->clock == 0) { |
619 | /* Disable the MCI controller */ | 615 | /* Disable the MCI controller */ |
620 | at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIDIS); | 616 | at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS); |
621 | clkdiv = 0; | 617 | clkdiv = 0; |
622 | } | 618 | } |
623 | else { | 619 | else { |
624 | /* Enable the MCI controller */ | 620 | /* Enable the MCI controller */ |
625 | at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIEN); | 621 | at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN); |
626 | 622 | ||
627 | if ((at91_master_clock % (ios->clock * 2)) == 0) | 623 | if ((at91_master_clock % (ios->clock * 2)) == 0) |
628 | clkdiv = ((at91_master_clock / ios->clock) / 2) - 1; | 624 | clkdiv = ((at91_master_clock / ios->clock) / 2) - 1; |
@@ -634,25 +630,25 @@ static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
634 | } | 630 | } |
635 | if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) { | 631 | if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) { |
636 | pr_debug("MMC: Setting controller bus width to 4\n"); | 632 | pr_debug("MMC: Setting controller bus width to 4\n"); |
637 | at91_mci_write(AT91_MCI_SDCR, at91_mci_read(AT91_MCI_SDCR) | AT91_MCI_SDCBUS); | 633 | at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS); |
638 | } | 634 | } |
639 | else { | 635 | else { |
640 | pr_debug("MMC: Setting controller bus width to 1\n"); | 636 | pr_debug("MMC: Setting controller bus width to 1\n"); |
641 | at91_mci_write(AT91_MCI_SDCR, at91_mci_read(AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS); | 637 | at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS); |
642 | } | 638 | } |
643 | 639 | ||
644 | /* Set the clock divider */ | 640 | /* Set the clock divider */ |
645 | at91_mci_write(AT91_MCI_MR, (at91_mci_read(AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv); | 641 | at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv); |
646 | 642 | ||
647 | /* maybe switch power to the card */ | 643 | /* maybe switch power to the card */ |
648 | if (host->board->vcc_pin) { | 644 | if (host->board->vcc_pin) { |
649 | switch (ios->power_mode) { | 645 | switch (ios->power_mode) { |
650 | case MMC_POWER_OFF: | 646 | case MMC_POWER_OFF: |
651 | at91_set_gpio_output(host->board->vcc_pin, 0); | 647 | at91_set_gpio_value(host->board->vcc_pin, 0); |
652 | break; | 648 | break; |
653 | case MMC_POWER_UP: | 649 | case MMC_POWER_UP: |
654 | case MMC_POWER_ON: | 650 | case MMC_POWER_ON: |
655 | at91_set_gpio_output(host->board->vcc_pin, 1); | 651 | at91_set_gpio_value(host->board->vcc_pin, 1); |
656 | break; | 652 | break; |
657 | } | 653 | } |
658 | } | 654 | } |
@@ -665,39 +661,40 @@ static irqreturn_t at91_mci_irq(int irq, void *devid) | |||
665 | { | 661 | { |
666 | struct at91mci_host *host = devid; | 662 | struct at91mci_host *host = devid; |
667 | int completed = 0; | 663 | int completed = 0; |
664 | unsigned int int_status, int_mask; | ||
668 | 665 | ||
669 | unsigned int int_status; | 666 | int_status = at91_mci_read(host, AT91_MCI_SR); |
667 | int_mask = at91_mci_read(host, AT91_MCI_IMR); | ||
668 | |||
669 | pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask, | ||
670 | int_status & int_mask); | ||
671 | |||
672 | int_status = int_status & int_mask; | ||
670 | 673 | ||
671 | int_status = at91_mci_read(AT91_MCI_SR); | 674 | if (int_status & AT91_MCI_ERRORS) { |
672 | pr_debug("MCI irq: status = %08X, %08lX, %08lX\n", int_status, at91_mci_read(AT91_MCI_IMR), | ||
673 | int_status & at91_mci_read(AT91_MCI_IMR)); | ||
674 | |||
675 | if ((int_status & at91_mci_read(AT91_MCI_IMR)) & 0xffff0000) | ||
676 | completed = 1; | 675 | completed = 1; |
676 | |||
677 | if (int_status & AT91_MCI_UNRE) | ||
678 | pr_debug("MMC: Underrun error\n"); | ||
679 | if (int_status & AT91_MCI_OVRE) | ||
680 | pr_debug("MMC: Overrun error\n"); | ||
681 | if (int_status & AT91_MCI_DTOE) | ||
682 | pr_debug("MMC: Data timeout\n"); | ||
683 | if (int_status & AT91_MCI_DCRCE) | ||
684 | pr_debug("MMC: CRC error in data\n"); | ||
685 | if (int_status & AT91_MCI_RTOE) | ||
686 | pr_debug("MMC: Response timeout\n"); | ||
687 | if (int_status & AT91_MCI_RENDE) | ||
688 | pr_debug("MMC: Response end bit error\n"); | ||
689 | if (int_status & AT91_MCI_RCRCE) | ||
690 | pr_debug("MMC: Response CRC error\n"); | ||
691 | if (int_status & AT91_MCI_RDIRE) | ||
692 | pr_debug("MMC: Response direction error\n"); | ||
693 | if (int_status & AT91_MCI_RINDE) | ||
694 | pr_debug("MMC: Response index error\n"); | ||
695 | } else { | ||
696 | /* Only continue processing if no errors */ | ||
677 | 697 | ||
678 | int_status &= at91_mci_read(AT91_MCI_IMR); | ||
679 | |||
680 | if (int_status & AT91_MCI_UNRE) | ||
681 | pr_debug("MMC: Underrun error\n"); | ||
682 | if (int_status & AT91_MCI_OVRE) | ||
683 | pr_debug("MMC: Overrun error\n"); | ||
684 | if (int_status & AT91_MCI_DTOE) | ||
685 | pr_debug("MMC: Data timeout\n"); | ||
686 | if (int_status & AT91_MCI_DCRCE) | ||
687 | pr_debug("MMC: CRC error in data\n"); | ||
688 | if (int_status & AT91_MCI_RTOE) | ||
689 | pr_debug("MMC: Response timeout\n"); | ||
690 | if (int_status & AT91_MCI_RENDE) | ||
691 | pr_debug("MMC: Response end bit error\n"); | ||
692 | if (int_status & AT91_MCI_RCRCE) | ||
693 | pr_debug("MMC: Response CRC error\n"); | ||
694 | if (int_status & AT91_MCI_RDIRE) | ||
695 | pr_debug("MMC: Response direction error\n"); | ||
696 | if (int_status & AT91_MCI_RINDE) | ||
697 | pr_debug("MMC: Response index error\n"); | ||
698 | |||
699 | /* Only continue processing if no errors */ | ||
700 | if (!completed) { | ||
701 | if (int_status & AT91_MCI_TXBUFE) { | 698 | if (int_status & AT91_MCI_TXBUFE) { |
702 | pr_debug("TX buffer empty\n"); | 699 | pr_debug("TX buffer empty\n"); |
703 | at91_mci_handle_transmitted(host); | 700 | at91_mci_handle_transmitted(host); |
@@ -705,12 +702,11 @@ static irqreturn_t at91_mci_irq(int irq, void *devid) | |||
705 | 702 | ||
706 | if (int_status & AT91_MCI_RXBUFF) { | 703 | if (int_status & AT91_MCI_RXBUFF) { |
707 | pr_debug("RX buffer full\n"); | 704 | pr_debug("RX buffer full\n"); |
708 | at91_mci_write(AT91_MCI_IER, AT91_MCI_CMDRDY); | 705 | at91_mci_write(host, AT91_MCI_IER, AT91_MCI_CMDRDY); |
709 | } | 706 | } |
710 | 707 | ||
711 | if (int_status & AT91_MCI_ENDTX) { | 708 | if (int_status & AT91_MCI_ENDTX) |
712 | pr_debug("Transmit has ended\n"); | 709 | pr_debug("Transmit has ended\n"); |
713 | } | ||
714 | 710 | ||
715 | if (int_status & AT91_MCI_ENDRX) { | 711 | if (int_status & AT91_MCI_ENDRX) { |
716 | pr_debug("Receive has ended\n"); | 712 | pr_debug("Receive has ended\n"); |
@@ -719,37 +715,33 @@ static irqreturn_t at91_mci_irq(int irq, void *devid) | |||
719 | 715 | ||
720 | if (int_status & AT91_MCI_NOTBUSY) { | 716 | if (int_status & AT91_MCI_NOTBUSY) { |
721 | pr_debug("Card is ready\n"); | 717 | pr_debug("Card is ready\n"); |
722 | at91_mci_write(AT91_MCI_IER, AT91_MCI_CMDRDY); | 718 | at91_mci_write(host, AT91_MCI_IER, AT91_MCI_CMDRDY); |
723 | } | 719 | } |
724 | 720 | ||
725 | if (int_status & AT91_MCI_DTIP) { | 721 | if (int_status & AT91_MCI_DTIP) |
726 | pr_debug("Data transfer in progress\n"); | 722 | pr_debug("Data transfer in progress\n"); |
727 | } | ||
728 | 723 | ||
729 | if (int_status & AT91_MCI_BLKE) { | 724 | if (int_status & AT91_MCI_BLKE) |
730 | pr_debug("Block transfer has ended\n"); | 725 | pr_debug("Block transfer has ended\n"); |
731 | } | ||
732 | 726 | ||
733 | if (int_status & AT91_MCI_TXRDY) { | 727 | if (int_status & AT91_MCI_TXRDY) |
734 | pr_debug("Ready to transmit\n"); | 728 | pr_debug("Ready to transmit\n"); |
735 | } | ||
736 | 729 | ||
737 | if (int_status & AT91_MCI_RXRDY) { | 730 | if (int_status & AT91_MCI_RXRDY) |
738 | pr_debug("Ready to receive\n"); | 731 | pr_debug("Ready to receive\n"); |
739 | } | ||
740 | 732 | ||
741 | if (int_status & AT91_MCI_CMDRDY) { | 733 | if (int_status & AT91_MCI_CMDRDY) { |
742 | pr_debug("Command ready\n"); | 734 | pr_debug("Command ready\n"); |
743 | completed = 1; | 735 | completed = 1; |
744 | } | 736 | } |
745 | } | 737 | } |
746 | at91_mci_write(AT91_MCI_IDR, int_status); | ||
747 | 738 | ||
748 | if (completed) { | 739 | if (completed) { |
749 | pr_debug("Completed command\n"); | 740 | pr_debug("Completed command\n"); |
750 | at91_mci_write(AT91_MCI_IDR, 0xffffffff); | 741 | at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); |
751 | at91mci_completed_command(host); | 742 | at91mci_completed_command(host); |
752 | } | 743 | } else |
744 | at91_mci_write(host, AT91_MCI_IDR, int_status); | ||
753 | 745 | ||
754 | return IRQ_HANDLED; | 746 | return IRQ_HANDLED; |
755 | } | 747 | } |
@@ -769,14 +761,14 @@ static irqreturn_t at91_mmc_det_irq(int irq, void *_host) | |||
769 | present ? "insert" : "remove"); | 761 | present ? "insert" : "remove"); |
770 | if (!present) { | 762 | if (!present) { |
771 | pr_debug("****** Resetting SD-card bus width ******\n"); | 763 | pr_debug("****** Resetting SD-card bus width ******\n"); |
772 | at91_mci_write(AT91_MCI_SDCR, 0); | 764 | at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS); |
773 | } | 765 | } |
774 | mmc_detect_change(host->mmc, msecs_to_jiffies(100)); | 766 | mmc_detect_change(host->mmc, msecs_to_jiffies(100)); |
775 | } | 767 | } |
776 | return IRQ_HANDLED; | 768 | return IRQ_HANDLED; |
777 | } | 769 | } |
778 | 770 | ||
779 | int at91_mci_get_ro(struct mmc_host *mmc) | 771 | static int at91_mci_get_ro(struct mmc_host *mmc) |
780 | { | 772 | { |
781 | int read_only = 0; | 773 | int read_only = 0; |
782 | struct at91mci_host *host = mmc_priv(mmc); | 774 | struct at91mci_host *host = mmc_priv(mmc); |
@@ -802,19 +794,26 @@ static const struct mmc_host_ops at91_mci_ops = { | |||
802 | /* | 794 | /* |
803 | * Probe for the device | 795 | * Probe for the device |
804 | */ | 796 | */ |
805 | static int at91_mci_probe(struct platform_device *pdev) | 797 | static int __init at91_mci_probe(struct platform_device *pdev) |
806 | { | 798 | { |
807 | struct mmc_host *mmc; | 799 | struct mmc_host *mmc; |
808 | struct at91mci_host *host; | 800 | struct at91mci_host *host; |
801 | struct resource *res; | ||
809 | int ret; | 802 | int ret; |
810 | 803 | ||
811 | pr_debug("Probe MCI devices\n"); | 804 | pr_debug("Probe MCI devices\n"); |
812 | at91_mci_disable(); | 805 | |
813 | at91_mci_enable(); | 806 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
807 | if (!res) | ||
808 | return -ENXIO; | ||
809 | |||
810 | if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME)) | ||
811 | return -EBUSY; | ||
814 | 812 | ||
815 | mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev); | 813 | mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev); |
816 | if (!mmc) { | 814 | if (!mmc) { |
817 | pr_debug("Failed to allocate mmc host\n"); | 815 | pr_debug("Failed to allocate mmc host\n"); |
816 | release_mem_region(res->start, res->end - res->start + 1); | ||
818 | return -ENOMEM; | 817 | return -ENOMEM; |
819 | } | 818 | } |
820 | 819 | ||
@@ -824,6 +823,9 @@ static int at91_mci_probe(struct platform_device *pdev) | |||
824 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; | 823 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; |
825 | mmc->caps = MMC_CAP_BYTEBLOCK; | 824 | mmc->caps = MMC_CAP_BYTEBLOCK; |
826 | 825 | ||
826 | mmc->max_blk_size = 4095; | ||
827 | mmc->max_blk_count = mmc->max_req_size; | ||
828 | |||
827 | host = mmc_priv(mmc); | 829 | host = mmc_priv(mmc); |
828 | host->mmc = mmc; | 830 | host->mmc = mmc; |
829 | host->buffer = NULL; | 831 | host->buffer = NULL; |
@@ -833,30 +835,51 @@ static int at91_mci_probe(struct platform_device *pdev) | |||
833 | #ifdef SUPPORT_4WIRE | 835 | #ifdef SUPPORT_4WIRE |
834 | mmc->caps |= MMC_CAP_4_BIT_DATA; | 836 | mmc->caps |= MMC_CAP_4_BIT_DATA; |
835 | #else | 837 | #else |
836 | printk("MMC: 4 wire bus mode not supported by this driver - using 1 wire\n"); | 838 | printk("AT91 MMC: 4 wire bus mode not supported by this driver - using 1 wire\n"); |
837 | #endif | 839 | #endif |
838 | } | 840 | } |
839 | 841 | ||
840 | /* | 842 | /* |
841 | * Get Clock | 843 | * Get Clock |
842 | */ | 844 | */ |
843 | mci_clk = clk_get(&pdev->dev, "mci_clk"); | 845 | host->mci_clk = clk_get(&pdev->dev, "mci_clk"); |
844 | if (IS_ERR(mci_clk)) { | 846 | if (IS_ERR(host->mci_clk)) { |
845 | printk(KERN_ERR "AT91 MMC: no clock defined.\n"); | 847 | printk(KERN_ERR "AT91 MMC: no clock defined.\n"); |
846 | mmc_free_host(mmc); | 848 | mmc_free_host(mmc); |
849 | release_mem_region(res->start, res->end - res->start + 1); | ||
847 | return -ENODEV; | 850 | return -ENODEV; |
848 | } | 851 | } |
849 | clk_enable(mci_clk); /* Enable the peripheral clock */ | 852 | |
853 | /* | ||
854 | * Map I/O region | ||
855 | */ | ||
856 | host->baseaddr = ioremap(res->start, res->end - res->start + 1); | ||
857 | if (!host->baseaddr) { | ||
858 | clk_put(host->mci_clk); | ||
859 | mmc_free_host(mmc); | ||
860 | release_mem_region(res->start, res->end - res->start + 1); | ||
861 | return -ENOMEM; | ||
862 | } | ||
863 | |||
864 | /* | ||
865 | * Reset hardware | ||
866 | */ | ||
867 | clk_enable(host->mci_clk); /* Enable the peripheral clock */ | ||
868 | at91_mci_disable(host); | ||
869 | at91_mci_enable(host); | ||
850 | 870 | ||
851 | /* | 871 | /* |
852 | * Allocate the MCI interrupt | 872 | * Allocate the MCI interrupt |
853 | */ | 873 | */ |
854 | ret = request_irq(AT91RM9200_ID_MCI, at91_mci_irq, IRQF_SHARED, DRIVER_NAME, host); | 874 | host->irq = platform_get_irq(pdev, 0); |
875 | ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED, DRIVER_NAME, host); | ||
855 | if (ret) { | 876 | if (ret) { |
856 | printk(KERN_ERR "Failed to request MCI interrupt\n"); | 877 | printk(KERN_ERR "AT91 MMC: Failed to request MCI interrupt\n"); |
857 | clk_disable(mci_clk); | 878 | clk_disable(host->mci_clk); |
858 | clk_put(mci_clk); | 879 | clk_put(host->mci_clk); |
859 | mmc_free_host(mmc); | 880 | mmc_free_host(mmc); |
881 | iounmap(host->baseaddr); | ||
882 | release_mem_region(res->start, res->end - res->start + 1); | ||
860 | return ret; | 883 | return ret; |
861 | } | 884 | } |
862 | 885 | ||
@@ -879,10 +902,10 @@ static int at91_mci_probe(struct platform_device *pdev) | |||
879 | ret = request_irq(host->board->det_pin, at91_mmc_det_irq, | 902 | ret = request_irq(host->board->det_pin, at91_mmc_det_irq, |
880 | 0, DRIVER_NAME, host); | 903 | 0, DRIVER_NAME, host); |
881 | if (ret) | 904 | if (ret) |
882 | printk(KERN_ERR "couldn't allocate MMC detect irq\n"); | 905 | printk(KERN_ERR "AT91 MMC: Couldn't allocate MMC detect irq\n"); |
883 | } | 906 | } |
884 | 907 | ||
885 | pr_debug(KERN_INFO "Added MCI driver\n"); | 908 | pr_debug("Added MCI driver\n"); |
886 | 909 | ||
887 | return 0; | 910 | return 0; |
888 | } | 911 | } |
@@ -890,10 +913,11 @@ static int at91_mci_probe(struct platform_device *pdev) | |||
890 | /* | 913 | /* |
891 | * Remove a device | 914 | * Remove a device |
892 | */ | 915 | */ |
893 | static int at91_mci_remove(struct platform_device *pdev) | 916 | static int __exit at91_mci_remove(struct platform_device *pdev) |
894 | { | 917 | { |
895 | struct mmc_host *mmc = platform_get_drvdata(pdev); | 918 | struct mmc_host *mmc = platform_get_drvdata(pdev); |
896 | struct at91mci_host *host; | 919 | struct at91mci_host *host; |
920 | struct resource *res; | ||
897 | 921 | ||
898 | if (!mmc) | 922 | if (!mmc) |
899 | return -1; | 923 | return -1; |
@@ -905,16 +929,19 @@ static int at91_mci_remove(struct platform_device *pdev) | |||
905 | cancel_delayed_work(&host->mmc->detect); | 929 | cancel_delayed_work(&host->mmc->detect); |
906 | } | 930 | } |
907 | 931 | ||
932 | at91_mci_disable(host); | ||
908 | mmc_remove_host(mmc); | 933 | mmc_remove_host(mmc); |
909 | at91_mci_disable(); | 934 | free_irq(host->irq, host); |
910 | free_irq(AT91RM9200_ID_MCI, host); | ||
911 | mmc_free_host(mmc); | ||
912 | 935 | ||
913 | clk_disable(mci_clk); /* Disable the peripheral clock */ | 936 | clk_disable(host->mci_clk); /* Disable the peripheral clock */ |
914 | clk_put(mci_clk); | 937 | clk_put(host->mci_clk); |
915 | 938 | ||
916 | platform_set_drvdata(pdev, NULL); | 939 | iounmap(host->baseaddr); |
940 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
941 | release_mem_region(res->start, res->end - res->start + 1); | ||
917 | 942 | ||
943 | mmc_free_host(mmc); | ||
944 | platform_set_drvdata(pdev, NULL); | ||
918 | pr_debug("MCI Removed\n"); | 945 | pr_debug("MCI Removed\n"); |
919 | 946 | ||
920 | return 0; | 947 | return 0; |
@@ -948,8 +975,7 @@ static int at91_mci_resume(struct platform_device *pdev) | |||
948 | #endif | 975 | #endif |
949 | 976 | ||
950 | static struct platform_driver at91_mci_driver = { | 977 | static struct platform_driver at91_mci_driver = { |
951 | .probe = at91_mci_probe, | 978 | .remove = __exit_p(at91_mci_remove), |
952 | .remove = at91_mci_remove, | ||
953 | .suspend = at91_mci_suspend, | 979 | .suspend = at91_mci_suspend, |
954 | .resume = at91_mci_resume, | 980 | .resume = at91_mci_resume, |
955 | .driver = { | 981 | .driver = { |
@@ -960,7 +986,7 @@ static struct platform_driver at91_mci_driver = { | |||
960 | 986 | ||
961 | static int __init at91_mci_init(void) | 987 | static int __init at91_mci_init(void) |
962 | { | 988 | { |
963 | return platform_driver_register(&at91_mci_driver); | 989 | return platform_driver_probe(&at91_mci_driver, at91_mci_probe); |
964 | } | 990 | } |
965 | 991 | ||
966 | static void __exit at91_mci_exit(void) | 992 | static void __exit at91_mci_exit(void) |
diff --git a/drivers/mmc/au1xmmc.c b/drivers/mmc/au1xmmc.c
index 447fba5825fd..b834be261ab7 100644
--- a/drivers/mmc/au1xmmc.c
+++ b/drivers/mmc/au1xmmc.c
@@ -152,8 +152,9 @@ static inline int au1xmmc_card_inserted(struct au1xmmc_host *host) | |||
152 | ? 1 : 0; | 152 | ? 1 : 0; |
153 | } | 153 | } |
154 | 154 | ||
155 | static inline int au1xmmc_card_readonly(struct au1xmmc_host *host) | 155 | static int au1xmmc_card_readonly(struct mmc_host *mmc) |
156 | { | 156 | { |
157 | struct au1xmmc_host *host = mmc_priv(mmc); | ||
157 | return (bcsr->status & au1xmmc_card_table[host->id].wpstatus) | 158 | return (bcsr->status & au1xmmc_card_table[host->id].wpstatus) |
158 | ? 1 : 0; | 159 | ? 1 : 0; |
159 | } | 160 | } |
@@ -193,6 +194,8 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait, | |||
193 | u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT); | 194 | u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT); |
194 | 195 | ||
195 | switch (mmc_resp_type(cmd)) { | 196 | switch (mmc_resp_type(cmd)) { |
197 | case MMC_RSP_NONE: | ||
198 | break; | ||
196 | case MMC_RSP_R1: | 199 | case MMC_RSP_R1: |
197 | mmccmd |= SD_CMD_RT_1; | 200 | mmccmd |= SD_CMD_RT_1; |
198 | break; | 201 | break; |
@@ -205,6 +208,10 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait, | |||
205 | case MMC_RSP_R3: | 208 | case MMC_RSP_R3: |
206 | mmccmd |= SD_CMD_RT_3; | 209 | mmccmd |= SD_CMD_RT_3; |
207 | break; | 210 | break; |
211 | default: | ||
212 | printk(KERN_INFO "au1xmmc: unhandled response type %02x\n", | ||
213 | mmc_resp_type(cmd)); | ||
214 | return MMC_ERR_INVALID; | ||
208 | } | 215 | } |
209 | 216 | ||
210 | switch(cmd->opcode) { | 217 | switch(cmd->opcode) { |
@@ -875,9 +882,10 @@ static void au1xmmc_init_dma(struct au1xmmc_host *host) | |||
875 | host->rx_chan = rxchan; | 882 | host->rx_chan = rxchan; |
876 | } | 883 | } |
877 | 884 | ||
878 | struct const mmc_host_ops au1xmmc_ops = { | 885 | static const struct mmc_host_ops au1xmmc_ops = { |
879 | .request = au1xmmc_request, | 886 | .request = au1xmmc_request, |
880 | .set_ios = au1xmmc_set_ios, | 887 | .set_ios = au1xmmc_set_ios, |
888 | .get_ro = au1xmmc_card_readonly, | ||
881 | }; | 889 | }; |
882 | 890 | ||
883 | static int __devinit au1xmmc_probe(struct platform_device *pdev) | 891 | static int __devinit au1xmmc_probe(struct platform_device *pdev) |
@@ -914,6 +922,9 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev) | |||
914 | mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE; | 922 | mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE; |
915 | mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT; | 923 | mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT; |
916 | 924 | ||
925 | mmc->max_blk_size = 2048; | ||
926 | mmc->max_blk_count = 512; | ||
927 | |||
917 | mmc->ocr_avail = AU1XMMC_OCR; | 928 | mmc->ocr_avail = AU1XMMC_OCR; |
918 | 929 | ||
919 | host = mmc_priv(mmc); | 930 | host = mmc_priv(mmc); |
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c
index 06e7fcd19221..b060d4bfba29 100644
--- a/drivers/mmc/imxmmc.c
+++ b/drivers/mmc/imxmmc.c
@@ -351,9 +351,6 @@ static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, | |||
351 | case MMC_RSP_R3: /* short */ | 351 | case MMC_RSP_R3: /* short */ |
352 | cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R3; | 352 | cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R3; |
353 | break; | 353 | break; |
354 | case MMC_RSP_R6: /* short CRC */ | ||
355 | cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R6; | ||
356 | break; | ||
357 | default: | 354 | default: |
358 | break; | 355 | break; |
359 | } | 356 | } |
@@ -961,8 +958,10 @@ static int imxmci_probe(struct platform_device *pdev) | |||
961 | /* MMC core transfer sizes tunable parameters */ | 958 | /* MMC core transfer sizes tunable parameters */ |
962 | mmc->max_hw_segs = 64; | 959 | mmc->max_hw_segs = 64; |
963 | mmc->max_phys_segs = 64; | 960 | mmc->max_phys_segs = 64; |
964 | mmc->max_sectors = 64; /* default 1 << (PAGE_CACHE_SHIFT - 9) */ | ||
965 | mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */ | 961 | mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */ |
962 | mmc->max_req_size = 64*512; /* default PAGE_CACHE_SIZE */ | ||
963 | mmc->max_blk_size = 2048; | ||
964 | mmc->max_blk_count = 65535; | ||
966 | 965 | ||
967 | host = mmc_priv(mmc); | 966 | host = mmc_priv(mmc); |
968 | host->mmc = mmc; | 967 | host->mmc = mmc; |
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 6f2a282e2b97..5046a1661342 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -103,11 +103,16 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) | |||
103 | mmc_hostname(host), mrq->cmd->opcode, | 103 | mmc_hostname(host), mrq->cmd->opcode, |
104 | mrq->cmd->arg, mrq->cmd->flags); | 104 | mrq->cmd->arg, mrq->cmd->flags); |
105 | 105 | ||
106 | WARN_ON(host->card_busy == NULL); | 106 | WARN_ON(!host->claimed); |
107 | 107 | ||
108 | mrq->cmd->error = 0; | 108 | mrq->cmd->error = 0; |
109 | mrq->cmd->mrq = mrq; | 109 | mrq->cmd->mrq = mrq; |
110 | if (mrq->data) { | 110 | if (mrq->data) { |
111 | BUG_ON(mrq->data->blksz > host->max_blk_size); | ||
112 | BUG_ON(mrq->data->blocks > host->max_blk_count); | ||
113 | BUG_ON(mrq->data->blocks * mrq->data->blksz > | ||
114 | host->max_req_size); | ||
115 | |||
111 | mrq->cmd->data = mrq->data; | 116 | mrq->cmd->data = mrq->data; |
112 | mrq->data->error = 0; | 117 | mrq->data->error = 0; |
113 | mrq->data->mrq = mrq; | 118 | mrq->data->mrq = mrq; |
@@ -157,7 +162,7 @@ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries | |||
157 | { | 162 | { |
158 | struct mmc_request mrq; | 163 | struct mmc_request mrq; |
159 | 164 | ||
160 | BUG_ON(host->card_busy == NULL); | 165 | BUG_ON(!host->claimed); |
161 | 166 | ||
162 | memset(&mrq, 0, sizeof(struct mmc_request)); | 167 | memset(&mrq, 0, sizeof(struct mmc_request)); |
163 | 168 | ||
@@ -195,7 +200,7 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, unsigned int rca, | |||
195 | 200 | ||
196 | int i, err; | 201 | int i, err; |
197 | 202 | ||
198 | BUG_ON(host->card_busy == NULL); | 203 | BUG_ON(!host->claimed); |
199 | BUG_ON(retries < 0); | 204 | BUG_ON(retries < 0); |
200 | 205 | ||
201 | err = MMC_ERR_INVALID; | 206 | err = MMC_ERR_INVALID; |
@@ -289,7 +294,10 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card, | |||
289 | else | 294 | else |
290 | limit_us = 100000; | 295 | limit_us = 100000; |
291 | 296 | ||
292 | if (timeout_us > limit_us) { | 297 | /* |
298 | * SDHC cards always use these fixed values. | ||
299 | */ | ||
300 | if (timeout_us > limit_us || mmc_card_blockaddr(card)) { | ||
293 | data->timeout_ns = limit_us * 1000; | 301 | data->timeout_ns = limit_us * 1000; |
294 | data->timeout_clks = 0; | 302 | data->timeout_clks = 0; |
295 | } | 303 | } |
@@ -320,14 +328,14 @@ int __mmc_claim_host(struct mmc_host *host, struct mmc_card *card) | |||
320 | spin_lock_irqsave(&host->lock, flags); | 328 | spin_lock_irqsave(&host->lock, flags); |
321 | while (1) { | 329 | while (1) { |
322 | set_current_state(TASK_UNINTERRUPTIBLE); | 330 | set_current_state(TASK_UNINTERRUPTIBLE); |
323 | if (host->card_busy == NULL) | 331 | if (!host->claimed) |
324 | break; | 332 | break; |
325 | spin_unlock_irqrestore(&host->lock, flags); | 333 | spin_unlock_irqrestore(&host->lock, flags); |
326 | schedule(); | 334 | schedule(); |
327 | spin_lock_irqsave(&host->lock, flags); | 335 | spin_lock_irqsave(&host->lock, flags); |
328 | } | 336 | } |
329 | set_current_state(TASK_RUNNING); | 337 | set_current_state(TASK_RUNNING); |
330 | host->card_busy = card; | 338 | host->claimed = 1; |
331 | spin_unlock_irqrestore(&host->lock, flags); | 339 | spin_unlock_irqrestore(&host->lock, flags); |
332 | remove_wait_queue(&host->wq, &wait); | 340 | remove_wait_queue(&host->wq, &wait); |
333 | 341 | ||
@@ -353,10 +361,10 @@ void mmc_release_host(struct mmc_host *host) | |||
353 | { | 361 | { |
354 | unsigned long flags; | 362 | unsigned long flags; |
355 | 363 | ||
356 | BUG_ON(host->card_busy == NULL); | 364 | BUG_ON(!host->claimed); |
357 | 365 | ||
358 | spin_lock_irqsave(&host->lock, flags); | 366 | spin_lock_irqsave(&host->lock, flags); |
359 | host->card_busy = NULL; | 367 | host->claimed = 0; |
360 | spin_unlock_irqrestore(&host->lock, flags); | 368 | spin_unlock_irqrestore(&host->lock, flags); |
361 | 369 | ||
362 | wake_up(&host->wq); | 370 | wake_up(&host->wq); |
@@ -372,7 +380,7 @@ static inline void mmc_set_ios(struct mmc_host *host) | |||
372 | mmc_hostname(host), ios->clock, ios->bus_mode, | 380 | mmc_hostname(host), ios->clock, ios->bus_mode, |
373 | ios->power_mode, ios->chip_select, ios->vdd, | 381 | ios->power_mode, ios->chip_select, ios->vdd, |
374 | ios->bus_width); | 382 | ios->bus_width); |
375 | 383 | ||
376 | host->ops->set_ios(host, ios); | 384 | host->ops->set_ios(host, ios); |
377 | } | 385 | } |
378 | 386 | ||
@@ -381,7 +389,7 @@ static int mmc_select_card(struct mmc_host *host, struct mmc_card *card) | |||
381 | int err; | 389 | int err; |
382 | struct mmc_command cmd; | 390 | struct mmc_command cmd; |
383 | 391 | ||
384 | BUG_ON(host->card_busy == NULL); | 392 | BUG_ON(!host->claimed); |
385 | 393 | ||
386 | if (host->card_selected == card) | 394 | if (host->card_selected == card) |
387 | return MMC_ERR_NONE; | 395 | return MMC_ERR_NONE; |
@@ -588,34 +596,65 @@ static void mmc_decode_csd(struct mmc_card *card) | |||
588 | 596 | ||
589 | if (mmc_card_sd(card)) { | 597 | if (mmc_card_sd(card)) { |
590 | csd_struct = UNSTUFF_BITS(resp, 126, 2); | 598 | csd_struct = UNSTUFF_BITS(resp, 126, 2); |
591 | if (csd_struct != 0) { | 599 | |
600 | switch (csd_struct) { | ||
601 | case 0: | ||
602 | m = UNSTUFF_BITS(resp, 115, 4); | ||
603 | e = UNSTUFF_BITS(resp, 112, 3); | ||
604 | csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10; | ||
605 | csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100; | ||
606 | |||
607 | m = UNSTUFF_BITS(resp, 99, 4); | ||
608 | e = UNSTUFF_BITS(resp, 96, 3); | ||
609 | csd->max_dtr = tran_exp[e] * tran_mant[m]; | ||
610 | csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); | ||
611 | |||
612 | e = UNSTUFF_BITS(resp, 47, 3); | ||
613 | m = UNSTUFF_BITS(resp, 62, 12); | ||
614 | csd->capacity = (1 + m) << (e + 2); | ||
615 | |||
616 | csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4); | ||
617 | csd->read_partial = UNSTUFF_BITS(resp, 79, 1); | ||
618 | csd->write_misalign = UNSTUFF_BITS(resp, 78, 1); | ||
619 | csd->read_misalign = UNSTUFF_BITS(resp, 77, 1); | ||
620 | csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3); | ||
621 | csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); | ||
622 | csd->write_partial = UNSTUFF_BITS(resp, 21, 1); | ||
623 | break; | ||
624 | case 1: | ||
625 | /* | ||
626 | * This is a block-addressed SDHC card. Most | ||
627 | * interesting fields are unused and have fixed | ||
628 | * values. To avoid getting tripped by buggy cards, | ||
629 | * we assume those fixed values ourselves. | ||
630 | */ | ||
631 | mmc_card_set_blockaddr(card); | ||
632 | |||
633 | csd->tacc_ns = 0; /* Unused */ | ||
634 | csd->tacc_clks = 0; /* Unused */ | ||
635 | |||
636 | m = UNSTUFF_BITS(resp, 99, 4); | ||
637 | e = UNSTUFF_BITS(resp, 96, 3); | ||
638 | csd->max_dtr = tran_exp[e] * tran_mant[m]; | ||
639 | csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); | ||
640 | |||
641 | m = UNSTUFF_BITS(resp, 48, 22); | ||
642 | csd->capacity = (1 + m) << 10; | ||
643 | |||
644 | csd->read_blkbits = 9; | ||
645 | csd->read_partial = 0; | ||
646 | csd->write_misalign = 0; | ||
647 | csd->read_misalign = 0; | ||
648 | csd->r2w_factor = 4; /* Unused */ | ||
649 | csd->write_blkbits = 9; | ||
650 | csd->write_partial = 0; | ||
651 | break; | ||
652 | default: | ||
592 | printk("%s: unrecognised CSD structure version %d\n", | 653 | printk("%s: unrecognised CSD structure version %d\n", |
593 | mmc_hostname(card->host), csd_struct); | 654 | mmc_hostname(card->host), csd_struct); |
594 | mmc_card_set_bad(card); | 655 | mmc_card_set_bad(card); |
595 | return; | 656 | return; |
596 | } | 657 | } |
597 | |||
598 | m = UNSTUFF_BITS(resp, 115, 4); | ||
599 | e = UNSTUFF_BITS(resp, 112, 3); | ||
600 | csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10; | ||
601 | csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100; | ||
602 | |||
603 | m = UNSTUFF_BITS(resp, 99, 4); | ||
604 | e = UNSTUFF_BITS(resp, 96, 3); | ||
605 | csd->max_dtr = tran_exp[e] * tran_mant[m]; | ||
606 | csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); | ||
607 | |||
608 | e = UNSTUFF_BITS(resp, 47, 3); | ||
609 | m = UNSTUFF_BITS(resp, 62, 12); | ||
610 | csd->capacity = (1 + m) << (e + 2); | ||
611 | |||
612 | csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4); | ||
613 | csd->read_partial = UNSTUFF_BITS(resp, 79, 1); | ||
614 | csd->write_misalign = UNSTUFF_BITS(resp, 78, 1); | ||
615 | csd->read_misalign = UNSTUFF_BITS(resp, 77, 1); | ||
616 | csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3); | ||
617 | csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); | ||
618 | csd->write_partial = UNSTUFF_BITS(resp, 21, 1); | ||
619 | } else { | 658 | } else { |
620 | /* | 659 | /* |
621 | * We only understand CSD structure v1.1 and v1.2. | 660 | * We only understand CSD structure v1.1 and v1.2. |
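As a side note, a minimal sketch (not from the kernel tree) of the capacity arithmetic the new CSD structure v2 branch in the hunk above performs: C_SIZE is a 22-bit field and the card holds (C_SIZE + 1) * 1024 blocks of 512 bytes. The function name and the sample C_SIZE value are illustrative.

```c
#include <stdint.h>
#include <stdio.h>

/* Capacity of a CSD v2 (SDHC) card: (C_SIZE + 1) << 10 blocks, 512 bytes each. */
static uint64_t sdhc_capacity_bytes(uint32_t c_size)
{
	uint64_t blocks = ((uint64_t)c_size + 1) << 10;	/* 512-byte blocks */

	return blocks << 9;				/* bytes */
}

int main(void)
{
	/* e.g. C_SIZE = 7744 corresponds to roughly a 4 GB card */
	printf("%llu bytes\n", (unsigned long long)sdhc_capacity_bytes(7744));
	return 0;
}
```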
@@ -848,6 +887,41 @@ static int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) | |||
848 | return err; | 887 | return err; |
849 | } | 888 | } |
850 | 889 | ||
890 | static int mmc_send_if_cond(struct mmc_host *host, u32 ocr, int *rsd2) | ||
891 | { | ||
892 | struct mmc_command cmd; | ||
893 | int err, sd2; | ||
894 | static const u8 test_pattern = 0xAA; | ||
895 | |||
896 | /* | ||
897 | * To support SD 2.0 cards, we must always invoke SD_SEND_IF_COND | ||
898 | * before SD_APP_OP_COND. This command will harmlessly fail for | ||
899 | * SD 1.0 cards. | ||
900 | */ | ||
901 | cmd.opcode = SD_SEND_IF_COND; | ||
902 | cmd.arg = ((ocr & 0xFF8000) != 0) << 8 | test_pattern; | ||
903 | cmd.flags = MMC_RSP_R7 | MMC_CMD_BCR; | ||
904 | |||
905 | err = mmc_wait_for_cmd(host, &cmd, 0); | ||
906 | if (err == MMC_ERR_NONE) { | ||
907 | if ((cmd.resp[0] & 0xFF) == test_pattern) { | ||
908 | sd2 = 1; | ||
909 | } else { | ||
910 | sd2 = 0; | ||
911 | err = MMC_ERR_FAILED; | ||
912 | } | ||
913 | } else { | ||
914 | /* | ||
915 | * Treat errors as SD 1.0 card. | ||
916 | */ | ||
917 | sd2 = 0; | ||
918 | err = MMC_ERR_NONE; | ||
919 | } | ||
920 | if (rsd2) | ||
921 | *rsd2 = sd2; | ||
922 | return err; | ||
923 | } | ||
924 | |||
851 | /* | 925 | /* |
852 | * Discover cards by requesting their CID. If this command | 926 | * Discover cards by requesting their CID. If this command |
853 | * times out, it is not an error; there are no further cards | 927 | * times out, it is not an error; there are no further cards |
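A small sketch (assumed helper names, not kernel API) of how the SD_SEND_IF_COND argument added in the hunk above is built and checked: bit 8 signals 2.7-3.6V host support, the low byte carries the 0xAA check pattern, and an SD 2.0 card echoes that pattern back in its response.

```c
#include <stdint.h>
#include <stdio.h>

/* Build the CMD8 argument: voltage-supplied bit plus the 0xAA check pattern. */
static uint32_t build_if_cond_arg(uint32_t ocr)
{
	const uint8_t test_pattern = 0xAA;

	return (((ocr & 0xFF8000) != 0) << 8) | test_pattern;
}

/* An SD 2.0 card echoes the check pattern back in the low byte of the response. */
static int is_sd2(uint32_t resp, uint8_t test_pattern)
{
	return (resp & 0xFF) == test_pattern;
}

int main(void)
{
	uint32_t arg = build_if_cond_arg(0x00FF8000);	/* typical host OCR window */

	printf("CMD8 arg = 0x%03X, sd2 = %d\n", (unsigned)arg, is_sd2(0x1AA, 0xAA));
	return 0;
}
```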
@@ -1018,7 +1092,8 @@ static void mmc_process_ext_csds(struct mmc_host *host) | |||
1018 | mmc_wait_for_req(host, &mrq); | 1092 | mmc_wait_for_req(host, &mrq); |
1019 | 1093 | ||
1020 | if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { | 1094 | if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { |
1021 | mmc_card_set_dead(card); | 1095 | printk("%s: unable to read EXT_CSD, performance " |
1096 | "might suffer.\n", mmc_hostname(card->host)); | ||
1022 | continue; | 1097 | continue; |
1023 | } | 1098 | } |
1024 | 1099 | ||
@@ -1034,7 +1109,6 @@ static void mmc_process_ext_csds(struct mmc_host *host) | |||
1034 | printk("%s: card is mmc v4 but doesn't support " | 1109 | printk("%s: card is mmc v4 but doesn't support " |
1035 | "any high-speed modes.\n", | 1110 | "any high-speed modes.\n", |
1036 | mmc_hostname(card->host)); | 1111 | mmc_hostname(card->host)); |
1037 | mmc_card_set_bad(card); | ||
1038 | continue; | 1112 | continue; |
1039 | } | 1113 | } |
1040 | 1114 | ||
@@ -1215,7 +1289,9 @@ static void mmc_read_switch_caps(struct mmc_host *host) | |||
1215 | mmc_wait_for_req(host, &mrq); | 1289 | mmc_wait_for_req(host, &mrq); |
1216 | 1290 | ||
1217 | if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { | 1291 | if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { |
1218 | mmc_card_set_dead(card); | 1292 | printk("%s: unable to read switch capabilities, " |
1293 | "performance might suffer.\n", | ||
1294 | mmc_hostname(card->host)); | ||
1219 | continue; | 1295 | continue; |
1220 | } | 1296 | } |
1221 | 1297 | ||
@@ -1247,12 +1323,8 @@ static void mmc_read_switch_caps(struct mmc_host *host) | |||
1247 | 1323 | ||
1248 | mmc_wait_for_req(host, &mrq); | 1324 | mmc_wait_for_req(host, &mrq); |
1249 | 1325 | ||
1250 | if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { | 1326 | if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE || |
1251 | mmc_card_set_dead(card); | 1327 | (status[16] & 0xF) != 1) { |
1252 | continue; | ||
1253 | } | ||
1254 | |||
1255 | if ((status[16] & 0xF) != 1) { | ||
1256 | printk(KERN_WARNING "%s: Problem switching card " | 1328 | printk(KERN_WARNING "%s: Problem switching card " |
1257 | "into high-speed mode!\n", | 1329 | "into high-speed mode!\n", |
1258 | mmc_hostname(host)); | 1330 | mmc_hostname(host)); |
@@ -1334,6 +1406,10 @@ static void mmc_setup(struct mmc_host *host) | |||
1334 | mmc_power_up(host); | 1406 | mmc_power_up(host); |
1335 | mmc_idle_cards(host); | 1407 | mmc_idle_cards(host); |
1336 | 1408 | ||
1409 | err = mmc_send_if_cond(host, host->ocr_avail, NULL); | ||
1410 | if (err != MMC_ERR_NONE) { | ||
1411 | return; | ||
1412 | } | ||
1337 | err = mmc_send_app_op_cond(host, 0, &ocr); | 1413 | err = mmc_send_app_op_cond(host, 0, &ocr); |
1338 | 1414 | ||
1339 | /* | 1415 | /* |
@@ -1386,10 +1462,21 @@ static void mmc_setup(struct mmc_host *host) | |||
1386 | * all get the idea that they should be ready for CMD2. | 1462 | * all get the idea that they should be ready for CMD2. |
1387 | * (My SanDisk card seems to need this.) | 1463 | * (My SanDisk card seems to need this.) |
1388 | */ | 1464 | */ |
1389 | if (host->mode == MMC_MODE_SD) | 1465 | if (host->mode == MMC_MODE_SD) { |
1390 | mmc_send_app_op_cond(host, host->ocr, NULL); | 1466 | int err, sd2; |
1391 | else | 1467 | err = mmc_send_if_cond(host, host->ocr, &sd2); |
1468 | if (err == MMC_ERR_NONE) { | ||
1469 | /* | ||
1470 | * If SD_SEND_IF_COND indicates an SD 2.0 | ||
1471 | * compliant card, we should set bit 30 | ||
1472 | * of the ocr to indicate that we can handle | ||
1473 | * block-addressed SDHC cards. | ||
1474 | */ | ||
1475 | mmc_send_app_op_cond(host, host->ocr | (sd2 << 30), NULL); | ||
1476 | } | ||
1477 | } else { | ||
1392 | mmc_send_op_cond(host, host->ocr, NULL); | 1478 | mmc_send_op_cond(host, host->ocr, NULL); |
1479 | } | ||
1393 | 1480 | ||
1394 | mmc_discover_cards(host); | 1481 | mmc_discover_cards(host); |
1395 | 1482 | ||
@@ -1519,8 +1606,11 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) | |||
1519 | */ | 1606 | */ |
1520 | host->max_hw_segs = 1; | 1607 | host->max_hw_segs = 1; |
1521 | host->max_phys_segs = 1; | 1608 | host->max_phys_segs = 1; |
1522 | host->max_sectors = 1 << (PAGE_CACHE_SHIFT - 9); | ||
1523 | host->max_seg_size = PAGE_CACHE_SIZE; | 1609 | host->max_seg_size = PAGE_CACHE_SIZE; |
1610 | |||
1611 | host->max_req_size = PAGE_CACHE_SIZE; | ||
1612 | host->max_blk_size = 512; | ||
1613 | host->max_blk_count = PAGE_CACHE_SIZE / 512; | ||
1524 | } | 1614 | } |
1525 | 1615 | ||
1526 | return host; | 1616 | return host; |
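The defaults above replace the single max_sectors field with three limits: max_req_size (bytes per request), max_blk_size (bytes per block) and max_blk_count (blocks per request); the block-layer sector cap is then derived as max_req_size / 512 in the mmc_queue change further down. A quick consistency check of those PAGE_CACHE_SIZE-based defaults, assuming a 4 KiB page:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int page = 4096;       /* PAGE_CACHE_SIZE on most arches */
        unsigned int max_req_size = page;
        unsigned int max_blk_size = 512;
        unsigned int max_blk_count = page / 512;

        /* max_blk_count blocks of max_blk_size fit within max_req_size. */
        assert(max_blk_size * max_blk_count <= max_req_size);

        printf("block layer max_sectors = %u\n", max_req_size / 512);
        return 0;
    }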
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c index 87713572293f..05ba8ace70e7 100644 --- a/drivers/mmc/mmc_block.c +++ b/drivers/mmc/mmc_block.c | |||
@@ -237,13 +237,17 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
237 | brq.mrq.cmd = &brq.cmd; | 237 | brq.mrq.cmd = &brq.cmd; |
238 | brq.mrq.data = &brq.data; | 238 | brq.mrq.data = &brq.data; |
239 | 239 | ||
240 | brq.cmd.arg = req->sector << 9; | 240 | brq.cmd.arg = req->sector; |
241 | if (!mmc_card_blockaddr(card)) | ||
242 | brq.cmd.arg <<= 9; | ||
241 | brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; | 243 | brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; |
242 | brq.data.blksz = 1 << md->block_bits; | 244 | brq.data.blksz = 1 << md->block_bits; |
243 | brq.data.blocks = req->nr_sectors >> (md->block_bits - 9); | ||
244 | brq.stop.opcode = MMC_STOP_TRANSMISSION; | 245 | brq.stop.opcode = MMC_STOP_TRANSMISSION; |
245 | brq.stop.arg = 0; | 246 | brq.stop.arg = 0; |
246 | brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC; | 247 | brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC; |
248 | brq.data.blocks = req->nr_sectors >> (md->block_bits - 9); | ||
249 | if (brq.data.blocks > card->host->max_blk_count) | ||
250 | brq.data.blocks = card->host->max_blk_count; | ||
247 | 251 | ||
248 | mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ); | 252 | mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ); |
249 | 253 | ||
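Two things change in the request setup above: for block-addressed (SDHC) cards the command argument is the sector number itself rather than a byte offset (the << 9 shift is applied only to byte-addressed cards), and the block count is clamped to the host's max_blk_count. A standalone sketch of that computation, assuming 512-byte blocks for simplicity (the real code derives the block size from md->block_bits), with the card and request fields stood in by plain parameters:

    #include <stdio.h>
    #include <stdint.h>

    struct xfer {
        uint32_t arg;          /* CMD17/18/24/25 argument */
        unsigned int blocks;   /* blocks actually issued  */
    };

    static struct xfer build_xfer(uint64_t sector, unsigned int nr_sectors,
                                  int block_addressed,
                                  unsigned int max_blk_count)
    {
        struct xfer x;

        x.arg = block_addressed ? (uint32_t)sector
                                : (uint32_t)(sector << 9);
        x.blocks = nr_sectors;
        if (x.blocks > max_blk_count)
            x.blocks = max_blk_count;  /* remainder is reissued by the caller */
        return x;
    }

    int main(void)
    {
        struct xfer a = build_xfer(100, 64, 0, 8);  /* byte addressed  */
        struct xfer b = build_xfer(100, 64, 1, 8);  /* block addressed */

        printf("byte: arg=0x%x blocks=%u\n", a.arg, a.blocks);
        printf("blk : arg=0x%x blocks=%u\n", b.arg, b.blocks);
        return 0;
    }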
@@ -375,9 +379,10 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
375 | spin_unlock_irq(&md->lock); | 379 | spin_unlock_irq(&md->lock); |
376 | } | 380 | } |
377 | 381 | ||
382 | flush_queue: | ||
383 | |||
378 | mmc_card_release_host(card); | 384 | mmc_card_release_host(card); |
379 | 385 | ||
380 | flush_queue: | ||
381 | spin_lock_irq(&md->lock); | 386 | spin_lock_irq(&md->lock); |
382 | while (ret) { | 387 | while (ret) { |
383 | ret = end_that_request_chunk(req, 0, | 388 | ret = end_that_request_chunk(req, 0, |
@@ -494,6 +499,10 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card) | |||
494 | struct mmc_command cmd; | 499 | struct mmc_command cmd; |
495 | int err; | 500 | int err; |
496 | 501 | ||
502 | /* Block-addressed cards ignore MMC_SET_BLOCKLEN. */ | ||
503 | if (mmc_card_blockaddr(card)) | ||
504 | return 0; | ||
505 | |||
497 | mmc_card_claim_host(card); | 506 | mmc_card_claim_host(card); |
498 | cmd.opcode = MMC_SET_BLOCKLEN; | 507 | cmd.opcode = MMC_SET_BLOCKLEN; |
499 | cmd.arg = 1 << md->block_bits; | 508 | cmd.arg = 1 << md->block_bits; |
diff --git a/drivers/mmc/mmc_queue.c b/drivers/mmc/mmc_queue.c index a17423a4ed8f..c27e42645cdb 100644 --- a/drivers/mmc/mmc_queue.c +++ b/drivers/mmc/mmc_queue.c | |||
@@ -78,8 +78,10 @@ static int mmc_queue_thread(void *d) | |||
78 | spin_unlock_irq(q->queue_lock); | 78 | spin_unlock_irq(q->queue_lock); |
79 | 79 | ||
80 | if (!req) { | 80 | if (!req) { |
81 | if (kthread_should_stop()) | 81 | if (kthread_should_stop()) { |
82 | set_current_state(TASK_RUNNING); | ||
82 | break; | 83 | break; |
84 | } | ||
83 | up(&mq->thread_sem); | 85 | up(&mq->thread_sem); |
84 | schedule(); | 86 | schedule(); |
85 | down(&mq->thread_sem); | 87 | down(&mq->thread_sem); |
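The added set_current_state(TASK_RUNNING) matters because the loop marks itself TASK_INTERRUPTIBLE before checking for work; breaking out of the loop while still in that state would let the thread exit "asleep". A schematic kernel-style loop showing the ordering (not compilable on its own; have_request stands in for the real queue check, and the semaphore handling around schedule() is omitted):

    /* Schematic queue thread, mirroring the ordering in the hunk above. */
    static int queue_thread(void *d)
    {
        do {
            set_current_state(TASK_INTERRUPTIBLE);
            /* ... fetch the next request under the queue lock ... */
            if (!have_request) {
                if (kthread_should_stop()) {
                    set_current_state(TASK_RUNNING);
                    break;              /* exit runnable, not asleep */
                }
                schedule();             /* sleep until woken */
                continue;
            }
            set_current_state(TASK_RUNNING);
            /* ... issue the request ... */
        } while (1);

        return 0;
    }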
@@ -145,7 +147,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock | |||
145 | 147 | ||
146 | blk_queue_prep_rq(mq->queue, mmc_prep_request); | 148 | blk_queue_prep_rq(mq->queue, mmc_prep_request); |
147 | blk_queue_bounce_limit(mq->queue, limit); | 149 | blk_queue_bounce_limit(mq->queue, limit); |
148 | blk_queue_max_sectors(mq->queue, host->max_sectors); | 150 | blk_queue_max_sectors(mq->queue, host->max_req_size / 512); |
149 | blk_queue_max_phys_segments(mq->queue, host->max_phys_segs); | 151 | blk_queue_max_phys_segments(mq->queue, host->max_phys_segs); |
150 | blk_queue_max_hw_segments(mq->queue, host->max_hw_segs); | 152 | blk_queue_max_hw_segments(mq->queue, host->max_hw_segs); |
151 | blk_queue_max_segment_size(mq->queue, host->max_seg_size); | 153 | blk_queue_max_segment_size(mq->queue, host->max_seg_size); |
diff --git a/drivers/mmc/mmc_sysfs.c b/drivers/mmc/mmc_sysfs.c index e334acd045bc..d32698b02d7f 100644 --- a/drivers/mmc/mmc_sysfs.c +++ b/drivers/mmc/mmc_sysfs.c | |||
@@ -199,7 +199,7 @@ void mmc_init_card(struct mmc_card *card, struct mmc_host *host) | |||
199 | memset(card, 0, sizeof(struct mmc_card)); | 199 | memset(card, 0, sizeof(struct mmc_card)); |
200 | card->host = host; | 200 | card->host = host; |
201 | device_initialize(&card->dev); | 201 | device_initialize(&card->dev); |
202 | card->dev.parent = mmc_dev(host); | 202 | card->dev.parent = mmc_classdev(host); |
203 | card->dev.bus = &mmc_bus_type; | 203 | card->dev.bus = &mmc_bus_type; |
204 | card->dev.release = mmc_release_card; | 204 | card->dev.release = mmc_release_card; |
205 | } | 205 | } |
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c index e9b80e920266..5941dd951e82 100644 --- a/drivers/mmc/mmci.c +++ b/drivers/mmc/mmci.c | |||
@@ -42,6 +42,8 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq) | |||
42 | { | 42 | { |
43 | writel(0, host->base + MMCICOMMAND); | 43 | writel(0, host->base + MMCICOMMAND); |
44 | 44 | ||
45 | BUG_ON(host->data); | ||
46 | |||
45 | host->mrq = NULL; | 47 | host->mrq = NULL; |
46 | host->cmd = NULL; | 48 | host->cmd = NULL; |
47 | 49 | ||
@@ -198,6 +200,8 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, | |||
198 | } | 200 | } |
199 | 201 | ||
200 | if (!cmd->data || cmd->error != MMC_ERR_NONE) { | 202 | if (!cmd->data || cmd->error != MMC_ERR_NONE) { |
203 | if (host->data) | ||
204 | mmci_stop_data(host); | ||
201 | mmci_request_end(host, cmd->mrq); | 205 | mmci_request_end(host, cmd->mrq); |
202 | } else if (!(cmd->data->flags & MMC_DATA_READ)) { | 206 | } else if (!(cmd->data->flags & MMC_DATA_READ)) { |
203 | mmci_start_data(host, cmd->data); | 207 | mmci_start_data(host, cmd->data); |
@@ -520,15 +524,24 @@ static int mmci_probe(struct amba_device *dev, void *id) | |||
520 | /* | 524 | /* |
521 | * Since we only have a 16-bit data length register, we must | 525 | * Since we only have a 16-bit data length register, we must |
522 | * ensure that we don't exceed 2^16-1 bytes in a single request. | 526 | * ensure that we don't exceed 2^16-1 bytes in a single request. |
523 | * Choose 64 (512-byte) sectors as the limit. | ||
524 | */ | 527 | */ |
525 | mmc->max_sectors = 64; | 528 | mmc->max_req_size = 65535; |
526 | 529 | ||
527 | /* | 530 | /* |
528 | * Set the maximum segment size. Since we aren't doing DMA | 531 | * Set the maximum segment size. Since we aren't doing DMA |
529 | * (yet) we are only limited by the data length register. | 532 | * (yet) we are only limited by the data length register. |
530 | */ | 533 | */ |
531 | mmc->max_seg_size = mmc->max_sectors << 9; | 534 | mmc->max_seg_size = mmc->max_req_size; |
535 | |||
536 | /* | ||
537 | * Block size can be up to 2048 bytes, but must be a power of two. | ||
538 | */ | ||
539 | mmc->max_blk_size = 2048; | ||
540 | |||
541 | /* | ||
542 | * No limit on the number of blocks transferred. | ||
543 | */ | ||
544 | mmc->max_blk_count = mmc->max_req_size; | ||
532 | 545 | ||
533 | spin_lock_init(&host->lock); | 546 | spin_lock_init(&host->lock); |
534 | 547 | ||
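The numbers above follow directly from the MMCI hardware: a 16-bit data length register caps a request (and, without DMA, a segment) at 65535 bytes, while the block size may be any power of two up to 2048 bytes. A tiny check of a proposed transfer against those limits; the helper and its values are illustrative only:

    #include <stdio.h>

    static int mmci_xfer_ok(unsigned int blksz, unsigned int blocks)
    {
        unsigned int max_req_size = 65535;  /* 16-bit data length register */
        unsigned int max_blk_size = 2048;

        if (blksz == 0 || blksz > max_blk_size)
            return 0;
        if (blksz & (blksz - 1))            /* must be a power of two */
            return 0;
        return blksz * blocks <= max_req_size;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               mmci_xfer_ok(512, 127),   /* 65024 bytes: fits          */
               mmci_xfer_ok(512, 128),   /* 65536 bytes: too large     */
               mmci_xfer_ok(768, 10));   /* not a power of two         */
        return 0;
    }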
diff --git a/drivers/mmc/omap.c b/drivers/mmc/omap.c index 435d331e772a..1e96a2f65022 100644 --- a/drivers/mmc/omap.c +++ b/drivers/mmc/omap.c | |||
@@ -91,7 +91,6 @@ | |||
91 | 91 | ||
92 | 92 | ||
93 | #define DRIVER_NAME "mmci-omap" | 93 | #define DRIVER_NAME "mmci-omap" |
94 | #define RSP_TYPE(x) ((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE)) | ||
95 | 94 | ||
96 | /* Specifies how often in millisecs to poll for card status changes | 95 | /* Specifies how often in millisecs to poll for card status changes |
97 | * when the cover switch is open */ | 96 | * when the cover switch is open */ |
@@ -204,18 +203,22 @@ mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd) | |||
204 | cmdtype = 0; | 203 | cmdtype = 0; |
205 | 204 | ||
206 | /* Our hardware needs to know exact type */ | 205 | /* Our hardware needs to know exact type */ |
207 | switch (RSP_TYPE(mmc_resp_type(cmd))) { | 206 | switch (mmc_resp_type(cmd)) { |
208 | case RSP_TYPE(MMC_RSP_R1): | 207 | case MMC_RSP_NONE: |
209 | /* resp 1, resp 1b */ | 208 | break; |
209 | case MMC_RSP_R1: | ||
210 | case MMC_RSP_R1B: | ||
211 | /* resp 1, 1b, 6, 7 */ | ||
210 | resptype = 1; | 212 | resptype = 1; |
211 | break; | 213 | break; |
212 | case RSP_TYPE(MMC_RSP_R2): | 214 | case MMC_RSP_R2: |
213 | resptype = 2; | 215 | resptype = 2; |
214 | break; | 216 | break; |
215 | case RSP_TYPE(MMC_RSP_R3): | 217 | case MMC_RSP_R3: |
216 | resptype = 3; | 218 | resptype = 3; |
217 | break; | 219 | break; |
218 | default: | 220 | default: |
221 | dev_err(mmc_dev(host->mmc), "Invalid response type: %04x\n", mmc_resp_type(cmd)); | ||
219 | break; | 222 | break; |
220 | } | 223 | } |
221 | 224 | ||
@@ -581,9 +584,9 @@ static void mmc_omap_switch_timer(unsigned long arg) | |||
581 | schedule_work(&host->switch_work); | 584 | schedule_work(&host->switch_work); |
582 | } | 585 | } |
583 | 586 | ||
584 | static void mmc_omap_switch_handler(void *data) | 587 | static void mmc_omap_switch_handler(struct work_struct *work) |
585 | { | 588 | { |
586 | struct mmc_omap_host *host = (struct mmc_omap_host *) data; | 589 | struct mmc_omap_host *host = container_of(work, struct mmc_omap_host, switch_work); |
587 | struct mmc_card *card; | 590 | struct mmc_card *card; |
588 | static int complained = 0; | 591 | static int complained = 0; |
589 | int cards = 0, cover_open; | 592 | int cards = 0, cover_open; |
@@ -1096,8 +1099,10 @@ static int __init mmc_omap_probe(struct platform_device *pdev) | |||
1096 | */ | 1099 | */ |
1097 | mmc->max_phys_segs = 32; | 1100 | mmc->max_phys_segs = 32; |
1098 | mmc->max_hw_segs = 32; | 1101 | mmc->max_hw_segs = 32; |
1099 | mmc->max_sectors = 256; /* NBLK max 11-bits, OMAP also limited by DMA */ | 1102 | mmc->max_blk_size = 2048; /* BLEN is 11 bits (+1) */ |
1100 | mmc->max_seg_size = mmc->max_sectors * 512; | 1103 | mmc->max_blk_count = 2048; /* NBLK is 11 bits (+1) */ |
1104 | mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; | ||
1105 | mmc->max_seg_size = mmc->max_req_size; | ||
1101 | 1106 | ||
1102 | if (host->power_pin >= 0) { | 1107 | if (host->power_pin >= 0) { |
1103 | if ((ret = omap_request_gpio(host->power_pin)) != 0) { | 1108 | if ((ret = omap_request_gpio(host->power_pin)) != 0) { |
@@ -1116,7 +1121,7 @@ static int __init mmc_omap_probe(struct platform_device *pdev) | |||
1116 | platform_set_drvdata(pdev, host); | 1121 | platform_set_drvdata(pdev, host); |
1117 | 1122 | ||
1118 | if (host->switch_pin >= 0) { | 1123 | if (host->switch_pin >= 0) { |
1119 | INIT_WORK(&host->switch_work, mmc_omap_switch_handler, host); | 1124 | INIT_WORK(&host->switch_work, mmc_omap_switch_handler); |
1120 | init_timer(&host->switch_timer); | 1125 | init_timer(&host->switch_timer); |
1121 | host->switch_timer.function = mmc_omap_switch_timer; | 1126 | host->switch_timer.function = mmc_omap_switch_timer; |
1122 | host->switch_timer.data = (unsigned long) host; | 1127 | host->switch_timer.data = (unsigned long) host; |
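The OMAP changes above are the standard conversion to the reworked workqueue API: INIT_WORK no longer carries a data pointer, so the handler receives the work_struct itself and recovers its owner with container_of. A schematic of the pattern (kernel-style, trimmed to the relevant lines; the structure and handler names are made up):

    /* Schematic: embedding a work item and recovering the parent structure. */
    struct my_host {
        int                     irq_count;
        struct work_struct      switch_work;
    };

    static void my_switch_handler(struct work_struct *work)
    {
        struct my_host *host =
            container_of(work, struct my_host, switch_work);

        host->irq_count++;      /* act on the owning host */
    }

    /* At probe time:
     *     INIT_WORK(&host->switch_work, my_switch_handler);
     * and later:
     *     schedule_work(&host->switch_work);
     */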
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c index 471e9f4e0530..9774fc68b61a 100644 --- a/drivers/mmc/pxamci.c +++ b/drivers/mmc/pxamci.c | |||
@@ -171,7 +171,7 @@ static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, | |||
171 | 171 | ||
172 | #define RSP_TYPE(x) ((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE)) | 172 | #define RSP_TYPE(x) ((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE)) |
173 | switch (RSP_TYPE(mmc_resp_type(cmd))) { | 173 | switch (RSP_TYPE(mmc_resp_type(cmd))) { |
174 | case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6 */ | 174 | case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */ |
175 | cmdat |= CMDAT_RESP_SHORT; | 175 | cmdat |= CMDAT_RESP_SHORT; |
176 | break; | 176 | break; |
177 | case RSP_TYPE(MMC_RSP_R3): | 177 | case RSP_TYPE(MMC_RSP_R3): |
@@ -355,7 +355,7 @@ static int pxamci_get_ro(struct mmc_host *mmc) | |||
355 | struct pxamci_host *host = mmc_priv(mmc); | 355 | struct pxamci_host *host = mmc_priv(mmc); |
356 | 356 | ||
357 | if (host->pdata && host->pdata->get_ro) | 357 | if (host->pdata && host->pdata->get_ro) |
358 | return host->pdata->get_ro(mmc->dev); | 358 | return host->pdata->get_ro(mmc_dev(mmc)); |
359 | /* Host doesn't support read only detection so assume writeable */ | 359 | /* Host doesn't support read only detection so assume writeable */ |
360 | return 0; | 360 | return 0; |
361 | } | 361 | } |
@@ -383,7 +383,7 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
383 | host->power_mode = ios->power_mode; | 383 | host->power_mode = ios->power_mode; |
384 | 384 | ||
385 | if (host->pdata && host->pdata->setpower) | 385 | if (host->pdata && host->pdata->setpower) |
386 | host->pdata->setpower(mmc->dev, ios->vdd); | 386 | host->pdata->setpower(mmc_dev(mmc), ios->vdd); |
387 | 387 | ||
388 | if (ios->power_mode == MMC_POWER_ON) | 388 | if (ios->power_mode == MMC_POWER_ON) |
389 | host->cmdat |= CMDAT_INIT; | 389 | host->cmdat |= CMDAT_INIT; |
@@ -450,6 +450,16 @@ static int pxamci_probe(struct platform_device *pdev) | |||
450 | */ | 450 | */ |
451 | mmc->max_seg_size = PAGE_SIZE; | 451 | mmc->max_seg_size = PAGE_SIZE; |
452 | 452 | ||
453 | /* | ||
454 | * Block length register is 10 bits. | ||
455 | */ | ||
456 | mmc->max_blk_size = 1023; | ||
457 | |||
458 | /* | ||
459 | * Block count register is 16 bits. | ||
460 | */ | ||
461 | mmc->max_blk_count = 65535; | ||
462 | |||
453 | host = mmc_priv(mmc); | 463 | host = mmc_priv(mmc); |
454 | host->mmc = mmc; | 464 | host->mmc = mmc; |
455 | host->dma = -1; | 465 | host->dma = -1; |
diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c index cd98117632d3..4bf1fea5e2c4 100644 --- a/drivers/mmc/sdhci.c +++ b/drivers/mmc/sdhci.c | |||
@@ -37,6 +37,7 @@ static unsigned int debug_quirks = 0; | |||
37 | #define SDHCI_QUIRK_FORCE_DMA (1<<1) | 37 | #define SDHCI_QUIRK_FORCE_DMA (1<<1) |
38 | /* Controller doesn't like some resets when there is no card inserted. */ | 38 | /* Controller doesn't like some resets when there is no card inserted. */ |
39 | #define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2) | 39 | #define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2) |
40 | #define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3) | ||
40 | 41 | ||
41 | static const struct pci_device_id pci_ids[] __devinitdata = { | 42 | static const struct pci_device_id pci_ids[] __devinitdata = { |
42 | { | 43 | { |
@@ -65,6 +66,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = { | |||
65 | .driver_data = SDHCI_QUIRK_FORCE_DMA, | 66 | .driver_data = SDHCI_QUIRK_FORCE_DMA, |
66 | }, | 67 | }, |
67 | 68 | ||
69 | { | ||
70 | .vendor = PCI_VENDOR_ID_ENE, | ||
71 | .device = PCI_DEVICE_ID_ENE_CB712_SD, | ||
72 | .subvendor = PCI_ANY_ID, | ||
73 | .subdevice = PCI_ANY_ID, | ||
74 | .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE, | ||
75 | }, | ||
76 | |||
68 | { /* Generic SD host controller */ | 77 | { /* Generic SD host controller */ |
69 | PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) | 78 | PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) |
70 | }, | 79 | }, |
@@ -197,15 +206,9 @@ static void sdhci_deactivate_led(struct sdhci_host *host) | |||
197 | * * | 206 | * * |
198 | \*****************************************************************************/ | 207 | \*****************************************************************************/ |
199 | 208 | ||
200 | static inline char* sdhci_kmap_sg(struct sdhci_host* host) | 209 | static inline char* sdhci_sg_to_buffer(struct sdhci_host* host) |
201 | { | 210 | { |
202 | host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ); | 211 | return page_address(host->cur_sg->page) + host->cur_sg->offset; |
203 | return host->mapped_sg + host->cur_sg->offset; | ||
204 | } | ||
205 | |||
206 | static inline void sdhci_kunmap_sg(struct sdhci_host* host) | ||
207 | { | ||
208 | kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ); | ||
209 | } | 212 | } |
210 | 213 | ||
211 | static inline int sdhci_next_sg(struct sdhci_host* host) | 214 | static inline int sdhci_next_sg(struct sdhci_host* host) |
@@ -240,7 +243,7 @@ static void sdhci_read_block_pio(struct sdhci_host *host) | |||
240 | chunk_remain = 0; | 243 | chunk_remain = 0; |
241 | data = 0; | 244 | data = 0; |
242 | 245 | ||
243 | buffer = sdhci_kmap_sg(host) + host->offset; | 246 | buffer = sdhci_sg_to_buffer(host) + host->offset; |
244 | 247 | ||
245 | while (blksize) { | 248 | while (blksize) { |
246 | if (chunk_remain == 0) { | 249 | if (chunk_remain == 0) { |
@@ -264,16 +267,13 @@ static void sdhci_read_block_pio(struct sdhci_host *host) | |||
264 | } | 267 | } |
265 | 268 | ||
266 | if (host->remain == 0) { | 269 | if (host->remain == 0) { |
267 | sdhci_kunmap_sg(host); | ||
268 | if (sdhci_next_sg(host) == 0) { | 270 | if (sdhci_next_sg(host) == 0) { |
269 | BUG_ON(blksize != 0); | 271 | BUG_ON(blksize != 0); |
270 | return; | 272 | return; |
271 | } | 273 | } |
272 | buffer = sdhci_kmap_sg(host); | 274 | buffer = sdhci_sg_to_buffer(host); |
273 | } | 275 | } |
274 | } | 276 | } |
275 | |||
276 | sdhci_kunmap_sg(host); | ||
277 | } | 277 | } |
278 | 278 | ||
279 | static void sdhci_write_block_pio(struct sdhci_host *host) | 279 | static void sdhci_write_block_pio(struct sdhci_host *host) |
@@ -290,7 +290,7 @@ static void sdhci_write_block_pio(struct sdhci_host *host) | |||
290 | data = 0; | 290 | data = 0; |
291 | 291 | ||
292 | bytes = 0; | 292 | bytes = 0; |
293 | buffer = sdhci_kmap_sg(host) + host->offset; | 293 | buffer = sdhci_sg_to_buffer(host) + host->offset; |
294 | 294 | ||
295 | while (blksize) { | 295 | while (blksize) { |
296 | size = min(host->size, host->remain); | 296 | size = min(host->size, host->remain); |
@@ -314,16 +314,13 @@ static void sdhci_write_block_pio(struct sdhci_host *host) | |||
314 | } | 314 | } |
315 | 315 | ||
316 | if (host->remain == 0) { | 316 | if (host->remain == 0) { |
317 | sdhci_kunmap_sg(host); | ||
318 | if (sdhci_next_sg(host) == 0) { | 317 | if (sdhci_next_sg(host) == 0) { |
319 | BUG_ON(blksize != 0); | 318 | BUG_ON(blksize != 0); |
320 | return; | 319 | return; |
321 | } | 320 | } |
322 | buffer = sdhci_kmap_sg(host); | 321 | buffer = sdhci_sg_to_buffer(host); |
323 | } | 322 | } |
324 | } | 323 | } |
325 | |||
326 | sdhci_kunmap_sg(host); | ||
327 | } | 324 | } |
328 | 325 | ||
329 | static void sdhci_transfer_pio(struct sdhci_host *host) | 326 | static void sdhci_transfer_pio(struct sdhci_host *host) |
@@ -372,7 +369,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) | |||
372 | 369 | ||
373 | /* Sanity checks */ | 370 | /* Sanity checks */ |
374 | BUG_ON(data->blksz * data->blocks > 524288); | 371 | BUG_ON(data->blksz * data->blocks > 524288); |
375 | BUG_ON(data->blksz > host->max_block); | 372 | BUG_ON(data->blksz > host->mmc->max_blk_size); |
376 | BUG_ON(data->blocks > 65535); | 373 | BUG_ON(data->blocks > 65535); |
377 | 374 | ||
378 | /* timeout in us */ | 375 | /* timeout in us */ |
@@ -674,10 +671,17 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power) | |||
674 | if (host->power == power) | 671 | if (host->power == power) |
675 | return; | 672 | return; |
676 | 673 | ||
677 | writeb(0, host->ioaddr + SDHCI_POWER_CONTROL); | 674 | if (power == (unsigned short)-1) { |
678 | 675 | writeb(0, host->ioaddr + SDHCI_POWER_CONTROL); | |
679 | if (power == (unsigned short)-1) | ||
680 | goto out; | 676 | goto out; |
677 | } | ||
678 | |||
679 | /* | ||
680 | * Spec says that we should clear the power reg before setting | ||
681 | * a new value. Some controllers don't seem to like this though. | ||
682 | */ | ||
683 | if (!(host->chip->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) | ||
684 | writeb(0, host->ioaddr + SDHCI_POWER_CONTROL); | ||
681 | 685 | ||
682 | pwr = SDHCI_POWER_ON; | 686 | pwr = SDHCI_POWER_ON; |
683 | 687 | ||
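The reordered power sequence above implements the new quirk: by default the spec-mandated "clear, then program" write pair is kept, but controllers flagged SDHCI_QUIRK_SINGLE_POWER_WRITE (the ENE CB712 entry added to the PCI table earlier) get only the final write. A small illustration of that decision, with the register access reduced to a print; apart from the quirk bit itself, the names are made up:

    #include <stdio.h>

    #define QUIRK_SINGLE_POWER_WRITE (1 << 3)

    static void power_reg_write(unsigned char val)
    {
        printf("POWER_CONTROL <- 0x%02x\n", val);   /* stand-in for writeb() */
    }

    static void set_power(unsigned int quirks, unsigned char pwr)
    {
        /* Spec: clear the register before programming a new value ... */
        if (!(quirks & QUIRK_SINGLE_POWER_WRITE))
            power_reg_write(0);
        /* ... unless the controller only tolerates a single write. */
        power_reg_write(pwr);
    }

    int main(void)
    {
        set_power(0, 0x0f);                         /* two writes */
        set_power(QUIRK_SINGLE_POWER_WRITE, 0x0f);  /* one write  */
        return 0;
    }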
@@ -1109,7 +1113,9 @@ static int sdhci_resume (struct pci_dev *pdev) | |||
1109 | 1113 | ||
1110 | pci_set_power_state(pdev, PCI_D0); | 1114 | pci_set_power_state(pdev, PCI_D0); |
1111 | pci_restore_state(pdev); | 1115 | pci_restore_state(pdev); |
1112 | pci_enable_device(pdev); | 1116 | ret = pci_enable_device(pdev); |
1117 | if (ret) | ||
1118 | return ret; | ||
1113 | 1119 | ||
1114 | for (i = 0;i < chip->num_slots;i++) { | 1120 | for (i = 0;i < chip->num_slots;i++) { |
1115 | if (!chip->hosts[i]) | 1121 | if (!chip->hosts[i]) |
@@ -1170,8 +1176,8 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) | |||
1170 | } | 1176 | } |
1171 | 1177 | ||
1172 | if (pci_resource_len(pdev, first_bar + slot) != 0x100) { | 1178 | if (pci_resource_len(pdev, first_bar + slot) != 0x100) { |
1173 | printk(KERN_ERR DRIVER_NAME ": Invalid iomem size. Aborting.\n"); | 1179 | printk(KERN_ERR DRIVER_NAME ": Invalid iomem size. " |
1174 | return -ENODEV; | 1180 | "You may experience problems.\n"); |
1175 | } | 1181 | } |
1176 | 1182 | ||
1177 | if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) { | 1183 | if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) { |
@@ -1274,15 +1280,6 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) | |||
1274 | if (caps & SDHCI_TIMEOUT_CLK_UNIT) | 1280 | if (caps & SDHCI_TIMEOUT_CLK_UNIT) |
1275 | host->timeout_clk *= 1000; | 1281 | host->timeout_clk *= 1000; |
1276 | 1282 | ||
1277 | host->max_block = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT; | ||
1278 | if (host->max_block >= 3) { | ||
1279 | printk(KERN_ERR "%s: Invalid maximum block size.\n", | ||
1280 | host->slot_descr); | ||
1281 | ret = -ENODEV; | ||
1282 | goto unmap; | ||
1283 | } | ||
1284 | host->max_block = 512 << host->max_block; | ||
1285 | |||
1286 | /* | 1283 | /* |
1287 | * Set host parameters. | 1284 | * Set host parameters. |
1288 | */ | 1285 | */ |
@@ -1294,9 +1291,9 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) | |||
1294 | mmc->ocr_avail = 0; | 1291 | mmc->ocr_avail = 0; |
1295 | if (caps & SDHCI_CAN_VDD_330) | 1292 | if (caps & SDHCI_CAN_VDD_330) |
1296 | mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34; | 1293 | mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34; |
1297 | else if (caps & SDHCI_CAN_VDD_300) | 1294 | if (caps & SDHCI_CAN_VDD_300) |
1298 | mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31; | 1295 | mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31; |
1299 | else if (caps & SDHCI_CAN_VDD_180) | 1296 | if (caps & SDHCI_CAN_VDD_180) |
1300 | mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19; | 1297 | mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19; |
1301 | 1298 | ||
1302 | if ((host->max_clk > 25000000) && !(caps & SDHCI_CAN_DO_HISPD)) { | 1299 | if ((host->max_clk > 25000000) && !(caps & SDHCI_CAN_DO_HISPD)) { |
@@ -1326,15 +1323,33 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) | |||
1326 | 1323 | ||
1327 | /* | 1324 | /* |
1328 | * Maximum number of sectors in one transfer. Limited by DMA boundary | 1325 | * Maximum number of sectors in one transfer. Limited by DMA boundary |
1329 | * size (512KiB), which means (512 KiB/512=) 1024 entries. | 1326 | * size (512KiB). |
1330 | */ | 1327 | */ |
1331 | mmc->max_sectors = 1024; | 1328 | mmc->max_req_size = 524288; |
1332 | 1329 | ||
1333 | /* | 1330 | /* |
1334 | * Maximum segment size. Could be one segment with the maximum number | 1331 | * Maximum segment size. Could be one segment with the maximum number |
1335 | * of sectors. | 1332 | * of bytes. |
1333 | */ | ||
1334 | mmc->max_seg_size = mmc->max_req_size; | ||
1335 | |||
1336 | /* | ||
1337 | * Maximum block size. This varies from controller to controller and | ||
1338 | * is specified in the capabilities register. | ||
1339 | */ | ||
1340 | mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT; | ||
1341 | if (mmc->max_blk_size >= 3) { | ||
1342 | printk(KERN_ERR "%s: Invalid maximum block size.\n", | ||
1343 | host->slot_descr); | ||
1344 | ret = -ENODEV; | ||
1345 | goto unmap; | ||
1346 | } | ||
1347 | mmc->max_blk_size = 512 << mmc->max_blk_size; | ||
1348 | |||
1349 | /* | ||
1350 | * Maximum block count. | ||
1336 | */ | 1351 | */ |
1337 | mmc->max_seg_size = mmc->max_sectors * 512; | 1352 | mmc->max_blk_count = 65535; |
1338 | 1353 | ||
1339 | /* | 1354 | /* |
1340 | * Init tasklets. | 1355 | * Init tasklets. |
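The block-size limit now lives in mmc->max_blk_size and is decoded from the capabilities register exactly as the removed host->max_block code did: the two-bit field selects 512, 1024 or 2048 bytes (512 << n), and the value 3 is a reserved encoding that makes the slot unusable. The decode in isolation (the mask/shift constants themselves are not reproduced here):

    #include <stdio.h>

    /* Decode the 2-bit "max block length" capability field. */
    static int decode_max_blk_size(unsigned int field)
    {
        if (field >= 3)
            return -1;          /* reserved encoding: reject the slot */
        return 512 << field;    /* 0 -> 512, 1 -> 1024, 2 -> 2048 */
    }

    int main(void)
    {
        unsigned int f;

        for (f = 0; f < 4; f++)
            printf("field %u -> %d bytes\n", f, decode_max_blk_size(f));
        return 0;
    }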
diff --git a/drivers/mmc/sdhci.h b/drivers/mmc/sdhci.h index f9d1a0a6f03a..e324f0a623dc 100644 --- a/drivers/mmc/sdhci.h +++ b/drivers/mmc/sdhci.h | |||
@@ -174,7 +174,6 @@ struct sdhci_host { | |||
174 | 174 | ||
175 | unsigned int max_clk; /* Max possible freq (MHz) */ | 175 | unsigned int max_clk; /* Max possible freq (MHz) */ |
176 | unsigned int timeout_clk; /* Timeout freq (KHz) */ | 176 | unsigned int timeout_clk; /* Timeout freq (KHz) */ |
177 | unsigned int max_block; /* Max block size (bytes) */ | ||
178 | 177 | ||
179 | unsigned int clock; /* Current clock (MHz) */ | 178 | unsigned int clock; /* Current clock (MHz) */ |
180 | unsigned short power; /* Current voltage */ | 179 | unsigned short power; /* Current voltage */ |
@@ -184,7 +183,6 @@ struct sdhci_host { | |||
184 | struct mmc_data *data; /* Current data request */ | 183 | struct mmc_data *data; /* Current data request */ |
185 | 184 | ||
186 | struct scatterlist *cur_sg; /* We're working on this */ | 185 | struct scatterlist *cur_sg; /* We're working on this */ |
187 | char *mapped_sg; /* This is where it's mapped */ | ||
188 | int num_sg; /* Entries left */ | 186 | int num_sg; /* Entries left */ |
189 | int offset; /* Offset into current sg */ | 187 | int offset; /* Offset into current sg */ |
190 | int remain; /* Bytes left in current */ | 188 | int remain; /* Bytes left in current */ |
diff --git a/drivers/mmc/tifm_sd.c b/drivers/mmc/tifm_sd.c index e846499a004c..e65f8a0a9349 100644 --- a/drivers/mmc/tifm_sd.c +++ b/drivers/mmc/tifm_sd.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <asm/io.h> | 17 | #include <asm/io.h> |
18 | 18 | ||
19 | #define DRIVER_NAME "tifm_sd" | 19 | #define DRIVER_NAME "tifm_sd" |
20 | #define DRIVER_VERSION "0.6" | 20 | #define DRIVER_VERSION "0.7" |
21 | 21 | ||
22 | static int no_dma = 0; | 22 | static int no_dma = 0; |
23 | static int fixed_timeout = 0; | 23 | static int fixed_timeout = 0; |
@@ -79,7 +79,6 @@ typedef enum { | |||
79 | 79 | ||
80 | enum { | 80 | enum { |
81 | FIFO_RDY = 0x0001, /* hardware dependent value */ | 81 | FIFO_RDY = 0x0001, /* hardware dependent value */ |
82 | HOST_REG = 0x0002, | ||
83 | EJECT = 0x0004, | 82 | EJECT = 0x0004, |
84 | EJECT_DONE = 0x0008, | 83 | EJECT_DONE = 0x0008, |
85 | CARD_BUSY = 0x0010, | 84 | CARD_BUSY = 0x0010, |
@@ -95,46 +94,53 @@ struct tifm_sd { | |||
95 | card_state_t state; | 94 | card_state_t state; |
96 | unsigned int clk_freq; | 95 | unsigned int clk_freq; |
97 | unsigned int clk_div; | 96 | unsigned int clk_div; |
98 | unsigned long timeout_jiffies; // software timeout - 2 sec | 97 | unsigned long timeout_jiffies; |
99 | 98 | ||
99 | struct tasklet_struct finish_tasklet; | ||
100 | struct timer_list timer; | ||
100 | struct mmc_request *req; | 101 | struct mmc_request *req; |
101 | struct work_struct cmd_handler; | 102 | wait_queue_head_t notify; |
102 | struct delayed_work abort_handler; | ||
103 | wait_queue_head_t can_eject; | ||
104 | 103 | ||
105 | size_t written_blocks; | 104 | size_t written_blocks; |
106 | char *buffer; | ||
107 | size_t buffer_size; | 105 | size_t buffer_size; |
108 | size_t buffer_pos; | 106 | size_t buffer_pos; |
109 | 107 | ||
110 | }; | 108 | }; |
111 | 109 | ||
110 | static char* tifm_sd_data_buffer(struct mmc_data *data) | ||
111 | { | ||
112 | return page_address(data->sg->page) + data->sg->offset; | ||
113 | } | ||
114 | |||
112 | static int tifm_sd_transfer_data(struct tifm_dev *sock, struct tifm_sd *host, | 115 | static int tifm_sd_transfer_data(struct tifm_dev *sock, struct tifm_sd *host, |
113 | unsigned int host_status) | 116 | unsigned int host_status) |
114 | { | 117 | { |
115 | struct mmc_command *cmd = host->req->cmd; | 118 | struct mmc_command *cmd = host->req->cmd; |
116 | unsigned int t_val = 0, cnt = 0; | 119 | unsigned int t_val = 0, cnt = 0; |
120 | char *buffer; | ||
117 | 121 | ||
118 | if (host_status & TIFM_MMCSD_BRS) { | 122 | if (host_status & TIFM_MMCSD_BRS) { |
119 | /* in non-dma rx mode BRS fires when fifo is still not empty */ | 123 | /* in non-dma rx mode BRS fires when fifo is still not empty */ |
120 | if (host->buffer && (cmd->data->flags & MMC_DATA_READ)) { | 124 | if (no_dma && (cmd->data->flags & MMC_DATA_READ)) { |
125 | buffer = tifm_sd_data_buffer(host->req->data); | ||
121 | while (host->buffer_size > host->buffer_pos) { | 126 | while (host->buffer_size > host->buffer_pos) { |
122 | t_val = readl(sock->addr + SOCK_MMCSD_DATA); | 127 | t_val = readl(sock->addr + SOCK_MMCSD_DATA); |
123 | host->buffer[host->buffer_pos++] = t_val & 0xff; | 128 | buffer[host->buffer_pos++] = t_val & 0xff; |
124 | host->buffer[host->buffer_pos++] = | 129 | buffer[host->buffer_pos++] = |
125 | (t_val >> 8) & 0xff; | 130 | (t_val >> 8) & 0xff; |
126 | } | 131 | } |
127 | } | 132 | } |
128 | return 1; | 133 | return 1; |
129 | } else if (host->buffer) { | 134 | } else if (no_dma) { |
135 | buffer = tifm_sd_data_buffer(host->req->data); | ||
130 | if ((cmd->data->flags & MMC_DATA_READ) && | 136 | if ((cmd->data->flags & MMC_DATA_READ) && |
131 | (host_status & TIFM_MMCSD_AF)) { | 137 | (host_status & TIFM_MMCSD_AF)) { |
132 | for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) { | 138 | for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) { |
133 | t_val = readl(sock->addr + SOCK_MMCSD_DATA); | 139 | t_val = readl(sock->addr + SOCK_MMCSD_DATA); |
134 | if (host->buffer_size > host->buffer_pos) { | 140 | if (host->buffer_size > host->buffer_pos) { |
135 | host->buffer[host->buffer_pos++] = | 141 | buffer[host->buffer_pos++] = |
136 | t_val & 0xff; | 142 | t_val & 0xff; |
137 | host->buffer[host->buffer_pos++] = | 143 | buffer[host->buffer_pos++] = |
138 | (t_val >> 8) & 0xff; | 144 | (t_val >> 8) & 0xff; |
139 | } | 145 | } |
140 | } | 146 | } |
@@ -142,11 +148,12 @@ static int tifm_sd_transfer_data(struct tifm_dev *sock, struct tifm_sd *host, | |||
142 | && (host_status & TIFM_MMCSD_AE)) { | 148 | && (host_status & TIFM_MMCSD_AE)) { |
143 | for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) { | 149 | for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) { |
144 | if (host->buffer_size > host->buffer_pos) { | 150 | if (host->buffer_size > host->buffer_pos) { |
145 | t_val = host->buffer[host->buffer_pos++] & 0x00ff; | 151 | t_val = buffer[host->buffer_pos++] |
146 | t_val |= ((host->buffer[host->buffer_pos++]) << 8) | 152 | & 0x00ff; |
147 | & 0xff00; | 153 | t_val |= ((buffer[host->buffer_pos++]) |
154 | << 8) & 0xff00; | ||
148 | writel(t_val, | 155 | writel(t_val, |
149 | sock->addr + SOCK_MMCSD_DATA); | 156 | sock->addr + SOCK_MMCSD_DATA); |
150 | } | 157 | } |
151 | } | 158 | } |
152 | } | 159 | } |
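The PIO paths above move data through a FIFO register that carries 16 bits of payload per access, so every read is split into two consecutive buffer bytes and every write is assembled from two. The byte order used above, on its own (helper names are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    /* Unpack one FIFO word into two buffer bytes, low byte first. */
    static void fifo_to_buf(uint32_t t_val, uint8_t *buf, size_t *pos)
    {
        buf[(*pos)++] = t_val & 0xff;
        buf[(*pos)++] = (t_val >> 8) & 0xff;
    }

    /* Assemble the next two buffer bytes into a FIFO word. */
    static uint32_t buf_to_fifo(const uint8_t *buf, size_t *pos)
    {
        uint32_t t_val = buf[(*pos)++] & 0x00ff;

        t_val |= ((uint32_t)buf[(*pos)++] << 8) & 0xff00;
        return t_val;
    }

    int main(void)
    {
        uint8_t buf[4] = { 0 };
        size_t pos = 0;

        fifo_to_buf(0xBEEF, buf, &pos);
        pos = 0;
        printf("0x%04x\n", buf_to_fifo(buf, &pos));  /* prints 0xbeef */
        return 0;
    }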
@@ -173,9 +180,6 @@ static unsigned int tifm_sd_op_flags(struct mmc_command *cmd) | |||
173 | case MMC_RSP_R3: | 180 | case MMC_RSP_R3: |
174 | rc |= TIFM_MMCSD_RSP_R3; | 181 | rc |= TIFM_MMCSD_RSP_R3; |
175 | break; | 182 | break; |
176 | case MMC_RSP_R6: | ||
177 | rc |= TIFM_MMCSD_RSP_R6; | ||
178 | break; | ||
179 | default: | 183 | default: |
180 | BUG(); | 184 | BUG(); |
181 | } | 185 | } |
@@ -209,7 +213,7 @@ static void tifm_sd_exec(struct tifm_sd *host, struct mmc_command *cmd) | |||
209 | cmd_mask |= TIFM_MMCSD_READ; | 213 | cmd_mask |= TIFM_MMCSD_READ; |
210 | 214 | ||
211 | dev_dbg(&sock->dev, "executing opcode 0x%x, arg: 0x%x, mask: 0x%x\n", | 215 | dev_dbg(&sock->dev, "executing opcode 0x%x, arg: 0x%x, mask: 0x%x\n", |
212 | cmd->opcode, cmd->arg, cmd_mask); | 216 | cmd->opcode, cmd->arg, cmd_mask); |
213 | 217 | ||
214 | writel((cmd->arg >> 16) & 0xffff, sock->addr + SOCK_MMCSD_ARG_HIGH); | 218 | writel((cmd->arg >> 16) & 0xffff, sock->addr + SOCK_MMCSD_ARG_HIGH); |
215 | writel(cmd->arg & 0xffff, sock->addr + SOCK_MMCSD_ARG_LOW); | 219 | writel(cmd->arg & 0xffff, sock->addr + SOCK_MMCSD_ARG_LOW); |
@@ -242,65 +246,78 @@ change_state: | |||
242 | tifm_sd_fetch_resp(cmd, sock); | 246 | tifm_sd_fetch_resp(cmd, sock); |
243 | if (cmd->data) { | 247 | if (cmd->data) { |
244 | host->state = BRS; | 248 | host->state = BRS; |
245 | } else | 249 | } else { |
246 | host->state = READY; | 250 | host->state = READY; |
251 | } | ||
247 | goto change_state; | 252 | goto change_state; |
248 | } | 253 | } |
249 | break; | 254 | break; |
250 | case BRS: | 255 | case BRS: |
251 | if (tifm_sd_transfer_data(sock, host, host_status)) { | 256 | if (tifm_sd_transfer_data(sock, host, host_status)) { |
252 | if (!host->req->stop) { | 257 | if (cmd->data->flags & MMC_DATA_WRITE) { |
253 | if (cmd->data->flags & MMC_DATA_WRITE) { | 258 | host->state = CARD; |
254 | host->state = CARD; | 259 | } else { |
260 | if (no_dma) { | ||
261 | if (host->req->stop) { | ||
262 | tifm_sd_exec(host, host->req->stop); | ||
263 | host->state = SCMD; | ||
264 | } else { | ||
265 | host->state = READY; | ||
266 | } | ||
255 | } else { | 267 | } else { |
256 | host->state = | 268 | host->state = FIFO; |
257 | host->buffer ? READY : FIFO; | ||
258 | } | 269 | } |
259 | goto change_state; | ||
260 | } | 270 | } |
261 | tifm_sd_exec(host, host->req->stop); | 271 | goto change_state; |
262 | host->state = SCMD; | ||
263 | } | 272 | } |
264 | break; | 273 | break; |
265 | case SCMD: | 274 | case SCMD: |
266 | if (host_status & TIFM_MMCSD_EOC) { | 275 | if (host_status & TIFM_MMCSD_EOC) { |
267 | tifm_sd_fetch_resp(host->req->stop, sock); | 276 | tifm_sd_fetch_resp(host->req->stop, sock); |
268 | if (cmd->error) { | 277 | host->state = READY; |
269 | host->state = READY; | ||
270 | } else if (cmd->data->flags & MMC_DATA_WRITE) { | ||
271 | host->state = CARD; | ||
272 | } else { | ||
273 | host->state = host->buffer ? READY : FIFO; | ||
274 | } | ||
275 | goto change_state; | 278 | goto change_state; |
276 | } | 279 | } |
277 | break; | 280 | break; |
278 | case CARD: | 281 | case CARD: |
282 | dev_dbg(&sock->dev, "waiting for CARD, have %zd blocks\n", | ||
283 | host->written_blocks); | ||
279 | if (!(host->flags & CARD_BUSY) | 284 | if (!(host->flags & CARD_BUSY) |
280 | && (host->written_blocks == cmd->data->blocks)) { | 285 | && (host->written_blocks == cmd->data->blocks)) { |
281 | host->state = host->buffer ? READY : FIFO; | 286 | if (no_dma) { |
287 | if (host->req->stop) { | ||
288 | tifm_sd_exec(host, host->req->stop); | ||
289 | host->state = SCMD; | ||
290 | } else { | ||
291 | host->state = READY; | ||
292 | } | ||
293 | } else { | ||
294 | host->state = FIFO; | ||
295 | } | ||
282 | goto change_state; | 296 | goto change_state; |
283 | } | 297 | } |
284 | break; | 298 | break; |
285 | case FIFO: | 299 | case FIFO: |
286 | if (host->flags & FIFO_RDY) { | 300 | if (host->flags & FIFO_RDY) { |
287 | host->state = READY; | ||
288 | host->flags &= ~FIFO_RDY; | 301 | host->flags &= ~FIFO_RDY; |
302 | if (host->req->stop) { | ||
303 | tifm_sd_exec(host, host->req->stop); | ||
304 | host->state = SCMD; | ||
305 | } else { | ||
306 | host->state = READY; | ||
307 | } | ||
289 | goto change_state; | 308 | goto change_state; |
290 | } | 309 | } |
291 | break; | 310 | break; |
292 | case READY: | 311 | case READY: |
293 | queue_work(sock->wq, &host->cmd_handler); | 312 | tasklet_schedule(&host->finish_tasklet); |
294 | return; | 313 | return; |
295 | } | 314 | } |
296 | 315 | ||
297 | queue_delayed_work(sock->wq, &host->abort_handler, | ||
298 | host->timeout_jiffies); | ||
299 | } | 316 | } |
300 | 317 | ||
301 | /* Called from interrupt handler */ | 318 | /* Called from interrupt handler */ |
302 | static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock, | 319 | static void tifm_sd_signal_irq(struct tifm_dev *sock, |
303 | unsigned int sock_irq_status) | 320 | unsigned int sock_irq_status) |
304 | { | 321 | { |
305 | struct tifm_sd *host; | 322 | struct tifm_sd *host; |
306 | unsigned int host_status = 0, fifo_status = 0; | 323 | unsigned int host_status = 0, fifo_status = 0; |
@@ -308,7 +325,6 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock, | |||
308 | 325 | ||
309 | spin_lock(&sock->lock); | 326 | spin_lock(&sock->lock); |
310 | host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock)); | 327 | host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock)); |
311 | cancel_delayed_work(&host->abort_handler); | ||
312 | 328 | ||
313 | if (sock_irq_status & FIFO_EVENT) { | 329 | if (sock_irq_status & FIFO_EVENT) { |
314 | fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS); | 330 | fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS); |
@@ -321,19 +337,17 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock, | |||
321 | host_status = readl(sock->addr + SOCK_MMCSD_STATUS); | 337 | host_status = readl(sock->addr + SOCK_MMCSD_STATUS); |
322 | writel(host_status, sock->addr + SOCK_MMCSD_STATUS); | 338 | writel(host_status, sock->addr + SOCK_MMCSD_STATUS); |
323 | 339 | ||
324 | if (!(host->flags & HOST_REG)) | ||
325 | queue_work(sock->wq, &host->cmd_handler); | ||
326 | if (!host->req) | 340 | if (!host->req) |
327 | goto done; | 341 | goto done; |
328 | 342 | ||
329 | if (host_status & TIFM_MMCSD_ERRMASK) { | 343 | if (host_status & TIFM_MMCSD_ERRMASK) { |
330 | if (host_status & TIFM_MMCSD_CERR) | 344 | if (host_status & TIFM_MMCSD_CERR) |
331 | error_code = MMC_ERR_FAILED; | 345 | error_code = MMC_ERR_FAILED; |
332 | else if (host_status & | 346 | else if (host_status |
333 | (TIFM_MMCSD_CTO | TIFM_MMCSD_DTO)) | 347 | & (TIFM_MMCSD_CTO | TIFM_MMCSD_DTO)) |
334 | error_code = MMC_ERR_TIMEOUT; | 348 | error_code = MMC_ERR_TIMEOUT; |
335 | else if (host_status & | 349 | else if (host_status |
336 | (TIFM_MMCSD_CCRC | TIFM_MMCSD_DCRC)) | 350 | & (TIFM_MMCSD_CCRC | TIFM_MMCSD_DCRC)) |
337 | error_code = MMC_ERR_BADCRC; | 351 | error_code = MMC_ERR_BADCRC; |
338 | 352 | ||
339 | writel(TIFM_FIFO_INT_SETALL, | 353 | writel(TIFM_FIFO_INT_SETALL, |
@@ -343,12 +357,11 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock, | |||
343 | if (host->req->stop) { | 357 | if (host->req->stop) { |
344 | if (host->state == SCMD) { | 358 | if (host->state == SCMD) { |
345 | host->req->stop->error = error_code; | 359 | host->req->stop->error = error_code; |
346 | } else if(host->state == BRS) { | 360 | } else if (host->state == BRS |
361 | || host->state == CARD | ||
362 | || host->state == FIFO) { | ||
347 | host->req->cmd->error = error_code; | 363 | host->req->cmd->error = error_code; |
348 | tifm_sd_exec(host, host->req->stop); | 364 | tifm_sd_exec(host, host->req->stop); |
349 | queue_delayed_work(sock->wq, | ||
350 | &host->abort_handler, | ||
351 | host->timeout_jiffies); | ||
352 | host->state = SCMD; | 365 | host->state = SCMD; |
353 | goto done; | 366 | goto done; |
354 | } else { | 367 | } else { |
@@ -362,8 +375,8 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock, | |||
362 | 375 | ||
363 | if (host_status & TIFM_MMCSD_CB) | 376 | if (host_status & TIFM_MMCSD_CB) |
364 | host->flags |= CARD_BUSY; | 377 | host->flags |= CARD_BUSY; |
365 | if ((host_status & TIFM_MMCSD_EOFB) && | 378 | if ((host_status & TIFM_MMCSD_EOFB) |
366 | (host->flags & CARD_BUSY)) { | 379 | && (host->flags & CARD_BUSY)) { |
367 | host->written_blocks++; | 380 | host->written_blocks++; |
368 | host->flags &= ~CARD_BUSY; | 381 | host->flags &= ~CARD_BUSY; |
369 | } | 382 | } |
@@ -373,22 +386,22 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock, | |||
373 | tifm_sd_process_cmd(sock, host, host_status); | 386 | tifm_sd_process_cmd(sock, host, host_status); |
374 | done: | 387 | done: |
375 | dev_dbg(&sock->dev, "host_status %x, fifo_status %x\n", | 388 | dev_dbg(&sock->dev, "host_status %x, fifo_status %x\n", |
376 | host_status, fifo_status); | 389 | host_status, fifo_status); |
377 | spin_unlock(&sock->lock); | 390 | spin_unlock(&sock->lock); |
378 | return sock_irq_status; | ||
379 | } | 391 | } |
380 | 392 | ||
381 | static void tifm_sd_prepare_data(struct tifm_sd *card, struct mmc_command *cmd) | 393 | static void tifm_sd_prepare_data(struct tifm_sd *host, struct mmc_command *cmd) |
382 | { | 394 | { |
383 | struct tifm_dev *sock = card->dev; | 395 | struct tifm_dev *sock = host->dev; |
384 | unsigned int dest_cnt; | 396 | unsigned int dest_cnt; |
385 | 397 | ||
386 | /* DMA style IO */ | 398 | /* DMA style IO */ |
387 | 399 | dev_dbg(&sock->dev, "setting dma for %d blocks\n", | |
400 | cmd->data->blocks); | ||
388 | writel(TIFM_FIFO_INT_SETALL, | 401 | writel(TIFM_FIFO_INT_SETALL, |
389 | sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); | 402 | sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); |
390 | writel(long_log2(cmd->data->blksz) - 2, | 403 | writel(ilog2(cmd->data->blksz) - 2, |
391 | sock->addr + SOCK_FIFO_PAGE_SIZE); | 404 | sock->addr + SOCK_FIFO_PAGE_SIZE); |
392 | writel(TIFM_FIFO_ENABLE, sock->addr + SOCK_FIFO_CONTROL); | 405 | writel(TIFM_FIFO_ENABLE, sock->addr + SOCK_FIFO_CONTROL); |
393 | writel(TIFM_FIFO_INTMASK, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); | 406 | writel(TIFM_FIFO_INTMASK, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); |
394 | 407 | ||
@@ -402,7 +415,7 @@ static void tifm_sd_prepare_data(struct tifm_sd *card, struct mmc_command *cmd) | |||
402 | if (cmd->data->flags & MMC_DATA_WRITE) { | 415 | if (cmd->data->flags & MMC_DATA_WRITE) { |
403 | writel(TIFM_MMCSD_TXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); | 416 | writel(TIFM_MMCSD_TXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); |
404 | writel(dest_cnt | TIFM_DMA_TX | TIFM_DMA_EN, | 417 | writel(dest_cnt | TIFM_DMA_TX | TIFM_DMA_EN, |
405 | sock->addr + SOCK_DMA_CONTROL); | 418 | sock->addr + SOCK_DMA_CONTROL); |
406 | } else { | 419 | } else { |
407 | writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); | 420 | writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); |
408 | writel(dest_cnt | TIFM_DMA_EN, sock->addr + SOCK_DMA_CONTROL); | 421 | writel(dest_cnt | TIFM_DMA_EN, sock->addr + SOCK_DMA_CONTROL); |
@@ -410,7 +423,7 @@ static void tifm_sd_prepare_data(struct tifm_sd *card, struct mmc_command *cmd) | |||
410 | } | 423 | } |
411 | 424 | ||
412 | static void tifm_sd_set_data_timeout(struct tifm_sd *host, | 425 | static void tifm_sd_set_data_timeout(struct tifm_sd *host, |
413 | struct mmc_data *data) | 426 | struct mmc_data *data) |
414 | { | 427 | { |
415 | struct tifm_dev *sock = host->dev; | 428 | struct tifm_dev *sock = host->dev; |
416 | unsigned int data_timeout = data->timeout_clks; | 429 | unsigned int data_timeout = data->timeout_clks; |
@@ -419,22 +432,21 @@ static void tifm_sd_set_data_timeout(struct tifm_sd *host, | |||
419 | return; | 432 | return; |
420 | 433 | ||
421 | data_timeout += data->timeout_ns / | 434 | data_timeout += data->timeout_ns / |
422 | ((1000000000 / host->clk_freq) * host->clk_div); | 435 | ((1000000000UL / host->clk_freq) * host->clk_div); |
423 | data_timeout *= 10; // call it fudge factor for now | ||
424 | 436 | ||
425 | if (data_timeout < 0xffff) { | 437 | if (data_timeout < 0xffff) { |
426 | writel((~TIFM_MMCSD_DPE) & | ||
427 | readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG), | ||
428 | sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG); | ||
429 | writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO); | 438 | writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO); |
439 | writel((~TIFM_MMCSD_DPE) | ||
440 | & readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG), | ||
441 | sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG); | ||
430 | } else { | 442 | } else { |
431 | writel(TIFM_MMCSD_DPE | | ||
432 | readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG), | ||
433 | sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG); | ||
434 | data_timeout = (data_timeout >> 10) + 1; | 443 | data_timeout = (data_timeout >> 10) + 1; |
435 | if(data_timeout > 0xffff) | 444 | if (data_timeout > 0xffff) |
436 | data_timeout = 0; /* set to unlimited */ | 445 | data_timeout = 0; /* set to unlimited */ |
437 | writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO); | 446 | writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO); |
447 | writel(TIFM_MMCSD_DPE | ||
448 | | readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG), | ||
449 | sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG); | ||
438 | } | 450 | } |
439 | } | 451 | } |
440 | 452 | ||
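The timeout programming above converts the card's timeout_ns into ticks of the current MMC clock (clk_freq divided by clk_div) and, if the result does not fit the 16-bit SOCK_MMCSD_DATA_TO register, scales it down by 1024 and enables the prescaler bit (TIFM_MMCSD_DPE) instead; a register value of 0 means unlimited. The arithmetic on its own, with illustrative structure and function names:

    #include <stdio.h>

    struct sd_timeout {
        unsigned int reg;   /* value for the 16-bit timeout register */
        int prescale;       /* whether the x1024 prescaler bit is set */
    };

    static struct sd_timeout compute_timeout(unsigned int timeout_clks,
                                             unsigned long timeout_ns,
                                             unsigned int clk_freq,
                                             unsigned int clk_div)
    {
        struct sd_timeout t = { 0, 0 };
        unsigned long ticks = timeout_clks
            + timeout_ns / ((1000000000UL / clk_freq) * clk_div);

        if (ticks < 0xffff) {
            t.reg = ticks;
        } else {
            t.prescale = 1;
            t.reg = (ticks >> 10) + 1;
            if (t.reg > 0xffff)
                t.reg = 0;          /* unlimited */
        }
        return t;
    }

    int main(void)
    {
        /* e.g. 100 ms at 20 MHz with divider 4 */
        struct sd_timeout t = compute_timeout(0, 100000000UL, 20000000, 4);

        printf("reg=%u prescale=%d\n", t.reg, t.prescale);
        return 0;
    }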
@@ -477,11 +489,10 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
477 | } | 489 | } |
478 | 490 | ||
479 | host->req = mrq; | 491 | host->req = mrq; |
492 | mod_timer(&host->timer, jiffies + host->timeout_jiffies); | ||
480 | host->state = CMD; | 493 | host->state = CMD; |
481 | queue_delayed_work(sock->wq, &host->abort_handler, | ||
482 | host->timeout_jiffies); | ||
483 | writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), | 494 | writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), |
484 | sock->addr + SOCK_CONTROL); | 495 | sock->addr + SOCK_CONTROL); |
485 | tifm_sd_exec(host, mrq->cmd); | 496 | tifm_sd_exec(host, mrq->cmd); |
486 | spin_unlock_irqrestore(&sock->lock, flags); | 497 | spin_unlock_irqrestore(&sock->lock, flags); |
487 | return; | 498 | return; |
@@ -496,9 +507,9 @@ err_out: | |||
496 | mmc_request_done(mmc, mrq); | 507 | mmc_request_done(mmc, mrq); |
497 | } | 508 | } |
498 | 509 | ||
499 | static void tifm_sd_end_cmd(struct work_struct *work) | 510 | static void tifm_sd_end_cmd(unsigned long data) |
500 | { | 511 | { |
501 | struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); | 512 | struct tifm_sd *host = (struct tifm_sd*)data; |
502 | struct tifm_dev *sock = host->dev; | 513 | struct tifm_dev *sock = host->dev; |
503 | struct mmc_host *mmc = tifm_get_drvdata(sock); | 514 | struct mmc_host *mmc = tifm_get_drvdata(sock); |
504 | struct mmc_request *mrq; | 515 | struct mmc_request *mrq; |
@@ -507,6 +518,7 @@ static void tifm_sd_end_cmd(struct work_struct *work) | |||
507 | 518 | ||
508 | spin_lock_irqsave(&sock->lock, flags); | 519 | spin_lock_irqsave(&sock->lock, flags); |
509 | 520 | ||
521 | del_timer(&host->timer); | ||
510 | mrq = host->req; | 522 | mrq = host->req; |
511 | host->req = NULL; | 523 | host->req = NULL; |
512 | host->state = IDLE; | 524 | host->state = IDLE; |
@@ -520,8 +532,8 @@ static void tifm_sd_end_cmd(struct work_struct *work) | |||
520 | r_data = mrq->cmd->data; | 532 | r_data = mrq->cmd->data; |
521 | if (r_data) { | 533 | if (r_data) { |
522 | if (r_data->flags & MMC_DATA_WRITE) { | 534 | if (r_data->flags & MMC_DATA_WRITE) { |
523 | r_data->bytes_xfered = host->written_blocks * | 535 | r_data->bytes_xfered = host->written_blocks |
524 | r_data->blksz; | 536 | * r_data->blksz; |
525 | } else { | 537 | } else { |
526 | r_data->bytes_xfered = r_data->blocks - | 538 | r_data->bytes_xfered = r_data->blocks - |
527 | readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1; | 539 | readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1; |
@@ -535,7 +547,7 @@ static void tifm_sd_end_cmd(struct work_struct *work) | |||
535 | } | 547 | } |
536 | 548 | ||
537 | writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), | 549 | writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), |
538 | sock->addr + SOCK_CONTROL); | 550 | sock->addr + SOCK_CONTROL); |
539 | 551 | ||
540 | spin_unlock_irqrestore(&sock->lock, flags); | 552 | spin_unlock_irqrestore(&sock->lock, flags); |
541 | mmc_request_done(mmc, mrq); | 553 | mmc_request_done(mmc, mrq); |
@@ -547,15 +559,6 @@ static void tifm_sd_request_nodma(struct mmc_host *mmc, struct mmc_request *mrq) | |||
547 | struct tifm_dev *sock = host->dev; | 559 | struct tifm_dev *sock = host->dev; |
548 | unsigned long flags; | 560 | unsigned long flags; |
549 | struct mmc_data *r_data = mrq->cmd->data; | 561 | struct mmc_data *r_data = mrq->cmd->data; |
550 | char *t_buffer = NULL; | ||
551 | |||
552 | if (r_data) { | ||
553 | t_buffer = kmap(r_data->sg->page); | ||
554 | if (!t_buffer) { | ||
555 | printk(KERN_ERR DRIVER_NAME ": kmap failed\n"); | ||
556 | goto err_out; | ||
557 | } | ||
558 | } | ||
559 | 562 | ||
560 | spin_lock_irqsave(&sock->lock, flags); | 563 | spin_lock_irqsave(&sock->lock, flags); |
561 | if (host->flags & EJECT) { | 564 | if (host->flags & EJECT) { |
@@ -572,15 +575,14 @@ static void tifm_sd_request_nodma(struct mmc_host *mmc, struct mmc_request *mrq) | |||
572 | if (r_data) { | 575 | if (r_data) { |
573 | tifm_sd_set_data_timeout(host, r_data); | 576 | tifm_sd_set_data_timeout(host, r_data); |
574 | 577 | ||
575 | host->buffer = t_buffer + r_data->sg->offset; | 578 | host->buffer_size = mrq->cmd->data->blocks |
576 | host->buffer_size = mrq->cmd->data->blocks * | 579 | * mrq->cmd->data->blksz; |
577 | mrq->cmd->data->blksz; | ||
578 | 580 | ||
579 | writel(TIFM_MMCSD_BUFINT | | 581 | writel(TIFM_MMCSD_BUFINT |
580 | readl(sock->addr + SOCK_MMCSD_INT_ENABLE), | 582 | | readl(sock->addr + SOCK_MMCSD_INT_ENABLE), |
581 | sock->addr + SOCK_MMCSD_INT_ENABLE); | 583 | sock->addr + SOCK_MMCSD_INT_ENABLE); |
582 | writel(((TIFM_MMCSD_FIFO_SIZE - 1) << 8) | | 584 | writel(((TIFM_MMCSD_FIFO_SIZE - 1) << 8) |
583 | (TIFM_MMCSD_FIFO_SIZE - 1), | 585 | | (TIFM_MMCSD_FIFO_SIZE - 1), |
584 | sock->addr + SOCK_MMCSD_BUFFER_CONFIG); | 586 | sock->addr + SOCK_MMCSD_BUFFER_CONFIG); |
585 | 587 | ||
586 | host->written_blocks = 0; | 588 | host->written_blocks = 0; |
@@ -591,26 +593,22 @@ static void tifm_sd_request_nodma(struct mmc_host *mmc, struct mmc_request *mrq) | |||
591 | } | 593 | } |
592 | 594 | ||
593 | host->req = mrq; | 595 | host->req = mrq; |
596 | mod_timer(&host->timer, jiffies + host->timeout_jiffies); | ||
594 | host->state = CMD; | 597 | host->state = CMD; |
595 | queue_delayed_work(sock->wq, &host->abort_handler, | ||
596 | host->timeout_jiffies); | ||
597 | writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), | 598 | writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), |
598 | sock->addr + SOCK_CONTROL); | 599 | sock->addr + SOCK_CONTROL); |
599 | tifm_sd_exec(host, mrq->cmd); | 600 | tifm_sd_exec(host, mrq->cmd); |
600 | spin_unlock_irqrestore(&sock->lock, flags); | 601 | spin_unlock_irqrestore(&sock->lock, flags); |
601 | return; | 602 | return; |
602 | 603 | ||
603 | err_out: | 604 | err_out: |
604 | if (t_buffer) | ||
605 | kunmap(r_data->sg->page); | ||
606 | |||
607 | mrq->cmd->error = MMC_ERR_TIMEOUT; | 605 | mrq->cmd->error = MMC_ERR_TIMEOUT; |
608 | mmc_request_done(mmc, mrq); | 606 | mmc_request_done(mmc, mrq); |
609 | } | 607 | } |
610 | 608 | ||
611 | static void tifm_sd_end_cmd_nodma(struct work_struct *work) | 609 | static void tifm_sd_end_cmd_nodma(unsigned long data) |
612 | { | 610 | { |
613 | struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); | 611 | struct tifm_sd *host = (struct tifm_sd*)data; |
614 | struct tifm_dev *sock = host->dev; | 612 | struct tifm_dev *sock = host->dev; |
615 | struct mmc_host *mmc = tifm_get_drvdata(sock); | 613 | struct mmc_host *mmc = tifm_get_drvdata(sock); |
616 | struct mmc_request *mrq; | 614 | struct mmc_request *mrq; |
@@ -619,6 +617,7 @@ static void tifm_sd_end_cmd_nodma(struct work_struct *work) | |||
619 | 617 | ||
620 | spin_lock_irqsave(&sock->lock, flags); | 618 | spin_lock_irqsave(&sock->lock, flags); |
621 | 619 | ||
620 | del_timer(&host->timer); | ||
622 | mrq = host->req; | 621 | mrq = host->req; |
623 | host->req = NULL; | 622 | host->req = NULL; |
624 | host->state = IDLE; | 623 | host->state = IDLE; |
@@ -636,8 +635,8 @@ static void tifm_sd_end_cmd_nodma(struct work_struct *work) | |||
636 | sock->addr + SOCK_MMCSD_INT_ENABLE); | 635 | sock->addr + SOCK_MMCSD_INT_ENABLE); |
637 | 636 | ||
638 | if (r_data->flags & MMC_DATA_WRITE) { | 637 | if (r_data->flags & MMC_DATA_WRITE) { |
639 | r_data->bytes_xfered = host->written_blocks * | 638 | r_data->bytes_xfered = host->written_blocks |
640 | r_data->blksz; | 639 | * r_data->blksz; |
641 | } else { | 640 | } else { |
642 | r_data->bytes_xfered = r_data->blocks - | 641 | r_data->bytes_xfered = r_data->blocks - |
643 | readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1; | 642 | readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1; |
@@ -645,29 +644,44 @@ static void tifm_sd_end_cmd_nodma(struct work_struct *work) | |||
645 | r_data->bytes_xfered += r_data->blksz - | 644 | r_data->bytes_xfered += r_data->blksz - |
646 | readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) + 1; | 645 | readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) + 1; |
647 | } | 646 | } |
648 | host->buffer = NULL; | ||
649 | host->buffer_pos = 0; | 647 | host->buffer_pos = 0; |
650 | host->buffer_size = 0; | 648 | host->buffer_size = 0; |
651 | } | 649 | } |
652 | 650 | ||
653 | writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), | 651 | writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), |
654 | sock->addr + SOCK_CONTROL); | 652 | sock->addr + SOCK_CONTROL); |
655 | 653 | ||
656 | spin_unlock_irqrestore(&sock->lock, flags); | 654 | spin_unlock_irqrestore(&sock->lock, flags); |
657 | 655 | ||
658 | if (r_data) | ||
659 | kunmap(r_data->sg->page); | ||
660 | |||
661 | mmc_request_done(mmc, mrq); | 656 | mmc_request_done(mmc, mrq); |
662 | } | 657 | } |
663 | 658 | ||
664 | static void tifm_sd_abort(struct work_struct *work) | 659 | static void tifm_sd_terminate(struct tifm_sd *host) |
660 | { | ||
661 | struct tifm_dev *sock = host->dev; | ||
662 | unsigned long flags; | ||
663 | |||
664 | writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE); | ||
665 | mmiowb(); | ||
666 | spin_lock_irqsave(&sock->lock, flags); | ||
667 | host->flags |= EJECT; | ||
668 | if (host->req) { | ||
669 | writel(TIFM_FIFO_INT_SETALL, | ||
670 | sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); | ||
671 | writel(0, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); | ||
672 | tasklet_schedule(&host->finish_tasklet); | ||
673 | } | ||
674 | spin_unlock_irqrestore(&sock->lock, flags); | ||
675 | } | ||
676 | |||
677 | static void tifm_sd_abort(unsigned long data) | ||
665 | { | 678 | { |
666 | struct tifm_sd *host = | 679 | struct tifm_sd *host = (struct tifm_sd*)data; |
667 | container_of(work, struct tifm_sd, abort_handler.work); | ||
668 | 680 | ||
669 | printk(KERN_ERR DRIVER_NAME | 681 | printk(KERN_ERR DRIVER_NAME |
670 | ": card failed to respond for a long period of time"); | 682 | ": card failed to respond for a long period of time"); |
683 | |||
684 | tifm_sd_terminate(host); | ||
671 | tifm_eject(host->dev); | 685 | tifm_eject(host->dev); |
672 | } | 686 | } |
673 | 687 | ||
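The driver now handles its software timeout with an ordinary kernel timer instead of delayed work: mod_timer() arms it when a request is started, del_timer() disarms it in the completion tasklet, and tifm_sd_abort() runs only if neither path got there first. A schematic of the arrangement (kernel-style, not standalone code; fields and names are reduced to the essentials):

    /* Schematic: software timeout around a request, as wired up above. */
    struct host {
        struct timer_list       timer;
        unsigned long           timeout_jiffies;   /* roughly 2 s worth */
        struct tasklet_struct   finish_tasklet;
    };

    /* request start:   mod_timer(&host->timer, jiffies + host->timeout_jiffies)
     * completion path: del_timer(&host->timer), then the finish tasklet
     *                  calls mmc_request_done()
     * timer handler:   complain, tear the request down, eject the socket
     */
    static void request_timed_out(unsigned long data)
    {
        struct host *h = (struct host *)data;

        tasklet_schedule(&h->finish_tasklet);   /* finish the stuck request */
    }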
@@ -686,9 +700,9 @@ static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
686 | writel(TIFM_MMCSD_4BBUS | readl(sock->addr + SOCK_MMCSD_CONFIG), | 700 | writel(TIFM_MMCSD_4BBUS | readl(sock->addr + SOCK_MMCSD_CONFIG), |
687 | sock->addr + SOCK_MMCSD_CONFIG); | 701 | sock->addr + SOCK_MMCSD_CONFIG); |
688 | } else { | 702 | } else { |
689 | writel((~TIFM_MMCSD_4BBUS) & | 703 | writel((~TIFM_MMCSD_4BBUS) |
690 | readl(sock->addr + SOCK_MMCSD_CONFIG), | 704 | & readl(sock->addr + SOCK_MMCSD_CONFIG), |
691 | sock->addr + SOCK_MMCSD_CONFIG); | 705 | sock->addr + SOCK_MMCSD_CONFIG); |
692 | } | 706 | } |
693 | 707 | ||
694 | if (ios->clock) { | 708 | if (ios->clock) { |
@@ -707,23 +721,24 @@ static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
707 | if ((20000000 / clk_div1) > (24000000 / clk_div2)) { | 721 | if ((20000000 / clk_div1) > (24000000 / clk_div2)) { |
708 | host->clk_freq = 20000000; | 722 | host->clk_freq = 20000000; |
709 | host->clk_div = clk_div1; | 723 | host->clk_div = clk_div1; |
710 | writel((~TIFM_CTRL_FAST_CLK) & | 724 | writel((~TIFM_CTRL_FAST_CLK) |
711 | readl(sock->addr + SOCK_CONTROL), | 725 | & readl(sock->addr + SOCK_CONTROL), |
712 | sock->addr + SOCK_CONTROL); | 726 | sock->addr + SOCK_CONTROL); |
713 | } else { | 727 | } else { |
714 | host->clk_freq = 24000000; | 728 | host->clk_freq = 24000000; |
715 | host->clk_div = clk_div2; | 729 | host->clk_div = clk_div2; |
716 | writel(TIFM_CTRL_FAST_CLK | | 730 | writel(TIFM_CTRL_FAST_CLK |
717 | readl(sock->addr + SOCK_CONTROL), | 731 | | readl(sock->addr + SOCK_CONTROL), |
718 | sock->addr + SOCK_CONTROL); | 732 | sock->addr + SOCK_CONTROL); |
719 | } | 733 | } |
720 | } else { | 734 | } else { |
721 | host->clk_div = 0; | 735 | host->clk_div = 0; |
722 | } | 736 | } |
723 | host->clk_div &= TIFM_MMCSD_CLKMASK; | 737 | host->clk_div &= TIFM_MMCSD_CLKMASK; |
724 | writel(host->clk_div | ((~TIFM_MMCSD_CLKMASK) & | 738 | writel(host->clk_div |
725 | readl(sock->addr + SOCK_MMCSD_CONFIG)), | 739 | | ((~TIFM_MMCSD_CLKMASK) |
726 | sock->addr + SOCK_MMCSD_CONFIG); | 740 | & readl(sock->addr + SOCK_MMCSD_CONFIG)), |
741 | sock->addr + SOCK_MMCSD_CONFIG); | ||
727 | 742 | ||
728 | if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) | 743 | if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) |
729 | host->flags |= OPENDRAIN; | 744 | host->flags |= OPENDRAIN; |
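In the clock setup above the driver compares the rate each base clock would give after division and keeps the faster one, toggling TIFM_CTRL_FAST_CLK accordingly. A minimal standalone sketch of that selection, assuming the two dividers are computed with a ceiling divide so the result never exceeds the requested rate (the divider computation itself is outside this hunk, so that part is an assumption):

#include <stdio.h>

/* Pick the base clock/divider pair that gives the higher rate without
 * exceeding the requested frequency (ceiling divide assumed). */
static void pick_clock(unsigned int req_hz,
                       unsigned int *base_hz, unsigned int *div)
{
	unsigned int div1 = (20000000 + req_hz - 1) / req_hz; /* slow base */
	unsigned int div2 = (24000000 + req_hz - 1) / req_hz; /* fast base */

	if (20000000 / div1 > 24000000 / div2) {
		*base_hz = 20000000;       /* would clear TIFM_CTRL_FAST_CLK */
		*div = div1;
	} else {
		*base_hz = 24000000;       /* would set TIFM_CTRL_FAST_CLK */
		*div = div2;
	}
}

int main(void)
{
	unsigned int base, div;

	pick_clock(400000, &base, &div);       /* card identification rate */
	printf("%u Hz base / %u = %u Hz\n", base, div, base / div);
	pick_clock(25000000, &base, &div);     /* full-speed request */
	printf("%u Hz base / %u = %u Hz\n", base, div, base / div);
	return 0;
}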
@@ -737,7 +752,7 @@ static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
737 | // allow removal. | 752 | // allow removal. |
738 | if ((host->flags & EJECT) && ios->power_mode == MMC_POWER_OFF) { | 753 | if ((host->flags & EJECT) && ios->power_mode == MMC_POWER_OFF) { |
739 | host->flags |= EJECT_DONE; | 754 | host->flags |= EJECT_DONE; |
740 | wake_up_all(&host->can_eject); | 755 | wake_up_all(&host->notify); |
741 | } | 756 | } |
742 | 757 | ||
743 | spin_unlock_irqrestore(&sock->lock, flags); | 758 | spin_unlock_irqrestore(&sock->lock, flags); |
@@ -765,20 +780,67 @@ static struct mmc_host_ops tifm_sd_ops = { | |||
765 | .get_ro = tifm_sd_ro | 780 | .get_ro = tifm_sd_ro |
766 | }; | 781 | }; |
767 | 782 | ||
768 | static void tifm_sd_register_host(struct work_struct *work) | 783 | static int tifm_sd_initialize_host(struct tifm_sd *host) |
769 | { | 784 | { |
770 | struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); | 785 | int rc; |
786 | unsigned int host_status = 0; | ||
771 | struct tifm_dev *sock = host->dev; | 787 | struct tifm_dev *sock = host->dev; |
772 | struct mmc_host *mmc = tifm_get_drvdata(sock); | ||
773 | unsigned long flags; | ||
774 | 788 | ||
775 | spin_lock_irqsave(&sock->lock, flags); | 789 | writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE); |
776 | host->flags |= HOST_REG; | 790 | mmiowb(); |
777 | PREPARE_WORK(&host->cmd_handler, | 791 | host->clk_div = 61; |
778 | no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd); | 792 | host->clk_freq = 20000000; |
779 | spin_unlock_irqrestore(&sock->lock, flags); | 793 | writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL); |
780 | dev_dbg(&sock->dev, "adding host\n"); | 794 | writel(host->clk_div | TIFM_MMCSD_POWER, |
781 | mmc_add_host(mmc); | 795 | sock->addr + SOCK_MMCSD_CONFIG); |
796 | |||
797 | /* wait up to 0.51 sec for reset */ | ||
798 | for (rc = 2; rc <= 256; rc <<= 1) { | ||
799 | if (1 & readl(sock->addr + SOCK_MMCSD_SYSTEM_STATUS)) { | ||
800 | rc = 0; | ||
801 | break; | ||
802 | } | ||
803 | msleep(rc); | ||
804 | } | ||
805 | |||
806 | if (rc) { | ||
807 | printk(KERN_ERR DRIVER_NAME | ||
808 | ": controller failed to reset\n"); | ||
809 | return -ENODEV; | ||
810 | } | ||
811 | |||
812 | writel(0, sock->addr + SOCK_MMCSD_NUM_BLOCKS); | ||
813 | writel(host->clk_div | TIFM_MMCSD_POWER, | ||
814 | sock->addr + SOCK_MMCSD_CONFIG); | ||
815 | writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); | ||
816 | |||
817 | // command timeout fixed to 64 clocks for now | ||
818 | writel(64, sock->addr + SOCK_MMCSD_COMMAND_TO); | ||
819 | writel(TIFM_MMCSD_INAB, sock->addr + SOCK_MMCSD_COMMAND); | ||
820 | |||
821 | /* INAB should take much less than reset */ | ||
822 | for (rc = 1; rc <= 16; rc <<= 1) { | ||
823 | host_status = readl(sock->addr + SOCK_MMCSD_STATUS); | ||
824 | writel(host_status, sock->addr + SOCK_MMCSD_STATUS); | ||
825 | if (!(host_status & TIFM_MMCSD_ERRMASK) | ||
826 | && (host_status & TIFM_MMCSD_EOC)) { | ||
827 | rc = 0; | ||
828 | break; | ||
829 | } | ||
830 | msleep(rc); | ||
831 | } | ||
832 | |||
833 | if (rc) { | ||
834 | printk(KERN_ERR DRIVER_NAME | ||
835 | ": card not ready - probe failed on initialization\n"); | ||
836 | return -ENODEV; | ||
837 | } | ||
838 | |||
839 | writel(TIFM_MMCSD_DATAMASK | TIFM_MMCSD_ERRMASK, | ||
840 | sock->addr + SOCK_MMCSD_INT_ENABLE); | ||
841 | mmiowb(); | ||
842 | |||
843 | return 0; | ||
782 | } | 844 | } |
783 | 845 | ||
784 | static int tifm_sd_probe(struct tifm_dev *sock) | 846 | static int tifm_sd_probe(struct tifm_dev *sock) |
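tifm_sd_initialize_host() above polls the status register with doubling delays, so the sleeps add up to 2+4+...+256 = 510 ms, which is where the "0.51 sec" comment comes from; the INAB handshake uses the same pattern with shorter delays. A small userspace sketch of that back-off loop, with a stubbed-out readiness check standing in for the register read:

#include <stdio.h>
#include <time.h>

/* Stand-in for the SOCK_MMCSD_SYSTEM_STATUS read; always "not ready" here. */
static int controller_ready(void)
{
	return 0;
}

/* Check, then sleep 2, 4, ..., 256 ms - at most 510 ms in total. */
static int wait_for_reset(void)
{
	unsigned int ms;
	struct timespec ts;

	for (ms = 2; ms <= 256; ms <<= 1) {
		if (controller_ready())
			return 0;
		ts.tv_sec = 0;
		ts.tv_nsec = (long)ms * 1000000L;
		nanosleep(&ts, NULL);
	}
	return -1;      /* maps to the -ENODEV "failed to reset" path */
}

int main(void)
{
	if (wait_for_reset())
		fprintf(stderr, "controller failed to reset\n");
	return 0;
}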
@@ -787,8 +849,8 @@ static int tifm_sd_probe(struct tifm_dev *sock) | |||
787 | struct tifm_sd *host; | 849 | struct tifm_sd *host; |
788 | int rc = -EIO; | 850 | int rc = -EIO; |
789 | 851 | ||
790 | if (!(TIFM_SOCK_STATE_OCCUPIED & | 852 | if (!(TIFM_SOCK_STATE_OCCUPIED |
791 | readl(sock->addr + SOCK_PRESENT_STATE))) { | 853 | & readl(sock->addr + SOCK_PRESENT_STATE))) { |
792 | printk(KERN_WARNING DRIVER_NAME ": card gone, unexpectedly\n"); | 854 | printk(KERN_WARNING DRIVER_NAME ": card gone, unexpectedly\n"); |
793 | return rc; | 855 | return rc; |
794 | } | 856 | } |
@@ -798,109 +860,99 @@ static int tifm_sd_probe(struct tifm_dev *sock) | |||
798 | return -ENOMEM; | 860 | return -ENOMEM; |
799 | 861 | ||
800 | host = mmc_priv(mmc); | 862 | host = mmc_priv(mmc); |
801 | host->dev = sock; | ||
802 | host->clk_div = 61; | ||
803 | init_waitqueue_head(&host->can_eject); | ||
804 | INIT_WORK(&host->cmd_handler, tifm_sd_register_host); | ||
805 | INIT_DELAYED_WORK(&host->abort_handler, tifm_sd_abort); | ||
806 | |||
807 | tifm_set_drvdata(sock, mmc); | 863 | tifm_set_drvdata(sock, mmc); |
808 | sock->signal_irq = tifm_sd_signal_irq; | 864 | host->dev = sock; |
809 | |||
810 | host->clk_freq = 20000000; | ||
811 | host->timeout_jiffies = msecs_to_jiffies(1000); | 865 | host->timeout_jiffies = msecs_to_jiffies(1000); |
812 | 866 | ||
867 | init_waitqueue_head(&host->notify); | ||
868 | tasklet_init(&host->finish_tasklet, | ||
869 | no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd, | ||
870 | (unsigned long)host); | ||
871 | setup_timer(&host->timer, tifm_sd_abort, (unsigned long)host); | ||
872 | |||
813 | tifm_sd_ops.request = no_dma ? tifm_sd_request_nodma : tifm_sd_request; | 873 | tifm_sd_ops.request = no_dma ? tifm_sd_request_nodma : tifm_sd_request; |
814 | mmc->ops = &tifm_sd_ops; | 874 | mmc->ops = &tifm_sd_ops; |
815 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; | 875 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; |
816 | mmc->caps = MMC_CAP_4_BIT_DATA; | 876 | mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE; |
817 | mmc->f_min = 20000000 / 60; | 877 | mmc->f_min = 20000000 / 60; |
818 | mmc->f_max = 24000000; | 878 | mmc->f_max = 24000000; |
819 | mmc->max_hw_segs = 1; | 879 | mmc->max_hw_segs = 1; |
820 | mmc->max_phys_segs = 1; | 880 | mmc->max_phys_segs = 1; |
821 | mmc->max_sectors = 127; | 881 | // limited by DMA counter - it's safer to stick with |
822 | mmc->max_seg_size = mmc->max_sectors << 11; //2k maximum hw block length | 882 | // block counter has 11 bits though |
823 | 883 | mmc->max_blk_count = 256; | |
824 | writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE); | 884 | // 2k maximum hw block length |
825 | writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL); | 885 | mmc->max_blk_size = 2048; |
826 | writel(host->clk_div | TIFM_MMCSD_POWER, | 886 | mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; |
827 | sock->addr + SOCK_MMCSD_CONFIG); | 887 | mmc->max_seg_size = mmc->max_req_size; |
888 | sock->signal_irq = tifm_sd_signal_irq; | ||
889 | rc = tifm_sd_initialize_host(host); | ||
828 | 890 | ||
829 | for (rc = 0; rc < 50; rc++) { | 891 | if (!rc) |
830 | /* Wait for reset ack */ | 892 | rc = mmc_add_host(mmc); |
831 | if (1 & readl(sock->addr + SOCK_MMCSD_SYSTEM_STATUS)) { | 893 | if (rc) |
832 | rc = 0; | 894 | goto out_free_mmc; |
833 | break; | ||
834 | } | ||
835 | msleep(10); | ||
836 | } | ||
837 | 895 | ||
838 | if (rc) { | 896 | return 0; |
839 | printk(KERN_ERR DRIVER_NAME | 897 | out_free_mmc: |
840 | ": card not ready - probe failed\n"); | 898 | mmc_free_host(mmc); |
841 | mmc_free_host(mmc); | 899 | return rc; |
842 | return -ENODEV; | 900 | } |
843 | } | ||
844 | 901 | ||
845 | writel(0, sock->addr + SOCK_MMCSD_NUM_BLOCKS); | 902 | static void tifm_sd_remove(struct tifm_dev *sock) |
846 | writel(host->clk_div | TIFM_MMCSD_POWER, | 903 | { |
847 | sock->addr + SOCK_MMCSD_CONFIG); | 904 | struct mmc_host *mmc = tifm_get_drvdata(sock); |
848 | writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); | 905 | struct tifm_sd *host = mmc_priv(mmc); |
849 | writel(TIFM_MMCSD_DATAMASK | TIFM_MMCSD_ERRMASK, | ||
850 | sock->addr + SOCK_MMCSD_INT_ENABLE); | ||
851 | 906 | ||
852 | writel(64, sock->addr + SOCK_MMCSD_COMMAND_TO); // command timeout 64 clocks for now | 907 | del_timer_sync(&host->timer); |
853 | writel(TIFM_MMCSD_INAB, sock->addr + SOCK_MMCSD_COMMAND); | 908 | tifm_sd_terminate(host); |
854 | writel(host->clk_div | TIFM_MMCSD_POWER, | 909 | wait_event_timeout(host->notify, host->flags & EJECT_DONE, |
855 | sock->addr + SOCK_MMCSD_CONFIG); | 910 | host->timeout_jiffies); |
911 | tasklet_kill(&host->finish_tasklet); | ||
912 | mmc_remove_host(mmc); | ||
856 | 913 | ||
857 | queue_delayed_work(sock->wq, &host->abort_handler, | 914 | /* The meaning of the bit majority in this constant is unknown. */ |
858 | host->timeout_jiffies); | 915 | writel(0xfff8 & readl(sock->addr + SOCK_CONTROL), |
916 | sock->addr + SOCK_CONTROL); | ||
859 | 917 | ||
860 | return 0; | 918 | tifm_set_drvdata(sock, NULL); |
919 | mmc_free_host(mmc); | ||
861 | } | 920 | } |
862 | 921 | ||
863 | static int tifm_sd_host_is_down(struct tifm_dev *sock) | 922 | #ifdef CONFIG_PM |
923 | |||
924 | static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state) | ||
864 | { | 925 | { |
865 | struct mmc_host *mmc = tifm_get_drvdata(sock); | 926 | struct mmc_host *mmc = tifm_get_drvdata(sock); |
866 | struct tifm_sd *host = mmc_priv(mmc); | 927 | int rc; |
867 | unsigned long flags; | ||
868 | int rc = 0; | ||
869 | 928 | ||
870 | spin_lock_irqsave(&sock->lock, flags); | 929 | rc = mmc_suspend_host(mmc, state); |
871 | rc = (host->flags & EJECT_DONE); | 930 | /* The meaning of the bit majority in this constant is unknown. */ |
872 | spin_unlock_irqrestore(&sock->lock, flags); | 931 | writel(0xfff8 & readl(sock->addr + SOCK_CONTROL), |
932 | sock->addr + SOCK_CONTROL); | ||
873 | return rc; | 933 | return rc; |
874 | } | 934 | } |
875 | 935 | ||
876 | static void tifm_sd_remove(struct tifm_dev *sock) | 936 | static int tifm_sd_resume(struct tifm_dev *sock) |
877 | { | 937 | { |
878 | struct mmc_host *mmc = tifm_get_drvdata(sock); | 938 | struct mmc_host *mmc = tifm_get_drvdata(sock); |
879 | struct tifm_sd *host = mmc_priv(mmc); | 939 | struct tifm_sd *host = mmc_priv(mmc); |
880 | unsigned long flags; | ||
881 | 940 | ||
882 | spin_lock_irqsave(&sock->lock, flags); | 941 | if (sock->media_id != FM_SD |
883 | host->flags |= EJECT; | 942 | || tifm_sd_initialize_host(host)) { |
884 | if (host->req) | 943 | tifm_eject(sock); |
885 | queue_work(sock->wq, &host->cmd_handler); | 944 | return 0; |
886 | spin_unlock_irqrestore(&sock->lock, flags); | 945 | } else { |
887 | wait_event_timeout(host->can_eject, tifm_sd_host_is_down(sock), | 946 | return mmc_resume_host(mmc); |
888 | host->timeout_jiffies); | 947 | } |
948 | } | ||
889 | 949 | ||
890 | if (host->flags & HOST_REG) | 950 | #else |
891 | mmc_remove_host(mmc); | ||
892 | 951 | ||
893 | /* The meaning of the bit majority in this constant is unknown. */ | 952 | #define tifm_sd_suspend NULL |
894 | writel(0xfff8 & readl(sock->addr + SOCK_CONTROL), | 953 | #define tifm_sd_resume NULL |
895 | sock->addr + SOCK_CONTROL); | ||
896 | writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE); | ||
897 | writel(TIFM_FIFO_INT_SETALL, | ||
898 | sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); | ||
899 | writel(0, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); | ||
900 | 954 | ||
901 | tifm_set_drvdata(sock, NULL); | 955 | #endif /* CONFIG_PM */ |
902 | mmc_free_host(mmc); | ||
903 | } | ||
904 | 956 | ||
905 | static tifm_media_id tifm_sd_id_tbl[] = { | 957 | static tifm_media_id tifm_sd_id_tbl[] = { |
906 | FM_SD, 0 | 958 | FM_SD, 0 |
@@ -913,7 +965,9 @@ static struct tifm_driver tifm_sd_driver = { | |||
913 | }, | 965 | }, |
914 | .id_table = tifm_sd_id_tbl, | 966 | .id_table = tifm_sd_id_tbl, |
915 | .probe = tifm_sd_probe, | 967 | .probe = tifm_sd_probe, |
916 | .remove = tifm_sd_remove | 968 | .remove = tifm_sd_remove, |
969 | .suspend = tifm_sd_suspend, | ||
970 | .resume = tifm_sd_resume | ||
917 | }; | 971 | }; |
918 | 972 | ||
919 | static int __init tifm_sd_init(void) | 973 | static int __init tifm_sd_init(void) |
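Worth noting about the probe changes above: the old max_sectors/max_seg_size pair is replaced by the max_blk_size/max_blk_count/max_req_size limits, so the largest request the host now advertises is 2048 * 256 bytes. A tiny sketch that only works out those numbers (nothing here is kernel API):

#include <stdio.h>

int main(void)
{
	unsigned int max_blk_size = 2048;   /* 2 KiB hardware block length */
	unsigned int max_blk_count = 256;   /* conservative DMA counter limit */
	unsigned int max_req_size = max_blk_size * max_blk_count;

	printf("max_req_size = %u bytes (%u KiB)\n",
	       max_req_size, max_req_size / 1024);
	printf("f_min = %u Hz, f_max = %u Hz\n", 20000000 / 60, 24000000);
	return 0;
}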
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c index 7a282672f8e9..a44d8777ab9f 100644 --- a/drivers/mmc/wbsd.c +++ b/drivers/mmc/wbsd.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver | 2 | * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver |
3 | * | 3 | * |
4 | * Copyright (C) 2004-2005 Pierre Ossman, All Rights Reserved. | 4 | * Copyright (C) 2004-2006 Pierre Ossman, All Rights Reserved. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
@@ -272,16 +272,9 @@ static inline int wbsd_next_sg(struct wbsd_host *host) | |||
272 | return host->num_sg; | 272 | return host->num_sg; |
273 | } | 273 | } |
274 | 274 | ||
275 | static inline char *wbsd_kmap_sg(struct wbsd_host *host) | 275 | static inline char *wbsd_sg_to_buffer(struct wbsd_host *host) |
276 | { | 276 | { |
277 | host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ) + | 277 | return page_address(host->cur_sg->page) + host->cur_sg->offset; |
278 | host->cur_sg->offset; | ||
279 | return host->mapped_sg; | ||
280 | } | ||
281 | |||
282 | static inline void wbsd_kunmap_sg(struct wbsd_host *host) | ||
283 | { | ||
284 | kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ); | ||
285 | } | 278 | } |
286 | 279 | ||
287 | static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data) | 280 | static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data) |
@@ -302,12 +295,11 @@ static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data) | |||
302 | * we do not transfer too much. | 295 | * we do not transfer too much. |
303 | */ | 296 | */ |
304 | for (i = 0; i < len; i++) { | 297 | for (i = 0; i < len; i++) { |
305 | sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset; | 298 | sgbuf = page_address(sg[i].page) + sg[i].offset; |
306 | if (size < sg[i].length) | 299 | if (size < sg[i].length) |
307 | memcpy(dmabuf, sgbuf, size); | 300 | memcpy(dmabuf, sgbuf, size); |
308 | else | 301 | else |
309 | memcpy(dmabuf, sgbuf, sg[i].length); | 302 | memcpy(dmabuf, sgbuf, sg[i].length); |
310 | kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ); | ||
311 | dmabuf += sg[i].length; | 303 | dmabuf += sg[i].length; |
312 | 304 | ||
313 | if (size < sg[i].length) | 305 | if (size < sg[i].length) |
@@ -347,7 +339,7 @@ static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data) | |||
347 | * we do not transfer too much. | 339 | * we do not transfer too much. |
348 | */ | 340 | */ |
349 | for (i = 0; i < len; i++) { | 341 | for (i = 0; i < len; i++) { |
350 | sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset; | 342 | sgbuf = page_address(sg[i].page) + sg[i].offset; |
351 | if (size < sg[i].length) | 343 | if (size < sg[i].length) |
352 | memcpy(sgbuf, dmabuf, size); | 344 | memcpy(sgbuf, dmabuf, size); |
353 | else | 345 | else |
@@ -497,7 +489,7 @@ static void wbsd_empty_fifo(struct wbsd_host *host) | |||
497 | if (data->bytes_xfered == host->size) | 489 | if (data->bytes_xfered == host->size) |
498 | return; | 490 | return; |
499 | 491 | ||
500 | buffer = wbsd_kmap_sg(host) + host->offset; | 492 | buffer = wbsd_sg_to_buffer(host) + host->offset; |
501 | 493 | ||
502 | /* | 494 | /* |
503 | * Drain the fifo. This has a tendency to loop longer | 495 | * Drain the fifo. This has a tendency to loop longer |
@@ -526,17 +518,13 @@ static void wbsd_empty_fifo(struct wbsd_host *host) | |||
526 | /* | 518 | /* |
527 | * Transfer done? | 519 | * Transfer done? |
528 | */ | 520 | */ |
529 | if (data->bytes_xfered == host->size) { | 521 | if (data->bytes_xfered == host->size) |
530 | wbsd_kunmap_sg(host); | ||
531 | return; | 522 | return; |
532 | } | ||
533 | 523 | ||
534 | /* | 524 | /* |
535 | * End of scatter list entry? | 525 | * End of scatter list entry? |
536 | */ | 526 | */ |
537 | if (host->remain == 0) { | 527 | if (host->remain == 0) { |
538 | wbsd_kunmap_sg(host); | ||
539 | |||
540 | /* | 528 | /* |
541 | * Get next entry. Check if last. | 529 | * Get next entry. Check if last. |
542 | */ | 530 | */ |
@@ -554,13 +542,11 @@ static void wbsd_empty_fifo(struct wbsd_host *host) | |||
554 | return; | 542 | return; |
555 | } | 543 | } |
556 | 544 | ||
557 | buffer = wbsd_kmap_sg(host); | 545 | buffer = wbsd_sg_to_buffer(host); |
558 | } | 546 | } |
559 | } | 547 | } |
560 | } | 548 | } |
561 | 549 | ||
562 | wbsd_kunmap_sg(host); | ||
563 | |||
564 | /* | 550 | /* |
565 | * This is a very dirty hack to solve a | 551 | * This is a very dirty hack to solve a |
566 | * hardware problem. The chip doesn't trigger | 552 | * hardware problem. The chip doesn't trigger |
@@ -583,7 +569,7 @@ static void wbsd_fill_fifo(struct wbsd_host *host) | |||
583 | if (data->bytes_xfered == host->size) | 569 | if (data->bytes_xfered == host->size) |
584 | return; | 570 | return; |
585 | 571 | ||
586 | buffer = wbsd_kmap_sg(host) + host->offset; | 572 | buffer = wbsd_sg_to_buffer(host) + host->offset; |
587 | 573 | ||
588 | /* | 574 | /* |
589 | * Fill the fifo. This has a tendency to loop longer | 575 | * Fill the fifo. This has a tendency to loop longer |
@@ -612,17 +598,13 @@ static void wbsd_fill_fifo(struct wbsd_host *host) | |||
612 | /* | 598 | /* |
613 | * Transfer done? | 599 | * Transfer done? |
614 | */ | 600 | */ |
615 | if (data->bytes_xfered == host->size) { | 601 | if (data->bytes_xfered == host->size) |
616 | wbsd_kunmap_sg(host); | ||
617 | return; | 602 | return; |
618 | } | ||
619 | 603 | ||
620 | /* | 604 | /* |
621 | * End of scatter list entry? | 605 | * End of scatter list entry? |
622 | */ | 606 | */ |
623 | if (host->remain == 0) { | 607 | if (host->remain == 0) { |
624 | wbsd_kunmap_sg(host); | ||
625 | |||
626 | /* | 608 | /* |
627 | * Get next entry. Check if last. | 609 | * Get next entry. Check if last. |
628 | */ | 610 | */ |
@@ -640,13 +622,11 @@ static void wbsd_fill_fifo(struct wbsd_host *host) | |||
640 | return; | 622 | return; |
641 | } | 623 | } |
642 | 624 | ||
643 | buffer = wbsd_kmap_sg(host); | 625 | buffer = wbsd_sg_to_buffer(host); |
644 | } | 626 | } |
645 | } | 627 | } |
646 | } | 628 | } |
647 | 629 | ||
648 | wbsd_kunmap_sg(host); | ||
649 | |||
650 | /* | 630 | /* |
651 | * The controller stops sending interrupts for | 631 | * The controller stops sending interrupts for |
652 | * 'FIFO empty' under certain conditions. So we | 632 | * 'FIFO empty' under certain conditions. So we |
@@ -910,6 +890,45 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
910 | */ | 890 | */ |
911 | if (cmd->data && (cmd->error == MMC_ERR_NONE)) { | 891 | if (cmd->data && (cmd->error == MMC_ERR_NONE)) { |
912 | /* | 892 | /* |
893 | * The hardware is so delightfully stupid that it has a list | ||
894 | * of "data" commands. If a command isn't on this list, it'll | ||
895 | * just go back to the idle state and won't send any data | ||
896 | * interrupts. | ||
897 | */ | ||
898 | switch (cmd->opcode) { | ||
899 | case 11: | ||
900 | case 17: | ||
901 | case 18: | ||
902 | case 20: | ||
903 | case 24: | ||
904 | case 25: | ||
905 | case 26: | ||
906 | case 27: | ||
907 | case 30: | ||
908 | case 42: | ||
909 | case 56: | ||
910 | break; | ||
911 | |||
912 | /* ACMDs. We don't keep track of state, so we just treat them | ||
913 | * like any other command. */ | ||
914 | case 51: | ||
915 | break; | ||
916 | |||
917 | default: | ||
918 | #ifdef CONFIG_MMC_DEBUG | ||
919 | printk(KERN_WARNING "%s: Data command %d is not " | ||
920 | "supported by this controller.\n", | ||
921 | mmc_hostname(host->mmc), cmd->opcode); | ||
922 | #endif | ||
923 | cmd->data->error = MMC_ERR_INVALID; | ||
924 | |||
925 | if (cmd->data->stop) | ||
926 | wbsd_send_command(host, cmd->data->stop); | ||
927 | |||
928 | goto done; | ||
929 | }; | ||
930 | |||
931 | /* | ||
913 | * Dirty fix for hardware bug. | 932 | * Dirty fix for hardware bug. |
914 | */ | 933 | */ |
915 | if (host->dma == -1) | 934 | if (host->dma == -1) |
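The added switch above is effectively a whitelist: only opcodes the controller knows as data commands are allowed to carry data, everything else gets MMC_ERR_INVALID. The same test as a standalone helper (the function name is illustrative, not from the driver):

#include <stdio.h>

/* Opcodes the W83L51xD treats as data commands, per the hunk above. */
static int wbsd_data_opcode_ok(unsigned int opcode)
{
	switch (opcode) {
	case 11: case 17: case 18: case 20: case 24: case 25:
	case 26: case 27: case 30: case 42: case 56:
	case 51:                /* ACMD51, treated like any other command */
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	printf("CMD18 (READ_MULTIPLE_BLOCK): %s\n",
	       wbsd_data_opcode_ok(18) ? "accepted" : "rejected");
	printf("CMD13: %s\n",
	       wbsd_data_opcode_ok(13) ? "accepted" : "rejected");
	return 0;
}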
@@ -1343,16 +1362,27 @@ static int __devinit wbsd_alloc_mmc(struct device *dev) | |||
1343 | mmc->max_phys_segs = 128; | 1362 | mmc->max_phys_segs = 128; |
1344 | 1363 | ||
1345 | /* | 1364 | /* |
1346 | * Maximum number of sectors in one transfer. Also limited by 64kB | 1365 | * Maximum request size. Also limited by 64KiB buffer. |
1347 | * buffer. | ||
1348 | */ | 1366 | */ |
1349 | mmc->max_sectors = 128; | 1367 | mmc->max_req_size = 65536; |
1350 | 1368 | ||
1351 | /* | 1369 | /* |
1352 | * Maximum segment size. Could be one segment with the maximum number | 1370 | * Maximum segment size. Could be one segment with the maximum number |
1353 | * of segments. | 1371 | * of bytes. |
1372 | */ | ||
1373 | mmc->max_seg_size = mmc->max_req_size; | ||
1374 | |||
1375 | /* | ||
1376 | * Maximum block size. We have 12 bits (= 4095) but have to subtract | ||
1377 | * space for CRC. So the maximum is 4095 - 4*2 = 4087. | ||
1378 | */ | ||
1379 | mmc->max_blk_size = 4087; | ||
1380 | |||
1381 | /* | ||
1382 | * Maximum block count. There is no real limit so the maximum | ||
1383 | * request size will be the only restriction. | ||
1354 | */ | 1384 | */ |
1355 | mmc->max_seg_size = mmc->max_sectors * 512; | 1385 | mmc->max_blk_count = mmc->max_req_size; |
1356 | 1386 | ||
1357 | dev_set_drvdata(dev, mmc); | 1387 | dev_set_drvdata(dev, mmc); |
1358 | 1388 | ||
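The new wbsd limits above follow from simple arithmetic: the 64 KiB bounce buffer caps the request size, and the 12-bit block-length field loses 4*2 bytes to CRC (presumably the four per-data-line 16-bit CRCs in 4-bit mode), giving 4087. A short sketch that derives the same figures:

#include <stdio.h>

int main(void)
{
	unsigned int max_req_size = 65536;          /* 64 KiB bounce buffer */
	unsigned int max_blk_size = 4095 - 4 * 2;   /* 12-bit field minus CRC */
	unsigned int max_blk_count = max_req_size;  /* no separate hw limit */

	printf("max_req_size  = %u\n", max_req_size);
	printf("max_blk_size  = %u\n", max_blk_size);
	printf("max_blk_count = %u\n", max_blk_count);

	/* A transfer fits if blocks * block size stays within the buffer. */
	printf("128 x 512 = %u -> %s\n", 128 * 512,
	       128 * 512 <= max_req_size ? "fits" : "too big");
	return 0;
}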
diff --git a/drivers/mmc/wbsd.h b/drivers/mmc/wbsd.h index 6072993f01e3..d06718b0e2ab 100644 --- a/drivers/mmc/wbsd.h +++ b/drivers/mmc/wbsd.h | |||
@@ -154,7 +154,6 @@ struct wbsd_host | |||
154 | 154 | ||
155 | struct scatterlist* cur_sg; /* Current SG entry */ | 155 | struct scatterlist* cur_sg; /* Current SG entry */ |
156 | unsigned int num_sg; /* Number of entries left */ | 156 | unsigned int num_sg; /* Number of entries left */ |
157 | void* mapped_sg; /* vaddr of mapped sg */ | ||
158 | 157 | ||
159 | unsigned int offset; /* Offset into current entry */ | 158 | unsigned int offset; /* Offset into current entry */ |
160 | unsigned int remain; /* Data left in current entry */ | 159 | unsigned int remain; /* Data left in current entry */ |