aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mmc/host
diff options
context:
space:
mode:
authorPierre Ossman <drzeus@drzeus.cx>2007-02-11 13:57:36 -0500
committerPierre Ossman <drzeus@drzeus.cx>2007-05-01 07:04:17 -0400
commit1c6a0718f0bfdab0d9b7da5f7b74f38a0058c03a (patch)
tree5e7f2a26d5d1782d87c596b40f874c6c0b8b8e1a /drivers/mmc/host
parent98ac2162699f7e9880683cb954891817f20b607c (diff)
mmc: Move host and card drivers to subdirs
Clean up the drivers/mmc directory by moving card and host drivers into subdirectories. Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
Diffstat (limited to 'drivers/mmc/host')
-rw-r--r--drivers/mmc/host/Kconfig103
-rw-r--r--drivers/mmc/host/Makefile18
-rw-r--r--drivers/mmc/host/at91_mci.c1001
-rw-r--r--drivers/mmc/host/au1xmmc.c1031
-rw-r--r--drivers/mmc/host/au1xmmc.h96
-rw-r--r--drivers/mmc/host/imxmmc.c1137
-rw-r--r--drivers/mmc/host/imxmmc.h67
-rw-r--r--drivers/mmc/host/mmci.c702
-rw-r--r--drivers/mmc/host/mmci.h179
-rw-r--r--drivers/mmc/host/omap.c1288
-rw-r--r--drivers/mmc/host/pxamci.c616
-rw-r--r--drivers/mmc/host/pxamci.h124
-rw-r--r--drivers/mmc/host/sdhci.c1539
-rw-r--r--drivers/mmc/host/sdhci.h210
-rw-r--r--drivers/mmc/host/tifm_sd.c1102
-rw-r--r--drivers/mmc/host/wbsd.c2062
-rw-r--r--drivers/mmc/host/wbsd.h185
17 files changed, 11460 insertions, 0 deletions
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
new file mode 100644
index 000000000000..ed4deab2203d
--- /dev/null
+++ b/drivers/mmc/host/Kconfig
@@ -0,0 +1,103 @@
1#
2# MMC/SD host controller drivers
3#
4
5comment "MMC/SD Host Controller Drivers"
6 depends on MMC
7
8config MMC_ARMMMCI
9 tristate "ARM AMBA Multimedia Card Interface support"
10 depends on ARM_AMBA && MMC
11 help
12 This selects the ARM(R) AMBA(R) PrimeCell Multimedia Card
13 Interface (PL180 and PL181) support. If you have an ARM(R)
14 platform with a Multimedia Card slot, say Y or M here.
15
16 If unsure, say N.
17
18config MMC_PXA
19 tristate "Intel PXA25x/26x/27x Multimedia Card Interface support"
20 depends on ARCH_PXA && MMC
21 help
22 This selects the Intel(R) PXA(R) Multimedia card Interface.
23 If you have a PXA(R) platform with a Multimedia Card slot,
24 say Y or M here.
25
26 If unsure, say N.
27
28config MMC_SDHCI
29 tristate "Secure Digital Host Controller Interface support (EXPERIMENTAL)"
30 depends on PCI && MMC && EXPERIMENTAL
31 help
32	  This selects the generic Secure Digital Host Controller Interface.
33 It is used by manufacturers such as Texas Instruments(R), Ricoh(R)
34 and Toshiba(R). Most controllers found in laptops are of this type.
35 If you have a controller with this interface, say Y or M here.
36
37 If unsure, say N.
38
39config MMC_OMAP
40 tristate "TI OMAP Multimedia Card Interface support"
41 depends on ARCH_OMAP && MMC
42 select TPS65010 if MACH_OMAP_H2
43 help
44 This selects the TI OMAP Multimedia card Interface.
45 If you have an OMAP board with a Multimedia Card slot,
46 say Y or M here.
47
48 If unsure, say N.
49
50config MMC_WBSD
51 tristate "Winbond W83L51xD SD/MMC Card Interface support"
52 depends on MMC && ISA_DMA_API
53 help
54 This selects the Winbond(R) W83L51xD Secure digital and
55 Multimedia card Interface.
56	  If you have a machine with an integrated W83L518D or W83L519D
57 SD/MMC card reader, say Y or M here.
58
59 If unsure, say N.
60
61config MMC_AU1X
62 tristate "Alchemy AU1XX0 MMC Card Interface support"
63 depends on MMC && SOC_AU1200
64 help
65 This selects the AMD Alchemy(R) Multimedia card interface.
66 If you have a Alchemy platform with a MMC slot, say Y or M here.
67
68 If unsure, say N.
69
70config MMC_AT91
71 tristate "AT91 SD/MMC Card Interface support"
72 depends on ARCH_AT91 && MMC
73 help
74 This selects the AT91 MCI controller.
75
76 If unsure, say N.
77
78config MMC_IMX
79 tristate "Motorola i.MX Multimedia Card Interface support"
80 depends on ARCH_IMX && MMC
81 help
82 This selects the Motorola i.MX Multimedia card Interface.
83 If you have a i.MX platform with a Multimedia Card slot,
84 say Y or M here.
85
86 If unsure, say N.
87
88config MMC_TIFM_SD
89 tristate "TI Flash Media MMC/SD Interface support (EXPERIMENTAL)"
90 depends on MMC && EXPERIMENTAL && PCI
91 select TIFM_CORE
92 help
93 Say Y here if you want to be able to access MMC/SD cards with
94 the Texas Instruments(R) Flash Media card reader, found in many
95 laptops.
96 This option 'selects' (turns on, enables) 'TIFM_CORE', but you
97 probably also need appropriate card reader host adapter, such as
98 'Misc devices: TI Flash Media PCI74xx/PCI76xx host adapter support
99 (TIFM_7XX1)'.
100
101 To compile this driver as a module, choose M here: the
102 module will be called tifm_sd.
103
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
new file mode 100644
index 000000000000..6685f64345b4
--- /dev/null
+++ b/drivers/mmc/host/Makefile
@@ -0,0 +1,18 @@
1#
2# Makefile for MMC/SD host controller drivers
3#
4
5ifeq ($(CONFIG_MMC_DEBUG),y)
6 EXTRA_CFLAGS += -DDEBUG
7endif
8
9obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
10obj-$(CONFIG_MMC_PXA) += pxamci.o
11obj-$(CONFIG_MMC_IMX) += imxmmc.o
12obj-$(CONFIG_MMC_SDHCI) += sdhci.o
13obj-$(CONFIG_MMC_WBSD) += wbsd.o
14obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
15obj-$(CONFIG_MMC_OMAP) += omap.o
16obj-$(CONFIG_MMC_AT91) += at91_mci.o
17obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o
18
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
new file mode 100644
index 000000000000..e37943c314cb
--- /dev/null
+++ b/drivers/mmc/host/at91_mci.c
@@ -0,0 +1,1001 @@
1/*
2 * linux/drivers/mmc/at91_mci.c - ATMEL AT91 MCI Driver
3 *
4 * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
5 *
6 * Copyright (C) 2006 Malcolm Noyes
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13/*
14 This is the AT91 MCI driver that has been tested with both MMC cards
15 and SD-cards. Boards that support write protect are now supported.
16 The CCAT91SBC001 board does not support SD cards.
17
18 The three entry points are at91_mci_request, at91_mci_set_ios
19 and at91_mci_get_ro.
20
21 SET IOS
22 This configures the device to put it into the correct mode and clock speed
23 required.
24
25 MCI REQUEST
26 MCI request processes the commands sent in the mmc_request structure. This
27 can consist of a processing command and a stop command in the case of
28 multiple block transfers.
29
30 There are three main types of request, commands, reads and writes.
31
32 Commands are straight forward. The command is submitted to the controller and
33 the request function returns. When the controller generates an interrupt to indicate
34 the command is finished, the response to the command are read and the mmc_request_done
35 function called to end the request.
36
37 Reads and writes work in a similar manner to normal commands but involve the PDC (DMA)
38 controller to manage the transfers.
39
40 A read is done from the controller directly to the scatterlist passed in from the request.
41 Due to a bug in the AT91RM9200 controller, when a read is completed, all the words are byte
42 swapped in the scatterlist buffers. AT91SAM926x are not affected by this bug.
43
44 The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY
45
46 A write is slightly different in that the bytes to write are read from the scatterlist
47 into a dma memory buffer (this is in case the source buffer should be read only). The
48 entire write buffer is then done from this single dma memory buffer.
49
50 The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY
51
52 GET RO
53 Gets the status of the write protect pin, if available.
54*/
55
56#include <linux/module.h>
57#include <linux/moduleparam.h>
58#include <linux/init.h>
59#include <linux/ioport.h>
60#include <linux/platform_device.h>
61#include <linux/interrupt.h>
62#include <linux/blkdev.h>
63#include <linux/delay.h>
64#include <linux/err.h>
65#include <linux/dma-mapping.h>
66#include <linux/clk.h>
67#include <linux/atmel_pdc.h>
68
69#include <linux/mmc/host.h>
70
71#include <asm/io.h>
72#include <asm/irq.h>
73#include <asm/mach/mmc.h>
74#include <asm/arch/board.h>
75#include <asm/arch/cpu.h>
76#include <asm/arch/gpio.h>
77#include <asm/arch/at91_mci.h>
78
79#define DRIVER_NAME "at91_mci"
80
81#undef SUPPORT_4WIRE
82
83#define FL_SENT_COMMAND (1 << 0)
84#define FL_SENT_STOP (1 << 1)
85
86#define AT91_MCI_ERRORS (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \
87 | AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \
88 | AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)
89
90#define at91_mci_read(host, reg) __raw_readl((host)->baseaddr + (reg))
91#define at91_mci_write(host, reg, val) __raw_writel((val), (host)->baseaddr + (reg))
92
93
/*
 * Low level type for this driver.
 * Per-controller state, stored in the mmc_host private area (mmc_priv).
 */
struct at91mci_host
{
	struct mmc_host *mmc;		/* owning MMC-core host */
	struct mmc_command *cmd;	/* command currently in flight */
	struct mmc_request *request;	/* request currently being processed */

	void __iomem *baseaddr;		/* mapped MCI register window */
	int irq;			/* MCI interrupt line */

	struct at91_mmc_data *board;	/* board data: pins, slot, bus width */
	int present;			/* card present (0/1), or -1 if no detect pin */

	struct clk *mci_clk;		/* peripheral clock feeding the controller */

	/*
	 * Flag indicating when the command has been sent. This is used to
	 * work out whether or not to send the stop
	 */
	unsigned int flags;
	/* flag for current bus settings */
	u32 bus_mode;

	/* DMA buffer used for transmitting */
	unsigned int* buffer;
	dma_addr_t physical_address;	/* bus address of 'buffer' */
	unsigned int total_length;	/* size of 'buffer' in bytes */

	/* Latest in the scatterlist that has been enabled for transfer, but not freed */
	int in_use_index;

	/* Latest in the scatterlist that has been enabled for transfer */
	int transfer_index;
};
130
131/*
132 * Copy from sg to a dma block - used for transfers
133 */
134static inline void at91mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
135{
136 unsigned int len, i, size;
137 unsigned *dmabuf = host->buffer;
138
139 size = host->total_length;
140 len = data->sg_len;
141
142 /*
143 * Just loop through all entries. Size might not
144 * be the entire list though so make sure that
145 * we do not transfer too much.
146 */
147 for (i = 0; i < len; i++) {
148 struct scatterlist *sg;
149 int amount;
150 unsigned int *sgbuffer;
151
152 sg = &data->sg[i];
153
154 sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
155 amount = min(size, sg->length);
156 size -= amount;
157
158 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
159 int index;
160
161 for (index = 0; index < (amount / 4); index++)
162 *dmabuf++ = swab32(sgbuffer[index]);
163 }
164 else
165 memcpy(dmabuf, sgbuffer, amount);
166
167 kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
168
169 if (size == 0)
170 break;
171 }
172
173 /*
174 * Check that we didn't get a request to transfer
175 * more data than can fit into the SG list.
176 */
177 BUG_ON(size != 0);
178}
179
/*
 * Prepare a dma read.
 *
 * Arms the PDC with up to two scatterlist entries: iteration 0 programs
 * the "current" receive registers (RPR/RCR), iteration 1 the "next"
 * registers (RNPR/RNCR), so the PDC can chain into the second buffer
 * without software intervention.  A slot that is still mid-transfer
 * (non-zero counter register) is left alone.
 */
static void at91mci_pre_dma_read(struct at91mci_host *host)
{
	int i;
	struct scatterlist *sg;
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("pre dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	for (i = 0; i < 2; i++) {
		/* nothing left to transfer */
		if (host->transfer_index >= data->sg_len) {
			pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
			break;
		}

		/* Check to see if this needs filling */
		if (i == 0) {
			if (at91_mci_read(host, ATMEL_PDC_RCR) != 0) {
				pr_debug("Transfer active in current\n");
				continue;
			}
		}
		else {
			if (at91_mci_read(host, ATMEL_PDC_RNCR) != 0) {
				pr_debug("Transfer active in next\n");
				continue;
			}
		}

		/* Setup the next transfer */
		pr_debug("Using transfer index %d\n", host->transfer_index);

		sg = &data->sg[host->transfer_index++];
		pr_debug("sg = %p\n", sg);

		/* map straight into the caller's pages; unmapped in post_dma_read */
		sg->dma_address = dma_map_page(NULL, sg->page, sg->offset, sg->length, DMA_FROM_DEVICE);

		pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);

		/* PDC counters are in 32-bit words, hence length / 4 */
		if (i == 0) {
			at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RCR, sg->length / 4);
		}
		else {
			at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RNCR, sg->length / 4);
		}
	}

	pr_debug("pre dma read done\n");
}
247
/*
 * Handle after a dma read.
 *
 * Retires every scatterlist entry that has been handed to the PDC
 * (in_use_index .. transfer_index): unmaps it, applies the AT91RM9200
 * byte-swap errata fixup in place, and accounts the bytes.  Then either
 * arms the PDC with further entries or, when the list is exhausted,
 * enables RXBUFF and shuts the PDC down.
 */
static void at91mci_post_dma_read(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("post dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	while (host->in_use_index < host->transfer_index) {
		unsigned int *buffer;

		struct scatterlist *sg;

		pr_debug("finishing index %d\n", host->in_use_index);

		sg = &data->sg[host->in_use_index++];

		pr_debug("Unmapping page %08X\n", sg->dma_address);

		dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);

		/* Swap the contents of the buffer */
		buffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
		pr_debug("buffer = %p, length = %d\n", buffer, sg->length);

		data->bytes_xfered += sg->length;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			int index;

			for (index = 0; index < (sg->length / 4); index++)
				buffer[index] = swab32(buffer[index]);
		}

		kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		/* data was written by DMA behind the cache's back */
		flush_dcache_page(sg->page);
	}

	/* Is there another transfer to trigger? */
	if (host->transfer_index < data->sg_len)
		at91mci_pre_dma_read(host);
	else {
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
	}

	pr_debug("post dma read done\n");
}
310
311/*
312 * Handle transmitted data
313 */
314static void at91_mci_handle_transmitted(struct at91mci_host *host)
315{
316 struct mmc_command *cmd;
317 struct mmc_data *data;
318
319 pr_debug("Handling the transmit\n");
320
321 /* Disable the transfer */
322 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
323
324 /* Now wait for cmd ready */
325 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);
326 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
327
328 cmd = host->cmd;
329 if (!cmd) return;
330
331 data = cmd->data;
332 if (!data) return;
333
334 data->bytes_xfered = host->total_length;
335}
336
/*
 * Enable the controller.
 * Powers the interface on, masks all interrupt sources, sets a generous
 * data timeout and selects PDC mode, then picks the board's card slot.
 */
static void at91_mci_enable(struct at91mci_host *host)
{
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
	at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
	/* NOTE(review): 0x34a is a magic initial mode value (low MR bits,
	 * presumably CLKDIV/guard timing) -- confirm against the MCI_MR
	 * register layout in the datasheet. */
	at91_mci_write(host, AT91_MCI_MR, AT91_MCI_PDCMODE | 0x34a);

	/* use Slot A or B (only one at same time) */
	at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
}
350
/*
 * Disable the controller, also asserting a software reset so it comes
 * back up in a known state the next time it is enabled.
 */
static void at91_mci_disable(struct at91mci_host *host)
{
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
}
358
/*
 * Send a command.
 *
 * Builds the CMDR value from the mmc_command, programs block length and
 * PDC state for any attached data phase, writes ARGR/CMDR and, for data
 * transfers, enables the PDC *after* the command is issued (deliberately
 * the opposite of the data sheet's order -- see comment below).
 *
 * Returns the interrupt bits the caller should enable to observe
 * completion (CMDRDY, ENDRX or TXBUFE depending on the transfer type).
 */
static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int cmdr, mr;
	unsigned int block_length;
	struct mmc_data *data = cmd->data;

	unsigned int blocks;
	unsigned int ier = 0;

	host->cmd = cmd;

	/* Not sure if this is needed */
#if 0
	if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
		pr_debug("Clearing timeout\n");
		at91_mci_write(host, AT91_MCI_ARGR, 0);
		at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
		while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
			/* spin */
			pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
		}
	}
#endif
	cmdr = cmd->opcode;

	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
		cmdr |= AT91_MCI_RSPTYP_NONE;
	else {
		/* if a response is expected then allow maximum response latency */
		cmdr |= AT91_MCI_MAXLAT;
		/* set 136 bit response for R2, 48 bit response otherwise */
		if (mmc_resp_type(cmd) == MMC_RSP_R2)
			cmdr |= AT91_MCI_RSPTYP_136;
		else
			cmdr |= AT91_MCI_RSPTYP_48;
	}

	if (data) {
		block_length = data->blksz;
		blocks = data->blocks;

		/* always set data start - also set direction flag for read */
		if (data->flags & MMC_DATA_READ)
			cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
		else if (data->flags & MMC_DATA_WRITE)
			cmdr |= AT91_MCI_TRCMD_START;

		if (data->flags & MMC_DATA_STREAM)
			cmdr |= AT91_MCI_TRTYP_STREAM;
		if (data->flags & MMC_DATA_MULTI)
			cmdr |= AT91_MCI_TRTYP_MULTIPLE;
	}
	else {
		block_length = 0;
		blocks = 0;
	}

	if (cmd->opcode == MMC_STOP_TRANSMISSION)
		cmdr |= AT91_MCI_TRCMD_STOP;

	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= AT91_MCI_OPDCMD;

	/*
	 * Set the arguments and send the command
	 */
	pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
		cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));

	if (!data) {
		/* no data phase: clear out all PDC pointers/counters first */
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS);
		at91_mci_write(host, ATMEL_PDC_RPR, 0);
		at91_mci_write(host, ATMEL_PDC_RCR, 0);
		at91_mci_write(host, ATMEL_PDC_RNPR, 0);
		at91_mci_write(host, ATMEL_PDC_RNCR, 0);
		at91_mci_write(host, ATMEL_PDC_TPR, 0);
		at91_mci_write(host, ATMEL_PDC_TCR, 0);
		at91_mci_write(host, ATMEL_PDC_TNPR, 0);
		at91_mci_write(host, ATMEL_PDC_TNCR, 0);

		at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
		at91_mci_write(host, AT91_MCI_CMDR, cmdr);
		return AT91_MCI_CMDRDY;
	}

	mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;	/* zero block length and PDC mode */
	at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE);

	/*
	 * Disable the PDC controller
	 */
	at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

	if (cmdr & AT91_MCI_TRCMD_START) {
		data->bytes_xfered = 0;
		host->transfer_index = 0;
		host->in_use_index = 0;
		if (cmdr & AT91_MCI_TRDIR) {
			/*
			 * Handle a read: DMA straight into the sg pages, no
			 * bounce buffer needed.
			 */
			host->buffer = NULL;
			host->total_length = 0;

			at91mci_pre_dma_read(host);
			ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
		}
		else {
			/*
			 * Handle a write: stage all data in one coherent
			 * bounce buffer (source pages may be read-only).
			 * NOTE(review): the dma_alloc_coherent() result is
			 * not checked; a failed allocation would crash in
			 * at91mci_sg_to_dma() -- worth fixing.
			 */
			host->total_length = block_length * blocks;
			host->buffer = dma_alloc_coherent(NULL,
					host->total_length,
					&host->physical_address, GFP_KERNEL);

			at91mci_sg_to_dma(host, data);

			pr_debug("Transmitting %d bytes\n", host->total_length);

			at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
			at91_mci_write(host, ATMEL_PDC_TCR, host->total_length / 4);
			ier = AT91_MCI_TXBUFE;
		}
	}

	/*
	 * Send the command and then enable the PDC - not the other way round as
	 * the data sheet says
	 */

	at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
	at91_mci_write(host, AT91_MCI_CMDR, cmdr);

	if (cmdr & AT91_MCI_TRCMD_START) {
		if (cmdr & AT91_MCI_TRDIR)
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
		else
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
	}
	return ier;
}
505
506/*
507 * Wait for a command to complete
508 */
509static void at91mci_process_command(struct at91mci_host *host, struct mmc_command *cmd)
510{
511 unsigned int ier;
512
513 ier = at91_mci_send_command(host, cmd);
514
515 pr_debug("setting ier to %08X\n", ier);
516
517 /* Stop on errors or the required value */
518 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
519}
520
521/*
522 * Process the next step in the request
523 */
524static void at91mci_process_next(struct at91mci_host *host)
525{
526 if (!(host->flags & FL_SENT_COMMAND)) {
527 host->flags |= FL_SENT_COMMAND;
528 at91mci_process_command(host, host->request->cmd);
529 }
530 else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
531 host->flags |= FL_SENT_STOP;
532 at91mci_process_command(host, host->request->stop);
533 }
534 else
535 mmc_request_done(host->mmc, host->request);
536}
537
538/*
539 * Handle a command that has been completed
540 */
541static void at91mci_completed_command(struct at91mci_host *host)
542{
543 struct mmc_command *cmd = host->cmd;
544 unsigned int status;
545
546 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
547
548 cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
549 cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
550 cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
551 cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));
552
553 if (host->buffer) {
554 dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address);
555 host->buffer = NULL;
556 }
557
558 status = at91_mci_read(host, AT91_MCI_SR);
559
560 pr_debug("Status = %08X [%08X %08X %08X %08X]\n",
561 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
562
563 if (status & (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE |
564 AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE |
565 AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)) {
566 if ((status & AT91_MCI_RCRCE) &&
567 ((cmd->opcode == MMC_SEND_OP_COND) || (cmd->opcode == SD_APP_OP_COND))) {
568 cmd->error = MMC_ERR_NONE;
569 }
570 else {
571 if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE))
572 cmd->error = MMC_ERR_TIMEOUT;
573 else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE))
574 cmd->error = MMC_ERR_BADCRC;
575 else if (status & (AT91_MCI_OVRE | AT91_MCI_UNRE))
576 cmd->error = MMC_ERR_FIFO;
577 else
578 cmd->error = MMC_ERR_FAILED;
579
580 pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n",
581 cmd->error, cmd->opcode, cmd->retries);
582 }
583 }
584 else
585 cmd->error = MMC_ERR_NONE;
586
587 at91mci_process_next(host);
588}
589
590/*
591 * Handle an MMC request
592 */
593static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
594{
595 struct at91mci_host *host = mmc_priv(mmc);
596 host->request = mrq;
597 host->flags = 0;
598
599 at91mci_process_next(host);
600}
601
602/*
603 * Set the IOS
604 */
605static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
606{
607 int clkdiv;
608 struct at91mci_host *host = mmc_priv(mmc);
609 unsigned long at91_master_clock = clk_get_rate(host->mci_clk);
610
611 host->bus_mode = ios->bus_mode;
612
613 if (ios->clock == 0) {
614 /* Disable the MCI controller */
615 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
616 clkdiv = 0;
617 }
618 else {
619 /* Enable the MCI controller */
620 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
621
622 if ((at91_master_clock % (ios->clock * 2)) == 0)
623 clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
624 else
625 clkdiv = (at91_master_clock / ios->clock) / 2;
626
627 pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
628 at91_master_clock / (2 * (clkdiv + 1)));
629 }
630 if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
631 pr_debug("MMC: Setting controller bus width to 4\n");
632 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
633 }
634 else {
635 pr_debug("MMC: Setting controller bus width to 1\n");
636 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
637 }
638
639 /* Set the clock divider */
640 at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);
641
642 /* maybe switch power to the card */
643 if (host->board->vcc_pin) {
644 switch (ios->power_mode) {
645 case MMC_POWER_OFF:
646 at91_set_gpio_value(host->board->vcc_pin, 0);
647 break;
648 case MMC_POWER_UP:
649 case MMC_POWER_ON:
650 at91_set_gpio_value(host->board->vcc_pin, 1);
651 break;
652 }
653 }
654}
655
/*
 * Handle an interrupt.
 *
 * Only status bits that are also enabled in the interrupt mask are acted
 * on.  Any error bit immediately completes the current command; otherwise
 * the TXBUFE/ENDRX/NOTBUSY bits step the data transfer along until
 * CMDRDY finally marks the command as complete.
 */
static irqreturn_t at91_mci_irq(int irq, void *devid)
{
	struct at91mci_host *host = devid;
	int completed = 0;
	unsigned int int_status, int_mask;

	int_status = at91_mci_read(host, AT91_MCI_SR);
	int_mask = at91_mci_read(host, AT91_MCI_IMR);

	pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
		int_status & int_mask);

	/* only consider sources we have actually enabled */
	int_status = int_status & int_mask;

	if (int_status & AT91_MCI_ERRORS) {
		completed = 1;

		if (int_status & AT91_MCI_UNRE)
			pr_debug("MMC: Underrun error\n");
		if (int_status & AT91_MCI_OVRE)
			pr_debug("MMC: Overrun error\n");
		if (int_status & AT91_MCI_DTOE)
			pr_debug("MMC: Data timeout\n");
		if (int_status & AT91_MCI_DCRCE)
			pr_debug("MMC: CRC error in data\n");
		if (int_status & AT91_MCI_RTOE)
			pr_debug("MMC: Response timeout\n");
		if (int_status & AT91_MCI_RENDE)
			pr_debug("MMC: Response end bit error\n");
		if (int_status & AT91_MCI_RCRCE)
			pr_debug("MMC: Response CRC error\n");
		if (int_status & AT91_MCI_RDIRE)
			pr_debug("MMC: Response direction error\n");
		if (int_status & AT91_MCI_RINDE)
			pr_debug("MMC: Response index error\n");
	} else {
		/* Only continue processing if no errors */

		if (int_status & AT91_MCI_TXBUFE) {
			pr_debug("TX buffer empty\n");
			at91_mci_handle_transmitted(host);
		}

		if (int_status & AT91_MCI_RXBUFF) {
			pr_debug("RX buffer full\n");
			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_CMDRDY);
		}

		if (int_status & AT91_MCI_ENDTX)
			pr_debug("Transmit has ended\n");

		if (int_status & AT91_MCI_ENDRX) {
			pr_debug("Receive has ended\n");
			at91mci_post_dma_read(host);
		}

		if (int_status & AT91_MCI_NOTBUSY) {
			pr_debug("Card is ready\n");
			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_CMDRDY);
		}

		if (int_status & AT91_MCI_DTIP)
			pr_debug("Data transfer in progress\n");

		if (int_status & AT91_MCI_BLKE)
			pr_debug("Block transfer has ended\n");

		if (int_status & AT91_MCI_TXRDY)
			pr_debug("Ready to transmit\n");

		if (int_status & AT91_MCI_RXRDY)
			pr_debug("Ready to receive\n");

		if (int_status & AT91_MCI_CMDRDY) {
			pr_debug("Command ready\n");
			completed = 1;
		}
	}

	if (completed) {
		pr_debug("Completed command\n");
		/* mask everything; completed_command re-arms via process_next */
		at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
		at91mci_completed_command(host);
	} else
		at91_mci_write(host, AT91_MCI_IDR, int_status);

	return IRQ_HANDLED;
}
747
748static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
749{
750 struct at91mci_host *host = _host;
751 int present = !at91_get_gpio_value(irq);
752
753 /*
754 * we expect this irq on both insert and remove,
755 * and use a short delay to debounce.
756 */
757 if (present != host->present) {
758 host->present = present;
759 pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
760 present ? "insert" : "remove");
761 if (!present) {
762 pr_debug("****** Resetting SD-card bus width ******\n");
763 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
764 }
765 mmc_detect_change(host->mmc, msecs_to_jiffies(100));
766 }
767 return IRQ_HANDLED;
768}
769
770static int at91_mci_get_ro(struct mmc_host *mmc)
771{
772 int read_only = 0;
773 struct at91mci_host *host = mmc_priv(mmc);
774
775 if (host->board->wp_pin) {
776 read_only = at91_get_gpio_value(host->board->wp_pin);
777 printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc),
778 (read_only ? "read-only" : "read-write") );
779 }
780 else {
781 printk(KERN_WARNING "%s: host does not support reading read-only "
782 "switch. Assuming write-enable.\n", mmc_hostname(mmc));
783 }
784 return read_only;
785}
786
/* Host operations handed to the MMC core at probe time */
static const struct mmc_host_ops at91_mci_ops = {
	.request = at91_mci_request,
	.set_ios = at91_mci_set_ios,
	.get_ro = at91_mci_get_ro,
};
792
793/*
794 * Probe for the device
795 */
796static int __init at91_mci_probe(struct platform_device *pdev)
797{
798 struct mmc_host *mmc;
799 struct at91mci_host *host;
800 struct resource *res;
801 int ret;
802
803 pr_debug("Probe MCI devices\n");
804
805 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
806 if (!res)
807 return -ENXIO;
808
809 if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
810 return -EBUSY;
811
812 mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
813 if (!mmc) {
814 pr_debug("Failed to allocate mmc host\n");
815 release_mem_region(res->start, res->end - res->start + 1);
816 return -ENOMEM;
817 }
818
819 mmc->ops = &at91_mci_ops;
820 mmc->f_min = 375000;
821 mmc->f_max = 25000000;
822 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
823 mmc->caps = MMC_CAP_BYTEBLOCK;
824
825 mmc->max_blk_size = 4095;
826 mmc->max_blk_count = mmc->max_req_size;
827
828 host = mmc_priv(mmc);
829 host->mmc = mmc;
830 host->buffer = NULL;
831 host->bus_mode = 0;
832 host->board = pdev->dev.platform_data;
833 if (host->board->wire4) {
834#ifdef SUPPORT_4WIRE
835 mmc->caps |= MMC_CAP_4_BIT_DATA;
836#else
837 printk("AT91 MMC: 4 wire bus mode not supported by this driver - using 1 wire\n");
838#endif
839 }
840
841 /*
842 * Get Clock
843 */
844 host->mci_clk = clk_get(&pdev->dev, "mci_clk");
845 if (IS_ERR(host->mci_clk)) {
846 printk(KERN_ERR "AT91 MMC: no clock defined.\n");
847 mmc_free_host(mmc);
848 release_mem_region(res->start, res->end - res->start + 1);
849 return -ENODEV;
850 }
851
852 /*
853 * Map I/O region
854 */
855 host->baseaddr = ioremap(res->start, res->end - res->start + 1);
856 if (!host->baseaddr) {
857 clk_put(host->mci_clk);
858 mmc_free_host(mmc);
859 release_mem_region(res->start, res->end - res->start + 1);
860 return -ENOMEM;
861 }
862
863 /*
864 * Reset hardware
865 */
866 clk_enable(host->mci_clk); /* Enable the peripheral clock */
867 at91_mci_disable(host);
868 at91_mci_enable(host);
869
870 /*
871 * Allocate the MCI interrupt
872 */
873 host->irq = platform_get_irq(pdev, 0);
874 ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED, DRIVER_NAME, host);
875 if (ret) {
876 printk(KERN_ERR "AT91 MMC: Failed to request MCI interrupt\n");
877 clk_disable(host->mci_clk);
878 clk_put(host->mci_clk);
879 mmc_free_host(mmc);
880 iounmap(host->baseaddr);
881 release_mem_region(res->start, res->end - res->start + 1);
882 return ret;
883 }
884
885 platform_set_drvdata(pdev, mmc);
886
887 /*
888 * Add host to MMC layer
889 */
890 if (host->board->det_pin)
891 host->present = !at91_get_gpio_value(host->board->det_pin);
892 else
893 host->present = -1;
894
895 mmc_add_host(mmc);
896
897 /*
898 * monitor card insertion/removal if we can
899 */
900 if (host->board->det_pin) {
901 ret = request_irq(host->board->det_pin, at91_mmc_det_irq,
902 0, DRIVER_NAME, host);
903 if (ret)
904 printk(KERN_ERR "AT91 MMC: Couldn't allocate MMC detect irq\n");
905 }
906
907 pr_debug("Added MCI driver\n");
908
909 return 0;
910}
911
/*
 * Remove a device.
 * Tears down everything probe set up, in reverse: detect irq (if any),
 * controller, MMC registration, MCI irq, clock, mapping, memory region.
 */
static int __exit at91_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host;
	struct resource *res;

	if (!mmc)
		return -1;

	host = mmc_priv(mmc);

	/* present stays -1 only when no detect pin / detect irq exists */
	if (host->present != -1) {
		free_irq(host->board->det_pin, host);
		cancel_delayed_work(&host->mmc->detect);
	}

	at91_mci_disable(host);
	mmc_remove_host(mmc);
	free_irq(host->irq, host);

	clk_disable(host->mci_clk);	/* Disable the peripheral clock */
	clk_put(host->mci_clk);

	iounmap(host->baseaddr);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start + 1);

	mmc_free_host(mmc);
	platform_set_drvdata(pdev, NULL);
	pr_debug("MCI Removed\n");

	return 0;
}
948
949#ifdef CONFIG_PM
950static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
951{
952 struct mmc_host *mmc = platform_get_drvdata(pdev);
953 int ret = 0;
954
955 if (mmc)
956 ret = mmc_suspend_host(mmc, state);
957
958 return ret;
959}
960
/* Platform PM hook: let the MMC core re-initialise the card on resume. */
static int at91_mci_resume(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	return mmc ? mmc_resume_host(mmc) : 0;
}
971#else
972#define at91_mci_suspend NULL
973#define at91_mci_resume NULL
974#endif
975
/*
 * Platform driver glue.  No .probe here: registration goes through
 * platform_driver_probe() below, so the __init probe code can be
 * discarded after boot.
 */
static struct platform_driver at91_mci_driver = {
	.remove = __exit_p(at91_mci_remove),
	.suspend = at91_mci_suspend,
	.resume = at91_mci_resume,
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
	},
};
985
/* Module entry: register the driver and probe any matching device. */
static int __init at91_mci_init(void)
{
	return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
}

/* Module exit: unregister the driver (remove() runs for bound devices). */
static void __exit at91_mci_exit(void)
{
	platform_driver_unregister(&at91_mci_driver);
}

module_init(at91_mci_init);
module_exit(at91_mci_exit);

MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
MODULE_AUTHOR("Nick Randell");
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
new file mode 100644
index 000000000000..b7156a4555b5
--- /dev/null
+++ b/drivers/mmc/host/au1xmmc.c
@@ -0,0 +1,1031 @@
1/*
2 * linux/drivers/mmc/au1xmmc.c - AU1XX0 MMC driver
3 *
4 * Copyright (c) 2005, Advanced Micro Devices, Inc.
5 *
6 * Developed with help from the 2.4.30 MMC AU1XXX controller including
7 * the following copyright notices:
8 * Copyright (c) 2003-2004 Embedded Edge, LLC.
9 * Portions Copyright (C) 2002 Embedix, Inc
10 * Copyright 2002 Hewlett-Packard Company
11
12 * 2.6 version of this driver inspired by:
13 * (drivers/mmc/wbsd.c) Copyright (C) 2004-2005 Pierre Ossman,
14 * All Rights Reserved.
15 * (drivers/mmc/pxa.c) Copyright (C) 2003 Russell King,
16 * All Rights Reserved.
17 *
18
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License version 2 as
21 * published by the Free Software Foundation.
22 */
23
24/* Why is a timer used to detect insert events?
25 *
26 * From the AU1100 MMC application guide:
27 * If the Au1100-based design is intended to support both MultiMediaCards
28 * and 1- or 4-data bit SecureDigital cards, then the solution is to
29 * connect a weak (560KOhm) pull-up resistor to connector pin 1.
30 * In doing so, a MMC card never enters SPI-mode communications,
31 * but now the SecureDigital card-detect feature of CD/DAT3 is ineffective
32 * (the low to high transition will not occur).
33 *
34 * So we use the timer to check the status manually.
35 */
36
37#include <linux/module.h>
38#include <linux/init.h>
39#include <linux/platform_device.h>
40#include <linux/mm.h>
41#include <linux/interrupt.h>
42#include <linux/dma-mapping.h>
43
44#include <linux/mmc/host.h>
45#include <asm/io.h>
46#include <asm/mach-au1x00/au1000.h>
47#include <asm/mach-au1x00/au1xxx_dbdma.h>
48#include <asm/mach-au1x00/au1100_mmc.h>
49#include <asm/scatterlist.h>
50
51#include <au1xxx.h>
52#include "au1xmmc.h"
53
54#define DRIVER_NAME "au1xxx-mmc"
55
56/* Set this to enable special debugging macros */
57
58#ifdef DEBUG
59#define DBG(fmt, idx, args...) printk("au1xx(%d): DEBUG: " fmt, idx, ##args)
60#else
61#define DBG(fmt, idx, args...)
62#endif
63
/* Per-controller static configuration: register base, DBDMA tx/rx device
 * ids, and the BCSR bits used for slot power, card-detect and write-protect
 * status.  Indexed by host->id. */
64const struct {
65	u32 iobase;
66	u32 tx_devid, rx_devid;
67	u16 bcsrpwr;
68	u16 bcsrstatus;
69	u16 wpstatus;
70} au1xmmc_card_table[] = {
71	{ SD0_BASE, DSCR_CMD0_SDMS_TX0, DSCR_CMD0_SDMS_RX0,
72		BCSR_BOARD_SD0PWR, BCSR_INT_SD0INSERT, BCSR_STATUS_SD0WP },
73#ifndef CONFIG_MIPS_DB1200
	/* NOTE(review): BCSR_BOARD_DS1PWR breaks the SD0/SD1 naming pattern
	 * of the other constants — verify against the board header that this
	 * is the intended power bit and not a typo for SD1PWR. */
74	{ SD1_BASE, DSCR_CMD0_SDMS_TX1, DSCR_CMD0_SDMS_RX1,
75		BCSR_BOARD_DS1PWR, BCSR_INT_SD1INSERT, BCSR_STATUS_SD1WP }
76#endif
77};
78
79#define AU1XMMC_CONTROLLER_COUNT \
80 (sizeof(au1xmmc_card_table) / sizeof(au1xmmc_card_table[0]))
81
82/* This array stores pointers for the hosts (used by the IRQ handler) */
83struct au1xmmc_host *au1xmmc_hosts[AU1XMMC_CONTROLLER_COUNT];
/* Module parameter: non-zero selects DBDMA transfers, zero forces PIO. */
84static int dma = 1;
85
86#ifdef MODULE
/* NOTE(review): "dma" is declared int but registered with the bool param
 * type — later kernels require the variable to actually be bool; confirm
 * against the module_param rules of the target kernel version. */
87module_param(dma, bool, 0);
88MODULE_PARM_DESC(dma, "Use DMA engine for data transfers (0 = disabled)");
89#endif
90
/* Set the given interrupt-enable bits in the SD_CONFIG register
 * (read-modify-write, flushed with au_sync()). */
91static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask)
92{
93	u32 val = au_readl(HOST_CONFIG(host));
94	val |= mask;
95	au_writel(val, HOST_CONFIG(host));
96	au_sync();
97}
98
/* Flush the controller FIFO by pulsing SD_CONFIG2_FF, then rewrite
 * CONFIG2 with the divider-freeze bit cleared so the clock runs again. */
99static inline void FLUSH_FIFO(struct au1xmmc_host *host)
100{
101	u32 val = au_readl(HOST_CONFIG2(host));
102
103	au_writel(val | SD_CONFIG2_FF, HOST_CONFIG2(host));
104	au_sync_delay(1);
105
106	/* SEND_STOP will turn off clock control - this re-enables it */
107	val &= ~SD_CONFIG2_DF;
108
109	au_writel(val, HOST_CONFIG2(host));
110	au_sync();
111}
112
/* Clear the given interrupt-enable bits in the SD_CONFIG register
 * (read-modify-write, flushed with au_sync()). */
113static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask)
114{
115	u32 val = au_readl(HOST_CONFIG(host));
116	val &= ~mask;
117	au_writel(val, HOST_CONFIG(host));
118	au_sync();
119}
120
/* Issue the hard-coded STOP_TRANSMISSION command (STOP_CMD from
 * au1xmmc.h) and move the host state machine from DATA to STOP. */
121static inline void SEND_STOP(struct au1xmmc_host *host)
122{
123
124	/* We know the value of CONFIG2, so avoid a read we don't need */
125	u32 mask = SD_CONFIG2_EN;
126
127	WARN_ON(host->status != HOST_S_DATA);
128	host->status = HOST_S_STOP;
129
130	au_writel(mask | SD_CONFIG2_DF, HOST_CONFIG2(host));
131	au_sync();
132
133	/* Send the stop command */
134	au_writel(STOP_CMD, HOST_CMD(host));
135}
136
/* Switch slot power via the board's BCSR power bit: clear it first, then
 * set it again when state is non-zero (power on). */
137static void au1xmmc_set_power(struct au1xmmc_host *host, int state)
138{
139
140	u32 val = au1xmmc_card_table[host->id].bcsrpwr;
141
142	bcsr->board &= ~val;
143	if (state) bcsr->board |= val;
144
145	au_sync_delay(1);
146}
147
/* Return 1 if the BCSR card-detect bit for this slot is set, else 0. */
148static inline int au1xmmc_card_inserted(struct au1xmmc_host *host)
149{
150	return (bcsr->sig_status & au1xmmc_card_table[host->id].bcsrstatus)
151		? 1 : 0;
152}
153
/* mmc_host_ops.get_ro: return 1 if the BCSR write-protect bit for this
 * slot is set, else 0. */
154static int au1xmmc_card_readonly(struct mmc_host *mmc)
155{
156	struct au1xmmc_host *host = mmc_priv(mmc);
157	return (bcsr->status & au1xmmc_card_table[host->id].wpstatus)
158		? 1 : 0;
159}
160
/* Tear down the per-request state and hand the completed request back to
 * the MMC core via mmc_request_done(). */
161static void au1xmmc_finish_request(struct au1xmmc_host *host)
162{
163
164	struct mmc_request *mrq = host->mrq;
165
166	host->mrq = NULL;
	/* Masking with HOST_F_ACTIVE clears every transient flag
	 * (XMIT/RECV/DMA/STOP) while keeping the card-present bit. */
167	host->flags &= HOST_F_ACTIVE;
168
169	host->dma.len = 0;
170	host->dma.dir = 0;
171
172	host->pio.index = 0;
173	host->pio.offset = 0;
174	host->pio.len = 0;
175
176	host->status = HOST_S_IDLE;
177
	/* Board activity LED — presumably bit 8 high means LED off;
	 * au1xmmc_request clears the same bit when starting. */
178	bcsr->disk_leds |= (1 << 8);
179
180	mmc_request_done(host->mmc, mrq);
181}
182
/* Tasklet wrapper: complete the current request outside IRQ context. */
183static void au1xmmc_tasklet_finish(unsigned long param)
184{
185	struct au1xmmc_host *host = (struct au1xmmc_host *) param;
186	au1xmmc_finish_request(host);
187}
188
/* Encode and issue one MMC/SD command to the controller.
 *
 * The opcode and response type are translated into the SD_CMD register
 * layout; a second switch selects the transfer-type bits for data
 * commands.  If @wait is non-zero the command-response interrupt is
 * masked and completion is polled instead.
 *
 * Returns MMC_ERR_NONE, or MMC_ERR_INVALID for an unknown response type.
 *
 * NOTE(review): both polling loops below are unbounded busy-waits with no
 * timeout — a wedged controller would hang here. */
189static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
190				struct mmc_command *cmd)
191{
192
193	u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);
194
195	switch (mmc_resp_type(cmd)) {
196	case MMC_RSP_NONE:
197		break;
198	case MMC_RSP_R1:
199		mmccmd |= SD_CMD_RT_1;
200		break;
201	case MMC_RSP_R1B:
202		mmccmd |= SD_CMD_RT_1B;
203		break;
204	case MMC_RSP_R2:
205		mmccmd |= SD_CMD_RT_2;
206		break;
207	case MMC_RSP_R3:
208		mmccmd |= SD_CMD_RT_3;
209		break;
210	default:
211		printk(KERN_INFO "au1xmmc: unhandled response type %02x\n",
212			mmc_resp_type(cmd));
213		return MMC_ERR_INVALID;
214	}
215
	/* Select the controller transfer type for data commands; plain
	 * (non-data) opcodes fall through with no CT bits set. */
216	switch(cmd->opcode) {
217	case MMC_READ_SINGLE_BLOCK:
218	case SD_APP_SEND_SCR:
219		mmccmd |= SD_CMD_CT_2;
220		break;
221	case MMC_READ_MULTIPLE_BLOCK:
222		mmccmd |= SD_CMD_CT_4;
223		break;
224	case MMC_WRITE_BLOCK:
225		mmccmd |= SD_CMD_CT_1;
226		break;
227
228	case MMC_WRITE_MULTIPLE_BLOCK:
229		mmccmd |= SD_CMD_CT_3;
230		break;
231	case MMC_STOP_TRANSMISSION:
232		mmccmd |= SD_CMD_CT_7;
233		break;
234	}
235
236	au_writel(cmd->arg, HOST_CMDARG(host));
237	au_sync();
238
	/* In synchronous mode, mask the CR interrupt so completion is
	 * observed by polling rather than the IRQ handler. */
239	if (wait)
240		IRQ_OFF(host, SD_CONFIG_CR);
241
242	au_writel((mmccmd | SD_CMD_GO), HOST_CMD(host));
243	au_sync();
244
245	/* Wait for the command to go on the line */
246
247	while(1) {
248		if (!(au_readl(HOST_CMD(host)) & SD_CMD_GO))
249			break;
250	}
251
252	/* Wait for the command to come back */
253
254	if (wait) {
255		u32 status = au_readl(HOST_STATUS(host));
256
257		while(!(status & SD_STATUS_CR))
258			status = au_readl(HOST_STATUS(host));
259
260		/* Clear the CR status */
261		au_writel(SD_STATUS_CR, HOST_STATUS(host));
262
263		IRQ_ON(host, SD_CONFIG_CR);
264	}
265
266	return MMC_ERR_NONE;
267}
268
/* Finish a data transfer: wait for the transmit FIFO to drain, unmap the
 * scatterlist, translate CRC status bits into data->error, account the
 * bytes transferred (from the DBDMA channel in DMA mode, from the PIO
 * residue otherwise) and complete the request. */
269static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
270{
271
272	struct mmc_request *mrq = host->mrq;
273	struct mmc_data *data;
274	u32 crc;
275
276	WARN_ON(host->status != HOST_S_DATA && host->status != HOST_S_STOP);
277
	/* Request may already have been completed (e.g. by the timeout
	 * path) — nothing to do. */
278	if (host->mrq == NULL)
279		return;
280
281	data = mrq->cmd->data;
282
283	if (status == 0)
284		status = au_readl(HOST_STATUS(host));
285
286	/* The transaction is really over when the SD_STATUS_DB bit is clear */
287
288	while((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB))
289		status = au_readl(HOST_STATUS(host));
290
291	data->error = MMC_ERR_NONE;
292	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir);
293
294	/* Process any errors */
295
296	crc = (status & (SD_STATUS_WC | SD_STATUS_RC));
	/* On transmit, the low three status bits must read 0x02; anything
	 * else is treated as a CRC failure. */
297	if (host->flags & HOST_F_XMIT)
298		crc |= ((status & 0x07) == 0x02) ? 0 : 1;
299
300	if (crc)
301		data->error = MMC_ERR_BADCRC;
302
303	/* Clear the CRC bits */
304	au_writel(SD_STATUS_WC | SD_STATUS_RC, HOST_STATUS(host));
305
306	data->bytes_xfered = 0;
307
308	if (data->error == MMC_ERR_NONE) {
309		if (host->flags & HOST_F_DMA) {
310			u32 chan = DMA_CHANNEL(host);
311
			/* Read the transferred byte count straight out of
			 * the DBDMA channel descriptor table. */
312			chan_tab_t *c = *((chan_tab_t **) chan);
313			au1x_dma_chan_t *cp = c->chan_ptr;
314			data->bytes_xfered = cp->ddma_bytecnt;
315		}
316		else
317			data->bytes_xfered =
318				(data->blocks * data->blksz) -
319				host->pio.len;
320	}
321
322	au1xmmc_finish_request(host);
323}
324
/* Tasklet wrapper: re-read the status register and finish the data phase
 * outside IRQ context. */
325static void au1xmmc_tasklet_data(unsigned long param)
326{
327	struct au1xmmc_host *host = (struct au1xmmc_host *) param;
328
329	u32 status = au_readl(HOST_STATUS(host));
330	au1xmmc_data_complete(host, status);
331}
332
333#define AU1XMMC_MAX_TRANSFER 8
334
/* PIO transmit path: push up to AU1XMMC_MAX_TRANSFER bytes from the
 * current scatterlist segment into the TX FIFO, stopping early if the
 * FIFO fills (SD_STATUS_TH clears).  When the whole transfer is done,
 * mask the TH interrupt, optionally send STOP, and schedule completion. */
335static void au1xmmc_send_pio(struct au1xmmc_host *host)
336{
337
338	struct mmc_data *data = 0;
339	int sg_len, max, count = 0;
340	unsigned char *sg_ptr;
341	u32 status = 0;
342	struct scatterlist *sg;
343
344	data = host->mrq->data;
345
346	if (!(host->flags & HOST_F_XMIT))
347		return;
348
349	/* This is the pointer to the data buffer */
350	sg = &data->sg[host->pio.index];
351	sg_ptr = page_address(sg->page) + sg->offset + host->pio.offset;
352
353	/* This is the space left inside the buffer */
354	sg_len = data->sg[host->pio.index].length - host->pio.offset;
355
356	/* Check if we need less than the size of the sg_buffer */
357
358	max = (sg_len > host->pio.len) ? host->pio.len : sg_len;
359	if (max > AU1XMMC_MAX_TRANSFER) max = AU1XMMC_MAX_TRANSFER;
360
361	for(count = 0; count < max; count++ ) {
362		unsigned char val;
363
364		status = au_readl(HOST_STATUS(host));
365
		/* TH clear means the TX FIFO has no room — try again on
		 * the next interrupt. */
366		if (!(status & SD_STATUS_TH))
367			break;
368
369		val = *sg_ptr++;
370
371		au_writel((unsigned long) val, HOST_TXPORT(host));
372		au_sync();
373	}
374
375	host->pio.len -= count;
376	host->pio.offset += count;
377
	/* Segment exhausted — advance to the next scatterlist entry. */
378	if (count == sg_len) {
379		host->pio.index++;
380		host->pio.offset = 0;
381	}
382
383	if (host->pio.len == 0) {
384		IRQ_OFF(host, SD_CONFIG_TH);
385
386		if (host->flags & HOST_F_STOP)
387			SEND_STOP(host);
388
389		tasklet_schedule(&host->data_task);
390	}
391}
392
/* PIO receive path: drain up to AU1XMMC_MAX_TRANSFER bytes from the RX
 * FIFO into the current scatterlist segment, bailing out on FIFO-empty,
 * CRC, overrun or underrun status.  When the transfer completes, mask
 * the NE interrupt, optionally send STOP, and schedule completion. */
393static void au1xmmc_receive_pio(struct au1xmmc_host *host)
394{
395
396	struct mmc_data *data = 0;
397	int sg_len = 0, max = 0, count = 0;
398	unsigned char *sg_ptr = 0;
399	u32 status = 0;
400	struct scatterlist *sg;
401
402	data = host->mrq->data;
403
404	if (!(host->flags & HOST_F_RECV))
405		return;
406
407	max = host->pio.len;
408
	/* Past the last segment, sg_ptr stays NULL and the FIFO is just
	 * drained without storing. */
409	if (host->pio.index < host->dma.len) {
410		sg = &data->sg[host->pio.index];
411		sg_ptr = page_address(sg->page) + sg->offset + host->pio.offset;
412
413		/* This is the space left inside the buffer */
414		sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;
415
416		/* Check if we need less than the size of the sg_buffer */
417		if (sg_len < max) max = sg_len;
418	}
419
420	if (max > AU1XMMC_MAX_TRANSFER)
421		max = AU1XMMC_MAX_TRANSFER;
422
423	for(count = 0; count < max; count++ ) {
424		u32 val;
425		status = au_readl(HOST_STATUS(host));
426
		/* NE clear means the RX FIFO is empty — wait for the next
		 * interrupt. */
427		if (!(status & SD_STATUS_NE))
428			break;
429
430		if (status & SD_STATUS_RC) {
431			DBG("RX CRC Error [%d + %d].\n", host->id,
432					host->pio.len, count);
433			break;
434		}
435
436		if (status & SD_STATUS_RO) {
437			DBG("RX Overrun [%d + %d]\n", host->id,
438					host->pio.len, count);
439			break;
440		}
441		else if (status & SD_STATUS_RU) {
442			DBG("RX Underrun [%d + %d]\n", host->id,
443					host->pio.len, count);
444			break;
445		}
446
447		val = au_readl(HOST_RXPORT(host));
448
449		if (sg_ptr)
450			*sg_ptr++ = (unsigned char) (val & 0xFF);
451	}
452
453	host->pio.len -= count;
454	host->pio.offset += count;
455
456	if (sg_len && count == sg_len) {
457		host->pio.index++;
458		host->pio.offset = 0;
459	}
460
461	if (host->pio.len == 0) {
462		//IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF);
463		IRQ_OFF(host, SD_CONFIG_NE);
464
465		if (host->flags & HOST_F_STOP)
466			SEND_STOP(host);
467
468		tasklet_schedule(&host->data_task);
469	}
470}
471
472/* static void au1xmmc_cmd_complete
473 This is called when a command has been completed - grab the response
474 and check for errors. Then start the data transfer if it is indicated.
475*/
476
/* Command-phase completion: read back the response registers, translate
 * CRC status bits into cmd->error, and either finish the request (no data
 * phase, or error) or move to the data phase and kick the DBDMA channel. */
477static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
478{
479
480	struct mmc_request *mrq = host->mrq;
481	struct mmc_command *cmd;
482	int trans;
483
484	if (!host->mrq)
485		return;
486
487	cmd = mrq->cmd;
488	cmd->error = MMC_ERR_NONE;
489
490	if (cmd->flags & MMC_RSP_PRESENT) {
491		if (cmd->flags & MMC_RSP_136) {
492			u32 r[4];
493			int i;
494
495			r[0] = au_readl(host->iobase + SD_RESP3);
496			r[1] = au_readl(host->iobase + SD_RESP2);
497			r[2] = au_readl(host->iobase + SD_RESP1);
498			r[3] = au_readl(host->iobase + SD_RESP0);
499
500			/* The CRC is omitted from the response, so really
501			 * we only got 120 bits, but the engine expects
502			 * 128 bits, so we have to shift things up
503			 */
504
505			for(i = 0; i < 4; i++) {
506				cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8;
507				if (i != 3)
508					cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24;
509			}
510		} else {
511			/* Technically, we should be getting all 48 bits of
512			 * the response (SD_RESP1 + SD_RESP2), but because
513			 * our response omits the CRC, our data ends up
514			 * being shifted 8 bits to the right. In this case,
515			 * that means that the OSR data starts at bit 31,
516			 * so we can just read RESP0 and return that
517			 */
518			cmd->resp[0] = au_readl(host->iobase + SD_RESP0);
519		}
520	}
521
522	/* Figure out errors */
523
524	if (status & (SD_STATUS_SC | SD_STATUS_WC | SD_STATUS_RC))
525		cmd->error = MMC_ERR_BADCRC;
526
527	trans = host->flags & (HOST_F_XMIT | HOST_F_RECV);
528
	/* No data phase, or the command already failed: mask the data
	 * interrupts and complete the request from the tasklet. */
529	if (!trans || cmd->error != MMC_ERR_NONE) {
530
531		IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA|SD_CONFIG_RF);
532		tasklet_schedule(&host->finish_task);
533		return;
534	}
535
536	host->status = HOST_S_DATA;
537
538	if (host->flags & HOST_F_DMA) {
539		u32 channel = DMA_CHANNEL(host);
540
541		/* Start the DMA as soon as the buffer gets something in it */
542
		/* NOTE(review): unbounded busy-wait on DB|NE — no timeout. */
543		if (host->flags & HOST_F_RECV) {
544			u32 mask = SD_STATUS_DB | SD_STATUS_NE;
545
546			while((status & mask) != mask)
547				status = au_readl(HOST_STATUS(host));
548		}
549
550		au1xxx_dbdma_start(channel);
551	}
552}
553
/* Program the SD clock divider from the requested rate, deriving the
 * peripheral bus clock from the CPU speed and SYS_POWERCTRL divisor. */
554static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate)
555{
556
557	unsigned int pbus = get_au1x00_speed();
558	unsigned int divisor;
559	u32 config;
560
561	/* From databook:
562	   divisor = ((((cpuclock / sbus_divisor) / 2) / mmcclock) / 2) - 1
563	*/
564
565	pbus /= ((au_readl(SYS_POWERCTRL) & 0x3) + 2);
566	pbus /= 2;
567
568	divisor = ((pbus / rate) / 2) - 1;
569
570	config = au_readl(HOST_CONFIG(host));
571
572	config &= ~(SD_CONFIG_DIV);
573	config |= (divisor & SD_CONFIG_DIV) | SD_CONFIG_DE;
574
575	au_writel(config, HOST_CONFIG(host));
576	au_sync();
577}
578
/* Prepare a data transfer: set the direction/STOP flags, map the
 * scatterlist, program the block size, and either load the DBDMA
 * descriptor ring (DMA mode) or arm the PIO state and FIFO interrupts.
 * Returns MMC_ERR_NONE, or MMC_ERR_TIMEOUT on mapping/descriptor failure. */
579static int
580au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data)
581{
582
583	int datalen = data->blocks * data->blksz;
584
585	if (dma != 0)
586		host->flags |= HOST_F_DMA;
587
588	if (data->flags & MMC_DATA_READ)
589		host->flags |= HOST_F_RECV;
590	else
591		host->flags |= HOST_F_XMIT;
592
593	if (host->mrq->stop)
594		host->flags |= HOST_F_STOP;
595
596	host->dma.dir = DMA_BIDIRECTIONAL;
597
598	host->dma.len = dma_map_sg(mmc_dev(host->mmc), data->sg,
599				   data->sg_len, host->dma.dir);
600
601	if (host->dma.len == 0)
602		return MMC_ERR_TIMEOUT;
603
	/* Hardware block-size register holds size minus one. */
604	au_writel(data->blksz - 1, HOST_BLKSIZE(host));
605
606	if (host->flags & HOST_F_DMA) {
607		int i;
608		u32 channel = DMA_CHANNEL(host);
609
610		au1xxx_dbdma_stop(channel);
611
		/* Queue one DBDMA descriptor per scatterlist segment;
		 * only the last one raises an interrupt. */
612		for(i = 0; i < host->dma.len; i++) {
613			u32 ret = 0, flags = DDMA_FLAGS_NOIE;
614			struct scatterlist *sg = &data->sg[i];
615			int sg_len = sg->length;
616
617			int len = (datalen > sg_len) ? sg_len : datalen;
618
619			if (i == host->dma.len - 1)
620				flags = DDMA_FLAGS_IE;
621
622			if (host->flags & HOST_F_XMIT){
623				ret = au1xxx_dbdma_put_source_flags(channel,
624					(void *) (page_address(sg->page) +
625						  sg->offset),
626					len, flags);
627			}
628			else {
629				ret = au1xxx_dbdma_put_dest_flags(channel,
630					(void *) (page_address(sg->page) +
631						  sg->offset),
632					len, flags);
633			}
634
635			if (!ret)
636				goto dataerr;
637
638			datalen -= len;
639		}
640	}
641	else {
642		host->pio.index = 0;
643		host->pio.offset = 0;
644		host->pio.len = datalen;
645
646		if (host->flags & HOST_F_XMIT)
647			IRQ_ON(host, SD_CONFIG_TH);
648		else
649			IRQ_ON(host, SD_CONFIG_NE);
650			//IRQ_ON(host, SD_CONFIG_RA|SD_CONFIG_RF);
651	}
652
653	return MMC_ERR_NONE;
654
655 dataerr:
656	dma_unmap_sg(mmc_dev(host->mmc),data->sg,data->sg_len,host->dma.dir);
657	return MMC_ERR_TIMEOUT;
658}
659
660/* static void au1xmmc_request
661 This actually starts a command or data transaction
662*/
663
/* mmc_host_ops.request: start a command (and data transfer when present).
 * On failure the error is recorded in the command and the request is
 * completed immediately. */
664static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq)
665{
666
667	struct au1xmmc_host *host = mmc_priv(mmc);
668	int ret = MMC_ERR_NONE;
669
670	WARN_ON(irqs_disabled());
671	WARN_ON(host->status != HOST_S_IDLE);
672
673	host->mrq = mrq;
674	host->status = HOST_S_CMD;
675
	/* Activity LED on (au1xmmc_finish_request sets the bit again). */
676	bcsr->disk_leds &= ~(1 << 8);
677
678	if (mrq->data) {
679		FLUSH_FIFO(host);
680		ret = au1xmmc_prepare_data(host, mrq->data);
681	}
682
683	if (ret == MMC_ERR_NONE)
684		ret = au1xmmc_send_command(host, 0, mrq->cmd);
685
686	if (ret != MMC_ERR_NONE) {
687		mrq->cmd->error = ret;
688		au1xmmc_finish_request(host);
689	}
690}
691
/* Bring the controller to a known state: apply the clock, release reset,
 * clear status, program block size and timeout, flush the FIFO, and
 * enable the default interrupt set. */
692static void au1xmmc_reset_controller(struct au1xmmc_host *host)
693{
694
695	/* Apply the clock */
696	au_writel(SD_ENABLE_CE, HOST_ENABLE(host));
697	au_sync_delay(1);
698
699	au_writel(SD_ENABLE_R | SD_ENABLE_CE, HOST_ENABLE(host));
700	au_sync_delay(5);
701
	/* Writing all-ones clears every latched status bit. */
702	au_writel(~0, HOST_STATUS(host));
703	au_sync();
704
705	au_writel(0, HOST_BLKSIZE(host));
706	au_writel(0x001fffff, HOST_TIMEOUT(host));
707	au_sync();
708
709	au_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
710	au_sync();
711
712	au_writel(SD_CONFIG2_EN | SD_CONFIG2_FF, HOST_CONFIG2(host));
713	au_sync_delay(1);
714
715	au_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
716	au_sync();
717
718	/* Configure interrupts */
719	au_writel(AU1XMMC_INTERRUPTS, HOST_CONFIG(host));
720	au_sync();
721}
722
723
/* mmc_host_ops.set_ios: apply slot power state and (if changed) the
 * requested clock rate.  Bus width changes are not handled here. */
724static void au1xmmc_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
725{
726	struct au1xmmc_host *host = mmc_priv(mmc);
727
728	if (ios->power_mode == MMC_POWER_OFF)
729		au1xmmc_set_power(host, 0);
730	else if (ios->power_mode == MMC_POWER_ON) {
731		au1xmmc_set_power(host, 1);
732	}
733
734	if (ios->clock && ios->clock != host->clock) {
735		au1xmmc_set_clock(host, ios->clock);
736		host->clock = ios->clock;
737	}
738}
739
/* DBDMA completion callback: send STOP if the request asked for one and
 * schedule the data-completion tasklet.  Spurious callbacks (no request
 * in flight) are ignored. */
740static void au1xmmc_dma_callback(int irq, void *dev_id)
741{
742	struct au1xmmc_host *host = (struct au1xmmc_host *) dev_id;
743
744	/* Avoid spurious interrupts */
745
746	if (!host->mrq)
747		return;
748
749	if (host->flags & HOST_F_STOP)
750		SEND_STOP(host);
751
752	tasklet_schedule(&host->data_task);
753}
754
755#define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT)
756#define STATUS_DATA_IN (SD_STATUS_NE)
757#define STATUS_DATA_OUT (SD_STATUS_TH)
758
/* Shared interrupt handler for all controllers: for each host, inspect
 * the status register and dispatch to timeout handling, command
 * completion, or the PIO send/receive paths.  Status bits are
 * acknowledged by writing them back. */
759static irqreturn_t au1xmmc_irq(int irq, void *dev_id)
760{
761
762	u32 status;
763	int i, ret = 0;
764
765	disable_irq(AU1100_SD_IRQ);
766
767	for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) {
768		struct au1xmmc_host * host = au1xmmc_hosts[i];
769		u32 handled = 1;
770
771		status = au_readl(HOST_STATUS(host));
772
		/* Response/data timeout: record the error and complete the
		 * request from the finish tasklet. */
773		if (host->mrq && (status & STATUS_TIMEOUT)) {
774			if (status & SD_STATUS_RAT)
775				host->mrq->cmd->error = MMC_ERR_TIMEOUT;
776
777			else if (status & SD_STATUS_DT)
778				host->mrq->data->error = MMC_ERR_TIMEOUT;
779
780			/* In PIO mode, interrupts might still be enabled */
781			IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH);
782
783			//IRQ_OFF(host, SD_CONFIG_TH|SD_CONFIG_RA|SD_CONFIG_RF);
784			tasklet_schedule(&host->finish_task);
785		}
786#if 0
787		else if (status & SD_STATUS_DD) {
788
789			/* Sometimes we get a DD before a NE in PIO mode */
790
791			if (!(host->flags & HOST_F_DMA) &&
792					(status & SD_STATUS_NE))
793				au1xmmc_receive_pio(host);
794			else {
795				au1xmmc_data_complete(host, status);
796				//tasklet_schedule(&host->data_task);
797			}
798		}
799#endif
800		else if (status & (SD_STATUS_CR)) {
801			if (host->status == HOST_S_CMD)
802				au1xmmc_cmd_complete(host,status);
803		}
804		else if (!(host->flags & HOST_F_DMA)) {
805			if ((host->flags & HOST_F_XMIT) &&
806			    (status & STATUS_DATA_OUT))
807				au1xmmc_send_pio(host);
808			else if ((host->flags & HOST_F_RECV) &&
809			       (status & STATUS_DATA_IN))
810				au1xmmc_receive_pio(host);
811		}
812		else if (status & 0x203FBC70) {
813			DBG("Unhandled status %8.8x\n", host->id, status);
814			handled = 0;
815		}
816
		/* Ack: writing the status bits back clears them. */
817		au_writel(status, HOST_STATUS(host));
818		au_sync();
819
820		ret |= handled;
821	}
822
823	enable_irq(AU1100_SD_IRQ);
	/* ret is 0 or 1, matching IRQ_NONE / IRQ_HANDLED. */
824	return ret;
825}
826
/* Periodic timer (see the header comment at the top of the file for why
 * insertion is polled): compare the card-detect bit with the cached
 * HOST_F_ACTIVE flag and notify the MMC core on change, then re-arm. */
827static void au1xmmc_poll_event(unsigned long arg)
828{
829	struct au1xmmc_host *host = (struct au1xmmc_host *) arg;
830
831	int card = au1xmmc_card_inserted(host);
832	int controller = (host->flags & HOST_F_ACTIVE) ? 1 : 0;
833
834	if (card != controller) {
835		host->flags &= ~HOST_F_ACTIVE;
836		if (card) host->flags |= HOST_F_ACTIVE;
837		mmc_detect_change(host->mmc, 0);
838	}
839
840	if (host->mrq != NULL) {
841		u32 status = au_readl(HOST_STATUS(host));
842		DBG("PENDING - %8.8x\n", host->id, status);
843	}
844
845	mod_timer(&host->timer, jiffies + AU1XMMC_DETECT_TIMEOUT);
846}
847
/* DBDMA device descriptor for the memory side of the transfer; see the
 * comment in au1xmmc_init_dma() for why a private 8-bit device is used. */
848static dbdev_tab_t au1xmmc_mem_dbdev =
849{
850	DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 8, 0x00000000, 0, 0
851};
852
/* Allocate and configure the DBDMA tx/rx channels for this host: one
 * private 8-bit memory device, two channels, 8-bit device width, and a
 * descriptor ring per direction. */
853static void au1xmmc_init_dma(struct au1xmmc_host *host)
854{
855
856	u32 rxchan, txchan;
857
858	int txid = au1xmmc_card_table[host->id].tx_devid;
859	int rxid = au1xmmc_card_table[host->id].rx_devid;
860
861	/* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride
862	   of 8 bits.  And since devices are shared, we need to create
863	   our own to avoid freaking out other devices
864	*/
865
866	int memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev);
867
868	txchan = au1xxx_dbdma_chan_alloc(memid, txid,
869			au1xmmc_dma_callback, (void *) host);
870
871	rxchan = au1xxx_dbdma_chan_alloc(rxid, memid,
872			au1xmmc_dma_callback, (void *) host);
873
874	au1xxx_dbdma_set_devwidth(txchan, 8);
875	au1xxx_dbdma_set_devwidth(rxchan, 8);
876
877	au1xxx_dbdma_ring_alloc(txchan, AU1XMMC_DESCRIPTOR_COUNT);
878	au1xxx_dbdma_ring_alloc(rxchan, AU1XMMC_DESCRIPTOR_COUNT);
879
880	host->tx_chan = txchan;
881	host->rx_chan = rxchan;
882}
883
/* Operations exported to the MMC core. */
884static const struct mmc_host_ops au1xmmc_ops = {
885	.request	= au1xmmc_request,
886	.set_ios	= au1xmmc_set_ios,
887	.get_ro		= au1xmmc_card_readonly,
888};
889
/* Probe: request the single shared IRQ, then allocate, initialise and
 * register one mmc_host per controller.  A failed allocation skips that
 * controller (its au1xmmc_hosts[] slot stays NULL) rather than aborting. */
890static int __devinit au1xmmc_probe(struct platform_device *pdev)
891{
892
893	int i, ret = 0;
894
895	/* The interrupt is shared among all controllers */
896	ret = request_irq(AU1100_SD_IRQ, au1xmmc_irq, IRQF_DISABLED, "MMC", 0);
897
898	if (ret) {
899		printk(DRIVER_NAME "ERROR: Couldn't get int %d: %d\n",
900				AU1100_SD_IRQ, ret);
901		return -ENXIO;
902	}
903
	/* Keep the IRQ masked until every host is set up. */
904	disable_irq(AU1100_SD_IRQ);
905
906	for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) {
907		struct mmc_host *mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev);
908		struct au1xmmc_host *host = 0;
909
910		if (!mmc) {
911			printk(DRIVER_NAME "ERROR: no mem for host %d\n", i);
912			au1xmmc_hosts[i] = 0;
913			continue;
914		}
915
916		mmc->ops = &au1xmmc_ops;
917
918		mmc->f_min =   450000;
919		mmc->f_max = 24000000;
920
921		mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE;
922		mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT;
923
924		mmc->max_blk_size = 2048;
925		mmc->max_blk_count = 512;
926
927		mmc->ocr_avail = AU1XMMC_OCR;
928
929		host = mmc_priv(mmc);
930		host->mmc = mmc;
931
932		host->id = i;
933		host->iobase = au1xmmc_card_table[host->id].iobase;
934		host->clock = 0;
935		host->power_mode = MMC_POWER_OFF;
936
937		host->flags = au1xmmc_card_inserted(host) ? HOST_F_ACTIVE : 0;
938		host->status = HOST_S_IDLE;
939
		/* Card-detect polling timer (see comment at top of file). */
940		init_timer(&host->timer);
941
942		host->timer.function = au1xmmc_poll_event;
943		host->timer.data = (unsigned long) host;
944		host->timer.expires = jiffies + AU1XMMC_DETECT_TIMEOUT;
945
946		tasklet_init(&host->data_task, au1xmmc_tasklet_data,
947				(unsigned long) host);
948
949		tasklet_init(&host->finish_task, au1xmmc_tasklet_finish,
950				(unsigned long) host);
951
952		spin_lock_init(&host->lock);
953
954		if (dma != 0)
955			au1xmmc_init_dma(host);
956
957		au1xmmc_reset_controller(host);
958
959		mmc_add_host(mmc);
960		au1xmmc_hosts[i] = host;
961
962		add_timer(&host->timer);
963
964		printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X (mode=%s)\n",
965		       host->id, host->iobase, dma ? "dma" : "pio");
966	}
967
968	enable_irq(AU1100_SD_IRQ);
969
970	return 0;
971}
972
/* Remove: for each registered host, stop the tasklets and poll timer,
 * power the slot down, unregister from the MMC core, free the DBDMA
 * channels and disable the controller; finally release the shared IRQ. */
973static int __devexit au1xmmc_remove(struct platform_device *pdev)
974{
975
976	int i;
977
978	disable_irq(AU1100_SD_IRQ);
979
980	for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) {
981		struct au1xmmc_host *host = au1xmmc_hosts[i];
		/* Slot skipped at probe time (allocation failure). */
982		if (!host) continue;
983
984		tasklet_kill(&host->data_task);
985		tasklet_kill(&host->finish_task);
986
987		del_timer_sync(&host->timer);
988		au1xmmc_set_power(host, 0);
989
990		mmc_remove_host(host->mmc);
991
992		au1xxx_dbdma_chan_free(host->tx_chan);
993		au1xxx_dbdma_chan_free(host->rx_chan);
994
995		au_writel(0x0, HOST_ENABLE(host));
996		au_sync();
997	}
998
999	free_irq(AU1100_SD_IRQ, 0);
1000	return 0;
1001}
1002
/* Platform driver descriptor; suspend/resume are not implemented. */
1003static struct platform_driver au1xmmc_driver = {
1004	.probe         = au1xmmc_probe,
1005	.remove        = au1xmmc_remove,
1006	.suspend       = NULL,
1007	.resume        = NULL,
1008	.driver        = {
1009		.name  = DRIVER_NAME,
1010	},
1011};
1012
/* Module init: register the platform driver. */
1013static int __init au1xmmc_init(void)
1014{
1015	return platform_driver_register(&au1xmmc_driver);
1016}
1017
/* Module exit: unregister the platform driver (triggers remove). */
1018static void __exit au1xmmc_exit(void)
1019{
1020	platform_driver_unregister(&au1xmmc_driver);
1021}
1022
1023module_init(au1xmmc_init);
1024module_exit(au1xmmc_exit);
1025
1026#ifdef MODULE
1027MODULE_AUTHOR("Advanced Micro Devices, Inc");
1028MODULE_DESCRIPTION("MMC/SD driver for the Alchemy Au1XXX");
1029MODULE_LICENSE("GPL");
1030#endif
1031
diff --git a/drivers/mmc/host/au1xmmc.h b/drivers/mmc/host/au1xmmc.h
new file mode 100644
index 000000000000..341cbdf0baca
--- /dev/null
+++ b/drivers/mmc/host/au1xmmc.h
@@ -0,0 +1,96 @@
1#ifndef _AU1XMMC_H_
2#define _AU1XMMC_H_
3
4/* Hardware definitions */
5
6#define AU1XMMC_DESCRIPTOR_COUNT 1
7#define AU1XMMC_DESCRIPTOR_SIZE 2048
8
9#define AU1XMMC_OCR ( MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \
10 MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \
11 MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36)
12
13/* Easy access macros */
14
15#define HOST_STATUS(h) ((h)->iobase + SD_STATUS)
16#define HOST_CONFIG(h) ((h)->iobase + SD_CONFIG)
17#define HOST_ENABLE(h) ((h)->iobase + SD_ENABLE)
18#define HOST_TXPORT(h) ((h)->iobase + SD_TXPORT)
19#define HOST_RXPORT(h) ((h)->iobase + SD_RXPORT)
20#define HOST_CMDARG(h) ((h)->iobase + SD_CMDARG)
21#define HOST_BLKSIZE(h) ((h)->iobase + SD_BLKSIZE)
22#define HOST_CMD(h) ((h)->iobase + SD_CMD)
23#define HOST_CONFIG2(h) ((h)->iobase + SD_CONFIG2)
24#define HOST_TIMEOUT(h) ((h)->iobase + SD_TIMEOUT)
25#define HOST_DEBUG(h) ((h)->iobase + SD_DEBUG)
26
27#define DMA_CHANNEL(h) \
28 ( ((h)->flags & HOST_F_XMIT) ? (h)->tx_chan : (h)->rx_chan)
29
30/* This gives us a hard value for the stop command that we can write directly
31 * to the command register
32 */
33
34#define STOP_CMD (SD_CMD_RT_1B|SD_CMD_CT_7|(0xC << SD_CMD_CI_SHIFT)|SD_CMD_GO)
35
36/* This is the set of interrupts that we configure by default */
37
38#if 0
39#define AU1XMMC_INTERRUPTS (SD_CONFIG_SC | SD_CONFIG_DT | SD_CONFIG_DD | \
40 SD_CONFIG_RAT | SD_CONFIG_CR | SD_CONFIG_I)
41#endif
42
43#define AU1XMMC_INTERRUPTS (SD_CONFIG_SC | SD_CONFIG_DT | \
44 SD_CONFIG_RAT | SD_CONFIG_CR | SD_CONFIG_I)
45/* The poll event (looking for insert/remove events) runs twice a second */
46#define AU1XMMC_DETECT_TIMEOUT (HZ/2)
47
/* Per-controller driver state.  One instance lives in each mmc_host's
 * private area; see au1xmmc_hosts[] in au1xmmc.c. */
48struct au1xmmc_host {
49	struct mmc_host *mmc;
50	struct mmc_request *mrq;	/* request in flight, NULL when idle */
51
52	u32 id;				/* index into au1xmmc_card_table */
53
54	u32 flags;			/* HOST_F_* bits below */
55	u32 iobase;			/* controller register base */
56	u32 clock;			/* currently-programmed clock rate */
57	u32 bus_width;
58	u32 power_mode;
59
60	int status;			/* HOST_S_* state machine below */
61
	/* DMA bookkeeping: mapped scatterlist length and direction. */
62	struct {
63		int len;
64		int dir;
65	} dma;
66
	/* PIO bookkeeping: current sg index, offset within it, and bytes
	 * remaining in the whole transfer. */
67	struct {
68		int index;
69		int offset;
70		int len;
71	} pio;
72
73	u32 tx_chan;			/* DBDMA channel handles */
74	u32 rx_chan;
75
76	struct timer_list timer;	/* card-detect polling timer */
77	struct tasklet_struct finish_task;
78	struct tasklet_struct data_task;
79
80	spinlock_t lock;
81};
82
83/* Status flags used by the host structure */
84
85#define HOST_F_XMIT 0x0001
86#define HOST_F_RECV 0x0002
87#define HOST_F_DMA 0x0010
88#define HOST_F_ACTIVE 0x0100
89#define HOST_F_STOP 0x1000
90
91#define HOST_S_IDLE 0x0001
92#define HOST_S_CMD 0x0002
93#define HOST_S_DATA 0x0003
94#define HOST_S_STOP 0x0004
95
96#endif
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
new file mode 100644
index 000000000000..7ee2045acbef
--- /dev/null
+++ b/drivers/mmc/host/imxmmc.c
@@ -0,0 +1,1137 @@
1/*
2 * linux/drivers/mmc/imxmmc.c - Motorola i.MX MMCI driver
3 *
4 * Copyright (C) 2004 Sascha Hauer, Pengutronix <sascha@saschahauer.de>
5 * Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
6 *
7 * derived from pxamci.c by Russell King
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * 2005-04-17 Pavel Pisa <pisa@cmp.felk.cvut.cz>
14 * Changed to conform redesigned i.MX scatter gather DMA interface
15 *
16 * 2005-11-04 Pavel Pisa <pisa@cmp.felk.cvut.cz>
17 * Updated for 2.6.14 kernel
18 *
19 * 2005-12-13 Jay Monkman <jtm@smoothsmoothie.com>
20 * Found and corrected problems in the write path
21 *
22 * 2005-12-30 Pavel Pisa <pisa@cmp.felk.cvut.cz>
23 * The event handling rewritten right way in softirq.
24 * Added many ugly hacks and delays to overcome SDHC
25 * deficiencies
26 *
27 */
28
29#ifdef CONFIG_MMC_DEBUG
30#define DEBUG
31#else
32#undef DEBUG
33#endif
34
35#include <linux/module.h>
36#include <linux/init.h>
37#include <linux/ioport.h>
38#include <linux/platform_device.h>
39#include <linux/interrupt.h>
40#include <linux/blkdev.h>
41#include <linux/dma-mapping.h>
42#include <linux/mmc/host.h>
43#include <linux/mmc/card.h>
44#include <linux/delay.h>
45
46#include <asm/dma.h>
47#include <asm/io.h>
48#include <asm/irq.h>
49#include <asm/sizes.h>
50#include <asm/arch/mmc.h>
51#include <asm/arch/imx-dma.h>
52
53#include "imxmmc.h"
54
#define DRIVER_NAME "imx-mmc"

/* Fully-masked interrupt configuration: a SET bit in MMC_INT_MASK masks
 * that source; imxmci_start_cmd() unmasks sources by clearing bits. */
#define IMXMCI_INT_MASK_DEFAULT (INT_MASK_BUF_READY | INT_MASK_DATA_TRAN | \
	                INT_MASK_WRITE_OP_DONE | INT_MASK_END_CMD_RES | \
	                INT_MASK_AUTO_CARD_DETECT | INT_MASK_DAT0_EN | INT_MASK_SDIO)
60
/* Per-controller driver state, stored in mmc_priv() of the mmc_host. */
struct imxmci_host {
	struct mmc_host *mmc;		/* MMC core host backing this driver */
	spinlock_t lock;		/* protects imask / MMC_INT_MASK updates */
	struct resource *res;		/* MMIO region claimed in probe */
	int irq;
	imx_dmach_t dma;		/* channel from imx_dma_request_by_prio() */
	unsigned int clkrt;
	unsigned int cmdat;
	volatile unsigned int imask;	/* shadow of MMC_INT_MASK */
	unsigned int power_mode;
	unsigned int present;		/* last observed card-detect state */
	struct imxmmc_platform_data *pdata;

	struct mmc_request *req;	/* request in flight (NULL when idle) */
	struct mmc_command *cmd;
	struct mmc_data *data;

	struct timer_list timer;	/* card-detect poll / stuck watchdog */
	struct tasklet_struct tasklet;	/* bottom half (imxmci_tasklet_fnc) */
	unsigned int status_reg;	/* MMC_STATUS latched at IRQ/DMA time */
	unsigned long pending_events;	/* IMXMCI_PEND_* bit field */
	/* Next two fields are there for CPU driven transfers to overcome SDHC deficiencies */
	u16 *data_ptr;
	unsigned int data_cnt;
	atomic_t stuck_timeout;		/* watchdog tick count, reset on activity */

	unsigned int dma_nents;		/* mapped scatterlist entries */
	unsigned int dma_size;		/* total bytes of the current data phase */
	unsigned int dma_dir;		/* DMA_TO_DEVICE / DMA_FROM_DEVICE */
	int dma_allocated;		/* nonzero once the DMA channel is owned */

	unsigned char actual_bus_width;	/* MMC_BUS_WIDTH_1 or MMC_BUS_WIDTH_4 */

	int prev_cmd_code;		/* opcode of last finished command (debug) */
};
96
/* Bit numbers for host->pending_events (used with set_bit()/test_bit()) */
#define IMXMCI_PEND_IRQ_b	0	/* controller IRQ occurred */
#define IMXMCI_PEND_DMA_END_b	1	/* DMA completion reported */
#define IMXMCI_PEND_DMA_ERR_b	2
#define IMXMCI_PEND_WAIT_RESP_b	3	/* command sent, response awaited */
#define IMXMCI_PEND_DMA_DATA_b	4	/* data phase handled by DMA */
#define IMXMCI_PEND_CPU_DATA_b	5	/* data phase handled by CPU/PIO */
#define IMXMCI_PEND_CARD_XCHG_b	6	/* card inserted or removed */
#define IMXMCI_PEND_SET_INIT_b	7	/* add CMD_DAT_CONT_INIT to next command */
#define IMXMCI_PEND_STARTED_b	8	/* IRQ seen, clock start confirmed */

/* Mask forms of the bits above */
#define IMXMCI_PEND_IRQ_m	(1 << IMXMCI_PEND_IRQ_b)
#define IMXMCI_PEND_DMA_END_m	(1 << IMXMCI_PEND_DMA_END_b)
#define IMXMCI_PEND_DMA_ERR_m	(1 << IMXMCI_PEND_DMA_ERR_b)
#define IMXMCI_PEND_WAIT_RESP_m	(1 << IMXMCI_PEND_WAIT_RESP_b)
#define IMXMCI_PEND_DMA_DATA_m	(1 << IMXMCI_PEND_DMA_DATA_b)
#define IMXMCI_PEND_CPU_DATA_m	(1 << IMXMCI_PEND_CPU_DATA_b)
#define IMXMCI_PEND_CARD_XCHG_m	(1 << IMXMCI_PEND_CARD_XCHG_b)
#define IMXMCI_PEND_SET_INIT_m	(1 << IMXMCI_PEND_SET_INIT_b)
#define IMXMCI_PEND_STARTED_m	(1 << IMXMCI_PEND_STARTED_b)
116
/*
 * Gate the SDHC card bus clock off and busy-wait until the controller
 * reports it has actually stopped.  The STOP request is re-issued every
 * 128 iterations because the hardware can miss it; gives up (with a
 * debug message only) after 0x1000 polls.
 */
static void imxmci_stop_clock(struct imxmci_host *host)
{
	int i = 0;
	MMC_STR_STP_CLK &= ~STR_STP_CLK_START_CLK;
	while(i < 0x1000) {
		if(!(i & 0x7f))
			MMC_STR_STP_CLK |= STR_STP_CLK_STOP_CLK;

		if(!(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)) {
			/* Check twice before cut */
			if(!(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN))
				return;
		}

		i++;
	}
	dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n");
}
135
/*
 * Start the SDHC card bus clock and wait until the hardware confirms it
 * is running, or an interrupt (IMXMCI_PEND_STARTED_b) reports the
 * command as started.  Returns 0 on success, -1 if the clock never ran.
 */
static int imxmci_start_clock(struct imxmci_host *host)
{
	unsigned int trials = 0;
	unsigned int delay_limit = 128;
	unsigned long flags;

	MMC_STR_STP_CLK &= ~STR_STP_CLK_STOP_CLK;

	clear_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);

	/*
	 * Command start of the clock, this usually succeeds in less
	 * then 6 delay loops, but during card detection (low clockrate)
	 * it takes up to 5000 delay loops and sometimes fails for the first time
	 */
	MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK;

	do {
		unsigned int delay = delay_limit;

		while(delay--){
			if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)
				/* Check twice before cut */
				if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)
					return 0;

			if(test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
				return 0;
		}

		local_irq_save(flags);
		/*
		 * Ensure, that request is not doubled under all possible circumstances.
		 * It is possible, that clock running state is missed, because some other
		 * IRQ or schedule delays this function execution and the clocks has
		 * been already stopped by other means (response processing, SDHC HW)
		 */
		if(!test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
			MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK;
		local_irq_restore(flags);

	} while(++trials<256);

	dev_err(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n");

	return -1;
}
183
184static void imxmci_softreset(void)
185{
186 /* reset sequence */
187 MMC_STR_STP_CLK = 0x8;
188 MMC_STR_STP_CLK = 0xD;
189 MMC_STR_STP_CLK = 0x5;
190 MMC_STR_STP_CLK = 0x5;
191 MMC_STR_STP_CLK = 0x5;
192 MMC_STR_STP_CLK = 0x5;
193 MMC_STR_STP_CLK = 0x5;
194 MMC_STR_STP_CLK = 0x5;
195 MMC_STR_STP_CLK = 0x5;
196 MMC_STR_STP_CLK = 0x5;
197
198 MMC_RES_TO = 0xff;
199 MMC_BLK_LEN = 512;
200 MMC_NOB = 1;
201}
202
/*
 * Poll MMC_STATUS (OR-accumulating into *pstat so previously seen bits
 * are not lost) until a bit from @stat_mask appears.  @timeout is in
 * microseconds; each poll step is 2us.
 *
 * Returns 0 if the condition held on entry, the number of microseconds
 * waited when it became true later, or -1 on timeout.
 */
static int imxmci_busy_wait_for_status(struct imxmci_host *host,
			unsigned int *pstat, unsigned int stat_mask,
			int timeout, const char *where)
{
	int loops=0;
	while(!(*pstat & stat_mask)) {
		loops+=2;
		if(loops >= timeout) {
			dev_dbg(mmc_dev(host->mmc), "busy wait timeout in %s, STATUS = 0x%x (0x%x)\n",
				where, *pstat, stat_mask);
			return -1;
		}
		udelay(2);
		*pstat |= MMC_STATUS;
	}
	if(!loops)
		return 0;

	/* The busy-wait is expected there for clock <8MHz due to SDHC hardware flaws */
	if(!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock>=8000000))
		dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
			 loops, where, *pstat, stat_mask);
	return loops;
}
227
/*
 * Prepare the controller for the data phase of a request: program block
 * count/length, and set up either CPU-driven PIO (transfers < 512
 * bytes, which the SDHC DMA cannot handle) or scatter-gather DMA.
 * For DMA reads the engine is enabled here; DMA writes are enabled
 * later, after the command response (see imxmci_cmd_done()).
 */
static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned int blksz = data->blksz;
	unsigned int datasz = nob * blksz;
	int i;

	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	host->data = data;
	data->bytes_xfered = 0;

	MMC_NOB = nob;
	MMC_BLK_LEN = blksz;

	/*
	 * DMA cannot be used for small block sizes, we have to use CPU driven transfers otherwise.
	 * We are in big troubles for non-512 byte transfers according to note in the paragraph
	 * 20.6.7 of User Manual anyway, but we need to be able to transfer SCR at least.
	 * The situation is even more complex in reality. The SDHC is not able to handle well
	 * partial FIFO fills and reads. The length has to be rounded up to burst size multiple.
	 * This is required for SCR read at least.
	 */
	if (datasz < 512) {
		host->dma_size = datasz;
		if (data->flags & MMC_DATA_READ) {
			host->dma_dir = DMA_FROM_DEVICE;

			/* Hack to enable read SCR */
			MMC_NOB = 1;
			MMC_BLK_LEN = 512;
		} else {
			host->dma_dir = DMA_TO_DEVICE;
		}

		/* Convert back to virtual address */
		host->data_ptr = (u16*)(page_address(data->sg->page) + data->sg->offset);
		host->data_cnt = 0;

		clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
		set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);

		return;
	}

	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
					     data->sg_len, host->dma_dir);

		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
				 host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_READ);

		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/
		CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN;
	} else {
		host->dma_dir = DMA_TO_DEVICE;

		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
					     data->sg_len, host->dma_dir);

		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
				 host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_WRITE);

		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/
		CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN;
	}

#if 1	/* This code is there only for consistency checking and can be disabled in future */
	host->dma_size = 0;
	for(i=0; i<host->dma_nents; i++)
		host->dma_size+=data->sg[i].length;

	if (datasz > host->dma_size) {
		dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dm_size\n",
			datasz, host->dma_size);
	}
#endif

	host->dma_size = datasz;

	wmb();

	if(host->actual_bus_width == MMC_BUS_WIDTH_4)
		BLR(host->dma) = 0;	/* burst 64 byte read / 64 bytes write */
	else
		BLR(host->dma) = 16;	/* burst 16 byte read / 16 bytes write */

	RSSR(host->dma) = DMA_REQ_SDHC;

	set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
	clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);

	/* start DMA engine for read, write is delayed after initial response */
	if (host->dma_dir == DMA_FROM_DEVICE) {
		imx_dma_enable(host->dma);
	}
}
327
/*
 * Program the command registers and kick off @cmd.  @cmdat carries the
 * CMD_DAT_CONT_* data-phase flags prepared by the caller; response
 * format, bus width and (when scheduled) the init bit are added here.
 * The interrupt mask is then narrowed to just the sources this command
 * needs (a SET bit in MMC_INT_MASK masks the source).
 */
static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
	unsigned long flags;
	u32 imask;

	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	/* Ensure, that clock are stopped else command programming and start fails */
	imxmci_stop_clock(host);

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= CMD_DAT_CONT_BUSY;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1: /* short CRC, OPCODE */
	case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
		cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R1;
		break;
	case MMC_RSP_R2: /* long 136 bit + CRC */
		cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R2;
		break;
	case MMC_RSP_R3: /* short */
		cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R3;
		break;
	default:
		break;
	}

	if ( test_and_clear_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events) )
		cmdat |= CMD_DAT_CONT_INIT; /* This command needs init */

	if ( host->actual_bus_width == MMC_BUS_WIDTH_4 )
		cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;

	MMC_CMD = cmd->opcode;
	MMC_ARGH = cmd->arg >> 16;
	MMC_ARGL = cmd->arg & 0xffff;
	MMC_CMD_DAT_CONT = cmdat;

	atomic_set(&host->stuck_timeout, 0);
	set_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events);

	/* Unmask (clear) only what this command needs */
	imask = IMXMCI_INT_MASK_DEFAULT;
	imask &= ~INT_MASK_END_CMD_RES;
	if ( cmdat & CMD_DAT_CONT_DATA_ENABLE ) {
		/*imask &= ~INT_MASK_BUF_READY;*/
		imask &= ~INT_MASK_DATA_TRAN;
		if ( cmdat & CMD_DAT_CONT_WRITE )
			imask &= ~INT_MASK_WRITE_OP_DONE;
		if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
			imask &= ~INT_MASK_BUF_READY;
	}

	spin_lock_irqsave(&host->lock, flags);
	host->imask = imask;
	MMC_INT_MASK = host->imask;
	spin_unlock_irqrestore(&host->lock, flags);

	dev_dbg(mmc_dev(host->mmc), "CMD%02d (0x%02x) mask set to 0x%04x\n",
		cmd->opcode, cmd->opcode, imask);

	imxmci_start_clock(host);
}
393
/*
 * Complete @req: drop any transfer-related pending events, restore the
 * fully-masked default interrupt configuration, remember the opcode for
 * stuck-timeout diagnostics and hand the request back to the MMC core.
 */
static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	host->pending_events &= ~(IMXMCI_PEND_WAIT_RESP_m | IMXMCI_PEND_DMA_END_m |
			IMXMCI_PEND_DMA_DATA_m | IMXMCI_PEND_CPU_DATA_m);

	host->imask = IMXMCI_INT_MASK_DEFAULT;
	MMC_INT_MASK = host->imask;

	spin_unlock_irqrestore(&host->lock, flags);

	if(req && req->cmd)
		host->prev_cmd_code = req->cmd->opcode;

	host->req = NULL;
	host->cmd = NULL;
	host->data = NULL;
	mmc_request_done(host->mmc, req);
}
416
/*
 * Tear down the data phase: stop DMA and unmap the scatterlist if a DMA
 * transfer was pending, translate SDHC error status into an MMC core
 * error code, or report the byte count on success.
 * Returns the resulting data->error value.
 */
static int imxmci_finish_data(struct imxmci_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;
	int data_error;

	if(test_and_clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)){
		imx_dma_disable(host->dma);
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents,
			     host->dma_dir);
	}

	if ( stat & STATUS_ERR_MASK ) {
		dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",stat);
		if(stat & (STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR))
			data->error = MMC_ERR_BADCRC;
		else if(stat & STATUS_TIME_OUT_READ)
			data->error = MMC_ERR_TIMEOUT;
		else
			data->error = MMC_ERR_FAILED;
	} else {
		data->bytes_xfered = host->dma_size;
	}

	data_error = data->error;

	host->data = NULL;

	return data_error;
}
446
447static int imxmci_cmd_done(struct imxmci_host *host, unsigned int stat)
448{
449 struct mmc_command *cmd = host->cmd;
450 int i;
451 u32 a,b,c;
452 struct mmc_data *data = host->data;
453
454 if (!cmd)
455 return 0;
456
457 host->cmd = NULL;
458
459 if (stat & STATUS_TIME_OUT_RESP) {
460 dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
461 cmd->error = MMC_ERR_TIMEOUT;
462 } else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
463 dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
464 cmd->error = MMC_ERR_BADCRC;
465 }
466
467 if(cmd->flags & MMC_RSP_PRESENT) {
468 if(cmd->flags & MMC_RSP_136) {
469 for (i = 0; i < 4; i++) {
470 u32 a = MMC_RES_FIFO & 0xffff;
471 u32 b = MMC_RES_FIFO & 0xffff;
472 cmd->resp[i] = a<<16 | b;
473 }
474 } else {
475 a = MMC_RES_FIFO & 0xffff;
476 b = MMC_RES_FIFO & 0xffff;
477 c = MMC_RES_FIFO & 0xffff;
478 cmd->resp[0] = a<<24 | b<<8 | c>>8;
479 }
480 }
481
482 dev_dbg(mmc_dev(host->mmc), "RESP 0x%08x, 0x%08x, 0x%08x, 0x%08x, error %d\n",
483 cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3], cmd->error);
484
485 if (data && (cmd->error == MMC_ERR_NONE) && !(stat & STATUS_ERR_MASK)) {
486 if (host->req->data->flags & MMC_DATA_WRITE) {
487
488 /* Wait for FIFO to be empty before starting DMA write */
489
490 stat = MMC_STATUS;
491 if(imxmci_busy_wait_for_status(host, &stat,
492 STATUS_APPL_BUFF_FE,
493 40, "imxmci_cmd_done DMA WR") < 0) {
494 cmd->error = MMC_ERR_FIFO;
495 imxmci_finish_data(host, stat);
496 if(host->req)
497 imxmci_finish_request(host, host->req);
498 dev_warn(mmc_dev(host->mmc), "STATUS = 0x%04x\n",
499 stat);
500 return 0;
501 }
502
503 if(test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
504 imx_dma_enable(host->dma);
505 }
506 }
507 } else {
508 struct mmc_request *req;
509 imxmci_stop_clock(host);
510 req = host->req;
511
512 if(data)
513 imxmci_finish_data(host, stat);
514
515 if( req ) {
516 imxmci_finish_request(host, req);
517 } else {
518 dev_warn(mmc_dev(host->mmc), "imxmci_cmd_done: no request to finish\n");
519 }
520 }
521
522 return 1;
523}
524
525static int imxmci_data_done(struct imxmci_host *host, unsigned int stat)
526{
527 struct mmc_data *data = host->data;
528 int data_error;
529
530 if (!data)
531 return 0;
532
533 data_error = imxmci_finish_data(host, stat);
534
535 if (host->req->stop) {
536 imxmci_stop_clock(host);
537 imxmci_start_cmd(host, host->req->stop, 0);
538 } else {
539 struct mmc_request *req;
540 req = host->req;
541 if( req ) {
542 imxmci_finish_request(host, req);
543 } else {
544 dev_warn(mmc_dev(host->mmc), "imxmci_data_done: no request to finish\n");
545 }
546 }
547
548 return 1;
549}
550
/*
 * CPU-driven (PIO) data transfer, used for requests below 512 bytes
 * where the SDHC DMA cannot be used.  Moves data through the FIFO one
 * burst (16 bytes, or 64 bytes on a 4-bit bus) at a time.  The udelay()
 * calls work around SDHC misbehaviour at clocks below 8 MHz.
 * Returns 1 when the transfer finished, -1 on read timeout, 0 when more
 * data is still expected; accumulated status is left in *pstat.
 */
static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
{
	int i;
	int burst_len;
	int trans_done = 0;
	unsigned int stat = *pstat;

	if(host->actual_bus_width != MMC_BUS_WIDTH_4)
		burst_len = 16;
	else
		burst_len = 64;

	/* This is unfortunately required */
	dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n",
		stat);

	udelay(20);	/* required for clocks < 8MHz*/

	if(host->dma_dir == DMA_FROM_DEVICE) {
		imxmci_busy_wait_for_status(host, &stat,
				STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE |
				STATUS_TIME_OUT_READ,
				50, "imxmci_cpu_driven_data read");

		while((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) &&
		      !(stat & STATUS_TIME_OUT_READ) &&
		      (host->data_cnt < 512)) {

			udelay(20);	/* required for clocks < 8MHz*/

			for(i = burst_len; i>=2 ; i-=2) {
				u16 data;
				data = MMC_BUFFER_ACCESS;
				udelay(10);	/* required for clocks < 8MHz*/
				if(host->data_cnt+2 <= host->dma_size) {
					*(host->data_ptr++) = data;
				} else {
					/* odd trailing byte: keep only the low half */
					if(host->data_cnt < host->dma_size)
						*(u8*)(host->data_ptr) = data;
				}
				host->data_cnt += 2;
			}

			stat = MMC_STATUS;

			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n",
				host->data_cnt, burst_len, stat);
		}

		if((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512))
			trans_done = 1;

		/* rounded-up reads would trip a bogus CRC error */
		if(host->dma_size & 0x1ff)
			stat &= ~STATUS_CRC_READ_ERR;

		if(stat & STATUS_TIME_OUT_READ) {
			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read timeout STATUS = 0x%x\n",
				stat);
			trans_done = -1;
		}

	} else {
		imxmci_busy_wait_for_status(host, &stat,
				STATUS_APPL_BUFF_FE,
				20, "imxmci_cpu_driven_data write");

		while((stat & STATUS_APPL_BUFF_FE) &&
		      (host->data_cnt < host->dma_size)) {
			if(burst_len >= host->dma_size - host->data_cnt) {
				burst_len = host->dma_size - host->data_cnt;
				host->data_cnt = host->dma_size;
				trans_done = 1;
			} else {
				host->data_cnt += burst_len;
			}

			for(i = burst_len; i>0 ; i-=2)
				MMC_BUFFER_ACCESS = *(host->data_ptr++);

			stat = MMC_STATUS;

			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data write burst %d STATUS = 0x%x\n",
				burst_len, stat);
		}
	}

	*pstat = stat;

	return trans_done;
}
641
642static void imxmci_dma_irq(int dma, void *devid)
643{
644 struct imxmci_host *host = devid;
645 uint32_t stat = MMC_STATUS;
646
647 atomic_set(&host->stuck_timeout, 0);
648 host->status_reg = stat;
649 set_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
650 tasklet_schedule(&host->tasklet);
651}
652
653static irqreturn_t imxmci_irq(int irq, void *devid)
654{
655 struct imxmci_host *host = devid;
656 uint32_t stat = MMC_STATUS;
657 int handled = 1;
658
659 MMC_INT_MASK = host->imask | INT_MASK_SDIO | INT_MASK_AUTO_CARD_DETECT;
660
661 atomic_set(&host->stuck_timeout, 0);
662 host->status_reg = stat;
663 set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
664 set_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
665 tasklet_schedule(&host->tasklet);
666
667 return IRQ_RETVAL(handled);;
668}
669
/*
 * Bottom half: processes the events flagged by imxmci_irq(),
 * imxmci_dma_irq() and imxmci_check_status() — stuck-hardware timeout,
 * command response, CPU-driven data, DMA completion and card change.
 */
static void imxmci_tasklet_fnc(unsigned long data)
{
	struct imxmci_host *host = (struct imxmci_host *)data;
	u32 stat;
	unsigned int data_dir_mask = 0;	/* STATUS_WR_CRC_ERROR_CODE_MASK */
	int timeout = 0;

	/* Watchdog fired >4 times (~2s): dump state and fail the request */
	if(atomic_read(&host->stuck_timeout) > 4) {
		char *what;
		timeout = 1;
		stat = MMC_STATUS;
		host->status_reg = stat;
		if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
			if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
				what = "RESP+DMA";
			else
				what = "RESP";
		else
			if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
				if(test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events))
					what = "DATA";
				else
					what = "DMA";
			else
				what = "???";

		dev_err(mmc_dev(host->mmc), "%s TIMEOUT, hardware stucked STATUS = 0x%04x IMASK = 0x%04x\n",
			what, stat, MMC_INT_MASK);
		dev_err(mmc_dev(host->mmc), "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n",
			MMC_CMD_DAT_CONT, MMC_BLK_LEN, MMC_NOB, CCR(host->dma));
		dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n",
			host->cmd?host->cmd->opcode:0, host->prev_cmd_code, 1<<host->actual_bus_width, host->dma_size);
	}

	/* Card gone or stuck: fake every error bit so everything aborts */
	if(!host->present || timeout)
		host->status_reg = STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ |
				   STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR;

	if(test_bit(IMXMCI_PEND_IRQ_b, &host->pending_events) || timeout) {
		clear_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);

		stat = MMC_STATUS;
		/*
		 * This is not required in theory, but there is chance to miss some flag
		 * which clears automatically by mask write, FreeScale original code keeps
		 * stat from IRQ time so do I
		 */
		stat |= host->status_reg;

		if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
			stat &= ~STATUS_CRC_READ_ERR;

		if(test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
			imxmci_busy_wait_for_status(host, &stat,
					STATUS_END_CMD_RESP | STATUS_ERR_MASK,
					20, "imxmci_tasklet_fnc resp (ERRATUM #4)");
		}

		if(stat & (STATUS_END_CMD_RESP | STATUS_ERR_MASK)) {
			if(test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
				imxmci_cmd_done(host, stat);
			if(host->data && (stat & STATUS_ERR_MASK))
				imxmci_data_done(host, stat);
		}

		if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) {
			stat |= MMC_STATUS;
			if(imxmci_cpu_driven_data(host, &stat)){
				if(test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
					imxmci_cmd_done(host, stat);
				atomic_clear_mask(IMXMCI_PEND_IRQ_m|IMXMCI_PEND_CPU_DATA_m,
							&host->pending_events);
				imxmci_data_done(host, stat);
			}
		}
	}

	if(test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events) &&
	   !test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {

		stat = MMC_STATUS;
		/* Same as above */
		stat |= host->status_reg;

		if(host->dma_dir == DMA_TO_DEVICE) {
			data_dir_mask = STATUS_WRITE_OP_DONE;
		} else {
			data_dir_mask = STATUS_DATA_TRANS_DONE;
		}

		if(stat & data_dir_mask) {
			clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
			imxmci_data_done(host, stat);
		}
	}

	/* Card insertion/removal: abort anything in flight, then rescan */
	if(test_and_clear_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events)) {

		if(host->cmd)
			imxmci_cmd_done(host, STATUS_TIME_OUT_RESP);

		if(host->data)
			imxmci_data_done(host, STATUS_TIME_OUT_READ |
					 STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR);

		if(host->req)
			imxmci_finish_request(host, host->req);

		mmc_detect_change(host->mmc, msecs_to_jiffies(100));

	}
}
782
783static void imxmci_request(struct mmc_host *mmc, struct mmc_request *req)
784{
785 struct imxmci_host *host = mmc_priv(mmc);
786 unsigned int cmdat;
787
788 WARN_ON(host->req != NULL);
789
790 host->req = req;
791
792 cmdat = 0;
793
794 if (req->data) {
795 imxmci_setup_data(host, req->data);
796
797 cmdat |= CMD_DAT_CONT_DATA_ENABLE;
798
799 if (req->data->flags & MMC_DATA_WRITE)
800 cmdat |= CMD_DAT_CONT_WRITE;
801
802 if (req->data->flags & MMC_DATA_STREAM) {
803 cmdat |= CMD_DAT_CONT_STREAM_BLOCK;
804 }
805 }
806
807 imxmci_start_cmd(host, req->cmd, cmdat);
808}
809
/* SDHC input clock: 96 MHz PERCLK2 divided by prescaler 5 (see below) */
#define CLK_RATE 19200000

/*
 * mmc_host_ops.set_ios: apply bus width (DAT3 pin mode), power mode
 * (schedules the init sequence on power-up) and clock rate (PERCLK2
 * prescaler plus a power-of-two divider).
 */
static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct imxmci_host *host = mmc_priv(mmc);
	int prescaler;

	if( ios->bus_width==MMC_BUS_WIDTH_4 ) {
		host->actual_bus_width = MMC_BUS_WIDTH_4;
		imx_gpio_mode(PB11_PF_SD_DAT3);
	}else{
		host->actual_bus_width = MMC_BUS_WIDTH_1;
		imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
	}

	if ( host->power_mode != ios->power_mode ) {
		switch (ios->power_mode) {
		case MMC_POWER_OFF:
			break;
		case MMC_POWER_UP:
			set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
			break;
		case MMC_POWER_ON:
			break;
		}
		host->power_mode = ios->power_mode;
	}

	if ( ios->clock ) {
		unsigned int clk;

		/* The prescaler is 5 for PERCLK2 equal to 96MHz
		 * then 96MHz / 5 = 19.2 MHz
		 */
		clk=imx_get_perclk2();
		prescaler=(clk+(CLK_RATE*7)/8)/CLK_RATE;
		/* map the computed divider onto the register encoding */
		switch(prescaler) {
		case 0:
		case 1:	prescaler = 0;
			break;
		case 2:	prescaler = 1;
			break;
		case 3:	prescaler = 2;
			break;
		case 4:	prescaler = 4;
			break;
		default:
		case 5:	prescaler = 5;
			break;
		}

		dev_dbg(mmc_dev(host->mmc), "PERCLK2 %d MHz -> prescaler %d\n",
			clk, prescaler);

		/* find the smallest power-of-two divider not above the target */
		for(clk=0; clk<8; clk++) {
			int x;
			x = CLK_RATE / (1<<clk);
			if( x <= ios->clock)
				break;
		}

		MMC_STR_STP_CLK |= STR_STP_CLK_ENABLE; /* enable controller */

		imxmci_stop_clock(host);
		MMC_CLK_RATE = (prescaler<<3) | clk;
		/*
		 * Under my understanding, clock should not be started there, because it would
		 * initiate SDHC sequencer and send last or random command into card
		 */
		/*imxmci_start_clock(host);*/

		dev_dbg(mmc_dev(host->mmc), "MMC_CLK_RATE: 0x%08x\n", MMC_CLK_RATE);
	} else {
		imxmci_stop_clock(host);
	}
}
886
/* Operations exported to the MMC core */
static const struct mmc_host_ops imxmci_ops = {
	.request	= imxmci_request,
	.set_ios	= imxmci_set_ios,
};
891
892static struct resource *platform_device_resource(struct platform_device *dev, unsigned int mask, int nr)
893{
894 int i;
895
896 for (i = 0; i < dev->num_resources; i++)
897 if (dev->resource[i].flags == mask && nr-- == 0)
898 return &dev->resource[i];
899 return NULL;
900}
901
902static int platform_device_irq(struct platform_device *dev, int nr)
903{
904 int i;
905
906 for (i = 0; i < dev->num_resources; i++)
907 if (dev->resource[i].flags == IORESOURCE_IRQ && nr-- == 0)
908 return dev->resource[i].start;
909 return NO_IRQ;
910}
911
/*
 * Periodic (HZ/2) timer callback: polls the board's card-detect hook
 * and acts as the stuck-request watchdog.
 */
static void imxmci_check_status(unsigned long data)
{
	struct imxmci_host *host = (struct imxmci_host *)data;

	if( host->pdata->card_present() != host->present ) {
		host->present ^= 1;
		dev_info(mmc_dev(host->mmc), "card %s\n",
		      host->present ? "inserted" : "removed");

		set_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}

	/* While a response or DMA completion is pending, count ticks;
	 * the tasklet treats >4 ticks (~2s) as a hardware lockup. */
	if(test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events) ||
	   test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
		atomic_inc(&host->stuck_timeout);
		if(atomic_read(&host->stuck_timeout) > 4)
			tasklet_schedule(&host->tasklet);
	} else {
		atomic_set(&host->stuck_timeout, 0);

	}

	mod_timer(&host->timer, jiffies + (HZ>>1));
}
937
938static int imxmci_probe(struct platform_device *pdev)
939{
940 struct mmc_host *mmc;
941 struct imxmci_host *host = NULL;
942 struct resource *r;
943 int ret = 0, irq;
944
945 printk(KERN_INFO "i.MX mmc driver\n");
946
947 r = platform_device_resource(pdev, IORESOURCE_MEM, 0);
948 irq = platform_device_irq(pdev, 0);
949 if (!r || irq == NO_IRQ)
950 return -ENXIO;
951
952 r = request_mem_region(r->start, 0x100, "IMXMCI");
953 if (!r)
954 return -EBUSY;
955
956 mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev);
957 if (!mmc) {
958 ret = -ENOMEM;
959 goto out;
960 }
961
962 mmc->ops = &imxmci_ops;
963 mmc->f_min = 150000;
964 mmc->f_max = CLK_RATE/2;
965 mmc->ocr_avail = MMC_VDD_32_33;
966 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_BYTEBLOCK;
967
968 /* MMC core transfer sizes tunable parameters */
969 mmc->max_hw_segs = 64;
970 mmc->max_phys_segs = 64;
971 mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */
972 mmc->max_req_size = 64*512; /* default PAGE_CACHE_SIZE */
973 mmc->max_blk_size = 2048;
974 mmc->max_blk_count = 65535;
975
976 host = mmc_priv(mmc);
977 host->mmc = mmc;
978 host->dma_allocated = 0;
979 host->pdata = pdev->dev.platform_data;
980
981 spin_lock_init(&host->lock);
982 host->res = r;
983 host->irq = irq;
984
985 imx_gpio_mode(PB8_PF_SD_DAT0);
986 imx_gpio_mode(PB9_PF_SD_DAT1);
987 imx_gpio_mode(PB10_PF_SD_DAT2);
988 /* Configured as GPIO with pull-up to ensure right MCC card mode */
989 /* Switched to PB11_PF_SD_DAT3 if 4 bit bus is configured */
990 imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
991 /* imx_gpio_mode(PB11_PF_SD_DAT3); */
992 imx_gpio_mode(PB12_PF_SD_CLK);
993 imx_gpio_mode(PB13_PF_SD_CMD);
994
995 imxmci_softreset();
996
997 if ( MMC_REV_NO != 0x390 ) {
998 dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
999 MMC_REV_NO);
1000 goto out;
1001 }
1002
1003 MMC_READ_TO = 0x2db4; /* recommended in data sheet */
1004
1005 host->imask = IMXMCI_INT_MASK_DEFAULT;
1006 MMC_INT_MASK = host->imask;
1007
1008
1009 if(imx_dma_request_by_prio(&host->dma, DRIVER_NAME, DMA_PRIO_LOW)<0){
1010 dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n");
1011 ret = -EBUSY;
1012 goto out;
1013 }
1014 host->dma_allocated=1;
1015 imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host);
1016
1017 tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host);
1018 host->status_reg=0;
1019 host->pending_events=0;
1020
1021 ret = request_irq(host->irq, imxmci_irq, 0, DRIVER_NAME, host);
1022 if (ret)
1023 goto out;
1024
1025 host->present = host->pdata->card_present();
1026 init_timer(&host->timer);
1027 host->timer.data = (unsigned long)host;
1028 host->timer.function = imxmci_check_status;
1029 add_timer(&host->timer);
1030 mod_timer(&host->timer, jiffies + (HZ>>1));
1031
1032 platform_set_drvdata(pdev, mmc);
1033
1034 mmc_add_host(mmc);
1035
1036 return 0;
1037
1038out:
1039 if (host) {
1040 if(host->dma_allocated){
1041 imx_dma_free(host->dma);
1042 host->dma_allocated=0;
1043 }
1044 }
1045 if (mmc)
1046 mmc_free_host(mmc);
1047 release_resource(r);
1048 return ret;
1049}
1050
/*
 * Undo imxmci_probe(): quiesce the tasklet and watchdog timer, detach
 * from the MMC core, then release IRQ, DMA channel and MMIO region.
 */
static int imxmci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	if (mmc) {
		struct imxmci_host *host = mmc_priv(mmc);

		tasklet_disable(&host->tasklet);

		del_timer_sync(&host->timer);
		mmc_remove_host(mmc);

		free_irq(host->irq, host);
		if(host->dma_allocated){
			imx_dma_free(host->dma);
			host->dma_allocated=0;
		}

		tasklet_kill(&host->tasklet);

		release_resource(host->res);

		mmc_free_host(mmc);
	}
	return 0;
}
1079
1080#ifdef CONFIG_PM
1081static int imxmci_suspend(struct platform_device *dev, pm_message_t state)
1082{
1083 struct mmc_host *mmc = platform_get_drvdata(dev);
1084 int ret = 0;
1085
1086 if (mmc)
1087 ret = mmc_suspend_host(mmc, state);
1088
1089 return ret;
1090}
1091
1092static int imxmci_resume(struct platform_device *dev)
1093{
1094 struct mmc_host *mmc = platform_get_drvdata(dev);
1095 struct imxmci_host *host;
1096 int ret = 0;
1097
1098 if (mmc) {
1099 host = mmc_priv(mmc);
1100 if(host)
1101 set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
1102 ret = mmc_resume_host(mmc);
1103 }
1104
1105 return ret;
1106}
1107#else
1108#define imxmci_suspend NULL
1109#define imxmci_resume NULL
1110#endif /* CONFIG_PM */
1111
/* Platform driver glue, matched by name against the "imx-mmc" device */
static struct platform_driver imxmci_driver = {
	.probe		= imxmci_probe,
	.remove		= imxmci_remove,
	.suspend	= imxmci_suspend,
	.resume		= imxmci_resume,
	.driver		= {
		.name		= DRIVER_NAME,
	}
};
1121
/* Module entry point: register the platform driver. */
static int __init imxmci_init(void)
{
	return platform_driver_register(&imxmci_driver);
}
1126
/* Module exit point: unregister the platform driver. */
static void __exit imxmci_exit(void)
{
	platform_driver_unregister(&imxmci_driver);
}
1131
1132module_init(imxmci_init);
1133module_exit(imxmci_exit);
1134
1135MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
1136MODULE_AUTHOR("Sascha Hauer, Pengutronix");
1137MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/imxmmc.h b/drivers/mmc/host/imxmmc.h
new file mode 100644
index 000000000000..e5339e334dbb
--- /dev/null
+++ b/drivers/mmc/host/imxmmc.h
@@ -0,0 +1,67 @@
1
/*
 * i.MX MMC/SDHC controller register map and bit definitions.  All
 * controller registers are 16 bits wide and are accessed relative to
 * IMX_MMC_BASE through the __REG16() accessor.
 */
# define __REG16(x) (*((volatile u16 *)IO_ADDRESS(x)))

/* Register accessors (offset from IMX_MMC_BASE) */
#define MMC_STR_STP_CLK __REG16(IMX_MMC_BASE + 0x00)
#define MMC_STATUS __REG16(IMX_MMC_BASE + 0x04)
#define MMC_CLK_RATE __REG16(IMX_MMC_BASE + 0x08)
#define MMC_CMD_DAT_CONT __REG16(IMX_MMC_BASE + 0x0C)
#define MMC_RES_TO __REG16(IMX_MMC_BASE + 0x10)
#define MMC_READ_TO __REG16(IMX_MMC_BASE + 0x14)
#define MMC_BLK_LEN __REG16(IMX_MMC_BASE + 0x18)
#define MMC_NOB __REG16(IMX_MMC_BASE + 0x1C)
#define MMC_REV_NO __REG16(IMX_MMC_BASE + 0x20)
#define MMC_INT_MASK __REG16(IMX_MMC_BASE + 0x24)
#define MMC_CMD __REG16(IMX_MMC_BASE + 0x28)
#define MMC_ARGH __REG16(IMX_MMC_BASE + 0x2C)
#define MMC_ARGL __REG16(IMX_MMC_BASE + 0x30)
#define MMC_RES_FIFO __REG16(IMX_MMC_BASE + 0x34)
#define MMC_BUFFER_ACCESS __REG16(IMX_MMC_BASE + 0x38)
#define MMC_BUFFER_ACCESS_OFS 0x38	/* raw offset of the data FIFO */

/* MMC_STR_STP_CLK bits */
#define STR_STP_CLK_ENDIAN (1<<5)
#define STR_STP_CLK_RESET (1<<3)
#define STR_STP_CLK_ENABLE (1<<2)
#define STR_STP_CLK_START_CLK (1<<1)
#define STR_STP_CLK_STOP_CLK (1<<0)
/* MMC_STATUS bits */
#define STATUS_CARD_PRESENCE (1<<15)
#define STATUS_SDIO_INT_ACTIVE (1<<14)
#define STATUS_END_CMD_RESP (1<<13)
#define STATUS_WRITE_OP_DONE (1<<12)
#define STATUS_DATA_TRANS_DONE (1<<11)
#define STATUS_WR_CRC_ERROR_CODE_MASK (3<<10)
#define STATUS_CARD_BUS_CLK_RUN (1<<8)
#define STATUS_APPL_BUFF_FF (1<<7)
#define STATUS_APPL_BUFF_FE (1<<6)
#define STATUS_RESP_CRC_ERR (1<<5)
#define STATUS_CRC_READ_ERR (1<<3)
#define STATUS_CRC_WRITE_ERR (1<<2)
#define STATUS_TIME_OUT_RESP (1<<1)
#define STATUS_TIME_OUT_READ (1<<0)
/* All CRC and timeout error bits above, ORed together */
#define STATUS_ERR_MASK 0x2f
/* MMC_CLK_RATE fields */
#define CLK_RATE_PRESCALER(x) ((x) & 0x7)
#define CLK_RATE_CLK_RATE(x) (((x) & 0x7) << 3)
/* MMC_CMD_DAT_CONT bits */
#define CMD_DAT_CONT_CMD_RESP_LONG_OFF (1<<12)
#define CMD_DAT_CONT_STOP_READWAIT (1<<11)
#define CMD_DAT_CONT_START_READWAIT (1<<10)
#define CMD_DAT_CONT_BUS_WIDTH_1 (0<<8)
#define CMD_DAT_CONT_BUS_WIDTH_4 (2<<8)
#define CMD_DAT_CONT_INIT (1<<7)
#define CMD_DAT_CONT_BUSY (1<<6)
#define CMD_DAT_CONT_STREAM_BLOCK (1<<5)
#define CMD_DAT_CONT_WRITE (1<<4)
#define CMD_DAT_CONT_DATA_ENABLE (1<<3)
#define CMD_DAT_CONT_RESPONSE_FORMAT_R1 (1)
#define CMD_DAT_CONT_RESPONSE_FORMAT_R2 (2)
#define CMD_DAT_CONT_RESPONSE_FORMAT_R3 (3)
#define CMD_DAT_CONT_RESPONSE_FORMAT_R4 (4)
#define CMD_DAT_CONT_RESPONSE_FORMAT_R5 (5)
#define CMD_DAT_CONT_RESPONSE_FORMAT_R6 (6)
/* MMC_INT_MASK bits */
#define INT_MASK_AUTO_CARD_DETECT (1<<6)
#define INT_MASK_DAT0_EN (1<<5)
#define INT_MASK_SDIO (1<<4)
#define INT_MASK_BUF_READY (1<<3)
#define INT_MASK_END_CMD_RES (1<<2)
#define INT_MASK_WRITE_OP_DONE (1<<1)
#define INT_MASK_DATA_TRAN (1<<0)
#define INT_ALL (0x7f)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
new file mode 100644
index 000000000000..d11c2d23ceea
--- /dev/null
+++ b/drivers/mmc/host/mmci.c
@@ -0,0 +1,702 @@
1/*
2 * linux/drivers/mmc/mmci.c - ARM PrimeCell MMCI PL180/1 driver
3 *
4 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/module.h>
11#include <linux/moduleparam.h>
12#include <linux/init.h>
13#include <linux/ioport.h>
14#include <linux/device.h>
15#include <linux/interrupt.h>
16#include <linux/delay.h>
17#include <linux/err.h>
18#include <linux/highmem.h>
19#include <linux/mmc/host.h>
20#include <linux/amba/bus.h>
21#include <linux/clk.h>
22
23#include <asm/cacheflush.h>
24#include <asm/div64.h>
25#include <asm/io.h>
26#include <asm/scatterlist.h>
27#include <asm/sizes.h>
28#include <asm/mach/mmc.h>
29
30#include "mmci.h"
31
#define DRIVER_NAME "mmci-pl18x"

/* Per-host debug printk helper; requires at least one format argument. */
#define DBG(host,fmt,args...)	\
	pr_debug("%s: %s: " fmt, mmc_hostname(host->mmc), __func__ , args)

/* Maximum card clock in Hz; overridable via the "fmax" module parameter. */
static unsigned int fmax = 515633;
38
/*
 * Complete the current request and hand it back to the MMC core.
 * Called with host->lock held; the lock is dropped around
 * mmc_request_done() because the core may re-enter the driver.
 */
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	/* The data path must already have been torn down by the caller. */
	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	if (mrq->data)
		mrq->data->bytes_xfered = host->data_xfered;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}
60
61static void mmci_stop_data(struct mmci_host *host)
62{
63 writel(0, host->base + MMCIDATACTRL);
64 writel(0, host->base + MMCIMASK1);
65 host->data = NULL;
66}
67
/*
 * Program the data path (DPSM) for a transfer and unmask the matching
 * FIFO interrupt for PIO.  The data-end interrupt is masked here and
 * re-enabled by the PIO handler only when the whole buffer has been
 * transferred, so the data-end IRQ cannot race the FIFO draining.
 */
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	DBG(host, "blksz %04x blks %04x flags %08x\n",
	    data->blksz, data->blocks, data->flags);

	host->data = data;
	/*
	 * NOTE(review): host->size and MMCIDATALENGTH are programmed with
	 * blksz only (one block), not blksz * blocks — presumably requests
	 * reaching here carry a single block.  Verify against the core.
	 */
	host->size = data->blksz;
	host->data_xfered = 0;

	mmci_init_sg(host, data);

	/* Convert the nanosecond timeout into card-clock cycles. */
	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	/* The DPSM takes log2(blksz); blksz must be a power of two. */
	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
	if (data->flags & MMC_DATA_READ) {
		datactrl |= MCI_DPSM_DIRECTION;
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than a FIFOSIZE of bytes to transfer,
		 * trigger a PIO interrupt as soon as any data is available.
		 */
		if (host->size < MCI_FIFOSIZE)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since its implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	writel(datactrl, base + MMCIDATACTRL);
	/* Mask data-end until PIO finishes (see mmci_pio_irq). */
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	writel(irqmask, base + MMCIMASK1);
}
119
/*
 * Write a command to the CPSM.  If a previous command is still enabled
 * the controller requires the enable bit to be cleared (plus a short
 * delay) before a new one can be programmed.
 */
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	DBG(host, "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	/* Placeholder: command-pending interrupt mode is never enabled. */
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}
147
/*
 * Handle data-path interrupts: per-block completion accounting, error
 * classification, and end-of-transfer.  Called from mmci_irq() with
 * host->lock held.
 */
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* Each DATABLOCKEND accounts one block transferred on the bus. */
	if (status & MCI_DATABLOCKEND) {
		host->data_xfered += data->blksz;
	}
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		if (status & MCI_DATACRCFAIL)
			data->error = MMC_ERR_BADCRC;
		else if (status & MCI_DATATIMEOUT)
			data->error = MMC_ERR_TIMEOUT;
		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
			data->error = MMC_ERR_FIFO;
		/* Force the end-of-data path below to run on error. */
		status |= MCI_DATAEND;

		/*
		 * We hit an error condition. Ensure that any data
		 * partially written to a page is properly coherent.
		 */
		if (host->sg_len && data->flags & MMC_DATA_READ)
			flush_dcache_page(host->sg_ptr->page);
	}
	if (status & MCI_DATAEND) {
		mmci_stop_data(host);

		/* Issue the stop command if present, else finish the request. */
		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}
181
/*
 * Handle command-path interrupts: latch the response registers and
 * classify timeout/CRC errors.  For a successful command with a write
 * data phase, start the data path now.  Called from mmci_irq() with
 * host->lock held.
 */
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	/* Always capture the response; unused words are simply ignored. */
	cmd->resp[0] = readl(base + MMCIRESPONSE0);
	cmd->resp[1] = readl(base + MMCIRESPONSE1);
	cmd->resp[2] = readl(base + MMCIRESPONSE2);
	cmd->resp[3] = readl(base + MMCIRESPONSE3);

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = MMC_ERR_TIMEOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		/* CRC failure only matters if the response carries a CRC. */
		cmd->error = MMC_ERR_BADCRC;
	}

	if (!cmd->data || cmd->error != MMC_ERR_NONE) {
		if (host->data)
			mmci_stop_data(host);
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		/* Writes start their data phase after the command completes
		 * (reads were set up before the command, see mmci_request). */
		mmci_start_data(host, cmd->data);
	}
}
209
/*
 * Drain the receive FIFO into the buffer; returns the number of bytes
 * copied.  MMCIFIFOCNT counts words still expected, so the bytes
 * already in the FIFO are host->size minus that count (in bytes).
 */
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;

	do {
		int count = host->size - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * NOTE(review): readsl() moves whole 32-bit words while ptr
		 * and remain advance by "count" bytes; a count that is not a
		 * multiple of 4 would leave the tail bytes uncopied yet
		 * accounted.  Presumably transfer sizes here are word
		 * multiples — verify.
		 */
		readsl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}
238
/*
 * Fill the transmit FIFO from the buffer; returns bytes copied.  When
 * the FIFO is completely empty a whole FIFOSIZE may be written,
 * otherwise only half, matching the half-empty interrupt threshold.
 */
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
		count = min(remain, maxcnt);

		/* NOTE(review): writesl() pushes count/4 words while ptr and
		 * remain advance by count bytes — assumes word-multiple
		 * counts, as in mmci_pio_read().  Verify. */
		writesl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}
263
/*
 * PIO data transfer IRQ handler.
 *
 * Runs off the MMCIMASK1 (FIFO-level) interrupt and moves data between
 * the FIFO and the current scatterlist segment until either the FIFO
 * condition clears or the scatterlist is exhausted.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	void __iomem *base = host->base;
	u32 status;

	status = readl(base + MMCISTATUS);

	DBG(host, "irq1 %08x\n", status);

	do {
		unsigned long flags;
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		/*
		 * Map the current scatter buffer.
		 */
		buffer = mmci_kmap_atomic(host, &flags) + host->sg_off;
		remain = host->sg_ptr->length - host->sg_off;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		/*
		 * Unmap the buffer.
		 */
		mmci_kunmap_atomic(host, buffer, &flags);

		host->sg_off += len;
		host->size -= len;
		remain -= len;

		/* Segment not finished: FIFO limit hit, wait for next IRQ. */
		if (remain)
			break;

		/*
		 * If we were reading, and we have completed this
		 * page, ensure that the data cache is coherent.
		 */
		if (status & MCI_RXACTIVE)
			flush_dcache_page(host->sg_ptr->page);

		if (!mmci_next_sg(host))
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	/*
	 * If we're nearing the end of the read, switch to
	 * "any data available" mode.
	 */
	if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE)
		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		writel(0, base + MMCIMASK1);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}
349
/*
 * Handle completion of command and data transfers.
 *
 * Loops reading STATUS (masked by the enabled sources in MASK0),
 * acknowledging each batch via MMCICLEAR, and dispatching to the data
 * and command sub-handlers until no enabled status bits remain.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		/* Read, mask against enabled sources, acknowledge. */
		status = readl(host->base + MMCISTATUS);
		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		DBG(host, "irq0 %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
			      MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}
387
388static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
389{
390 struct mmci_host *host = mmc_priv(mmc);
391
392 WARN_ON(host->mrq != NULL);
393
394 spin_lock_irq(&host->lock);
395
396 host->mrq = mrq;
397
398 if (mrq->data && mrq->data->flags & MMC_DATA_READ)
399 mmci_start_data(host, mrq->data);
400
401 mmci_start_command(host, mrq->cmd, 0);
402
403 spin_unlock_irq(&host->lock);
404}
405
406static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
407{
408 struct mmci_host *host = mmc_priv(mmc);
409 u32 clk = 0, pwr = 0;
410
411 if (ios->clock) {
412 if (ios->clock >= host->mclk) {
413 clk = MCI_CLK_BYPASS;
414 host->cclk = host->mclk;
415 } else {
416 clk = host->mclk / (2 * ios->clock) - 1;
417 if (clk > 256)
418 clk = 255;
419 host->cclk = host->mclk / (2 * (clk + 1));
420 }
421 clk |= MCI_CLK_ENABLE;
422 }
423
424 if (host->plat->translate_vdd)
425 pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
426
427 switch (ios->power_mode) {
428 case MMC_POWER_OFF:
429 break;
430 case MMC_POWER_UP:
431 pwr |= MCI_PWR_UP;
432 break;
433 case MMC_POWER_ON:
434 pwr |= MCI_PWR_ON;
435 break;
436 }
437
438 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
439 pwr |= MCI_ROD;
440
441 writel(clk, host->base + MMCICLOCK);
442
443 if (host->pwr != pwr) {
444 host->pwr = pwr;
445 writel(pwr, host->base + MMCIPOWER);
446 }
447}
448
/* Host operations handed to the MMC core. */
static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.set_ios	= mmci_set_ios,
};
453
454static void mmci_check_status(unsigned long data)
455{
456 struct mmci_host *host = (struct mmci_host *)data;
457 unsigned int status;
458
459 status = host->plat->status(mmc_dev(host->mmc));
460 if (status ^ host->oldstat)
461 mmc_detect_change(host->mmc, 0);
462
463 host->oldstat = status;
464 mod_timer(&host->timer, jiffies + HZ);
465}
466
/*
 * AMBA probe: claim the device region, clock, register window and both
 * IRQ lines, then register an mmc_host with the core and start the
 * card-detect poll timer.  On any failure, resources are released in
 * reverse order through the goto cleanup chain.
 */
static int mmci_probe(struct amba_device *dev, void *id)
{
	struct mmc_platform_data *plat = dev->dev.platform_data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->clk = clk_get(&dev->dev, "MCLK");
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->mclk = clk_get_rate(host->clk);
	host->mmc = mmc;
	host->base = ioremap(dev->res.start, SZ_4K);
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	/* Lowest rate the 8-bit divider can produce: mclk / 512. */
	mmc->f_min = (host->mclk + 511) / 512;
	mmc->f_max = min(host->mclk, fmax);
	mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = MMC_CAP_MULTIWRITE;

	/*
	 * We can do SGIO
	 */
	mmc->max_hw_segs = 16;
	mmc->max_phys_segs = NR_SG;

	/*
	 * Since we only have a 16-bit data length register, we must
	 * ensure that we don't exceed 2^16-1 bytes in a single request.
	 */
	mmc->max_req_size = 65535;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	/* Mask and clear all interrupts before requesting the lines. */
	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
	if (ret)
		goto irq0_free;

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	mmc_add_host(mmc);

	printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
		mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
		(unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);

	/* Start the 1 Hz card-detect poll (see mmci_check_status). */
	init_timer(&host->timer);
	host->timer.data = (unsigned long)host;
	host->timer.function = mmci_check_status;
	host->timer.expires = jiffies + HZ;
	add_timer(&host->timer);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}
592
/*
 * AMBA remove: unregister from the MMC core first, then quiesce the
 * hardware and release resources in reverse order of probe.
 */
static int mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		/* Stop the card-detect poll before tearing anything down. */
		del_timer_sync(&host->timer);

		mmc_remove_host(mmc);

		/* Mask all interrupts and disable both state machines. */
		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		free_irq(dev->irq[0], host);
		free_irq(dev->irq[1], host);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_put(host->clk);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}
626
627#ifdef CONFIG_PM
628static int mmci_suspend(struct amba_device *dev, pm_message_t state)
629{
630 struct mmc_host *mmc = amba_get_drvdata(dev);
631 int ret = 0;
632
633 if (mmc) {
634 struct mmci_host *host = mmc_priv(mmc);
635
636 ret = mmc_suspend_host(mmc, state);
637 if (ret == 0)
638 writel(0, host->base + MMCIMASK0);
639 }
640
641 return ret;
642}
643
644static int mmci_resume(struct amba_device *dev)
645{
646 struct mmc_host *mmc = amba_get_drvdata(dev);
647 int ret = 0;
648
649 if (mmc) {
650 struct mmci_host *host = mmc_priv(mmc);
651
652 writel(MCI_IRQENABLE, host->base + MMCIMASK0);
653
654 ret = mmc_resume_host(mmc);
655 }
656
657 return ret;
658}
659#else
660#define mmci_suspend NULL
661#define mmci_resume NULL
662#endif
663
/* AMBA peripheral IDs matched by this driver: PL180 and PL181. */
static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,	/* PL180 */
		.mask	= 0x000fffff,
	},
	{
		.id	= 0x00041181,	/* PL181 */
		.mask	= 0x000fffff,
	},
	{ 0, 0 },	/* sentinel */
};
675
/* AMBA bus driver glue for the PrimeCell MMCI. */
static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= mmci_remove,
	.suspend	= mmci_suspend,	/* NULL when !CONFIG_PM */
	.resume		= mmci_resume,	/* NULL when !CONFIG_PM */
	.id_table	= mmci_ids,
};
686
/* Module init: register the AMBA driver. */
static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

/* Module exit: unregister the AMBA driver. */
static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
/* Expose the maximum card clock limit read-only in sysfs. */
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver")<wbr/>;
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
new file mode 100644
index 000000000000..6d7eadc9a678
--- /dev/null
+++ b/drivers/mmc/host/mmci.h
@@ -0,0 +1,179 @@
1/*
2 * linux/drivers/mmc/mmci.h - ARM PrimeCell MMCI PL180/1 driver
3 *
4 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
/* Register offsets and bit definitions for the PL180/PL181 MMCI. */
#define MMCIPOWER		0x000
#define MCI_PWR_OFF		0x00
#define MCI_PWR_UP		0x02
#define MCI_PWR_ON		0x03
#define MCI_OD			(1 << 6)
#define MCI_ROD			(1 << 7)

#define MMCICLOCK		0x004
#define MCI_CLK_ENABLE		(1 << 8)
#define MCI_CLK_PWRSAVE		(1 << 9)
#define MCI_CLK_BYPASS		(1 << 10)

#define MMCIARGUMENT		0x008
#define MMCICOMMAND		0x00c
#define MCI_CPSM_RESPONSE	(1 << 6)
#define MCI_CPSM_LONGRSP	(1 << 7)
#define MCI_CPSM_INTERRUPT	(1 << 8)
#define MCI_CPSM_PENDING	(1 << 9)
#define MCI_CPSM_ENABLE		(1 << 10)

#define MMCIRESPCMD		0x010
#define MMCIRESPONSE0		0x014
#define MMCIRESPONSE1		0x018
#define MMCIRESPONSE2		0x01c
#define MMCIRESPONSE3		0x020
#define MMCIDATATIMER		0x024
#define MMCIDATALENGTH		0x028
#define MMCIDATACTRL		0x02c
#define MCI_DPSM_ENABLE		(1 << 0)
#define MCI_DPSM_DIRECTION	(1 << 1)
#define MCI_DPSM_MODE		(1 << 2)
#define MCI_DPSM_DMAENABLE	(1 << 3)

#define MMCIDATACNT		0x030
/* Status register bits (also the layout of MMCICLEAR and MMCIMASK0/1) */
#define MMCISTATUS		0x034
#define MCI_CMDCRCFAIL		(1 << 0)
#define MCI_DATACRCFAIL		(1 << 1)
#define MCI_CMDTIMEOUT		(1 << 2)
#define MCI_DATATIMEOUT		(1 << 3)
#define MCI_TXUNDERRUN		(1 << 4)
#define MCI_RXOVERRUN		(1 << 5)
#define MCI_CMDRESPEND		(1 << 6)
#define MCI_CMDSENT		(1 << 7)
#define MCI_DATAEND		(1 << 8)
#define MCI_DATABLOCKEND	(1 << 10)
#define MCI_CMDACTIVE		(1 << 11)
#define MCI_TXACTIVE		(1 << 12)
#define MCI_RXACTIVE		(1 << 13)
#define MCI_TXFIFOHALFEMPTY	(1 << 14)
#define MCI_RXFIFOHALFFULL	(1 << 15)
#define MCI_TXFIFOFULL		(1 << 16)
#define MCI_RXFIFOFULL		(1 << 17)
#define MCI_TXFIFOEMPTY		(1 << 18)
#define MCI_RXFIFOEMPTY		(1 << 19)
#define MCI_TXDATAAVLBL		(1 << 20)
#define MCI_RXDATAAVLBL		(1 << 21)

#define MMCICLEAR		0x038
#define MCI_CMDCRCFAILCLR	(1 << 0)
#define MCI_DATACRCFAILCLR	(1 << 1)
#define MCI_CMDTIMEOUTCLR	(1 << 2)
#define MCI_DATATIMEOUTCLR	(1 << 3)
#define MCI_TXUNDERRUNCLR	(1 << 4)
#define MCI_RXOVERRUNCLR	(1 << 5)
#define MCI_CMDRESPENDCLR	(1 << 6)
#define MCI_CMDSENTCLR		(1 << 7)
#define MCI_DATAENDCLR		(1 << 8)
#define MCI_DATABLOCKENDCLR	(1 << 10)

#define MMCIMASK0		0x03c
#define MCI_CMDCRCFAILMASK	(1 << 0)
#define MCI_DATACRCFAILMASK	(1 << 1)
#define MCI_CMDTIMEOUTMASK	(1 << 2)
#define MCI_DATATIMEOUTMASK	(1 << 3)
#define MCI_TXUNDERRUNMASK	(1 << 4)
#define MCI_RXOVERRUNMASK	(1 << 5)
#define MCI_CMDRESPENDMASK	(1 << 6)
#define MCI_CMDSENTMASK		(1 << 7)
#define MCI_DATAENDMASK		(1 << 8)
#define MCI_DATABLOCKENDMASK	(1 << 10)
#define MCI_CMDACTIVEMASK	(1 << 11)
#define MCI_TXACTIVEMASK	(1 << 12)
#define MCI_RXACTIVEMASK	(1 << 13)
#define MCI_TXFIFOHALFEMPTYMASK	(1 << 14)
#define MCI_RXFIFOHALFFULLMASK	(1 << 15)
#define MCI_TXFIFOFULLMASK	(1 << 16)
#define MCI_RXFIFOFULLMASK	(1 << 17)
#define MCI_TXFIFOEMPTYMASK	(1 << 18)
#define MCI_RXFIFOEMPTYMASK	(1 << 19)
#define MCI_TXDATAAVLBLMASK	(1 << 20)
#define MCI_RXDATAAVLBLMASK	(1 << 21)

#define MMCIMASK1		0x040
#define MMCIFIFOCNT		0x048
#define MMCIFIFO		0x080 /* to 0x0bc */

/* Interrupt sources enabled by default on MASK0 (see mmci_probe). */
#define MCI_IRQENABLE	\
	(MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK|	\
	MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK|	\
	MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATABLOCKENDMASK)

/*
 * The size of the FIFO in bytes.
 */
#define MCI_FIFOSIZE	(16*4)

#define MCI_FIFOHALFSIZE (MCI_FIFOSIZE / 2)

/* Maximum scatterlist segments advertised to the MMC core. */
#define NR_SG		16
119
struct clk;

/* Per-controller driver state, allocated as mmc_host private data. */
struct mmci_host {
	void __iomem		*base;		/* ioremapped register window */
	struct mmc_request	*mrq;		/* request in flight */
	struct mmc_command	*cmd;		/* command in flight */
	struct mmc_data		*data;		/* data transfer in flight */
	struct mmc_host		*mmc;
	struct clk		*clk;		/* MCLK input clock */

	unsigned int		data_xfered;	/* bytes confirmed on the bus */

	spinlock_t		lock;

	unsigned int		mclk;		/* input clock rate (Hz) */
	unsigned int		cclk;		/* achieved card clock rate */
	u32			pwr;		/* last value written to MMCIPOWER */
	struct mmc_platform_data *plat;

	struct timer_list	timer;		/* card-detect poll timer */
	unsigned int		oldstat;	/* last platform status value */

	unsigned int		sg_len;		/* segments left to process */

	/* pio stuff */
	struct scatterlist	*sg_ptr;	/* current segment */
	unsigned int		sg_off;		/* byte offset within segment */
	unsigned int		size;		/* bytes left in the transfer */
};
149
150static inline void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
151{
152 /*
153 * Ideally, we want the higher levels to pass us a scatter list.
154 */
155 host->sg_len = data->sg_len;
156 host->sg_ptr = data->sg;
157 host->sg_off = 0;
158}
159
160static inline int mmci_next_sg(struct mmci_host *host)
161{
162 host->sg_ptr++;
163 host->sg_off = 0;
164 return --host->sg_len;
165}
166
/*
 * Map the current scatterlist segment for PIO access.  Interrupts are
 * disabled (state saved into *flags) for the lifetime of the atomic
 * mapping; must be paired with mmci_kunmap_atomic().
 */
static inline char *mmci_kmap_atomic(struct mmci_host *host, unsigned long *flags)
{
	struct scatterlist *sg = host->sg_ptr;

	local_irq_save(*flags);
	return kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
}
174
/*
 * Undo mmci_kmap_atomic(): release the atomic kmap and restore the
 * interrupt state saved at map time.
 */
static inline void mmci_kunmap_atomic(struct mmci_host *host, void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
	local_irq_restore(*flags);
}
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
new file mode 100644
index 000000000000..e851384e51f4
--- /dev/null
+++ b/drivers/mmc/host/omap.c
@@ -0,0 +1,1288 @@
1/*
 * linux/drivers/mmc/host/omap.c
3 *
4 * Copyright (C) 2004 Nokia Corporation
 * Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
6 * Misc hacks here and there by Tony Lindgren <tony@atomide.com>
7 * Other hacks (DMA, SD, etc) by David Brownell
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16#include <linux/init.h>
17#include <linux/ioport.h>
18#include <linux/platform_device.h>
19#include <linux/interrupt.h>
20#include <linux/dma-mapping.h>
21#include <linux/delay.h>
22#include <linux/spinlock.h>
23#include <linux/timer.h>
24#include <linux/mmc/host.h>
25#include <linux/mmc/card.h>
26#include <linux/clk.h>
27
28#include <asm/io.h>
29#include <asm/irq.h>
30#include <asm/scatterlist.h>
31#include <asm/mach-types.h>
32
33#include <asm/arch/board.h>
34#include <asm/arch/gpio.h>
35#include <asm/arch/dma.h>
36#include <asm/arch/mux.h>
37#include <asm/arch/fpga.h>
38#include <asm/arch/tps65010.h>
39
40#define OMAP_MMC_REG_CMD 0x00
41#define OMAP_MMC_REG_ARGL 0x04
42#define OMAP_MMC_REG_ARGH 0x08
43#define OMAP_MMC_REG_CON 0x0c
44#define OMAP_MMC_REG_STAT 0x10
45#define OMAP_MMC_REG_IE 0x14
46#define OMAP_MMC_REG_CTO 0x18
47#define OMAP_MMC_REG_DTO 0x1c
48#define OMAP_MMC_REG_DATA 0x20
49#define OMAP_MMC_REG_BLEN 0x24
50#define OMAP_MMC_REG_NBLK 0x28
51#define OMAP_MMC_REG_BUF 0x2c
52#define OMAP_MMC_REG_SDIO 0x34
53#define OMAP_MMC_REG_REV 0x3c
54#define OMAP_MMC_REG_RSP0 0x40
55#define OMAP_MMC_REG_RSP1 0x44
56#define OMAP_MMC_REG_RSP2 0x48
57#define OMAP_MMC_REG_RSP3 0x4c
58#define OMAP_MMC_REG_RSP4 0x50
59#define OMAP_MMC_REG_RSP5 0x54
60#define OMAP_MMC_REG_RSP6 0x58
61#define OMAP_MMC_REG_RSP7 0x5c
62#define OMAP_MMC_REG_IOSR 0x60
63#define OMAP_MMC_REG_SYSC 0x64
64#define OMAP_MMC_REG_SYSS 0x68
65
66#define OMAP_MMC_STAT_CARD_ERR (1 << 14)
67#define OMAP_MMC_STAT_CARD_IRQ (1 << 13)
68#define OMAP_MMC_STAT_OCR_BUSY (1 << 12)
69#define OMAP_MMC_STAT_A_EMPTY (1 << 11)
70#define OMAP_MMC_STAT_A_FULL (1 << 10)
71#define OMAP_MMC_STAT_CMD_CRC (1 << 8)
72#define OMAP_MMC_STAT_CMD_TOUT (1 << 7)
73#define OMAP_MMC_STAT_DATA_CRC (1 << 6)
74#define OMAP_MMC_STAT_DATA_TOUT (1 << 5)
75#define OMAP_MMC_STAT_END_BUSY (1 << 4)
76#define OMAP_MMC_STAT_END_OF_DATA (1 << 3)
77#define OMAP_MMC_STAT_CARD_BUSY (1 << 2)
78#define OMAP_MMC_STAT_END_OF_CMD (1 << 0)
79
80#define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG_##reg)
81#define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG_##reg)
82
83/*
84 * Command types
85 */
86#define OMAP_MMC_CMDTYPE_BC 0
87#define OMAP_MMC_CMDTYPE_BCR 1
88#define OMAP_MMC_CMDTYPE_AC 2
89#define OMAP_MMC_CMDTYPE_ADTC 3
90
91
92#define DRIVER_NAME "mmci-omap"
93
94/* Specifies how often in millisecs to poll for card status changes
95 * when the cover switch is open */
96#define OMAP_MMC_SWITCH_POLL_DELAY 500
97
98static int mmc_omap_enable_poll = 1;
99
/* Per-controller state for one OMAP MMC block. */
struct mmc_omap_host {
	int initialized;		/* nonzero once hardware is set up */
	int suspended;			/* nonzero while suspended via PM */
	struct mmc_request * mrq;	/* request being processed */
	struct mmc_command * cmd;	/* command in flight */
	struct mmc_data * data;		/* data transfer in flight */
	struct mmc_host * mmc;		/* MMC core handle */
	struct device * dev;		/* platform device's struct device */
	unsigned char id; /* 16xx chips have 2 MMC blocks */
	struct clk * iclk;		/* interface clock (24xx only) */
	struct clk * fclk;		/* functional clock, gated per transfer */
	struct resource *mem_res;	/* claimed register memory region */
	void __iomem *virt_base;	/* mapped register base */
	unsigned int phys_base;		/* physical register base (for DMA) */
	int irq;			/* controller interrupt line */
	unsigned char bus_mode;		/* MMC_BUSMODE_* requested by core */
	unsigned char hw_bus_mode;	/* bus mode programmed into hardware */

	unsigned int sg_len;		/* scatterlist entries in this transfer */
	int sg_idx;			/* index of segment being transferred */
	u16 * buffer;			/* PIO cursor into current segment */
	u32 buffer_bytes_left;		/* PIO bytes left in current segment */
	u32 total_bytes_left;		/* PIO bytes left in whole transfer */

	unsigned use_dma:1;		/* DMA enabled for this host */
	unsigned brs_received:1, dma_done:1;	/* end-of-block / DMA-complete flags */
	unsigned dma_is_read:1;		/* direction the held channel is set up for */
	unsigned dma_in_use:1;		/* current request uses DMA */
	int dma_ch;			/* held DMA channel, -1 when released */
	spinlock_t dma_lock;		/* orders brs_received vs dma_done */
	struct timer_list dma_timer;	/* lazily releases dma_ch */
	unsigned dma_len;		/* bytes programmed into current DMA */

	short power_pin;		/* slot power GPIO, < 0 if not wired */
	short wp_pin;			/* write-protect GPIO, < 0 if not wired */

	int switch_pin;			/* cover-switch GPIO, < 0 if not wired */
	struct work_struct switch_work;	/* debounced cover-change handler */
	struct timer_list switch_timer;	/* polling timer while cover is open */
	int switch_last_state;		/* last reported cover state */
};
141
142static inline int
143mmc_omap_cover_is_open(struct mmc_omap_host *host)
144{
145 if (host->switch_pin < 0)
146 return 0;
147 return omap_get_gpio_datain(host->switch_pin);
148}
149
/*
 * sysfs "cover_switch" show handler: reports the current cover state
 * as the string "open" or "closed".
 */
static ssize_t
mmc_omap_show_cover_switch(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct mmc_omap_host *host = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", mmc_omap_cover_is_open(host) ? "open" :
		       "closed");
}
159
160static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL);
161
162static ssize_t
163mmc_omap_show_enable_poll(struct device *dev,
164 struct device_attribute *attr, char *buf)
165{
166 return snprintf(buf, PAGE_SIZE, "%d\n", mmc_omap_enable_poll);
167}
168
/*
 * sysfs "enable_poll" store handler: parses an integer and toggles the
 * module-wide cover polling flag.  Kicks the switch worker immediately
 * when polling is (re-)enabled on a host with a cover switch, so the
 * poll timer gets re-armed.  Returns the consumed size or -EINVAL.
 */
static ssize_t
mmc_omap_store_enable_poll(struct device *dev,
			   struct device_attribute *attr, const char *buf,
			   size_t size)
{
	int enable_poll;

	/* %10d bounds the parsed field width against unterminated input */
	if (sscanf(buf, "%10d", &enable_poll) != 1)
		return -EINVAL;

	if (enable_poll != mmc_omap_enable_poll) {
		struct mmc_omap_host *host = dev_get_drvdata(dev);

		mmc_omap_enable_poll = enable_poll;
		if (enable_poll && host->switch_pin >= 0)
			schedule_work(&host->switch_work);
	}
	return size;
}
188
189static DEVICE_ATTR(enable_poll, 0664,
190 mmc_omap_show_enable_poll, mmc_omap_store_enable_poll);
191
/*
 * Program and launch one MMC command.  Translates the core's response
 * and command types into the controller's CMD register encoding, arms
 * the interrupt sources we care about, and writes CMD last to start
 * the transaction.  The functional clock stays enabled until the
 * request completes (see mmc_omap_xfer_done / mmc_omap_cmd_done).
 */
static void
mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)
{
	u32 cmdreg;
	u32 resptype;
	u32 cmdtype;

	host->cmd = cmd;

	resptype = 0;
	cmdtype = 0;

	/* Our hardware needs to know exact type */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1B:
		/* resp 1, 1b, 6, 7 */
		resptype = 1;
		break;
	case MMC_RSP_R2:
		resptype = 2;
		break;
	case MMC_RSP_R3:
		resptype = 3;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "Invalid response type: %04x\n", mmc_resp_type(cmd));
		break;
	}

	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) {
		cmdtype = OMAP_MMC_CMDTYPE_ADTC;
	} else if (mmc_cmd_type(cmd) == MMC_CMD_BC) {
		cmdtype = OMAP_MMC_CMDTYPE_BC;
	} else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) {
		cmdtype = OMAP_MMC_CMDTYPE_BCR;
	} else {
		cmdtype = OMAP_MMC_CMDTYPE_AC;
	}

	cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);

	/* bit 6: open-drain bus (card identification phase) */
	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdreg |= 1 << 6;

	/* bit 11: wait for busy (R1b responses) */
	if (cmd->flags & MMC_RSP_BUSY)
		cmdreg |= 1 << 11;

	/* bit 15: data direction is read */
	if (host->data && !(host->data->flags & MMC_DATA_WRITE))
		cmdreg |= 1 << 15;

	clk_enable(host->fclk);

	OMAP_MMC_WRITE(host, CTO, 200);
	OMAP_MMC_WRITE(host, ARGL, cmd->arg & 0xffff);
	OMAP_MMC_WRITE(host, ARGH, cmd->arg >> 16);
	OMAP_MMC_WRITE(host, IE,
		       OMAP_MMC_STAT_A_EMPTY | OMAP_MMC_STAT_A_FULL |
		       OMAP_MMC_STAT_CMD_CRC | OMAP_MMC_STAT_CMD_TOUT |
		       OMAP_MMC_STAT_DATA_CRC | OMAP_MMC_STAT_DATA_TOUT |
		       OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR |
		       OMAP_MMC_STAT_END_OF_DATA);
	OMAP_MMC_WRITE(host, CMD, cmdreg);
}
258
/*
 * Finish the data phase of a request: tear down DMA mappings (the DMA
 * channel itself is released lazily by dma_timer), gate the functional
 * clock, and either complete the request or issue the queued STOP
 * command.
 */
static void
mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
{
	if (host->dma_in_use) {
		enum dma_data_direction dma_data_dir;

		BUG_ON(host->dma_ch < 0);
		/* abort an errored transfer before unmapping */
		if (data->error != MMC_ERR_NONE)
			omap_stop_dma(host->dma_ch);
		/* Release DMA channel lazily */
		mod_timer(&host->dma_timer, jiffies + HZ);
		if (data->flags & MMC_DATA_WRITE)
			dma_data_dir = DMA_TO_DEVICE;
		else
			dma_data_dir = DMA_FROM_DEVICE;
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
			     dma_data_dir);
	}
	host->data = NULL;
	host->sg_len = 0;
	clk_disable(host->fclk);

	/* NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing
	 * dozens of requests until the card finishes writing data.
	 * It'd be cheaper to just wait till an EOFB interrupt arrives...
	 */

	if (!data->stop) {
		host->mrq = NULL;
		mmc_request_done(host->mmc, data->mrq);
		return;
	}

	mmc_omap_start_command(host, data->stop);
}
294
/*
 * Handle the controller's end-of-data (BRS) event.  For DMA transfers
 * this event races with the DMA-complete callback; dma_lock mediates so
 * that whichever side arrives second calls mmc_omap_xfer_done() exactly
 * once.  PIO transfers finish immediately.
 */
static void
mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
{
	unsigned long flags;
	int done;

	if (!host->dma_in_use) {
		mmc_omap_xfer_done(host, data);
		return;
	}
	done = 0;
	spin_lock_irqsave(&host->dma_lock, flags);
	if (host->dma_done)
		done = 1;	/* DMA already finished; we are second */
	else
		host->brs_received = 1;	/* record BRS; DMA side will finish */
	spin_unlock_irqrestore(&host->dma_lock, flags);
	if (done)
		mmc_omap_xfer_done(host, data);
}
315
316static void
317mmc_omap_dma_timer(unsigned long data)
318{
319 struct mmc_omap_host *host = (struct mmc_omap_host *) data;
320
321 BUG_ON(host->dma_ch < 0);
322 omap_free_dma(host->dma_ch);
323 host->dma_ch = -1;
324}
325
/*
 * Handle DMA completion for the last scatterlist segment.  Mirror of
 * mmc_omap_end_of_data(): under dma_lock, finish the transfer only if
 * the controller's BRS event has already been seen; otherwise record
 * that DMA is done and let the BRS side finish.
 */
static void
mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
{
	unsigned long flags;
	int done;

	done = 0;
	spin_lock_irqsave(&host->dma_lock, flags);
	if (host->brs_received)
		done = 1;	/* BRS already arrived; we are second */
	else
		host->dma_done = 1;	/* BRS side will finish the transfer */
	spin_unlock_irqrestore(&host->dma_lock, flags);
	if (done)
		mmc_omap_xfer_done(host, data);
}
342
/*
 * Complete the command phase: read the response registers back into
 * cmd->resp[] (the hardware splits each 32-bit word across two 16-bit
 * RSPn registers), then finish the whole request early if there is no
 * data phase or the command failed.
 */
static void
mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
{
	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* response type 2 */
			cmd->resp[3] =
				OMAP_MMC_READ(host, RSP0) |
				(OMAP_MMC_READ(host, RSP1) << 16);
			cmd->resp[2] =
				OMAP_MMC_READ(host, RSP2) |
				(OMAP_MMC_READ(host, RSP3) << 16);
			cmd->resp[1] =
				OMAP_MMC_READ(host, RSP4) |
				(OMAP_MMC_READ(host, RSP5) << 16);
			cmd->resp[0] =
				OMAP_MMC_READ(host, RSP6) |
				(OMAP_MMC_READ(host, RSP7) << 16);
		} else {
			/* response types 1, 1b, 3, 4, 5, 6 */
			cmd->resp[0] =
				OMAP_MMC_READ(host, RSP6) |
				(OMAP_MMC_READ(host, RSP7) << 16);
		}
	}

	/* no data phase (or it will never start): request is done now */
	if (host->data == NULL || cmd->error != MMC_ERR_NONE) {
		host->mrq = NULL;
		clk_disable(host->fclk);
		mmc_request_done(host->mmc, cmd->mrq);
	}
}
377
/* PIO only */
/*
 * Point the PIO cursor (host->buffer / host->buffer_bytes_left) at the
 * scatterlist segment indexed by host->sg_idx, clamped to the bytes
 * actually remaining in the transfer.  Assumes the segment's page is
 * directly addressable (no highmem kmap here) — long-standing
 * limitation of this PIO path.
 */
static void
mmc_omap_sg_to_buf(struct mmc_omap_host *host)
{
	struct scatterlist *sg;

	sg = host->data->sg + host->sg_idx;
	host->buffer_bytes_left = sg->length;
	host->buffer = page_address(sg->page) + sg->offset;
	if (host->buffer_bytes_left > host->total_bytes_left)
		host->buffer_bytes_left = host->total_bytes_left;
}
390
391/* PIO only */
392static void
393mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
394{
395 int n;
396
397 if (host->buffer_bytes_left == 0) {
398 host->sg_idx++;
399 BUG_ON(host->sg_idx == host->sg_len);
400 mmc_omap_sg_to_buf(host);
401 }
402 n = 64;
403 if (n > host->buffer_bytes_left)
404 n = host->buffer_bytes_left;
405 host->buffer_bytes_left -= n;
406 host->total_bytes_left -= n;
407 host->data->bytes_xfered += n;
408
409 if (write) {
410 __raw_writesw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n);
411 } else {
412 __raw_readsw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n);
413 }
414}
415
416static inline void mmc_omap_report_irq(u16 status)
417{
418 static const char *mmc_omap_status_bits[] = {
419 "EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO",
420 "CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR"
421 };
422 int i, c = 0;
423
424 for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
425 if (status & (1 << i)) {
426 if (c)
427 printk(" ");
428 printk("%s", mmc_omap_status_bits[i]);
429 c++;
430 }
431}
432
433static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
434{
435 struct mmc_omap_host * host = (struct mmc_omap_host *)dev_id;
436 u16 status;
437 int end_command;
438 int end_transfer;
439 int transfer_error;
440
441 if (host->cmd == NULL && host->data == NULL) {
442 status = OMAP_MMC_READ(host, STAT);
443 dev_info(mmc_dev(host->mmc),"spurious irq 0x%04x\n", status);
444 if (status != 0) {
445 OMAP_MMC_WRITE(host, STAT, status);
446 OMAP_MMC_WRITE(host, IE, 0);
447 }
448 return IRQ_HANDLED;
449 }
450
451 end_command = 0;
452 end_transfer = 0;
453 transfer_error = 0;
454
455 while ((status = OMAP_MMC_READ(host, STAT)) != 0) {
456 OMAP_MMC_WRITE(host, STAT, status);
457#ifdef CONFIG_MMC_DEBUG
458 dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ",
459 status, host->cmd != NULL ? host->cmd->opcode : -1);
460 mmc_omap_report_irq(status);
461 printk("\n");
462#endif
463 if (host->total_bytes_left) {
464 if ((status & OMAP_MMC_STAT_A_FULL) ||
465 (status & OMAP_MMC_STAT_END_OF_DATA))
466 mmc_omap_xfer_data(host, 0);
467 if (status & OMAP_MMC_STAT_A_EMPTY)
468 mmc_omap_xfer_data(host, 1);
469 }
470
471 if (status & OMAP_MMC_STAT_END_OF_DATA) {
472 end_transfer = 1;
473 }
474
475 if (status & OMAP_MMC_STAT_DATA_TOUT) {
476 dev_dbg(mmc_dev(host->mmc), "data timeout\n");
477 if (host->data) {
478 host->data->error |= MMC_ERR_TIMEOUT;
479 transfer_error = 1;
480 }
481 }
482
483 if (status & OMAP_MMC_STAT_DATA_CRC) {
484 if (host->data) {
485 host->data->error |= MMC_ERR_BADCRC;
486 dev_dbg(mmc_dev(host->mmc),
487 "data CRC error, bytes left %d\n",
488 host->total_bytes_left);
489 transfer_error = 1;
490 } else {
491 dev_dbg(mmc_dev(host->mmc), "data CRC error\n");
492 }
493 }
494
495 if (status & OMAP_MMC_STAT_CMD_TOUT) {
496 /* Timeouts are routine with some commands */
497 if (host->cmd) {
498 if (host->cmd->opcode != MMC_ALL_SEND_CID &&
499 host->cmd->opcode !=
500 MMC_SEND_OP_COND &&
501 host->cmd->opcode !=
502 MMC_APP_CMD &&
503 !mmc_omap_cover_is_open(host))
504 dev_err(mmc_dev(host->mmc),
505 "command timeout, CMD %d\n",
506 host->cmd->opcode);
507 host->cmd->error = MMC_ERR_TIMEOUT;
508 end_command = 1;
509 }
510 }
511
512 if (status & OMAP_MMC_STAT_CMD_CRC) {
513 if (host->cmd) {
514 dev_err(mmc_dev(host->mmc),
515 "command CRC error (CMD%d, arg 0x%08x)\n",
516 host->cmd->opcode, host->cmd->arg);
517 host->cmd->error = MMC_ERR_BADCRC;
518 end_command = 1;
519 } else
520 dev_err(mmc_dev(host->mmc),
521 "command CRC error without cmd?\n");
522 }
523
524 if (status & OMAP_MMC_STAT_CARD_ERR) {
525 if (host->cmd && host->cmd->opcode == MMC_STOP_TRANSMISSION) {
526 u32 response = OMAP_MMC_READ(host, RSP6)
527 | (OMAP_MMC_READ(host, RSP7) << 16);
528 /* STOP sometimes sets must-ignore bits */
529 if (!(response & (R1_CC_ERROR
530 | R1_ILLEGAL_COMMAND
531 | R1_COM_CRC_ERROR))) {
532 end_command = 1;
533 continue;
534 }
535 }
536
537 dev_dbg(mmc_dev(host->mmc), "card status error (CMD%d)\n",
538 host->cmd->opcode);
539 if (host->cmd) {
540 host->cmd->error = MMC_ERR_FAILED;
541 end_command = 1;
542 }
543 if (host->data) {
544 host->data->error = MMC_ERR_FAILED;
545 transfer_error = 1;
546 }
547 }
548
549 /*
550 * NOTE: On 1610 the END_OF_CMD may come too early when
551 * starting a write
552 */
553 if ((status & OMAP_MMC_STAT_END_OF_CMD) &&
554 (!(status & OMAP_MMC_STAT_A_EMPTY))) {
555 end_command = 1;
556 }
557 }
558
559 if (end_command) {
560 mmc_omap_cmd_done(host, host->cmd);
561 }
562 if (transfer_error)
563 mmc_omap_xfer_done(host, host->data);
564 else if (end_transfer)
565 mmc_omap_end_of_data(host, host->data);
566
567 return IRQ_HANDLED;
568}
569
/*
 * Cover-switch GPIO interrupt: defer the (sleeping) card-detect work
 * to process context.
 */
static irqreturn_t mmc_omap_switch_irq(int irq, void *dev_id)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *) dev_id;

	schedule_work(&host->switch_work);

	return IRQ_HANDLED;
}
578
/*
 * Poll timer used while the cover is open: re-run the switch worker,
 * which will re-arm this timer if the cover is still open.
 */
static void mmc_omap_switch_timer(unsigned long arg)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *) arg;

	schedule_work(&host->switch_work);
}
585
586static void mmc_omap_switch_handler(struct work_struct *work)
587{
588 struct mmc_omap_host *host = container_of(work, struct mmc_omap_host, switch_work);
589 struct mmc_card *card;
590 static int complained = 0;
591 int cards = 0, cover_open;
592
593 if (host->switch_pin == -1)
594 return;
595 cover_open = mmc_omap_cover_is_open(host);
596 if (cover_open != host->switch_last_state) {
597 kobject_uevent(&host->dev->kobj, KOBJ_CHANGE);
598 host->switch_last_state = cover_open;
599 }
600 mmc_detect_change(host->mmc, 0);
601 list_for_each_entry(card, &host->mmc->cards, node) {
602 if (mmc_card_present(card))
603 cards++;
604 }
605 if (mmc_omap_cover_is_open(host)) {
606 if (!complained) {
607 dev_info(mmc_dev(host->mmc), "cover is open");
608 complained = 1;
609 }
610 if (mmc_omap_enable_poll)
611 mod_timer(&host->switch_timer, jiffies +
612 msecs_to_jiffies(OMAP_MMC_SWITCH_POLL_DELAY));
613 } else {
614 complained = 0;
615 }
616}
617
/* Prepare to transfer the next segment of a scatterlist */
/*
 * Program the held DMA channel for the segment at host->sg_idx:
 * constant-address port on the MMC DATA register side, post-increment
 * on the memory side, 16-bit elements, frame-synchronized on the MMC
 * request line.  Writes the controller's BUF (FIFO threshold) register
 * to match the chosen frame size.  Caller starts the channel.
 */
static void
mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
{
	int dma_ch = host->dma_ch;
	unsigned long data_addr;
	u16 buf, frame;
	u32 count;
	struct scatterlist *sg = &data->sg[host->sg_idx];
	int src_port = 0;
	int dst_port = 0;
	int sync_dev = 0;

	data_addr = host->phys_base + OMAP_MMC_REG_DATA;
	frame = data->blksz;
	count = sg_dma_len(sg);

	/* single-block requests may come with an oversized segment */
	if ((data->blocks == 1) && (count > data->blksz))
		count = frame;

	host->dma_len = count;

	/* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
	 * Use 16 or 32 word frames when the blocksize is at least that large.
	 * Blocksize is usually 512 bytes; but not for some SD reads.
	 */
	if (cpu_is_omap15xx() && frame > 32)
		frame = 32;
	else if (frame > 64)
		frame = 64;
	count /= frame;	/* count is now in frames */
	frame >>= 1;	/* frame is now in 16-bit words */

	if (!(data->flags & MMC_DATA_WRITE)) {
		/* read: almost-full threshold in BUF bits 8..12 */
		buf = 0x800f | ((frame - 1) << 8);

		if (cpu_class_is_omap1()) {
			src_port = OMAP_DMA_PORT_TIPB;
			dst_port = OMAP_DMA_PORT_EMIFF;
		}
		if (cpu_is_omap24xx())
			sync_dev = OMAP24XX_DMA_MMC1_RX;

		omap_set_dma_src_params(dma_ch, src_port,
					OMAP_DMA_AMODE_CONSTANT,
					data_addr, 0, 0);
		omap_set_dma_dest_params(dma_ch, dst_port,
					 OMAP_DMA_AMODE_POST_INC,
					 sg_dma_address(sg), 0, 0);
		omap_set_dma_dest_data_pack(dma_ch, 1);
		omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
	} else {
		/* write: almost-empty threshold in BUF bits 0..4 */
		buf = 0x0f80 | ((frame - 1) << 0);

		if (cpu_class_is_omap1()) {
			src_port = OMAP_DMA_PORT_EMIFF;
			dst_port = OMAP_DMA_PORT_TIPB;
		}
		if (cpu_is_omap24xx())
			sync_dev = OMAP24XX_DMA_MMC1_TX;

		omap_set_dma_dest_params(dma_ch, dst_port,
					 OMAP_DMA_AMODE_CONSTANT,
					 data_addr, 0, 0);
		omap_set_dma_src_params(dma_ch, src_port,
					OMAP_DMA_AMODE_POST_INC,
					sg_dma_address(sg), 0, 0);
		omap_set_dma_src_data_pack(dma_ch, 1);
		omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
	}

	/* Max limit for DMA frame count is 0xffff */
	BUG_ON(count > 0xffff);

	OMAP_MMC_WRITE(host, BUF, buf);
	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
				     frame, count, OMAP_DMA_SYNC_FRAME,
				     sync_dev, 0);
}
697
/* A scatterlist segment completed */
/*
 * DMA completion callback: on a clean block-complete, account the
 * bytes and either kick off the next scatterlist segment or signal
 * overall DMA completion.  Error statuses are currently only logged
 * (see FIXME below).
 */
static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *) data;
	struct mmc_data *mmcdat = host->data;

	if (unlikely(host->dma_ch < 0)) {
		dev_err(mmc_dev(host->mmc),
			"DMA callback while DMA not enabled\n");
		return;
	}
	/* FIXME: We really should do something to _handle_ the errors */
	if (ch_status & OMAP1_DMA_TOUT_IRQ) {
		dev_err(mmc_dev(host->mmc),"DMA timeout\n");
		return;
	}
	if (ch_status & OMAP_DMA_DROP_IRQ) {
		dev_err(mmc_dev(host->mmc), "DMA sync error\n");
		return;
	}
	if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
		return;
	}
	mmcdat->bytes_xfered += host->dma_len;
	host->sg_idx++;
	if (host->sg_idx < host->sg_len) {
		/* chain the next segment on the same channel */
		mmc_omap_prepare_dma(host, host->data);
		omap_start_dma(host->dma_ch);
	} else
		mmc_omap_dma_done(host, host->data);
}
729
/*
 * Obtain a DMA channel for the given transfer direction.  A channel
 * held from a previous request (lazy release, see dma_timer) is reused
 * when the direction matches, otherwise it is freed and a new one is
 * requested for the right sync device.  Returns 0 on success or the
 * omap_request_dma() error.
 */
static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
{
	const char *dev_name;
	int sync_dev, dma_ch, is_read, r;

	is_read = !(data->flags & MMC_DATA_WRITE);
	del_timer_sync(&host->dma_timer);	/* cancel pending lazy release */
	if (host->dma_ch >= 0) {
		if (is_read == host->dma_is_read)
			return 0;	/* reuse the held channel as-is */
		omap_free_dma(host->dma_ch);
		host->dma_ch = -1;
	}

	if (is_read) {
		if (host->id == 1) {
			sync_dev = OMAP_DMA_MMC_RX;
			dev_name = "MMC1 read";
		} else {
			sync_dev = OMAP_DMA_MMC2_RX;
			dev_name = "MMC2 read";
		}
	} else {
		if (host->id == 1) {
			sync_dev = OMAP_DMA_MMC_TX;
			dev_name = "MMC1 write";
		} else {
			sync_dev = OMAP_DMA_MMC2_TX;
			dev_name = "MMC2 write";
		}
	}
	r = omap_request_dma(sync_dev, dev_name, mmc_omap_dma_cb,
			     host, &dma_ch);
	if (r != 0) {
		dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r);
		return r;
	}
	host->dma_ch = dma_ch;
	host->dma_is_read = is_read;

	return 0;
}
772
/*
 * Configure timeouts for a command-only request: disable the data
 * timeout multiplier (SDIO bit 5) and program the maximum command
 * timeout.
 */
static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
{
	u16 reg;

	reg = OMAP_MMC_READ(host, SDIO);
	reg &= ~(1 << 5);
	OMAP_MMC_WRITE(host, SDIO, reg);
	/* Set maximum timeout */
	OMAP_MMC_WRITE(host, CTO, 0xff);
}
783
/*
 * Program the data timeout (DTO) from the request's timeout_ns and
 * timeout_clks.  When the cycle count overflows the 16-bit DTO
 * register, enable the x1024 multiplier in SDIO bit 5 and scale down.
 */
static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
{
	int timeout;
	u16 reg;

	/* Convert ns to clock cycles by assuming 20MHz frequency
	 * 1 cycle at 20MHz = 500 ns
	 */
	timeout = req->data->timeout_clks + req->data->timeout_ns / 500;

	/* Check if we need to use timeout multiplier register */
	reg = OMAP_MMC_READ(host, SDIO);
	if (timeout > 0xffff) {
		reg |= (1 << 5);
		timeout /= 1024;
	} else
		reg &= ~(1 << 5);
	OMAP_MMC_WRITE(host, SDIO, reg);
	OMAP_MMC_WRITE(host, DTO, timeout);
}
804
/*
 * Set up the data phase of a request: program block length/count and
 * the data timeout, then try to route the transfer through DMA (only
 * if every segment is a whole number of blocks), falling back to PIO
 * with a 0x1f1f FIFO threshold otherwise.  Command-only requests clear
 * the block registers and program the command timeout instead.
 */
static void
mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
{
	struct mmc_data *data = req->data;
	int i, use_dma, block_size;
	unsigned sg_len;

	host->data = data;
	if (data == NULL) {
		OMAP_MMC_WRITE(host, BLEN, 0);
		OMAP_MMC_WRITE(host, NBLK, 0);
		OMAP_MMC_WRITE(host, BUF, 0);
		host->dma_in_use = 0;
		set_cmd_timeout(host, req);
		return;
	}

	block_size = data->blksz;

	OMAP_MMC_WRITE(host, NBLK, data->blocks - 1);
	OMAP_MMC_WRITE(host, BLEN, block_size - 1);
	set_data_timeout(host, req);

	/* cope with calling layer confusion; it issues "single
	 * block" writes using multi-block scatterlists.
	 */
	sg_len = (data->blocks == 1) ? 1 : data->sg_len;

	/* Only do DMA for entire blocks */
	use_dma = host->use_dma;
	if (use_dma) {
		for (i = 0; i < sg_len; i++) {
			if ((data->sg[i].length % block_size) != 0) {
				use_dma = 0;
				break;
			}
		}
	}

	host->sg_idx = 0;
	if (use_dma) {
		if (mmc_omap_get_dma_channel(host, data) == 0) {
			enum dma_data_direction dma_data_dir;

			if (data->flags & MMC_DATA_WRITE)
				dma_data_dir = DMA_TO_DEVICE;
			else
				dma_data_dir = DMA_FROM_DEVICE;

			host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
						sg_len, dma_data_dir);
			/* PIO byte counter stays 0 so the IRQ path skips FIFO work */
			host->total_bytes_left = 0;
			mmc_omap_prepare_dma(host, req->data);
			host->brs_received = 0;
			host->dma_done = 0;
			host->dma_in_use = 1;
		} else
			use_dma = 0;
	}

	/* Revert to PIO? */
	if (!use_dma) {
		OMAP_MMC_WRITE(host, BUF, 0x1f1f);
		host->total_bytes_left = data->blocks * block_size;
		host->sg_len = sg_len;
		mmc_omap_sg_to_buf(host);
		host->dma_in_use = 0;
	}
}
874
/*
 * mmc_host_ops.request: entry point for a new request from the core.
 * Data setup must precede the command write, and DMA is started only
 * after the command is issued (see comment below).
 */
static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct mmc_omap_host *host = mmc_priv(mmc);

	/* the core serializes requests; a pending one here is a bug */
	WARN_ON(host->mrq != NULL);

	host->mrq = req;

	/* only touch fifo AFTER the controller readies it */
	mmc_omap_prepare_data(host, req);
	mmc_omap_start_command(host, req->cmd);
	if (host->dma_in_use)
		omap_start_dma(host->dma_ch);
}
889
/*
 * Toggle bit 3 of the Innovator FPGA power register to switch the MMC
 * socket supply.  Compiles to a no-op on non-Innovator builds.
 */
static void innovator_fpga_socket_power(int on)
{
#if defined(CONFIG_MACH_OMAP_INNOVATOR) && defined(CONFIG_ARCH_OMAP15XX)
	unsigned char bits = fpga_read(OMAP1510_FPGA_POWER);

	if (on)
		bits |= 1 << 3;
	else
		bits &= ~(1 << 3);
	fpga_write(bits, OMAP1510_FPGA_POWER);
#endif
}
902
903/*
904 * Turn the socket power on/off. Innovator uses FPGA, most boards
905 * probably use GPIO.
906 */
/*
 * Switch slot power on or off, using whichever mechanism the board
 * provides: Innovator FPGA bit, TPS65010 GPIO (H2/H3), the
 * controller's own CON bit 11 (24xx), or a plain power GPIO.
 */
static void mmc_omap_power(struct mmc_omap_host *host, int on)
{
	if (on) {
		if (machine_is_omap_innovator())
			innovator_fpga_socket_power(1);
		else if (machine_is_omap_h2())
			tps65010_set_gpio_out_value(GPIO3, HIGH);
		else if (machine_is_omap_h3())
			/* GPIO 4 of TPS65010 sends SD_EN signal */
			tps65010_set_gpio_out_value(GPIO4, HIGH);
		else if (cpu_is_omap24xx()) {
			u16 reg = OMAP_MMC_READ(host, CON);
			OMAP_MMC_WRITE(host, CON, reg | (1 << 11));
		} else
			if (host->power_pin >= 0)
				omap_set_gpio_dataout(host->power_pin, 1);
	} else {
		if (machine_is_omap_innovator())
			innovator_fpga_socket_power(0);
		else if (machine_is_omap_h2())
			tps65010_set_gpio_out_value(GPIO3, LOW);
		else if (machine_is_omap_h3())
			tps65010_set_gpio_out_value(GPIO4, LOW);
		else if (cpu_is_omap24xx()) {
			u16 reg = OMAP_MMC_READ(host, CON);
			OMAP_MMC_WRITE(host, CON, reg & ~(1 << 11));
		} else
			if (host->power_pin >= 0)
				omap_set_gpio_dataout(host->power_pin, 0);
	}
}
938
/*
 * mmc_host_ops.set_ios: apply the core's clock, bus-width and power
 * settings.  The CON register's low bits hold the clock divisor
 * (fclk / (dsor)), bit 15 selects 4-bit bus, bit 11 keeps the clock
 * running.  MMC_POWER_UP additionally clocks out the mandatory 80
 * init cycles via CMD bit 7 and busy-waits for completion.
 * NOTE(review): the while loop below spins with no timeout; the
 * comment above it documents the double-CON-write workaround that
 * keeps it from sticking.
 */
static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_omap_host *host = mmc_priv(mmc);
	int dsor;
	int realclock, i;

	realclock = ios->clock;

	if (ios->clock == 0)
		dsor = 0;
	else {
		int func_clk_rate = clk_get_rate(host->fclk);

		dsor = func_clk_rate / realclock;
		if (dsor < 1)
			dsor = 1;

		/* round the divisor up so we never exceed the requested rate */
		if (func_clk_rate / dsor > realclock)
			dsor++;

		if (dsor > 250)
			dsor = 250;
		dsor++;

		if (ios->bus_width == MMC_BUS_WIDTH_4)
			dsor |= 1 << 15;
	}

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		mmc_omap_power(host, 0);
		break;
	case MMC_POWER_UP:
	case MMC_POWER_ON:
		mmc_omap_power(host, 1);
		dsor |= 1 << 11;	/* keep the card clock enabled */
		break;
	}

	host->bus_mode = ios->bus_mode;
	host->hw_bus_mode = host->bus_mode;

	clk_enable(host->fclk);

	/* On insanely high arm_per frequencies something sometimes
	 * goes somehow out of sync, and the POW bit is not being set,
	 * which results in the while loop below getting stuck.
	 * Writing to the CON register twice seems to do the trick. */
	for (i = 0; i < 2; i++)
		OMAP_MMC_WRITE(host, CON, dsor);
	if (ios->power_mode == MMC_POWER_UP) {
		/* Send clock cycles, poll completion */
		OMAP_MMC_WRITE(host, IE, 0);
		OMAP_MMC_WRITE(host, STAT, 0xffff);
		OMAP_MMC_WRITE(host, CMD, 1 << 7);
		/* wait for end-of-command (STAT bit 0) */
		while ((OMAP_MMC_READ(host, STAT) & 1) == 0);
		OMAP_MMC_WRITE(host, STAT, 1);
	}
	clk_disable(host->fclk);
}
999
1000static int mmc_omap_get_ro(struct mmc_host *mmc)
1001{
1002 struct mmc_omap_host *host = mmc_priv(mmc);
1003
1004 return host->wp_pin && omap_get_gpio_datain(host->wp_pin);
1005}
1006
/* Host operations exposed to the MMC core. */
static const struct mmc_host_ops mmc_omap_ops = {
	.request	= mmc_omap_request,
	.set_ios	= mmc_omap_set_ios,
	.get_ro		= mmc_omap_get_ro,
};
1012
1013static int __init mmc_omap_probe(struct platform_device *pdev)
1014{
1015 struct omap_mmc_conf *minfo = pdev->dev.platform_data;
1016 struct mmc_host *mmc;
1017 struct mmc_omap_host *host = NULL;
1018 struct resource *res;
1019 int ret = 0;
1020 int irq;
1021
1022 if (minfo == NULL) {
1023 dev_err(&pdev->dev, "platform data missing\n");
1024 return -ENXIO;
1025 }
1026
1027 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1028 irq = platform_get_irq(pdev, 0);
1029 if (res == NULL || irq < 0)
1030 return -ENXIO;
1031
1032 res = request_mem_region(res->start, res->end - res->start + 1,
1033 pdev->name);
1034 if (res == NULL)
1035 return -EBUSY;
1036
1037 mmc = mmc_alloc_host(sizeof(struct mmc_omap_host), &pdev->dev);
1038 if (mmc == NULL) {
1039 ret = -ENOMEM;
1040 goto err_free_mem_region;
1041 }
1042
1043 host = mmc_priv(mmc);
1044 host->mmc = mmc;
1045
1046 spin_lock_init(&host->dma_lock);
1047 init_timer(&host->dma_timer);
1048 host->dma_timer.function = mmc_omap_dma_timer;
1049 host->dma_timer.data = (unsigned long) host;
1050
1051 host->id = pdev->id;
1052 host->mem_res = res;
1053 host->irq = irq;
1054
1055 if (cpu_is_omap24xx()) {
1056 host->iclk = clk_get(&pdev->dev, "mmc_ick");
1057 if (IS_ERR(host->iclk))
1058 goto err_free_mmc_host;
1059 clk_enable(host->iclk);
1060 }
1061
1062 if (!cpu_is_omap24xx())
1063 host->fclk = clk_get(&pdev->dev, "mmc_ck");
1064 else
1065 host->fclk = clk_get(&pdev->dev, "mmc_fck");
1066
1067 if (IS_ERR(host->fclk)) {
1068 ret = PTR_ERR(host->fclk);
1069 goto err_free_iclk;
1070 }
1071
1072 /* REVISIT:
1073 * Also, use minfo->cover to decide how to manage
1074 * the card detect sensing.
1075 */
1076 host->power_pin = minfo->power_pin;
1077 host->switch_pin = minfo->switch_pin;
1078 host->wp_pin = minfo->wp_pin;
1079 host->use_dma = 1;
1080 host->dma_ch = -1;
1081
1082 host->irq = irq;
1083 host->phys_base = host->mem_res->start;
1084 host->virt_base = (void __iomem *) IO_ADDRESS(host->phys_base);
1085
1086 mmc->ops = &mmc_omap_ops;
1087 mmc->f_min = 400000;
1088 mmc->f_max = 24000000;
1089 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1090 mmc->caps = MMC_CAP_MULTIWRITE | MMC_CAP_BYTEBLOCK;
1091
1092 if (minfo->wire4)
1093 mmc->caps |= MMC_CAP_4_BIT_DATA;
1094
1095 /* Use scatterlist DMA to reduce per-transfer costs.
1096 * NOTE max_seg_size assumption that small blocks aren't
1097 * normally used (except e.g. for reading SD registers).
1098 */
1099 mmc->max_phys_segs = 32;
1100 mmc->max_hw_segs = 32;
1101 mmc->max_blk_size = 2048; /* BLEN is 11 bits (+1) */
1102 mmc->max_blk_count = 2048; /* NBLK is 11 bits (+1) */
1103 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1104 mmc->max_seg_size = mmc->max_req_size;
1105
1106 if (host->power_pin >= 0) {
1107 if ((ret = omap_request_gpio(host->power_pin)) != 0) {
1108 dev_err(mmc_dev(host->mmc),
1109 "Unable to get GPIO pin for MMC power\n");
1110 goto err_free_fclk;
1111 }
1112 omap_set_gpio_direction(host->power_pin, 0);
1113 }
1114
1115 ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
1116 if (ret)
1117 goto err_free_power_gpio;
1118
1119 host->dev = &pdev->dev;
1120 platform_set_drvdata(pdev, host);
1121
1122 if (host->switch_pin >= 0) {
1123 INIT_WORK(&host->switch_work, mmc_omap_switch_handler);
1124 init_timer(&host->switch_timer);
1125 host->switch_timer.function = mmc_omap_switch_timer;
1126 host->switch_timer.data = (unsigned long) host;
1127 if (omap_request_gpio(host->switch_pin) != 0) {
1128 dev_warn(mmc_dev(host->mmc), "Unable to get GPIO pin for MMC cover switch\n");
1129 host->switch_pin = -1;
1130 goto no_switch;
1131 }
1132
1133 omap_set_gpio_direction(host->switch_pin, 1);
1134 ret = request_irq(OMAP_GPIO_IRQ(host->switch_pin),
1135 mmc_omap_switch_irq, IRQF_TRIGGER_RISING, DRIVER_NAME, host);
1136 if (ret) {
1137 dev_warn(mmc_dev(host->mmc), "Unable to get IRQ for MMC cover switch\n");
1138 omap_free_gpio(host->switch_pin);
1139 host->switch_pin = -1;
1140 goto no_switch;
1141 }
1142 ret = device_create_file(&pdev->dev, &dev_attr_cover_switch);
1143 if (ret == 0) {
1144 ret = device_create_file(&pdev->dev, &dev_attr_enable_poll);
1145 if (ret != 0)
1146 device_remove_file(&pdev->dev, &dev_attr_cover_switch);
1147 }
1148 if (ret) {
1149 dev_warn(mmc_dev(host->mmc), "Unable to create sysfs attributes\n");
1150 free_irq(OMAP_GPIO_IRQ(host->switch_pin), host);
1151 omap_free_gpio(host->switch_pin);
1152 host->switch_pin = -1;
1153 goto no_switch;
1154 }
1155 if (mmc_omap_enable_poll && mmc_omap_cover_is_open(host))
1156 schedule_work(&host->switch_work);
1157 }
1158
1159 mmc_add_host(mmc);
1160
1161 return 0;
1162
1163no_switch:
1164 /* FIXME: Free other resources too. */
1165 if (host) {
1166 if (host->iclk && !IS_ERR(host->iclk))
1167 clk_put(host->iclk);
1168 if (host->fclk && !IS_ERR(host->fclk))
1169 clk_put(host->fclk);
1170 mmc_free_host(host->mmc);
1171 }
1172err_free_power_gpio:
1173 if (host->power_pin >= 0)
1174 omap_free_gpio(host->power_pin);
1175err_free_fclk:
1176 clk_put(host->fclk);
1177err_free_iclk:
1178 if (host->iclk != NULL) {
1179 clk_disable(host->iclk);
1180 clk_put(host->iclk);
1181 }
1182err_free_mmc_host:
1183 mmc_free_host(host->mmc);
1184err_free_mem_region:
1185 release_mem_region(res->start, res->end - res->start + 1);
1186 return ret;
1187}
1188
1189static int mmc_omap_remove(struct platform_device *pdev)
1190{
1191 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1192
1193 platform_set_drvdata(pdev, NULL);
1194
1195 BUG_ON(host == NULL);
1196
1197 mmc_remove_host(host->mmc);
1198 free_irq(host->irq, host);
1199
1200 if (host->power_pin >= 0)
1201 omap_free_gpio(host->power_pin);
1202 if (host->switch_pin >= 0) {
1203 device_remove_file(&pdev->dev, &dev_attr_enable_poll);
1204 device_remove_file(&pdev->dev, &dev_attr_cover_switch);
1205 free_irq(OMAP_GPIO_IRQ(host->switch_pin), host);
1206 omap_free_gpio(host->switch_pin);
1207 host->switch_pin = -1;
1208 del_timer_sync(&host->switch_timer);
1209 flush_scheduled_work();
1210 }
1211 if (host->iclk && !IS_ERR(host->iclk))
1212 clk_put(host->iclk);
1213 if (host->fclk && !IS_ERR(host->fclk))
1214 clk_put(host->fclk);
1215
1216 release_mem_region(pdev->resource[0].start,
1217 pdev->resource[0].end - pdev->resource[0].start + 1);
1218
1219 mmc_free_host(host->mmc);
1220
1221 return 0;
1222}
1223
1224#ifdef CONFIG_PM
/*
 * PM suspend hook: ask the MMC core to suspend the host and remember
 * the state so a repeated suspend is a no-op.  Returns the core's
 * error, or 0.
 */
static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	int ret = 0;
	struct mmc_omap_host *host = platform_get_drvdata(pdev);

	if (host && host->suspended)
		return 0;

	if (host) {
		ret = mmc_suspend_host(host->mmc, mesg);
		if (ret == 0)
			host->suspended = 1;
	}
	return ret;
}
1240
/*
 * PM resume hook: counterpart of mmc_omap_suspend(); resumes the host
 * via the MMC core and clears the suspended flag on success.
 */
static int mmc_omap_resume(struct platform_device *pdev)
{
	int ret = 0;
	struct mmc_omap_host *host = platform_get_drvdata(pdev);

	if (host && !host->suspended)
		return 0;

	if (host) {
		ret = mmc_resume_host(host->mmc);
		if (ret == 0)
			host->suspended = 0;
	}

	return ret;
}
1257#else
1258#define mmc_omap_suspend NULL
1259#define mmc_omap_resume NULL
1260#endif
1261
/* Platform driver glue; matched against the "mmci-omap" device name. */
static struct platform_driver mmc_omap_driver = {
	.probe		= mmc_omap_probe,
	.remove		= mmc_omap_remove,
	.suspend	= mmc_omap_suspend,
	.resume		= mmc_omap_resume,
	.driver		= {
		.name	= DRIVER_NAME,
	},
};
1271
/* Module entry point: register the platform driver. */
static int __init mmc_omap_init(void)
{
	return platform_driver_register(&mmc_omap_driver);
}
1276
/* Module exit point: unregister the platform driver. */
static void __exit mmc_omap_exit(void)
{
	platform_driver_unregister(&mmc_omap_driver);
}
1281
1282module_init(mmc_omap_init);
1283module_exit(mmc_omap_exit);
1284
1285MODULE_DESCRIPTION("OMAP Multimedia Card driver");
1286MODULE_LICENSE("GPL");
1287MODULE_ALIAS(DRIVER_NAME);
1288MODULE_AUTHOR("Juha Yrjölä");
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
new file mode 100644
index 000000000000..a98ff98fa567
--- /dev/null
+++ b/drivers/mmc/host/pxamci.c
@@ -0,0 +1,616 @@
1/*
2 * linux/drivers/mmc/pxa.c - PXA MMCI driver
3 *
4 * Copyright (C) 2003 Russell King, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This hardware is really sick:
11 * - No way to clear interrupts.
12 * - Have to turn off the clock whenever we touch the device.
13 * - Doesn't tell you how many data blocks were transferred.
14 * Yuck!
15 *
16 * 1 and 3 byte data transfers not supported
17 * max block length up to 1023
18 */
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/ioport.h>
22#include <linux/platform_device.h>
23#include <linux/delay.h>
24#include <linux/interrupt.h>
25#include <linux/dma-mapping.h>
26#include <linux/mmc/host.h>
27
28#include <asm/dma.h>
29#include <asm/io.h>
30#include <asm/scatterlist.h>
31#include <asm/sizes.h>
32
33#include <asm/arch/pxa-regs.h>
34#include <asm/arch/mmc.h>
35
36#include "pxamci.h"
37
38#define DRIVER_NAME "pxa2xx-mci"
39
40#define NR_SG 1
41
/* Per-controller state for the PXA MMC host. */
42struct pxamci_host {
43	struct mmc_host		*mmc;
44	spinlock_t		lock;		/* protects imask updates */
45	struct resource		*res;		/* MMIO region we claimed */
46	void __iomem		*base;
47	int			irq;
48	int			dma;		/* PXA DMA channel, -1 if none */
49	unsigned int		clkrt;		/* value for MMC_CLKRT */
50	unsigned int		cmdat;		/* sticky CMDAT bits (e.g. INIT) */
51	unsigned int		imask;		/* shadow of MMC_I_MASK */
52	unsigned int		power_mode;
53	struct pxamci_platform_data *pdata;
54
	/* Request currently in flight (one at a time). */
55	struct mmc_request	*mrq;
56	struct mmc_command	*cmd;
57	struct mmc_data		*data;
58
	/* DMA descriptor chain: one page, coherent-mapped. */
59	dma_addr_t		sg_dma;
60	struct pxa_dma_desc	*sg_cpu;
61	unsigned int		dma_len;
62
63	unsigned int		dma_dir;	/* DMA_TO_DEVICE / DMA_FROM_DEVICE */
64};
65
/*
 * Stop the MMC bus clock and busy-wait (up to ~10ms) for the
 * controller to acknowledge.  The hardware requires the clock to be
 * off before most registers may be touched.
 */
66static void pxamci_stop_clock(struct pxamci_host *host)
67{
68	if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
69		unsigned long timeout = 10000;
70		unsigned int v;
71
72		writel(STOP_CLOCK, host->base + MMC_STRPCL);
73
74		do {
75			v = readl(host->base + MMC_STAT);
76			if (!(v & STAT_CLK_EN))
77				break;
78			udelay(1);
79		} while (timeout--);
80
		/* v holds the last status read; complain if still running. */
81		if (v & STAT_CLK_EN)
82			dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
83	}
84}
85
/*
 * Unmask the given interrupt source(s).  MMC_I_MASK is write-only in
 * effect, so a shadow (host->imask) is kept under host->lock; a clear
 * bit means "enabled".
 */
86static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
87{
88	unsigned long flags;
89
90	spin_lock_irqsave(&host->lock, flags);
91	host->imask &= ~mask;
92	writel(host->imask, host->base + MMC_I_MASK);
93	spin_unlock_irqrestore(&host->lock, flags);
94}
95
/* Mask the given interrupt source(s); counterpart of pxamci_enable_irq(). */
96static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
97{
98	unsigned long flags;
99
100	spin_lock_irqsave(&host->lock, flags);
101	host->imask |= mask;
102	writel(host->imask, host->base + MMC_I_MASK);
103	spin_unlock_irqrestore(&host->lock, flags);
104}
105
/*
 * Program block count/size and the read timeout, map the scatterlist,
 * build the PXA DMA descriptor chain and start the DMA channel.
 * Called before the data command is issued.
 */
106static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
107{
108	unsigned int nob = data->blocks;
109	unsigned long long clks;
110	unsigned int timeout;
111	u32 dcmd;
112	int i;
113
114	host->data = data;
115
	/* Stream (open-ended) transfers: program the max block count. */
116	if (data->flags & MMC_DATA_STREAM)
117		nob = 0xffff;
118
119	writel(nob, host->base + MMC_NOB);
120	writel(data->blksz, host->base + MMC_BLKLEN);
121
	/* Convert the ns timeout to clock ticks; RDTO counts in 256-tick units. */
122	clks = (unsigned long long)data->timeout_ns * CLOCKRATE;
123	do_div(clks, 1000000000UL);
124	timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
125	writel((timeout + 255) / 256, host->base + MMC_RDTO);
126
	/* Route the DMA request line for the active direction only. */
127	if (data->flags & MMC_DATA_READ) {
128		host->dma_dir = DMA_FROM_DEVICE;
129		dcmd = DCMD_INCTRGADDR | DCMD_FLOWTRG;
130		DRCMRTXMMC = 0;
131		DRCMRRXMMC = host->dma | DRCMR_MAPVLD;
132	} else {
133		host->dma_dir = DMA_TO_DEVICE;
134		dcmd = DCMD_INCSRCADDR | DCMD_FLOWSRC;
135		DRCMRRXMMC = 0;
136		DRCMRTXMMC = host->dma | DRCMR_MAPVLD;
137	}
138
139	dcmd |= DCMD_BURST32 | DCMD_WIDTH1;
140
141	host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
142				   host->dma_dir);
143
	/* One descriptor per sg entry, chained; FIFO is the fixed end. */
144	for (i = 0; i < host->dma_len; i++) {
145		if (data->flags & MMC_DATA_READ) {
146			host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
147			host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
148		} else {
149			host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]);
150			host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO;
151		}
152		host->sg_cpu[i].dcmd = dcmd | sg_dma_len(&data->sg[i]);
153		host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) *
154					sizeof(struct pxa_dma_desc);
155	}
156	host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;
	/* Descriptors must be visible to the DMA engine before RUN is set. */
157	wmb();
158
159	DDADR(host->dma) = host->sg_dma;
160	DCSR(host->dma) = DCSR_RUN;
161}
162
/*
 * Latch a command into the controller, (re)start the bus clock and
 * unmask the end-of-command interrupt.  The response format is encoded
 * into CMDAT from the mmc core's response flags.
 */
163static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
164{
165	WARN_ON(host->cmd != NULL);
166	host->cmd = cmd;
167
168	if (cmd->flags & MMC_RSP_BUSY)
169		cmdat |= CMDAT_BUSY;
170
	/* Strip BUSY/OPCODE so only the response-shape bits are compared. */
171#define RSP_TYPE(x)	((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
172	switch (RSP_TYPE(mmc_resp_type(cmd))) {
173	case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */
174		cmdat |= CMDAT_RESP_SHORT;
175		break;
176	case RSP_TYPE(MMC_RSP_R3):
177		cmdat |= CMDAT_RESP_R3;
178		break;
179	case RSP_TYPE(MMC_RSP_R2):
180		cmdat |= CMDAT_RESP_R2;
181		break;
182	default:
183		break;
184	}
185
	/* 32-bit argument is split over two 16-bit registers. */
186	writel(cmd->opcode, host->base + MMC_CMD);
187	writel(cmd->arg >> 16, host->base + MMC_ARGH);
188	writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
189	writel(cmdat, host->base + MMC_CMDAT);
190	writel(host->clkrt, host->base + MMC_CLKRT);
191
192	writel(START_CLOCK, host->base + MMC_STRPCL);
193
194	pxamci_enable_irq(host, END_CMD_RES);
195}
196
/* Drop all in-flight state and hand the completed request back to the core. */
197static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
198{
199	host->mrq = NULL;
200	host->cmd = NULL;
201	host->data = NULL;
202	mmc_request_done(host->mmc, mrq);
203}
204
/*
 * Handle END_CMD_RES: read the response FIFO, translate status bits
 * into mmc core error codes, then either kick off the data phase or
 * finish the request.  Returns 1 if there was a command to complete.
 */
205static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
206{
207	struct mmc_command *cmd = host->cmd;
208	int i;
209	u32 v;
210
211	if (!cmd)
212		return 0;
213
214	host->cmd = NULL;
215
216	/*
217	 * Did I mention this is Sick. We always need to
218	 * discard the upper 8 bits of the first 16-bit word.
219	 */
220	v = readl(host->base + MMC_RES) & 0xffff;
	/* Repack eight 16-bit FIFO reads into four 32-bit response words. */
221	for (i = 0; i < 4; i++) {
222		u32 w1 = readl(host->base + MMC_RES) & 0xffff;
223		u32 w2 = readl(host->base + MMC_RES) & 0xffff;
224		cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
225		v = w2;
226	}
227
228	if (stat & STAT_TIME_OUT_RESPONSE) {
229		cmd->error = MMC_ERR_TIMEOUT;
230	} else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
231#ifdef CONFIG_PXA27x
232		/*
233		 * workaround for erratum #42:
234		 * Intel PXA27x Family Processor Specification Update Rev 001
235		 */
236		if (cmd->opcode == MMC_ALL_SEND_CID ||
237		    cmd->opcode == MMC_SEND_CSD ||
238		    cmd->opcode == MMC_SEND_CID) {
239			/* a bogus CRC error can appear if the msb of
240			   the 15 byte response is a one */
241			if ((cmd->resp[0] & 0x80000000) == 0)
242				cmd->error = MMC_ERR_BADCRC;
243		} else {
244			pr_debug("ignoring CRC from command %d - *risky*\n",cmd->opcode);
245		}
246#else
247		cmd->error = MMC_ERR_BADCRC;
248#endif
249	}
250
251	pxamci_disable_irq(host, END_CMD_RES);
	/* Data phase only proceeds if the command itself succeeded. */
252	if (host->data && cmd->error == MMC_ERR_NONE) {
253		pxamci_enable_irq(host, DATA_TRAN_DONE);
254	} else {
255		pxamci_finish_request(host, host->mrq);
256	}
257
258	return 1;
259}
260
/*
 * Handle DATA_TRAN_DONE: stop DMA, unmap the buffers, record success
 * or failure, then issue the stop command (if any) or finish the
 * request.  Returns 1 if there was a data phase to complete.
 */
261static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
262{
263	struct mmc_data *data = host->data;
264
265	if (!data)
266		return 0;
267
268	DCSR(host->dma) = 0;
269	dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
270		     host->dma_dir);
271
272	if (stat & STAT_READ_TIME_OUT)
273		data->error = MMC_ERR_TIMEOUT;
274	else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR))
275		data->error = MMC_ERR_BADCRC;
276
277	/*
278	 * There appears to be a hardware design bug here.  There seems to
279	 * be no way to find out how much data was transferred to the card.
280	 * This means that if there was an error on any block, we mark all
281	 * data blocks as being in error.
282	 */
283	if (data->error == MMC_ERR_NONE)
284		data->bytes_xfered = data->blocks * data->blksz;
285	else
286		data->bytes_xfered = 0;
287
288	pxamci_disable_irq(host, DATA_TRAN_DONE);
289
290	host->data = NULL;
291	if (host->mrq->stop) {
		/* Clock must be stopped before latching the stop command. */
292		pxamci_stop_clock(host);
293		pxamci_start_cmd(host, host->mrq->stop, 0);
294	} else {
295		pxamci_finish_request(host, host->mrq);
296	}
297
298	return 1;
299}
300
/*
 * Controller interrupt handler: demultiplex MMC_I_REG into the command
 * and data completion paths.
 */
301static irqreturn_t pxamci_irq(int irq, void *devid)
302{
303	struct pxamci_host *host = devid;
304	unsigned int ireg;
305	int handled = 0;
306
307	ireg = readl(host->base + MMC_I_REG);
308
309	if (ireg) {
310		unsigned stat = readl(host->base + MMC_STAT);
311
312		pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat);
313
314		if (ireg & END_CMD_RES)
315			handled |= pxamci_cmd_done(host, stat);
316		if (ireg & DATA_TRAN_DONE)
317			handled |= pxamci_data_done(host, stat);
318	}
319
320	return IRQ_RETVAL(handled);
321}
322
/*
 * mmc_host_ops.request: start a new request.  Sets up the data phase
 * first (if present), then issues the opening command; the rest
 * completes from interrupt context.
 */
323static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
324{
325	struct pxamci_host *host = mmc_priv(mmc);
326	unsigned int cmdat;
327
328	WARN_ON(host->mrq != NULL);
329
330	host->mrq = mrq;
331
332	pxamci_stop_clock(host);
333
	/* CMDAT_INIT (80-clock init sequence) is one-shot: consume it. */
334	cmdat = host->cmdat;
335	host->cmdat &= ~CMDAT_INIT;
336
337	if (mrq->data) {
338		pxamci_setup_data(host, mrq->data);
339
340		cmdat &= ~CMDAT_BUSY;
341		cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
342		if (mrq->data->flags & MMC_DATA_WRITE)
343			cmdat |= CMDAT_WRITE;
344
345		if (mrq->data->flags & MMC_DATA_STREAM)
346			cmdat |= CMDAT_STREAM;
347	}
348
349	pxamci_start_cmd(host, mrq->cmd, cmdat);
350}
351
/*
 * mmc_host_ops.get_ro: defer to the board's write-protect callback when
 * one was supplied via platform data.
 */
352static int pxamci_get_ro(struct mmc_host *mmc)
353{
354	struct pxamci_host *host = mmc_priv(mmc);
355
356	if (host->pdata && host->pdata->get_ro)
357		return host->pdata->get_ro(mmc_dev(mmc));
358	/* Host doesn't support read only detection so assume writeable */
359	return 0;
360}
361
/*
 * mmc_host_ops.set_ios: apply clock rate and power state requested by
 * the mmc core.  The clock divider is only latched on the next command
 * (see pxamci_start_cmd).
 */
362static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
363{
364	struct pxamci_host *host = mmc_priv(mmc);
365
366	if (ios->clock) {
		/* Round the divider up so we never exceed the requested rate. */
367		unsigned int clk = CLOCKRATE / ios->clock;
368		if (CLOCKRATE / clk > ios->clock)
369			clk <<= 1;
370		host->clkrt = fls(clk) - 1;
371		pxa_set_cken(CKEN12_MMC, 1);
372
373		/*
374		 * we write clkrt on the next command
375		 */
376	} else {
377		pxamci_stop_clock(host);
378		pxa_set_cken(CKEN12_MMC, 0);
379	}
380
381	if (host->power_mode != ios->power_mode) {
382		host->power_mode = ios->power_mode;
383
384		if (host->pdata && host->pdata->setpower)
385			host->pdata->setpower(mmc_dev(mmc), ios->vdd);
386
		/* Freshly powered card needs the 80-clock init sequence. */
387		if (ios->power_mode == MMC_POWER_ON)
388			host->cmdat |= CMDAT_INIT;
389	}
390
391	pr_debug("PXAMCI: clkrt = %x cmdat = %x\n",
392		 host->clkrt, host->cmdat);
393}
394
/* Host operations exported to the MMC core. */
395static const struct mmc_host_ops pxamci_ops = {
396	.request	= pxamci_request,
397	.get_ro		= pxamci_get_ro,
398	.set_ios	= pxamci_set_ios,
399};
400
/*
 * DMA channel interrupt.  Not expected in normal operation (completion
 * is signalled via DATA_TRAN_DONE); log it and clear the status bits.
 */
401static void pxamci_dma_irq(int dma, void *devid)
402{
403	printk(KERN_ERR "DMA%d: IRQ???\n", dma);
404	DCSR(dma) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
405}
406
/*
 * Card-detect GPIO interrupt (wired up by the board's pdata->init):
 * notify the core after the board-specified debounce delay.
 */
407static irqreturn_t pxamci_detect_irq(int irq, void *devid)
408{
409	struct pxamci_host *host = mmc_priv(devid);
410
411	mmc_detect_change(devid, host->pdata->detect_delay);
412	return IRQ_HANDLED;
413}
414
/*
 * Bind to the platform device: claim MMIO and IRQ resources, allocate
 * and initialize the mmc_host, quiesce the controller, grab a DMA
 * channel and register with the MMC core.  All acquisitions unwind
 * through the single "out:" label on failure.
 */
415static int pxamci_probe(struct platform_device *pdev)
416{
417	struct mmc_host *mmc;
418	struct pxamci_host *host = NULL;
419	struct resource *r;
420	int ret, irq;
421
422	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
423	irq = platform_get_irq(pdev, 0);
424	if (!r || irq < 0)
425		return -ENXIO;
426
	/* r is replaced by the region we now own; released in "out:". */
427	r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
428	if (!r)
429		return -EBUSY;
430
431	mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
432	if (!mmc) {
433		ret = -ENOMEM;
434		goto out;
435	}
436
437	mmc->ops = &pxamci_ops;
438	mmc->f_min = CLOCKRATE_MIN;
439	mmc->f_max = CLOCKRATE_MAX;
440
441	/*
442	 * We can do SG-DMA, but we don't because we never know how much
443	 * data we successfully wrote to the card.
444	 */
445	mmc->max_phys_segs = NR_SG;
446
447	/*
448	 * Our hardware DMA can handle a maximum of one page per SG entry.
449	 */
450	mmc->max_seg_size = PAGE_SIZE;
451
452	/*
453	 * Block length register is 10 bits.
454	 */
455	mmc->max_blk_size = 1023;
456
457	/*
458	 * Block count register is 16 bits.
459	 */
460	mmc->max_blk_count = 65535;
461
462	host = mmc_priv(mmc);
463	host->mmc = mmc;
	/* -1 marks "no DMA channel yet" for the error path below. */
464	host->dma = -1;
465	host->pdata = pdev->dev.platform_data;
466	mmc->ocr_avail = host->pdata ?
467			 host->pdata->ocr_mask :
468			 MMC_VDD_32_33|MMC_VDD_33_34;
469
	/* One page of coherent memory for the DMA descriptor chain. */
470	host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
471	if (!host->sg_cpu) {
472		ret = -ENOMEM;
473		goto out;
474	}
475
476	spin_lock_init(&host->lock);
477	host->res = r;
478	host->irq = irq;
479	host->imask = MMC_I_MASK_ALL;
480
481	host->base = ioremap(r->start, SZ_4K);
482	if (!host->base) {
483		ret = -ENOMEM;
484		goto out;
485	}
486
487	/*
488	 * Ensure that the host controller is shut down, and setup
489	 * with our defaults.
490	 */
491	pxamci_stop_clock(host);
492	writel(0, host->base + MMC_SPI);
493	writel(64, host->base + MMC_RESTO);
494	writel(host->imask, host->base + MMC_I_MASK);
495
496	host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW,
497				    pxamci_dma_irq, host);
498	if (host->dma < 0) {
499		ret = -EBUSY;
500		goto out;
501	}
502
503	ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
504	if (ret)
505		goto out;
506
507	platform_set_drvdata(pdev, mmc);
508
	/* Board hook: wire up power/card-detect, passing our detect ISR. */
509	if (host->pdata && host->pdata->init)
510		host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc);
511
512	mmc_add_host(mmc);
513
514	return 0;
515
516 out:
517	if (host) {
518		if (host->dma >= 0)
519			pxa_free_dma(host->dma);
520		if (host->base)
521			iounmap(host->base);
522		if (host->sg_cpu)
523			dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
524	}
525	if (mmc)
526		mmc_free_host(mmc);
527	release_resource(r);
528	return ret;
529}
530
/*
 * Unbind: tear down in reverse probe order — board exit hook, core
 * deregistration, controller quiesce, then release IRQ/DMA/MMIO/memory.
 */
531static int pxamci_remove(struct platform_device *pdev)
532{
533	struct mmc_host *mmc = platform_get_drvdata(pdev);
534
535	platform_set_drvdata(pdev, NULL);
536
537	if (mmc) {
538		struct pxamci_host *host = mmc_priv(mmc);
539
540		if (host->pdata && host->pdata->exit)
541			host->pdata->exit(&pdev->dev, mmc);
542
543		mmc_remove_host(mmc);
544
		/* Mask every interrupt source before freeing the IRQ. */
545		pxamci_stop_clock(host);
546		writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
547		       END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
548		       host->base + MMC_I_MASK);
549
		/* Unroute the DMA request lines. */
550		DRCMRRXMMC = 0;
551		DRCMRTXMMC = 0;
552
553		free_irq(host->irq, host);
554		pxa_free_dma(host->dma);
555		iounmap(host->base);
556		dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
557
558		release_resource(host->res);
559
560		mmc_free_host(mmc);
561	}
562	return 0;
563}
564
565#ifdef CONFIG_PM
/* Platform suspend hook: delegate to the MMC core. */
566static int pxamci_suspend(struct platform_device *dev, pm_message_t state)
567{
568	struct mmc_host *mmc = platform_get_drvdata(dev);
569	int ret = 0;
570
571	if (mmc)
572		ret = mmc_suspend_host(mmc, state);
573
574	return ret;
575}
576
/* Platform resume hook: delegate to the MMC core. */
577static int pxamci_resume(struct platform_device *dev)
578{
579	struct mmc_host *mmc = platform_get_drvdata(dev);
580	int ret = 0;
581
582	if (mmc)
583		ret = mmc_resume_host(mmc);
584
585	return ret;
586}
587#else
/* No power management: the platform core skips NULL callbacks. */
588#define pxamci_suspend	NULL
589#define pxamci_resume	NULL
590#endif
591
/* Platform driver glue; matched against the platform device by name. */
592static struct platform_driver pxamci_driver = {
593	.probe		= pxamci_probe,
594	.remove		= pxamci_remove,
595	.suspend	= pxamci_suspend,
596	.resume		= pxamci_resume,
597	.driver		= {
598		.name	= DRIVER_NAME,
599	},
600};
601
/* Module init: register the platform driver with the driver core. */
602static int __init pxamci_init(void)
603{
604	return platform_driver_register(&pxamci_driver);
605}
606
/* Module exit: unregister the platform driver. */
607static void __exit pxamci_exit(void)
608{
609	platform_driver_unregister(&pxamci_driver);
610}
611
612module_init(pxamci_init);
613module_exit(pxamci_exit);
614
615MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver")
616MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/pxamci.h b/drivers/mmc/host/pxamci.h
new file mode 100644
index 000000000000..1b163220df2b
--- /dev/null
+++ b/drivers/mmc/host/pxamci.h
@@ -0,0 +1,124 @@
/*
 * PXA MMC controller register map and bit definitions.
 * The #undefs clear any clashing definitions pulled in from the
 * platform's own register headers (pxa-regs.h defines some of these).
 * All register offsets below are relative to the controller's MMIO base.
 */
1#undef MMC_STRPCL
2#undef MMC_STAT
3#undef MMC_CLKRT
4#undef MMC_SPI
5#undef MMC_CMDAT
6#undef MMC_RESTO
7#undef MMC_RDTO
8#undef MMC_BLKLEN
9#undef MMC_NOB
10#undef MMC_PRTBUF
11#undef MMC_I_MASK
12#undef END_CMD_RES
13#undef PRG_DONE
14#undef DATA_TRAN_DONE
15#undef MMC_I_REG
16#undef MMC_CMD
17#undef MMC_ARGH
18#undef MMC_ARGL
19#undef MMC_RES
20#undef MMC_RXFIFO
21#undef MMC_TXFIFO
22
/* Clock start/stop control. */
23#define MMC_STRPCL	0x0000
24#define STOP_CLOCK		(1 << 0)
25#define START_CLOCK		(2 << 0)
26
/* Controller status. */
27#define MMC_STAT	0x0004
28#define STAT_END_CMD_RES		(1 << 13)
29#define STAT_PRG_DONE			(1 << 12)
30#define STAT_DATA_TRAN_DONE		(1 << 11)
31#define STAT_CLK_EN			(1 << 8)
32#define STAT_RECV_FIFO_FULL		(1 << 7)
33#define STAT_XMIT_FIFO_EMPTY		(1 << 6)
34#define STAT_RES_CRC_ERR		(1 << 5)
35#define STAT_SPI_READ_ERROR_TOKEN	(1 << 4)
36#define STAT_CRC_READ_ERROR		(1 << 3)
37#define STAT_CRC_WRITE_ERROR		(1 << 2)
38#define STAT_TIME_OUT_RESPONSE		(1 << 1)
39#define STAT_READ_TIME_OUT		(1 << 0)
40
41#define MMC_CLKRT	0x0008		/* 3 bit */
42
/* SPI-mode control (unused by this driver; written to 0 at probe). */
43#define MMC_SPI		0x000c
44#define SPI_CS_ADDRESS		(1 << 3)
45#define SPI_CS_EN		(1 << 2)
46#define CRC_ON			(1 << 1)
47#define SPI_EN			(1 << 0)
48
/* Command/data attributes latched with each command. */
49#define MMC_CMDAT	0x0010
50#define CMDAT_DMAEN		(1 << 7)
51#define CMDAT_INIT		(1 << 6)
52#define CMDAT_BUSY		(1 << 5)
53#define CMDAT_STREAM		(1 << 4)	/* 1 = stream */
54#define CMDAT_WRITE		(1 << 3)	/* 1 = write */
55#define CMDAT_DATAEN		(1 << 2)
56#define CMDAT_RESP_NONE		(0 << 0)
57#define CMDAT_RESP_SHORT	(1 << 0)
58#define CMDAT_RESP_R2		(2 << 0)
59#define CMDAT_RESP_R3		(3 << 0)
60
61#define MMC_RESTO	0x0014	/* 7 bit */
62
63#define MMC_RDTO	0x0018	/* 16 bit */
64
65#define MMC_BLKLEN	0x001c	/* 10 bit */
66
67#define MMC_NOB		0x0020	/* 16 bit */
68
69#define MMC_PRTBUF	0x0024
70#define BUF_PART_FULL		(1 << 0)
71
/* Interrupt mask; a set bit disables the source. */
72#define MMC_I_MASK	0x0028
73
74/*PXA27x MMC interrupts*/
75#define SDIO_SUSPEND_ACK	(1 << 12)
76#define SDIO_INT		(1 << 11)
77#define RD_STALLED		(1 << 10)
78#define RES_ERR			(1 << 9)
79#define DAT_ERR			(1 << 8)
80#define TINT			(1 << 7)
81
82/*PXA2xx MMC interrupts*/
83#define TXFIFO_WR_REQ		(1 << 6)
84#define RXFIFO_RD_REQ		(1 << 5)
85#define CLK_IS_OFF		(1 << 4)
86#define STOP_CMD		(1 << 3)
87#define END_CMD_RES		(1 << 2)
88#define PRG_DONE		(1 << 1)
89#define DATA_TRAN_DONE		(1 << 0)
90
/* "Mask everything" value; PXA27x has more interrupt sources. */
91#ifdef CONFIG_PXA27x
92#define MMC_I_MASK_ALL          0x00001fff
93#else
94#define MMC_I_MASK_ALL          0x0000007f
95#endif
96
97#define MMC_I_REG	0x002c
98/* same as MMC_I_MASK */
99
100#define MMC_CMD		0x0030
101
102#define MMC_ARGH	0x0034	/* 16 bit */
103
104#define MMC_ARGL	0x0038	/* 16 bit */
105
106#define MMC_RES		0x003c	/* 16 bit */
107
108#define MMC_RXFIFO	0x0040	/* 8 bit */
109
110#define MMC_TXFIFO	0x0044	/* 8 bit */
111
112/*
113 * The base MMC clock rate
114 */
115#ifdef CONFIG_PXA27x
116#define CLOCKRATE_MIN	304688
117#define CLOCKRATE_MAX	19500000
118#else
119#define CLOCKRATE_MIN	312500
120#define CLOCKRATE_MAX	20000000
121#endif
122
123#define CLOCKRATE	CLOCKRATE_MAX
124
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
new file mode 100644
index 000000000000..579142a7904b
--- /dev/null
+++ b/drivers/mmc/host/sdhci.c
@@ -0,0 +1,1539 @@
1/*
2 * linux/drivers/mmc/sdhci.c - Secure Digital Host Controller Interface driver
3 *
4 * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 */
11
12#include <linux/delay.h>
13#include <linux/highmem.h>
14#include <linux/pci.h>
15#include <linux/dma-mapping.h>
16
17#include <linux/mmc/host.h>
18
19#include <asm/scatterlist.h>
20
21#include "sdhci.h"
22
23#define DRIVER_NAME "sdhci"

/* Debug print with the calling function name prefixed. */
25#define DBG(f, x...) \
26	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)

/* Module parameters for forcing PIO/DMA and overriding quirk detection. */
28static unsigned int debug_nodma = 0;
29static unsigned int debug_forcedma = 0;
30static unsigned int debug_quirks = 0;

/* Per-controller workaround flags, keyed off the PCI ID table below. */
32#define SDHCI_QUIRK_CLOCK_BEFORE_RESET			(1<<0)
33#define SDHCI_QUIRK_FORCE_DMA				(1<<1)
34/* Controller doesn't like some resets when there is no card inserted. */
35#define SDHCI_QUIRK_NO_CARD_NO_RESET			(1<<2)
36#define SDHCI_QUIRK_SINGLE_POWER_WRITE			(1<<3)
37
/*
 * PCI match table.  Specific broken controllers come first with their
 * quirk flags in driver_data; the final class-based entry picks up any
 * standards-compliant SD host controller.
 */
38static const struct pci_device_id pci_ids[] __devinitdata = {
39	{
40		.vendor		= PCI_VENDOR_ID_RICOH,
41		.device		= PCI_DEVICE_ID_RICOH_R5C822,
42		.subvendor	= PCI_VENDOR_ID_IBM,
43		.subdevice	= PCI_ANY_ID,
44		.driver_data	= SDHCI_QUIRK_CLOCK_BEFORE_RESET |
45				  SDHCI_QUIRK_FORCE_DMA,
46	},

48	{
49		.vendor		= PCI_VENDOR_ID_RICOH,
50		.device		= PCI_DEVICE_ID_RICOH_R5C822,
51		.subvendor	= PCI_ANY_ID,
52		.subdevice	= PCI_ANY_ID,
53		.driver_data	= SDHCI_QUIRK_FORCE_DMA |
54				  SDHCI_QUIRK_NO_CARD_NO_RESET,
55	},

57	{
58		.vendor		= PCI_VENDOR_ID_TI,
59		.device		= PCI_DEVICE_ID_TI_XX21_XX11_SD,
60		.subvendor	= PCI_ANY_ID,
61		.subdevice	= PCI_ANY_ID,
62		.driver_data	= SDHCI_QUIRK_FORCE_DMA,
63	},

65	{
66		.vendor		= PCI_VENDOR_ID_ENE,
67		.device		= PCI_DEVICE_ID_ENE_CB712_SD,
68		.subvendor	= PCI_ANY_ID,
69		.subdevice	= PCI_ANY_ID,
70		.driver_data	= SDHCI_QUIRK_SINGLE_POWER_WRITE,
71	},

73	{	/* Generic SD host controller */
74		PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
75	},

77	{ /* end: all zeroes */ },
78};

80MODULE_DEVICE_TABLE(pci, pci_ids);
81
82static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
83static void sdhci_finish_data(struct sdhci_host *);
84
85static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
86static void sdhci_finish_command(struct sdhci_host *);
87
/* Dump the full SDHCI register file to the kernel log (debug aid). */
88static void sdhci_dumpregs(struct sdhci_host *host)
89{
90	printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n");

92	printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
93		readl(host->ioaddr + SDHCI_DMA_ADDRESS),
94		readw(host->ioaddr + SDHCI_HOST_VERSION));
95	printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
96		readw(host->ioaddr + SDHCI_BLOCK_SIZE),
97		readw(host->ioaddr + SDHCI_BLOCK_COUNT));
98	printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
99		readl(host->ioaddr + SDHCI_ARGUMENT),
100		readw(host->ioaddr + SDHCI_TRANSFER_MODE));
101	printk(KERN_DEBUG DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
102		readl(host->ioaddr + SDHCI_PRESENT_STATE),
103		readb(host->ioaddr + SDHCI_HOST_CONTROL));
104	printk(KERN_DEBUG DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
105		readb(host->ioaddr + SDHCI_POWER_CONTROL),
106		readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL));
107	printk(KERN_DEBUG DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
108		readb(host->ioaddr + SDHCI_WALK_UP_CONTROL),
109		readw(host->ioaddr + SDHCI_CLOCK_CONTROL));
110	printk(KERN_DEBUG DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
111		readb(host->ioaddr + SDHCI_TIMEOUT_CONTROL),
112		readl(host->ioaddr + SDHCI_INT_STATUS));
113	printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
114		readl(host->ioaddr + SDHCI_INT_ENABLE),
115		readl(host->ioaddr + SDHCI_SIGNAL_ENABLE));
116	printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
117		readw(host->ioaddr + SDHCI_ACMD12_ERR),
118		readw(host->ioaddr + SDHCI_SLOT_INT_STATUS));
119	printk(KERN_DEBUG DRIVER_NAME ": Caps:     0x%08x | Max curr: 0x%08x\n",
120		readl(host->ioaddr + SDHCI_CAPABILITIES),
121		readl(host->ioaddr + SDHCI_MAX_CURRENT));

123	printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
124}
125
126/*****************************************************************************\
127 * *
128 * Low level functions *
129 * *
130\*****************************************************************************/
131
/*
 * Issue a software reset (mask = SDHCI_RESET_ALL/CMD/DATA) and poll up
 * to 100ms for the controller to clear the bit.  Some controllers
 * lock up when reset with no card present — the quirk skips them.
 */
132static void sdhci_reset(struct sdhci_host *host, u8 mask)
133{
134	unsigned long timeout;

136	if (host->chip->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
137		if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
138			SDHCI_CARD_PRESENT))
139			return;
140	}

142	writeb(mask, host->ioaddr + SDHCI_SOFTWARE_RESET);

	/* A full reset also kills the clock; force reprogramming later. */
144	if (mask & SDHCI_RESET_ALL)
145		host->clock = 0;

147	/* Wait max 100 ms */
148	timeout = 100;

150	/* hw clears the bit when it's done */
151	while (readb(host->ioaddr + SDHCI_SOFTWARE_RESET) & mask) {
152		if (timeout == 0) {
153			printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
154				mmc_hostname(host->mmc), (int)mask);
155			sdhci_dumpregs(host);
156			return;
157		}
158		timeout--;
159		mdelay(1);
160	}
161}
162
/*
 * Full controller reset followed by enabling every interrupt source
 * the driver handles, both as status bits and as actual IRQ signals.
 */
163static void sdhci_init(struct sdhci_host *host)
164{
165	u32 intmask;

167	sdhci_reset(host, SDHCI_RESET_ALL);

169	intmask = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
170		SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
171		SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
172		SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT |
173		SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL |
174		SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE;

176	writel(intmask, host->ioaddr + SDHCI_INT_ENABLE);
177	writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE);
178}
179
/* Turn on the card-slot LED via the host control register. */
180static void sdhci_activate_led(struct sdhci_host *host)
181{
182	u8 ctrl;

184	ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
185	ctrl |= SDHCI_CTRL_LED;
186	writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
187}
188
/* Turn off the card-slot LED; counterpart of sdhci_activate_led(). */
189static void sdhci_deactivate_led(struct sdhci_host *host)
190{
191	u8 ctrl;

193	ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
194	ctrl &= ~SDHCI_CTRL_LED;
195	writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
196}
197
198/*****************************************************************************\
199 * *
200 * Core functions *
201 * *
202\*****************************************************************************/
203
/*
 * Kernel virtual address of the current scatterlist entry's data.
 * NOTE(review): relies on page_address(), i.e. assumes lowmem pages —
 * the PIO path here does not kmap highmem.
 */
204static inline char* sdhci_sg_to_buffer(struct sdhci_host* host)
205{
206	return page_address(host->cur_sg->page) + host->cur_sg->offset;
207}
208
/*
 * Advance PIO bookkeeping to the next scatterlist entry, resetting the
 * per-entry offset/remain counters.  Returns the number of entries
 * still to process (0 when the list is exhausted).
 */
209static inline int sdhci_next_sg(struct sdhci_host* host)
210{
211	/*
212	 * Skip to next SG entry.
213	 */
214	host->cur_sg++;
215	host->num_sg--;

217	/*
218	 * Any entries left?
219	 */
220	if (host->num_sg > 0) {
221		host->offset = 0;
222		host->remain = host->cur_sg->length;
223	}

225	return host->num_sg;
226}
227
/*
 * Drain one block from the controller's data port in PIO mode.
 * 32-bit words are read from SDHCI_BUFFER and scattered byte-by-byte
 * (LSB first) into the scatterlist, crossing sg entries as needed.
 */
228static void sdhci_read_block_pio(struct sdhci_host *host)
229{
230	int blksize, chunk_remain;
231	u32 data;
232	char *buffer;
233	int size;

235	DBG("PIO reading\n");

237	blksize = host->data->blksz;
238	chunk_remain = 0;
239	data = 0;

241	buffer = sdhci_sg_to_buffer(host) + host->offset;

243	while (blksize) {
		/* Fetch the next word when the current one is used up. */
244		if (chunk_remain == 0) {
245			data = readl(host->ioaddr + SDHCI_BUFFER);
246			chunk_remain = min(blksize, 4);
247		}

249		size = min(host->remain, chunk_remain);

251		chunk_remain -= size;
252		blksize -= size;
253		host->offset += size;
254		host->remain -= size;

256		while (size) {
257			*buffer = data & 0xFF;
258			buffer++;
259			data >>= 8;
260			size--;
261		}

		/* Current sg entry full: move on (a block never ends mid-word). */
263		if (host->remain == 0) {
264			if (sdhci_next_sg(host) == 0) {
265				BUG_ON(blksize != 0);
266				return;
267			}
268			buffer = sdhci_sg_to_buffer(host);
269		}
270	}
271}
272
273static void sdhci_write_block_pio(struct sdhci_host *host)
274{
275 int blksize, chunk_remain;
276 u32 data;
277 char *buffer;
278 int bytes, size;
279
280 DBG("PIO writing\n");
281
282 blksize = host->data->blksz;
283 chunk_remain = 4;
284 data = 0;
285
286 bytes = 0;
287 buffer = sdhci_sg_to_buffer(host) + host->offset;
288
289 while (blksize) {
290 size = min(host->remain, chunk_remain);
291
292 chunk_remain -= size;
293 blksize -= size;
294 host->offset += size;
295 host->remain -= size;
296
297 while (size) {
298 data >>= 8;
299 data |= (u32)*buffer << 24;
300 buffer++;
301 size--;
302 }
303
304 if (chunk_remain == 0) {
305 writel(data, host->ioaddr + SDHCI_BUFFER);
306 chunk_remain = min(blksize, 4);
307 }
308
309 if (host->remain == 0) {
310 if (sdhci_next_sg(host) == 0) {
311 BUG_ON(blksize != 0);
312 return;
313 }
314 buffer = sdhci_sg_to_buffer(host);
315 }
316 }
317}
318
/*
 * Pump PIO blocks for as long as the controller advertises data/space
 * availability in the present-state register, or until the scatterlist
 * is exhausted.
 */
319static void sdhci_transfer_pio(struct sdhci_host *host)
320{
321	u32 mask;

323	BUG_ON(!host->data);

325	if (host->num_sg == 0)
326		return;

328	if (host->data->flags & MMC_DATA_READ)
329		mask = SDHCI_DATA_AVAILABLE;
330	else
331		mask = SDHCI_SPACE_AVAILABLE;

333	while (readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask) {
334		if (host->data->flags & MMC_DATA_READ)
335			sdhci_read_block_pio(host);
336		else
337			sdhci_write_block_pio(host);

339		if (host->num_sg == 0)
340			break;
341	}

343	DBG("PIO transfer complete.\n");
344}
345
346static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
347{
348 u8 count;
349 unsigned target_timeout, current_timeout;
350
351 WARN_ON(host->data);
352
353 if (data == NULL)
354 return;
355
356 DBG("blksz %04x blks %04x flags %08x\n",
357 data->blksz, data->blocks, data->flags);
358 DBG("tsac %d ms nsac %d clk\n",
359 data->timeout_ns / 1000000, data->timeout_clks);
360
361 /* Sanity checks */
362 BUG_ON(data->blksz * data->blocks > 524288);
363 BUG_ON(data->blksz > host->mmc->max_blk_size);
364 BUG_ON(data->blocks > 65535);
365
366 /* timeout in us */
367 target_timeout = data->timeout_ns / 1000 +
368 data->timeout_clks / host->clock;
369
370 /*
371 * Figure out needed cycles.
372 * We do this in steps in order to fit inside a 32 bit int.
373 * The first step is the minimum timeout, which will have a
374 * minimum resolution of 6 bits:
375 * (1) 2^13*1000 > 2^22,
376 * (2) host->timeout_clk < 2^16
377 * =>
378 * (1) / (2) > 2^6
379 */
380 count = 0;
381 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
382 while (current_timeout < target_timeout) {
383 count++;
384 current_timeout <<= 1;
385 if (count >= 0xF)
386 break;
387 }
388
389 if (count >= 0xF) {
390 printk(KERN_WARNING "%s: Too large timeout requested!\n",
391 mmc_hostname(host->mmc));
392 count = 0xE;
393 }
394
395 writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL);
396
397 if (host->flags & SDHCI_USE_DMA) {
398 int count;
399
400 count = pci_map_sg(host->chip->pdev, data->sg, data->sg_len,
401 (data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE);
402 BUG_ON(count != 1);
403
404 writel(sg_dma_address(data->sg), host->ioaddr + SDHCI_DMA_ADDRESS);
405 } else {
406 host->cur_sg = data->sg;
407 host->num_sg = data->sg_len;
408
409 host->offset = 0;
410 host->remain = host->cur_sg->length;
411 }
412
413 /* We do not handle DMA boundaries, so set it to max (512 KiB) */
414 writew(SDHCI_MAKE_BLKSZ(7, data->blksz),
415 host->ioaddr + SDHCI_BLOCK_SIZE);
416 writew(data->blocks, host->ioaddr + SDHCI_BLOCK_COUNT);
417}
418
/*
 * Translate the mmc_data flags into the SDHCI transfer-mode register:
 * block counting, multi-block, direction, and DMA enable.
 */
419static void sdhci_set_transfer_mode(struct sdhci_host *host,
420	struct mmc_data *data)
421{
422	u16 mode;

424	WARN_ON(host->data);

426	if (data == NULL)
427		return;

429	mode = SDHCI_TRNS_BLK_CNT_EN;
430	if (data->blocks > 1)
431		mode |= SDHCI_TRNS_MULTI;
432	if (data->flags & MMC_DATA_READ)
433		mode |= SDHCI_TRNS_READ;
434	if (host->flags & SDHCI_USE_DMA)
435		mode |= SDHCI_TRNS_DMA;

437	writew(mode, host->ioaddr + SDHCI_TRANSFER_MODE);
438}
439
/*
 * Complete the data phase: unmap DMA, work out how many bytes actually
 * transferred from the remaining block count, then either issue the
 * stop command or schedule the finish tasklet.
 */
440static void sdhci_finish_data(struct sdhci_host *host)
441{
442	struct mmc_data *data;
443	u16 blocks;

445	BUG_ON(!host->data);

447	data = host->data;
448	host->data = NULL;

450	if (host->flags & SDHCI_USE_DMA) {
451		pci_unmap_sg(host->chip->pdev, data->sg, data->sg_len,
452			(data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE);
453	}

455	/*
456	 * Controller doesn't count down when in single block mode.
457	 */
458	if ((data->blocks == 1) && (data->error == MMC_ERR_NONE))
459		blocks = 0;
460	else
461		blocks = readw(host->ioaddr + SDHCI_BLOCK_COUNT);
462	data->bytes_xfered = data->blksz * (data->blocks - blocks);

	/* "Done" with blocks outstanding is a controller bug: flag it. */
464	if ((data->error == MMC_ERR_NONE) && blocks) {
465		printk(KERN_ERR "%s: Controller signalled completion even "
466			"though there were blocks left.\n",
467			mmc_hostname(host->mmc));
468		data->error = MMC_ERR_FAILED;
469	}

471	DBG("Ending data transfer (%d bytes)\n", data->bytes_xfered);

473	if (data->stop) {
474		/*
475		 * The controller needs a reset of internal state machines
476		 * upon error conditions.
477		 */
478		if (data->error != MMC_ERR_NONE) {
479			sdhci_reset(host, SDHCI_RESET_CMD);
480			sdhci_reset(host, SDHCI_RESET_DATA);
481		}

483		sdhci_send_command(host, data->stop);
484	} else
485		tasklet_schedule(&host->finish_tasklet);
486}
487
/*
 * Issue a single MMC/SD command. Busy-waits (max 10 ms) for the controller
 * to release its CMD/DAT inhibit bits, arms the 10 s software watchdog,
 * programs data, argument and transfer mode, and finally writes the command
 * register — that last write is what actually starts the command.
 */
static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	DBG("Sending cmd (%x)\n", cmd->opcode);

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inihibit for stop commands, even
	   though they might use busy signaling */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Controller never released "
				"inhibit bit(s).\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = MMC_ERR_FAILED;
			tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	/* Software watchdog in case the completion interrupt never arrives. */
	mod_timer(&host->timer, jiffies + 10 * HZ);

	host->cmd = cmd;

	sdhci_prepare_data(host, cmd->data);

	writel(cmd->arg, host->ioaddr + SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd->data);

	/* The controller cannot return a 136-bit response with busy signal. */
	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		printk(KERN_ERR "%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = MMC_ERR_INVALID;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	/* Translate the MMC core response flags into controller bits. */
	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;
	if (cmd->data)
		flags |= SDHCI_CMD_DATA;

	/* This write kicks off the command. */
	writew(SDHCI_MAKE_CMD(cmd->opcode, flags),
		host->ioaddr + SDHCI_COMMAND);
}
560
/*
 * Read back the response for the command that just completed, then hand
 * control to the data phase (if the command carried data) or schedule
 * the finish tasklet.
 */
static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			/* Reassemble resp[0..3] from the 0x10..0x1F window,
			   borrowing the low byte from the previous word. */
			for (i = 0;i < 4;i++) {
				host->cmd->resp[i] = readl(host->ioaddr +
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					host->cmd->resp[i] |=
						readb(host->ioaddr +
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			host->cmd->resp[0] = readl(host->ioaddr + SDHCI_RESPONSE);
		}
	}

	host->cmd->error = MMC_ERR_NONE;

	DBG("Ending cmd (%x)\n", host->cmd->opcode);

	/* A data command moves on to the data phase; otherwise we are done. */
	if (host->cmd->data)
		host->data = host->cmd->data;
	else
		tasklet_schedule(&host->finish_tasklet);

	host->cmd = NULL;
}
594
/*
 * Program the SD clock to the highest supported frequency not above
 * @clock (0 turns the clock off). The clock is always disabled first,
 * then the internal clock is enabled and polled (max 10 ms) for
 * stability before the card clock is gated back on.
 */
static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int div;
	u16 clk;
	unsigned long timeout;

	if (clock == host->clock)
		return;

	writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		goto out;

	/* Dividers are powers of two; the register encodes divider/2. */
	for (div = 1;div < 256;div *= 2) {
		if ((host->max_clk / div) <= clock)
			break;
	}
	div >>= 1;

	clk = div << SDHCI_DIVIDER_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	writew(clk, host->ioaddr + SDHCI_CLOCK_CONTROL);

	/* Wait max 10 ms */
	timeout = 10;
	while (!((clk = readw(host->ioaddr + SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Internal clock never "
				"stabilised.\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	writew(clk, host->ioaddr + SDHCI_CLOCK_CONTROL);

out:
	host->clock = clock;
}
639
/*
 * Set the bus voltage. @power is an MMC_VDD_* value, or (unsigned short)-1
 * to switch the power off entirely.
 */
static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
	u8 pwr;

	if (host->power == power)
		return;

	if (power == (unsigned short)-1) {
		writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
		goto out;
	}

	/*
	 * Spec says that we should clear the power reg before setting
	 * a new value. Some controllers don't seem to like this though.
	 */
	if (!(host->chip->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
		writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);

	pwr = SDHCI_POWER_ON;

	/* Map the requested VDD to the nearest supported voltage rail. */
	switch (power) {
	case MMC_VDD_170:
	case MMC_VDD_180:
	case MMC_VDD_190:
		pwr |= SDHCI_POWER_180;
		break;
	case MMC_VDD_290:
	case MMC_VDD_300:
	case MMC_VDD_310:
		pwr |= SDHCI_POWER_300;
		break;
	case MMC_VDD_320:
	case MMC_VDD_330:
	case MMC_VDD_340:
		pwr |= SDHCI_POWER_330;
		break;
	default:
		/* ocr_avail (set at probe) should make this unreachable. */
		BUG();
	}

	writeb(pwr, host->ioaddr + SDHCI_POWER_CONTROL);

out:
	host->power = power;
}
686
687/*****************************************************************************\
688 * *
689 * MMC callbacks *
690 * *
691\*****************************************************************************/
692
693static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
694{
695 struct sdhci_host *host;
696 unsigned long flags;
697
698 host = mmc_priv(mmc);
699
700 spin_lock_irqsave(&host->lock, flags);
701
702 WARN_ON(host->mrq != NULL);
703
704 sdhci_activate_led(host);
705
706 host->mrq = mrq;
707
708 if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
709 host->mrq->cmd->error = MMC_ERR_TIMEOUT;
710 tasklet_schedule(&host->finish_tasklet);
711 } else
712 sdhci_send_command(host, mrq->cmd);
713
714 mmiowb();
715 spin_unlock_irqrestore(&host->lock, flags);
716}
717
/*
 * mmc_host_ops.set_ios: apply bus settings (clock, power, bus width,
 * timing) requested by the MMC core.
 */
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host;
	unsigned long flags;
	u8 ctrl;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		writel(0, host->ioaddr + SDHCI_SIGNAL_ENABLE);
		sdhci_init(host);
	}

	sdhci_set_clock(host, ios->clock);

	if (ios->power_mode == MMC_POWER_OFF)
		sdhci_set_power(host, -1);
	else
		sdhci_set_power(host, ios->vdd);

	/* Read-modify-write the host control bits for width and speed. */
	ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		ctrl |= SDHCI_CTRL_4BITBUS;
	else
		ctrl &= ~SDHCI_CTRL_4BITBUS;

	if (ios->timing == MMC_TIMING_SD_HS)
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
761
762static int sdhci_get_ro(struct mmc_host *mmc)
763{
764 struct sdhci_host *host;
765 unsigned long flags;
766 int present;
767
768 host = mmc_priv(mmc);
769
770 spin_lock_irqsave(&host->lock, flags);
771
772 present = readl(host->ioaddr + SDHCI_PRESENT_STATE);
773
774 spin_unlock_irqrestore(&host->lock, flags);
775
776 return !(present & SDHCI_WRITE_PROTECT);
777}
778
/* Host operations exported to the MMC core. */
static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.set_ios	= sdhci_set_ios,
	.get_ro		= sdhci_get_ro,
};
784
785/*****************************************************************************\
786 * *
787 * Tasklets *
788 * *
789\*****************************************************************************/
790
791static void sdhci_tasklet_card(unsigned long param)
792{
793 struct sdhci_host *host;
794 unsigned long flags;
795
796 host = (struct sdhci_host*)param;
797
798 spin_lock_irqsave(&host->lock, flags);
799
800 if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
801 if (host->mrq) {
802 printk(KERN_ERR "%s: Card removed during transfer!\n",
803 mmc_hostname(host->mmc));
804 printk(KERN_ERR "%s: Resetting controller.\n",
805 mmc_hostname(host->mmc));
806
807 sdhci_reset(host, SDHCI_RESET_CMD);
808 sdhci_reset(host, SDHCI_RESET_DATA);
809
810 host->mrq->cmd->error = MMC_ERR_FAILED;
811 tasklet_schedule(&host->finish_tasklet);
812 }
813 }
814
815 spin_unlock_irqrestore(&host->lock, flags);
816
817 mmc_detect_change(host->mmc, msecs_to_jiffies(500));
818}
819
/*
 * Request-completion tasklet: cancel the watchdog, reset the controller
 * after any error, clear the per-request state and report the result to
 * the MMC core.
 */
static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;
	struct mmc_request *mrq;

	host = (struct sdhci_host*)param;

	spin_lock_irqsave(&host->lock, flags);

	del_timer(&host->timer);

	mrq = host->mrq;

	DBG("Ending request, cmd (%x)\n", mrq->cmd->opcode);

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if ((mrq->cmd->error != MMC_ERR_NONE) ||
		(mrq->data && ((mrq->data->error != MMC_ERR_NONE) ||
		(mrq->data->stop && (mrq->data->stop->error != MMC_ERR_NONE))))) {

		/* Some controllers need this kick or reset won't work here */
		if (host->chip->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
			unsigned int clock;

			/* This is to force an update */
			clock = host->clock;
			host->clock = 0;
			sdhci_set_clock(host, clock);
		}

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_reset(host, SDHCI_RESET_CMD);
		sdhci_reset(host, SDHCI_RESET_DATA);
	}

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

	sdhci_deactivate_led(host);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	/* Must be called without the host lock held. */
	mmc_request_done(host->mmc, mrq);
}
871
/*
 * Software watchdog (armed in sdhci_send_command): fires when the hardware
 * never signalled completion. Fails whichever phase was in flight.
 */
static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host*)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		printk(KERN_ERR "%s: Timeout waiting for hardware "
			"interrupt.\n", mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			/* Data phase stalled: finish it with a timeout. */
			host->data->error = MMC_ERR_TIMEOUT;
			sdhci_finish_data(host);
		} else {
			/* Command phase stalled (or never even started). */
			if (host->cmd)
				host->cmd->error = MMC_ERR_TIMEOUT;
			else
				host->mrq->cmd->error = MMC_ERR_TIMEOUT;

			tasklet_schedule(&host->finish_tasklet);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
902
903/*****************************************************************************\
904 * *
905 * Interrupt handling *
906 * *
907\*****************************************************************************/
908
909static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
910{
911 BUG_ON(intmask == 0);
912
913 if (!host->cmd) {
914 printk(KERN_ERR "%s: Got command interrupt even though no "
915 "command operation was in progress.\n",
916 mmc_hostname(host->mmc));
917 sdhci_dumpregs(host);
918 return;
919 }
920
921 if (intmask & SDHCI_INT_RESPONSE)
922 sdhci_finish_command(host);
923 else {
924 if (intmask & SDHCI_INT_TIMEOUT)
925 host->cmd->error = MMC_ERR_TIMEOUT;
926 else if (intmask & SDHCI_INT_CRC)
927 host->cmd->error = MMC_ERR_BADCRC;
928 else if (intmask & (SDHCI_INT_END_BIT | SDHCI_INT_INDEX))
929 host->cmd->error = MMC_ERR_FAILED;
930 else
931 host->cmd->error = MMC_ERR_INVALID;
932
933 tasklet_schedule(&host->finish_tasklet);
934 }
935}
936
/*
 * Handle data-related interrupt bits: record any data error, service PIO
 * buffer-ready events, and finish the data phase on transfer end.
 */
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	BUG_ON(intmask == 0);

	if (!host->data) {
		/*
		 * A data end interrupt is sent together with the response
		 * for the stop command.
		 */
		if (intmask & SDHCI_INT_DATA_END)
			return;

		printk(KERN_ERR "%s: Got data interrupt even though no "
			"data operation was in progress.\n",
			mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = MMC_ERR_TIMEOUT;
	else if (intmask & SDHCI_INT_DATA_CRC)
		host->data->error = MMC_ERR_BADCRC;
	else if (intmask & SDHCI_INT_DATA_END_BIT)
		host->data->error = MMC_ERR_FAILED;

	if (host->data->error != MMC_ERR_NONE)
		sdhci_finish_data(host);
	else {
		/* PIO path: move data while buffer space/data is available. */
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		if (intmask & SDHCI_INT_DATA_END)
			sdhci_finish_data(host);
	}
}
974
975static irqreturn_t sdhci_irq(int irq, void *dev_id)
976{
977 irqreturn_t result;
978 struct sdhci_host* host = dev_id;
979 u32 intmask;
980
981 spin_lock(&host->lock);
982
983 intmask = readl(host->ioaddr + SDHCI_INT_STATUS);
984
985 if (!intmask || intmask == 0xffffffff) {
986 result = IRQ_NONE;
987 goto out;
988 }
989
990 DBG("*** %s got interrupt: 0x%08x\n", host->slot_descr, intmask);
991
992 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
993 writel(intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE),
994 host->ioaddr + SDHCI_INT_STATUS);
995 tasklet_schedule(&host->card_tasklet);
996 }
997
998 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
999
1000 if (intmask & SDHCI_INT_CMD_MASK) {
1001 writel(intmask & SDHCI_INT_CMD_MASK,
1002 host->ioaddr + SDHCI_INT_STATUS);
1003 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
1004 }
1005
1006 if (intmask & SDHCI_INT_DATA_MASK) {
1007 writel(intmask & SDHCI_INT_DATA_MASK,
1008 host->ioaddr + SDHCI_INT_STATUS);
1009 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
1010 }
1011
1012 intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
1013
1014 if (intmask & SDHCI_INT_BUS_POWER) {
1015 printk(KERN_ERR "%s: Card is consuming too much power!\n",
1016 mmc_hostname(host->mmc));
1017 writel(SDHCI_INT_BUS_POWER, host->ioaddr + SDHCI_INT_STATUS);
1018 }
1019
1020 intmask &= SDHCI_INT_BUS_POWER;
1021
1022 if (intmask) {
1023 printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
1024 mmc_hostname(host->mmc), intmask);
1025 sdhci_dumpregs(host);
1026
1027 writel(intmask, host->ioaddr + SDHCI_INT_STATUS);
1028 }
1029
1030 result = IRQ_HANDLED;
1031
1032 mmiowb();
1033out:
1034 spin_unlock(&host->lock);
1035
1036 return result;
1037}
1038
1039/*****************************************************************************\
1040 * *
1041 * Suspend/resume *
1042 * *
1043\*****************************************************************************/
1044
1045#ifdef CONFIG_PM
1046
/*
 * PCI suspend hook: suspend every slot's MMC host (unwinding on failure),
 * save PCI state, release the IRQs and power the device down.
 */
static int sdhci_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct sdhci_chip *chip;
	int i, ret;

	chip = pci_get_drvdata(pdev);
	if (!chip)
		return 0;

	DBG("Suspending...\n");

	for (i = 0;i < chip->num_slots;i++) {
		if (!chip->hosts[i])
			continue;
		ret = mmc_suspend_host(chip->hosts[i]->mmc, state);
		if (ret) {
			/* Roll back the slots we already suspended. */
			for (i--;i >= 0;i--)
				mmc_resume_host(chip->hosts[i]->mmc);
			return ret;
		}
	}

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);

	/* IRQs are re-requested in sdhci_resume(). */
	for (i = 0;i < chip->num_slots;i++) {
		if (!chip->hosts[i])
			continue;
		free_irq(chip->hosts[i]->irq, chip->hosts[i]);
	}

	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
1083
/*
 * PCI resume hook: re-enable the device, re-request each slot's IRQ,
 * re-initialise the controller and resume the MMC host.
 */
static int sdhci_resume (struct pci_dev *pdev)
{
	struct sdhci_chip *chip;
	int i, ret;

	chip = pci_get_drvdata(pdev);
	if (!chip)
		return 0;

	DBG("Resuming...\n");

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	/*
	 * NOTE(review): a failure mid-loop returns with earlier slots already
	 * resumed and no unwinding — confirm the PM core tolerates this.
	 */
	for (i = 0;i < chip->num_slots;i++) {
		if (!chip->hosts[i])
			continue;
		if (chip->hosts[i]->flags & SDHCI_USE_DMA)
			pci_set_master(pdev);
		ret = request_irq(chip->hosts[i]->irq, sdhci_irq,
			IRQF_SHARED, chip->hosts[i]->slot_descr,
			chip->hosts[i]);
		if (ret)
			return ret;
		sdhci_init(chip->hosts[i]);
		mmiowb();
		ret = mmc_resume_host(chip->hosts[i]->mmc);
		if (ret)
			return ret;
	}

	return 0;
}
1120
1121#else /* CONFIG_PM */
1122
1123#define sdhci_suspend NULL
1124#define sdhci_resume NULL
1125
1126#endif /* CONFIG_PM */
1127
1128/*****************************************************************************\
1129 * *
1130 * Device probing/removal *
1131 * *
1132\*****************************************************************************/
1133
/*
 * Probe one controller slot: validate the BAR and interface class, map the
 * registers, read the capabilities, configure the mmc_host limits, set up
 * tasklets/timer/IRQ and register with the MMC core.
 *
 * Returns 0 on success or a negative errno; on failure every acquired
 * resource is released via the goto-cleanup chain at the bottom.
 */
static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
{
	int ret;
	unsigned int version;
	struct sdhci_chip *chip;
	struct mmc_host *mmc;
	struct sdhci_host *host;

	u8 first_bar;
	unsigned int caps;

	chip = pci_get_drvdata(pdev);
	BUG_ON(!chip);

	/* Slot info register tells us which BAR the first slot uses. */
	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
	if (ret)
		return ret;

	first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;

	if (first_bar > 5) {
		printk(KERN_ERR DRIVER_NAME ": Invalid first BAR. Aborting.\n");
		return -ENODEV;
	}

	if (!(pci_resource_flags(pdev, first_bar + slot) & IORESOURCE_MEM)) {
		printk(KERN_ERR DRIVER_NAME ": BAR is not iomem. Aborting.\n");
		return -ENODEV;
	}

	/* The standard register window is exactly 256 bytes. */
	if (pci_resource_len(pdev, first_bar + slot) != 0x100) {
		printk(KERN_ERR DRIVER_NAME ": Invalid iomem size. "
			"You may experience problems.\n");
	}

	/* Low byte of the PCI class encodes the programming interface. */
	if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
		printk(KERN_ERR DRIVER_NAME ": Vendor specific interface. Aborting.\n");
		return -ENODEV;
	}

	if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
		printk(KERN_ERR DRIVER_NAME ": Unknown interface. Aborting.\n");
		return -ENODEV;
	}

	mmc = mmc_alloc_host(sizeof(struct sdhci_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->chip = chip;
	chip->hosts[slot] = host;

	host->bar = first_bar + slot;

	host->addr = pci_resource_start(pdev, host->bar);
	host->irq = pdev->irq;

	DBG("slot %d at 0x%08lx, irq %d\n", slot, host->addr, host->irq);

	snprintf(host->slot_descr, 20, "sdhci:slot%d", slot);

	ret = pci_request_region(pdev, host->bar, host->slot_descr);
	if (ret)
		goto free;

	host->ioaddr = ioremap_nocache(host->addr,
		pci_resource_len(pdev, host->bar));
	if (!host->ioaddr) {
		ret = -ENOMEM;
		goto release;
	}

	sdhci_reset(host, SDHCI_RESET_ALL);

	/* Only spec version 1.00 is known to this driver. */
	version = readw(host->ioaddr + SDHCI_HOST_VERSION);
	version = (version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
	if (version != 0) {
		printk(KERN_ERR "%s: Unknown controller version (%d). "
			"You may experience problems.\n", host->slot_descr,
			version);
	}

	caps = readl(host->ioaddr + SDHCI_CAPABILITIES);

	/* Decide DMA vs PIO: module params override quirks override caps. */
	if (debug_nodma)
		DBG("DMA forced off\n");
	else if (debug_forcedma) {
		DBG("DMA forced on\n");
		host->flags |= SDHCI_USE_DMA;
	} else if (chip->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_DMA;
	else if ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA)
		DBG("Controller doesn't have DMA interface\n");
	else if (!(caps & SDHCI_CAN_DO_DMA))
		DBG("Controller doesn't have DMA capability\n");
	else
		host->flags |= SDHCI_USE_DMA;

	if (host->flags & SDHCI_USE_DMA) {
		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
			printk(KERN_WARNING "%s: No suitable DMA available. "
				"Falling back to PIO.\n", host->slot_descr);
			host->flags &= ~SDHCI_USE_DMA;
		}
	}

	if (host->flags & SDHCI_USE_DMA)
		pci_set_master(pdev);
	else /* XXX: Hack to get MMC layer to avoid highmem */
		pdev->dma_mask = 0;

	/* Capabilities report the base clock in MHz; convert to Hz. */
	host->max_clk =
		(caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
	if (host->max_clk == 0) {
		printk(KERN_ERR "%s: Hardware doesn't specify base clock "
			"frequency.\n", host->slot_descr);
		ret = -ENODEV;
		goto unmap;
	}
	host->max_clk *= 1000000;

	host->timeout_clk =
		(caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
	if (host->timeout_clk == 0) {
		printk(KERN_ERR "%s: Hardware doesn't specify timeout clock "
			"frequency.\n", host->slot_descr);
		ret = -ENODEV;
		goto unmap;
	}
	if (caps & SDHCI_TIMEOUT_CLK_UNIT)
		host->timeout_clk *= 1000;

	/*
	 * Set host parameters.
	 */
	mmc->ops = &sdhci_ops;
	/* Smallest clock is max_clk divided by the largest divider (256). */
	mmc->f_min = host->max_clk / 256;
	mmc->f_max = host->max_clk;
	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_BYTEBLOCK;

	if (caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

	mmc->ocr_avail = 0;
	if (caps & SDHCI_CAN_VDD_330)
		mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
	if (caps & SDHCI_CAN_VDD_300)
		mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
	if (caps & SDHCI_CAN_VDD_180)
		mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19;

	if (mmc->ocr_avail == 0) {
		printk(KERN_ERR "%s: Hardware doesn't report any "
			"support voltages.\n", host->slot_descr);
		ret = -ENODEV;
		goto unmap;
	}

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of segments. Hardware cannot do scatter lists.
	 */
	if (host->flags & SDHCI_USE_DMA)
		mmc->max_hw_segs = 1;
	else
		mmc->max_hw_segs = 16;
	mmc->max_phys_segs = 16;

	/*
	 * Maximum number of sectors in one transfer. Limited by DMA boundary
	 * size (512KiB).
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;
	if (mmc->max_blk_size >= 3) {
		printk(KERN_ERR "%s: Invalid maximum block size.\n",
			host->slot_descr);
		ret = -ENODEV;
		goto unmap;
	}
	mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = 65535;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->card_tasklet,
		sdhci_tasklet_card, (unsigned long)host);
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);

	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
		host->slot_descr, host);
	if (ret)
		goto untasklet;

	sdhci_init(host);

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif

	mmiowb();

	mmc_add_host(mmc);

	printk(KERN_INFO "%s: SDHCI at 0x%08lx irq %d %s\n", mmc_hostname(mmc),
		host->addr, host->irq,
		(host->flags & SDHCI_USE_DMA)?"DMA":"PIO");

	return 0;

untasklet:
	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);
unmap:
	iounmap(host->ioaddr);
release:
	pci_release_region(pdev, host->bar);
free:
	mmc_free_host(mmc);

	return ret;
}
1379
/*
 * Tear down one slot in the reverse order of sdhci_probe_slot():
 * unregister from the MMC core first so no new requests arrive, then
 * quiesce the hardware and release IRQ, timer, tasklets and resources.
 */
static void sdhci_remove_slot(struct pci_dev *pdev, int slot)
{
	struct sdhci_chip *chip;
	struct mmc_host *mmc;
	struct sdhci_host *host;

	chip = pci_get_drvdata(pdev);
	host = chip->hosts[slot];
	mmc = host->mmc;

	chip->hosts[slot] = NULL;

	mmc_remove_host(mmc);

	sdhci_reset(host, SDHCI_RESET_ALL);

	free_irq(host->irq, host);

	del_timer_sync(&host->timer);

	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);

	iounmap(host->ioaddr);

	pci_release_region(pdev, host->bar);

	mmc_free_host(mmc);
}
1409
/*
 * PCI probe: read the slot count from PCI_SLOT_INFO, allocate the chip
 * structure (with a trailing array of host pointers) and probe each slot,
 * unwinding all previously-probed slots if any one fails.
 */
static int __devinit sdhci_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	int ret, i;
	u8 slots, rev;
	struct sdhci_chip *chip;

	BUG_ON(pdev == NULL);
	BUG_ON(ent == NULL);

	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev);

	printk(KERN_INFO DRIVER_NAME
		": SDHCI controller found at %s [%04x:%04x] (rev %x)\n",
		pci_name(pdev), (int)pdev->vendor, (int)pdev->device,
		(int)rev);

	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
	if (ret)
		return ret;

	/* Register encodes (slots - 1). */
	slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
	DBG("found %d slot(s)\n", slots);
	if (slots == 0)
		return -ENODEV;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	chip = kzalloc(sizeof(struct sdhci_chip) +
		sizeof(struct sdhci_host*) * slots, GFP_KERNEL);
	if (!chip) {
		ret = -ENOMEM;
		goto err;
	}

	chip->pdev = pdev;
	chip->quirks = ent->driver_data;

	/* Module parameter overrides the id-table quirks. */
	if (debug_quirks)
		chip->quirks = debug_quirks;

	chip->num_slots = slots;
	pci_set_drvdata(pdev, chip);

	for (i = 0;i < slots;i++) {
		ret = sdhci_probe_slot(pdev, i);
		if (ret) {
			/* Unwind slots that probed successfully. */
			for (i--;i >= 0;i--)
				sdhci_remove_slot(pdev, i);
			goto free;
		}
	}

	return 0;

free:
	pci_set_drvdata(pdev, NULL);
	kfree(chip);

err:
	pci_disable_device(pdev);
	return ret;
}
1475
1476static void __devexit sdhci_remove(struct pci_dev *pdev)
1477{
1478 int i;
1479 struct sdhci_chip *chip;
1480
1481 chip = pci_get_drvdata(pdev);
1482
1483 if (chip) {
1484 for (i = 0;i < chip->num_slots;i++)
1485 sdhci_remove_slot(pdev, i);
1486
1487 pci_set_drvdata(pdev, NULL);
1488
1489 kfree(chip);
1490 }
1491
1492 pci_disable_device(pdev);
1493}
1494
/*
 * PCI glue. Without CONFIG_PM, sdhci_suspend/sdhci_resume are #defined
 * to NULL above.
 */
static struct pci_driver sdhci_driver = {
	.name = DRIVER_NAME,
	.id_table = pci_ids,
	.probe = sdhci_probe,
	.remove = __devexit_p(sdhci_remove),
	.suspend = sdhci_suspend,
	.resume = sdhci_resume,
};
1503
1504/*****************************************************************************\
1505 * *
1506 * Driver init/exit *
1507 * *
1508\*****************************************************************************/
1509
/* Module entry point: announce ourselves and register the PCI driver. */
static int __init sdhci_drv_init(void)
{
	printk(KERN_INFO DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return pci_register_driver(&sdhci_driver);
}
1518
/* Module exit point: unregister the PCI driver (removes all devices). */
static void __exit sdhci_drv_exit(void)
{
	DBG("Exiting\n");

	pci_unregister_driver(&sdhci_driver);
}
1525
module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

/* Debug knobs; mode 0444 makes them read-only via sysfs after load. */
module_param(debug_nodma, uint, 0444);
module_param(debug_forcedma, uint, 0444);
module_param(debug_quirks, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_nodma, "Forcefully disable DMA transfers. (default 0)");
MODULE_PARM_DESC(debug_forcedma, "Forcefully enable DMA transfers. (default 0)");
MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
new file mode 100644
index 000000000000..7400f4bc114f
--- /dev/null
+++ b/drivers/mmc/host/sdhci.h
@@ -0,0 +1,210 @@
1/*
2 * linux/drivers/mmc/sdhci.h - Secure Digital Host Controller Interface driver
3 *
4 * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 */
11
12/*
13 * PCI registers
14 */
15
16#define PCI_SDHCI_IFPIO 0x00
17#define PCI_SDHCI_IFDMA 0x01
18#define PCI_SDHCI_IFVENDOR 0x02
19
20#define PCI_SLOT_INFO 0x40 /* 8 bits */
21#define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7)
22#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07
23
24/*
25 * Controller registers
26 */
27
28#define SDHCI_DMA_ADDRESS 0x00
29
30#define SDHCI_BLOCK_SIZE 0x04
31#define SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF))
32
33#define SDHCI_BLOCK_COUNT 0x06
34
35#define SDHCI_ARGUMENT 0x08
36
37#define SDHCI_TRANSFER_MODE 0x0C
38#define SDHCI_TRNS_DMA 0x01
39#define SDHCI_TRNS_BLK_CNT_EN 0x02
40#define SDHCI_TRNS_ACMD12 0x04
41#define SDHCI_TRNS_READ 0x10
42#define SDHCI_TRNS_MULTI 0x20
43
44#define SDHCI_COMMAND 0x0E
45#define SDHCI_CMD_RESP_MASK 0x03
46#define SDHCI_CMD_CRC 0x08
47#define SDHCI_CMD_INDEX 0x10
48#define SDHCI_CMD_DATA 0x20
49
50#define SDHCI_CMD_RESP_NONE 0x00
51#define SDHCI_CMD_RESP_LONG 0x01
52#define SDHCI_CMD_RESP_SHORT 0x02
53#define SDHCI_CMD_RESP_SHORT_BUSY 0x03
54
55#define SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff))
56
57#define SDHCI_RESPONSE 0x10
58
59#define SDHCI_BUFFER 0x20
60
61#define SDHCI_PRESENT_STATE 0x24
62#define SDHCI_CMD_INHIBIT 0x00000001
63#define SDHCI_DATA_INHIBIT 0x00000002
64#define SDHCI_DOING_WRITE 0x00000100
65#define SDHCI_DOING_READ 0x00000200
66#define SDHCI_SPACE_AVAILABLE 0x00000400
67#define SDHCI_DATA_AVAILABLE 0x00000800
68#define SDHCI_CARD_PRESENT 0x00010000
69#define SDHCI_WRITE_PROTECT 0x00080000
70
71#define SDHCI_HOST_CONTROL 0x28
72#define SDHCI_CTRL_LED 0x01
73#define SDHCI_CTRL_4BITBUS 0x02
74#define SDHCI_CTRL_HISPD 0x04
75
76#define SDHCI_POWER_CONTROL 0x29
77#define SDHCI_POWER_ON 0x01
78#define SDHCI_POWER_180 0x0A
79#define SDHCI_POWER_300 0x0C
80#define SDHCI_POWER_330 0x0E
81
82#define SDHCI_BLOCK_GAP_CONTROL 0x2A
83
84#define SDHCI_WALK_UP_CONTROL 0x2B
85
86#define SDHCI_CLOCK_CONTROL 0x2C
87#define SDHCI_DIVIDER_SHIFT 8
88#define SDHCI_CLOCK_CARD_EN 0x0004
89#define SDHCI_CLOCK_INT_STABLE 0x0002
90#define SDHCI_CLOCK_INT_EN 0x0001
91
92#define SDHCI_TIMEOUT_CONTROL 0x2E
93
94#define SDHCI_SOFTWARE_RESET 0x2F
95#define SDHCI_RESET_ALL 0x01
96#define SDHCI_RESET_CMD 0x02
97#define SDHCI_RESET_DATA 0x04
98
99#define SDHCI_INT_STATUS 0x30
100#define SDHCI_INT_ENABLE 0x34
101#define SDHCI_SIGNAL_ENABLE 0x38
102#define SDHCI_INT_RESPONSE 0x00000001
103#define SDHCI_INT_DATA_END 0x00000002
104#define SDHCI_INT_DMA_END 0x00000008
105#define SDHCI_INT_SPACE_AVAIL 0x00000010
106#define SDHCI_INT_DATA_AVAIL 0x00000020
107#define SDHCI_INT_CARD_INSERT 0x00000040
108#define SDHCI_INT_CARD_REMOVE 0x00000080
109#define SDHCI_INT_CARD_INT 0x00000100
110#define SDHCI_INT_TIMEOUT 0x00010000
111#define SDHCI_INT_CRC 0x00020000
112#define SDHCI_INT_END_BIT 0x00040000
113#define SDHCI_INT_INDEX 0x00080000
114#define SDHCI_INT_DATA_TIMEOUT 0x00100000
115#define SDHCI_INT_DATA_CRC 0x00200000
116#define SDHCI_INT_DATA_END_BIT 0x00400000
117#define SDHCI_INT_BUS_POWER 0x00800000
118#define SDHCI_INT_ACMD12ERR 0x01000000
119
120#define SDHCI_INT_NORMAL_MASK 0x00007FFF
121#define SDHCI_INT_ERROR_MASK 0xFFFF8000
122
123#define SDHCI_INT_CMD_MASK (SDHCI_INT_RESPONSE | SDHCI_INT_TIMEOUT | \
124 SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX)
125#define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
126 SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
127 SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
128 SDHCI_INT_DATA_END_BIT)
129
130#define SDHCI_ACMD12_ERR 0x3C
131
132/* 3E-3F reserved */
133
134#define SDHCI_CAPABILITIES 0x40
135#define SDHCI_TIMEOUT_CLK_MASK 0x0000003F
136#define SDHCI_TIMEOUT_CLK_SHIFT 0
137#define SDHCI_TIMEOUT_CLK_UNIT 0x00000080
138#define SDHCI_CLOCK_BASE_MASK 0x00003F00
139#define SDHCI_CLOCK_BASE_SHIFT 8
140#define SDHCI_MAX_BLOCK_MASK 0x00030000
141#define SDHCI_MAX_BLOCK_SHIFT 16
142#define SDHCI_CAN_DO_HISPD 0x00200000
143#define SDHCI_CAN_DO_DMA 0x00400000
144#define SDHCI_CAN_VDD_330 0x01000000
145#define SDHCI_CAN_VDD_300 0x02000000
146#define SDHCI_CAN_VDD_180 0x04000000
147
148/* 44-47 reserved for more caps */
149
150#define SDHCI_MAX_CURRENT 0x48
151
152/* 4C-4F reserved for more max current */
153
154/* 50-FB reserved */
155
156#define SDHCI_SLOT_INT_STATUS 0xFC
157
158#define SDHCI_HOST_VERSION 0xFE
159#define SDHCI_VENDOR_VER_MASK 0xFF00
160#define SDHCI_VENDOR_VER_SHIFT 8
161#define SDHCI_SPEC_VER_MASK 0x00FF
162#define SDHCI_SPEC_VER_SHIFT 0
163
164struct sdhci_chip;
165
/* Per-slot state; lives in the driver-private area of the mmc_host. */
struct sdhci_host {
	struct sdhci_chip	*chip;
	struct mmc_host		*mmc;		/* MMC structure */

	spinlock_t		lock;		/* Protects host state (spinlock, despite old "Mutex" label) */

	int			flags;		/* Host attributes */
#define SDHCI_USE_DMA		(1<<0)

	unsigned int		max_clk;	/* Max possible freq (Hz; probe multiplies caps MHz by 10^6) */
	unsigned int		timeout_clk;	/* Timeout freq (KHz) */

	unsigned int		clock;		/* Current clock (presumably Hz, from ios->clock — verify) */
	unsigned short		power;		/* Current voltage */

	struct mmc_request	*mrq;		/* Current request */
	struct mmc_command	*cmd;		/* Current command */
	struct mmc_data		*data;		/* Current data request */

	struct scatterlist	*cur_sg;	/* We're working on this */
	int			num_sg;		/* Entries left */
	int			offset;		/* Offset into current sg */
	int			remain;		/* Bytes left in current */

	char			slot_descr[20];	/* Name for reservations */

	int			irq;		/* Device IRQ */
	int			bar;		/* PCI BAR index */
	unsigned long		addr;		/* Bus address */
	void __iomem *		ioaddr;		/* Mapped address */

	struct tasklet_struct	card_tasklet;	/* Tasklet structures */
	struct tasklet_struct	finish_tasklet;

	struct timer_list	timer;		/* Timer for timeouts */
};
202
/* Per-PCI-device state; allocated with room for num_slots host pointers. */
struct sdhci_chip {
	struct pci_dev		*pdev;

	unsigned long		quirks;

	int			num_slots;	/* Slots on controller */
	struct sdhci_host	*hosts[0];	/* Pointers to hosts (trailing variable-length array) */
};
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
new file mode 100644
index 000000000000..b0d77d298412
--- /dev/null
+++ b/drivers/mmc/host/tifm_sd.c
@@ -0,0 +1,1102 @@
1/*
2 * tifm_sd.c - TI FlashMedia driver
3 *
4 * Copyright (C) 2006 Alex Dubov <oakad@yahoo.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Special thanks to Brad Campbell for extensive testing of this driver.
11 *
12 */
13
14
15#include <linux/tifm.h>
16#include <linux/mmc/host.h>
17#include <linux/highmem.h>
18#include <linux/scatterlist.h>
19#include <asm/io.h>
20
21#define DRIVER_NAME "tifm_sd"
22#define DRIVER_VERSION "0.8"
23
/*
 * Module options, runtime-writable through sysfs (mode 0644):
 *  no_dma        - force PIO through the host buffer instead of DMA
 *  fixed_timeout - never reprogram the data timeout register per request
 *
 * NOTE(review): declared as int but registered with the "bool" param type;
 * this matches the era's param_set_bool (which stores into an int).  When
 * porting to kernels where module_param(..., bool, ...) demands a C bool,
 * change the declarations accordingly.
 */
static int no_dma = 0;
static int fixed_timeout = 0;
module_param(no_dma, bool, 0644);
module_param(fixed_timeout, bool, 0644);
28
29/* Constants here are mostly from OMAP5912 datasheet */
30#define TIFM_MMCSD_RESET 0x0002
31#define TIFM_MMCSD_CLKMASK 0x03ff
32#define TIFM_MMCSD_POWER 0x0800
33#define TIFM_MMCSD_4BBUS 0x8000
34#define TIFM_MMCSD_RXDE 0x8000 /* rx dma enable */
35#define TIFM_MMCSD_TXDE 0x0080 /* tx dma enable */
36#define TIFM_MMCSD_BUFINT 0x0c00 /* set bits: AE, AF */
37#define TIFM_MMCSD_DPE 0x0020 /* data timeout counted in kilocycles */
38#define TIFM_MMCSD_INAB 0x0080 /* abort / initialize command */
39#define TIFM_MMCSD_READ 0x8000
40
41#define TIFM_MMCSD_ERRMASK 0x01e0 /* set bits: CCRC, CTO, DCRC, DTO */
42#define TIFM_MMCSD_EOC 0x0001 /* end of command phase */
43#define TIFM_MMCSD_CD 0x0002 /* card detect */
44#define TIFM_MMCSD_CB 0x0004 /* card enter busy state */
45#define TIFM_MMCSD_BRS 0x0008 /* block received/sent */
46#define TIFM_MMCSD_EOFB 0x0010 /* card exit busy state */
47#define TIFM_MMCSD_DTO 0x0020 /* data time-out */
48#define TIFM_MMCSD_DCRC 0x0040 /* data crc error */
49#define TIFM_MMCSD_CTO 0x0080 /* command time-out */
50#define TIFM_MMCSD_CCRC 0x0100 /* command crc error */
51#define TIFM_MMCSD_AF 0x0400 /* fifo almost full */
52#define TIFM_MMCSD_AE 0x0800 /* fifo almost empty */
53#define TIFM_MMCSD_OCRB 0x1000 /* OCR busy */
54#define TIFM_MMCSD_CIRQ 0x2000 /* card irq (cmd40/sdio) */
55#define TIFM_MMCSD_CERR 0x4000 /* card status error */
56
57#define TIFM_MMCSD_ODTO 0x0040 /* open drain / extended timeout */
58#define TIFM_MMCSD_CARD_RO 0x0200 /* card is read-only */
59
60#define TIFM_MMCSD_FIFO_SIZE 0x0020
61
62#define TIFM_MMCSD_RSP_R0 0x0000
63#define TIFM_MMCSD_RSP_R1 0x0100
64#define TIFM_MMCSD_RSP_R2 0x0200
65#define TIFM_MMCSD_RSP_R3 0x0300
66#define TIFM_MMCSD_RSP_R4 0x0400
67#define TIFM_MMCSD_RSP_R5 0x0500
68#define TIFM_MMCSD_RSP_R6 0x0600
69
70#define TIFM_MMCSD_RSP_BUSY 0x0800
71
72#define TIFM_MMCSD_CMD_BC 0x0000
73#define TIFM_MMCSD_CMD_BCR 0x1000
74#define TIFM_MMCSD_CMD_AC 0x2000
75#define TIFM_MMCSD_CMD_ADTC 0x3000
76
77#define TIFM_MMCSD_MAX_BLOCK_SIZE 0x0800UL
78
/* Bits of tifm_sd.cmd_flags - progress state of the request in flight. */
enum {
	CMD_READY    = 0x0001,	/* command phase done (EOC/CERR seen) */
	FIFO_READY   = 0x0002,	/* DMA consumed the whole scatterlist */
	BRS_READY    = 0x0004,	/* block received/sent interrupt seen */
	SCMD_ACTIVE  = 0x0008,	/* stop command has been issued */
	SCMD_READY   = 0x0010,	/* stop command has completed */
	CARD_BUSY    = 0x0020,	/* card asserted busy (CB), no EOFB yet */
	DATA_CARRY   = 0x0040	/* odd byte parked in bounce_buf_data[0] */
};
88
/* Driver state for one TI FlashMedia socket operating in SD/MMC mode. */
struct tifm_sd {
	struct tifm_dev       *dev;		/* underlying tifm socket */

	unsigned short        eject:1,		/* socket gone; fail requests */
			      open_drain:1,	/* bus in open-drain mode */
			      no_dma:1;		/* PIO instead of DMA */
	unsigned short        cmd_flags;	/* CMD_READY etc., see enum above */

	unsigned int          clk_freq;		/* base clock, 20 or 24 MHz */
	unsigned int          clk_div;		/* divider written to CONFIG */
	unsigned long         timeout_jiffies;	/* watchdog for stuck requests */

	struct tasklet_struct finish_tasklet;	/* completes ->req */
	struct timer_list     timer;		/* fires tifm_sd_abort */
	struct mmc_request    *req;		/* request in flight, or NULL */

	int                   sg_len;		/* entries in mapped sg list */
	int                   sg_pos;		/* current sg entry */
	unsigned int          block_pos;	/* byte offset in current entry */
	struct scatterlist    bounce_buf;	/* wraps bounce_buf_data for DMA */
	unsigned char         bounce_buf_data[TIFM_MMCSD_MAX_BLOCK_SIZE];
};
111
112/* for some reason, host won't respond correctly to readw/writew */
/*
 * Drain up to @cnt bytes from the data FIFO into page @pg at offset @off.
 * Each 32-bit read yields two data bytes (low byte first); an odd @cnt
 * leaves the surplus high byte in bounce_buf_data[0] with DATA_CARRY set,
 * and the next call consumes that byte before touching the FIFO again.
 * Runs with interrupts disabled (atomic kmap).
 */
static void tifm_sd_read_fifo(struct tifm_sd *host, struct page *pg,
			      unsigned int off, unsigned int cnt)
{
	struct tifm_dev *sock = host->dev;
	unsigned char *buf;
	unsigned int pos = 0, val;

	buf = kmap_atomic(pg, KM_BIO_DST_IRQ) + off;
	/* Byte held over from the previous odd-sized chunk goes first. */
	if (host->cmd_flags & DATA_CARRY) {
		buf[pos++] = host->bounce_buf_data[0];
		host->cmd_flags &= ~DATA_CARRY;
	}

	while (pos < cnt) {
		val = readl(sock->addr + SOCK_MMCSD_DATA);
		buf[pos++] = val & 0xff;
		if (pos == cnt) {
			/* Odd count: park the high byte for the next call. */
			host->bounce_buf_data[0] = (val >> 8) & 0xff;
			host->cmd_flags |= DATA_CARRY;
			break;
		}
		buf[pos++] = (val >> 8) & 0xff;
	}
	kunmap_atomic(buf - off, KM_BIO_DST_IRQ);
}
138
/*
 * Mirror of tifm_sd_read_fifo for writes: push up to @cnt bytes from page
 * @pg at @off into the FIFO, two bytes per 32-bit write (low byte first).
 * An odd @cnt parks the trailing byte in bounce_buf_data[0] (DATA_CARRY);
 * it is paired with the first byte of the next chunk, or flushed alone by
 * tifm_sd_transfer_data at end of the scatterlist.
 */
static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg,
			       unsigned int off, unsigned int cnt)
{
	struct tifm_dev *sock = host->dev;
	unsigned char *buf;
	unsigned int pos = 0, val;

	buf = kmap_atomic(pg, KM_BIO_SRC_IRQ) + off;
	/* Complete the half-word carried over from the previous chunk. */
	if (host->cmd_flags & DATA_CARRY) {
		val = host->bounce_buf_data[0] | ((buf[pos++] << 8) & 0xff00);
		writel(val, sock->addr + SOCK_MMCSD_DATA);
		host->cmd_flags &= ~DATA_CARRY;
	}

	while (pos < cnt) {
		val = buf[pos++];
		if (pos == cnt) {
			/* Odd count: hold the low byte for later. */
			host->bounce_buf_data[0] = val & 0xff;
			host->cmd_flags |= DATA_CARRY;
			break;
		}
		val |= (buf[pos++] << 8) & 0xff00;
		writel(val, sock->addr + SOCK_MMCSD_DATA);
	}
	kunmap_atomic(buf - off, KM_BIO_SRC_IRQ);
}
165
166static void tifm_sd_transfer_data(struct tifm_sd *host)
167{
168 struct mmc_data *r_data = host->req->cmd->data;
169 struct scatterlist *sg = r_data->sg;
170 unsigned int off, cnt, t_size = TIFM_MMCSD_FIFO_SIZE * 2;
171 unsigned int p_off, p_cnt;
172 struct page *pg;
173
174 if (host->sg_pos == host->sg_len)
175 return;
176 while (t_size) {
177 cnt = sg[host->sg_pos].length - host->block_pos;
178 if (!cnt) {
179 host->block_pos = 0;
180 host->sg_pos++;
181 if (host->sg_pos == host->sg_len) {
182 if ((r_data->flags & MMC_DATA_WRITE)
183 && DATA_CARRY)
184 writel(host->bounce_buf_data[0],
185 host->dev->addr
186 + SOCK_MMCSD_DATA);
187
188 return;
189 }
190 cnt = sg[host->sg_pos].length;
191 }
192 off = sg[host->sg_pos].offset + host->block_pos;
193
194 pg = nth_page(sg[host->sg_pos].page, off >> PAGE_SHIFT);
195 p_off = offset_in_page(off);
196 p_cnt = PAGE_SIZE - p_off;
197 p_cnt = min(p_cnt, cnt);
198 p_cnt = min(p_cnt, t_size);
199
200 if (r_data->flags & MMC_DATA_READ)
201 tifm_sd_read_fifo(host, pg, p_off, p_cnt);
202 else if (r_data->flags & MMC_DATA_WRITE)
203 tifm_sd_write_fifo(host, pg, p_off, p_cnt);
204
205 t_size -= p_cnt;
206 host->block_pos += p_cnt;
207 }
208}
209
210static void tifm_sd_copy_page(struct page *dst, unsigned int dst_off,
211 struct page *src, unsigned int src_off,
212 unsigned int count)
213{
214 unsigned char *src_buf = kmap_atomic(src, KM_BIO_SRC_IRQ) + src_off;
215 unsigned char *dst_buf = kmap_atomic(dst, KM_BIO_DST_IRQ) + dst_off;
216
217 memcpy(dst_buf, src_buf, count);
218
219 kunmap_atomic(dst_buf - dst_off, KM_BIO_DST_IRQ);
220 kunmap_atomic(src_buf - src_off, KM_BIO_SRC_IRQ);
221}
222
/*
 * Copy exactly one block (r_data->blksz bytes) between the scatterlist and
 * the bounce buffer, page fragment by page fragment, advancing sg_pos and
 * block_pos.  Used by the DMA path when the remaining sg entry is smaller
 * than a whole block.  Direction follows r_data->flags: writes fill the
 * bounce buffer, reads drain it.  Caller disables interrupts.
 */
static void tifm_sd_bounce_block(struct tifm_sd *host, struct mmc_data *r_data)
{
	struct scatterlist *sg = r_data->sg;
	unsigned int t_size = r_data->blksz;
	unsigned int off, cnt;
	unsigned int p_off, p_cnt;
	struct page *pg;

	dev_dbg(&host->dev->dev, "bouncing block\n");
	while (t_size) {
		cnt = sg[host->sg_pos].length - host->block_pos;
		if (!cnt) {
			/* Current sg entry exhausted; step to the next. */
			host->block_pos = 0;
			host->sg_pos++;
			if (host->sg_pos == host->sg_len)
				return;
			cnt = sg[host->sg_pos].length;
		}
		off = sg[host->sg_pos].offset + host->block_pos;

		/* Clip to page boundary, sg entry and bytes still owed. */
		pg = nth_page(sg[host->sg_pos].page, off >> PAGE_SHIFT);
		p_off = offset_in_page(off);
		p_cnt = PAGE_SIZE - p_off;
		p_cnt = min(p_cnt, cnt);
		p_cnt = min(p_cnt, t_size);

		if (r_data->flags & MMC_DATA_WRITE)
			tifm_sd_copy_page(host->bounce_buf.page,
					  r_data->blksz - t_size,
					  pg, p_off, p_cnt);
		else if (r_data->flags & MMC_DATA_READ)
			tifm_sd_copy_page(pg, p_off, host->bounce_buf.page,
					  r_data->blksz - t_size, p_cnt);

		t_size -= p_cnt;
		host->block_pos += p_cnt;
	}
}
261
262int tifm_sd_set_dma_data(struct tifm_sd *host, struct mmc_data *r_data)
263{
264 struct tifm_dev *sock = host->dev;
265 unsigned int t_size = TIFM_DMA_TSIZE * r_data->blksz;
266 unsigned int dma_len, dma_blk_cnt, dma_off;
267 struct scatterlist *sg = NULL;
268 unsigned long flags;
269
270 if (host->sg_pos == host->sg_len)
271 return 1;
272
273 if (host->cmd_flags & DATA_CARRY) {
274 host->cmd_flags &= ~DATA_CARRY;
275 local_irq_save(flags);
276 tifm_sd_bounce_block(host, r_data);
277 local_irq_restore(flags);
278 if (host->sg_pos == host->sg_len)
279 return 1;
280 }
281
282 dma_len = sg_dma_len(&r_data->sg[host->sg_pos]) - host->block_pos;
283 if (!dma_len) {
284 host->block_pos = 0;
285 host->sg_pos++;
286 if (host->sg_pos == host->sg_len)
287 return 1;
288 dma_len = sg_dma_len(&r_data->sg[host->sg_pos]);
289 }
290
291 if (dma_len < t_size) {
292 dma_blk_cnt = dma_len / r_data->blksz;
293 dma_off = host->block_pos;
294 host->block_pos += dma_blk_cnt * r_data->blksz;
295 } else {
296 dma_blk_cnt = TIFM_DMA_TSIZE;
297 dma_off = host->block_pos;
298 host->block_pos += t_size;
299 }
300
301 if (dma_blk_cnt)
302 sg = &r_data->sg[host->sg_pos];
303 else if (dma_len) {
304 if (r_data->flags & MMC_DATA_WRITE) {
305 local_irq_save(flags);
306 tifm_sd_bounce_block(host, r_data);
307 local_irq_restore(flags);
308 } else
309 host->cmd_flags |= DATA_CARRY;
310
311 sg = &host->bounce_buf;
312 dma_off = 0;
313 dma_blk_cnt = 1;
314 } else
315 return 1;
316
317 dev_dbg(&sock->dev, "setting dma for %d blocks\n", dma_blk_cnt);
318 writel(sg_dma_address(sg) + dma_off, sock->addr + SOCK_DMA_ADDRESS);
319 if (r_data->flags & MMC_DATA_WRITE)
320 writel((dma_blk_cnt << 8) | TIFM_DMA_TX | TIFM_DMA_EN,
321 sock->addr + SOCK_DMA_CONTROL);
322 else
323 writel((dma_blk_cnt << 8) | TIFM_DMA_EN,
324 sock->addr + SOCK_DMA_CONTROL);
325
326 return 0;
327}
328
329static unsigned int tifm_sd_op_flags(struct mmc_command *cmd)
330{
331 unsigned int rc = 0;
332
333 switch (mmc_resp_type(cmd)) {
334 case MMC_RSP_NONE:
335 rc |= TIFM_MMCSD_RSP_R0;
336 break;
337 case MMC_RSP_R1B:
338 rc |= TIFM_MMCSD_RSP_BUSY; // deliberate fall-through
339 case MMC_RSP_R1:
340 rc |= TIFM_MMCSD_RSP_R1;
341 break;
342 case MMC_RSP_R2:
343 rc |= TIFM_MMCSD_RSP_R2;
344 break;
345 case MMC_RSP_R3:
346 rc |= TIFM_MMCSD_RSP_R3;
347 break;
348 default:
349 BUG();
350 }
351
352 switch (mmc_cmd_type(cmd)) {
353 case MMC_CMD_BC:
354 rc |= TIFM_MMCSD_CMD_BC;
355 break;
356 case MMC_CMD_BCR:
357 rc |= TIFM_MMCSD_CMD_BCR;
358 break;
359 case MMC_CMD_AC:
360 rc |= TIFM_MMCSD_CMD_AC;
361 break;
362 case MMC_CMD_ADTC:
363 rc |= TIFM_MMCSD_CMD_ADTC;
364 break;
365 default:
366 BUG();
367 }
368 return rc;
369}
370
371static void tifm_sd_exec(struct tifm_sd *host, struct mmc_command *cmd)
372{
373 struct tifm_dev *sock = host->dev;
374 unsigned int cmd_mask = tifm_sd_op_flags(cmd);
375
376 if (host->open_drain)
377 cmd_mask |= TIFM_MMCSD_ODTO;
378
379 if (cmd->data && (cmd->data->flags & MMC_DATA_READ))
380 cmd_mask |= TIFM_MMCSD_READ;
381
382 dev_dbg(&sock->dev, "executing opcode 0x%x, arg: 0x%x, mask: 0x%x\n",
383 cmd->opcode, cmd->arg, cmd_mask);
384
385 writel((cmd->arg >> 16) & 0xffff, sock->addr + SOCK_MMCSD_ARG_HIGH);
386 writel(cmd->arg & 0xffff, sock->addr + SOCK_MMCSD_ARG_LOW);
387 writel(cmd->opcode | cmd_mask, sock->addr + SOCK_MMCSD_COMMAND);
388}
389
390static void tifm_sd_fetch_resp(struct mmc_command *cmd, struct tifm_dev *sock)
391{
392 cmd->resp[0] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x1c) << 16)
393 | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x18);
394 cmd->resp[1] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x14) << 16)
395 | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x10);
396 cmd->resp[2] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x0c) << 16)
397 | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x08);
398 cmd->resp[3] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x04) << 16)
399 | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x00);
400}
401
/*
 * Decide whether the current request is finished.  Re-invoked from both
 * event handlers as cmd_flags bits accumulate; returns early while any
 * required phase (command, data blocks, DMA/FIFO, stop command, busy
 * de-assertion) is still outstanding, and schedules finish_tasklet once
 * everything - or a fatal error - is in.  For writes it also issues the
 * stop command itself and juggles the EOFB (end-of-busy) interrupt.
 * Called under sock->lock.
 */
static void tifm_sd_check_status(struct tifm_sd *host)
{
	struct tifm_dev *sock = host->dev;
	struct mmc_command *cmd = host->req->cmd;

	if (cmd->error != MMC_ERR_NONE)
		goto finish_request;

	if (!(host->cmd_flags & CMD_READY))
		return;

	if (cmd->data) {
		if (cmd->data->error != MMC_ERR_NONE) {
			/* A stop command is in flight; wait for it before
			 * completing the failed request. */
			if ((host->cmd_flags & SCMD_ACTIVE)
			    && !(host->cmd_flags & SCMD_READY))
				return;

			goto finish_request;
		}

		if (!(host->cmd_flags & BRS_READY))
			return;

		/* DMA mode must also have drained the scatterlist. */
		if (!(host->no_dma || (host->cmd_flags & FIFO_READY)))
			return;

		if (cmd->data->flags & MMC_DATA_WRITE) {
			if (host->req->stop) {
				if (!(host->cmd_flags & SCMD_ACTIVE)) {
					/* Enable end-of-busy notification,
					 * then fire the stop command. */
					host->cmd_flags |= SCMD_ACTIVE;
					writel(TIFM_MMCSD_EOFB
					       | readl(sock->addr
						       + SOCK_MMCSD_INT_ENABLE),
					       sock->addr
					       + SOCK_MMCSD_INT_ENABLE);
					tifm_sd_exec(host, host->req->stop);
					return;
				} else {
					if (!(host->cmd_flags & SCMD_READY)
					    || (host->cmd_flags & CARD_BUSY))
						return;
					writel((~TIFM_MMCSD_EOFB)
					       & readl(sock->addr
						       + SOCK_MMCSD_INT_ENABLE),
					       sock->addr
					       + SOCK_MMCSD_INT_ENABLE);
				}
			} else {
				/* No stop command: just wait out busy. */
				if (host->cmd_flags & CARD_BUSY)
					return;
				writel((~TIFM_MMCSD_EOFB)
				       & readl(sock->addr
					       + SOCK_MMCSD_INT_ENABLE),
				       sock->addr + SOCK_MMCSD_INT_ENABLE);
			}
		} else {
			if (host->req->stop) {
				if (!(host->cmd_flags & SCMD_ACTIVE)) {
					host->cmd_flags |= SCMD_ACTIVE;
					tifm_sd_exec(host, host->req->stop);
					return;
				} else {
					if (!(host->cmd_flags & SCMD_READY))
						return;
				}
			}
		}
	}
finish_request:
	tasklet_schedule(&host->finish_tasklet);
}
473
/*
 * DMA/FIFO interrupt callback, invoked from the tifm core's interrupt
 * handler.  On a FIFO-ready event, program the next DMA chunk; when
 * tifm_sd_set_dma_data() reports the scatterlist exhausted, mark
 * FIFO_READY and let tifm_sd_check_status() decide whether the whole
 * request is done.  The raised FIFO status bits are acknowledged by
 * writing them back.
 */
static void tifm_sd_data_event(struct tifm_dev *sock)
{
	struct tifm_sd *host;
	unsigned int fifo_status = 0;
	struct mmc_data *r_data = NULL;

	spin_lock(&sock->lock);
	host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock));
	fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS);
	dev_dbg(&sock->dev, "data event: fifo_status %x, flags %x\n",
		fifo_status, host->cmd_flags);

	if (host->req) {
		r_data = host->req->cmd->data;

		if (r_data && (fifo_status & TIFM_FIFO_READY)) {
			if (tifm_sd_set_dma_data(host, r_data)) {
				host->cmd_flags |= FIFO_READY;
				tifm_sd_check_status(host);
			}
		}
	}

	/* Acknowledge the events we just handled. */
	writel(fifo_status, sock->addr + SOCK_DMA_FIFO_STATUS);
	spin_unlock(&sock->lock);
}
501
/*
 * MMCSD status interrupt callback, invoked from the tifm core's interrupt
 * handler.  Translates hardware status into request progress: records
 * command/data errors (aborting DMA and, if needed, issuing the stop
 * command), fetches responses on EOC, tracks block completion (BRS), does
 * the PIO data transfer when DMA is off, and maintains the CARD_BUSY flag
 * from the CB/EOFB events.  Ends by re-evaluating overall completion and
 * acknowledging the status bits.
 */
static void tifm_sd_card_event(struct tifm_dev *sock)
{
	struct tifm_sd *host;
	unsigned int host_status = 0;
	int cmd_error = MMC_ERR_NONE;
	struct mmc_command *cmd = NULL;
	unsigned long flags;

	spin_lock(&sock->lock);
	host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock));
	host_status = readl(sock->addr + SOCK_MMCSD_STATUS);
	dev_dbg(&sock->dev, "host event: host_status %x, flags %x\n",
		host_status, host->cmd_flags);

	if (host->req) {
		cmd = host->req->cmd;

		if (host_status & TIFM_MMCSD_ERRMASK) {
			writel(host_status & TIFM_MMCSD_ERRMASK,
			       sock->addr + SOCK_MMCSD_STATUS);
			if (host_status & TIFM_MMCSD_CTO)
				cmd_error = MMC_ERR_TIMEOUT;
			else if (host_status & TIFM_MMCSD_CCRC)
				cmd_error = MMC_ERR_BADCRC;

			if (cmd->data) {
				if (host_status & TIFM_MMCSD_DTO)
					cmd->data->error = MMC_ERR_TIMEOUT;
				else if (host_status & TIFM_MMCSD_DCRC)
					cmd->data->error = MMC_ERR_BADCRC;
			}

			/* Error path: kill any in-flight DMA. */
			writel(TIFM_FIFO_INT_SETALL,
			       sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
			writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL);

			if (host->req->stop) {
				if (host->cmd_flags & SCMD_ACTIVE) {
					/* Error hit the stop command. */
					host->req->stop->error = cmd_error;
					host->cmd_flags |= SCMD_READY;
				} else {
					/* Still send stop to settle the
					 * card before finishing. */
					cmd->error = cmd_error;
					host->cmd_flags |= SCMD_ACTIVE;
					tifm_sd_exec(host, host->req->stop);
					goto done;
				}
			} else
				cmd->error = cmd_error;
		} else {
			if (host_status & (TIFM_MMCSD_EOC | TIFM_MMCSD_CERR)) {
				/* First EOC belongs to the main command,
				 * a later one to the stop command. */
				if (!(host->cmd_flags & CMD_READY)) {
					host->cmd_flags |= CMD_READY;
					tifm_sd_fetch_resp(cmd, sock);
				} else if (host->cmd_flags & SCMD_ACTIVE) {
					host->cmd_flags |= SCMD_READY;
					tifm_sd_fetch_resp(host->req->stop,
							   sock);
				}
			}
			if (host_status & TIFM_MMCSD_BRS)
				host->cmd_flags |= BRS_READY;
		}

		if (host->no_dma && cmd->data) {
			if (host_status & TIFM_MMCSD_AE)
				writel(host_status & TIFM_MMCSD_AE,
				       sock->addr + SOCK_MMCSD_STATUS);

			/* PIO: service the FIFO on almost-empty/full or
			 * block boundary events. */
			if (host_status & (TIFM_MMCSD_AE | TIFM_MMCSD_AF
					   | TIFM_MMCSD_BRS)) {
				local_irq_save(flags);
				tifm_sd_transfer_data(host);
				local_irq_restore(flags);
				host_status &= ~TIFM_MMCSD_AE;
			}
		}

		if (host_status & TIFM_MMCSD_EOFB)
			host->cmd_flags &= ~CARD_BUSY;
		else if (host_status & TIFM_MMCSD_CB)
			host->cmd_flags |= CARD_BUSY;

		tifm_sd_check_status(host);
	}
done:
	/* Acknowledge everything we saw. */
	writel(host_status, sock->addr + SOCK_MMCSD_STATUS);
	spin_unlock(&sock->lock);
}
591
/*
 * Program the data timeout register from the request's timeout_clks plus
 * timeout_ns converted to card clock cycles.  If the cycle count exceeds
 * the 16-bit register, switch the counter to kilocycle units via the DPE
 * bit (0 meaning unlimited); otherwise count plain cycles.  A no-op when
 * the fixed_timeout module option is set.
 *
 * NOTE(review): the division uses host->clk_div, which tifm_sd_ios() sets
 * to 0 when the clock is off - a data request arriving in that state would
 * divide by zero.  Presumably the core never does that; worth confirming.
 */
static void tifm_sd_set_data_timeout(struct tifm_sd *host,
				     struct mmc_data *data)
{
	struct tifm_dev *sock = host->dev;
	unsigned int data_timeout = data->timeout_clks;

	if (fixed_timeout)
		return;

	data_timeout += data->timeout_ns /
			((1000000000UL / host->clk_freq) * host->clk_div);

	if (data_timeout < 0xffff) {
		writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO);
		writel((~TIFM_MMCSD_DPE)
		       & readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
		       sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
	} else {
		/* Too big for cycles: count in units of 1024 cycles. */
		data_timeout = (data_timeout >> 10) + 1;
		if (data_timeout > 0xffff)
			data_timeout = 0; /* set to unlimited */
		writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO);
		writel(TIFM_MMCSD_DPE
		       | readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
		       sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
	}
}
619
/*
 * mmc_host_ops.request - start a new command and optional data transfer.
 * Takes the socket lock against the interrupt callbacks; any setup failure
 * (ejected socket, overlapping request, sg mapping failure) completes the
 * request immediately with MMC_ERR_TIMEOUT.
 */
static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tifm_sd *host = mmc_priv(mmc);
	struct tifm_dev *sock = host->dev;
	unsigned long flags;
	struct mmc_data *r_data = mrq->cmd->data;

	spin_lock_irqsave(&sock->lock, flags);
	if (host->eject) {
		spin_unlock_irqrestore(&sock->lock, flags);
		goto err_out;
	}

	if (host->req) {
		printk(KERN_ERR "%s : unfinished request detected\n",
		       sock->dev.bus_id);
		spin_unlock_irqrestore(&sock->lock, flags);
		goto err_out;
	}

	/* Fresh transfer-progress state for this request. */
	host->cmd_flags = 0;
	host->block_pos = 0;
	host->sg_pos = 0;

	if (r_data) {
		tifm_sd_set_data_timeout(host, r_data);

		/* Writes without a stop command rely on the end-of-busy
		 * (EOFB) interrupt to learn when the card is done. */
		if ((r_data->flags & MMC_DATA_WRITE) && !mrq->stop)
			 writel(TIFM_MMCSD_EOFB
				| readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
				sock->addr + SOCK_MMCSD_INT_ENABLE);

		if (host->no_dma) {
			/* PIO: enable buffer threshold interrupts. */
			writel(TIFM_MMCSD_BUFINT
			       | readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
			       sock->addr + SOCK_MMCSD_INT_ENABLE);
			writel(((TIFM_MMCSD_FIFO_SIZE - 1) << 8)
			       | (TIFM_MMCSD_FIFO_SIZE - 1),
			       sock->addr + SOCK_MMCSD_BUFFER_CONFIG);

			host->sg_len = r_data->sg_len;
		} else {
			/* DMA: map the bounce buffer (for sub-block
			 * fragments) and the request's scatterlist. */
			sg_init_one(&host->bounce_buf, host->bounce_buf_data,
				    r_data->blksz);

			if(1 != tifm_map_sg(sock, &host->bounce_buf, 1,
					    r_data->flags & MMC_DATA_WRITE
					    ? PCI_DMA_TODEVICE
					    : PCI_DMA_FROMDEVICE)) {
				printk(KERN_ERR "%s : scatterlist map failed\n",
				       sock->dev.bus_id);
				spin_unlock_irqrestore(&sock->lock, flags);
				goto err_out;
			}
			host->sg_len = tifm_map_sg(sock, r_data->sg,
						   r_data->sg_len,
						   r_data->flags
						   & MMC_DATA_WRITE
						   ? PCI_DMA_TODEVICE
						   : PCI_DMA_FROMDEVICE);
			if (host->sg_len < 1) {
				printk(KERN_ERR "%s : scatterlist map failed\n",
				       sock->dev.bus_id);
				tifm_unmap_sg(sock, &host->bounce_buf, 1,
					      r_data->flags & MMC_DATA_WRITE
					      ? PCI_DMA_TODEVICE
					      : PCI_DMA_FROMDEVICE);
				spin_unlock_irqrestore(&sock->lock, flags);
				goto err_out;
			}

			writel(TIFM_FIFO_INT_SETALL,
			       sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
			writel(ilog2(r_data->blksz) - 2,
			       sock->addr + SOCK_FIFO_PAGE_SIZE);
			writel(TIFM_FIFO_ENABLE,
			       sock->addr + SOCK_FIFO_CONTROL);
			writel(TIFM_FIFO_INTMASK,
			       sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);

			if (r_data->flags & MMC_DATA_WRITE)
				writel(TIFM_MMCSD_TXDE,
				       sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
			else
				writel(TIFM_MMCSD_RXDE,
				       sock->addr + SOCK_MMCSD_BUFFER_CONFIG);

			/* Kick off the first DMA chunk. */
			tifm_sd_set_dma_data(host, r_data);
		}

		/* Hardware registers hold count-minus-one values. */
		writel(r_data->blocks - 1,
		       sock->addr + SOCK_MMCSD_NUM_BLOCKS);
		writel(r_data->blksz - 1,
		       sock->addr + SOCK_MMCSD_BLOCK_LEN);
	}

	host->req = mrq;
	/* Arm the stuck-request watchdog and light the activity LED. */
	mod_timer(&host->timer, jiffies + host->timeout_jiffies);
	writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL),
	       sock->addr + SOCK_CONTROL);
	tifm_sd_exec(host, mrq->cmd);
	spin_unlock_irqrestore(&sock->lock, flags);
	return;

err_out:
	mrq->cmd->error = MMC_ERR_TIMEOUT;
	mmc_request_done(mmc, mrq);
}
728
/*
 * finish_tasklet body: complete the current request.  Cancels the
 * watchdog, unmaps DMA buffers (or disables PIO buffer interrupts),
 * derives bytes_xfered from the hardware's remaining block/byte counters
 * (which were loaded as count-minus-one values by tifm_sd_request),
 * turns the LED off and hands the request back to the MMC core outside
 * the lock.
 */
static void tifm_sd_end_cmd(unsigned long data)
{
	struct tifm_sd *host = (struct tifm_sd*)data;
	struct tifm_dev *sock = host->dev;
	struct mmc_host *mmc = tifm_get_drvdata(sock);
	struct mmc_request *mrq;
	struct mmc_data *r_data = NULL;
	unsigned long flags;

	spin_lock_irqsave(&sock->lock, flags);

	del_timer(&host->timer);
	mrq = host->req;
	host->req = NULL;

	if (!mrq) {
		printk(KERN_ERR " %s : no request to complete?\n",
		       sock->dev.bus_id);
		spin_unlock_irqrestore(&sock->lock, flags);
		return;
	}

	r_data = mrq->cmd->data;
	if (r_data) {
		if (host->no_dma) {
			writel((~TIFM_MMCSD_BUFINT)
			       & readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
			       sock->addr + SOCK_MMCSD_INT_ENABLE);
		} else {
			tifm_unmap_sg(sock, &host->bounce_buf, 1,
				      (r_data->flags & MMC_DATA_WRITE)
				      ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
			tifm_unmap_sg(sock, r_data->sg, r_data->sg_len,
				      (r_data->flags & MMC_DATA_WRITE)
				      ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
		}

		/* Completed blocks plus the completed part of the last
		 * block, read back from the down-counting registers. */
		r_data->bytes_xfered = r_data->blocks
			- readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1;
		r_data->bytes_xfered *= r_data->blksz;
		r_data->bytes_xfered += r_data->blksz
			- readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) + 1;
	}

	writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL),
	       sock->addr + SOCK_CONTROL);

	spin_unlock_irqrestore(&sock->lock, flags);
	mmc_request_done(mmc, mrq);
}
779
780static void tifm_sd_abort(unsigned long data)
781{
782 struct tifm_sd *host = (struct tifm_sd*)data;
783
784 printk(KERN_ERR
785 "%s : card failed to respond for a long period of time "
786 "(%x, %x)\n",
787 host->dev->dev.bus_id, host->req->cmd->opcode, host->cmd_flags);
788
789 tifm_eject(host->dev);
790}
791
/*
 * mmc_host_ops.set_ios - apply bus width, clock and bus mode.  The
 * controller has two base clocks (20 MHz normal, 24 MHz "fast"); for the
 * requested frequency both dividers are computed so the result does not
 * exceed ios->clock, and the base giving the higher actual frequency
 * wins.  vdd/power handling is fixed at probe time (see comments below).
 */
static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tifm_sd *host = mmc_priv(mmc);
	struct tifm_dev *sock = host->dev;
	unsigned int clk_div1, clk_div2;
	unsigned long flags;

	spin_lock_irqsave(&sock->lock, flags);

	dev_dbg(&sock->dev, "ios: clock = %u, vdd = %x, bus_mode = %x, "
		"chip_select = %x, power_mode = %x, bus_width = %x\n",
		ios->clock, ios->vdd, ios->bus_mode, ios->chip_select,
		ios->power_mode, ios->bus_width);

	if (ios->bus_width == MMC_BUS_WIDTH_4) {
		writel(TIFM_MMCSD_4BBUS | readl(sock->addr + SOCK_MMCSD_CONFIG),
		       sock->addr + SOCK_MMCSD_CONFIG);
	} else {
		writel((~TIFM_MMCSD_4BBUS)
		       & readl(sock->addr + SOCK_MMCSD_CONFIG),
		       sock->addr + SOCK_MMCSD_CONFIG);
	}

	if (ios->clock) {
		/* Candidate dividers for each base clock ... */
		clk_div1 = 20000000 / ios->clock;
		if (!clk_div1)
			clk_div1 = 1;

		clk_div2 = 24000000 / ios->clock;
		if (!clk_div2)
			clk_div2 = 1;

		/* ... bumped so neither exceeds the requested rate ... */
		if ((20000000 / clk_div1) > ios->clock)
			clk_div1++;
		if ((24000000 / clk_div2) > ios->clock)
			clk_div2++;
		/* ... then pick the base that gets closer to it. */
		if ((20000000 / clk_div1) > (24000000 / clk_div2)) {
			host->clk_freq = 20000000;
			host->clk_div = clk_div1;
			writel((~TIFM_CTRL_FAST_CLK)
			       & readl(sock->addr + SOCK_CONTROL),
			       sock->addr + SOCK_CONTROL);
		} else {
			host->clk_freq = 24000000;
			host->clk_div = clk_div2;
			writel(TIFM_CTRL_FAST_CLK
			       | readl(sock->addr + SOCK_CONTROL),
			       sock->addr + SOCK_CONTROL);
		}
	} else {
		host->clk_div = 0;
	}
	host->clk_div &= TIFM_MMCSD_CLKMASK;
	writel(host->clk_div
	       | ((~TIFM_MMCSD_CLKMASK)
		  & readl(sock->addr + SOCK_MMCSD_CONFIG)),
	       sock->addr + SOCK_MMCSD_CONFIG);

	host->open_drain = (ios->bus_mode == MMC_BUSMODE_OPENDRAIN);

	/* chip_select : maybe later */
	//vdd
	//power is set before probe / after remove

	spin_unlock_irqrestore(&sock->lock, flags);
}
858
859static int tifm_sd_ro(struct mmc_host *mmc)
860{
861 int rc = 0;
862 struct tifm_sd *host = mmc_priv(mmc);
863 struct tifm_dev *sock = host->dev;
864 unsigned long flags;
865
866 spin_lock_irqsave(&sock->lock, flags);
867 if (TIFM_MMCSD_CARD_RO & readl(sock->addr + SOCK_PRESENT_STATE))
868 rc = 1;
869 spin_unlock_irqrestore(&sock->lock, flags);
870 return rc;
871}
872
/* Host operations handed to the MMC core.  No card-detect callback:
 * insertion/removal is signalled through the tifm bus (probe/remove). */
static const struct mmc_host_ops tifm_sd_ops = {
	.request = tifm_sd_request,
	.set_ios = tifm_sd_ios,
	.get_ro = tifm_sd_ro
};
878
/*
 * Bring the MMCSD function out of reset and into a known state: reset and
 * power the block, wait for the reset to complete, then issue the INAB
 * initialization command and wait for its EOC.  Both waits poll with
 * exponentially growing sleeps (totalling ~0.5s / ~0.1s).  Finishes by
 * enabling the interrupt sources the event handlers rely on.  Also used
 * on resume.  Returns 0 or -ENODEV.
 */
static int tifm_sd_initialize_host(struct tifm_sd *host)
{
	int rc;
	unsigned int host_status = 0;
	struct tifm_dev *sock = host->dev;

	writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
	mmiowb();
	/* Slowest setting: 20 MHz base with maximal-ish divider. */
	host->clk_div = 61;
	host->clk_freq = 20000000;
	writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL);
	writel(host->clk_div | TIFM_MMCSD_POWER,
	       sock->addr + SOCK_MMCSD_CONFIG);

	/* wait up to 0.51 sec for reset */
	for (rc = 32; rc <= 256; rc <<= 1) {
		if (1 & readl(sock->addr + SOCK_MMCSD_SYSTEM_STATUS)) {
			rc = 0;
			break;
		}
		msleep(rc);
	}

	if (rc) {
		printk(KERN_ERR "%s : controller failed to reset\n",
		       sock->dev.bus_id);
		return -ENODEV;
	}

	writel(0, sock->addr + SOCK_MMCSD_NUM_BLOCKS);
	writel(host->clk_div | TIFM_MMCSD_POWER,
	       sock->addr + SOCK_MMCSD_CONFIG);
	writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG);

	// command timeout fixed to 64 clocks for now
	writel(64, sock->addr + SOCK_MMCSD_COMMAND_TO);
	writel(TIFM_MMCSD_INAB, sock->addr + SOCK_MMCSD_COMMAND);

	/* Poll for clean completion (EOC without error bits) of INAB. */
	for (rc = 16; rc <= 64; rc <<= 1) {
		host_status = readl(sock->addr + SOCK_MMCSD_STATUS);
		writel(host_status, sock->addr + SOCK_MMCSD_STATUS);
		if (!(host_status & TIFM_MMCSD_ERRMASK)
		    && (host_status & TIFM_MMCSD_EOC)) {
			rc = 0;
			break;
		}
		msleep(rc);
	}

	if (rc) {
		printk(KERN_ERR
		       "%s : card not ready - probe failed on initialization\n",
		       sock->dev.bus_id);
		return -ENODEV;
	}

	writel(TIFM_MMCSD_CERR | TIFM_MMCSD_BRS | TIFM_MMCSD_EOC
	       | TIFM_MMCSD_ERRMASK,
	       sock->addr + SOCK_MMCSD_INT_ENABLE);
	mmiowb();

	return 0;
}
942
/*
 * tifm bus probe: allocate the mmc_host with our private state, describe
 * the controller's capabilities to the MMC core, hook the socket's event
 * callbacks, initialize the hardware and register the host.  On any
 * failure after allocation the mmc_host is freed.
 */
static int tifm_sd_probe(struct tifm_dev *sock)
{
	struct mmc_host *mmc;
	struct tifm_sd *host;
	int rc = -EIO;

	/* The card may have been yanked between detection and probe. */
	if (!(TIFM_SOCK_STATE_OCCUPIED
	      & readl(sock->addr + SOCK_PRESENT_STATE))) {
		printk(KERN_WARNING "%s : card gone, unexpectedly\n",
		       sock->dev.bus_id);
		return rc;
	}

	mmc = mmc_alloc_host(sizeof(struct tifm_sd), &sock->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	host->no_dma = no_dma;
	tifm_set_drvdata(sock, mmc);
	host->dev = sock;
	host->timeout_jiffies = msecs_to_jiffies(1000);

	tasklet_init(&host->finish_tasklet, tifm_sd_end_cmd,
		     (unsigned long)host);
	setup_timer(&host->timer, tifm_sd_abort, (unsigned long)host);

	mmc->ops = &tifm_sd_ops;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE;
	/* Slowest rate: 20 MHz base over the largest divider used (60). */
	mmc->f_min = 20000000 / 60;
	mmc->f_max = 24000000;

	mmc->max_blk_count = 2048;
	mmc->max_hw_segs = mmc->max_blk_count;
	mmc->max_blk_size = min(TIFM_MMCSD_MAX_BLOCK_SIZE, PAGE_SIZE);
	mmc->max_seg_size = mmc->max_blk_count * mmc->max_blk_size;
	mmc->max_req_size = mmc->max_seg_size;
	mmc->max_phys_segs = mmc->max_hw_segs;

	/* Route the socket's interrupt events to our handlers. */
	sock->card_event = tifm_sd_card_event;
	sock->data_event = tifm_sd_data_event;
	rc = tifm_sd_initialize_host(host);

	if (!rc)
		rc = mmc_add_host(mmc);
	if (!rc)
		return 0;

	mmc_free_host(mmc);
	return rc;
}
995
/*
 * tifm bus remove: mark the host ejected and mask interrupts under the
 * lock, quiesce the completion tasklet, then fail any request still in
 * flight with MMC_ERR_TIMEOUT before unregistering from the MMC core and
 * powering the socket down.  Ordering matters: eject must be visible
 * before the tasklet is killed so no new work is queued.
 */
static void tifm_sd_remove(struct tifm_dev *sock)
{
	struct mmc_host *mmc = tifm_get_drvdata(sock);
	struct tifm_sd *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&sock->lock, flags);
	host->eject = 1;
	writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
	mmiowb();
	spin_unlock_irqrestore(&sock->lock, flags);

	tasklet_kill(&host->finish_tasklet);

	spin_lock_irqsave(&sock->lock, flags);
	if (host->req) {
		/* Abort DMA/FIFO activity and complete the request with
		 * an error through the (now idle) tasklet. */
		writel(TIFM_FIFO_INT_SETALL,
		       sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
		writel(0, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
		host->req->cmd->error = MMC_ERR_TIMEOUT;
		if (host->req->stop)
			host->req->stop->error = MMC_ERR_TIMEOUT;
		tasklet_schedule(&host->finish_tasklet);
	}
	spin_unlock_irqrestore(&sock->lock, flags);
	mmc_remove_host(mmc);
	dev_dbg(&sock->dev, "after remove\n");

	/* The meaning of the bit majority in this constant is unknown. */
	writel(0xfff8 & readl(sock->addr + SOCK_CONTROL),
	       sock->addr + SOCK_CONTROL);

	mmc_free_host(mmc);
}
1030
1031#ifdef CONFIG_PM
1032
1033static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)
1034{
1035 struct mmc_host *mmc = tifm_get_drvdata(sock);
1036 int rc;
1037
1038 rc = mmc_suspend_host(mmc, state);
1039 /* The meaning of the bit majority in this constant is unknown. */
1040 writel(0xfff8 & readl(sock->addr + SOCK_CONTROL),
1041 sock->addr + SOCK_CONTROL);
1042 return rc;
1043}
1044
/*
 * PM resume: re-run the full hardware initialization.  If that fails the
 * host is marked ejected (requests will be refused) instead of resuming
 * the MMC core.
 */
static int tifm_sd_resume(struct tifm_dev *sock)
{
	struct mmc_host *mmc = tifm_get_drvdata(sock);
	struct tifm_sd *host = mmc_priv(mmc);
	int rc;

	rc = tifm_sd_initialize_host(host);
	dev_dbg(&sock->dev, "resume initialize %d\n", rc);

	if (rc)
		host->eject = 1;
	else
		rc = mmc_resume_host(mmc);

	return rc;
}
1061
1062#else
1063
1064#define tifm_sd_suspend NULL
1065#define tifm_sd_resume NULL
1066
1067#endif /* CONFIG_PM */
1068
/* Bind to tifm sockets reporting the SD media type; empty entry ends it. */
static struct tifm_device_id tifm_sd_id_tbl[] = {
	{ TIFM_TYPE_SD }, { }
};
1072
/* Driver glue registered with the tifm bus core. */
static struct tifm_driver tifm_sd_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE
	},
	.id_table = tifm_sd_id_tbl,
	.probe = tifm_sd_probe,
	.remove = tifm_sd_remove,
	.suspend = tifm_sd_suspend,
	.resume = tifm_sd_resume
};
1084
/* Module entry: register with the tifm bus. */
static int __init tifm_sd_init(void)
{
	return tifm_register_driver(&tifm_sd_driver);
}
1089
/* Module exit: unregister from the tifm bus. */
static void __exit tifm_sd_exit(void)
{
	tifm_unregister_driver(&tifm_sd_driver);
}
1094
1095MODULE_AUTHOR("Alex Dubov");
1096MODULE_DESCRIPTION("TI FlashMedia SD driver");
1097MODULE_LICENSE("GPL");
1098MODULE_DEVICE_TABLE(tifm, tifm_sd_id_tbl);
1099MODULE_VERSION(DRIVER_VERSION);
1100
1101module_init(tifm_sd_init);
1102module_exit(tifm_sd_exit);
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
new file mode 100644
index 000000000000..9f7518b37c33
--- /dev/null
+++ b/drivers/mmc/host/wbsd.c
@@ -0,0 +1,2062 @@
1/*
2 * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver
3 *
4 * Copyright (C) 2004-2007 Pierre Ossman, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 *
12 * Warning!
13 *
14 * Changes to the FIFO system should be done with extreme care since
15 * the hardware is full of bugs related to the FIFO. Known issues are:
16 *
17 * - FIFO size field in FSR is always zero.
18 *
19 * - FIFO interrupts tend not to work as they should. Interrupts are
20 * triggered only for full/empty events, not for threshold values.
21 *
22 * - On APIC systems the FIFO empty interrupt is sometimes lost.
23 */
24
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/platform_device.h>
30#include <linux/interrupt.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/pnp.h>
34#include <linux/highmem.h>
35#include <linux/mmc/host.h>
36
37#include <asm/io.h>
38#include <asm/dma.h>
39#include <asm/scatterlist.h>
40
41#include "wbsd.h"
42
#define DRIVER_NAME "wbsd"

/* Plain debug print, prefixed with the driver name. */
#define DBG(x...) \
	pr_debug(DRIVER_NAME ": " x)
/* Debug print that also includes the calling function's name. */
#define DBGF(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
49
50/*
51 * Device resources
52 */
53
#ifdef CONFIG_PNP

/* PnP ids for the Winbond W83L51xD chips; terminated by an empty id. */
static const struct pnp_device_id pnp_dev_table[] = {
	{ "WEC0517", 0 },
	{ "WEC0518", 0 },
	{ "", 0 },
};

MODULE_DEVICE_TABLE(pnp, pnp_dev_table);

#endif /* CONFIG_PNP */
65
/* Super-I/O configuration port candidates probed during chip scan. */
static const int config_ports[] = { 0x2E, 0x4E };
/* Magic bytes that unlock the configuration register pair. */
static const int unlock_codes[] = { 0x83, 0x87 };

/* Chip ids this driver knows how to handle. */
static const int valid_ids[] = {
	0x7112,
};

#ifdef CONFIG_PNP
static unsigned int nopnp;	/* statics are zero by default; no "= 0" needed */
#else
static const unsigned int nopnp = 1;
#endif
static unsigned int io = 0x248;	/* default I/O base used when PnP is off */
static unsigned int irq = 6;	/* default interrupt line */
static int dma = 2;		/* default ISA DMA channel; negative disables DMA */
81
82/*
83 * Basic functions
84 */
85
/*
 * Unlock the Super-I/O configuration registers by writing the chip's
 * unlock code twice to the configuration port.
 */
static inline void wbsd_unlock_config(struct wbsd_host *host)
{
	BUG_ON(host->config == 0);

	outb(host->unlock_code, host->config);
	outb(host->unlock_code, host->config);
}

/* Re-lock the configuration registers. */
static inline void wbsd_lock_config(struct wbsd_host *host)
{
	BUG_ON(host->config == 0);

	outb(LOCK_CODE, host->config);
}

/* Write a configuration register: select via port, write via port+1. */
static inline void wbsd_write_config(struct wbsd_host *host, u8 reg, u8 value)
{
	BUG_ON(host->config == 0);

	outb(reg, host->config);
	outb(value, host->config + 1);
}

/* Read a configuration register: select via port, read via port+1. */
static inline u8 wbsd_read_config(struct wbsd_host *host, u8 reg)
{
	BUG_ON(host->config == 0);

	outb(reg, host->config);
	return inb(host->config + 1);
}

/* Write an indexed chip register through the IDXR/DATAR pair. */
static inline void wbsd_write_index(struct wbsd_host *host, u8 index, u8 value)
{
	outb(index, host->base + WBSD_IDXR);
	outb(value, host->base + WBSD_DATAR);
}

/* Read an indexed chip register through the IDXR/DATAR pair. */
static inline u8 wbsd_read_index(struct wbsd_host *host, u8 index)
{
	outb(index, host->base + WBSD_IDXR);
	return inb(host->base + WBSD_DATAR);
}
128
129/*
130 * Common routines
131 */
132
/*
 * Bring the chip into a known state: reset the SD/MMC core and FIFO,
 * sample card presence, and enable the interrupt sources the driver
 * handles.  Also called from set_ios() on every power-off.
 */
static void wbsd_init_device(struct wbsd_host *host)
{
	u8 setup, ier;

	/*
	 * Reset chip (SD/MMC part) and fifo.
	 */
	setup = wbsd_read_index(host, WBSD_IDX_SETUP);
	setup |= WBSD_FIFO_RESET | WBSD_SOFT_RESET;
	wbsd_write_index(host, WBSD_IDX_SETUP, setup);

	/*
	 * Set DAT3 to input
	 */
	setup &= ~WBSD_DAT3_H;
	wbsd_write_index(host, WBSD_IDX_SETUP, setup);
	host->flags &= ~WBSD_FIGNORE_DETECT;

	/*
	 * Read back default clock.
	 */
	host->clk = wbsd_read_index(host, WBSD_IDX_CLK);

	/*
	 * Power down port.
	 */
	outb(WBSD_POWER_N, host->base + WBSD_CSR);

	/*
	 * Set maximum timeout.
	 */
	wbsd_write_index(host, WBSD_IDX_TAAC, 0x7F);

	/*
	 * Test for card presence
	 */
	if (inb(host->base + WBSD_CSR) & WBSD_CARDPRESENT)
		host->flags |= WBSD_FCARD_PRESENT;
	else
		host->flags &= ~WBSD_FCARD_PRESENT;

	/*
	 * Enable interesting interrupts.
	 */
	ier = 0;
	ier |= WBSD_EINT_CARD;
	ier |= WBSD_EINT_FIFO_THRE;
	ier |= WBSD_EINT_CRC;
	ier |= WBSD_EINT_TIMEOUT;
	ier |= WBSD_EINT_TC;

	outb(ier, host->base + WBSD_EIR);

	/*
	 * Clear interrupts.
	 * (reading ISR apparently acknowledges pending events - inherited
	 * from the original code; the datasheet behaviour is not visible here)
	 */
	inb(host->base + WBSD_ISR);
}
191
/*
 * Emergency soft reset of the SD/MMC part of the chip, used when a card
 * is yanked mid-transfer.  Logs loudly since this should be rare.
 */
static void wbsd_reset(struct wbsd_host *host)
{
	u8 setup;

	printk(KERN_ERR "%s: Resetting chip\n", mmc_hostname(host->mmc));

	/*
	 * Soft reset of chip (SD/MMC part).
	 */
	setup = wbsd_read_index(host, WBSD_IDX_SETUP);
	setup |= WBSD_SOFT_RESET;
	wbsd_write_index(host, WBSD_IDX_SETUP, setup);
}
205
/*
 * Finish the current request: tear down any ISA DMA state and report the
 * result to the MMC core.  Must be called with host->lock held; the lock
 * is dropped around mmc_request_done() because the core may re-enter the
 * driver from that callback.
 */
static void wbsd_request_end(struct wbsd_host *host, struct mmc_request *mrq)
{
	unsigned long dmaflags;

	DBGF("Ending request, cmd (%x)\n", mrq->cmd->opcode);

	if (host->dma >= 0) {
		/*
		 * Release ISA DMA controller.
		 */
		dmaflags = claim_dma_lock();
		disable_dma(host->dma);
		clear_dma_ff(host->dma);
		release_dma_lock(dmaflags);

		/*
		 * Disable DMA on host.
		 */
		wbsd_write_index(host, WBSD_IDX_DMA, 0);
	}

	host->mrq = NULL;

	/*
	 * MMC layer might call back into the driver so first unlock.
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}
236
237/*
238 * Scatter/gather functions
239 */
240
241static inline void wbsd_init_sg(struct wbsd_host *host, struct mmc_data *data)
242{
243 /*
244 * Get info. about SG list from data structure.
245 */
246 host->cur_sg = data->sg;
247 host->num_sg = data->sg_len;
248
249 host->offset = 0;
250 host->remain = host->cur_sg->length;
251}
252
253static inline int wbsd_next_sg(struct wbsd_host *host)
254{
255 /*
256 * Skip to next SG entry.
257 */
258 host->cur_sg++;
259 host->num_sg--;
260
261 /*
262 * Any entries left?
263 */
264 if (host->num_sg > 0) {
265 host->offset = 0;
266 host->remain = host->cur_sg->length;
267 }
268
269 return host->num_sg;
270}
271
272static inline char *wbsd_sg_to_buffer(struct wbsd_host *host)
273{
274 return page_address(host->cur_sg->page) + host->cur_sg->offset;
275}
276
277static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
278{
279 unsigned int len, i;
280 struct scatterlist *sg;
281 char *dmabuf = host->dma_buffer;
282 char *sgbuf;
283
284 sg = data->sg;
285 len = data->sg_len;
286
287 for (i = 0; i < len; i++) {
288 sgbuf = page_address(sg[i].page) + sg[i].offset;
289 memcpy(dmabuf, sgbuf, sg[i].length);
290 dmabuf += sg[i].length;
291 }
292}
293
294static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data)
295{
296 unsigned int len, i;
297 struct scatterlist *sg;
298 char *dmabuf = host->dma_buffer;
299 char *sgbuf;
300
301 sg = data->sg;
302 len = data->sg_len;
303
304 for (i = 0; i < len; i++) {
305 sgbuf = page_address(sg[i].page) + sg[i].offset;
306 memcpy(sgbuf, dmabuf, sg[i].length);
307 dmabuf += sg[i].length;
308 }
309}
310
311/*
312 * Command handling
313 */
314
/*
 * Read a short (48-bit) response from the chip's response registers into
 * cmd->resp.  Sets cmd->error if the chip latched a different response
 * length than expected.
 */
static inline void wbsd_get_short_reply(struct wbsd_host *host,
					struct mmc_command *cmd)
{
	/*
	 * Correct response type?
	 */
	if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT) {
		cmd->error = MMC_ERR_INVALID;
		return;
	}

	/* Assemble the 32-bit word MSB-first from four byte registers. */
	cmd->resp[0]  = wbsd_read_index(host, WBSD_IDX_RESP12) << 24;
	cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP13) << 16;
	cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP14) << 8;
	cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP15) << 0;
	cmd->resp[1]  = wbsd_read_index(host, WBSD_IDX_RESP16) << 24;
}
332
/*
 * Read a long (136-bit) response into all four cmd->resp words.  Sets
 * cmd->error if the chip latched a different response length.
 */
static inline void wbsd_get_long_reply(struct wbsd_host *host,
	struct mmc_command *cmd)
{
	int i;

	/*
	 * Correct response type?
	 */
	if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG) {
		cmd->error = MMC_ERR_INVALID;
		return;
	}

	/* Each resp word is built MSB-first from four consecutive registers. */
	for (i = 0; i < 4; i++) {
		cmd->resp[i] =
			wbsd_read_index(host, WBSD_IDX_RESP1 + i * 4) << 24;
		cmd->resp[i] |=
			wbsd_read_index(host, WBSD_IDX_RESP2 + i * 4) << 16;
		cmd->resp[i] |=
			wbsd_read_index(host, WBSD_IDX_RESP3 + i * 4) << 8;
		cmd->resp[i] |=
			wbsd_read_index(host, WBSD_IDX_RESP4 + i * 4) << 0;
	}
}
357
/*
 * Issue a single command to the card and collect its response.  Busy-waits
 * for the chip to leave the card-traffic state, then classifies the
 * outcome from the interrupt bits accumulated in host->isr by the IRQ
 * handler.  cmd->error holds the result.
 */
static void wbsd_send_command(struct wbsd_host *host, struct mmc_command *cmd)
{
	int i;
	u8 status, isr;

	DBGF("Sending cmd (%x)\n", cmd->opcode);

	/*
	 * Clear accumulated ISR. The interrupt routine
	 * will fill this one with events that occur during
	 * transfer.
	 */
	host->isr = 0;

	/*
	 * Send the command (CRC calculated by host).
	 * Opcode first, then the 32-bit argument MSB-first.
	 */
	outb(cmd->opcode, host->base + WBSD_CMDR);
	for (i = 3; i >= 0; i--)
		outb((cmd->arg >> (i * 8)) & 0xff, host->base + WBSD_CMDR);

	cmd->error = MMC_ERR_NONE;

	/*
	 * Wait for the request to complete.
	 */
	do {
		status = wbsd_read_index(host, WBSD_IDX_STATUS);
	} while (status & WBSD_CARDTRAFFIC);

	/*
	 * Do we expect a reply?
	 */
	if (cmd->flags & MMC_RSP_PRESENT) {
		/*
		 * Read back status.
		 */
		isr = host->isr;

		/* Card removed? */
		if (isr & WBSD_INT_CARD)
			cmd->error = MMC_ERR_TIMEOUT;
		/* Timeout? */
		else if (isr & WBSD_INT_TIMEOUT)
			cmd->error = MMC_ERR_TIMEOUT;
		/* CRC? */
		else if ((cmd->flags & MMC_RSP_CRC) && (isr & WBSD_INT_CRC))
			cmd->error = MMC_ERR_BADCRC;
		/* All ok */
		else {
			if (cmd->flags & MMC_RSP_136)
				wbsd_get_long_reply(host, cmd);
			else
				wbsd_get_short_reply(host, cmd);
		}
	}

	DBGF("Sent cmd (%x), res %d\n", cmd->opcode, cmd->error);
}
417
418/*
419 * Data functions
420 */
421
/*
 * Drain the chip's read FIFO into the current scatter/gather position.
 * Called from the FIFO tasklet during PIO reads.  Advances host->cur_sg /
 * offset / remain and updates data->bytes_xfered as it goes.
 */
static void wbsd_empty_fifo(struct wbsd_host *host)
{
	struct mmc_data *data = host->mrq->cmd->data;
	char *buffer;
	int i, fsr, fifo;

	/*
	 * Handle excessive data.
	 */
	if (host->num_sg == 0)
		return;

	buffer = wbsd_sg_to_buffer(host) + host->offset;

	/*
	 * Drain the fifo. This has a tendency to loop longer
	 * than the FIFO length (usually one block).
	 */
	while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY)) {
		/*
		 * The size field in the FSR is broken so we have to
		 * do some guessing.
		 */
		if (fsr & WBSD_FIFO_FULL)
			fifo = 16;
		else if (fsr & WBSD_FIFO_FUTHRE)
			fifo = 8;
		else
			fifo = 1;

		for (i = 0; i < fifo; i++) {
			*buffer = inb(host->base + WBSD_DFR);
			buffer++;
			host->offset++;
			host->remain--;

			data->bytes_xfered++;

			/*
			 * End of scatter list entry?
			 */
			if (host->remain == 0) {
				/*
				 * Get next entry. Check if last.
				 */
				if (!wbsd_next_sg(host))
					return;

				buffer = wbsd_sg_to_buffer(host);
			}
		}
	}

	/*
	 * This is a very dirty hack to solve a
	 * hardware problem. The chip doesn't trigger
	 * FIFO threshold interrupts properly.
	 */
	if ((data->blocks * data->blksz - data->bytes_xfered) < 16)
		tasklet_schedule(&host->fifo_tasklet);
}
483
/*
 * Fill the chip's write FIFO from the current scatter/gather position.
 * Called from the FIFO tasklet (and once from wbsd_prepare_data to prime
 * the FIFO) during PIO writes.
 */
static void wbsd_fill_fifo(struct wbsd_host *host)
{
	struct mmc_data *data = host->mrq->cmd->data;
	char *buffer;
	int i, fsr, fifo;

	/*
	 * Check that we aren't being called after the
	 * entire buffer has been transfered.
	 */
	if (host->num_sg == 0)
		return;

	buffer = wbsd_sg_to_buffer(host) + host->offset;

	/*
	 * Fill the fifo. This has a tendency to loop longer
	 * than the FIFO length (usually one block).
	 */
	while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL)) {
		/*
		 * The size field in the FSR is broken so we have to
		 * do some guessing.
		 */
		if (fsr & WBSD_FIFO_EMPTY)
			fifo = 0;
		else if (fsr & WBSD_FIFO_EMTHRE)
			fifo = 8;
		else
			fifo = 15;

		/* Write only as many bytes as the 16-byte FIFO can hold. */
		for (i = 16; i > fifo; i--) {
			outb(*buffer, host->base + WBSD_DFR);
			buffer++;
			host->offset++;
			host->remain--;

			data->bytes_xfered++;

			/*
			 * End of scatter list entry?
			 */
			if (host->remain == 0) {
				/*
				 * Get next entry. Check if last.
				 */
				if (!wbsd_next_sg(host))
					return;

				buffer = wbsd_sg_to_buffer(host);
			}
		}
	}

	/*
	 * The controller stops sending interrupts for
	 * 'FIFO empty' under certain conditions. So we
	 * need to be a bit more pro-active.
	 */
	tasklet_schedule(&host->fifo_tasklet);
}
545
546static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data)
547{
548 u16 blksize;
549 u8 setup;
550 unsigned long dmaflags;
551 unsigned int size;
552
553 DBGF("blksz %04x blks %04x flags %08x\n",
554 data->blksz, data->blocks, data->flags);
555 DBGF("tsac %d ms nsac %d clk\n",
556 data->timeout_ns / 1000000, data->timeout_clks);
557
558 /*
559 * Calculate size.
560 */
561 size = data->blocks * data->blksz;
562
563 /*
564 * Check timeout values for overflow.
565 * (Yes, some cards cause this value to overflow).
566 */
567 if (data->timeout_ns > 127000000)
568 wbsd_write_index(host, WBSD_IDX_TAAC, 127);
569 else {
570 wbsd_write_index(host, WBSD_IDX_TAAC,
571 data->timeout_ns / 1000000);
572 }
573
574 if (data->timeout_clks > 255)
575 wbsd_write_index(host, WBSD_IDX_NSAC, 255);
576 else
577 wbsd_write_index(host, WBSD_IDX_NSAC, data->timeout_clks);
578
579 /*
580 * Inform the chip of how large blocks will be
581 * sent. It needs this to determine when to
582 * calculate CRC.
583 *
584 * Space for CRC must be included in the size.
585 * Two bytes are needed for each data line.
586 */
587 if (host->bus_width == MMC_BUS_WIDTH_1) {
588 blksize = data->blksz + 2;
589
590 wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
591 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
592 } else if (host->bus_width == MMC_BUS_WIDTH_4) {
593 blksize = data->blksz + 2 * 4;
594
595 wbsd_write_index(host, WBSD_IDX_PBSMSB,
596 ((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH);
597 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
598 } else {
599 data->error = MMC_ERR_INVALID;
600 return;
601 }
602
603 /*
604 * Clear the FIFO. This is needed even for DMA
605 * transfers since the chip still uses the FIFO
606 * internally.
607 */
608 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
609 setup |= WBSD_FIFO_RESET;
610 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
611
612 /*
613 * DMA transfer?
614 */
615 if (host->dma >= 0) {
616 /*
617 * The buffer for DMA is only 64 kB.
618 */
619 BUG_ON(size > 0x10000);
620 if (size > 0x10000) {
621 data->error = MMC_ERR_INVALID;
622 return;
623 }
624
625 /*
626 * Transfer data from the SG list to
627 * the DMA buffer.
628 */
629 if (data->flags & MMC_DATA_WRITE)
630 wbsd_sg_to_dma(host, data);
631
632 /*
633 * Initialise the ISA DMA controller.
634 */
635 dmaflags = claim_dma_lock();
636 disable_dma(host->dma);
637 clear_dma_ff(host->dma);
638 if (data->flags & MMC_DATA_READ)
639 set_dma_mode(host->dma, DMA_MODE_READ & ~0x40);
640 else
641 set_dma_mode(host->dma, DMA_MODE_WRITE & ~0x40);
642 set_dma_addr(host->dma, host->dma_addr);
643 set_dma_count(host->dma, size);
644
645 enable_dma(host->dma);
646 release_dma_lock(dmaflags);
647
648 /*
649 * Enable DMA on the host.
650 */
651 wbsd_write_index(host, WBSD_IDX_DMA, WBSD_DMA_ENABLE);
652 } else {
653 /*
654 * This flag is used to keep printk
655 * output to a minimum.
656 */
657 host->firsterr = 1;
658
659 /*
660 * Initialise the SG list.
661 */
662 wbsd_init_sg(host, data);
663
664 /*
665 * Turn off DMA.
666 */
667 wbsd_write_index(host, WBSD_IDX_DMA, 0);
668
669 /*
670 * Set up FIFO threshold levels (and fill
671 * buffer if doing a write).
672 */
673 if (data->flags & MMC_DATA_READ) {
674 wbsd_write_index(host, WBSD_IDX_FIFOEN,
675 WBSD_FIFOEN_FULL | 8);
676 } else {
677 wbsd_write_index(host, WBSD_IDX_FIFOEN,
678 WBSD_FIFOEN_EMPTY | 8);
679 wbsd_fill_fifo(host);
680 }
681 }
682
683 data->error = MMC_ERR_NONE;
684}
685
/*
 * Complete a data transfer: send the stop command if any, wait for the
 * chip to leave the block-transfer state, reconcile the ISA DMA residue
 * with bytes_xfered, and hand the request back to the core via
 * wbsd_request_end().  Called with host->lock held.
 */
static void wbsd_finish_data(struct wbsd_host *host, struct mmc_data *data)
{
	unsigned long dmaflags;
	int count;
	u8 status;

	WARN_ON(host->mrq == NULL);

	/*
	 * Send a stop command if needed.
	 */
	if (data->stop)
		wbsd_send_command(host, data->stop);

	/*
	 * Wait for the controller to leave data
	 * transfer state.
	 */
	do {
		status = wbsd_read_index(host, WBSD_IDX_STATUS);
	} while (status & (WBSD_BLOCK_READ | WBSD_BLOCK_WRITE));

	/*
	 * DMA transfer?
	 */
	if (host->dma >= 0) {
		/*
		 * Disable DMA on the host.
		 */
		wbsd_write_index(host, WBSD_IDX_DMA, 0);

		/*
		 * Turn off ISA DMA controller.
		 */
		dmaflags = claim_dma_lock();
		disable_dma(host->dma);
		clear_dma_ff(host->dma);
		count = get_dma_residue(host->dma);
		release_dma_lock(dmaflags);

		/* Residue counts un-transferred bytes; round down to blocks. */
		data->bytes_xfered = host->mrq->data->blocks *
			host->mrq->data->blksz - count;
		data->bytes_xfered -= data->bytes_xfered % data->blksz;

		/*
		 * Any leftover data?
		 */
		if (count) {
			printk(KERN_ERR "%s: Incomplete DMA transfer. "
				"%d bytes left.\n",
				mmc_hostname(host->mmc), count);

			if (data->error == MMC_ERR_NONE)
				data->error = MMC_ERR_FAILED;
		} else {
			/*
			 * Transfer data from DMA buffer to
			 * SG list.
			 */
			if (data->flags & MMC_DATA_READ)
				wbsd_dma_to_sg(host, data);
		}

		/* On error, the last (possibly partial) block is suspect. */
		if (data->error != MMC_ERR_NONE) {
			if (data->bytes_xfered)
				data->bytes_xfered -= data->blksz;
		}
	}

	DBGF("Ending data transfer (%d bytes)\n", data->bytes_xfered);

	wbsd_request_end(host, host->mrq);
}
759
760/*****************************************************************************\
761 * *
762 * MMC layer callbacks *
763 * *
764\*****************************************************************************/
765
766static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
767{
768 struct wbsd_host *host = mmc_priv(mmc);
769 struct mmc_command *cmd;
770
771 /*
772 * Disable tasklets to avoid a deadlock.
773 */
774 spin_lock_bh(&host->lock);
775
776 BUG_ON(host->mrq != NULL);
777
778 cmd = mrq->cmd;
779
780 host->mrq = mrq;
781
782 /*
783 * If there is no card in the slot then
784 * timeout immediatly.
785 */
786 if (!(host->flags & WBSD_FCARD_PRESENT)) {
787 cmd->error = MMC_ERR_TIMEOUT;
788 goto done;
789 }
790
791 /*
792 * Does the request include data?
793 */
794 if (cmd->data) {
795 wbsd_prepare_data(host, cmd->data);
796
797 if (cmd->data->error != MMC_ERR_NONE)
798 goto done;
799 }
800
801 wbsd_send_command(host, cmd);
802
803 /*
804 * If this is a data transfer the request
805 * will be finished after the data has
806 * transfered.
807 */
808 if (cmd->data && (cmd->error == MMC_ERR_NONE)) {
809 /*
810 * The hardware is so delightfully stupid that it has a list
811 * of "data" commands. If a command isn't on this list, it'll
812 * just go back to the idle state and won't send any data
813 * interrupts.
814 */
815 switch (cmd->opcode) {
816 case 11:
817 case 17:
818 case 18:
819 case 20:
820 case 24:
821 case 25:
822 case 26:
823 case 27:
824 case 30:
825 case 42:
826 case 56:
827 break;
828
829 /* ACMDs. We don't keep track of state, so we just treat them
830 * like any other command. */
831 case 51:
832 break;
833
834 default:
835#ifdef CONFIG_MMC_DEBUG
836 printk(KERN_WARNING "%s: Data command %d is not "
837 "supported by this controller.\n",
838 mmc_hostname(host->mmc), cmd->opcode);
839#endif
840 cmd->data->error = MMC_ERR_INVALID;
841
842 if (cmd->data->stop)
843 wbsd_send_command(host, cmd->data->stop);
844
845 goto done;
846 };
847
848 /*
849 * Dirty fix for hardware bug.
850 */
851 if (host->dma == -1)
852 tasklet_schedule(&host->fifo_tasklet);
853
854 spin_unlock_bh(&host->lock);
855
856 return;
857 }
858
859done:
860 wbsd_request_end(host, mrq);
861
862 spin_unlock_bh(&host->lock);
863}
864
/*
 * MMC core callback: apply clock, power, chip-select, and bus-width
 * settings.  Resets the chip on power-off to clear any odd state.
 */
static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct wbsd_host *host = mmc_priv(mmc);
	u8 clk, setup, pwr;

	spin_lock_bh(&host->lock);

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF)
		wbsd_init_device(host);

	/* Map the requested frequency to the nearest chip clock <= it. */
	if (ios->clock >= 24000000)
		clk = WBSD_CLK_24M;
	else if (ios->clock >= 16000000)
		clk = WBSD_CLK_16M;
	else if (ios->clock >= 12000000)
		clk = WBSD_CLK_12M;
	else
		clk = WBSD_CLK_375K;

	/*
	 * Only write to the clock register when
	 * there is an actual change.
	 */
	if (clk != host->clk) {
		wbsd_write_index(host, WBSD_IDX_CLK, clk);
		host->clk = clk;
	}

	/*
	 * Power up card.
	 */
	if (ios->power_mode != MMC_POWER_OFF) {
		pwr = inb(host->base + WBSD_CSR);
		pwr &= ~WBSD_POWER_N;
		outb(pwr, host->base + WBSD_CSR);
	}

	/*
	 * MMC cards need to have pin 1 high during init.
	 * It wreaks havoc with the card detection though so
	 * that needs to be disabled.
	 */
	setup = wbsd_read_index(host, WBSD_IDX_SETUP);
	if (ios->chip_select == MMC_CS_HIGH) {
		BUG_ON(ios->bus_width != MMC_BUS_WIDTH_1);
		setup |= WBSD_DAT3_H;
		host->flags |= WBSD_FIGNORE_DETECT;
	} else {
		if (setup & WBSD_DAT3_H) {
			setup &= ~WBSD_DAT3_H;

			/*
			 * We cannot resume card detection immediately
			 * because of capacitance and delays in the chip.
			 */
			mod_timer(&host->ignore_timer, jiffies + HZ / 100);
		}
	}
	wbsd_write_index(host, WBSD_IDX_SETUP, setup);

	/*
	 * Store bus width for later. Will be used when
	 * setting up the data transfer.
	 */
	host->bus_width = ios->bus_width;

	spin_unlock_bh(&host->lock);
}
937
938static int wbsd_get_ro(struct mmc_host *mmc)
939{
940 struct wbsd_host *host = mmc_priv(mmc);
941 u8 csr;
942
943 spin_lock_bh(&host->lock);
944
945 csr = inb(host->base + WBSD_CSR);
946 csr |= WBSD_MSLED;
947 outb(csr, host->base + WBSD_CSR);
948
949 mdelay(1);
950
951 csr = inb(host->base + WBSD_CSR);
952 csr &= ~WBSD_MSLED;
953 outb(csr, host->base + WBSD_CSR);
954
955 spin_unlock_bh(&host->lock);
956
957 return csr & WBSD_WRPT;
958}
959
/* Host operations handed to the MMC core at registration time. */
static const struct mmc_host_ops wbsd_ops = {
	.request	= wbsd_request,
	.set_ios	= wbsd_set_ios,
	.get_ro		= wbsd_get_ro,
};
965
966/*****************************************************************************\
967 * *
968 * Interrupt handling *
969 * *
970\*****************************************************************************/
971
972/*
973 * Helper function to reset detection ignore
974 */
975
/*
 * Timer callback: re-enable card detection after the DAT3-high blackout
 * period set up in wbsd_set_ios(), and re-check card status since it may
 * have changed while detection was ignored.
 */
static void wbsd_reset_ignore(unsigned long data)
{
	struct wbsd_host *host = (struct wbsd_host *)data;

	BUG_ON(host == NULL);

	DBG("Resetting card detection ignore\n");

	spin_lock_bh(&host->lock);

	host->flags &= ~WBSD_FIGNORE_DETECT;

	/*
	 * Card status might have changed during the
	 * blackout.
	 */
	tasklet_schedule(&host->card_tasklet);

	spin_unlock_bh(&host->lock);
}
996
997/*
998 * Tasklets
999 */
1000
1001static inline struct mmc_data *wbsd_get_data(struct wbsd_host *host)
1002{
1003 WARN_ON(!host->mrq);
1004 if (!host->mrq)
1005 return NULL;
1006
1007 WARN_ON(!host->mrq->cmd);
1008 if (!host->mrq->cmd)
1009 return NULL;
1010
1011 WARN_ON(!host->mrq->cmd->data);
1012 if (!host->mrq->cmd->data)
1013 return NULL;
1014
1015 return host->mrq->cmd->data;
1016}
1017
/*
 * Card-detect tasklet: sample the presence bit, update host state, abort
 * any in-flight request on removal, and notify the MMC core with a debounce
 * delay (longer on insertion to let the card settle).
 */
static void wbsd_tasklet_card(unsigned long param)
{
	struct wbsd_host *host = (struct wbsd_host *)param;
	u8 csr;
	int delay = -1;

	spin_lock(&host->lock);

	if (host->flags & WBSD_FIGNORE_DETECT) {
		spin_unlock(&host->lock);
		return;
	}

	csr = inb(host->base + WBSD_CSR);
	WARN_ON(csr == 0xff);	/* 0xff usually means the chip is gone */

	if (csr & WBSD_CARDPRESENT) {
		if (!(host->flags & WBSD_FCARD_PRESENT)) {
			DBG("Card inserted\n");
			host->flags |= WBSD_FCARD_PRESENT;

			delay = 500;
		}
	} else if (host->flags & WBSD_FCARD_PRESENT) {
		DBG("Card removed\n");
		host->flags &= ~WBSD_FCARD_PRESENT;

		if (host->mrq) {
			printk(KERN_ERR "%s: Card removed during transfer!\n",
				mmc_hostname(host->mmc));
			wbsd_reset(host);

			host->mrq->cmd->error = MMC_ERR_FAILED;
			tasklet_schedule(&host->finish_tasklet);
		}

		delay = 0;
	}

	/*
	 * Unlock first since we might get a call back.
	 */

	spin_unlock(&host->lock);

	if (delay != -1)
		mmc_detect_change(host->mmc, msecs_to_jiffies(delay));
}
1066
/*
 * FIFO tasklet: move data between the chip FIFO and the scatter/gather
 * list in whichever direction the current transfer requires, then hand
 * off to the finish tasklet once the SG list is exhausted.
 */
static void wbsd_tasklet_fifo(unsigned long param)
{
	struct wbsd_host *host = (struct wbsd_host *)param;
	struct mmc_data *data;

	spin_lock(&host->lock);

	if (!host->mrq)
		goto end;

	data = wbsd_get_data(host);
	if (!data)
		goto end;

	if (data->flags & MMC_DATA_WRITE)
		wbsd_fill_fifo(host);
	else
		wbsd_empty_fifo(host);

	/*
	 * Done?
	 */
	if (host->num_sg == 0) {
		wbsd_write_index(host, WBSD_IDX_FIFOEN, 0);
		tasklet_schedule(&host->finish_tasklet);
	}

end:
	spin_unlock(&host->lock);
}
1097
1098static void wbsd_tasklet_crc(unsigned long param)
1099{
1100 struct wbsd_host *host = (struct wbsd_host *)param;
1101 struct mmc_data *data;
1102
1103 spin_lock(&host->lock);
1104
1105 if (!host->mrq)
1106 goto end;
1107
1108 data = wbsd_get_data(host);
1109 if (!data)
1110 goto end;
1111
1112 DBGF("CRC error\n");
1113
1114 data->error = MMC_ERR_BADCRC;
1115
1116 tasklet_schedule(&host->finish_tasklet);
1117
1118end:
1119 spin_unlock(&host->lock);
1120}
1121
1122static void wbsd_tasklet_timeout(unsigned long param)
1123{
1124 struct wbsd_host *host = (struct wbsd_host *)param;
1125 struct mmc_data *data;
1126
1127 spin_lock(&host->lock);
1128
1129 if (!host->mrq)
1130 goto end;
1131
1132 data = wbsd_get_data(host);
1133 if (!data)
1134 goto end;
1135
1136 DBGF("Timeout\n");
1137
1138 data->error = MMC_ERR_TIMEOUT;
1139
1140 tasklet_schedule(&host->finish_tasklet);
1141
1142end:
1143 spin_unlock(&host->lock);
1144}
1145
1146static void wbsd_tasklet_finish(unsigned long param)
1147{
1148 struct wbsd_host *host = (struct wbsd_host *)param;
1149 struct mmc_data *data;
1150
1151 spin_lock(&host->lock);
1152
1153 WARN_ON(!host->mrq);
1154 if (!host->mrq)
1155 goto end;
1156
1157 data = wbsd_get_data(host);
1158 if (!data)
1159 goto end;
1160
1161 wbsd_finish_data(host, data);
1162
1163end:
1164 spin_unlock(&host->lock);
1165}
1166
1167/*
1168 * Interrupt handling
1169 */
1170
/*
 * Interrupt handler: read and accumulate the chip's interrupt status,
 * then defer all real work to tasklets.  CRC and timeout tasklets run at
 * high priority so the error is recorded before the FIFO/finish tasklets
 * act on the transfer.
 */
static irqreturn_t wbsd_irq(int irq, void *dev_id)
{
	struct wbsd_host *host = dev_id;
	int isr;

	isr = inb(host->base + WBSD_ISR);

	/*
	 * Was it actually our hardware that caused the interrupt?
	 * (0x00 = nothing pending, 0xff = chip absent on a shared line)
	 */
	if (isr == 0xff || isr == 0x00)
		return IRQ_NONE;

	/* Accumulate events for wbsd_send_command()'s post-mortem. */
	host->isr |= isr;

	/*
	 * Schedule tasklets as needed.
	 */
	if (isr & WBSD_INT_CARD)
		tasklet_schedule(&host->card_tasklet);
	if (isr & WBSD_INT_FIFO_THRE)
		tasklet_schedule(&host->fifo_tasklet);
	if (isr & WBSD_INT_CRC)
		tasklet_hi_schedule(&host->crc_tasklet);
	if (isr & WBSD_INT_TIMEOUT)
		tasklet_hi_schedule(&host->timeout_tasklet);
	if (isr & WBSD_INT_TC)
		tasklet_schedule(&host->finish_tasklet);

	return IRQ_HANDLED;
}
1202
1203/*****************************************************************************\
1204 * *
1205 * Device initialisation and shutdown *
1206 * *
1207\*****************************************************************************/
1208
1209/*
1210 * Allocate/free MMC structure.
1211 */
1212
/*
 * Allocate and pre-configure the mmc_host / wbsd_host pair for a device
 * and stash it in the device's drvdata.  Returns 0 or -ENOMEM.
 */
static int __devinit wbsd_alloc_mmc(struct device *dev)
{
	struct mmc_host *mmc;
	struct wbsd_host *host;

	/*
	 * Allocate MMC structure.
	 */
	mmc = mmc_alloc_host(sizeof(struct wbsd_host), dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->dma = -1;		/* no DMA channel claimed yet */

	/*
	 * Set host parameters.
	 */
	mmc->ops = &wbsd_ops;
	mmc->f_min = 375000;
	mmc->f_max = 24000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_BYTEBLOCK;

	spin_lock_init(&host->lock);

	/*
	 * Set up timers
	 */
	init_timer(&host->ignore_timer);
	host->ignore_timer.data = (unsigned long)host;
	host->ignore_timer.function = wbsd_reset_ignore;

	/*
	 * Maximum number of segments. Worst case is one sector per segment
	 * so this will be 64kB/512.
	 */
	mmc->max_hw_segs = 128;
	mmc->max_phys_segs = 128;

	/*
	 * Maximum request size. Also limited by 64KiB buffer.
	 */
	mmc->max_req_size = 65536;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Maximum block size. We have 12 bits (= 4095) but have to subtract
	 * space for CRC. So the maximum is 4095 - 4*2 = 4087.
	 */
	mmc->max_blk_size = 4087;

	/*
	 * Maximum block count. There is no real limit so the maximum
	 * request size will be the only restriction.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	dev_set_drvdata(dev, mmc);

	return 0;
}
1282
1283static void __devexit wbsd_free_mmc(struct device *dev)
1284{
1285 struct mmc_host *mmc;
1286 struct wbsd_host *host;
1287
1288 mmc = dev_get_drvdata(dev);
1289 if (!mmc)
1290 return;
1291
1292 host = mmc_priv(mmc);
1293 BUG_ON(host == NULL);
1294
1295 del_timer_sync(&host->ignore_timer);
1296
1297 mmc_free_host(mmc);
1298
1299 dev_set_drvdata(dev, NULL);
1300}
1301
1302/*
1303 * Scan for known chip id:s
1304 */
1305
/*
 * Probe the known Super-I/O configuration ports with each unlock code,
 * looking for a chip id we recognise.  On success host->config /
 * unlock_code / chip_id are set and the config port region stays claimed
 * (released later by wbsd_release_regions()).  Returns 0 or -ENODEV.
 */
static int __devinit wbsd_scan(struct wbsd_host *host)
{
	int i, j, k;
	int id;

	/*
	 * Iterate through all ports, all codes to
	 * find hardware that is in our known list.
	 */
	for (i = 0; i < ARRAY_SIZE(config_ports); i++) {
		if (!request_region(config_ports[i], 2, DRIVER_NAME))
			continue;

		for (j = 0; j < ARRAY_SIZE(unlock_codes); j++) {
			id = 0xFFFF;

			host->config = config_ports[i];
			host->unlock_code = unlock_codes[j];

			wbsd_unlock_config(host);

			/* Chip id is read as two bytes, high then low. */
			outb(WBSD_CONF_ID_HI, config_ports[i]);
			id = inb(config_ports[i] + 1) << 8;

			outb(WBSD_CONF_ID_LO, config_ports[i]);
			id |= inb(config_ports[i] + 1);

			wbsd_lock_config(host);

			for (k = 0; k < ARRAY_SIZE(valid_ids); k++) {
				if (id == valid_ids[k]) {
					host->chip_id = id;

					/* Keep the region; released on teardown. */
					return 0;
				}
			}

			if (id != 0xFFFF) {
				DBG("Unknown hardware (id %x) found at %x\n",
					id, config_ports[i]);
			}
		}

		release_region(config_ports[i], 2);
	}

	host->config = 0;
	host->unlock_code = 0;

	return -ENODEV;
}
1357
1358/*
1359 * Allocate/free io port ranges
1360 */
1361
1362static int __devinit wbsd_request_region(struct wbsd_host *host, int base)
1363{
1364 if (base & 0x7)
1365 return -EINVAL;
1366
1367 if (!request_region(base, 8, DRIVER_NAME))
1368 return -EIO;
1369
1370 host->base = base;
1371
1372 return 0;
1373}
1374
1375static void __devexit wbsd_release_regions(struct wbsd_host *host)
1376{
1377 if (host->base)
1378 release_region(host->base, 8);
1379
1380 host->base = 0;
1381
1382 if (host->config)
1383 release_region(host->config, 2);
1384
1385 host->config = 0;
1386}
1387
1388/*
1389 * Allocate/free DMA port and buffer
1390 */
1391
/*
 * Claim an ISA DMA channel and set up a bounce buffer for it.
 *
 * DMA is strictly optional: any failure prints a warning and the
 * driver falls back to FIFO transfers (host->dma is only set on full
 * success).  A negative channel means "no DMA requested" and is
 * accepted silently.
 */
static void __devinit wbsd_request_dma(struct wbsd_host *host, int dma)
{
	if (dma < 0)
		return;

	if (request_dma(dma, DRIVER_NAME))
		goto err;

	/*
	 * We need to allocate a special buffer in
	 * order for ISA to be able to DMA to it.
	 * GFP_DMA restricts the allocation to ISA-reachable memory.
	 */
	host->dma_buffer = kmalloc(WBSD_DMA_SIZE,
		GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN);
	if (!host->dma_buffer)
		goto free;

	/*
	 * Translate the address to a physical address.
	 */
	host->dma_addr = dma_map_single(mmc_dev(host->mmc), host->dma_buffer,
		WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);

	/*
	 * ISA DMA must be aligned on a 64k basis.
	 */
	if ((host->dma_addr & 0xffff) != 0)
		goto kfree;
	/*
	 * ISA cannot access memory above 16 MB.
	 */
	else if (host->dma_addr >= 0x1000000)
		goto kfree;

	host->dma = dma;

	return;

kfree:
	/*
	 * If we've gotten here then there is some kind of alignment bug
	 */
	BUG_ON(1);

	/*
	 * NOTE(review): the statements below are unreachable after the
	 * BUG_ON(1) above; they document the intended unwind order
	 * should the BUG_ON ever be removed.
	 */
	dma_unmap_single(mmc_dev(host->mmc), host->dma_addr,
		WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
	host->dma_addr = (dma_addr_t)NULL;

	kfree(host->dma_buffer);
	host->dma_buffer = NULL;

free:
	free_dma(dma);

err:
	printk(KERN_WARNING DRIVER_NAME ": Unable to allocate DMA %d. "
		"Falling back on FIFO.\n", dma);
}
1450
1451static void __devexit wbsd_release_dma(struct wbsd_host *host)
1452{
1453 if (host->dma_addr) {
1454 dma_unmap_single(mmc_dev(host->mmc), host->dma_addr,
1455 WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
1456 }
1457 kfree(host->dma_buffer);
1458 if (host->dma >= 0)
1459 free_dma(host->dma);
1460
1461 host->dma = -1;
1462 host->dma_buffer = NULL;
1463 host->dma_addr = (dma_addr_t)NULL;
1464}
1465
1466/*
1467 * Allocate/free IRQ.
1468 */
1469
1470static int __devinit wbsd_request_irq(struct wbsd_host *host, int irq)
1471{
1472 int ret;
1473
1474 /*
1475 * Allocate interrupt.
1476 */
1477
1478 ret = request_irq(irq, wbsd_irq, IRQF_SHARED, DRIVER_NAME, host);
1479 if (ret)
1480 return ret;
1481
1482 host->irq = irq;
1483
1484 /*
1485 * Set up tasklets.
1486 */
1487 tasklet_init(&host->card_tasklet, wbsd_tasklet_card,
1488 (unsigned long)host);
1489 tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo,
1490 (unsigned long)host);
1491 tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc,
1492 (unsigned long)host);
1493 tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout,
1494 (unsigned long)host);
1495 tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish,
1496 (unsigned long)host);
1497
1498 return 0;
1499}
1500
1501static void __devexit wbsd_release_irq(struct wbsd_host *host)
1502{
1503 if (!host->irq)
1504 return;
1505
1506 free_irq(host->irq, host);
1507
1508 host->irq = 0;
1509
1510 tasklet_kill(&host->card_tasklet);
1511 tasklet_kill(&host->fifo_tasklet);
1512 tasklet_kill(&host->crc_tasklet);
1513 tasklet_kill(&host->timeout_tasklet);
1514 tasklet_kill(&host->finish_tasklet);
1515}
1516
1517/*
1518 * Allocate all resources for the host.
1519 */
1520
1521static int __devinit wbsd_request_resources(struct wbsd_host *host,
1522 int base, int irq, int dma)
1523{
1524 int ret;
1525
1526 /*
1527 * Allocate I/O ports.
1528 */
1529 ret = wbsd_request_region(host, base);
1530 if (ret)
1531 return ret;
1532
1533 /*
1534 * Allocate interrupt.
1535 */
1536 ret = wbsd_request_irq(host, irq);
1537 if (ret)
1538 return ret;
1539
1540 /*
1541 * Allocate DMA.
1542 */
1543 wbsd_request_dma(host, dma);
1544
1545 return 0;
1546}
1547
/*
 * Release all resources for the host.
 *
 * Runs in reverse order of wbsd_request_resources(); each helper is a
 * no-op for resources that were never acquired, so this is safe on a
 * partially initialised host.
 */
static void __devexit wbsd_release_resources(struct wbsd_host *host)
{
	wbsd_release_dma(host);
	wbsd_release_irq(host);
	wbsd_release_regions(host);
}
1558
/*
 * Program the chip's configuration space with the resources we hold
 * (I/O base, IRQ and optional DMA channel), then enable and power up
 * the SD/MMC function.  The write order below is deliberate: reset
 * first, select the function, then assign resources.
 */

static void wbsd_chip_config(struct wbsd_host *host)
{
	wbsd_unlock_config(host);

	/*
	 * Reset the chip.
	 */
	wbsd_write_config(host, WBSD_CONF_SWRST, 1);
	wbsd_write_config(host, WBSD_CONF_SWRST, 0);

	/*
	 * Select SD/MMC function.
	 */
	wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);

	/*
	 * Set up card detection.
	 */
	wbsd_write_config(host, WBSD_CONF_PINS, WBSD_PINS_DETECT_GP11);

	/*
	 * Configure chip
	 */
	wbsd_write_config(host, WBSD_CONF_PORT_HI, host->base >> 8);
	wbsd_write_config(host, WBSD_CONF_PORT_LO, host->base & 0xff);

	wbsd_write_config(host, WBSD_CONF_IRQ, host->irq);

	if (host->dma >= 0)
		wbsd_write_config(host, WBSD_CONF_DRQ, host->dma);

	/*
	 * Enable and power up chip.
	 * NOTE(review): 0x20 is presumably a chip-specific power mode
	 * value -- confirm against the W83L51xD datasheet.
	 */
	wbsd_write_config(host, WBSD_CONF_ENABLE, 1);
	wbsd_write_config(host, WBSD_CONF_POWER, 0x20);

	wbsd_lock_config(host);
}
1602
1603/*
1604 * Check that configured resources are correct.
1605 */
1606
1607static int wbsd_chip_validate(struct wbsd_host *host)
1608{
1609 int base, irq, dma;
1610
1611 wbsd_unlock_config(host);
1612
1613 /*
1614 * Select SD/MMC function.
1615 */
1616 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1617
1618 /*
1619 * Read configuration.
1620 */
1621 base = wbsd_read_config(host, WBSD_CONF_PORT_HI) << 8;
1622 base |= wbsd_read_config(host, WBSD_CONF_PORT_LO);
1623
1624 irq = wbsd_read_config(host, WBSD_CONF_IRQ);
1625
1626 dma = wbsd_read_config(host, WBSD_CONF_DRQ);
1627
1628 wbsd_lock_config(host);
1629
1630 /*
1631 * Validate against given configuration.
1632 */
1633 if (base != host->base)
1634 return 0;
1635 if (irq != host->irq)
1636 return 0;
1637 if ((dma != host->dma) && (host->dma != -1))
1638 return 0;
1639
1640 return 1;
1641}
1642
/*
 * Powers down the SD function.
 *
 * Selects the SD/MMC logical device in configuration space and clears
 * its enable bit; used on shutdown and before suspend.
 */

static void wbsd_chip_poweroff(struct wbsd_host *host)
{
	wbsd_unlock_config(host);

	wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
	wbsd_write_config(host, WBSD_CONF_ENABLE, 0);

	wbsd_lock_config(host);
}
1656
1657/*****************************************************************************\
1658 * *
1659 * Devices setup and shutdown *
1660 * *
1661\*****************************************************************************/
1662
/*
 * Create and register an MMC host for one W83L51xD device.
 *
 * @dev:  underlying platform or PnP device
 * @base: I/O port base (must be 8 byte aligned)
 * @irq:  interrupt line
 * @dma:  ISA DMA channel, or -1 for FIFO-only operation
 * @pnp:  non-zero when the resources were assigned by the PnP layer
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated here is released again.
 */
static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma,
	int pnp)
{
	struct wbsd_host *host = NULL;
	struct mmc_host *mmc = NULL;
	int ret;

	ret = wbsd_alloc_mmc(dev);
	if (ret)
		return ret;

	mmc = dev_get_drvdata(dev);
	host = mmc_priv(mmc);

	/*
	 * Scan for hardware.  With PnP we trust the BIOS enough to keep
	 * going when the chip cannot be positively identified; otherwise
	 * a failed scan aborts the probe.
	 */
	ret = wbsd_scan(host);
	if (ret) {
		if (pnp && (ret == -ENODEV)) {
			printk(KERN_WARNING DRIVER_NAME
				": Unable to confirm device presence. You may "
				"experience lock-ups.\n");
		} else {
			wbsd_free_mmc(dev);
			return ret;
		}
	}

	/*
	 * Request resources.
	 */
	ret = wbsd_request_resources(host, base, irq, dma);
	if (ret) {
		wbsd_release_resources(host);
		wbsd_free_mmc(dev);
		return ret;
	}

	/*
	 * See if chip needs to be configured.  Under PnP the BIOS should
	 * already have done so; we only reprogram it when validation
	 * fails (buggy BIOS).
	 */
	if (pnp) {
		if ((host->config != 0) && !wbsd_chip_validate(host)) {
			printk(KERN_WARNING DRIVER_NAME
				": PnP active but chip not configured! "
				"You probably have a buggy BIOS. "
				"Configuring chip manually.\n");
			wbsd_chip_config(host);
		}
	} else
		wbsd_chip_config(host);

	/*
	 * Power Management stuff. No idea how this works.
	 * Not tested.
	 */
#ifdef CONFIG_PM
	if (host->config) {
		wbsd_unlock_config(host);
		wbsd_write_config(host, WBSD_CONF_PME, 0xA0);
		wbsd_lock_config(host);
	}
#endif
	/*
	 * Allow device to initialise itself properly.
	 */
	mdelay(5);

	/*
	 * Reset the chip into a known state.
	 */
	wbsd_init_device(host);

	mmc_add_host(mmc);

	/* Summarise the configuration on one log line. */
	printk(KERN_INFO "%s: W83L51xD", mmc_hostname(mmc));
	if (host->chip_id != 0)
		printk(" id %x", (int)host->chip_id);
	printk(" at 0x%x irq %d", (int)host->base, (int)host->irq);
	if (host->dma >= 0)
		printk(" dma %d", (int)host->dma);
	else
		printk(" FIFO");
	if (pnp)
		printk(" PnP");
	printk("\n");

	return 0;
}
1753
1754static void __devexit wbsd_shutdown(struct device *dev, int pnp)
1755{
1756 struct mmc_host *mmc = dev_get_drvdata(dev);
1757 struct wbsd_host *host;
1758
1759 if (!mmc)
1760 return;
1761
1762 host = mmc_priv(mmc);
1763
1764 mmc_remove_host(mmc);
1765
1766 /*
1767 * Power down the SD/MMC function.
1768 */
1769 if (!pnp)
1770 wbsd_chip_poweroff(host);
1771
1772 wbsd_release_resources(host);
1773
1774 wbsd_free_mmc(dev);
1775}
1776
1777/*
1778 * Non-PnP
1779 */
1780
/* Non-PnP probe: resources come from the io/irq/dma module parameters. */
static int __devinit wbsd_probe(struct platform_device *dev)
{
	/* Use the module parameters for resources */
	return wbsd_init(&dev->dev, io, irq, dma, 0);
}
1786
/* Non-PnP removal: pnp==0, so the chip is powered down on the way out. */
static int __devexit wbsd_remove(struct platform_device *dev)
{
	wbsd_shutdown(&dev->dev, 0);

	return 0;
}
1793
1794/*
1795 * PnP
1796 */
1797
1798#ifdef CONFIG_PNP
1799
1800static int __devinit
1801wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id)
1802{
1803 int io, irq, dma;
1804
1805 /*
1806 * Get resources from PnP layer.
1807 */
1808 io = pnp_port_start(pnpdev, 0);
1809 irq = pnp_irq(pnpdev, 0);
1810 if (pnp_dma_valid(pnpdev, 0))
1811 dma = pnp_dma(pnpdev, 0);
1812 else
1813 dma = -1;
1814
1815 DBGF("PnP resources: port %3x irq %d dma %d\n", io, irq, dma);
1816
1817 return wbsd_init(&pnpdev->dev, io, irq, dma, 1);
1818}
1819
/* PnP removal: pnp==1, so the chip is left powered for the BIOS. */
static void __devexit wbsd_pnp_remove(struct pnp_dev *dev)
{
	wbsd_shutdown(&dev->dev, 1);
}
1824
1825#endif /* CONFIG_PNP */
1826
1827/*
1828 * Power management
1829 */
1830
1831#ifdef CONFIG_PM
1832
/* Suspend helper shared by the platform and PnP paths. */
static int wbsd_suspend(struct wbsd_host *host, pm_message_t state)
{
	BUG_ON(host == NULL);

	return mmc_suspend_host(host->mmc, state);
}
1839
/* Resume helper: reset the chip to a known state, then resume the MMC layer. */
static int wbsd_resume(struct wbsd_host *host)
{
	BUG_ON(host == NULL);

	wbsd_init_device(host);

	return mmc_resume_host(host->mmc);
}
1848
1849static int wbsd_platform_suspend(struct platform_device *dev,
1850 pm_message_t state)
1851{
1852 struct mmc_host *mmc = platform_get_drvdata(dev);
1853 struct wbsd_host *host;
1854 int ret;
1855
1856 if (mmc == NULL)
1857 return 0;
1858
1859 DBGF("Suspending...\n");
1860
1861 host = mmc_priv(mmc);
1862
1863 ret = wbsd_suspend(host, state);
1864 if (ret)
1865 return ret;
1866
1867 wbsd_chip_poweroff(host);
1868
1869 return 0;
1870}
1871
/*
 * Platform (non-PnP) resume: reprogram the chip's configuration space
 * (it was powered off in suspend), let it settle, then resume.
 */
static int wbsd_platform_resume(struct platform_device *dev)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);

	if (!mmc)
		return 0;

	DBGF("Resuming...\n");

	wbsd_chip_config(mmc_priv(mmc));

	/* Give the hardware a moment to initialise itself properly. */
	mdelay(5);

	return wbsd_resume(mmc_priv(mmc));
}
1893
1894#ifdef CONFIG_PNP
1895
1896static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state)
1897{
1898 struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev);
1899 struct wbsd_host *host;
1900
1901 if (mmc == NULL)
1902 return 0;
1903
1904 DBGF("Suspending...\n");
1905
1906 host = mmc_priv(mmc);
1907
1908 return wbsd_suspend(host, state);
1909}
1910
1911static int wbsd_pnp_resume(struct pnp_dev *pnp_dev)
1912{
1913 struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev);
1914 struct wbsd_host *host;
1915
1916 if (mmc == NULL)
1917 return 0;
1918
1919 DBGF("Resuming...\n");
1920
1921 host = mmc_priv(mmc);
1922
1923 /*
1924 * See if chip needs to be configured.
1925 */
1926 if (host->config != 0) {
1927 if (!wbsd_chip_validate(host)) {
1928 printk(KERN_WARNING DRIVER_NAME
1929 ": PnP active but chip not configured! "
1930 "You probably have a buggy BIOS. "
1931 "Configuring chip manually.\n");
1932 wbsd_chip_config(host);
1933 }
1934 }
1935
1936 /*
1937 * Allow device to initialise itself properly.
1938 */
1939 mdelay(5);
1940
1941 return wbsd_resume(host);
1942}
1943
1944#endif /* CONFIG_PNP */
1945
1946#else /* CONFIG_PM */
1947
1948#define wbsd_platform_suspend NULL
1949#define wbsd_platform_resume NULL
1950
1951#define wbsd_pnp_suspend NULL
1952#define wbsd_pnp_resume NULL
1953
1954#endif /* CONFIG_PM */
1955
/* Platform device created manually when running in non-PnP (nopnp) mode. */
static struct platform_device *wbsd_device;

/* Non-PnP driver: probes with resources from the module parameters. */
static struct platform_driver wbsd_driver = {
	.probe		= wbsd_probe,
	.remove		= __devexit_p(wbsd_remove),

	.suspend	= wbsd_platform_suspend,
	.resume		= wbsd_platform_resume,
	.driver		= {
		.name	= DRIVER_NAME,
	},
};
1968
1969#ifdef CONFIG_PNP
1970
/* PnP driver: resources are assigned by the PnP layer / BIOS. */
static struct pnp_driver wbsd_pnp_driver = {
	.name		= DRIVER_NAME,
	.id_table	= pnp_dev_table,
	.probe		= wbsd_pnp_probe,
	.remove		= __devexit_p(wbsd_pnp_remove),

	.suspend	= wbsd_pnp_suspend,
	.resume		= wbsd_pnp_resume,
};
1980
1981#endif /* CONFIG_PNP */
1982
1983/*
1984 * Module loading/unloading
1985 */
1986
/*
 * Module entry point.  By default the device is discovered through
 * the PnP layer; with nopnp set, a platform device is created
 * manually and probed with the io/irq/dma module parameters.
 */
static int __init wbsd_drv_init(void)
{
	int result;

	printk(KERN_INFO DRIVER_NAME
		": Winbond W83L51xD SD/MMC card interface driver\n");
	printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

#ifdef CONFIG_PNP

	if (!nopnp) {
		result = pnp_register_driver(&wbsd_pnp_driver);
		if (result < 0)
			return result;
	}
#endif /* CONFIG_PNP */

	/* NOTE(review): nopnp is declared above this chunk; presumably it
	 * defaults to set when CONFIG_PNP is off -- confirm. */
	if (nopnp) {
		result = platform_driver_register(&wbsd_driver);
		if (result < 0)
			return result;

		wbsd_device = platform_device_alloc(DRIVER_NAME, -1);
		if (!wbsd_device) {
			/* Unwind the driver registration on failure. */
			platform_driver_unregister(&wbsd_driver);
			return -ENOMEM;
		}

		result = platform_device_add(wbsd_device);
		if (result) {
			platform_device_put(wbsd_device);
			platform_driver_unregister(&wbsd_driver);
			return result;
		}
	}

	return 0;
}
2025
/*
 * Module exit point: unregister whichever driver flavour (PnP or
 * manual platform device) wbsd_drv_init() set up.
 */
static void __exit wbsd_drv_exit(void)
{
#ifdef CONFIG_PNP

	if (!nopnp)
		pnp_unregister_driver(&wbsd_pnp_driver);

#endif /* CONFIG_PNP */

	if (nopnp) {
		platform_device_unregister(wbsd_device);

		platform_driver_unregister(&wbsd_driver);
	}

	DBG("unloaded\n");
}
2043
module_init(wbsd_drv_init);
module_exit(wbsd_drv_exit);
/* Module parameters; 0444 makes them read-only via sysfs after load. */
#ifdef CONFIG_PNP
module_param(nopnp, uint, 0444);
#endif
module_param(io, uint, 0444);
module_param(irq, uint, 0444);
module_param(dma, int, 0444);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver");

#ifdef CONFIG_PNP
MODULE_PARM_DESC(nopnp, "Scan for device instead of relying on PNP. (default 0)");
#endif
MODULE_PARM_DESC(io, "I/O base to allocate. Must be 8 byte aligned. (default 0x248)");
MODULE_PARM_DESC(irq, "IRQ to allocate. (default 6)");
MODULE_PARM_DESC(dma, "DMA channel to allocate. -1 for no DMA. (default 2)");
diff --git a/drivers/mmc/host/wbsd.h b/drivers/mmc/host/wbsd.h
new file mode 100644
index 000000000000..873bda1e59b4
--- /dev/null
+++ b/drivers/mmc/host/wbsd.h
@@ -0,0 +1,185 @@
/*
 * linux/drivers/mmc/wbsd.h - Winbond W83L51xD SD/MMC driver
 *
 * Copyright (C) 2004-2007 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

/* Value written to lock the configuration registers (the per-chip
 * unlock code lives in wbsd_host.unlock_code). */
#define LOCK_CODE		0xAA

/* PnP configuration-space register indices (accessed through the
 * config port pair; see wbsd_read_config/wbsd_write_config). */
#define WBSD_CONF_SWRST		0x02
#define WBSD_CONF_DEVICE	0x07
#define WBSD_CONF_ID_HI		0x20
#define WBSD_CONF_ID_LO		0x21
#define WBSD_CONF_POWER		0x22
#define WBSD_CONF_PME		0x23
#define WBSD_CONF_PMES		0x24

/* Resource assignment registers: enable, I/O base, IRQ, DMA channel. */
#define WBSD_CONF_ENABLE	0x30
#define WBSD_CONF_PORT_HI	0x60
#define WBSD_CONF_PORT_LO	0x61
#define WBSD_CONF_IRQ		0x70
#define WBSD_CONF_DRQ		0x74

#define WBSD_CONF_PINS		0xF0

/* Logical device number of the SD/MMC function (WBSD_CONF_DEVICE). */
#define DEVICE_SD		0x03

/* WBSD_CONF_PINS bits: DAT3/GP11 routing and card-detect source. */
#define WBSD_PINS_DAT3_HI	0x20
#define WBSD_PINS_DAT3_OUT	0x10
#define WBSD_PINS_GP11_HI	0x04
#define WBSD_PINS_DETECT_GP11	0x02
#define WBSD_PINS_DETECT_DAT3	0x01

/* Register offsets within the 8-byte I/O window at host->base. */
#define WBSD_CMDR		0x00
#define WBSD_DFR		0x01
#define WBSD_EIR		0x02
#define WBSD_ISR		0x03
#define WBSD_FSR		0x04
#define WBSD_IDXR		0x05
#define WBSD_DATAR		0x06
#define WBSD_CSR		0x07

/* Interrupt enable bits (EIR). */
#define WBSD_EINT_CARD		0x40
#define WBSD_EINT_FIFO_THRE	0x20
#define WBSD_EINT_CRC		0x10
#define WBSD_EINT_TIMEOUT	0x08
#define WBSD_EINT_PROGEND	0x04
#define WBSD_EINT_BUSYEND	0x02
#define WBSD_EINT_TC		0x01

/* Interrupt status bits (ISR); mirror the enable bits above. */
#define WBSD_INT_PENDING	0x80
#define WBSD_INT_CARD		0x40
#define WBSD_INT_FIFO_THRE	0x20
#define WBSD_INT_CRC		0x10
#define WBSD_INT_TIMEOUT	0x08
#define WBSD_INT_PROGEND	0x04
#define WBSD_INT_BUSYEND	0x02
#define WBSD_INT_TC		0x01

/* FIFO status bits (FSR). */
#define WBSD_FIFO_EMPTY		0x80
#define WBSD_FIFO_FULL		0x40
#define WBSD_FIFO_EMTHRE	0x20
#define WBSD_FIFO_FUTHRE	0x10
#define WBSD_FIFO_SZMASK	0x0F

/* Card status bits (CSR). */
#define WBSD_MSLED		0x20
#define WBSD_POWER_N		0x10
#define WBSD_WRPT		0x04
#define WBSD_CARDPRESENT	0x01

/* Indexed registers, reached through WBSD_IDXR/WBSD_DATAR. */
#define WBSD_IDX_CLK		0x01
#define WBSD_IDX_PBSMSB		0x02
#define WBSD_IDX_TAAC		0x03
#define WBSD_IDX_NSAC		0x04
#define WBSD_IDX_PBSLSB		0x05
#define WBSD_IDX_SETUP		0x06
#define WBSD_IDX_DMA		0x07
#define WBSD_IDX_FIFOEN		0x08
#define WBSD_IDX_STATUS		0x10
#define WBSD_IDX_RSPLEN		0x1E
#define WBSD_IDX_RESP0		0x1F
#define WBSD_IDX_RESP1		0x20
#define WBSD_IDX_RESP2		0x21
#define WBSD_IDX_RESP3		0x22
#define WBSD_IDX_RESP4		0x23
#define WBSD_IDX_RESP5		0x24
#define WBSD_IDX_RESP6		0x25
#define WBSD_IDX_RESP7		0x26
#define WBSD_IDX_RESP8		0x27
#define WBSD_IDX_RESP9		0x28
#define WBSD_IDX_RESP10		0x29
#define WBSD_IDX_RESP11		0x2A
#define WBSD_IDX_RESP12		0x2B
#define WBSD_IDX_RESP13		0x2C
#define WBSD_IDX_RESP14		0x2D
#define WBSD_IDX_RESP15		0x2E
#define WBSD_IDX_RESP16		0x2F
#define WBSD_IDX_CRCSTATUS	0x30
#define WBSD_IDX_ISR		0x3F

/* Clock speed selectors (WBSD_IDX_CLK). */
#define WBSD_CLK_375K		0x00
#define WBSD_CLK_12M		0x01
#define WBSD_CLK_16M		0x02
#define WBSD_CLK_24M		0x03

#define WBSD_DATA_WIDTH		0x01

/* Setup register bits (WBSD_IDX_SETUP). */
#define WBSD_DAT3_H		0x08
#define WBSD_FIFO_RESET		0x04
#define WBSD_SOFT_RESET		0x02
#define WBSD_INC_INDEX		0x01

/* DMA control bits (WBSD_IDX_DMA). */
#define WBSD_DMA_SINGLE		0x02
#define WBSD_DMA_ENABLE		0x01

/* FIFO threshold control (WBSD_IDX_FIFOEN). */
#define WBSD_FIFOEN_EMPTY	0x20
#define WBSD_FIFOEN_FULL	0x10
#define WBSD_FIFO_THREMASK	0x0F

/* Status register bits (WBSD_IDX_STATUS). */
#define WBSD_BLOCK_READ		0x80
#define WBSD_BLOCK_WRITE	0x40
#define WBSD_BUSY		0x20
#define WBSD_CARDTRAFFIC	0x04
#define WBSD_SENDCMD		0x02
#define WBSD_RECVRES		0x01

/* Response length selectors (WBSD_IDX_RSPLEN). */
#define WBSD_RSP_SHORT		0x00
#define WBSD_RSP_LONG		0x01

/* CRC token values (WBSD_IDX_CRCSTATUS). */
#define WBSD_CRC_MASK		0x1F
#define WBSD_CRC_OK		0x05 /* S010E (00101) */
#define WBSD_CRC_FAIL		0x0B /* S101E (01011) */

/* ISA DMA bounce-buffer size; 64 KiB matches the ISA DMA alignment
 * requirement enforced in wbsd_request_dma(). */
#define WBSD_DMA_SIZE		65536
139
/* Per-controller driver state, allocated as mmc_host private data. */
struct wbsd_host
{
	struct mmc_host*	mmc;		/* MMC structure */

	spinlock_t		lock;		/* Mutex */

	int			flags;		/* Driver states */

#define WBSD_FCARD_PRESENT	(1<<0)		/* Card is present */
#define WBSD_FIGNORE_DETECT	(1<<1)		/* Ignore card detection */

	struct mmc_request*	mrq;		/* Current request */

	u8			isr;		/* Accumulated ISR */

	struct scatterlist*	cur_sg;		/* Current SG entry */
	unsigned int		num_sg;		/* Number of entries left */

	unsigned int		offset;		/* Offset into current entry */
	unsigned int		remain;		/* Data left in current entry */

	char*			dma_buffer;	/* ISA DMA buffer */
	dma_addr_t		dma_addr;	/* Physical address for same */

	int			firsterr;	/* See fifo functions */

	u8			clk;		/* Current clock speed */
	unsigned char		bus_width;	/* Current bus width */

	int			config;		/* Config port (0 = none found) */
	u8			unlock_code;	/* Code to unlock config */

	int			chip_id;	/* ID of controller */

	int			base;		/* I/O port base (0 = unclaimed) */
	int			irq;		/* Interrupt (0 = unclaimed) */
	int			dma;		/* DMA channel (-1 = FIFO mode) */

	struct tasklet_struct	card_tasklet;	/* Tasklet structures */
	struct tasklet_struct	fifo_tasklet;
	struct tasklet_struct	crc_tasklet;
	struct tasklet_struct	timeout_tasklet;
	struct tasklet_struct	finish_tasklet;

	struct timer_list	ignore_timer;	/* Ignore detection timer */
};