aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mmc/host
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@woody.linux-foundation.org>2007-05-05 00:44:34 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-05-05 00:44:34 -0400
commit62ea6d80211ecc88ef516927ecebf64cb505be3f (patch)
tree1920de8cd3671aedcc912afb8e5ddb2a7c674b05 /drivers/mmc/host
parentfa24aa561a3cf91cf25b5d4066470b08a2d24206 (diff)
parentd3af5abe9a809becbe4b413144b607844560d445 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc: (46 commits) mmc-omap: Clean up omap set_ios and make MMC_POWER_ON work mmc-omap: Fix omap to use MMC_POWER_ON mmc-omap: add missing '\n' mmc: make tifm_sd_set_dma_data() static mmc: remove old card states mmc: support unsafe resume of cards mmc: separate out reading EXT_CSD mmc: break apart switch function MMC: Fix handling of low-voltage cards MMC: Consolidate voltage definitions mmc: add bus handler wbsd: check for data opcode earlier mmc: Separate out protocol ops mmc: Move core functions to subdir mmc: deprecate mmc bus topology mmc: remove card upon suspend mmc: allow suspended block driver to be removed mmc: Flush pending detects on host removal mmc: Move host and card drivers to subdirs mmc: Move queue functions to mmc_block ...
Diffstat (limited to 'drivers/mmc/host')
-rw-r--r--drivers/mmc/host/Kconfig103
-rw-r--r--drivers/mmc/host/Makefile18
-rw-r--r--drivers/mmc/host/at91_mci.c1001
-rw-r--r--drivers/mmc/host/au1xmmc.c1031
-rw-r--r--drivers/mmc/host/au1xmmc.h96
-rw-r--r--drivers/mmc/host/imxmmc.c1137
-rw-r--r--drivers/mmc/host/imxmmc.h67
-rw-r--r--drivers/mmc/host/mmci.c702
-rw-r--r--drivers/mmc/host/mmci.h179
-rw-r--r--drivers/mmc/host/omap.c1295
-rw-r--r--drivers/mmc/host/pxamci.c616
-rw-r--r--drivers/mmc/host/pxamci.h124
-rw-r--r--drivers/mmc/host/sdhci.c1535
-rw-r--r--drivers/mmc/host/sdhci.h210
-rw-r--r--drivers/mmc/host/tifm_sd.c1102
-rw-r--r--drivers/mmc/host/wbsd.c2061
-rw-r--r--drivers/mmc/host/wbsd.h185
17 files changed, 11462 insertions, 0 deletions
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
new file mode 100644
index 000000000000..ed4deab2203d
--- /dev/null
+++ b/drivers/mmc/host/Kconfig
@@ -0,0 +1,103 @@
1#
2# MMC/SD host controller drivers
3#
4
5comment "MMC/SD Host Controller Drivers"
6 depends on MMC
7
8config MMC_ARMMMCI
9 tristate "ARM AMBA Multimedia Card Interface support"
10 depends on ARM_AMBA && MMC
11 help
12 This selects the ARM(R) AMBA(R) PrimeCell Multimedia Card
13 Interface (PL180 and PL181) support. If you have an ARM(R)
14 platform with a Multimedia Card slot, say Y or M here.
15
16 If unsure, say N.
17
18config MMC_PXA
19 tristate "Intel PXA25x/26x/27x Multimedia Card Interface support"
20 depends on ARCH_PXA && MMC
21 help
22 This selects the Intel(R) PXA(R) Multimedia card Interface.
23 If you have a PXA(R) platform with a Multimedia Card slot,
24 say Y or M here.
25
26 If unsure, say N.
27
28config MMC_SDHCI
29 tristate "Secure Digital Host Controller Interface support (EXPERIMENTAL)"
30 depends on PCI && MMC && EXPERIMENTAL
31 help
32 This selects the generic Secure Digital Host Controller Interface.
33 It is used by manufacturers such as Texas Instruments(R), Ricoh(R)
34 and Toshiba(R). Most controllers found in laptops are of this type.
35 If you have a controller with this interface, say Y or M here.
36
37 If unsure, say N.
38
39config MMC_OMAP
40 tristate "TI OMAP Multimedia Card Interface support"
41 depends on ARCH_OMAP && MMC
42 select TPS65010 if MACH_OMAP_H2
43 help
44 This selects the TI OMAP Multimedia card Interface.
45 If you have an OMAP board with a Multimedia Card slot,
46 say Y or M here.
47
48 If unsure, say N.
49
50config MMC_WBSD
51 tristate "Winbond W83L51xD SD/MMC Card Interface support"
52 depends on MMC && ISA_DMA_API
53 help
54 This selects the Winbond(R) W83L51xD Secure digital and
55 Multimedia card Interface.
56 If you have a machine with an integrated W83L518D or W83L519D
57 SD/MMC card reader, say Y or M here.
58
59 If unsure, say N.
60
61config MMC_AU1X
62 tristate "Alchemy AU1XX0 MMC Card Interface support"
63 depends on MMC && SOC_AU1200
64 help
65 This selects the AMD Alchemy(R) Multimedia card interface.
66 If you have an Alchemy platform with a MMC slot, say Y or M here.
67
68 If unsure, say N.
69
70config MMC_AT91
71 tristate "AT91 SD/MMC Card Interface support"
72 depends on ARCH_AT91 && MMC
73 help
74 This selects the AT91 MCI controller.
75
76 If unsure, say N.
77
78config MMC_IMX
79 tristate "Motorola i.MX Multimedia Card Interface support"
80 depends on ARCH_IMX && MMC
81 help
82 This selects the Motorola i.MX Multimedia card Interface.
83 If you have an i.MX platform with a Multimedia Card slot,
84 say Y or M here.
85
86 If unsure, say N.
87
88config MMC_TIFM_SD
89 tristate "TI Flash Media MMC/SD Interface support (EXPERIMENTAL)"
90 depends on MMC && EXPERIMENTAL && PCI
91 select TIFM_CORE
92 help
93 Say Y here if you want to be able to access MMC/SD cards with
94 the Texas Instruments(R) Flash Media card reader, found in many
95 laptops.
96 This option 'selects' (turns on, enables) 'TIFM_CORE', but you
97 probably also need appropriate card reader host adapter, such as
98 'Misc devices: TI Flash Media PCI74xx/PCI76xx host adapter support
99 (TIFM_7XX1)'.
100
101 To compile this driver as a module, choose M here: the
102 module will be called tifm_sd.
103
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
new file mode 100644
index 000000000000..6685f64345b4
--- /dev/null
+++ b/drivers/mmc/host/Makefile
@@ -0,0 +1,18 @@
1#
2# Makefile for MMC/SD host controller drivers
3#
4
5ifeq ($(CONFIG_MMC_DEBUG),y)
6 EXTRA_CFLAGS += -DDEBUG
7endif
8
9obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
10obj-$(CONFIG_MMC_PXA) += pxamci.o
11obj-$(CONFIG_MMC_IMX) += imxmmc.o
12obj-$(CONFIG_MMC_SDHCI) += sdhci.o
13obj-$(CONFIG_MMC_WBSD) += wbsd.o
14obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
15obj-$(CONFIG_MMC_OMAP) += omap.o
16obj-$(CONFIG_MMC_AT91) += at91_mci.o
17obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o
18
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
new file mode 100644
index 000000000000..e37943c314cb
--- /dev/null
+++ b/drivers/mmc/host/at91_mci.c
@@ -0,0 +1,1001 @@
1/*
2 * linux/drivers/mmc/at91_mci.c - ATMEL AT91 MCI Driver
3 *
4 * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
5 *
6 * Copyright (C) 2006 Malcolm Noyes
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13/*
14 This is the AT91 MCI driver that has been tested with both MMC cards
15 and SD-cards. Boards that support write protect are now supported.
16 The CCAT91SBC001 board does not support SD cards.
17
18 The three entry points are at91_mci_request, at91_mci_set_ios
19 and at91_mci_get_ro.
20
21 SET IOS
22 This configures the device to put it into the correct mode and clock speed
23 required.
24
25 MCI REQUEST
26 MCI request processes the commands sent in the mmc_request structure. This
27 can consist of a processing command and a stop command in the case of
28 multiple block transfers.
29
30 There are three main types of request, commands, reads and writes.
31
32 Commands are straight forward. The command is submitted to the controller and
33 the request function returns. When the controller generates an interrupt to indicate
34 the command is finished, the response to the command are read and the mmc_request_done
35 function called to end the request.
36
37 Reads and writes work in a similar manner to normal commands but involve the PDC (DMA)
38 controller to manage the transfers.
39
40 A read is done from the controller directly to the scatterlist passed in from the request.
41 Due to a bug in the AT91RM9200 controller, when a read is completed, all the words are byte
42 swapped in the scatterlist buffers. AT91SAM926x are not affected by this bug.
43
44 The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY
45
46 A write is slightly different in that the bytes to write are read from the scatterlist
47 into a dma memory buffer (this is in case the source buffer should be read only). The
48 entire write buffer is then done from this single dma memory buffer.
49
50 The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY
51
52 GET RO
53 Gets the status of the write protect pin, if available.
54*/
55
56#include <linux/module.h>
57#include <linux/moduleparam.h>
58#include <linux/init.h>
59#include <linux/ioport.h>
60#include <linux/platform_device.h>
61#include <linux/interrupt.h>
62#include <linux/blkdev.h>
63#include <linux/delay.h>
64#include <linux/err.h>
65#include <linux/dma-mapping.h>
66#include <linux/clk.h>
67#include <linux/atmel_pdc.h>
68
69#include <linux/mmc/host.h>
70
71#include <asm/io.h>
72#include <asm/irq.h>
73#include <asm/mach/mmc.h>
74#include <asm/arch/board.h>
75#include <asm/arch/cpu.h>
76#include <asm/arch/gpio.h>
77#include <asm/arch/at91_mci.h>
78
/* Name used for the platform driver, mem region and irq registration. */
#define DRIVER_NAME "at91_mci"

/* 4-bit bus support is compiled out by default; define to enable it. */
#undef SUPPORT_4WIRE

/* Request-state flags kept in at91mci_host.flags: which command of the
 * mmc_request (main command / stop command) has already been issued. */
#define FL_SENT_COMMAND	(1 << 0)
#define FL_SENT_STOP	(1 << 1)

/* Union of every MCI status-register error bit we care about; used both
 * to enable error interrupts and to test for failure on completion. */
#define AT91_MCI_ERRORS	(AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \
		| AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \
		| AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)

/* Raw 32-bit accessors for the memory-mapped MCI register bank. */
#define at91_mci_read(host, reg)	__raw_readl((host)->baseaddr + (reg))
#define at91_mci_write(host, reg, val)	__raw_writel((val), (host)->baseaddr + (reg))
92
93
/*
 * Per-controller driver state, stored in the mmc_host private area
 * (allocated by mmc_alloc_host() in probe).
 */
struct at91mci_host
{
	struct mmc_host *mmc;		/* MMC core handle for this controller */
	struct mmc_command *cmd;	/* command currently in flight, or NULL */
	struct mmc_request *request;	/* request currently being processed */

	void __iomem *baseaddr;		/* ioremapped MCI register base */
	int irq;			/* MCI controller interrupt number */

	struct at91_mmc_data *board;	/* board-specific pins/options (platform_data) */
	int present;			/* card present? (-1 = no detect pin) */

	struct clk *mci_clk;		/* peripheral clock for the MCI block */

	/*
	 * Flag indicating when the command has been sent. This is used to
	 * work out whether or not to send the stop
	 */
	unsigned int flags;		/* FL_SENT_COMMAND / FL_SENT_STOP */
	/* flag for current bus settings */
	u32 bus_mode;

	/* DMA buffer used for transmitting (bounce buffer for writes) */
	unsigned int* buffer;
	dma_addr_t physical_address;	/* DMA address of 'buffer' */
	unsigned int total_length;	/* size of the current write transfer */

	/* Latest in the scatterlist that has been enabled for transfer, but not freed */
	int in_use_index;

	/* Latest in the scatterlist that has been enabled for transfer */
	int transfer_index;
};
130
/*
 * Copy the data to be written from the request's scatterlist into the
 * single contiguous DMA bounce buffer (host->buffer).  Used for writes
 * because the source pages may be read-only and non-contiguous.
 *
 * On AT91RM9200 the controller byte-swaps data on the bus (hardware
 * errata), so the copy is done word-by-word with swab32() there.
 */
static inline void at91mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
{
	unsigned int len, i, size;
	unsigned *dmabuf = host->buffer;

	size = host->total_length;	/* bytes still to copy */
	len = data->sg_len;

	/*
	 * Just loop through all entries. Size might not
	 * be the entire list though so make sure that
	 * we do not transfer too much.
	 */
	for (i = 0; i < len; i++) {
		struct scatterlist *sg;
		int amount;
		unsigned int *sgbuffer;

		sg = &data->sg[i];

		/* Atomic kmap: this may run in irq context (pre-2.6.37 API). */
		sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
		amount = min(size, sg->length);
		size -= amount;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			int index;

			/* pre-swap each 32-bit word to undo the controller's swap */
			for (index = 0; index < (amount / 4); index++)
				*dmabuf++ = swab32(sgbuffer[index]);
		}
		else
			memcpy(dmabuf, sgbuffer, amount);

		kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);

		if (size == 0)
			break;
	}

	/*
	 * Check that we didn't get a request to transfer
	 * more data than can fit into the SG list.
	 */
	BUG_ON(size != 0);
}
179
/*
 * Prepare a DMA read: map up to two scatterlist entries and program them
 * into the PDC's current (RPR/RCR) and next (RNPR/RNCR) buffer registers,
 * so the controller can chain transfers without CPU intervention.
 * Advances host->transfer_index for each entry queued.
 */
static void at91mci_pre_dma_read(struct at91mci_host *host)
{
	int i;
	struct scatterlist *sg;
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("pre dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	/* i == 0 fills the current PDC buffer, i == 1 the next buffer */
	for (i = 0; i < 2; i++) {
		/* nothing left to transfer */
		if (host->transfer_index >= data->sg_len) {
			pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
			break;
		}

		/* Check to see if this needs filling: a non-zero counter
		 * means that PDC slot is still busy with a transfer. */
		if (i == 0) {
			if (at91_mci_read(host, ATMEL_PDC_RCR) != 0) {
				pr_debug("Transfer active in current\n");
				continue;
			}
		}
		else {
			if (at91_mci_read(host, ATMEL_PDC_RNCR) != 0) {
				pr_debug("Transfer active in next\n");
				continue;
			}
		}

		/* Setup the next transfer */
		pr_debug("Using transfer index %d\n", host->transfer_index);

		sg = &data->sg[host->transfer_index++];
		pr_debug("sg = %p\n", sg);

		/* NOTE(review): NULL device passed to dma_map_page — old-API
		 * idiom; unmapped again in at91mci_post_dma_read(). */
		sg->dma_address = dma_map_page(NULL, sg->page, sg->offset, sg->length, DMA_FROM_DEVICE);

		pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);

		if (i == 0) {
			at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RCR, sg->length / 4);	/* counter is in words */
		}
		else {
			at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RNCR, sg->length / 4);
		}
	}

	pr_debug("pre dma read done\n");
}
247
/*
 * Finish the scatterlist entries the PDC has completed: unmap each page,
 * undo the AT91RM9200 byte-swap errata in place, account the bytes, and
 * either queue the next pair of entries or — when the list is exhausted —
 * enable RXBUFF and shut the PDC down.
 */
static void at91mci_post_dma_read(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("post dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	/* everything between in_use_index and transfer_index is done */
	while (host->in_use_index < host->transfer_index) {
		unsigned int *buffer;

		struct scatterlist *sg;

		pr_debug("finishing index %d\n", host->in_use_index);

		sg = &data->sg[host->in_use_index++];

		pr_debug("Unmapping page %08X\n", sg->dma_address);

		dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);

		/* Swap the contents of the buffer (AT91RM9200 read errata) */
		buffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
		pr_debug("buffer = %p, length = %d\n", buffer, sg->length);

		data->bytes_xfered += sg->length;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			int index;

			for (index = 0; index < (sg->length / 4); index++)
				buffer[index] = swab32(buffer[index]);
		}

		kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		flush_dcache_page(sg->page);	/* keep I/D caches coherent for readers */
	}

	/* Is there another transfer to trigger? */
	if (host->transfer_index < data->sg_len)
		at91mci_pre_dma_read(host);
	else {
		/* all queued: wait for the final RXBUFF, stop the PDC */
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
	}

	pr_debug("post dma read done\n");
}
310
311/*
312 * Handle transmitted data
313 */
314static void at91_mci_handle_transmitted(struct at91mci_host *host)
315{
316 struct mmc_command *cmd;
317 struct mmc_data *data;
318
319 pr_debug("Handling the transmit\n");
320
321 /* Disable the transfer */
322 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
323
324 /* Now wait for cmd ready */
325 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);
326 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
327
328 cmd = host->cmd;
329 if (!cmd) return;
330
331 data = cmd->data;
332 if (!data) return;
333
334 data->bytes_xfered = host->total_length;
335}
336
/*
 * Enable the controller: power it on, mask all interrupts, set the data
 * timeout, and select PDC mode with the board's configured slot.
 */
static void at91_mci_enable(struct at91mci_host *host)
{
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);		/* mask everything */
	at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
	/* NOTE(review): 0x34a packs clock-divider/timeout fields into MR —
	 * exact field meaning per the AT91 MCI_MR register description;
	 * confirm against the datasheet before changing. */
	at91_mci_write(host, AT91_MCI_MR, AT91_MCI_PDCMODE | 0x34a);

	/* use Slot A or B (only one at same time) */
	at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
}
350
/*
 * Disable the controller: power it down and issue a software reset so
 * it comes back in a clean state on the next at91_mci_enable().
 */
static void at91_mci_disable(struct at91mci_host *host)
{
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
}
358
/*
 * Build and issue one command to the controller.
 *
 * Translates the mmc_command into a CMDR value (response type, transfer
 * direction/type, open-drain), programs the PDC for any data phase (reads
 * use the scatterlist directly, writes go through a coherent bounce
 * buffer), then writes ARGR/CMDR.
 *
 * Returns the interrupt-enable bits the caller should arm to learn when
 * this command completes (CMDRDY, ENDRX or TXBUFE).
 */
static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int cmdr, mr;
	unsigned int block_length;
	struct mmc_data *data = cmd->data;

	unsigned int blocks;
	unsigned int ier = 0;

	host->cmd = cmd;

	/* Not sure if this is needed */
#if 0
	if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
		pr_debug("Clearing timeout\n");
		at91_mci_write(host, AT91_MCI_ARGR, 0);
		at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
		while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
			/* spin */
			pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
		}
	}
#endif
	cmdr = cmd->opcode;

	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
		cmdr |= AT91_MCI_RSPTYP_NONE;
	else {
		/* if a response is expected then allow maximum response latency */
		cmdr |= AT91_MCI_MAXLAT;
		/* set 136 bit response for R2, 48 bit response otherwise */
		if (mmc_resp_type(cmd) == MMC_RSP_R2)
			cmdr |= AT91_MCI_RSPTYP_136;
		else
			cmdr |= AT91_MCI_RSPTYP_48;
	}

	if (data) {
		block_length = data->blksz;
		blocks = data->blocks;

		/* always set data start - also set direction flag for read */
		if (data->flags & MMC_DATA_READ)
			cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
		else if (data->flags & MMC_DATA_WRITE)
			cmdr |= AT91_MCI_TRCMD_START;

		if (data->flags & MMC_DATA_STREAM)
			cmdr |= AT91_MCI_TRTYP_STREAM;
		if (data->flags & MMC_DATA_MULTI)
			cmdr |= AT91_MCI_TRTYP_MULTIPLE;
	}
	else {
		block_length = 0;
		blocks = 0;
	}

	if (cmd->opcode == MMC_STOP_TRANSMISSION)
		cmdr |= AT91_MCI_TRCMD_STOP;

	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= AT91_MCI_OPDCMD;

	/*
	 * Set the arguments and send the command
	 */
	pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
		cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));

	if (!data) {
		/* no data phase: quiesce the PDC and clear all of its
		 * pointer/counter registers before issuing the command */
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS);
		at91_mci_write(host, ATMEL_PDC_RPR, 0);
		at91_mci_write(host, ATMEL_PDC_RCR, 0);
		at91_mci_write(host, ATMEL_PDC_RNPR, 0);
		at91_mci_write(host, ATMEL_PDC_RNCR, 0);
		at91_mci_write(host, ATMEL_PDC_TPR, 0);
		at91_mci_write(host, ATMEL_PDC_TCR, 0);
		at91_mci_write(host, ATMEL_PDC_TNPR, 0);
		at91_mci_write(host, ATMEL_PDC_TNCR, 0);

		at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
		at91_mci_write(host, AT91_MCI_CMDR, cmdr);
		return AT91_MCI_CMDRDY;
	}

	mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;	/* zero block length and PDC mode */
	at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE);

	/*
	 * Disable the PDC controller
	 */
	at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

	if (cmdr & AT91_MCI_TRCMD_START) {
		data->bytes_xfered = 0;
		host->transfer_index = 0;
		host->in_use_index = 0;
		if (cmdr & AT91_MCI_TRDIR) {
			/*
			 * Handle a read
			 */
			host->buffer = NULL;
			host->total_length = 0;

			at91mci_pre_dma_read(host);
			ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
		}
		else {
			/*
			 * Handle a write: stage everything into one
			 * coherent bounce buffer (freed on completion)
			 */
			host->total_length = block_length * blocks;
			host->buffer = dma_alloc_coherent(NULL,
					host->total_length,
					&host->physical_address, GFP_KERNEL);

			at91mci_sg_to_dma(host, data);

			pr_debug("Transmitting %d bytes\n", host->total_length);

			at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
			at91_mci_write(host, ATMEL_PDC_TCR, host->total_length / 4);
			ier = AT91_MCI_TXBUFE;
		}
	}

	/*
	 * Send the command and then enable the PDC - not the other way round as
	 * the data sheet says
	 */

	at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
	at91_mci_write(host, AT91_MCI_CMDR, cmdr);

	if (cmdr & AT91_MCI_TRCMD_START) {
		if (cmdr & AT91_MCI_TRDIR)
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
		else
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
	}
	return ier;
}
505
506/*
507 * Wait for a command to complete
508 */
509static void at91mci_process_command(struct at91mci_host *host, struct mmc_command *cmd)
510{
511 unsigned int ier;
512
513 ier = at91_mci_send_command(host, cmd);
514
515 pr_debug("setting ier to %08X\n", ier);
516
517 /* Stop on errors or the required value */
518 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
519}
520
521/*
522 * Process the next step in the request
523 */
524static void at91mci_process_next(struct at91mci_host *host)
525{
526 if (!(host->flags & FL_SENT_COMMAND)) {
527 host->flags |= FL_SENT_COMMAND;
528 at91mci_process_command(host, host->request->cmd);
529 }
530 else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
531 host->flags |= FL_SENT_STOP;
532 at91mci_process_command(host, host->request->stop);
533 }
534 else
535 mmc_request_done(host->mmc, host->request);
536}
537
/*
 * A command has finished: read back the response words, free the write
 * bounce buffer if one was used, map the status register's error bits
 * onto an mmc error code, and move on to the next step of the request.
 */
static void at91mci_completed_command(struct at91mci_host *host)
{
	struct mmc_command *cmd = host->cmd;
	unsigned int status;

	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);	/* mask all interrupts */

	cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
	cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
	cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
	cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));

	if (host->buffer) {
		dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address);
		host->buffer = NULL;
	}

	status = at91_mci_read(host, AT91_MCI_SR);

	pr_debug("Status = %08X [%08X %08X %08X %08X]\n",
		 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);

	if (status & (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE |
			AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE |
			AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)) {
		/* R3 responses (OP_COND) carry no CRC, so a CRC error
		 * on those commands is expected and not a failure */
		if ((status & AT91_MCI_RCRCE) &&
			((cmd->opcode == MMC_SEND_OP_COND) || (cmd->opcode == SD_APP_OP_COND))) {
			cmd->error = MMC_ERR_NONE;
		}
		else {
			if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE))
				cmd->error = MMC_ERR_TIMEOUT;
			else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE))
				cmd->error = MMC_ERR_BADCRC;
			else if (status & (AT91_MCI_OVRE | AT91_MCI_UNRE))
				cmd->error = MMC_ERR_FIFO;
			else
				cmd->error = MMC_ERR_FAILED;

			pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n",
				 cmd->error, cmd->opcode, cmd->retries);
		}
	}
	else
		cmd->error = MMC_ERR_NONE;

	at91mci_process_next(host);
}
589
590/*
591 * Handle an MMC request
592 */
593static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
594{
595 struct at91mci_host *host = mmc_priv(mmc);
596 host->request = mrq;
597 host->flags = 0;
598
599 at91mci_process_next(host);
600}
601
/*
 * mmc_host_ops.set_ios entry point: apply bus mode, clock rate, bus
 * width and (if the board has a VCC pin) card power.
 */
static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int clkdiv;
	struct at91mci_host *host = mmc_priv(mmc);
	unsigned long at91_master_clock = clk_get_rate(host->mci_clk);

	host->bus_mode = ios->bus_mode;

	if (ios->clock == 0) {
		/* Disable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
		clkdiv = 0;
	}
	else {
		/* Enable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);

		/* MCI clock = master / (2 * (CLKDIV + 1)); round the divider
		 * up when the requested rate doesn't divide evenly, so we
		 * never exceed the card's requested frequency */
		if ((at91_master_clock % (ios->clock * 2)) == 0)
			clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
		else
			clkdiv = (at91_master_clock / ios->clock) / 2;

		pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
			at91_master_clock / (2 * (clkdiv + 1)));
	}
	if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
		pr_debug("MMC: Setting controller bus width to 4\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
	}
	else {
		pr_debug("MMC: Setting controller bus width to 1\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
	}

	/* Set the clock divider */
	at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);

	/* maybe switch power to the card */
	if (host->board->vcc_pin) {
		switch (ios->power_mode) {
			case MMC_POWER_OFF:
				at91_set_gpio_value(host->board->vcc_pin, 0);
				break;
			case MMC_POWER_UP:
			case MMC_POWER_ON:
				at91_set_gpio_value(host->board->vcc_pin, 1);
				break;
		}
	}
}
655
/*
 * MCI interrupt handler.  Reads SR masked by IMR, logs any error bits,
 * and drives the transfer state machine: TXBUFE/ENDRX advance a write/
 * read in progress, NOTBUSY/RXBUFF re-arm CMDRDY, and CMDRDY (or any
 * error) marks the command complete and hands off to
 * at91mci_completed_command().
 */
static irqreturn_t at91_mci_irq(int irq, void *devid)
{
	struct at91mci_host *host = devid;
	int completed = 0;
	unsigned int int_status, int_mask;

	int_status = at91_mci_read(host, AT91_MCI_SR);
	int_mask = at91_mci_read(host, AT91_MCI_IMR);

	pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
		int_status & int_mask);

	/* only react to interrupts we actually enabled */
	int_status = int_status & int_mask;

	if (int_status & AT91_MCI_ERRORS) {
		completed = 1;

		if (int_status & AT91_MCI_UNRE)
			pr_debug("MMC: Underrun error\n");
		if (int_status & AT91_MCI_OVRE)
			pr_debug("MMC: Overrun error\n");
		if (int_status & AT91_MCI_DTOE)
			pr_debug("MMC: Data timeout\n");
		if (int_status & AT91_MCI_DCRCE)
			pr_debug("MMC: CRC error in data\n");
		if (int_status & AT91_MCI_RTOE)
			pr_debug("MMC: Response timeout\n");
		if (int_status & AT91_MCI_RENDE)
			pr_debug("MMC: Response end bit error\n");
		if (int_status & AT91_MCI_RCRCE)
			pr_debug("MMC: Response CRC error\n");
		if (int_status & AT91_MCI_RDIRE)
			pr_debug("MMC: Response direction error\n");
		if (int_status & AT91_MCI_RINDE)
			pr_debug("MMC: Response index error\n");
	} else {
		/* Only continue processing if no errors */

		if (int_status & AT91_MCI_TXBUFE) {
			pr_debug("TX buffer empty\n");
			at91_mci_handle_transmitted(host);
		}

		if (int_status & AT91_MCI_RXBUFF) {
			pr_debug("RX buffer full\n");
			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_CMDRDY);
		}

		if (int_status & AT91_MCI_ENDTX)
			pr_debug("Transmit has ended\n");

		if (int_status & AT91_MCI_ENDRX) {
			pr_debug("Receive has ended\n");
			at91mci_post_dma_read(host);
		}

		if (int_status & AT91_MCI_NOTBUSY) {
			pr_debug("Card is ready\n");
			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_CMDRDY);
		}

		if (int_status & AT91_MCI_DTIP)
			pr_debug("Data transfer in progress\n");

		if (int_status & AT91_MCI_BLKE)
			pr_debug("Block transfer has ended\n");

		if (int_status & AT91_MCI_TXRDY)
			pr_debug("Ready to transmit\n");

		if (int_status & AT91_MCI_RXRDY)
			pr_debug("Ready to receive\n");

		if (int_status & AT91_MCI_CMDRDY) {
			pr_debug("Command ready\n");
			completed = 1;
		}
	}

	if (completed) {
		pr_debug("Completed command\n");
		/* mask everything before finishing the command */
		at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
		at91mci_completed_command(host);
	} else
		/* acknowledge only the sources we handled this time */
		at91_mci_write(host, AT91_MCI_IDR, int_status);

	return IRQ_HANDLED;
}
747
/*
 * Card-detect GPIO interrupt.  Fires on both insert and remove; the
 * 100 ms mmc_detect_change() delay provides debouncing.
 */
static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
{
	struct at91mci_host *host = _host;
	/* NOTE(review): irq number used directly as the GPIO pin here —
	 * presumably they coincide on AT91; confirm against board setup. */
	int present = !at91_get_gpio_value(irq);

	/*
	 * we expect this irq on both insert and remove,
	 * and use a short delay to debounce.
	 */
	if (present != host->present) {
		host->present = present;
		pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
			present ? "insert" : "remove");
		if (!present) {
			pr_debug("****** Resetting SD-card bus width ******\n");
			/* drop back to 1-bit mode for the next card */
			at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
		}
		mmc_detect_change(host->mmc, msecs_to_jiffies(100));
	}
	return IRQ_HANDLED;
}
769
770static int at91_mci_get_ro(struct mmc_host *mmc)
771{
772 int read_only = 0;
773 struct at91mci_host *host = mmc_priv(mmc);
774
775 if (host->board->wp_pin) {
776 read_only = at91_get_gpio_value(host->board->wp_pin);
777 printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc),
778 (read_only ? "read-only" : "read-write") );
779 }
780 else {
781 printk(KERN_WARNING "%s: host does not support reading read-only "
782 "switch. Assuming write-enable.\n", mmc_hostname(mmc));
783 }
784 return read_only;
785}
786
/* Operations exported to the MMC core for this host controller. */
static const struct mmc_host_ops at91_mci_ops = {
	.request	= at91_mci_request,
	.set_ios	= at91_mci_set_ios,
	.get_ro		= at91_mci_get_ro,
};
792
793/*
794 * Probe for the device
795 */
796static int __init at91_mci_probe(struct platform_device *pdev)
797{
798 struct mmc_host *mmc;
799 struct at91mci_host *host;
800 struct resource *res;
801 int ret;
802
803 pr_debug("Probe MCI devices\n");
804
805 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
806 if (!res)
807 return -ENXIO;
808
809 if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
810 return -EBUSY;
811
812 mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
813 if (!mmc) {
814 pr_debug("Failed to allocate mmc host\n");
815 release_mem_region(res->start, res->end - res->start + 1);
816 return -ENOMEM;
817 }
818
819 mmc->ops = &at91_mci_ops;
820 mmc->f_min = 375000;
821 mmc->f_max = 25000000;
822 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
823 mmc->caps = MMC_CAP_BYTEBLOCK;
824
825 mmc->max_blk_size = 4095;
826 mmc->max_blk_count = mmc->max_req_size;
827
828 host = mmc_priv(mmc);
829 host->mmc = mmc;
830 host->buffer = NULL;
831 host->bus_mode = 0;
832 host->board = pdev->dev.platform_data;
833 if (host->board->wire4) {
834#ifdef SUPPORT_4WIRE
835 mmc->caps |= MMC_CAP_4_BIT_DATA;
836#else
837 printk("AT91 MMC: 4 wire bus mode not supported by this driver - using 1 wire\n");
838#endif
839 }
840
841 /*
842 * Get Clock
843 */
844 host->mci_clk = clk_get(&pdev->dev, "mci_clk");
845 if (IS_ERR(host->mci_clk)) {
846 printk(KERN_ERR "AT91 MMC: no clock defined.\n");
847 mmc_free_host(mmc);
848 release_mem_region(res->start, res->end - res->start + 1);
849 return -ENODEV;
850 }
851
852 /*
853 * Map I/O region
854 */
855 host->baseaddr = ioremap(res->start, res->end - res->start + 1);
856 if (!host->baseaddr) {
857 clk_put(host->mci_clk);
858 mmc_free_host(mmc);
859 release_mem_region(res->start, res->end - res->start + 1);
860 return -ENOMEM;
861 }
862
863 /*
864 * Reset hardware
865 */
866 clk_enable(host->mci_clk); /* Enable the peripheral clock */
867 at91_mci_disable(host);
868 at91_mci_enable(host);
869
870 /*
871 * Allocate the MCI interrupt
872 */
873 host->irq = platform_get_irq(pdev, 0);
874 ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED, DRIVER_NAME, host);
875 if (ret) {
876 printk(KERN_ERR "AT91 MMC: Failed to request MCI interrupt\n");
877 clk_disable(host->mci_clk);
878 clk_put(host->mci_clk);
879 mmc_free_host(mmc);
880 iounmap(host->baseaddr);
881 release_mem_region(res->start, res->end - res->start + 1);
882 return ret;
883 }
884
885 platform_set_drvdata(pdev, mmc);
886
887 /*
888 * Add host to MMC layer
889 */
890 if (host->board->det_pin)
891 host->present = !at91_get_gpio_value(host->board->det_pin);
892 else
893 host->present = -1;
894
895 mmc_add_host(mmc);
896
897 /*
898 * monitor card insertion/removal if we can
899 */
900 if (host->board->det_pin) {
901 ret = request_irq(host->board->det_pin, at91_mmc_det_irq,
902 0, DRIVER_NAME, host);
903 if (ret)
904 printk(KERN_ERR "AT91 MMC: Couldn't allocate MMC detect irq\n");
905 }
906
907 pr_debug("Added MCI driver\n");
908
909 return 0;
910}
911
/*
 * Remove a device: tear down card-detect, unregister from the MMC core,
 * release the MCI interrupt, clock, mapping and memory region — the
 * reverse of probe.  Returns 0 on success, -1 if no host was bound.
 */
static int __exit at91_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host;
	struct resource *res;

	if (!mmc)
		return -1;

	host = mmc_priv(mmc);

	/* present == -1 means no detect pin, so no detect irq was requested */
	if (host->present != -1) {
		free_irq(host->board->det_pin, host);
		cancel_delayed_work(&host->mmc->detect);
	}

	at91_mci_disable(host);
	mmc_remove_host(mmc);
	free_irq(host->irq, host);

	clk_disable(host->mci_clk);			/* Disable the peripheral clock */
	clk_put(host->mci_clk);

	iounmap(host->baseaddr);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start + 1);

	mmc_free_host(mmc);
	platform_set_drvdata(pdev, NULL);
	pr_debug("MCI Removed\n");

	return 0;
}
948
949#ifdef CONFIG_PM
950static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
951{
952 struct mmc_host *mmc = platform_get_drvdata(pdev);
953 int ret = 0;
954
955 if (mmc)
956 ret = mmc_suspend_host(mmc, state);
957
958 return ret;
959}
960
/*
 * Power-management resume hook: delegate to the MMC core if a host
 * is bound to this platform device, otherwise succeed trivially.
 */
static int at91_mci_resume(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	if (!mmc)
		return 0;

	return mmc_resume_host(mmc);
}
971#else
972#define at91_mci_suspend NULL
973#define at91_mci_resume NULL
974#endif
975
/* Platform driver definition; .probe is passed separately to
 * platform_driver_probe() so it can live in __init memory. */
static struct platform_driver at91_mci_driver = {
	.remove		= __exit_p(at91_mci_remove),
	.suspend	= at91_mci_suspend,
	.resume		= at91_mci_resume,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};
985
/* Module entry point: register the driver and probe immediately. */
static int __init at91_mci_init(void)
{
	return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
}
990
/* Module exit point: unregister the platform driver. */
static void __exit at91_mci_exit(void)
{
	platform_driver_unregister(&at91_mci_driver);
}
995
/* Standard module registration and metadata. */
module_init(at91_mci_init);
module_exit(at91_mci_exit);

MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
MODULE_AUTHOR("Nick Randell");
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
new file mode 100644
index 000000000000..b7156a4555b5
--- /dev/null
+++ b/drivers/mmc/host/au1xmmc.c
@@ -0,0 +1,1031 @@
1/*
2 * linux/drivers/mmc/au1xmmc.c - AU1XX0 MMC driver
3 *
4 * Copyright (c) 2005, Advanced Micro Devices, Inc.
5 *
6 * Developed with help from the 2.4.30 MMC AU1XXX controller including
7 * the following copyright notices:
8 * Copyright (c) 2003-2004 Embedded Edge, LLC.
9 * Portions Copyright (C) 2002 Embedix, Inc
10 * Copyright 2002 Hewlett-Packard Company
11
12 * 2.6 version of this driver inspired by:
13 * (drivers/mmc/wbsd.c) Copyright (C) 2004-2005 Pierre Ossman,
14 * All Rights Reserved.
15 * (drivers/mmc/pxa.c) Copyright (C) 2003 Russell King,
16 * All Rights Reserved.
17 *
18
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License version 2 as
21 * published by the Free Software Foundation.
22 */
23
24/* Why is a timer used to detect insert events?
25 *
26 * From the AU1100 MMC application guide:
27 * If the Au1100-based design is intended to support both MultiMediaCards
28 * and 1- or 4-data bit SecureDigital cards, then the solution is to
29 * connect a weak (560KOhm) pull-up resistor to connector pin 1.
30 * In doing so, a MMC card never enters SPI-mode communications,
31 * but now the SecureDigital card-detect feature of CD/DAT3 is ineffective
32 * (the low to high transition will not occur).
33 *
34 * So we use the timer to check the status manually.
35 */
36
37#include <linux/module.h>
38#include <linux/init.h>
39#include <linux/platform_device.h>
40#include <linux/mm.h>
41#include <linux/interrupt.h>
42#include <linux/dma-mapping.h>
43
44#include <linux/mmc/host.h>
45#include <asm/io.h>
46#include <asm/mach-au1x00/au1000.h>
47#include <asm/mach-au1x00/au1xxx_dbdma.h>
48#include <asm/mach-au1x00/au1100_mmc.h>
49#include <asm/scatterlist.h>
50
51#include <au1xxx.h>
52#include "au1xmmc.h"
53
54#define DRIVER_NAME "au1xxx-mmc"
55
56/* Set this to enable special debugging macros */
57
58#ifdef DEBUG
59#define DBG(fmt, idx, args...) printk("au1xx(%d): DEBUG: " fmt, idx, ##args)
60#else
61#define DBG(fmt, idx, args...)
62#endif
63
64const struct {
65 u32 iobase;
66 u32 tx_devid, rx_devid;
67 u16 bcsrpwr;
68 u16 bcsrstatus;
69 u16 wpstatus;
70} au1xmmc_card_table[] = {
71 { SD0_BASE, DSCR_CMD0_SDMS_TX0, DSCR_CMD0_SDMS_RX0,
72 BCSR_BOARD_SD0PWR, BCSR_INT_SD0INSERT, BCSR_STATUS_SD0WP },
73#ifndef CONFIG_MIPS_DB1200
74 { SD1_BASE, DSCR_CMD0_SDMS_TX1, DSCR_CMD0_SDMS_RX1,
75 BCSR_BOARD_DS1PWR, BCSR_INT_SD1INSERT, BCSR_STATUS_SD1WP }
76#endif
77};
78
79#define AU1XMMC_CONTROLLER_COUNT \
80 (sizeof(au1xmmc_card_table) / sizeof(au1xmmc_card_table[0]))
81
82/* This array stores pointers for the hosts (used by the IRQ handler) */
83struct au1xmmc_host *au1xmmc_hosts[AU1XMMC_CONTROLLER_COUNT];
84static int dma = 1;
85
86#ifdef MODULE
87module_param(dma, bool, 0);
88MODULE_PARM_DESC(dma, "Use DMA engine for data transfers (0 = disabled)");
89#endif
90
91static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask)
92{
93 u32 val = au_readl(HOST_CONFIG(host));
94 val |= mask;
95 au_writel(val, HOST_CONFIG(host));
96 au_sync();
97}
98
99static inline void FLUSH_FIFO(struct au1xmmc_host *host)
100{
101 u32 val = au_readl(HOST_CONFIG2(host));
102
103 au_writel(val | SD_CONFIG2_FF, HOST_CONFIG2(host));
104 au_sync_delay(1);
105
106 /* SEND_STOP will turn off clock control - this re-enables it */
107 val &= ~SD_CONFIG2_DF;
108
109 au_writel(val, HOST_CONFIG2(host));
110 au_sync();
111}
112
113static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask)
114{
115 u32 val = au_readl(HOST_CONFIG(host));
116 val &= ~mask;
117 au_writel(val, HOST_CONFIG(host));
118 au_sync();
119}
120
/*
 * Issue the hard-coded STOP_TRANSMISSION command (STOP_CMD) to end a
 * multi-block transfer.  Marks the host as being in the STOP phase.
 * The write order matters: CONFIG2 must be set (with clock control
 * disabled via SD_CONFIG2_DF) before the command register is written.
 */
static inline void SEND_STOP(struct au1xmmc_host *host)
{

	/* We know the value of CONFIG2, so avoid a read we don't need */
	u32 mask = SD_CONFIG2_EN;

	/* A stop only makes sense while a data transfer is in flight. */
	WARN_ON(host->status != HOST_S_DATA);
	host->status = HOST_S_STOP;

	au_writel(mask | SD_CONFIG2_DF, HOST_CONFIG2(host));
	au_sync();

	/* Send the stop commmand */
	au_writel(STOP_CMD, HOST_CMD(host));
}
136
137static void au1xmmc_set_power(struct au1xmmc_host *host, int state)
138{
139
140 u32 val = au1xmmc_card_table[host->id].bcsrpwr;
141
142 bcsr->board &= ~val;
143 if (state) bcsr->board |= val;
144
145 au_sync_delay(1);
146}
147
148static inline int au1xmmc_card_inserted(struct au1xmmc_host *host)
149{
150 return (bcsr->sig_status & au1xmmc_card_table[host->id].bcsrstatus)
151 ? 1 : 0;
152}
153
154static int au1xmmc_card_readonly(struct mmc_host *mmc)
155{
156 struct au1xmmc_host *host = mmc_priv(mmc);
157 return (bcsr->status & au1xmmc_card_table[host->id].wpstatus)
158 ? 1 : 0;
159}
160
/*
 * Complete the current request: reset all per-transfer state and hand
 * the request back to the MMC core.
 */
static void au1xmmc_finish_request(struct au1xmmc_host *host)
{

	struct mmc_request *mrq = host->mrq;

	host->mrq = NULL;
	/* Keep only HOST_F_ACTIVE; clears XMIT/RECV/DMA/STOP transfer flags. */
	host->flags &= HOST_F_ACTIVE;

	host->dma.len = 0;
	host->dma.dir = 0;

	host->pio.index = 0;
	host->pio.offset = 0;
	host->pio.len = 0;

	host->status = HOST_S_IDLE;

	/* Turn the activity LED back off (bit 8 of the BCSR LED register). */
	bcsr->disk_leds |= (1 << 8);

	mmc_request_done(host->mmc, mrq);
}
182
/* Tasklet wrapper: finish the current request outside IRQ context. */
static void au1xmmc_tasklet_finish(unsigned long param)
{
	au1xmmc_finish_request((struct au1xmmc_host *) param);
}
188
/*
 * Build and issue an MMC/SD command.  Maps the core's response type to
 * the controller's SD_CMD_RT_* encoding and the opcode to a transfer
 * type (SD_CMD_CT_*).  If 'wait' is set, command-response interrupts
 * are masked and completion is busy-waited for synchronously.
 *
 * Returns MMC_ERR_NONE on success or MMC_ERR_INVALID for an
 * unsupported response type.
 *
 * NOTE(review): both busy-wait loops below have no timeout; if the
 * controller wedges, this spins forever.  Presumably acceptable for
 * this hardware — confirm before reusing the pattern.
 */
static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
				struct mmc_command *cmd)
{

	u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		break;
	case MMC_RSP_R1:
		mmccmd |= SD_CMD_RT_1;
		break;
	case MMC_RSP_R1B:
		mmccmd |= SD_CMD_RT_1B;
		break;
	case MMC_RSP_R2:
		mmccmd |= SD_CMD_RT_2;
		break;
	case MMC_RSP_R3:
		mmccmd |= SD_CMD_RT_3;
		break;
	default:
		printk(KERN_INFO "au1xmmc: unhandled response type %02x\n",
			mmc_resp_type(cmd));
		return MMC_ERR_INVALID;
	}

	/* Select the controller transfer type for data-bearing opcodes. */
	switch(cmd->opcode) {
	case MMC_READ_SINGLE_BLOCK:
	case SD_APP_SEND_SCR:
		mmccmd |= SD_CMD_CT_2;
		break;
	case MMC_READ_MULTIPLE_BLOCK:
		mmccmd |= SD_CMD_CT_4;
		break;
	case MMC_WRITE_BLOCK:
		mmccmd |= SD_CMD_CT_1;
		break;

	case MMC_WRITE_MULTIPLE_BLOCK:
		mmccmd |= SD_CMD_CT_3;
		break;
	case MMC_STOP_TRANSMISSION:
		mmccmd |= SD_CMD_CT_7;
		break;
	}

	/* Argument must be latched before the command register is written. */
	au_writel(cmd->arg, HOST_CMDARG(host));
	au_sync();

	/* Synchronous mode: poll for completion instead of taking the IRQ. */
	if (wait)
		IRQ_OFF(host, SD_CONFIG_CR);

	au_writel((mmccmd | SD_CMD_GO), HOST_CMD(host));
	au_sync();

	/* Wait for the command to go on the line */

	while(1) {
		if (!(au_readl(HOST_CMD(host)) & SD_CMD_GO))
			break;
	}

	/* Wait for the command to come back */

	if (wait) {
		u32 status = au_readl(HOST_STATUS(host));

		while(!(status & SD_STATUS_CR))
			status = au_readl(HOST_STATUS(host));

		/* Clear the CR status */
		au_writel(SD_STATUS_CR, HOST_STATUS(host));

		IRQ_ON(host, SD_CONFIG_CR);
	}

	return MMC_ERR_NONE;
}
268
/*
 * Finish a data transfer: wait for the data-busy bit to clear on
 * transmit, unmap the scatterlist, check the CRC status bits and
 * record the number of bytes transferred before completing the
 * request.  'status' may be 0, in which case it is re-read here.
 */
static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
{

	struct mmc_request *mrq = host->mrq;
	struct mmc_data *data;
	u32 crc;

	WARN_ON(host->status != HOST_S_DATA && host->status != HOST_S_STOP);

	/* Request may already have been torn down (e.g. by an error path). */
	if (host->mrq == NULL)
		return;

	data = mrq->cmd->data;

	if (status == 0)
		status = au_readl(HOST_STATUS(host));

	/* The transaction is really over when the SD_STATUS_DB bit is clear */

	while((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB))
		status = au_readl(HOST_STATUS(host));

	data->error = MMC_ERR_NONE;
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir);

	/* Process any errors */

	crc = (status & (SD_STATUS_WC | SD_STATUS_RC));
	if (host->flags & HOST_F_XMIT)
		/* On transmit, the low 3 status bits must read 0x02 for success. */
		crc |= ((status & 0x07) == 0x02) ? 0 : 1;

	if (crc)
		data->error = MMC_ERR_BADCRC;

	/* Clear the CRC bits */
	au_writel(SD_STATUS_WC | SD_STATUS_RC, HOST_STATUS(host));

	data->bytes_xfered = 0;

	if (data->error == MMC_ERR_NONE) {
		if (host->flags & HOST_F_DMA) {
			u32 chan = DMA_CHANNEL(host);

			/* DBDMA channel handles are pointers stored in a u32. */
			chan_tab_t *c = *((chan_tab_t **) chan);
			au1x_dma_chan_t *cp = c->chan_ptr;
			data->bytes_xfered = cp->ddma_bytecnt;
		}
		else
			/* PIO: everything not left over in pio.len was moved. */
			data->bytes_xfered =
				(data->blocks * data->blksz) -
				host->pio.len;
	}

	au1xmmc_finish_request(host);
}
324
/* Tasklet wrapper: complete the data phase with a fresh status read. */
static void au1xmmc_tasklet_data(unsigned long param)
{
	struct au1xmmc_host *host = (struct au1xmmc_host *) param;

	au1xmmc_data_complete(host, au_readl(HOST_STATUS(host)));
}
332
333#define AU1XMMC_MAX_TRANSFER 8
334
/*
 * PIO transmit path: push up to AU1XMMC_MAX_TRANSFER bytes from the
 * current scatterlist entry into the TX FIFO, stopping early if the
 * FIFO signals "not ready" (SD_STATUS_TH clear).  When the whole
 * transfer is drained, disable the TH interrupt, optionally send the
 * stop command, and schedule the data-completion tasklet.
 */
static void au1xmmc_send_pio(struct au1xmmc_host *host)
{

	struct mmc_data *data = 0;
	int sg_len, max, count = 0;
	unsigned char *sg_ptr;
	u32 status = 0;
	struct scatterlist *sg;

	data = host->mrq->data;

	/* Only meaningful for a transmit-direction transfer. */
	if (!(host->flags & HOST_F_XMIT))
		return;

	/* This is the pointer to the data buffer */
	sg = &data->sg[host->pio.index];
	sg_ptr = page_address(sg->page) + sg->offset + host->pio.offset;

	/* This is the space left inside the buffer */
	sg_len = data->sg[host->pio.index].length - host->pio.offset;

	/* Check to if we need less then the size of the sg_buffer */

	max = (sg_len > host->pio.len) ? host->pio.len : sg_len;
	if (max > AU1XMMC_MAX_TRANSFER) max = AU1XMMC_MAX_TRANSFER;

	for(count = 0; count < max; count++ ) {
		unsigned char val;

		status = au_readl(HOST_STATUS(host));

		/* FIFO full — try again on the next TH interrupt. */
		if (!(status & SD_STATUS_TH))
			break;

		val = *sg_ptr++;

		au_writel((unsigned long) val, HOST_TXPORT(host));
		au_sync();
	}

	host->pio.len -= count;
	host->pio.offset += count;

	/* Current sg entry exhausted — advance to the next one. */
	if (count == sg_len) {
		host->pio.index++;
		host->pio.offset = 0;
	}

	if (host->pio.len == 0) {
		IRQ_OFF(host, SD_CONFIG_TH);

		if (host->flags & HOST_F_STOP)
			SEND_STOP(host);

		tasklet_schedule(&host->data_task);
	}
}
392
/*
 * PIO receive path: pull up to AU1XMMC_MAX_TRANSFER bytes from the RX
 * FIFO into the current scatterlist entry.  Bails out of the loop on
 * FIFO-empty, CRC error, overrun or underrun.  When the transfer is
 * complete, disable the NE interrupt, optionally send the stop
 * command, and schedule the data-completion tasklet.
 */
static void au1xmmc_receive_pio(struct au1xmmc_host *host)
{

	struct mmc_data *data = 0;
	int sg_len = 0, max = 0, count = 0;
	unsigned char *sg_ptr = 0;
	u32 status = 0;
	struct scatterlist *sg;

	data = host->mrq->data;

	/* Only meaningful for a receive-direction transfer. */
	if (!(host->flags & HOST_F_RECV))
		return;

	max = host->pio.len;

	/* If all sg entries are consumed, drain the FIFO without storing. */
	if (host->pio.index < host->dma.len) {
		sg = &data->sg[host->pio.index];
		sg_ptr = page_address(sg->page) + sg->offset + host->pio.offset;

		/* This is the space left inside the buffer */
		sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;

		/* Check to if we need less then the size of the sg_buffer */
		if (sg_len < max) max = sg_len;
	}

	if (max > AU1XMMC_MAX_TRANSFER)
		max = AU1XMMC_MAX_TRANSFER;

	for(count = 0; count < max; count++ ) {
		u32 val;
		status = au_readl(HOST_STATUS(host));

		/* FIFO empty — wait for the next NE interrupt. */
		if (!(status & SD_STATUS_NE))
			break;

		if (status & SD_STATUS_RC) {
			DBG("RX CRC Error [%d + %d].\n", host->id,
					host->pio.len, count);
			break;
		}

		if (status & SD_STATUS_RO) {
			DBG("RX Overrun [%d + %d]\n", host->id,
					host->pio.len, count);
			break;
		}
		else if (status & SD_STATUS_RU) {
			DBG("RX Underrun [%d + %d]\n", host->id,
					host->pio.len, count);
			break;
		}

		val = au_readl(HOST_RXPORT(host));

		if (sg_ptr)
			*sg_ptr++ = (unsigned char) (val & 0xFF);
	}

	host->pio.len -= count;
	host->pio.offset += count;

	/* Current sg entry exhausted — advance to the next one. */
	if (sg_len && count == sg_len) {
		host->pio.index++;
		host->pio.offset = 0;
	}

	if (host->pio.len == 0) {
		//IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF);
		IRQ_OFF(host, SD_CONFIG_NE);

		if (host->flags & HOST_F_STOP)
			SEND_STOP(host);

		tasklet_schedule(&host->data_task);
	}
}
471
/* static void au1xmmc_cmd_complete
   This is called when a command has been completed - grab the response
   and check for errors. Then start the data transfer if it is indicated.
*/

static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
{

	struct mmc_request *mrq = host->mrq;
	struct mmc_command *cmd;
	int trans;

	/* Request may already have been torn down by an error path. */
	if (!host->mrq)
		return;

	cmd = mrq->cmd;
	cmd->error = MMC_ERR_NONE;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			u32 r[4];
			int i;

			r[0] = au_readl(host->iobase + SD_RESP3);
			r[1] = au_readl(host->iobase + SD_RESP2);
			r[2] = au_readl(host->iobase + SD_RESP1);
			r[3] = au_readl(host->iobase + SD_RESP0);

			/* The CRC is omitted from the response, so really
			 * we only got 120 bits, but the engine expects
			 * 128 bits, so we have to shift things up
			 */

			for(i = 0; i < 4; i++) {
				cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8;
				if (i != 3)
					cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24;
			}
		} else {
			/* Technically, we should be getting all 48 bits of
			 * the response (SD_RESP1 + SD_RESP2), but because
			 * our response omits the CRC, our data ends up
			 * being shifted 8 bits to the right. In this case,
			 * that means that the OCR data starts at bit 31,
			 * so we can just read RESP0 and return that
			 */
			cmd->resp[0] = au_readl(host->iobase + SD_RESP0);
		}
	}

	/* Figure out errors */

	if (status & (SD_STATUS_SC | SD_STATUS_WC | SD_STATUS_RC))
		cmd->error = MMC_ERR_BADCRC;

	trans = host->flags & (HOST_F_XMIT | HOST_F_RECV);

	/* No data phase (or a failed command): finish the request now. */
	if (!trans || cmd->error != MMC_ERR_NONE) {

		IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA|SD_CONFIG_RF);
		tasklet_schedule(&host->finish_task);
		return;
	}

	host->status = HOST_S_DATA;

	if (host->flags & HOST_F_DMA) {
		u32 channel = DMA_CHANNEL(host);

		/* Start the DMA as soon as the buffer gets something in it */

		if (host->flags & HOST_F_RECV) {
			u32 mask = SD_STATUS_DB | SD_STATUS_NE;

			/* NOTE(review): unbounded busy-wait on hardware status. */
			while((status & mask) != mask)
				status = au_readl(HOST_STATUS(host));
		}

		au1xxx_dbdma_start(channel);
	}
}
553
/*
 * Program the SD clock divider for the requested rate, derived from
 * the peripheral bus clock.  Callers must pass rate != 0 (set_ios
 * guards this); a zero rate would divide by zero here.
 */
static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate)
{

	unsigned int pbus = get_au1x00_speed();
	unsigned int divisor;
	u32 config;

	/* From databook:
	   divisor = ((((cpuclock / sbus_divisor) / 2) / mmcclock) / 2) - 1
	*/

	pbus /= ((au_readl(SYS_POWERCTRL) & 0x3) + 2);
	pbus /= 2;

	divisor = ((pbus / rate) / 2) - 1;

	config = au_readl(HOST_CONFIG(host));

	config &= ~(SD_CONFIG_DIV);
	config |= (divisor & SD_CONFIG_DIV) | SD_CONFIG_DE;

	au_writel(config, HOST_CONFIG(host));
	au_sync();
}
578
/*
 * Prepare the controller and DMA/PIO state for a data transfer.
 * Maps the scatterlist, programs the block size, and either loads the
 * DBDMA descriptor ring (DMA mode) or primes the PIO counters and
 * enables the corresponding FIFO interrupt.
 *
 * Returns MMC_ERR_NONE on success, MMC_ERR_TIMEOUT if mapping or
 * descriptor setup fails.
 */
static int
au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data)
{

	int datalen = data->blocks * data->blksz;

	if (dma != 0)
		host->flags |= HOST_F_DMA;

	if (data->flags & MMC_DATA_READ)
		host->flags |= HOST_F_RECV;
	else
		host->flags |= HOST_F_XMIT;

	if (host->mrq->stop)
		host->flags |= HOST_F_STOP;

	host->dma.dir = DMA_BIDIRECTIONAL;

	host->dma.len = dma_map_sg(mmc_dev(host->mmc), data->sg,
				   data->sg_len, host->dma.dir);

	if (host->dma.len == 0)
		return MMC_ERR_TIMEOUT;

	/* Hardware takes block size minus one. */
	au_writel(data->blksz - 1, HOST_BLKSIZE(host));

	if (host->flags & HOST_F_DMA) {
		int i;
		u32 channel = DMA_CHANNEL(host);

		au1xxx_dbdma_stop(channel);

		for(i = 0; i < host->dma.len; i++) {
			/* Only the last descriptor raises an interrupt. */
			u32 ret = 0, flags = DDMA_FLAGS_NOIE;
			struct scatterlist *sg = &data->sg[i];
			int sg_len = sg->length;

			int len = (datalen > sg_len) ? sg_len : datalen;

			if (i == host->dma.len - 1)
				flags = DDMA_FLAGS_IE;

			if (host->flags & HOST_F_XMIT){
				ret = au1xxx_dbdma_put_source_flags(channel,
					(void *) (page_address(sg->page) +
						  sg->offset),
					len, flags);
			}
			else {
				ret = au1xxx_dbdma_put_dest_flags(channel,
					(void *) (page_address(sg->page) +
						  sg->offset),
					len, flags);
			}

			if (!ret)
				goto dataerr;

			datalen -= len;
		}
	}
	else {
		host->pio.index = 0;
		host->pio.offset = 0;
		host->pio.len = datalen;

		if (host->flags & HOST_F_XMIT)
			IRQ_ON(host, SD_CONFIG_TH);
		else
			IRQ_ON(host, SD_CONFIG_NE);
			//IRQ_ON(host, SD_CONFIG_RA|SD_CONFIG_RF);
	}

	return MMC_ERR_NONE;

 dataerr:
	dma_unmap_sg(mmc_dev(host->mmc),data->sg,data->sg_len,host->dma.dir);
	return MMC_ERR_TIMEOUT;
}
659
/* static void au1xmmc_request
   This actually starts a command or data transaction
*/

static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq)
{

	struct au1xmmc_host *host = mmc_priv(mmc);
	int ret = MMC_ERR_NONE;

	WARN_ON(irqs_disabled());
	WARN_ON(host->status != HOST_S_IDLE);

	host->mrq = mrq;
	host->status = HOST_S_CMD;

	/* Turn on the activity LED (bit 8 of the BCSR LED register). */
	bcsr->disk_leds &= ~(1 << 8);

	/* Data phase must be set up before the command is issued. */
	if (mrq->data) {
		FLUSH_FIFO(host);
		ret = au1xmmc_prepare_data(host, mrq->data);
	}

	if (ret == MMC_ERR_NONE)
		ret = au1xmmc_send_command(host, 0, mrq->cmd);

	/* On failure, report the error and complete the request now. */
	if (ret != MMC_ERR_NONE) {
		mrq->cmd->error = ret;
		au1xmmc_finish_request(host);
	}
}
691
/*
 * Full hardware reset sequence for one controller: enable the clock,
 * release reset, clear status, program the timeout, flush the FIFO
 * and install the default interrupt mask.  The writes and delays
 * below follow the hardware-required ordering — do not reorder.
 */
static void au1xmmc_reset_controller(struct au1xmmc_host *host)
{

	/* Apply the clock */
	au_writel(SD_ENABLE_CE, HOST_ENABLE(host));
        au_sync_delay(1);

	au_writel(SD_ENABLE_R | SD_ENABLE_CE, HOST_ENABLE(host));
	au_sync_delay(5);

	/* Write-1-to-clear every pending status bit. */
	au_writel(~0, HOST_STATUS(host));
	au_sync();

	au_writel(0, HOST_BLKSIZE(host));
	au_writel(0x001fffff, HOST_TIMEOUT(host));
	au_sync();

	au_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
        au_sync();

	/* Pulse the FIFO-flush bit. */
	au_writel(SD_CONFIG2_EN | SD_CONFIG2_FF, HOST_CONFIG2(host));
	au_sync_delay(1);

	au_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
	au_sync();

	/* Configure interrupts */
	au_writel(AU1XMMC_INTERRUPTS, HOST_CONFIG(host));
	au_sync();
}
722
723
724static void au1xmmc_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
725{
726 struct au1xmmc_host *host = mmc_priv(mmc);
727
728 if (ios->power_mode == MMC_POWER_OFF)
729 au1xmmc_set_power(host, 0);
730 else if (ios->power_mode == MMC_POWER_ON) {
731 au1xmmc_set_power(host, 1);
732 }
733
734 if (ios->clock && ios->clock != host->clock) {
735 au1xmmc_set_clock(host, ios->clock);
736 host->clock = ios->clock;
737 }
738}
739
740static void au1xmmc_dma_callback(int irq, void *dev_id)
741{
742 struct au1xmmc_host *host = (struct au1xmmc_host *) dev_id;
743
744 /* Avoid spurious interrupts */
745
746 if (!host->mrq)
747 return;
748
749 if (host->flags & HOST_F_STOP)
750 SEND_STOP(host);
751
752 tasklet_schedule(&host->data_task);
753}
754
755#define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT)
756#define STATUS_DATA_IN (SD_STATUS_NE)
757#define STATUS_DATA_OUT (SD_STATUS_TH)
758
759static irqreturn_t au1xmmc_irq(int irq, void *dev_id)
760{
761
762 u32 status;
763 int i, ret = 0;
764
765 disable_irq(AU1100_SD_IRQ);
766
767 for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) {
768 struct au1xmmc_host * host = au1xmmc_hosts[i];
769 u32 handled = 1;
770
771 status = au_readl(HOST_STATUS(host));
772
773 if (host->mrq && (status & STATUS_TIMEOUT)) {
774 if (status & SD_STATUS_RAT)
775 host->mrq->cmd->error = MMC_ERR_TIMEOUT;
776
777 else if (status & SD_STATUS_DT)
778 host->mrq->data->error = MMC_ERR_TIMEOUT;
779
780 /* In PIO mode, interrupts might still be enabled */
781 IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH);
782
783 //IRQ_OFF(host, SD_CONFIG_TH|SD_CONFIG_RA|SD_CONFIG_RF);
784 tasklet_schedule(&host->finish_task);
785 }
786#if 0
787 else if (status & SD_STATUS_DD) {
788
789 /* Sometimes we get a DD before a NE in PIO mode */
790
791 if (!(host->flags & HOST_F_DMA) &&
792 (status & SD_STATUS_NE))
793 au1xmmc_receive_pio(host);
794 else {
795 au1xmmc_data_complete(host, status);
796 //tasklet_schedule(&host->data_task);
797 }
798 }
799#endif
800 else if (status & (SD_STATUS_CR)) {
801 if (host->status == HOST_S_CMD)
802 au1xmmc_cmd_complete(host,status);
803 }
804 else if (!(host->flags & HOST_F_DMA)) {
805 if ((host->flags & HOST_F_XMIT) &&
806 (status & STATUS_DATA_OUT))
807 au1xmmc_send_pio(host);
808 else if ((host->flags & HOST_F_RECV) &&
809 (status & STATUS_DATA_IN))
810 au1xmmc_receive_pio(host);
811 }
812 else if (status & 0x203FBC70) {
813 DBG("Unhandled status %8.8x\n", host->id, status);
814 handled = 0;
815 }
816
817 au_writel(status, HOST_STATUS(host));
818 au_sync();
819
820 ret |= handled;
821 }
822
823 enable_irq(AU1100_SD_IRQ);
824 return ret;
825}
826
/*
 * Timer-driven card-detect poll (see the comment at the top of this
 * file: CD/DAT3 detect is unusable with the pull-up, so we poll).
 * Notifies the MMC core whenever the inserted state changes, then
 * re-arms itself.
 */
static void au1xmmc_poll_event(unsigned long arg)
{
	struct au1xmmc_host *host = (struct au1xmmc_host *) arg;

	int card = au1xmmc_card_inserted(host);
	int controller = (host->flags & HOST_F_ACTIVE) ? 1 : 0;

	/* Insert/remove transition: update the flag and tell the core. */
	if (card != controller) {
		host->flags &= ~HOST_F_ACTIVE;
		if (card) host->flags |= HOST_F_ACTIVE;
		mmc_detect_change(host->mmc, 0);
	}

	/* Debug aid only: report a request still pending at poll time. */
	if (host->mrq != NULL) {
		u32 status = au_readl(HOST_STATUS(host));
		DBG("PENDING - %8.8x\n", host->id, status);
	}

	mod_timer(&host->timer, jiffies + AU1XMMC_DETECT_TIMEOUT);
}
847
848static dbdev_tab_t au1xmmc_mem_dbdev =
849{
850 DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 8, 0x00000000, 0, 0
851};
852
/*
 * Allocate and configure the TX and RX DBDMA channels for one host.
 * NOTE(review): the return values of au1xxx_ddma_add_device() and
 * au1xxx_dbdma_chan_alloc() are not checked — a failed allocation
 * would go unnoticed here; confirm against the dbdma API.
 */
static void au1xmmc_init_dma(struct au1xmmc_host *host)
{

	u32 rxchan, txchan;

	int txid = au1xmmc_card_table[host->id].tx_devid;
	int rxid = au1xmmc_card_table[host->id].rx_devid;

	/* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride
	   of 8 bits.  And since devices are shared, we need to create
	   our own to avoid freaking out other devices
	*/

	int memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev);

	/* TX: memory -> controller; RX: controller -> memory. */
	txchan = au1xxx_dbdma_chan_alloc(memid, txid,
			au1xmmc_dma_callback, (void *) host);

	rxchan = au1xxx_dbdma_chan_alloc(rxid, memid,
			au1xmmc_dma_callback, (void *) host);

	au1xxx_dbdma_set_devwidth(txchan, 8);
	au1xxx_dbdma_set_devwidth(rxchan, 8);

	au1xxx_dbdma_ring_alloc(txchan, AU1XMMC_DESCRIPTOR_COUNT);
	au1xxx_dbdma_ring_alloc(rxchan, AU1XMMC_DESCRIPTOR_COUNT);

	host->tx_chan = txchan;
	host->rx_chan = rxchan;
}
883
/* Operations exposed to the MMC core for this host controller. */
static const struct mmc_host_ops au1xmmc_ops = {
	.request	= au1xmmc_request,
	.set_ios	= au1xmmc_set_ios,
	.get_ro		= au1xmmc_card_readonly,
};
889
/*
 * Probe: request the (shared) interrupt once, then set up every
 * controller — mmc_host allocation, limits, tasklets, optional DMA
 * channels, a hardware reset and the card-detect poll timer.
 *
 * NOTE(review): if mmc_alloc_host() fails for a slot, the slot is
 * recorded as 0 and probe continues with the remaining controllers;
 * the IRQ handler must tolerate NULL entries in au1xmmc_hosts.
 * NOTE(review): the printk calls below carry no KERN_* level and "0"
 * is used where NULL is conventional — cosmetic only.
 */
static int __devinit au1xmmc_probe(struct platform_device *pdev)
{

	int i, ret = 0;

	/* THe interrupt is shared among all controllers */
	ret = request_irq(AU1100_SD_IRQ, au1xmmc_irq, IRQF_DISABLED, "MMC", 0);

	if (ret) {
		printk(DRIVER_NAME "ERROR: Couldn't get int %d: %d\n",
				AU1100_SD_IRQ, ret);
		return -ENXIO;
	}

	/* Hold the IRQ off until every host is initialised. */
	disable_irq(AU1100_SD_IRQ);

	for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) {
		struct mmc_host *mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev);
		struct au1xmmc_host *host = 0;

		if (!mmc) {
			printk(DRIVER_NAME "ERROR: no mem for host %d\n", i);
			au1xmmc_hosts[i] = 0;
			continue;
		}

		mmc->ops = &au1xmmc_ops;

		mmc->f_min =   450000;
		mmc->f_max = 24000000;

		mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE;
		mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT;

		mmc->max_blk_size = 2048;
		mmc->max_blk_count = 512;

		mmc->ocr_avail = AU1XMMC_OCR;

		host = mmc_priv(mmc);
		host->mmc = mmc;

		host->id = i;
		host->iobase = au1xmmc_card_table[host->id].iobase;
		host->clock = 0;
		host->power_mode = MMC_POWER_OFF;

		host->flags = au1xmmc_card_inserted(host) ? HOST_F_ACTIVE : 0;
		host->status = HOST_S_IDLE;

		/* Card-detect poll timer — see the comment at the top of
		 * this file for why polling is required. */
		init_timer(&host->timer);

		host->timer.function = au1xmmc_poll_event;
		host->timer.data = (unsigned long) host;
		host->timer.expires = jiffies + AU1XMMC_DETECT_TIMEOUT;

		tasklet_init(&host->data_task, au1xmmc_tasklet_data,
				(unsigned long) host);

		tasklet_init(&host->finish_task, au1xmmc_tasklet_finish,
				(unsigned long) host);

		spin_lock_init(&host->lock);

		if (dma != 0)
			au1xmmc_init_dma(host);

		au1xmmc_reset_controller(host);

		mmc_add_host(mmc);
		au1xmmc_hosts[i] = host;

		add_timer(&host->timer);

		printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X (mode=%s)\n",
		       host->id, host->iobase, dma ? "dma" : "pio");
	}

	enable_irq(AU1100_SD_IRQ);

	return 0;
}
972
973static int __devexit au1xmmc_remove(struct platform_device *pdev)
974{
975
976 int i;
977
978 disable_irq(AU1100_SD_IRQ);
979
980 for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) {
981 struct au1xmmc_host *host = au1xmmc_hosts[i];
982 if (!host) continue;
983
984 tasklet_kill(&host->data_task);
985 tasklet_kill(&host->finish_task);
986
987 del_timer_sync(&host->timer);
988 au1xmmc_set_power(host, 0);
989
990 mmc_remove_host(host->mmc);
991
992 au1xxx_dbdma_chan_free(host->tx_chan);
993 au1xxx_dbdma_chan_free(host->rx_chan);
994
995 au_writel(0x0, HOST_ENABLE(host));
996 au_sync();
997 }
998
999 free_irq(AU1100_SD_IRQ, 0);
1000 return 0;
1001}
1002
/* Platform driver glue; no PM support is implemented. */
static struct platform_driver au1xmmc_driver = {
	.probe         = au1xmmc_probe,
	.remove        = au1xmmc_remove,
	.suspend       = NULL,
	.resume        = NULL,
	.driver        = {
		.name  = DRIVER_NAME,
	},
};
1012
/* Module entry point: register the platform driver. */
static int __init au1xmmc_init(void)
{
	return platform_driver_register(&au1xmmc_driver);
}
1017
/* Module exit point: unregister the platform driver. */
static void __exit au1xmmc_exit(void)
{
	platform_driver_unregister(&au1xmmc_driver);
}
1022
1023module_init(au1xmmc_init);
1024module_exit(au1xmmc_exit);
1025
1026#ifdef MODULE
1027MODULE_AUTHOR("Advanced Micro Devices, Inc");
1028MODULE_DESCRIPTION("MMC/SD driver for the Alchemy Au1XXX");
1029MODULE_LICENSE("GPL");
1030#endif
1031
diff --git a/drivers/mmc/host/au1xmmc.h b/drivers/mmc/host/au1xmmc.h
new file mode 100644
index 000000000000..341cbdf0baca
--- /dev/null
+++ b/drivers/mmc/host/au1xmmc.h
@@ -0,0 +1,96 @@
#ifndef _AU1XMMC_H_
#define _AU1XMMC_H_

/* Hardware definitions */

/* DBDMA descriptor ring size and per-descriptor transfer limit. */
#define AU1XMMC_DESCRIPTOR_COUNT 1
#define AU1XMMC_DESCRIPTOR_SIZE  2048

/* Voltage ranges the controller can supply (2.7V - 3.6V). */
#define AU1XMMC_OCR ( MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \
		      MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \
		      MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36)

/* Easy access macros */

/* Per-host register addresses: base iobase plus the SD_* offset. */
#define HOST_STATUS(h)	((h)->iobase + SD_STATUS)
#define HOST_CONFIG(h)	((h)->iobase + SD_CONFIG)
#define HOST_ENABLE(h)	((h)->iobase + SD_ENABLE)
#define HOST_TXPORT(h)	((h)->iobase + SD_TXPORT)
#define HOST_RXPORT(h)	((h)->iobase + SD_RXPORT)
#define HOST_CMDARG(h)	((h)->iobase + SD_CMDARG)
#define HOST_BLKSIZE(h)	((h)->iobase + SD_BLKSIZE)
#define HOST_CMD(h)	((h)->iobase + SD_CMD)
#define HOST_CONFIG2(h)	((h)->iobase + SD_CONFIG2)
#define HOST_TIMEOUT(h)	((h)->iobase + SD_TIMEOUT)
#define HOST_DEBUG(h)	((h)->iobase + SD_DEBUG)

/* Pick the DMA channel matching the current transfer direction. */
#define DMA_CHANNEL(h) \
	( ((h)->flags & HOST_F_XMIT) ? (h)->tx_chan : (h)->rx_chan)

/* This gives us a hard value for the stop command that we can write directly
 * to the command register
 */

#define STOP_CMD (SD_CMD_RT_1B|SD_CMD_CT_7|(0xC << SD_CMD_CI_SHIFT)|SD_CMD_GO)

/* This is the set of interrupts that we configure by default */

#if 0
#define AU1XMMC_INTERRUPTS (SD_CONFIG_SC | SD_CONFIG_DT | SD_CONFIG_DD | \
		SD_CONFIG_RAT | SD_CONFIG_CR | SD_CONFIG_I)
#endif

#define AU1XMMC_INTERRUPTS (SD_CONFIG_SC | SD_CONFIG_DT | \
		SD_CONFIG_RAT | SD_CONFIG_CR | SD_CONFIG_I)
/* The poll event (looking for insert/remove events) runs twice a second */
#define AU1XMMC_DETECT_TIMEOUT (HZ/2)

/* Per-controller driver state, stored as mmc_priv() of the mmc_host. */
struct au1xmmc_host {
	struct mmc_host *mmc;		/* MMC core host handle */
	struct mmc_request *mrq;	/* request in flight, or NULL */

	u32 id;				/* index into au1xmmc_card_table */

	u32 flags;			/* HOST_F_* transfer/state flags */
	u32 iobase;			/* controller register base */
	u32 clock;			/* currently programmed clock rate */
	u32 bus_width;
	u32 power_mode;

	int status;			/* HOST_S_* request phase */

	struct {
		int len;		/* number of mapped sg entries */
		int dir;		/* DMA mapping direction */
	} dma;

	struct {
		int index;		/* current sg entry */
		int offset;		/* byte offset inside that entry */
		int len;		/* bytes remaining in the transfer */
	} pio;

	u32 tx_chan;			/* DBDMA channel handles */
	u32 rx_chan;

	struct timer_list timer;	/* card-detect poll timer */
	struct tasklet_struct finish_task;
	struct tasklet_struct data_task;

	spinlock_t lock;
};

/* Status flags used by the host structure */

#define HOST_F_XMIT	0x0001	/* current transfer is a write */
#define HOST_F_RECV	0x0002	/* current transfer is a read */
#define HOST_F_DMA	0x0010	/* transfer uses DBDMA, not PIO */
#define HOST_F_ACTIVE	0x0100	/* a card is inserted */
#define HOST_F_STOP	0x1000	/* a stop command is pending */

/* Request phase (host->status). */
#define HOST_S_IDLE	0x0001
#define HOST_S_CMD	0x0002
#define HOST_S_DATA	0x0003
#define HOST_S_STOP	0x0004

#endif
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
new file mode 100644
index 000000000000..7ee2045acbef
--- /dev/null
+++ b/drivers/mmc/host/imxmmc.c
@@ -0,0 +1,1137 @@
1/*
2 * linux/drivers/mmc/imxmmc.c - Motorola i.MX MMCI driver
3 *
4 * Copyright (C) 2004 Sascha Hauer, Pengutronix <sascha@saschahauer.de>
5 * Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
6 *
7 * derived from pxamci.c by Russell King
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * 2005-04-17 Pavel Pisa <pisa@cmp.felk.cvut.cz>
14 * Changed to conform redesigned i.MX scatter gather DMA interface
15 *
16 * 2005-11-04 Pavel Pisa <pisa@cmp.felk.cvut.cz>
17 * Updated for 2.6.14 kernel
18 *
19 * 2005-12-13 Jay Monkman <jtm@smoothsmoothie.com>
20 * Found and corrected problems in the write path
21 *
22 * 2005-12-30 Pavel Pisa <pisa@cmp.felk.cvut.cz>
23 * The event handling rewritten right way in softirq.
24 * Added many ugly hacks and delays to overcome SDHC
25 * deficiencies
26 *
27 */
28
29#ifdef CONFIG_MMC_DEBUG
30#define DEBUG
31#else
32#undef DEBUG
33#endif
34
35#include <linux/module.h>
36#include <linux/init.h>
37#include <linux/ioport.h>
38#include <linux/platform_device.h>
39#include <linux/interrupt.h>
40#include <linux/blkdev.h>
41#include <linux/dma-mapping.h>
42#include <linux/mmc/host.h>
43#include <linux/mmc/card.h>
44#include <linux/delay.h>
45
46#include <asm/dma.h>
47#include <asm/io.h>
48#include <asm/irq.h>
49#include <asm/sizes.h>
50#include <asm/arch/mmc.h>
51#include <asm/arch/imx-dma.h>
52
53#include "imxmmc.h"
54
55#define DRIVER_NAME "imx-mmc"
56
57#define IMXMCI_INT_MASK_DEFAULT (INT_MASK_BUF_READY | INT_MASK_DATA_TRAN | \
58 INT_MASK_WRITE_OP_DONE | INT_MASK_END_CMD_RES | \
59 INT_MASK_AUTO_CARD_DETECT | INT_MASK_DAT0_EN | INT_MASK_SDIO)
60
/* Per-controller state for the i.MX SDHC, stored in the
 * mmc_host private area. */
struct imxmci_host {
	struct mmc_host		*mmc;
	spinlock_t		lock;		/* protects imask updates */
	struct resource		*res;		/* SDHC register window */
	int			irq;
	imx_dmach_t		dma;		/* DMA channel handle */
	unsigned int		clkrt;
	unsigned int		cmdat;
	volatile unsigned int	imask;		/* cached MMC_INT_MASK value */
	unsigned int		power_mode;	/* last ios->power_mode applied */
	unsigned int		present;	/* card-present state from poll timer */
	struct imxmmc_platform_data *pdata;

	struct mmc_request	*req;		/* request in flight, NULL when idle */
	struct mmc_command	*cmd;		/* command in flight, NULL when idle */
	struct mmc_data		*data;		/* data phase in flight, NULL when idle */

	struct timer_list	timer;		/* card detect / stuck watchdog poll */
	struct tasklet_struct	tasklet;	/* deferred event processing */
	unsigned int		status_reg;	/* STATUS latched at IRQ/DMA time */
	unsigned long		pending_events;	/* IMXMCI_PEND_* bits (atomic ops) */
	/* Next two fields are there for CPU driven transfers to overcome SDHC deficiencies */
	u16			*data_ptr;	/* PIO cursor into the request buffer */
	unsigned int		data_cnt;	/* bytes moved so far by PIO */
	atomic_t		stuck_timeout;	/* poll ticks with no progress */

	unsigned int		dma_nents;	/* mapped scatterlist entries */
	unsigned int		dma_size;	/* total bytes of current data phase */
	unsigned int		dma_dir;	/* DMA_FROM_DEVICE / DMA_TO_DEVICE */
	int			dma_allocated;	/* channel owned and must be freed */

	unsigned char		actual_bus_width;	/* MMC_BUS_WIDTH_1/4 in effect */

	int			prev_cmd_code;	/* last finished opcode, for diagnostics */
};
96
97#define IMXMCI_PEND_IRQ_b 0
98#define IMXMCI_PEND_DMA_END_b 1
99#define IMXMCI_PEND_DMA_ERR_b 2
100#define IMXMCI_PEND_WAIT_RESP_b 3
101#define IMXMCI_PEND_DMA_DATA_b 4
102#define IMXMCI_PEND_CPU_DATA_b 5
103#define IMXMCI_PEND_CARD_XCHG_b 6
104#define IMXMCI_PEND_SET_INIT_b 7
105#define IMXMCI_PEND_STARTED_b 8
106
107#define IMXMCI_PEND_IRQ_m (1 << IMXMCI_PEND_IRQ_b)
108#define IMXMCI_PEND_DMA_END_m (1 << IMXMCI_PEND_DMA_END_b)
109#define IMXMCI_PEND_DMA_ERR_m (1 << IMXMCI_PEND_DMA_ERR_b)
110#define IMXMCI_PEND_WAIT_RESP_m (1 << IMXMCI_PEND_WAIT_RESP_b)
111#define IMXMCI_PEND_DMA_DATA_m (1 << IMXMCI_PEND_DMA_DATA_b)
112#define IMXMCI_PEND_CPU_DATA_m (1 << IMXMCI_PEND_CPU_DATA_b)
113#define IMXMCI_PEND_CARD_XCHG_m (1 << IMXMCI_PEND_CARD_XCHG_b)
114#define IMXMCI_PEND_SET_INIT_m (1 << IMXMCI_PEND_SET_INIT_b)
115#define IMXMCI_PEND_STARTED_m (1 << IMXMCI_PEND_STARTED_b)
116
117static void imxmci_stop_clock(struct imxmci_host *host)
118{
119 int i = 0;
120 MMC_STR_STP_CLK &= ~STR_STP_CLK_START_CLK;
121 while(i < 0x1000) {
122 if(!(i & 0x7f))
123 MMC_STR_STP_CLK |= STR_STP_CLK_STOP_CLK;
124
125 if(!(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)) {
126 /* Check twice before cut */
127 if(!(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN))
128 return;
129 }
130
131 i++;
132 }
133 dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n");
134}
135
/* Request the controller to (re)start the card bus clock.
 * Returns 0 on success, -1 when the clock refuses to run after
 * 256 retry rounds. Seeing IMXMCI_PEND_STARTED_b (set from the IRQ
 * handler) also counts as success, since an interrupt proves the
 * controller is clocking.
 */
static int imxmci_start_clock(struct imxmci_host *host)
{
	unsigned int trials = 0;
	unsigned int delay_limit = 128;
	unsigned long flags;

	MMC_STR_STP_CLK &= ~STR_STP_CLK_STOP_CLK;

	clear_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);

	/*
	 * Command start of the clock, this usually succeeds in less
	 * then 6 delay loops, but during card detection (low clockrate)
	 * it takes up to 5000 delay loops and sometimes fails for the first time
	 */
	MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK;

	do {
		unsigned int delay = delay_limit;

		while(delay--){
			if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)
				/* Check twice before cut */
				if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)
					return 0;

			/* An IRQ arriving also proves the clock started */
			if(test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
				return 0;
		}

		local_irq_save(flags);
		/*
		 * Ensure, that request is not doubled under all possible circumstances.
		 * It is possible, that clock running state is missed, because some other
		 * IRQ or schedule delays this function execution and the clocks has
		 * been already stopped by other means (response processing, SDHC HW)
		 */
		if(!test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
			MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK;
		local_irq_restore(flags);

	} while(++trials<256);

	dev_err(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n");

	return -1;
}
183
/* Reset the SDHC controller and program default timeouts and block
 * geometry. The repeated 0x5 writes appear to be an intentional
 * hardware reset quirk - presumably required by the SDHC; do not
 * shorten without checking the i.MX reference manual (TODO confirm).
 */
static void imxmci_softreset(void)
{
	/* reset sequence */
	MMC_STR_STP_CLK = 0x8;
	MMC_STR_STP_CLK = 0xD;
	MMC_STR_STP_CLK = 0x5;
	MMC_STR_STP_CLK = 0x5;
	MMC_STR_STP_CLK = 0x5;
	MMC_STR_STP_CLK = 0x5;
	MMC_STR_STP_CLK = 0x5;
	MMC_STR_STP_CLK = 0x5;
	MMC_STR_STP_CLK = 0x5;
	MMC_STR_STP_CLK = 0x5;

	/* defaults: response timeout, 512 byte blocks, one block */
	MMC_RES_TO = 0xff;
	MMC_BLK_LEN = 512;
	MMC_NOB = 1;
}
202
203static int imxmci_busy_wait_for_status(struct imxmci_host *host,
204 unsigned int *pstat, unsigned int stat_mask,
205 int timeout, const char *where)
206{
207 int loops=0;
208 while(!(*pstat & stat_mask)) {
209 loops+=2;
210 if(loops >= timeout) {
211 dev_dbg(mmc_dev(host->mmc), "busy wait timeout in %s, STATUS = 0x%x (0x%x)\n",
212 where, *pstat, stat_mask);
213 return -1;
214 }
215 udelay(2);
216 *pstat |= MMC_STATUS;
217 }
218 if(!loops)
219 return 0;
220
221 /* The busy-wait is expected there for clock <8MHz due to SDHC hardware flaws */
222 if(!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock>=8000000))
223 dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
224 loops, where, *pstat, stat_mask);
225 return loops;
226}
227
/* Program the SDHC block registers and prepare the data phase of a
 * request. Transfers shorter than 512 bytes are served by the CPU
 * (PIO) because the DMA/FIFO combination cannot handle them (see the
 * comment below); larger transfers are mapped for scatter-gather DMA.
 * For DMA reads the channel is started here; DMA writes are started
 * later from imxmci_cmd_done() once the command phase succeeded.
 */
static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned int blksz = data->blksz;
	unsigned int datasz = nob * blksz;
	int i;

	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	host->data = data;
	data->bytes_xfered = 0;

	MMC_NOB = nob;
	MMC_BLK_LEN = blksz;

	/*
	 * DMA cannot be used for small block sizes, we have to use CPU driven transfers otherwise.
	 * We are in big troubles for non-512 byte transfers according to note in the paragraph
	 * 20.6.7 of User Manual anyway, but we need to be able to transfer SCR at least.
	 * The situation is even more complex in reality. The SDHC is not able to handle well
	 * partial FIFO fills and reads. The length has to be rounded up to burst size multiple.
	 * This is required for SCR read at least.
	 */
	if (datasz < 512) {
		host->dma_size = datasz;
		if (data->flags & MMC_DATA_READ) {
			host->dma_dir = DMA_FROM_DEVICE;

			/* Hack to enable read SCR */
			MMC_NOB = 1;
			MMC_BLK_LEN = 512;
		} else {
			host->dma_dir = DMA_TO_DEVICE;
		}

		/* Convert back to virtual address */
		host->data_ptr = (u16*)(page_address(data->sg->page) + data->sg->offset);
		host->data_cnt = 0;

		clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
		set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);

		return;
	}

	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
						data->sg_len,  host->dma_dir);

		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
			host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_READ);

		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/
		CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN;
	} else {
		host->dma_dir = DMA_TO_DEVICE;

		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
						data->sg_len,  host->dma_dir);

		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
			host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_WRITE);

		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/
		CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN;
	}

#if 1	/* This code is there only for consistency checking and can be disabled in future */
	host->dma_size = 0;
	for(i=0; i<host->dma_nents; i++)
		host->dma_size+=data->sg[i].length;

	if (datasz > host->dma_size) {
		dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dm_size\n",
		       datasz, host->dma_size);
	}
#endif

	host->dma_size = datasz;

	wmb();

	if(host->actual_bus_width == MMC_BUS_WIDTH_4)
		BLR(host->dma) = 0;	/* burst 64 byte read / 64 bytes write */
	else
		BLR(host->dma) = 16;	/* burst 16 byte read / 16 bytes write */

	RSSR(host->dma) = DMA_REQ_SDHC;

	set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
	clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);

	/* start DMA engine for read, write is delayed after initial response */
	if (host->dma_dir == DMA_FROM_DEVICE) {
		imx_dma_enable(host->dma);
	}
}
327
/* Program and launch one command on the SDHC.
 * cmdat carries CMD_DAT_CONT_* flags accumulated by the caller;
 * response format, bus width, busy and init bits are added here. The
 * card clock must be stopped while programming and is restarted at
 * the end. IMXMCI_PEND_WAIT_RESP_b is set so the tasklet knows a
 * response is expected.
 */
static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
	unsigned long flags;
	u32 imask;

	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	/* Ensure, that clock are stopped else command programming and start fails */
	imxmci_stop_clock(host);

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= CMD_DAT_CONT_BUSY;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1: /* short CRC, OPCODE */
	case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
		cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R1;
		break;
	case MMC_RSP_R2: /* long 136 bit + CRC */
		cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R2;
		break;
	case MMC_RSP_R3: /* short */
		cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R3;
		break;
	default:
		break;
	}

	if ( test_and_clear_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events) )
		cmdat |= CMD_DAT_CONT_INIT; /* This command needs init */

	if ( host->actual_bus_width == MMC_BUS_WIDTH_4 )
		cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;

	/* The 32-bit argument is split across two 16-bit registers */
	MMC_CMD = cmd->opcode;
	MMC_ARGH = cmd->arg >> 16;
	MMC_ARGL = cmd->arg & 0xffff;
	MMC_CMD_DAT_CONT = cmdat;

	atomic_set(&host->stuck_timeout, 0);
	set_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events);


	/* Clear the mask bits for the events this command can produce
	 * (mask bit set appears to disable the interrupt source) */
	imask = IMXMCI_INT_MASK_DEFAULT;
	imask &= ~INT_MASK_END_CMD_RES;
	if ( cmdat & CMD_DAT_CONT_DATA_ENABLE ) {
		/*imask &= ~INT_MASK_BUF_READY;*/
		imask &= ~INT_MASK_DATA_TRAN;
		if ( cmdat & CMD_DAT_CONT_WRITE )
			imask &= ~INT_MASK_WRITE_OP_DONE;
		if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
			imask &= ~INT_MASK_BUF_READY;
	}

	spin_lock_irqsave(&host->lock, flags);
	host->imask = imask;
	MMC_INT_MASK = host->imask;
	spin_unlock_irqrestore(&host->lock, flags);

	dev_dbg(mmc_dev(host->mmc), "CMD%02d (0x%02x) mask set to 0x%04x\n",
		cmd->opcode, cmd->opcode, imask);

	imxmci_start_clock(host);
}
393
394static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *req)
395{
396 unsigned long flags;
397
398 spin_lock_irqsave(&host->lock, flags);
399
400 host->pending_events &= ~(IMXMCI_PEND_WAIT_RESP_m | IMXMCI_PEND_DMA_END_m |
401 IMXMCI_PEND_DMA_DATA_m | IMXMCI_PEND_CPU_DATA_m);
402
403 host->imask = IMXMCI_INT_MASK_DEFAULT;
404 MMC_INT_MASK = host->imask;
405
406 spin_unlock_irqrestore(&host->lock, flags);
407
408 if(req && req->cmd)
409 host->prev_cmd_code = req->cmd->opcode;
410
411 host->req = NULL;
412 host->cmd = NULL;
413 host->data = NULL;
414 mmc_request_done(host->mmc, req);
415}
416
417static int imxmci_finish_data(struct imxmci_host *host, unsigned int stat)
418{
419 struct mmc_data *data = host->data;
420 int data_error;
421
422 if(test_and_clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)){
423 imx_dma_disable(host->dma);
424 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents,
425 host->dma_dir);
426 }
427
428 if ( stat & STATUS_ERR_MASK ) {
429 dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",stat);
430 if(stat & (STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR))
431 data->error = MMC_ERR_BADCRC;
432 else if(stat & STATUS_TIME_OUT_READ)
433 data->error = MMC_ERR_TIMEOUT;
434 else
435 data->error = MMC_ERR_FAILED;
436 } else {
437 data->bytes_xfered = host->dma_size;
438 }
439
440 data_error = data->error;
441
442 host->data = NULL;
443
444 return data_error;
445}
446
447static int imxmci_cmd_done(struct imxmci_host *host, unsigned int stat)
448{
449 struct mmc_command *cmd = host->cmd;
450 int i;
451 u32 a,b,c;
452 struct mmc_data *data = host->data;
453
454 if (!cmd)
455 return 0;
456
457 host->cmd = NULL;
458
459 if (stat & STATUS_TIME_OUT_RESP) {
460 dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
461 cmd->error = MMC_ERR_TIMEOUT;
462 } else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
463 dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
464 cmd->error = MMC_ERR_BADCRC;
465 }
466
467 if(cmd->flags & MMC_RSP_PRESENT) {
468 if(cmd->flags & MMC_RSP_136) {
469 for (i = 0; i < 4; i++) {
470 u32 a = MMC_RES_FIFO & 0xffff;
471 u32 b = MMC_RES_FIFO & 0xffff;
472 cmd->resp[i] = a<<16 | b;
473 }
474 } else {
475 a = MMC_RES_FIFO & 0xffff;
476 b = MMC_RES_FIFO & 0xffff;
477 c = MMC_RES_FIFO & 0xffff;
478 cmd->resp[0] = a<<24 | b<<8 | c>>8;
479 }
480 }
481
482 dev_dbg(mmc_dev(host->mmc), "RESP 0x%08x, 0x%08x, 0x%08x, 0x%08x, error %d\n",
483 cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3], cmd->error);
484
485 if (data && (cmd->error == MMC_ERR_NONE) && !(stat & STATUS_ERR_MASK)) {
486 if (host->req->data->flags & MMC_DATA_WRITE) {
487
488 /* Wait for FIFO to be empty before starting DMA write */
489
490 stat = MMC_STATUS;
491 if(imxmci_busy_wait_for_status(host, &stat,
492 STATUS_APPL_BUFF_FE,
493 40, "imxmci_cmd_done DMA WR") < 0) {
494 cmd->error = MMC_ERR_FIFO;
495 imxmci_finish_data(host, stat);
496 if(host->req)
497 imxmci_finish_request(host, host->req);
498 dev_warn(mmc_dev(host->mmc), "STATUS = 0x%04x\n",
499 stat);
500 return 0;
501 }
502
503 if(test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
504 imx_dma_enable(host->dma);
505 }
506 }
507 } else {
508 struct mmc_request *req;
509 imxmci_stop_clock(host);
510 req = host->req;
511
512 if(data)
513 imxmci_finish_data(host, stat);
514
515 if( req ) {
516 imxmci_finish_request(host, req);
517 } else {
518 dev_warn(mmc_dev(host->mmc), "imxmci_cmd_done: no request to finish\n");
519 }
520 }
521
522 return 1;
523}
524
525static int imxmci_data_done(struct imxmci_host *host, unsigned int stat)
526{
527 struct mmc_data *data = host->data;
528 int data_error;
529
530 if (!data)
531 return 0;
532
533 data_error = imxmci_finish_data(host, stat);
534
535 if (host->req->stop) {
536 imxmci_stop_clock(host);
537 imxmci_start_cmd(host, host->req->stop, 0);
538 } else {
539 struct mmc_request *req;
540 req = host->req;
541 if( req ) {
542 imxmci_finish_request(host, req);
543 } else {
544 dev_warn(mmc_dev(host->mmc), "imxmci_data_done: no request to finish\n");
545 }
546 }
547
548 return 1;
549}
550
/* CPU driven (PIO) data transfer, used for requests below 512 bytes
 * (see imxmci_setup_data). Moves 16-bit words between the buffer
 * access FIFO and host->data_ptr in bursts of 16/64 bytes depending
 * on bus width. The udelay() calls are deliberate workarounds for
 * clocks below 8 MHz - do not remove.
 * Returns 1 when the transfer finished, -1 on read timeout and 0 when
 * more FIFO service is needed; *pstat is updated with the latest
 * STATUS contents.
 */
static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
{
	int i;
	int burst_len;
	int trans_done = 0;
	unsigned int stat = *pstat;

	if(host->actual_bus_width != MMC_BUS_WIDTH_4)
		burst_len = 16;
	else
		burst_len = 64;

	/* This is unfortunately required */
	dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n",
		stat);

	udelay(20);	/* required for clocks < 8MHz*/

	if(host->dma_dir == DMA_FROM_DEVICE) {
		imxmci_busy_wait_for_status(host, &stat,
				STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE |
				STATUS_TIME_OUT_READ,
				50, "imxmci_cpu_driven_data read");

		while((stat & (STATUS_APPL_BUFF_FF |  STATUS_DATA_TRANS_DONE)) &&
		      !(stat & STATUS_TIME_OUT_READ) &&
		      (host->data_cnt < 512)) {

			udelay(20);	/* required for clocks < 8MHz*/

			for(i = burst_len; i>=2 ; i-=2) {
				u16 data;
				data = MMC_BUFFER_ACCESS;
				udelay(10);	/* required for clocks < 8MHz*/
				if(host->data_cnt+2 <= host->dma_size) {
					*(host->data_ptr++) = data;
				} else {
					/* Odd tail byte: store only the low half */
					if(host->data_cnt < host->dma_size)
						*(u8*)(host->data_ptr) = data;
				}
				host->data_cnt += 2;
			}

			stat = MMC_STATUS;

			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n",
				host->data_cnt, burst_len, stat);
		}

		if((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512))
			trans_done = 1;

		/* Partial-block reads always flag a bogus CRC error - drop it */
		if(host->dma_size & 0x1ff)
			stat &= ~STATUS_CRC_READ_ERR;

		if(stat & STATUS_TIME_OUT_READ) {
			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read timeout STATUS = 0x%x\n",
				stat);
			trans_done = -1;
		}

	} else {
		imxmci_busy_wait_for_status(host, &stat,
				STATUS_APPL_BUFF_FE,
				20, "imxmci_cpu_driven_data write");

		while((stat & STATUS_APPL_BUFF_FE) &&
		      (host->data_cnt < host->dma_size)) {
			/* Clamp the final burst to the remaining length */
			if(burst_len >= host->dma_size - host->data_cnt) {
				burst_len = host->dma_size - host->data_cnt;
				host->data_cnt = host->dma_size;
				trans_done = 1;
			} else {
				host->data_cnt += burst_len;
			}

			for(i = burst_len; i>0 ; i-=2)
				MMC_BUFFER_ACCESS = *(host->data_ptr++);

			stat = MMC_STATUS;

			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data write burst %d STATUS = 0x%x\n",
				burst_len, stat);
		}
	}

	*pstat = stat;

	return trans_done;
}
641
642static void imxmci_dma_irq(int dma, void *devid)
643{
644 struct imxmci_host *host = devid;
645 uint32_t stat = MMC_STATUS;
646
647 atomic_set(&host->stuck_timeout, 0);
648 host->status_reg = stat;
649 set_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
650 tasklet_schedule(&host->tasklet);
651}
652
653static irqreturn_t imxmci_irq(int irq, void *devid)
654{
655 struct imxmci_host *host = devid;
656 uint32_t stat = MMC_STATUS;
657 int handled = 1;
658
659 MMC_INT_MASK = host->imask | INT_MASK_SDIO | INT_MASK_AUTO_CARD_DETECT;
660
661 atomic_set(&host->stuck_timeout, 0);
662 host->status_reg = stat;
663 set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
664 set_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
665 tasklet_schedule(&host->tasklet);
666
667 return IRQ_RETVAL(handled);;
668}
669
/* Deferred (softirq) event processing. All state arrives through the
 * host->pending_events bit mask and host->status_reg, both written
 * from hard IRQ / DMA IRQ context. Handles, in order: stuck-hardware
 * watchdog expiry, command/response completion, CPU-driven (PIO)
 * data service, DMA completion, and card insert/remove exchange.
 */
static void imxmci_tasklet_fnc(unsigned long data)
{
	struct imxmci_host *host = (struct imxmci_host *)data;
	u32 stat;
	unsigned int data_dir_mask = 0;	/* STATUS_WR_CRC_ERROR_CODE_MASK */
	int timeout = 0;

	if(atomic_read(&host->stuck_timeout) > 4) {
		/* Watchdog fired (incremented by imxmci_check_status):
		 * identify which phase is stuck for the diagnostics */
		char *what;
		timeout = 1;
		stat = MMC_STATUS;
		host->status_reg = stat;
		if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
			if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
				what = "RESP+DMA";
			else
				what = "RESP";
		else
			if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
				if(test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events))
					what = "DATA";
				else
					what = "DMA";
			else
				what = "???";

		dev_err(mmc_dev(host->mmc), "%s TIMEOUT, hardware stucked STATUS = 0x%04x IMASK = 0x%04x\n",
			what, stat, MMC_INT_MASK);
		dev_err(mmc_dev(host->mmc), "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n",
			MMC_CMD_DAT_CONT, MMC_BLK_LEN, MMC_NOB, CCR(host->dma));
		dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n",
			host->cmd?host->cmd->opcode:0, host->prev_cmd_code, 1<<host->actual_bus_width, host->dma_size);
	}

	/* Card gone or hardware stuck: fake error status so the normal
	 * completion paths below tear the request down */
	if(!host->present || timeout)
		host->status_reg = STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ |
				    STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR;

	if(test_bit(IMXMCI_PEND_IRQ_b, &host->pending_events) || timeout) {
		clear_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);

		stat = MMC_STATUS;
		/*
		 * This is not required in theory, but there is chance to miss some flag
		 * which clears automatically by mask write, FreeScale original code keeps
		 * stat from IRQ time so do I
		 */
		stat |= host->status_reg;

		if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
			stat &= ~STATUS_CRC_READ_ERR;

		if(test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
			imxmci_busy_wait_for_status(host, &stat,
					STATUS_END_CMD_RESP | STATUS_ERR_MASK,
					20, "imxmci_tasklet_fnc resp (ERRATUM #4)");
		}

		if(stat & (STATUS_END_CMD_RESP | STATUS_ERR_MASK)) {
			if(test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
				imxmci_cmd_done(host, stat);
			if(host->data && (stat & STATUS_ERR_MASK))
				imxmci_data_done(host, stat);
		}

		if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) {
			stat |= MMC_STATUS;
			if(imxmci_cpu_driven_data(host, &stat)){
				if(test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
					imxmci_cmd_done(host, stat);
				atomic_clear_mask(IMXMCI_PEND_IRQ_m|IMXMCI_PEND_CPU_DATA_m,
							&host->pending_events);
				imxmci_data_done(host, stat);
			}
		}
	}

	if(test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events) &&
	   !test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {

		stat = MMC_STATUS;
		/* Same as above */
		stat |= host->status_reg;

		if(host->dma_dir == DMA_TO_DEVICE) {
			data_dir_mask = STATUS_WRITE_OP_DONE;
		} else {
			data_dir_mask = STATUS_DATA_TRANS_DONE;
		}

		if(stat & data_dir_mask) {
			clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
			imxmci_data_done(host, stat);
		}
	}

	if(test_and_clear_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events)) {

		/* Card was inserted or removed: abort anything in flight
		 * with timeout status and tell the MMC core to rescan */
		if(host->cmd)
			imxmci_cmd_done(host, STATUS_TIME_OUT_RESP);

		if(host->data)
			imxmci_data_done(host, STATUS_TIME_OUT_READ |
					 STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR);

		if(host->req)
			imxmci_finish_request(host, host->req);

		mmc_detect_change(host->mmc, msecs_to_jiffies(100));

	}
}
782
783static void imxmci_request(struct mmc_host *mmc, struct mmc_request *req)
784{
785 struct imxmci_host *host = mmc_priv(mmc);
786 unsigned int cmdat;
787
788 WARN_ON(host->req != NULL);
789
790 host->req = req;
791
792 cmdat = 0;
793
794 if (req->data) {
795 imxmci_setup_data(host, req->data);
796
797 cmdat |= CMD_DAT_CONT_DATA_ENABLE;
798
799 if (req->data->flags & MMC_DATA_WRITE)
800 cmdat |= CMD_DAT_CONT_WRITE;
801
802 if (req->data->flags & MMC_DATA_STREAM) {
803 cmdat |= CMD_DAT_CONT_STREAM_BLOCK;
804 }
805 }
806
807 imxmci_start_cmd(host, req->cmd, cmdat);
808}
809
810#define CLK_RATE 19200000
811
/* mmc_host_ops.set_ios: apply the core's bus settings - bus width
 * (switching DAT3 between pulled-up GPIO and SD function), power mode
 * (MMC_POWER_UP schedules the card init sequence for the next
 * command), and the clock divider derived from PERCLK2 against the
 * 19.2 MHz CLK_RATE base, further divided by 2^clk to stay at or
 * below ios->clock.
 */
static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct imxmci_host *host = mmc_priv(mmc);
	int prescaler;

	if( ios->bus_width==MMC_BUS_WIDTH_4 ) {
		host->actual_bus_width = MMC_BUS_WIDTH_4;
		imx_gpio_mode(PB11_PF_SD_DAT3);
	}else{
		host->actual_bus_width = MMC_BUS_WIDTH_1;
		imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
	}

	if ( host->power_mode != ios->power_mode ) {
		switch (ios->power_mode) {
		case MMC_POWER_OFF:
			break;
		case MMC_POWER_UP:
			/* Next command carries CMD_DAT_CONT_INIT (see
			 * imxmci_start_cmd) */
			set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
			break;
		case MMC_POWER_ON:
			break;
		}
		host->power_mode = ios->power_mode;
	}

	if ( ios->clock ) {
		unsigned int clk;

		/* The prescaler is 5 for PERCLK2 equal to 96MHz
		 * then 96MHz / 5 = 19.2 MHz
		 */
		clk=imx_get_perclk2();
		prescaler=(clk+(CLK_RATE*7)/8)/CLK_RATE;
		switch(prescaler) {
		case 0:
		case 1:	prescaler = 0;
			break;
		case 2:	prescaler = 1;
			break;
		case 3:	prescaler = 2;
			break;
		case 4:	prescaler = 4;
			break;
		default:
		case 5:	prescaler = 5;
			break;
		}

		dev_dbg(mmc_dev(host->mmc), "PERCLK2 %d MHz -> prescaler %d\n",
			clk, prescaler);

		/* Find the smallest power-of-two divider that gets at or
		 * below the requested frequency */
		for(clk=0; clk<8; clk++) {
			int x;
			x = CLK_RATE / (1<<clk);
			if( x <= ios->clock)
				break;
		}

		MMC_STR_STP_CLK |= STR_STP_CLK_ENABLE; /* enable controller */

		imxmci_stop_clock(host);
		MMC_CLK_RATE = (prescaler<<3) | clk;
		/*
		 * Under my understanding, clock should not be started there, because it would
		 * initiate SDHC sequencer and send last or random command into card
		 */
		/*imxmci_start_clock(host);*/

		dev_dbg(mmc_dev(host->mmc), "MMC_CLK_RATE: 0x%08x\n", MMC_CLK_RATE);
	} else {
		imxmci_stop_clock(host);
	}
}
886
/* Host controller operations exported to the MMC core */
static const struct mmc_host_ops imxmci_ops = {
	.request = imxmci_request,
	.set_ios = imxmci_set_ios,
};
891
892static struct resource *platform_device_resource(struct platform_device *dev, unsigned int mask, int nr)
893{
894 int i;
895
896 for (i = 0; i < dev->num_resources; i++)
897 if (dev->resource[i].flags == mask && nr-- == 0)
898 return &dev->resource[i];
899 return NULL;
900}
901
902static int platform_device_irq(struct platform_device *dev, int nr)
903{
904 int i;
905
906 for (i = 0; i < dev->num_resources; i++)
907 if (dev->resource[i].flags == IORESOURCE_IRQ && nr-- == 0)
908 return dev->resource[i].start;
909 return NO_IRQ;
910}
911
912static void imxmci_check_status(unsigned long data)
913{
914 struct imxmci_host *host = (struct imxmci_host *)data;
915
916 if( host->pdata->card_present() != host->present ) {
917 host->present ^= 1;
918 dev_info(mmc_dev(host->mmc), "card %s\n",
919 host->present ? "inserted" : "removed");
920
921 set_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events);
922 tasklet_schedule(&host->tasklet);
923 }
924
925 if(test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events) ||
926 test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
927 atomic_inc(&host->stuck_timeout);
928 if(atomic_read(&host->stuck_timeout) > 4)
929 tasklet_schedule(&host->tasklet);
930 } else {
931 atomic_set(&host->stuck_timeout, 0);
932
933 }
934
935 mod_timer(&host->timer, jiffies + (HZ>>1));
936}
937
938static int imxmci_probe(struct platform_device *pdev)
939{
940 struct mmc_host *mmc;
941 struct imxmci_host *host = NULL;
942 struct resource *r;
943 int ret = 0, irq;
944
945 printk(KERN_INFO "i.MX mmc driver\n");
946
947 r = platform_device_resource(pdev, IORESOURCE_MEM, 0);
948 irq = platform_device_irq(pdev, 0);
949 if (!r || irq == NO_IRQ)
950 return -ENXIO;
951
952 r = request_mem_region(r->start, 0x100, "IMXMCI");
953 if (!r)
954 return -EBUSY;
955
956 mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev);
957 if (!mmc) {
958 ret = -ENOMEM;
959 goto out;
960 }
961
962 mmc->ops = &imxmci_ops;
963 mmc->f_min = 150000;
964 mmc->f_max = CLK_RATE/2;
965 mmc->ocr_avail = MMC_VDD_32_33;
966 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_BYTEBLOCK;
967
968 /* MMC core transfer sizes tunable parameters */
969 mmc->max_hw_segs = 64;
970 mmc->max_phys_segs = 64;
971 mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */
972 mmc->max_req_size = 64*512; /* default PAGE_CACHE_SIZE */
973 mmc->max_blk_size = 2048;
974 mmc->max_blk_count = 65535;
975
976 host = mmc_priv(mmc);
977 host->mmc = mmc;
978 host->dma_allocated = 0;
979 host->pdata = pdev->dev.platform_data;
980
981 spin_lock_init(&host->lock);
982 host->res = r;
983 host->irq = irq;
984
985 imx_gpio_mode(PB8_PF_SD_DAT0);
986 imx_gpio_mode(PB9_PF_SD_DAT1);
987 imx_gpio_mode(PB10_PF_SD_DAT2);
988 /* Configured as GPIO with pull-up to ensure right MCC card mode */
989 /* Switched to PB11_PF_SD_DAT3 if 4 bit bus is configured */
990 imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
991 /* imx_gpio_mode(PB11_PF_SD_DAT3); */
992 imx_gpio_mode(PB12_PF_SD_CLK);
993 imx_gpio_mode(PB13_PF_SD_CMD);
994
995 imxmci_softreset();
996
997 if ( MMC_REV_NO != 0x390 ) {
998 dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
999 MMC_REV_NO);
1000 goto out;
1001 }
1002
1003 MMC_READ_TO = 0x2db4; /* recommended in data sheet */
1004
1005 host->imask = IMXMCI_INT_MASK_DEFAULT;
1006 MMC_INT_MASK = host->imask;
1007
1008
1009 if(imx_dma_request_by_prio(&host->dma, DRIVER_NAME, DMA_PRIO_LOW)<0){
1010 dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n");
1011 ret = -EBUSY;
1012 goto out;
1013 }
1014 host->dma_allocated=1;
1015 imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host);
1016
1017 tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host);
1018 host->status_reg=0;
1019 host->pending_events=0;
1020
1021 ret = request_irq(host->irq, imxmci_irq, 0, DRIVER_NAME, host);
1022 if (ret)
1023 goto out;
1024
1025 host->present = host->pdata->card_present();
1026 init_timer(&host->timer);
1027 host->timer.data = (unsigned long)host;
1028 host->timer.function = imxmci_check_status;
1029 add_timer(&host->timer);
1030 mod_timer(&host->timer, jiffies + (HZ>>1));
1031
1032 platform_set_drvdata(pdev, mmc);
1033
1034 mmc_add_host(mmc);
1035
1036 return 0;
1037
1038out:
1039 if (host) {
1040 if(host->dma_allocated){
1041 imx_dma_free(host->dma);
1042 host->dma_allocated=0;
1043 }
1044 }
1045 if (mmc)
1046 mmc_free_host(mmc);
1047 release_resource(r);
1048 return ret;
1049}
1050
/* Remove path: quiesce the tasklet and poll timer before unregistering
 * from the MMC core, then release IRQ, DMA, tasklet and the register
 * region in reverse order of acquisition. Always returns 0.
 */
static int imxmci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	if (mmc) {
		struct imxmci_host *host = mmc_priv(mmc);

		/* Block new tasklet runs before tearing down the host */
		tasklet_disable(&host->tasklet);

		del_timer_sync(&host->timer);
		mmc_remove_host(mmc);

		free_irq(host->irq, host);
		if(host->dma_allocated){
			imx_dma_free(host->dma);
			host->dma_allocated=0;
		}

		tasklet_kill(&host->tasklet);

		release_resource(host->res);

		mmc_free_host(mmc);
	}
	return 0;
}
1079
1080#ifdef CONFIG_PM
1081static int imxmci_suspend(struct platform_device *dev, pm_message_t state)
1082{
1083 struct mmc_host *mmc = platform_get_drvdata(dev);
1084 int ret = 0;
1085
1086 if (mmc)
1087 ret = mmc_suspend_host(mmc, state);
1088
1089 return ret;
1090}
1091
1092static int imxmci_resume(struct platform_device *dev)
1093{
1094 struct mmc_host *mmc = platform_get_drvdata(dev);
1095 struct imxmci_host *host;
1096 int ret = 0;
1097
1098 if (mmc) {
1099 host = mmc_priv(mmc);
1100 if(host)
1101 set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
1102 ret = mmc_resume_host(mmc);
1103 }
1104
1105 return ret;
1106}
1107#else
1108#define imxmci_suspend NULL
1109#define imxmci_resume NULL
1110#endif /* CONFIG_PM */
1111
/* Platform driver glue binding to the "imx-mmc" platform device */
static struct platform_driver imxmci_driver = {
	.probe		= imxmci_probe,
	.remove		= imxmci_remove,
	.suspend	= imxmci_suspend,
	.resume		= imxmci_resume,
	.driver		= {
		.name		= DRIVER_NAME,
	}
};
1121
/* Module entry point: register the platform driver */
static int __init imxmci_init(void)
{
	return platform_driver_register(&imxmci_driver);
}
1126
/* Module exit point: unregister the platform driver */
static void __exit imxmci_exit(void)
{
	platform_driver_unregister(&imxmci_driver);
}
1131
1132module_init(imxmci_init);
1133module_exit(imxmci_exit);
1134
1135MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
1136MODULE_AUTHOR("Sascha Hauer, Pengutronix");
1137MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/imxmmc.h b/drivers/mmc/host/imxmmc.h
new file mode 100644
index 000000000000..e5339e334dbb
--- /dev/null
+++ b/drivers/mmc/host/imxmmc.h
@@ -0,0 +1,67 @@

/* Access a 16-bit controller register at absolute physical address x. */
# define __REG16(x) (*((volatile u16 *)IO_ADDRESS(x)))

/* i.MX MMC/SD host controller register map (byte offsets from IMX_MMC_BASE). */
#define MMC_STR_STP_CLK __REG16(IMX_MMC_BASE + 0x00)
#define MMC_STATUS __REG16(IMX_MMC_BASE + 0x04)
#define MMC_CLK_RATE __REG16(IMX_MMC_BASE + 0x08)
#define MMC_CMD_DAT_CONT __REG16(IMX_MMC_BASE + 0x0C)
#define MMC_RES_TO __REG16(IMX_MMC_BASE + 0x10)
#define MMC_READ_TO __REG16(IMX_MMC_BASE + 0x14)
#define MMC_BLK_LEN __REG16(IMX_MMC_BASE + 0x18)
#define MMC_NOB __REG16(IMX_MMC_BASE + 0x1C)
#define MMC_REV_NO __REG16(IMX_MMC_BASE + 0x20)
#define MMC_INT_MASK __REG16(IMX_MMC_BASE + 0x24)
#define MMC_CMD __REG16(IMX_MMC_BASE + 0x28)
#define MMC_ARGH __REG16(IMX_MMC_BASE + 0x2C)
#define MMC_ARGL __REG16(IMX_MMC_BASE + 0x30)
#define MMC_RES_FIFO __REG16(IMX_MMC_BASE + 0x34)
#define MMC_BUFFER_ACCESS __REG16(IMX_MMC_BASE + 0x38)
/* Raw byte offset of the buffer-access register (NOTE(review): presumably
 * used for DMA address programming — confirm against imxmmc.c). */
#define MMC_BUFFER_ACCESS_OFS 0x38


/* MMC_STR_STP_CLK bits */
#define STR_STP_CLK_ENDIAN (1<<5)
#define STR_STP_CLK_RESET (1<<3)
#define STR_STP_CLK_ENABLE (1<<2)
#define STR_STP_CLK_START_CLK (1<<1)
#define STR_STP_CLK_STOP_CLK (1<<0)
/* MMC_STATUS bits */
#define STATUS_CARD_PRESENCE (1<<15)
#define STATUS_SDIO_INT_ACTIVE (1<<14)
#define STATUS_END_CMD_RESP (1<<13)
#define STATUS_WRITE_OP_DONE (1<<12)
#define STATUS_DATA_TRANS_DONE (1<<11)
#define STATUS_WR_CRC_ERROR_CODE_MASK (3<<10)
#define STATUS_CARD_BUS_CLK_RUN (1<<8)
#define STATUS_APPL_BUFF_FF (1<<7)
#define STATUS_APPL_BUFF_FE (1<<6)
#define STATUS_RESP_CRC_ERR (1<<5)
#define STATUS_CRC_READ_ERR (1<<3)
#define STATUS_CRC_WRITE_ERR (1<<2)
#define STATUS_TIME_OUT_RESP (1<<1)
#define STATUS_TIME_OUT_READ (1<<0)
/* All five error bits above (bits 0-3 and 5) in one mask. */
#define STATUS_ERR_MASK 0x2f
/* MMC_CLK_RATE fields */
#define CLK_RATE_PRESCALER(x) ((x) & 0x7)
#define CLK_RATE_CLK_RATE(x) (((x) & 0x7) << 3)
/* MMC_CMD_DAT_CONT bits */
#define CMD_DAT_CONT_CMD_RESP_LONG_OFF (1<<12)
#define CMD_DAT_CONT_STOP_READWAIT (1<<11)
#define CMD_DAT_CONT_START_READWAIT (1<<10)
#define CMD_DAT_CONT_BUS_WIDTH_1 (0<<8)
#define CMD_DAT_CONT_BUS_WIDTH_4 (2<<8)
#define CMD_DAT_CONT_INIT (1<<7)
#define CMD_DAT_CONT_BUSY (1<<6)
#define CMD_DAT_CONT_STREAM_BLOCK (1<<5)
#define CMD_DAT_CONT_WRITE (1<<4)
#define CMD_DAT_CONT_DATA_ENABLE (1<<3)
#define CMD_DAT_CONT_RESPONSE_FORMAT_R1 (1)
#define CMD_DAT_CONT_RESPONSE_FORMAT_R2 (2)
#define CMD_DAT_CONT_RESPONSE_FORMAT_R3 (3)
#define CMD_DAT_CONT_RESPONSE_FORMAT_R4 (4)
#define CMD_DAT_CONT_RESPONSE_FORMAT_R5 (5)
#define CMD_DAT_CONT_RESPONSE_FORMAT_R6 (6)
/* MMC_INT_MASK bits */
#define INT_MASK_AUTO_CARD_DETECT (1<<6)
#define INT_MASK_DAT0_EN (1<<5)
#define INT_MASK_SDIO (1<<4)
#define INT_MASK_BUF_READY (1<<3)
#define INT_MASK_END_CMD_RES (1<<2)
#define INT_MASK_WRITE_OP_DONE (1<<1)
#define INT_MASK_DATA_TRAN (1<<0)
#define INT_ALL (0x7f)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
new file mode 100644
index 000000000000..d11c2d23ceea
--- /dev/null
+++ b/drivers/mmc/host/mmci.c
@@ -0,0 +1,702 @@
1/*
 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
3 *
4 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/module.h>
11#include <linux/moduleparam.h>
12#include <linux/init.h>
13#include <linux/ioport.h>
14#include <linux/device.h>
15#include <linux/interrupt.h>
16#include <linux/delay.h>
17#include <linux/err.h>
18#include <linux/highmem.h>
19#include <linux/mmc/host.h>
20#include <linux/amba/bus.h>
21#include <linux/clk.h>
22
23#include <asm/cacheflush.h>
24#include <asm/div64.h>
25#include <asm/io.h>
26#include <asm/scatterlist.h>
27#include <asm/sizes.h>
28#include <asm/mach/mmc.h>
29
30#include "mmci.h"
31
#define DRIVER_NAME "mmci-pl18x"

#define DBG(host,fmt,args...)	\
	pr_debug("%s: %s: " fmt, mmc_hostname(host->mmc), __func__ , args)

/* Default cap on the card clock in Hz; overridable via the "fmax" module parameter. */
static unsigned int fmax = 515633;
38
/*
 * Complete the current request: clear the command register, hand the
 * accumulated byte count back to the core, and notify the MMC layer.
 *
 * Called with host->lock held.  The lock is dropped around
 * mmc_request_done() because the core may re-enter this driver
 * (e.g. to queue the next request) from that callback.
 */
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	/* Data must already have been stopped (mmci_stop_data). */
	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	if (mrq->data)
		mrq->data->bytes_xfered = host->data_xfered;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}
60
61static void mmci_stop_data(struct mmci_host *host)
62{
63 writel(0, host->base + MMCIDATACTRL);
64 writel(0, host->base + MMCIMASK1);
65 host->data = NULL;
66}
67
/*
 * Program the data path for a transfer and unmask the appropriate PIO
 * FIFO interrupt on MASK1.  The data-end interrupt is masked here and
 * only re-enabled once the PIO code has finished with the FIFO (see
 * mmci_pio_irq), avoiding a race between end-of-data and outstanding
 * FIFO work.
 */
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	DBG(host, "blksz %04x blks %04x flags %08x\n",
	    data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz;
	host->data_xfered = 0;

	mmci_init_sg(host, data);

	/* Convert the ns timeout into card-clock cycles (64-bit intermediate). */
	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	/* The hardware takes the block size as log2, so it must be a power of two. */
	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
	if (data->flags & MMC_DATA_READ) {
		datactrl |= MCI_DPSM_DIRECTION;
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than a FIFOSIZE of bytes to transfer,
		 * trigger a PIO interrupt as soon as any data is available.
		 */
		if (host->size < MCI_FIFOSIZE)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since its implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	writel(datactrl, base + MMCIDATACTRL);
	/* Mask DATAEND until the FIFO work is done (re-enabled in mmci_pio_irq). */
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	writel(irqmask, base + MMCIMASK1);
}
119
/*
 * Issue a command.  @c carries any pre-set CPSM bits from the caller;
 * the opcode, enable and response-format bits are OR'd in here.
 */
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	DBG(host, "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

	/*
	 * Disable the CPSM before reprogramming it.  NOTE(review): the
	 * 1us delay presumably lets the disable take effect — confirm
	 * against the PL180 TRM.
	 */
	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	/* Placeholder: the command-pending/interrupt mode is never used. */
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}
147
/*
 * Data-path interrupt handling: account each completed block, latch
 * any error into data->error, and on end-of-data either complete the
 * request or issue the stop command.
 */
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	if (status & MCI_DATABLOCKEND) {
		host->data_xfered += data->blksz;
	}
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		if (status & MCI_DATACRCFAIL)
			data->error = MMC_ERR_BADCRC;
		else if (status & MCI_DATATIMEOUT)
			data->error = MMC_ERR_TIMEOUT;
		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
			data->error = MMC_ERR_FIFO;
		/* Force the end-of-data path below to run on error. */
		status |= MCI_DATAEND;

		/*
		 * We hit an error condition. Ensure that any data
		 * partially written to a page is properly coherent.
		 */
		if (host->sg_len && data->flags & MMC_DATA_READ)
			flush_dcache_page(host->sg_ptr->page);
	}
	if (status & MCI_DATAEND) {
		mmci_stop_data(host);

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}
181
/*
 * Command-path interrupt: capture the response, record any error, then
 * either complete the request or kick off a write data transfer.
 * (Read transfers are programmed before the command is sent — see
 * mmci_request — so nothing is started here for reads.)
 */
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	/* Response registers are read unconditionally; harmless when no response. */
	cmd->resp[0] = readl(base + MMCIRESPONSE0);
	cmd->resp[1] = readl(base + MMCIRESPONSE1);
	cmd->resp[2] = readl(base + MMCIRESPONSE2);
	cmd->resp[3] = readl(base + MMCIRESPONSE3);

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = MMC_ERR_TIMEOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		/* A CRC failure only matters if the response is CRC-protected. */
		cmd->error = MMC_ERR_BADCRC;
	}

	if (!cmd->data || cmd->error != MMC_ERR_NONE) {
		/* No data phase, or the command failed: tear down and finish. */
		if (host->data)
			mmci_stop_data(host);
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}
209
/*
 * Drain the receive FIFO into @buffer, copying at most @remain bytes.
 * Returns the number of bytes actually copied.
 */
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;

	do {
		/*
		 * Bytes currently available in the FIFO.  NOTE(review):
		 * assumes MMCIFIFOCNT counts words still to be received
		 * (<< 2 converts to bytes) — confirm against the PL180 TRM.
		 */
		int count = host->size - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		readsl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}
238
239static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
240{
241 void __iomem *base = host->base;
242 char *ptr = buffer;
243
244 do {
245 unsigned int count, maxcnt;
246
247 maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
248 count = min(remain, maxcnt);
249
250 writesl(base + MMCIFIFO, ptr, count >> 2);
251
252 ptr += count;
253 remain -= count;
254
255 if (remain == 0)
256 break;
257
258 status = readl(base + MMCISTATUS);
259 } while (status & MCI_TXFIFOHALFEMPTY);
260
261 return ptr - buffer;
262}
263
/*
 * PIO data transfer IRQ handler.
 *
 * Walks the scatterlist, moving data between the FIFO and the current
 * buffer while the hardware signals space (write) or data (read).
 * Buffers are mapped with an atomic kmap, so nothing here may sleep.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	void __iomem *base = host->base;
	u32 status;

	status = readl(base + MMCISTATUS);

	DBG(host, "irq1 %08x\n", status);

	do {
		unsigned long flags;
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		/*
		 * Map the current scatter buffer.
		 */
		buffer = mmci_kmap_atomic(host, &flags) + host->sg_off;
		remain = host->sg_ptr->length - host->sg_off;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		/*
		 * Unmap the buffer.
		 */
		mmci_kunmap_atomic(host, buffer, &flags);

		host->sg_off += len;
		host->size -= len;
		remain -= len;

		/* Buffer only partially consumed: FIFO is exhausted; wait for next IRQ. */
		if (remain)
			break;

		/*
		 * If we were reading, and we have completed this
		 * page, ensure that the data cache is coherent.
		 */
		if (status & MCI_RXACTIVE)
			flush_dcache_page(host->sg_ptr->page);

		if (!mmci_next_sg(host))
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	/*
	 * If we're nearing the end of the read, switch to
	 * "any data available" mode.
	 */
	if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE)
		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		writel(0, base + MMCIMASK1);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}
349
/*
 * Handle completion of command and data transfers.
 *
 * Loops until no unmasked status bits remain so that events arriving
 * while earlier ones are being serviced are not lost.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		/* Consider only sources we have unmasked, and ack them. */
		status = readl(host->base + MMCISTATUS);
		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		DBG(host, "irq0 %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
			      MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}
387
/*
 * Start a new request from the MMC core.  For reads the data path is
 * programmed before the command so the FIFO is ready as soon as the
 * card starts sending; writes are started from mmci_cmd_irq once the
 * command has completed.
 */
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);

	/* The core serialises requests; a pending mrq here indicates a bug. */
	WARN_ON(host->mrq != NULL);

	spin_lock_irq(&host->lock);

	host->mrq = mrq;

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irq(&host->lock);
}
405
406static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
407{
408 struct mmci_host *host = mmc_priv(mmc);
409 u32 clk = 0, pwr = 0;
410
411 if (ios->clock) {
412 if (ios->clock >= host->mclk) {
413 clk = MCI_CLK_BYPASS;
414 host->cclk = host->mclk;
415 } else {
416 clk = host->mclk / (2 * ios->clock) - 1;
417 if (clk > 256)
418 clk = 255;
419 host->cclk = host->mclk / (2 * (clk + 1));
420 }
421 clk |= MCI_CLK_ENABLE;
422 }
423
424 if (host->plat->translate_vdd)
425 pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
426
427 switch (ios->power_mode) {
428 case MMC_POWER_OFF:
429 break;
430 case MMC_POWER_UP:
431 pwr |= MCI_PWR_UP;
432 break;
433 case MMC_POWER_ON:
434 pwr |= MCI_PWR_ON;
435 break;
436 }
437
438 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
439 pwr |= MCI_ROD;
440
441 writel(clk, host->base + MMCICLOCK);
442
443 if (host->pwr != pwr) {
444 host->pwr = pwr;
445 writel(pwr, host->base + MMCIPOWER);
446 }
447}
448
/* Host operations handed to the MMC core at mmc_add_host() time. */
static const struct mmc_host_ops mmci_ops = {
	.request = mmci_request,
	.set_ios = mmci_set_ios,
};
453
454static void mmci_check_status(unsigned long data)
455{
456 struct mmci_host *host = (struct mmci_host *)data;
457 unsigned int status;
458
459 status = host->plat->status(mmc_dev(host->mmc));
460 if (status ^ host->oldstat)
461 mmc_detect_change(host->mmc, 0);
462
463 host->oldstat = status;
464 mod_timer(&host->timer, jiffies + HZ);
465}
466
/*
 * Probe: claim the AMBA region, set up the clock and host capabilities,
 * hook up the two interrupt lines (irq[0] = command/data, irq[1] = PIO)
 * and start the card-detect polling timer.  Errors unwind through the
 * goto ladder in reverse order of acquisition.
 */
static int mmci_probe(struct amba_device *dev, void *id)
{
	struct mmc_platform_data *plat = dev->dev.platform_data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->clk = clk_get(&dev->dev, "MCLK");
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->mclk = clk_get_rate(host->clk);
	host->mmc = mmc;
	host->base = ioremap(dev->res.start, SZ_4K);
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	/* Lowest usable rate: mclk with the maximum divisor (see mmci_set_ios). */
	mmc->f_min = (host->mclk + 511) / 512;
	mmc->f_max = min(host->mclk, fmax);
	mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = MMC_CAP_MULTIWRITE;

	/*
	 * We can do SGIO
	 */
	mmc->max_hw_segs = 16;
	mmc->max_phys_segs = NR_SG;

	/*
	 * Since we only have a 16-bit data length register, we must
	 * ensure that we don't exceed 2^16-1 bytes in a single request.
	 */
	mmc->max_req_size = 65535;

	/*
	 * Set the maximum segment size.  Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	/* Quiesce the controller: mask everything and clear stale status. */
	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
	if (ret)
		goto irq0_free;

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	mmc_add_host(mmc);

	printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
		mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
		(unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);

	/* Poll for card insertion/removal once a second. */
	init_timer(&host->timer);
	host->timer.data = (unsigned long)host;
	host->timer.function = mmci_check_status;
	host->timer.expires = jiffies + HZ;
	add_timer(&host->timer);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}
592
/*
 * Remove: unregister from the MMC core first (flushes outstanding
 * work), then disable the hardware and release resources in reverse
 * probe order.
 */
static int mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		/* Stop card-detect polling before tearing anything down. */
		del_timer_sync(&host->timer);

		mmc_remove_host(mmc);

		/* Mask all interrupt sources... */
		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		/* ...and shut down the command and data state machines. */
		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		free_irq(dev->irq[0], host);
		free_irq(dev->irq[1], host);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_put(host->clk);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}
626
627#ifdef CONFIG_PM
628static int mmci_suspend(struct amba_device *dev, pm_message_t state)
629{
630 struct mmc_host *mmc = amba_get_drvdata(dev);
631 int ret = 0;
632
633 if (mmc) {
634 struct mmci_host *host = mmc_priv(mmc);
635
636 ret = mmc_suspend_host(mmc, state);
637 if (ret == 0)
638 writel(0, host->base + MMCIMASK0);
639 }
640
641 return ret;
642}
643
644static int mmci_resume(struct amba_device *dev)
645{
646 struct mmc_host *mmc = amba_get_drvdata(dev);
647 int ret = 0;
648
649 if (mmc) {
650 struct mmci_host *host = mmc_priv(mmc);
651
652 writel(MCI_IRQENABLE, host->base + MMCIMASK0);
653
654 ret = mmc_resume_host(mmc);
655 }
656
657 return ret;
658}
659#else
660#define mmci_suspend NULL
661#define mmci_resume NULL
662#endif
663
/* AMBA peripheral IDs: 0x...180 = PL180, 0x...181 = PL181. */
static struct amba_id mmci_ids[] = {
	{
		.id = 0x00041180,
		.mask = 0x000fffff,
	},
	{
		.id = 0x00041181,
		.mask = 0x000fffff,
	},
	{ 0, 0 },	/* sentinel */
};
675
/* AMBA bus driver glue: binds mmci_* callbacks to matching PrimeCell devices. */
static struct amba_driver mmci_driver = {
	.drv = {
		.name = DRIVER_NAME,
	},
	.probe = mmci_probe,
	.remove = mmci_remove,
	.suspend = mmci_suspend,
	.resume = mmci_resume,
	.id_table = mmci_ids,
};
686
/* Module entry point: register the AMBA driver. */
static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}
691
/* Module exit point: unregister the AMBA driver. */
static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}
696
module_init(mmci_init);
module_exit(mmci_exit);
/* Allow the card-clock cap (Hz) to be set at load time; read-only in sysfs. */
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
new file mode 100644
index 000000000000..6d7eadc9a678
--- /dev/null
+++ b/drivers/mmc/host/mmci.h
@@ -0,0 +1,179 @@
1/*
 * linux/drivers/mmc/host/mmci.h - ARM PrimeCell MMCI PL180/1 driver
3 *
4 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
/* MMCI (PL180/181) register offsets and bit definitions. */
#define MMCIPOWER 0x000
#define MCI_PWR_OFF 0x00
#define MCI_PWR_UP 0x02
#define MCI_PWR_ON 0x03
#define MCI_OD (1 << 6)
#define MCI_ROD (1 << 7)

/* Note: the clock divisor occupies bits [7:0]; control bits start at bit 8. */
#define MMCICLOCK 0x004
#define MCI_CLK_ENABLE (1 << 8)
#define MCI_CLK_PWRSAVE (1 << 9)
#define MCI_CLK_BYPASS (1 << 10)

#define MMCIARGUMENT 0x008
#define MMCICOMMAND 0x00c
#define MCI_CPSM_RESPONSE (1 << 6)
#define MCI_CPSM_LONGRSP (1 << 7)
#define MCI_CPSM_INTERRUPT (1 << 8)
#define MCI_CPSM_PENDING (1 << 9)
#define MCI_CPSM_ENABLE (1 << 10)

#define MMCIRESPCMD 0x010
#define MMCIRESPONSE0 0x014
#define MMCIRESPONSE1 0x018
#define MMCIRESPONSE2 0x01c
#define MMCIRESPONSE3 0x020
#define MMCIDATATIMER 0x024
#define MMCIDATALENGTH 0x028
#define MMCIDATACTRL 0x02c
#define MCI_DPSM_ENABLE (1 << 0)
#define MCI_DPSM_DIRECTION (1 << 1)
#define MCI_DPSM_MODE (1 << 2)
#define MCI_DPSM_DMAENABLE (1 << 3)

#define MMCIDATACNT 0x030
#define MMCISTATUS 0x034
#define MCI_CMDCRCFAIL (1 << 0)
#define MCI_DATACRCFAIL (1 << 1)
#define MCI_CMDTIMEOUT (1 << 2)
#define MCI_DATATIMEOUT (1 << 3)
#define MCI_TXUNDERRUN (1 << 4)
#define MCI_RXOVERRUN (1 << 5)
#define MCI_CMDRESPEND (1 << 6)
#define MCI_CMDSENT (1 << 7)
#define MCI_DATAEND (1 << 8)
#define MCI_DATABLOCKEND (1 << 10)
#define MCI_CMDACTIVE (1 << 11)
#define MCI_TXACTIVE (1 << 12)
#define MCI_RXACTIVE (1 << 13)
#define MCI_TXFIFOHALFEMPTY (1 << 14)
#define MCI_RXFIFOHALFFULL (1 << 15)
#define MCI_TXFIFOFULL (1 << 16)
#define MCI_RXFIFOFULL (1 << 17)
#define MCI_TXFIFOEMPTY (1 << 18)
#define MCI_RXFIFOEMPTY (1 << 19)
#define MCI_TXDATAAVLBL (1 << 20)
#define MCI_RXDATAAVLBL (1 << 21)

#define MMCICLEAR 0x038
#define MCI_CMDCRCFAILCLR (1 << 0)
#define MCI_DATACRCFAILCLR (1 << 1)
#define MCI_CMDTIMEOUTCLR (1 << 2)
#define MCI_DATATIMEOUTCLR (1 << 3)
#define MCI_TXUNDERRUNCLR (1 << 4)
#define MCI_RXOVERRUNCLR (1 << 5)
#define MCI_CMDRESPENDCLR (1 << 6)
#define MCI_CMDSENTCLR (1 << 7)
#define MCI_DATAENDCLR (1 << 8)
#define MCI_DATABLOCKENDCLR (1 << 10)

#define MMCIMASK0 0x03c
#define MCI_CMDCRCFAILMASK (1 << 0)
#define MCI_DATACRCFAILMASK (1 << 1)
#define MCI_CMDTIMEOUTMASK (1 << 2)
#define MCI_DATATIMEOUTMASK (1 << 3)
#define MCI_TXUNDERRUNMASK (1 << 4)
#define MCI_RXOVERRUNMASK (1 << 5)
#define MCI_CMDRESPENDMASK (1 << 6)
#define MCI_CMDSENTMASK (1 << 7)
#define MCI_DATAENDMASK (1 << 8)
#define MCI_DATABLOCKENDMASK (1 << 10)
#define MCI_CMDACTIVEMASK (1 << 11)
#define MCI_TXACTIVEMASK (1 << 12)
#define MCI_RXACTIVEMASK (1 << 13)
#define MCI_TXFIFOHALFEMPTYMASK (1 << 14)
#define MCI_RXFIFOHALFFULLMASK (1 << 15)
#define MCI_TXFIFOFULLMASK (1 << 16)
#define MCI_RXFIFOFULLMASK (1 << 17)
#define MCI_TXFIFOEMPTYMASK (1 << 18)
#define MCI_RXFIFOEMPTYMASK (1 << 19)
#define MCI_TXDATAAVLBLMASK (1 << 20)
#define MCI_RXDATAAVLBLMASK (1 << 21)

#define MMCIMASK1 0x040
#define MMCIFIFOCNT 0x048
#define MMCIFIFO 0x080 /* to 0x0bc */

/*
 * Default MASK0 set.  MCI_DATAENDMASK is deliberately absent: it is
 * toggled at runtime by mmci_start_data()/mmci_pio_irq().
 */
#define MCI_IRQENABLE	\
	(MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK|	\
	MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK|	\
	MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATABLOCKENDMASK)

/*
 * The size of the FIFO in bytes.
 */
#define MCI_FIFOSIZE (16*4)

#define MCI_FIFOHALFSIZE (MCI_FIFOSIZE / 2)

/* Scatterlist entries accepted per request (exported as mmc->max_phys_segs). */
#define NR_SG 16
119
struct clk;

/* Per-controller driver state; lives in the mmc_host private area. */
struct mmci_host {
	void __iomem *base;	/* mapped MMCI register window */
	struct mmc_request *mrq;	/* request in flight, if any */
	struct mmc_command *cmd;	/* command awaiting completion */
	struct mmc_data *data;	/* data transfer in progress */
	struct mmc_host *mmc;
	struct clk *clk;

	unsigned int data_xfered;	/* bytes completed so far */

	spinlock_t lock;	/* serialises request path against the IRQ handlers */

	unsigned int mclk;	/* input clock rate (Hz) */
	unsigned int cclk;	/* actual card clock rate (Hz) */
	u32 pwr;	/* last value written to MMCIPOWER */
	struct mmc_platform_data *plat;

	struct timer_list timer;	/* card-detect polling timer */
	unsigned int oldstat;	/* previous card-detect status */

	unsigned int sg_len;	/* scatterlist entries remaining */

	/* pio stuff */
	struct scatterlist *sg_ptr;	/* current scatterlist entry */
	unsigned int sg_off;	/* byte offset within sg_ptr */
	unsigned int size;	/* bytes remaining in the transfer */
};
149
/* Begin PIO at the first scatterlist entry of @data. */
static inline void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	/*
	 * Ideally, we want the higher levels to pass us a scatter list.
	 */
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_off = 0;
}
159
/*
 * Advance to the next scatterlist entry.  Returns the number of
 * entries still to process (0 when the list is exhausted).
 */
static inline int mmci_next_sg(struct mmci_host *host)
{
	host->sg_ptr++;
	host->sg_off = 0;
	return --host->sg_len;
}
166
/*
 * Map the current scatterlist page for PIO access.  Interrupts are
 * disabled (state saved in *flags) for the lifetime of the atomic
 * mapping; must be paired with mmci_kunmap_atomic().
 */
static inline char *mmci_kmap_atomic(struct mmci_host *host, unsigned long *flags)
{
	struct scatterlist *sg = host->sg_ptr;

	local_irq_save(*flags);
	return kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
}
174
/* Undo mmci_kmap_atomic(): release the mapping and restore interrupts. */
static inline void mmci_kunmap_atomic(struct mmci_host *host, void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
	local_irq_restore(*flags);
}
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
new file mode 100644
index 000000000000..1914e65d4db1
--- /dev/null
+++ b/drivers/mmc/host/omap.c
@@ -0,0 +1,1295 @@
1/*
 * linux/drivers/mmc/host/omap.c
3 *
4 * Copyright (C) 2004 Nokia Corporation
 * Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
6 * Misc hacks here and there by Tony Lindgren <tony@atomide.com>
7 * Other hacks (DMA, SD, etc) by David Brownell
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16#include <linux/init.h>
17#include <linux/ioport.h>
18#include <linux/platform_device.h>
19#include <linux/interrupt.h>
20#include <linux/dma-mapping.h>
21#include <linux/delay.h>
22#include <linux/spinlock.h>
23#include <linux/timer.h>
24#include <linux/mmc/host.h>
25#include <linux/mmc/card.h>
26#include <linux/clk.h>
27
28#include <asm/io.h>
29#include <asm/irq.h>
30#include <asm/scatterlist.h>
31#include <asm/mach-types.h>
32
33#include <asm/arch/board.h>
34#include <asm/arch/gpio.h>
35#include <asm/arch/dma.h>
36#include <asm/arch/mux.h>
37#include <asm/arch/fpga.h>
38#include <asm/arch/tps65010.h>
39
40#define OMAP_MMC_REG_CMD 0x00
41#define OMAP_MMC_REG_ARGL 0x04
42#define OMAP_MMC_REG_ARGH 0x08
43#define OMAP_MMC_REG_CON 0x0c
44#define OMAP_MMC_REG_STAT 0x10
45#define OMAP_MMC_REG_IE 0x14
46#define OMAP_MMC_REG_CTO 0x18
47#define OMAP_MMC_REG_DTO 0x1c
48#define OMAP_MMC_REG_DATA 0x20
49#define OMAP_MMC_REG_BLEN 0x24
50#define OMAP_MMC_REG_NBLK 0x28
51#define OMAP_MMC_REG_BUF 0x2c
52#define OMAP_MMC_REG_SDIO 0x34
53#define OMAP_MMC_REG_REV 0x3c
54#define OMAP_MMC_REG_RSP0 0x40
55#define OMAP_MMC_REG_RSP1 0x44
56#define OMAP_MMC_REG_RSP2 0x48
57#define OMAP_MMC_REG_RSP3 0x4c
58#define OMAP_MMC_REG_RSP4 0x50
59#define OMAP_MMC_REG_RSP5 0x54
60#define OMAP_MMC_REG_RSP6 0x58
61#define OMAP_MMC_REG_RSP7 0x5c
62#define OMAP_MMC_REG_IOSR 0x60
63#define OMAP_MMC_REG_SYSC 0x64
64#define OMAP_MMC_REG_SYSS 0x68
65
66#define OMAP_MMC_STAT_CARD_ERR (1 << 14)
67#define OMAP_MMC_STAT_CARD_IRQ (1 << 13)
68#define OMAP_MMC_STAT_OCR_BUSY (1 << 12)
69#define OMAP_MMC_STAT_A_EMPTY (1 << 11)
70#define OMAP_MMC_STAT_A_FULL (1 << 10)
71#define OMAP_MMC_STAT_CMD_CRC (1 << 8)
72#define OMAP_MMC_STAT_CMD_TOUT (1 << 7)
73#define OMAP_MMC_STAT_DATA_CRC (1 << 6)
74#define OMAP_MMC_STAT_DATA_TOUT (1 << 5)
75#define OMAP_MMC_STAT_END_BUSY (1 << 4)
76#define OMAP_MMC_STAT_END_OF_DATA (1 << 3)
77#define OMAP_MMC_STAT_CARD_BUSY (1 << 2)
78#define OMAP_MMC_STAT_END_OF_CMD (1 << 0)
79
80#define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG_##reg)
81#define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG_##reg)
82
83/*
84 * Command types
85 */
86#define OMAP_MMC_CMDTYPE_BC 0
87#define OMAP_MMC_CMDTYPE_BCR 1
88#define OMAP_MMC_CMDTYPE_AC 2
89#define OMAP_MMC_CMDTYPE_ADTC 3
90
91
92#define DRIVER_NAME "mmci-omap"
93
94/* Specifies how often in millisecs to poll for card status changes
95 * when the cover switch is open */
96#define OMAP_MMC_SWITCH_POLL_DELAY 500
97
98static int mmc_omap_enable_poll = 1;
99
/* Per-controller driver state for the OMAP1/2 MMC block. */
struct mmc_omap_host {
	int			initialized;
	int			suspended;	/* set while the MMC core is suspended */
	struct mmc_request *	mrq;		/* request currently in flight */
	struct mmc_command *	cmd;		/* command phase of mrq, or NULL */
	struct mmc_data *	data;		/* data phase of mrq, or NULL */
	struct mmc_host *	mmc;
	struct device *		dev;
	unsigned char		id; /* 16xx chips have 2 MMC blocks */
	struct clk *		iclk;		/* interface clock (24xx only) */
	struct clk *		fclk;		/* functional clock */
	struct resource		*mem_res;
	void __iomem		*virt_base;
	unsigned int		phys_base;
	int			irq;
	unsigned char		bus_mode;	/* open-drain vs. push-pull */
	unsigned char		hw_bus_mode;

	unsigned int		sg_len;
	int			sg_idx;		/* current scatterlist segment */
	u16 *			buffer;		/* PIO cursor into current segment */
	u32			buffer_bytes_left;
	u32			total_bytes_left;

	unsigned		use_dma:1;
	unsigned		brs_received:1, dma_done:1;	/* end-of-data handshake flags */
	unsigned		dma_is_read:1;
	unsigned		dma_in_use:1;
	int			dma_ch;		/* held DMA channel, -1 when none */
	spinlock_t		dma_lock;	/* protects brs_received/dma_done */
	struct timer_list	dma_timer;	/* lazy DMA-channel release */
	unsigned		dma_len;

	short			power_pin;	/* GPIO numbers; -1 when not wired */
	short			wp_pin;

	int			switch_pin;	/* cover-switch GPIO, -1 if absent */
	struct work_struct	switch_work;
	struct timer_list	switch_timer;	/* cover-open polling */
	int			switch_last_state;
};
141
142static inline int
143mmc_omap_cover_is_open(struct mmc_omap_host *host)
144{
145 if (host->switch_pin < 0)
146 return 0;
147 return omap_get_gpio_datain(host->switch_pin);
148}
149
150static ssize_t
151mmc_omap_show_cover_switch(struct device *dev,
152 struct device_attribute *attr, char *buf)
153{
154 struct mmc_omap_host *host = dev_get_drvdata(dev);
155
156 return sprintf(buf, "%s\n", mmc_omap_cover_is_open(host) ? "open" :
157 "closed");
158}
159
160static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL);
161
162static ssize_t
163mmc_omap_show_enable_poll(struct device *dev,
164 struct device_attribute *attr, char *buf)
165{
166 return snprintf(buf, PAGE_SIZE, "%d\n", mmc_omap_enable_poll);
167}
168
169static ssize_t
170mmc_omap_store_enable_poll(struct device *dev,
171 struct device_attribute *attr, const char *buf,
172 size_t size)
173{
174 int enable_poll;
175
176 if (sscanf(buf, "%10d", &enable_poll) != 1)
177 return -EINVAL;
178
179 if (enable_poll != mmc_omap_enable_poll) {
180 struct mmc_omap_host *host = dev_get_drvdata(dev);
181
182 mmc_omap_enable_poll = enable_poll;
183 if (enable_poll && host->switch_pin >= 0)
184 schedule_work(&host->switch_work);
185 }
186 return size;
187}
188
189static DEVICE_ATTR(enable_poll, 0664,
190 mmc_omap_show_enable_poll, mmc_omap_store_enable_poll);
191
/*
 * Program and launch one command on the controller: translate the MMC
 * core's response/command classification into the CMD register encoding,
 * unmask the interrupts we handle, and write CMD last — that write
 * starts the transaction.  The functional clock stays enabled until the
 * request completes (mmc_omap_cmd_done()/mmc_omap_xfer_done()).
 */
static void
mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)
{
	u32 cmdreg;
	u32 resptype;
	u32 cmdtype;

	host->cmd = cmd;

	resptype = 0;
	cmdtype = 0;

	/* Our hardware needs to know exact type */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1B:
		/* resp 1, 1b, 6, 7 */
		resptype = 1;
		break;
	case MMC_RSP_R2:
		resptype = 2;
		break;
	case MMC_RSP_R3:
		resptype = 3;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "Invalid response type: %04x\n", mmc_resp_type(cmd));
		break;
	}

	/* Map the spec's four command classes onto the CMDTYPE field. */
	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) {
		cmdtype = OMAP_MMC_CMDTYPE_ADTC;
	} else if (mmc_cmd_type(cmd) == MMC_CMD_BC) {
		cmdtype = OMAP_MMC_CMDTYPE_BC;
	} else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) {
		cmdtype = OMAP_MMC_CMDTYPE_BCR;
	} else {
		cmdtype = OMAP_MMC_CMDTYPE_AC;
	}

	cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);

	/* Bit 6: open-drain command line (card identification phase). */
	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdreg |= 1 << 6;

	/* Bit 11: expect a busy signal after the response (R1b). */
	if (cmd->flags & MMC_RSP_BUSY)
		cmdreg |= 1 << 11;

	/* Bit 15: data transfer direction is card-to-host. */
	if (host->data && !(host->data->flags & MMC_DATA_WRITE))
		cmdreg |= 1 << 15;

	clk_enable(host->fclk);

	OMAP_MMC_WRITE(host, CTO, 200);
	OMAP_MMC_WRITE(host, ARGL, cmd->arg & 0xffff);
	OMAP_MMC_WRITE(host, ARGH, cmd->arg >> 16);
	OMAP_MMC_WRITE(host, IE,
		       OMAP_MMC_STAT_A_EMPTY | OMAP_MMC_STAT_A_FULL |
		       OMAP_MMC_STAT_CMD_CRC | OMAP_MMC_STAT_CMD_TOUT |
		       OMAP_MMC_STAT_DATA_CRC | OMAP_MMC_STAT_DATA_TOUT |
		       OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR |
		       OMAP_MMC_STAT_END_OF_DATA);
	OMAP_MMC_WRITE(host, CMD, cmdreg);
}
258
/*
 * The data phase of the current request is finished (successfully or
 * not).  Tear down any DMA mapping, balance the clk_enable() done in
 * mmc_omap_start_command(), and either complete the request or chain
 * the STOP command.  The DMA channel itself is released lazily via
 * dma_timer so back-to-back transfers can reuse it.
 */
static void
mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
{
	if (host->dma_in_use) {
		enum dma_data_direction dma_data_dir;

		BUG_ON(host->dma_ch < 0);
		if (data->error != MMC_ERR_NONE)
			omap_stop_dma(host->dma_ch);
		/* Release DMA channel lazily */
		mod_timer(&host->dma_timer, jiffies + HZ);
		if (data->flags & MMC_DATA_WRITE)
			dma_data_dir = DMA_TO_DEVICE;
		else
			dma_data_dir = DMA_FROM_DEVICE;
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
			     dma_data_dir);
	}
	host->data = NULL;
	host->sg_len = 0;
	clk_disable(host->fclk);

	/* NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing
	 * dozens of requests until the card finishes writing data.
	 * It'd be cheaper to just wait till an EOFB interrupt arrives...
	 */

	if (!data->stop) {
		host->mrq = NULL;
		mmc_request_done(host->mmc, data->mrq);
		return;
	}

	mmc_omap_start_command(host, data->stop);
}
294
/*
 * Block-received (BRS) side of the end-of-transfer handshake.  With DMA
 * in use the transfer is only complete when both the controller's BRS
 * interrupt and the DMA completion callback have fired; whichever event
 * arrives second (decided under dma_lock) finishes the transfer.
 */
static void
mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
{
	unsigned long flags;
	int done;

	if (!host->dma_in_use) {
		mmc_omap_xfer_done(host, data);
		return;
	}
	done = 0;
	spin_lock_irqsave(&host->dma_lock, flags);
	if (host->dma_done)
		done = 1;
	else
		host->brs_received = 1;
	spin_unlock_irqrestore(&host->dma_lock, flags);
	if (done)
		mmc_omap_xfer_done(host, data);
}
315
316static void
317mmc_omap_dma_timer(unsigned long data)
318{
319 struct mmc_omap_host *host = (struct mmc_omap_host *) data;
320
321 BUG_ON(host->dma_ch < 0);
322 omap_free_dma(host->dma_ch);
323 host->dma_ch = -1;
324}
325
/*
 * DMA side of the end-of-transfer handshake (mirror of
 * mmc_omap_end_of_data()): finish the transfer only if the controller's
 * BRS interrupt has already been seen, otherwise just record that DMA
 * is done and let the BRS path complete it.
 */
static void
mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
{
	unsigned long flags;
	int done;

	done = 0;
	spin_lock_irqsave(&host->dma_lock, flags);
	if (host->brs_received)
		done = 1;
	else
		host->dma_done = 1;
	spin_unlock_irqrestore(&host->dma_lock, flags);
	if (done)
		mmc_omap_xfer_done(host, data);
}
342
/*
 * Command phase completed: copy the response out of the 16-bit RSP
 * registers (RSP0 holds the lowest half-word of a 136-bit response,
 * RSP6/RSP7 the 32-bit short response) and, when there is no data phase
 * or the command failed, complete the whole request here.  Requests
 * with a data phase are completed later by mmc_omap_xfer_done().
 */
static void
mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
{
	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* response type 2 */
			cmd->resp[3] =
				OMAP_MMC_READ(host, RSP0) |
				(OMAP_MMC_READ(host, RSP1) << 16);
			cmd->resp[2] =
				OMAP_MMC_READ(host, RSP2) |
				(OMAP_MMC_READ(host, RSP3) << 16);
			cmd->resp[1] =
				OMAP_MMC_READ(host, RSP4) |
				(OMAP_MMC_READ(host, RSP5) << 16);
			cmd->resp[0] =
				OMAP_MMC_READ(host, RSP6) |
				(OMAP_MMC_READ(host, RSP7) << 16);
		} else {
			/* response types 1, 1b, 3, 4, 5, 6 */
			cmd->resp[0] =
				OMAP_MMC_READ(host, RSP6) |
				(OMAP_MMC_READ(host, RSP7) << 16);
		}
	}

	if (host->data == NULL || cmd->error != MMC_ERR_NONE) {
		host->mrq = NULL;
		clk_disable(host->fclk);
		mmc_request_done(host->mmc, cmd->mrq);
	}
}
377
/* PIO only */
/*
 * Point the PIO cursor (host->buffer / buffer_bytes_left) at the
 * scatterlist segment selected by host->sg_idx, clamped to the bytes
 * still expected for this transfer.
 * NOTE(review): page_address() assumes the sg pages are lowmem —
 * highmem buffers would need kmap; confirm against the callers.
 */
static void
mmc_omap_sg_to_buf(struct mmc_omap_host *host)
{
	struct scatterlist *sg;

	sg = host->data->sg + host->sg_idx;
	host->buffer_bytes_left = sg->length;
	host->buffer = page_address(sg->page) + sg->offset;
	if (host->buffer_bytes_left > host->total_bytes_left)
		host->buffer_bytes_left = host->total_bytes_left;
}
390
391/* PIO only */
392static void
393mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
394{
395 int n;
396
397 if (host->buffer_bytes_left == 0) {
398 host->sg_idx++;
399 BUG_ON(host->sg_idx == host->sg_len);
400 mmc_omap_sg_to_buf(host);
401 }
402 n = 64;
403 if (n > host->buffer_bytes_left)
404 n = host->buffer_bytes_left;
405 host->buffer_bytes_left -= n;
406 host->total_bytes_left -= n;
407 host->data->bytes_xfered += n;
408
409 if (write) {
410 __raw_writesw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n);
411 } else {
412 __raw_readsw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n);
413 }
414}
415
416static inline void mmc_omap_report_irq(u16 status)
417{
418 static const char *mmc_omap_status_bits[] = {
419 "EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO",
420 "CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR"
421 };
422 int i, c = 0;
423
424 for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
425 if (status & (1 << i)) {
426 if (c)
427 printk(" ");
428 printk("%s", mmc_omap_status_bits[i]);
429 c++;
430 }
431}
432
433static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
434{
435 struct mmc_omap_host * host = (struct mmc_omap_host *)dev_id;
436 u16 status;
437 int end_command;
438 int end_transfer;
439 int transfer_error;
440
441 if (host->cmd == NULL && host->data == NULL) {
442 status = OMAP_MMC_READ(host, STAT);
443 dev_info(mmc_dev(host->mmc),"spurious irq 0x%04x\n", status);
444 if (status != 0) {
445 OMAP_MMC_WRITE(host, STAT, status);
446 OMAP_MMC_WRITE(host, IE, 0);
447 }
448 return IRQ_HANDLED;
449 }
450
451 end_command = 0;
452 end_transfer = 0;
453 transfer_error = 0;
454
455 while ((status = OMAP_MMC_READ(host, STAT)) != 0) {
456 OMAP_MMC_WRITE(host, STAT, status);
457#ifdef CONFIG_MMC_DEBUG
458 dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ",
459 status, host->cmd != NULL ? host->cmd->opcode : -1);
460 mmc_omap_report_irq(status);
461 printk("\n");
462#endif
463 if (host->total_bytes_left) {
464 if ((status & OMAP_MMC_STAT_A_FULL) ||
465 (status & OMAP_MMC_STAT_END_OF_DATA))
466 mmc_omap_xfer_data(host, 0);
467 if (status & OMAP_MMC_STAT_A_EMPTY)
468 mmc_omap_xfer_data(host, 1);
469 }
470
471 if (status & OMAP_MMC_STAT_END_OF_DATA) {
472 end_transfer = 1;
473 }
474
475 if (status & OMAP_MMC_STAT_DATA_TOUT) {
476 dev_dbg(mmc_dev(host->mmc), "data timeout\n");
477 if (host->data) {
478 host->data->error |= MMC_ERR_TIMEOUT;
479 transfer_error = 1;
480 }
481 }
482
483 if (status & OMAP_MMC_STAT_DATA_CRC) {
484 if (host->data) {
485 host->data->error |= MMC_ERR_BADCRC;
486 dev_dbg(mmc_dev(host->mmc),
487 "data CRC error, bytes left %d\n",
488 host->total_bytes_left);
489 transfer_error = 1;
490 } else {
491 dev_dbg(mmc_dev(host->mmc), "data CRC error\n");
492 }
493 }
494
495 if (status & OMAP_MMC_STAT_CMD_TOUT) {
496 /* Timeouts are routine with some commands */
497 if (host->cmd) {
498 if (host->cmd->opcode != MMC_ALL_SEND_CID &&
499 host->cmd->opcode !=
500 MMC_SEND_OP_COND &&
501 host->cmd->opcode !=
502 MMC_APP_CMD &&
503 !mmc_omap_cover_is_open(host))
504 dev_err(mmc_dev(host->mmc),
505 "command timeout, CMD %d\n",
506 host->cmd->opcode);
507 host->cmd->error = MMC_ERR_TIMEOUT;
508 end_command = 1;
509 }
510 }
511
512 if (status & OMAP_MMC_STAT_CMD_CRC) {
513 if (host->cmd) {
514 dev_err(mmc_dev(host->mmc),
515 "command CRC error (CMD%d, arg 0x%08x)\n",
516 host->cmd->opcode, host->cmd->arg);
517 host->cmd->error = MMC_ERR_BADCRC;
518 end_command = 1;
519 } else
520 dev_err(mmc_dev(host->mmc),
521 "command CRC error without cmd?\n");
522 }
523
524 if (status & OMAP_MMC_STAT_CARD_ERR) {
525 if (host->cmd && host->cmd->opcode == MMC_STOP_TRANSMISSION) {
526 u32 response = OMAP_MMC_READ(host, RSP6)
527 | (OMAP_MMC_READ(host, RSP7) << 16);
528 /* STOP sometimes sets must-ignore bits */
529 if (!(response & (R1_CC_ERROR
530 | R1_ILLEGAL_COMMAND
531 | R1_COM_CRC_ERROR))) {
532 end_command = 1;
533 continue;
534 }
535 }
536
537 dev_dbg(mmc_dev(host->mmc), "card status error (CMD%d)\n",
538 host->cmd->opcode);
539 if (host->cmd) {
540 host->cmd->error = MMC_ERR_FAILED;
541 end_command = 1;
542 }
543 if (host->data) {
544 host->data->error = MMC_ERR_FAILED;
545 transfer_error = 1;
546 }
547 }
548
549 /*
550 * NOTE: On 1610 the END_OF_CMD may come too early when
551 * starting a write
552 */
553 if ((status & OMAP_MMC_STAT_END_OF_CMD) &&
554 (!(status & OMAP_MMC_STAT_A_EMPTY))) {
555 end_command = 1;
556 }
557 }
558
559 if (end_command) {
560 mmc_omap_cmd_done(host, host->cmd);
561 }
562 if (transfer_error)
563 mmc_omap_xfer_done(host, host->data);
564 else if (end_transfer)
565 mmc_omap_end_of_data(host, host->data);
566
567 return IRQ_HANDLED;
568}
569
570static irqreturn_t mmc_omap_switch_irq(int irq, void *dev_id)
571{
572 struct mmc_omap_host *host = (struct mmc_omap_host *) dev_id;
573
574 schedule_work(&host->switch_work);
575
576 return IRQ_HANDLED;
577}
578
579static void mmc_omap_switch_timer(unsigned long arg)
580{
581 struct mmc_omap_host *host = (struct mmc_omap_host *) arg;
582
583 schedule_work(&host->switch_work);
584}
585
/*
 * Workqueue handler for cover-switch events: emit a uevent on state
 * change, ask the MMC core to rescan, and keep polling via switch_timer
 * while the cover stays open (polling enabled).
 * NOTE(review): 'cards' is counted below but never used afterwards —
 * looks like dead code left from an earlier version.
 * NOTE(review): 'complained' is static, i.e. shared by both controller
 * instances on 16xx — confirm that is intentional.
 */
static void mmc_omap_switch_handler(struct work_struct *work)
{
	struct mmc_omap_host *host = container_of(work, struct mmc_omap_host, switch_work);
	struct mmc_card *card;
	static int complained = 0;
	int cards = 0, cover_open;

	if (host->switch_pin == -1)
		return;
	cover_open = mmc_omap_cover_is_open(host);
	if (cover_open != host->switch_last_state) {
		kobject_uevent(&host->dev->kobj, KOBJ_CHANGE);
		host->switch_last_state = cover_open;
	}
	mmc_detect_change(host->mmc, 0);
	list_for_each_entry(card, &host->mmc->cards, node) {
		if (mmc_card_present(card))
			cards++;
	}
	if (mmc_omap_cover_is_open(host)) {
		if (!complained) {
			dev_info(mmc_dev(host->mmc), "cover is open\n");
			complained = 1;
		}
		if (mmc_omap_enable_poll)
			mod_timer(&host->switch_timer, jiffies +
				msecs_to_jiffies(OMAP_MMC_SWITCH_POLL_DELAY));
	} else {
		complained = 0;
	}
}
617
/* Prepare to transfer the next segment of a scatterlist */
/*
 * Program the held DMA channel for the segment at host->sg_idx: constant
 * address on the MMC DATA register side, post-incrementing memory on the
 * other, 16-bit elements, frame-synchronized to the MMC request line.
 */
static void
mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
{
	int dma_ch = host->dma_ch;
	unsigned long data_addr;
	u16 buf, frame;
	u32 count;
	struct scatterlist *sg = &data->sg[host->sg_idx];
	int src_port = 0;
	int dst_port = 0;
	int sync_dev = 0;

	data_addr = host->phys_base + OMAP_MMC_REG_DATA;
	frame = data->blksz;
	count = sg_dma_len(sg);

	/* Single-block transfers may come with an oversized segment. */
	if ((data->blocks == 1) && (count > data->blksz))
		count = frame;

	host->dma_len = count;

	/* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
	 * Use 16 or 32 word frames when the blocksize is at least that large.
	 * Blocksize is usually 512 bytes; but not for some SD reads.
	 */
	if (cpu_is_omap15xx() && frame > 32)
		frame = 32;
	else if (frame > 64)
		frame = 64;
	count /= frame;
	frame >>= 1;	/* bytes -> 16-bit words */

	if (!(data->flags & MMC_DATA_WRITE)) {
		/* Card -> memory: RX threshold in the high byte of BUF. */
		buf = 0x800f | ((frame - 1) << 8);

		if (cpu_class_is_omap1()) {
			src_port = OMAP_DMA_PORT_TIPB;
			dst_port = OMAP_DMA_PORT_EMIFF;
		}
		if (cpu_is_omap24xx())
			sync_dev = OMAP24XX_DMA_MMC1_RX;

		omap_set_dma_src_params(dma_ch, src_port,
					OMAP_DMA_AMODE_CONSTANT,
					data_addr, 0, 0);
		omap_set_dma_dest_params(dma_ch, dst_port,
					 OMAP_DMA_AMODE_POST_INC,
					 sg_dma_address(sg), 0, 0);
		omap_set_dma_dest_data_pack(dma_ch, 1);
		omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
	} else {
		/* Memory -> card: TX threshold in the low byte of BUF. */
		buf = 0x0f80 | ((frame - 1) << 0);

		if (cpu_class_is_omap1()) {
			src_port = OMAP_DMA_PORT_EMIFF;
			dst_port = OMAP_DMA_PORT_TIPB;
		}
		if (cpu_is_omap24xx())
			sync_dev = OMAP24XX_DMA_MMC1_TX;

		omap_set_dma_dest_params(dma_ch, dst_port,
					 OMAP_DMA_AMODE_CONSTANT,
					 data_addr, 0, 0);
		omap_set_dma_src_params(dma_ch, src_port,
					OMAP_DMA_AMODE_POST_INC,
					sg_dma_address(sg), 0, 0);
		omap_set_dma_src_data_pack(dma_ch, 1);
		omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
	}

	/* Max limit for DMA frame count is 0xffff */
	BUG_ON(count > 0xffff);

	OMAP_MMC_WRITE(host, BUF, buf);
	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
				     frame, count, OMAP_DMA_SYNC_FRAME,
				     sync_dev, 0);
}
697
/* A scatterlist segment completed */
/*
 * DMA completion callback: account the finished segment, start the next
 * one, or — on the last segment — run the DMA side of the end-of-data
 * handshake.  Error interrupts are only logged (see FIXME).
 */
static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *) data;
	struct mmc_data *mmcdat = host->data;

	if (unlikely(host->dma_ch < 0)) {
		dev_err(mmc_dev(host->mmc),
			"DMA callback while DMA not enabled\n");
		return;
	}
	/* FIXME: We really should do something to _handle_ the errors */
	if (ch_status & OMAP1_DMA_TOUT_IRQ) {
		dev_err(mmc_dev(host->mmc),"DMA timeout\n");
		return;
	}
	if (ch_status & OMAP_DMA_DROP_IRQ) {
		dev_err(mmc_dev(host->mmc), "DMA sync error\n");
		return;
	}
	if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
		return;
	}
	mmcdat->bytes_xfered += host->dma_len;
	host->sg_idx++;
	if (host->sg_idx < host->sg_len) {
		mmc_omap_prepare_dma(host, host->data);
		omap_start_dma(host->dma_ch);
	} else
		mmc_omap_dma_done(host, host->data);
}
729
730static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
731{
732 const char *dev_name;
733 int sync_dev, dma_ch, is_read, r;
734
735 is_read = !(data->flags & MMC_DATA_WRITE);
736 del_timer_sync(&host->dma_timer);
737 if (host->dma_ch >= 0) {
738 if (is_read == host->dma_is_read)
739 return 0;
740 omap_free_dma(host->dma_ch);
741 host->dma_ch = -1;
742 }
743
744 if (is_read) {
745 if (host->id == 1) {
746 sync_dev = OMAP_DMA_MMC_RX;
747 dev_name = "MMC1 read";
748 } else {
749 sync_dev = OMAP_DMA_MMC2_RX;
750 dev_name = "MMC2 read";
751 }
752 } else {
753 if (host->id == 1) {
754 sync_dev = OMAP_DMA_MMC_TX;
755 dev_name = "MMC1 write";
756 } else {
757 sync_dev = OMAP_DMA_MMC2_TX;
758 dev_name = "MMC2 write";
759 }
760 }
761 r = omap_request_dma(sync_dev, dev_name, mmc_omap_dma_cb,
762 host, &dma_ch);
763 if (r != 0) {
764 dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r);
765 return r;
766 }
767 host->dma_ch = dma_ch;
768 host->dma_is_read = is_read;
769
770 return 0;
771}
772
773static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
774{
775 u16 reg;
776
777 reg = OMAP_MMC_READ(host, SDIO);
778 reg &= ~(1 << 5);
779 OMAP_MMC_WRITE(host, SDIO, reg);
780 /* Set maximum timeout */
781 OMAP_MMC_WRITE(host, CTO, 0xff);
782}
783
/*
 * Program the data timeout from the request's timeout_ns/timeout_clks,
 * switching in the hardware's x1024 multiplier (SDIO bit 5) when the
 * cycle count does not fit the 16-bit DTO register.
 */
static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
{
	int timeout;
	u16 reg;

	/* Convert ns to clock cycles by assuming 20MHz frequency
	 * 1 cycle at 20MHz = 500 ns
	 */
	timeout = req->data->timeout_clks + req->data->timeout_ns / 500;

	/* Check if we need to use timeout multiplier register */
	reg = OMAP_MMC_READ(host, SDIO);
	if (timeout > 0xffff) {
		reg |= (1 << 5);
		timeout /= 1024;
	} else
		reg &= ~(1 << 5);
	OMAP_MMC_WRITE(host, SDIO, reg);
	OMAP_MMC_WRITE(host, DTO, timeout);
}
804
/*
 * Set up the data phase for a request: program block length/count and
 * the data timeout, then choose DMA (only when every segment is a whole
 * number of blocks and a channel can be obtained) or fall back to PIO.
 */
static void
mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
{
	struct mmc_data *data = req->data;
	int i, use_dma, block_size;
	unsigned sg_len;

	host->data = data;
	if (data == NULL) {
		/* Command-only request: quiesce the data path. */
		OMAP_MMC_WRITE(host, BLEN, 0);
		OMAP_MMC_WRITE(host, NBLK, 0);
		OMAP_MMC_WRITE(host, BUF, 0);
		host->dma_in_use = 0;
		set_cmd_timeout(host, req);
		return;
	}

	block_size = data->blksz;

	OMAP_MMC_WRITE(host, NBLK, data->blocks - 1);
	OMAP_MMC_WRITE(host, BLEN, block_size - 1);
	set_data_timeout(host, req);

	/* cope with calling layer confusion; it issues "single
	 * block" writes using multi-block scatterlists.
	 */
	sg_len = (data->blocks == 1) ? 1 : data->sg_len;

	/* Only do DMA for entire blocks */
	use_dma = host->use_dma;
	if (use_dma) {
		for (i = 0; i < sg_len; i++) {
			if ((data->sg[i].length % block_size) != 0) {
				use_dma = 0;
				break;
			}
		}
	}

	host->sg_idx = 0;
	if (use_dma) {
		if (mmc_omap_get_dma_channel(host, data) == 0) {
			enum dma_data_direction dma_data_dir;

			if (data->flags & MMC_DATA_WRITE)
				dma_data_dir = DMA_TO_DEVICE;
			else
				dma_data_dir = DMA_FROM_DEVICE;

			host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
						sg_len, dma_data_dir);
			/* DMA path: the IRQ handler must not run PIO. */
			host->total_bytes_left = 0;
			mmc_omap_prepare_dma(host, req->data);
			host->brs_received = 0;
			host->dma_done = 0;
			host->dma_in_use = 1;
		} else
			use_dma = 0;
	}

	/* Revert to PIO? */
	if (!use_dma) {
		OMAP_MMC_WRITE(host, BUF, 0x1f1f);
		host->total_bytes_left = data->blocks * block_size;
		host->sg_len = sg_len;
		mmc_omap_sg_to_buf(host);
		host->dma_in_use = 0;
	}
}
874
/*
 * mmc_host_ops.request: latch the request, prepare the data path first
 * (the FIFO/DMA must be ready before CMD is written), issue the command
 * and, for DMA transfers, start the channel.  Completion is reported
 * asynchronously from the IRQ/DMA paths via mmc_request_done().
 */
static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct mmc_omap_host *host = mmc_priv(mmc);

	WARN_ON(host->mrq != NULL);

	host->mrq = req;

	/* only touch fifo AFTER the controller readies it */
	mmc_omap_prepare_data(host, req);
	mmc_omap_start_command(host, req->cmd);
	if (host->dma_in_use)
		omap_start_dma(host->dma_ch);
}
889
/*
 * Innovator boards gate MMC socket power through FPGA register bit 3.
 * Compiles to a no-op on other configurations.
 */
static void innovator_fpga_socket_power(int on)
{
#if defined(CONFIG_MACH_OMAP_INNOVATOR) && defined(CONFIG_ARCH_OMAP15XX)
	if (on) {
		fpga_write(fpga_read(OMAP1510_FPGA_POWER) | (1 << 3),
			   OMAP1510_FPGA_POWER);
	} else {
		fpga_write(fpga_read(OMAP1510_FPGA_POWER) & ~(1 << 3),
			   OMAP1510_FPGA_POWER);
	}
#endif
}
902
/*
 * Turn the socket power on/off. Innovator uses FPGA, most boards
 * probably use GPIO.
 */
/* On 24xx, power is controlled by CON bit 11 instead of a board pin. */
static void mmc_omap_power(struct mmc_omap_host *host, int on)
{
	if (on) {
		if (machine_is_omap_innovator())
			innovator_fpga_socket_power(1);
		else if (machine_is_omap_h2())
			tps65010_set_gpio_out_value(GPIO3, HIGH);
		else if (machine_is_omap_h3())
			/* GPIO 4 of TPS65010 sends SD_EN signal */
			tps65010_set_gpio_out_value(GPIO4, HIGH);
		else if (cpu_is_omap24xx()) {
			u16 reg = OMAP_MMC_READ(host, CON);
			OMAP_MMC_WRITE(host, CON, reg | (1 << 11));
		} else
			if (host->power_pin >= 0)
				omap_set_gpio_dataout(host->power_pin, 1);
	} else {
		if (machine_is_omap_innovator())
			innovator_fpga_socket_power(0);
		else if (machine_is_omap_h2())
			tps65010_set_gpio_out_value(GPIO3, LOW);
		else if (machine_is_omap_h3())
			tps65010_set_gpio_out_value(GPIO4, LOW);
		else if (cpu_is_omap24xx()) {
			u16 reg = OMAP_MMC_READ(host, CON);
			OMAP_MMC_WRITE(host, CON, reg & ~(1 << 11));
		} else
			if (host->power_pin >= 0)
				omap_set_gpio_dataout(host->power_pin, 0);
	}
}
938
939static int mmc_omap_calc_divisor(struct mmc_host *mmc, struct mmc_ios *ios)
940{
941 struct mmc_omap_host *host = mmc_priv(mmc);
942 int func_clk_rate = clk_get_rate(host->fclk);
943 int dsor;
944
945 if (ios->clock == 0)
946 return 0;
947
948 dsor = func_clk_rate / ios->clock;
949 if (dsor < 1)
950 dsor = 1;
951
952 if (func_clk_rate / dsor > ios->clock)
953 dsor++;
954
955 if (dsor > 250)
956 dsor = 250;
957 dsor++;
958
959 if (ios->bus_width == MMC_BUS_WIDTH_4)
960 dsor |= 1 << 15;
961
962 return dsor;
963}
964
/*
 * mmc_host_ops.set_ios: apply bus mode, clock divisor and power state.
 * On MMC_POWER_ON the required 80 initialization clock cycles are sent
 * by writing CMD bit 7 and polling end-of-command.
 * NOTE(review): that poll loop has no timeout bound — if POW never
 * latches the CPU spins forever (partially mitigated by the double CON
 * write described below).
 */
static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_omap_host *host = mmc_priv(mmc);
	int dsor;
	int i;

	dsor = mmc_omap_calc_divisor(mmc, ios);
	host->bus_mode = ios->bus_mode;
	host->hw_bus_mode = host->bus_mode;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		mmc_omap_power(host, 0);
		break;
	case MMC_POWER_UP:
		/* Cannot touch dsor yet, just power up MMC */
		mmc_omap_power(host, 1);
		return;
	case MMC_POWER_ON:
		dsor |= 1 << 11;	/* POW: enable the card clock */
		break;
	}

	clk_enable(host->fclk);

	/* On insanely high arm_per frequencies something sometimes
	 * goes somehow out of sync, and the POW bit is not being set,
	 * which results in the while loop below getting stuck.
	 * Writing to the CON register twice seems to do the trick. */
	for (i = 0; i < 2; i++)
		OMAP_MMC_WRITE(host, CON, dsor);
	if (ios->power_mode == MMC_POWER_ON) {
		/* Send clock cycles, poll completion */
		OMAP_MMC_WRITE(host, IE, 0);
		OMAP_MMC_WRITE(host, STAT, 0xffff);
		OMAP_MMC_WRITE(host, CMD, 1 << 7);
		while ((OMAP_MMC_READ(host, STAT) & 1) == 0);
		OMAP_MMC_WRITE(host, STAT, 1);
	}
	clk_disable(host->fclk);
}
1006
1007static int mmc_omap_get_ro(struct mmc_host *mmc)
1008{
1009 struct mmc_omap_host *host = mmc_priv(mmc);
1010
1011 return host->wp_pin && omap_get_gpio_datain(host->wp_pin);
1012}
1013
/* Host-controller operations handed to the MMC core. */
static const struct mmc_host_ops mmc_omap_ops = {
	.request	= mmc_omap_request,
	.set_ios	= mmc_omap_set_ios,
	.get_ro		= mmc_omap_get_ro,
};
1019
1020static int __init mmc_omap_probe(struct platform_device *pdev)
1021{
1022 struct omap_mmc_conf *minfo = pdev->dev.platform_data;
1023 struct mmc_host *mmc;
1024 struct mmc_omap_host *host = NULL;
1025 struct resource *res;
1026 int ret = 0;
1027 int irq;
1028
1029 if (minfo == NULL) {
1030 dev_err(&pdev->dev, "platform data missing\n");
1031 return -ENXIO;
1032 }
1033
1034 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1035 irq = platform_get_irq(pdev, 0);
1036 if (res == NULL || irq < 0)
1037 return -ENXIO;
1038
1039 res = request_mem_region(res->start, res->end - res->start + 1,
1040 pdev->name);
1041 if (res == NULL)
1042 return -EBUSY;
1043
1044 mmc = mmc_alloc_host(sizeof(struct mmc_omap_host), &pdev->dev);
1045 if (mmc == NULL) {
1046 ret = -ENOMEM;
1047 goto err_free_mem_region;
1048 }
1049
1050 host = mmc_priv(mmc);
1051 host->mmc = mmc;
1052
1053 spin_lock_init(&host->dma_lock);
1054 init_timer(&host->dma_timer);
1055 host->dma_timer.function = mmc_omap_dma_timer;
1056 host->dma_timer.data = (unsigned long) host;
1057
1058 host->id = pdev->id;
1059 host->mem_res = res;
1060 host->irq = irq;
1061
1062 if (cpu_is_omap24xx()) {
1063 host->iclk = clk_get(&pdev->dev, "mmc_ick");
1064 if (IS_ERR(host->iclk))
1065 goto err_free_mmc_host;
1066 clk_enable(host->iclk);
1067 }
1068
1069 if (!cpu_is_omap24xx())
1070 host->fclk = clk_get(&pdev->dev, "mmc_ck");
1071 else
1072 host->fclk = clk_get(&pdev->dev, "mmc_fck");
1073
1074 if (IS_ERR(host->fclk)) {
1075 ret = PTR_ERR(host->fclk);
1076 goto err_free_iclk;
1077 }
1078
1079 /* REVISIT:
1080 * Also, use minfo->cover to decide how to manage
1081 * the card detect sensing.
1082 */
1083 host->power_pin = minfo->power_pin;
1084 host->switch_pin = minfo->switch_pin;
1085 host->wp_pin = minfo->wp_pin;
1086 host->use_dma = 1;
1087 host->dma_ch = -1;
1088
1089 host->irq = irq;
1090 host->phys_base = host->mem_res->start;
1091 host->virt_base = (void __iomem *) IO_ADDRESS(host->phys_base);
1092
1093 mmc->ops = &mmc_omap_ops;
1094 mmc->f_min = 400000;
1095 mmc->f_max = 24000000;
1096 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1097 mmc->caps = MMC_CAP_MULTIWRITE | MMC_CAP_BYTEBLOCK;
1098
1099 if (minfo->wire4)
1100 mmc->caps |= MMC_CAP_4_BIT_DATA;
1101
1102 /* Use scatterlist DMA to reduce per-transfer costs.
1103 * NOTE max_seg_size assumption that small blocks aren't
1104 * normally used (except e.g. for reading SD registers).
1105 */
1106 mmc->max_phys_segs = 32;
1107 mmc->max_hw_segs = 32;
1108 mmc->max_blk_size = 2048; /* BLEN is 11 bits (+1) */
1109 mmc->max_blk_count = 2048; /* NBLK is 11 bits (+1) */
1110 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1111 mmc->max_seg_size = mmc->max_req_size;
1112
1113 if (host->power_pin >= 0) {
1114 if ((ret = omap_request_gpio(host->power_pin)) != 0) {
1115 dev_err(mmc_dev(host->mmc),
1116 "Unable to get GPIO pin for MMC power\n");
1117 goto err_free_fclk;
1118 }
1119 omap_set_gpio_direction(host->power_pin, 0);
1120 }
1121
1122 ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
1123 if (ret)
1124 goto err_free_power_gpio;
1125
1126 host->dev = &pdev->dev;
1127 platform_set_drvdata(pdev, host);
1128
1129 if (host->switch_pin >= 0) {
1130 INIT_WORK(&host->switch_work, mmc_omap_switch_handler);
1131 init_timer(&host->switch_timer);
1132 host->switch_timer.function = mmc_omap_switch_timer;
1133 host->switch_timer.data = (unsigned long) host;
1134 if (omap_request_gpio(host->switch_pin) != 0) {
1135 dev_warn(mmc_dev(host->mmc), "Unable to get GPIO pin for MMC cover switch\n");
1136 host->switch_pin = -1;
1137 goto no_switch;
1138 }
1139
1140 omap_set_gpio_direction(host->switch_pin, 1);
1141 ret = request_irq(OMAP_GPIO_IRQ(host->switch_pin),
1142 mmc_omap_switch_irq, IRQF_TRIGGER_RISING, DRIVER_NAME, host);
1143 if (ret) {
1144 dev_warn(mmc_dev(host->mmc), "Unable to get IRQ for MMC cover switch\n");
1145 omap_free_gpio(host->switch_pin);
1146 host->switch_pin = -1;
1147 goto no_switch;
1148 }
1149 ret = device_create_file(&pdev->dev, &dev_attr_cover_switch);
1150 if (ret == 0) {
1151 ret = device_create_file(&pdev->dev, &dev_attr_enable_poll);
1152 if (ret != 0)
1153 device_remove_file(&pdev->dev, &dev_attr_cover_switch);
1154 }
1155 if (ret) {
1156 dev_warn(mmc_dev(host->mmc), "Unable to create sysfs attributes\n");
1157 free_irq(OMAP_GPIO_IRQ(host->switch_pin), host);
1158 omap_free_gpio(host->switch_pin);
1159 host->switch_pin = -1;
1160 goto no_switch;
1161 }
1162 if (mmc_omap_enable_poll && mmc_omap_cover_is_open(host))
1163 schedule_work(&host->switch_work);
1164 }
1165
1166 mmc_add_host(mmc);
1167
1168 return 0;
1169
1170no_switch:
1171 /* FIXME: Free other resources too. */
1172 if (host) {
1173 if (host->iclk && !IS_ERR(host->iclk))
1174 clk_put(host->iclk);
1175 if (host->fclk && !IS_ERR(host->fclk))
1176 clk_put(host->fclk);
1177 mmc_free_host(host->mmc);
1178 }
1179err_free_power_gpio:
1180 if (host->power_pin >= 0)
1181 omap_free_gpio(host->power_pin);
1182err_free_fclk:
1183 clk_put(host->fclk);
1184err_free_iclk:
1185 if (host->iclk != NULL) {
1186 clk_disable(host->iclk);
1187 clk_put(host->iclk);
1188 }
1189err_free_mmc_host:
1190 mmc_free_host(host->mmc);
1191err_free_mem_region:
1192 release_mem_region(res->start, res->end - res->start + 1);
1193 return ret;
1194}
1195
/*
 * Tear down one controller instance: unregister from the MMC core
 * first (stops new requests), then release IRQs, GPIOs, clocks and the
 * register window in reverse-probe order.
 */
static int mmc_omap_remove(struct platform_device *pdev)
{
	struct mmc_omap_host *host = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	BUG_ON(host == NULL);

	mmc_remove_host(host->mmc);
	free_irq(host->irq, host);

	if (host->power_pin >= 0)
		omap_free_gpio(host->power_pin);
	if (host->switch_pin >= 0) {
		device_remove_file(&pdev->dev, &dev_attr_enable_poll);
		device_remove_file(&pdev->dev, &dev_attr_cover_switch);
		free_irq(OMAP_GPIO_IRQ(host->switch_pin), host);
		omap_free_gpio(host->switch_pin);
		host->switch_pin = -1;
		/* Make sure no poll timer or queued work outlives us. */
		del_timer_sync(&host->switch_timer);
		flush_scheduled_work();
	}
	if (host->iclk && !IS_ERR(host->iclk))
		clk_put(host->iclk);
	if (host->fclk && !IS_ERR(host->fclk))
		clk_put(host->fclk);

	release_mem_region(pdev->resource[0].start,
			   pdev->resource[0].end - pdev->resource[0].start + 1);

	mmc_free_host(host->mmc);

	return 0;
}
1230
1231#ifdef CONFIG_PM
1232static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
1233{
1234 int ret = 0;
1235 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1236
1237 if (host && host->suspended)
1238 return 0;
1239
1240 if (host) {
1241 ret = mmc_suspend_host(host->mmc, mesg);
1242 if (ret == 0)
1243 host->suspended = 1;
1244 }
1245 return ret;
1246}
1247
1248static int mmc_omap_resume(struct platform_device *pdev)
1249{
1250 int ret = 0;
1251 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1252
1253 if (host && !host->suspended)
1254 return 0;
1255
1256 if (host) {
1257 ret = mmc_resume_host(host->mmc);
1258 if (ret == 0)
1259 host->suspended = 0;
1260 }
1261
1262 return ret;
1263}
1264#else
1265#define mmc_omap_suspend NULL
1266#define mmc_omap_resume NULL
1267#endif
1268
/* Platform driver glue; matched by name against the board's device. */
static struct platform_driver mmc_omap_driver = {
	.probe		= mmc_omap_probe,
	.remove		= mmc_omap_remove,
	.suspend	= mmc_omap_suspend,
	.resume		= mmc_omap_resume,
	.driver		= {
		.name	= DRIVER_NAME,
	},
};
1278
/* Module entry point: register the platform driver. */
static int __init mmc_omap_init(void)
{
	return platform_driver_register(&mmc_omap_driver);
}
1283
/* Module exit point: unregister the platform driver. */
static void __exit mmc_omap_exit(void)
{
	platform_driver_unregister(&mmc_omap_driver);
}
1288
1289module_init(mmc_omap_init);
1290module_exit(mmc_omap_exit);
1291
1292MODULE_DESCRIPTION("OMAP Multimedia Card driver");
1293MODULE_LICENSE("GPL");
1294MODULE_ALIAS(DRIVER_NAME);
1295MODULE_AUTHOR("Juha Yrjölä");
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
new file mode 100644
index 000000000000..a98ff98fa567
--- /dev/null
+++ b/drivers/mmc/host/pxamci.c
@@ -0,0 +1,616 @@
1/*
2 * linux/drivers/mmc/pxa.c - PXA MMCI driver
3 *
4 * Copyright (C) 2003 Russell King, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This hardware is really sick:
11 * - No way to clear interrupts.
12 * - Have to turn off the clock whenever we touch the device.
13 * - Doesn't tell you how many data blocks were transferred.
14 * Yuck!
15 *
16 * 1 and 3 byte data transfers not supported
17 * max block length up to 1023
18 */
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/ioport.h>
22#include <linux/platform_device.h>
23#include <linux/delay.h>
24#include <linux/interrupt.h>
25#include <linux/dma-mapping.h>
26#include <linux/mmc/host.h>
27
28#include <asm/dma.h>
29#include <asm/io.h>
30#include <asm/scatterlist.h>
31#include <asm/sizes.h>
32
33#include <asm/arch/pxa-regs.h>
34#include <asm/arch/mmc.h>
35
36#include "pxamci.h"
37
#define DRIVER_NAME	"pxa2xx-mci"

/* Segments advertised to the core (max_phys_segs) — see pxamci_probe(). */
#define NR_SG	1
41
/* Per-controller state for one PXA MMC host instance. */
struct pxamci_host {
	struct mmc_host		*mmc;		/* MMC core host we registered */
	spinlock_t		lock;		/* protects imask updates */
	struct resource		*res;		/* MMIO region (FIFO phys addrs for DMA) */
	void __iomem		*base;		/* ioremapped register base */
	int			irq;		/* controller interrupt line */
	int			dma;		/* PXA DMA channel, -1 when unallocated */
	unsigned int		clkrt;		/* MMC_CLKRT divider for current clock */
	unsigned int		cmdat;		/* default CMDAT bits for next command */
	unsigned int		imask;		/* shadow copy of MMC_I_MASK */
	unsigned int		power_mode;	/* last ios->power_mode applied */
	struct pxamci_platform_data *pdata;	/* board hooks (power, RO, detect) */

	/* Request currently in flight — the queue is one deep. */
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;

	dma_addr_t		sg_dma;		/* bus address of descriptor chain */
	struct pxa_dma_desc	*sg_cpu;	/* CPU view of DMA descriptors */
	unsigned int		dma_len;	/* number of mapped sg entries */

	unsigned int		dma_dir;	/* DMA_FROM_DEVICE or DMA_TO_DEVICE */
};
65
/*
 * Stop the MMC bus clock and busy-wait (up to ~10 ms) for the
 * controller to confirm the clock is off.
 */
static void pxamci_stop_clock(struct pxamci_host *host)
{
	if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
		unsigned long timeout = 10000;
		unsigned int v;

		writel(STOP_CLOCK, host->base + MMC_STRPCL);

		/* Poll MMC_STAT until the clock-enable bit drops. */
		do {
			v = readl(host->base + MMC_STAT);
			if (!(v & STAT_CLK_EN))
				break;
			udelay(1);
		} while (timeout--);

		if (v & STAT_CLK_EN)
			dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
	}
}
85
86static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
87{
88 unsigned long flags;
89
90 spin_lock_irqsave(&host->lock, flags);
91 host->imask &= ~mask;
92 writel(host->imask, host->base + MMC_I_MASK);
93 spin_unlock_irqrestore(&host->lock, flags);
94}
95
96static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
97{
98 unsigned long flags;
99
100 spin_lock_irqsave(&host->lock, flags);
101 host->imask |= mask;
102 writel(host->imask, host->base + MMC_I_MASK);
103 spin_unlock_irqrestore(&host->lock, flags);
104}
105
/*
 * Program the controller and the PXA DMA engine for a data phase.
 * Builds one DMA descriptor per mapped scatterlist entry in
 * host->sg_cpu and starts the channel.
 */
static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned long long clks;
	unsigned int timeout;
	u32 dcmd;
	int i;

	host->data = data;

	/* Stream transfers have no fixed block count: use the maximum. */
	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	writel(nob, host->base + MMC_NOB);
	writel(data->blksz, host->base + MMC_BLKLEN);

	/* Convert the ns timeout to clocks; MMC_RDTO counts units of 256 clocks. */
	clks = (unsigned long long)data->timeout_ns * CLOCKRATE;
	do_div(clks, 1000000000UL);
	timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
	writel((timeout + 255) / 256, host->base + MMC_RDTO);

	/* Route the MMC request line to our channel for the right direction only. */
	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		dcmd = DCMD_INCTRGADDR | DCMD_FLOWTRG;
		DRCMRTXMMC = 0;
		DRCMRRXMMC = host->dma | DRCMR_MAPVLD;
	} else {
		host->dma_dir = DMA_TO_DEVICE;
		dcmd = DCMD_INCSRCADDR | DCMD_FLOWSRC;
		DRCMRRXMMC = 0;
		DRCMRTXMMC = host->dma | DRCMR_MAPVLD;
	}

	dcmd |= DCMD_BURST32 | DCMD_WIDTH1;

	host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				   host->dma_dir);

	/* One descriptor per sg entry: FIFO on one side, memory on the other. */
	for (i = 0; i < host->dma_len; i++) {
		if (data->flags & MMC_DATA_READ) {
			host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
			host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
		} else {
			host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]);
			host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO;
		}
		host->sg_cpu[i].dcmd = dcmd | sg_dma_len(&data->sg[i]);
		host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) *
					sizeof(struct pxa_dma_desc);
	}
	/* Terminate the chain; wmb() orders descriptor writes before DCSR_RUN. */
	host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;
	wmb();

	DDADR(host->dma) = host->sg_dma;
	DCSR(host->dma) = DCSR_RUN;
}
162
/*
 * Latch @cmd into the controller, start the bus clock and unmask
 * END_CMD_RES so pxamci_cmd_done() runs on completion.
 */
static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= CMDAT_BUSY;

	/* Map the core's response type onto the controller's response field. */
#define RSP_TYPE(x)	((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
	switch (RSP_TYPE(mmc_resp_type(cmd))) {
	case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */
		cmdat |= CMDAT_RESP_SHORT;
		break;
	case RSP_TYPE(MMC_RSP_R3):
		cmdat |= CMDAT_RESP_R3;
		break;
	case RSP_TYPE(MMC_RSP_R2):
		cmdat |= CMDAT_RESP_R2;
		break;
	default:
		break;
	}

	/* The 32-bit argument is split across two 16-bit registers. */
	writel(cmd->opcode, host->base + MMC_CMD);
	writel(cmd->arg >> 16, host->base + MMC_ARGH);
	writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
	writel(cmdat, host->base + MMC_CMDAT);
	writel(host->clkrt, host->base + MMC_CLKRT);

	writel(START_CLOCK, host->base + MMC_STRPCL);

	pxamci_enable_irq(host, END_CMD_RES);
}
196
197static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
198{
199 host->mrq = NULL;
200 host->cmd = NULL;
201 host->data = NULL;
202 mmc_request_done(host->mmc, mrq);
203}
204
/*
 * Handle END_CMD_RES: read back the response words, translate status
 * bits into an error code, then either arm the data-done interrupt or
 * finish the request. Returns 1 if a command was in flight, else 0.
 */
static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i;
	u32 v;

	if (!cmd)
		return 0;

	host->cmd = NULL;

	/*
	 * Did I mention this is Sick. We always need to
	 * discard the upper 8 bits of the first 16-bit word.
	 */
	v = readl(host->base + MMC_RES) & 0xffff;
	/* Reassemble four 32-bit response words from the 16-bit FIFO. */
	for (i = 0; i < 4; i++) {
		u32 w1 = readl(host->base + MMC_RES) & 0xffff;
		u32 w2 = readl(host->base + MMC_RES) & 0xffff;
		cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
		v = w2;
	}

	if (stat & STAT_TIME_OUT_RESPONSE) {
		cmd->error = MMC_ERR_TIMEOUT;
	} else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
#ifdef CONFIG_PXA27x
		/*
		 * workaround for erratum #42:
		 * Intel PXA27x Family Processor Specification Update Rev 001
		 */
		if (cmd->opcode == MMC_ALL_SEND_CID ||
		    cmd->opcode == MMC_SEND_CSD ||
		    cmd->opcode == MMC_SEND_CID) {
			/* a bogus CRC error can appear if the msb of
			   the 15 byte response is a one */
			if ((cmd->resp[0] & 0x80000000) == 0)
				cmd->error = MMC_ERR_BADCRC;
		} else {
			pr_debug("ignoring CRC from command %d - *risky*\n",cmd->opcode);
		}
#else
		cmd->error = MMC_ERR_BADCRC;
#endif
	}

	pxamci_disable_irq(host, END_CMD_RES);
	if (host->data && cmd->error == MMC_ERR_NONE) {
		/* Data phase pending — wait for DATA_TRAN_DONE. */
		pxamci_enable_irq(host, DATA_TRAN_DONE);
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}
260
/*
 * Handle DATA_TRAN_DONE: stop DMA, unmap the scatterlist, translate
 * status bits into an error code, then issue the stop command (if any)
 * or finish the request. Returns 1 if a data phase was in flight.
 */
static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;

	if (!data)
		return 0;

	DCSR(host->dma) = 0;
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
		     host->dma_dir);

	if (stat & STAT_READ_TIME_OUT)
		data->error = MMC_ERR_TIMEOUT;
	else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR))
		data->error = MMC_ERR_BADCRC;

	/*
	 * There appears to be a hardware design bug here. There seems to
	 * be no way to find out how much data was transferred to the card.
	 * This means that if there was an error on any block, we mark all
	 * data blocks as being in error.
	 */
	if (data->error == MMC_ERR_NONE)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pxamci_disable_irq(host, DATA_TRAN_DONE);

	host->data = NULL;
	if (host->mrq->stop) {
		pxamci_stop_clock(host);
		pxamci_start_cmd(host, host->mrq->stop, 0);
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}
300
301static irqreturn_t pxamci_irq(int irq, void *devid)
302{
303 struct pxamci_host *host = devid;
304 unsigned int ireg;
305 int handled = 0;
306
307 ireg = readl(host->base + MMC_I_REG);
308
309 if (ireg) {
310 unsigned stat = readl(host->base + MMC_STAT);
311
312 pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat);
313
314 if (ireg & END_CMD_RES)
315 handled |= pxamci_cmd_done(host, stat);
316 if (ireg & DATA_TRAN_DONE)
317 handled |= pxamci_data_done(host, stat);
318 }
319
320 return IRQ_RETVAL(handled);
321}
322
/*
 * mmc_host_ops.request: the queue is one deep. Set up the data phase
 * (if any) and start the command.
 */
static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct pxamci_host *host = mmc_priv(mmc);
	unsigned int cmdat;

	WARN_ON(host->mrq != NULL);

	host->mrq = mrq;

	pxamci_stop_clock(host);

	/* CMDAT_INIT is one-shot: applied to this command only, then cleared. */
	cmdat = host->cmdat;
	host->cmdat &= ~CMDAT_INIT;

	if (mrq->data) {
		pxamci_setup_data(host, mrq->data);

		cmdat &= ~CMDAT_BUSY;
		cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
		if (mrq->data->flags & MMC_DATA_WRITE)
			cmdat |= CMDAT_WRITE;

		if (mrq->data->flags & MMC_DATA_STREAM)
			cmdat |= CMDAT_STREAM;
	}

	pxamci_start_cmd(host, mrq->cmd, cmdat);
}
351
352static int pxamci_get_ro(struct mmc_host *mmc)
353{
354 struct pxamci_host *host = mmc_priv(mmc);
355
356 if (host->pdata && host->pdata->get_ro)
357 return host->pdata->get_ro(mmc_dev(mmc));
358 /* Host doesn't support read only detection so assume writeable */
359 return 0;
360}
361
/*
 * mmc_host_ops.set_ios: derive the clock divider, gate the MMC clock
 * unit, and forward power-mode changes to the board callback.
 */
static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct pxamci_host *host = mmc_priv(mmc);

	if (ios->clock) {
		/* Pick the power-of-two divider that doesn't exceed ios->clock. */
		unsigned int clk = CLOCKRATE / ios->clock;
		if (CLOCKRATE / clk > ios->clock)
			clk <<= 1;
		host->clkrt = fls(clk) - 1;
		pxa_set_cken(CKEN12_MMC, 1);

		/*
		 * we write clkrt on the next command
		 */
	} else {
		pxamci_stop_clock(host);
		pxa_set_cken(CKEN12_MMC, 0);
	}

	if (host->power_mode != ios->power_mode) {
		host->power_mode = ios->power_mode;

		if (host->pdata && host->pdata->setpower)
			host->pdata->setpower(mmc_dev(mmc), ios->vdd);

		/* Request the init sequence on the first command after power-up. */
		if (ios->power_mode == MMC_POWER_ON)
			host->cmdat |= CMDAT_INIT;
	}

	pr_debug("PXAMCI: clkrt = %x cmdat = %x\n",
		 host->clkrt, host->cmdat);
}
394
/* Host operations exported to the MMC core. */
static const struct mmc_host_ops pxamci_ops = {
	.request	= pxamci_request,
	.get_ro		= pxamci_get_ro,
	.set_ios	= pxamci_set_ios,
};
400
/*
 * DMA channel interrupt: unexpected in normal operation (the
 * descriptors request no end interrupts), so log and clear it.
 */
static void pxamci_dma_irq(int dma, void *devid)
{
	printk(KERN_ERR "DMA%d: IRQ???\n", dma);
	DCSR(dma) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
}
406
407static irqreturn_t pxamci_detect_irq(int irq, void *devid)
408{
409 struct pxamci_host *host = mmc_priv(devid);
410
411 mmc_detect_change(devid, host->pdata->detect_delay);
412 return IRQ_HANDLED;
413}
414
/*
 * Probe: claim the MMIO region, allocate host + DMA descriptor page +
 * DMA channel, register the IRQ and hand the host to the MMC core.
 * The out: path unwinds whatever was acquired before the failure.
 */
static int pxamci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct pxamci_host *host = NULL;
	struct resource *r;
	int ret, irq;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!r || irq < 0)
		return -ENXIO;

	r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
	if (!r)
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto out;
	}

	mmc->ops = &pxamci_ops;
	mmc->f_min = CLOCKRATE_MIN;
	mmc->f_max = CLOCKRATE_MAX;

	/*
	 * We can do SG-DMA, but we don't because we never know how much
	 * data we successfully wrote to the card.
	 */
	mmc->max_phys_segs = NR_SG;

	/*
	 * Our hardware DMA can handle a maximum of one page per SG entry.
	 */
	mmc->max_seg_size = PAGE_SIZE;

	/*
	 * Block length register is 10 bits.
	 */
	mmc->max_blk_size = 1023;

	/*
	 * Block count register is 16 bits.
	 */
	mmc->max_blk_count = 65535;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->dma = -1;		/* marks the DMA channel as not yet allocated */
	host->pdata = pdev->dev.platform_data;
	mmc->ocr_avail = host->pdata ?
			 host->pdata->ocr_mask :
			 MMC_VDD_32_33|MMC_VDD_33_34;

	/* One coherent page holds the whole DMA descriptor chain. */
	host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		ret = -ENOMEM;
		goto out;
	}

	spin_lock_init(&host->lock);
	host->res = r;
	host->irq = irq;
	host->imask = MMC_I_MASK_ALL;	/* start with everything masked */

	host->base = ioremap(r->start, SZ_4K);
	if (!host->base) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Ensure that the host controller is shut down, and setup
	 * with our defaults.
	 */
	pxamci_stop_clock(host);
	writel(0, host->base + MMC_SPI);
	writel(64, host->base + MMC_RESTO);
	writel(host->imask, host->base + MMC_I_MASK);

	host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW,
				    pxamci_dma_irq, host);
	if (host->dma < 0) {
		ret = -EBUSY;
		goto out;
	}

	ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto out;

	platform_set_drvdata(pdev, mmc);

	/* Let the board hook up card detect etc. before going live. */
	if (host->pdata && host->pdata->init)
		host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc);

	mmc_add_host(mmc);

	return 0;

 out:
	if (host) {
		if (host->dma >= 0)
			pxa_free_dma(host->dma);
		if (host->base)
			iounmap(host->base);
		if (host->sg_cpu)
			dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
	}
	if (mmc)
		mmc_free_host(mmc);
	release_resource(r);
	return ret;
}
530
/*
 * Remove: unregister from the core, quiesce the controller (clock off,
 * interrupts masked, DMA request lines detached), then release every
 * resource acquired in probe, in reverse order.
 */
static int pxamci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	if (mmc) {
		struct pxamci_host *host = mmc_priv(mmc);

		if (host->pdata && host->pdata->exit)
			host->pdata->exit(&pdev->dev, mmc);

		mmc_remove_host(mmc);

		pxamci_stop_clock(host);
		/* Mask every interrupt source before freeing the handler. */
		writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
		       END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
		       host->base + MMC_I_MASK);

		DRCMRRXMMC = 0;
		DRCMRTXMMC = 0;

		free_irq(host->irq, host);
		pxa_free_dma(host->dma);
		iounmap(host->base);
		dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);

		release_resource(host->res);

		mmc_free_host(mmc);
	}
	return 0;
}
564
#ifdef CONFIG_PM
/* Suspend hook: delegate to the MMC core. */
static int pxamci_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);
	int ret = 0;

	if (mmc)
		ret = mmc_suspend_host(mmc, state);

	return ret;
}

/* Resume hook: delegate to the MMC core. */
static int pxamci_resume(struct platform_device *dev)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);
	int ret = 0;

	if (mmc)
		ret = mmc_resume_host(mmc);

	return ret;
}
#else
/* No power management configured: skip the callbacks entirely. */
#define pxamci_suspend	NULL
#define pxamci_resume	NULL
#endif
591
/* Platform-bus glue for the PXA MMC controller. */
static struct platform_driver pxamci_driver = {
	.probe		= pxamci_probe,
	.remove		= pxamci_remove,
	.suspend	= pxamci_suspend,
	.resume		= pxamci_resume,
	.driver		= {
		.name	= DRIVER_NAME,
	},
};

/* Module entry point: register the platform driver. */
static int __init pxamci_init(void)
{
	return platform_driver_register(&pxamci_driver);
}

/* Module exit point: unregister the platform driver. */
static void __exit pxamci_exit(void)
{
	platform_driver_unregister(&pxamci_driver);
}

module_init(pxamci_init);
module_exit(pxamci_exit);

MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/pxamci.h b/drivers/mmc/host/pxamci.h
new file mode 100644
index 000000000000..1b163220df2b
--- /dev/null
+++ b/drivers/mmc/host/pxamci.h
@@ -0,0 +1,124 @@
/*
 * PXA2xx/PXA27x MMC controller register offsets and bit definitions.
 * The #undef block clears names that may already be defined by
 * platform register headers — verify against <asm/arch/pxa-regs.h>.
 */
#undef MMC_STRPCL
#undef MMC_STAT
#undef MMC_CLKRT
#undef MMC_SPI
#undef MMC_CMDAT
#undef MMC_RESTO
#undef MMC_RDTO
#undef MMC_BLKLEN
#undef MMC_NOB
#undef MMC_PRTBUF
#undef MMC_I_MASK
#undef END_CMD_RES
#undef PRG_DONE
#undef DATA_TRAN_DONE
#undef MMC_I_REG
#undef MMC_CMD
#undef MMC_ARGH
#undef MMC_ARGL
#undef MMC_RES
#undef MMC_RXFIFO
#undef MMC_TXFIFO

/* Clock start/stop control. */
#define MMC_STRPCL	0x0000
#define STOP_CLOCK		(1 << 0)
#define START_CLOCK		(2 << 0)

/* Status register bits. */
#define MMC_STAT	0x0004
#define STAT_END_CMD_RES		(1 << 13)
#define STAT_PRG_DONE			(1 << 12)
#define STAT_DATA_TRAN_DONE		(1 << 11)
#define STAT_CLK_EN			(1 << 8)
#define STAT_RECV_FIFO_FULL		(1 << 7)
#define STAT_XMIT_FIFO_EMPTY		(1 << 6)
#define STAT_RES_CRC_ERR		(1 << 5)
#define STAT_SPI_READ_ERROR_TOKEN	(1 << 4)
#define STAT_CRC_READ_ERROR		(1 << 3)
#define STAT_CRC_WRITE_ERROR		(1 << 2)
#define STAT_TIME_OUT_RESPONSE		(1 << 1)
#define STAT_READ_TIME_OUT		(1 << 0)

#define MMC_CLKRT	0x0008		/* 3 bit */

/* SPI-mode control. */
#define MMC_SPI		0x000c
#define SPI_CS_ADDRESS		(1 << 3)
#define SPI_CS_EN		(1 << 2)
#define CRC_ON			(1 << 1)
#define SPI_EN			(1 << 0)

/* Command attribute bits. */
#define MMC_CMDAT	0x0010
#define CMDAT_DMAEN		(1 << 7)
#define CMDAT_INIT		(1 << 6)
#define CMDAT_BUSY		(1 << 5)
#define CMDAT_STREAM		(1 << 4)	/* 1 = stream */
#define CMDAT_WRITE		(1 << 3)	/* 1 = write */
#define CMDAT_DATAEN		(1 << 2)
#define CMDAT_RESP_NONE		(0 << 0)
#define CMDAT_RESP_SHORT	(1 << 0)
#define CMDAT_RESP_R2		(2 << 0)
#define CMDAT_RESP_R3		(3 << 0)

#define MMC_RESTO	0x0014	/* 7 bit */

#define MMC_RDTO	0x0018	/* 16 bit */

#define MMC_BLKLEN	0x001c	/* 10 bit */

#define MMC_NOB		0x0020	/* 16 bit */

#define MMC_PRTBUF	0x0024
#define BUF_PART_FULL		(1 << 0)

#define MMC_I_MASK	0x0028

/*PXA27x MMC interrupts*/
#define SDIO_SUSPEND_ACK	(1 << 12)
#define SDIO_INT		(1 << 11)
#define RD_STALLED		(1 << 10)
#define RES_ERR			(1 << 9)
#define DAT_ERR			(1 << 8)
#define TINT			(1 << 7)

/*PXA2xx MMC interrupts*/
#define TXFIFO_WR_REQ		(1 << 6)
#define RXFIFO_RD_REQ		(1 << 5)
#define CLK_IS_OFF		(1 << 4)
#define STOP_CMD		(1 << 3)
#define END_CMD_RES		(1 << 2)
#define PRG_DONE		(1 << 1)
#define DATA_TRAN_DONE		(1 << 0)

/* Mask covering every interrupt bit the silicon implements. */
#ifdef CONFIG_PXA27x
#define MMC_I_MASK_ALL          0x00001fff
#else
#define MMC_I_MASK_ALL          0x0000007f
#endif

#define MMC_I_REG	0x002c
/* same as MMC_I_MASK */

#define MMC_CMD		0x0030

#define MMC_ARGH	0x0034	/* 16 bit */

#define MMC_ARGL	0x0038	/* 16 bit */

#define MMC_RES		0x003c	/* 16 bit */

#define MMC_RXFIFO	0x0040	/* 8 bit */

#define MMC_TXFIFO	0x0044	/* 8 bit */

/*
 * The base MMC clock rate
 */
#ifdef CONFIG_PXA27x
#define CLOCKRATE_MIN	304688
#define CLOCKRATE_MAX	19500000
#else
#define CLOCKRATE_MIN	312500
#define CLOCKRATE_MAX	20000000
#endif

#define CLOCKRATE	CLOCKRATE_MAX
124
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
new file mode 100644
index 000000000000..ff5bf73cdd25
--- /dev/null
+++ b/drivers/mmc/host/sdhci.c
@@ -0,0 +1,1535 @@
1/*
2 * linux/drivers/mmc/sdhci.c - Secure Digital Host Controller Interface driver
3 *
4 * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 */
11
12#include <linux/delay.h>
13#include <linux/highmem.h>
14#include <linux/pci.h>
15#include <linux/dma-mapping.h>
16
17#include <linux/mmc/host.h>
18
19#include <asm/scatterlist.h>
20
21#include "sdhci.h"
22
#define DRIVER_NAME "sdhci"

/* Debug printout helper, tagged with the calling function's name. */
#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)

/* Module debug knobs: disable/force DMA, override quirks from pci_ids. */
static unsigned int debug_nodma = 0;
static unsigned int debug_forcedma = 0;
static unsigned int debug_quirks = 0;

/* Per-controller quirk flags, carried in pci_ids[].driver_data. */
#define SDHCI_QUIRK_CLOCK_BEFORE_RESET			(1<<0)
#define SDHCI_QUIRK_FORCE_DMA				(1<<1)
/* Controller doesn't like some resets when there is no card inserted. */
#define SDHCI_QUIRK_NO_CARD_NO_RESET			(1<<2)
#define SDHCI_QUIRK_SINGLE_POWER_WRITE			(1<<3)
37
/*
 * PCI devices we bind to, with per-device quirk flags in driver_data.
 * Specific (subvendor) matches must precede the generic class match.
 */
static const struct pci_device_id pci_ids[] __devinitdata = {
	{
		.vendor		= PCI_VENDOR_ID_RICOH,
		.device		= PCI_DEVICE_ID_RICOH_R5C822,
		.subvendor	= PCI_VENDOR_ID_IBM,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_CLOCK_BEFORE_RESET |
				  SDHCI_QUIRK_FORCE_DMA,
	},

	{
		.vendor		= PCI_VENDOR_ID_RICOH,
		.device		= PCI_DEVICE_ID_RICOH_R5C822,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_FORCE_DMA |
				  SDHCI_QUIRK_NO_CARD_NO_RESET,
	},

	{
		.vendor		= PCI_VENDOR_ID_TI,
		.device		= PCI_DEVICE_ID_TI_XX21_XX11_SD,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_FORCE_DMA,
	},

	{
		.vendor		= PCI_VENDOR_ID_ENE,
		.device		= PCI_DEVICE_ID_ENE_CB712_SD,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_SINGLE_POWER_WRITE,
	},

	{	/* Generic SD host controller */
		PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
	},

	{ /* end: all zeroes */ },
};

MODULE_DEVICE_TABLE(pci, pci_ids);
81
82static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
83static void sdhci_finish_data(struct sdhci_host *);
84
85static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
86static void sdhci_finish_command(struct sdhci_host *);
87
/* Dump every controller register to the kernel log for debugging. */
static void sdhci_dumpregs(struct sdhci_host *host)
{
	printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n");

	printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
		readl(host->ioaddr + SDHCI_DMA_ADDRESS),
		readw(host->ioaddr + SDHCI_HOST_VERSION));
	printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
		readw(host->ioaddr + SDHCI_BLOCK_SIZE),
		readw(host->ioaddr + SDHCI_BLOCK_COUNT));
	printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
		readl(host->ioaddr + SDHCI_ARGUMENT),
		readw(host->ioaddr + SDHCI_TRANSFER_MODE));
	printk(KERN_DEBUG DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
		readl(host->ioaddr + SDHCI_PRESENT_STATE),
		readb(host->ioaddr + SDHCI_HOST_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
		readb(host->ioaddr + SDHCI_POWER_CONTROL),
		readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
		readb(host->ioaddr + SDHCI_WALK_UP_CONTROL),
		readw(host->ioaddr + SDHCI_CLOCK_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
		readb(host->ioaddr + SDHCI_TIMEOUT_CONTROL),
		readl(host->ioaddr + SDHCI_INT_STATUS));
	printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
		readl(host->ioaddr + SDHCI_INT_ENABLE),
		readl(host->ioaddr + SDHCI_SIGNAL_ENABLE));
	printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
		readw(host->ioaddr + SDHCI_ACMD12_ERR),
		readw(host->ioaddr + SDHCI_SLOT_INT_STATUS));
	printk(KERN_DEBUG DRIVER_NAME ": Caps:     0x%08x | Max curr: 0x%08x\n",
		readl(host->ioaddr + SDHCI_CAPABILITIES),
		readl(host->ioaddr + SDHCI_MAX_CURRENT));

	printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
}
125
126/*****************************************************************************\
127 * *
128 * Low level functions *
129 * *
130\*****************************************************************************/
131
/*
 * Issue a software reset for the units selected by @mask and wait
 * (up to 100 ms) for the hardware to clear the bits.
 */
static void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	/* Quirky controllers wedge on reset without a card present. */
	if (host->chip->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
			SDHCI_CARD_PRESENT))
			return;
	}

	writeb(mask, host->ioaddr + SDHCI_SOFTWARE_RESET);

	/* A full reset wipes the clock setup; force it to be reprogrammed. */
	if (mask & SDHCI_RESET_ALL)
		host->clock = 0;

	/* Wait max 100 ms */
	timeout = 100;

	/* hw clears the bit when it's done */
	while (readb(host->ioaddr + SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}
}
162
/*
 * Bring the controller to a known state: full reset, then enable
 * status reporting and IRQ signalling for every event we handle.
 */
static void sdhci_init(struct sdhci_host *host)
{
	u32 intmask;

	sdhci_reset(host, SDHCI_RESET_ALL);

	intmask = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
		SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
		SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT |
		SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL |
		SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE;

	writel(intmask, host->ioaddr + SDHCI_INT_ENABLE);
	writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE);
}
179
180static void sdhci_activate_led(struct sdhci_host *host)
181{
182 u8 ctrl;
183
184 ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
185 ctrl |= SDHCI_CTRL_LED;
186 writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
187}
188
189static void sdhci_deactivate_led(struct sdhci_host *host)
190{
191 u8 ctrl;
192
193 ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
194 ctrl &= ~SDHCI_CTRL_LED;
195 writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
196}
197
198/*****************************************************************************\
199 * *
200 * Core functions *
201 * *
202\*****************************************************************************/
203
/* Kernel virtual address of the current scatterlist entry's data. */
static inline char* sdhci_sg_to_buffer(struct sdhci_host* host)
{
	return page_address(host->cur_sg->page) + host->cur_sg->offset;
}
208
209static inline int sdhci_next_sg(struct sdhci_host* host)
210{
211 /*
212 * Skip to next SG entry.
213 */
214 host->cur_sg++;
215 host->num_sg--;
216
217 /*
218 * Any entries left?
219 */
220 if (host->num_sg > 0) {
221 host->offset = 0;
222 host->remain = host->cur_sg->length;
223 }
224
225 return host->num_sg;
226}
227
/*
 * Drain one data block from the 32-bit FIFO into the scatterlist
 * buffers, byte by byte so chunks may straddle sg-entry boundaries.
 */
static void sdhci_read_block_pio(struct sdhci_host *host)
{
	int blksize, chunk_remain;
	u32 data;
	char *buffer;
	int size;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk_remain = 0;
	data = 0;

	buffer = sdhci_sg_to_buffer(host) + host->offset;

	while (blksize) {
		/* Refill the 32-bit chunk from the FIFO when exhausted. */
		if (chunk_remain == 0) {
			data = readl(host->ioaddr + SDHCI_BUFFER);
			chunk_remain = min(blksize, 4);
		}

		size = min(host->remain, chunk_remain);

		chunk_remain -= size;
		blksize -= size;
		host->offset += size;
		host->remain -= size;

		/* Unpack the chunk least-significant byte first. */
		while (size) {
			*buffer = data & 0xFF;
			buffer++;
			data >>= 8;
			size--;
		}

		/* Current sg entry exhausted: move to the next one. */
		if (host->remain == 0) {
			if (sdhci_next_sg(host) == 0) {
				BUG_ON(blksize != 0);
				return;
			}
			buffer = sdhci_sg_to_buffer(host);
		}
	}
}
272
/*
 * Feed one data block from the scatterlist buffers into the 32-bit
 * FIFO, byte by byte so chunks may straddle sg-entry boundaries.
 */
static void sdhci_write_block_pio(struct sdhci_host *host)
{
	int blksize, chunk_remain;
	u32 data;
	char *buffer;
	int bytes, size;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk_remain = 4;
	data = 0;

	bytes = 0;
	buffer = sdhci_sg_to_buffer(host) + host->offset;

	while (blksize) {
		size = min(host->remain, chunk_remain);

		chunk_remain -= size;
		blksize -= size;
		host->offset += size;
		host->remain -= size;

		/* Pack bytes into the chunk least-significant byte first. */
		while (size) {
			data >>= 8;
			data |= (u32)*buffer << 24;
			buffer++;
			size--;
		}

		/* Chunk full: push it to the FIFO and start the next one. */
		if (chunk_remain == 0) {
			writel(data, host->ioaddr + SDHCI_BUFFER);
			chunk_remain = min(blksize, 4);
		}

		/* Current sg entry exhausted: move to the next one. */
		if (host->remain == 0) {
			if (sdhci_next_sg(host) == 0) {
				BUG_ON(blksize != 0);
				return;
			}
			buffer = sdhci_sg_to_buffer(host);
		}
	}
}
318
/*
 * Move as many blocks as the controller currently allows: while the
 * present-state register reports data/space available, transfer one
 * block per iteration in the appropriate direction.
 */
static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	BUG_ON(!host->data);

	/* Nothing left to transfer. */
	if (host->num_sg == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	while (readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask) {
		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		if (host->num_sg == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}
345
346static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
347{
348 u8 count;
349 unsigned target_timeout, current_timeout;
350
351 WARN_ON(host->data);
352
353 if (data == NULL)
354 return;
355
356 DBG("blksz %04x blks %04x flags %08x\n",
357 data->blksz, data->blocks, data->flags);
358 DBG("tsac %d ms nsac %d clk\n",
359 data->timeout_ns / 1000000, data->timeout_clks);
360
361 /* Sanity checks */
362 BUG_ON(data->blksz * data->blocks > 524288);
363 BUG_ON(data->blksz > host->mmc->max_blk_size);
364 BUG_ON(data->blocks > 65535);
365
366 /* timeout in us */
367 target_timeout = data->timeout_ns / 1000 +
368 data->timeout_clks / host->clock;
369
370 /*
371 * Figure out needed cycles.
372 * We do this in steps in order to fit inside a 32 bit int.
373 * The first step is the minimum timeout, which will have a
374 * minimum resolution of 6 bits:
375 * (1) 2^13*1000 > 2^22,
376 * (2) host->timeout_clk < 2^16
377 * =>
378 * (1) / (2) > 2^6
379 */
380 count = 0;
381 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
382 while (current_timeout < target_timeout) {
383 count++;
384 current_timeout <<= 1;
385 if (count >= 0xF)
386 break;
387 }
388
389 if (count >= 0xF) {
390 printk(KERN_WARNING "%s: Too large timeout requested!\n",
391 mmc_hostname(host->mmc));
392 count = 0xE;
393 }
394
395 writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL);
396
397 if (host->flags & SDHCI_USE_DMA) {
398 int count;
399
400 count = pci_map_sg(host->chip->pdev, data->sg, data->sg_len,
401 (data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE);
402 BUG_ON(count != 1);
403
404 writel(sg_dma_address(data->sg), host->ioaddr + SDHCI_DMA_ADDRESS);
405 } else {
406 host->cur_sg = data->sg;
407 host->num_sg = data->sg_len;
408
409 host->offset = 0;
410 host->remain = host->cur_sg->length;
411 }
412
413 /* We do not handle DMA boundaries, so set it to max (512 KiB) */
414 writew(SDHCI_MAKE_BLKSZ(7, data->blksz),
415 host->ioaddr + SDHCI_BLOCK_SIZE);
416 writew(data->blocks, host->ioaddr + SDHCI_BLOCK_COUNT);
417}
418
419static void sdhci_set_transfer_mode(struct sdhci_host *host,
420 struct mmc_data *data)
421{
422 u16 mode;
423
424 WARN_ON(host->data);
425
426 if (data == NULL)
427 return;
428
429 mode = SDHCI_TRNS_BLK_CNT_EN;
430 if (data->blocks > 1)
431 mode |= SDHCI_TRNS_MULTI;
432 if (data->flags & MMC_DATA_READ)
433 mode |= SDHCI_TRNS_READ;
434 if (host->flags & SDHCI_USE_DMA)
435 mode |= SDHCI_TRNS_DMA;
436
437 writew(mode, host->ioaddr + SDHCI_TRANSFER_MODE);
438}
439
/*
 * Complete the data phase: unmap DMA, compute bytes_xfered from the
 * remaining block count, then either send the stop command or
 * schedule request completion.
 */
static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;
	u16 blocks;

	BUG_ON(!host->data);

	data = host->data;
	host->data = NULL;

	if (host->flags & SDHCI_USE_DMA) {
		pci_unmap_sg(host->chip->pdev, data->sg, data->sg_len,
			(data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE);
	}

	/*
	 * Controller doesn't count down when in single block mode.
	 */
	if ((data->blocks == 1) && (data->error == MMC_ERR_NONE))
		blocks = 0;
	else
		blocks = readw(host->ioaddr + SDHCI_BLOCK_COUNT);
	data->bytes_xfered = data->blksz * (data->blocks - blocks);

	if ((data->error == MMC_ERR_NONE) && blocks) {
		printk(KERN_ERR "%s: Controller signalled completion even "
			"though there were blocks left.\n",
			mmc_hostname(host->mmc));
		data->error = MMC_ERR_FAILED;
	}

	DBG("Ending data transfer (%d bytes)\n", data->bytes_xfered);

	if (data->stop) {
		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error != MMC_ERR_NONE) {
			sdhci_reset(host, SDHCI_RESET_CMD);
			sdhci_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else
		tasklet_schedule(&host->finish_tasklet);
}
487
/*
 * Issue a single MMC command to the controller: wait for the CMD (and,
 * when required, DAT) inhibit bits to clear, program the data phase,
 * then write argument, transfer mode and finally the command register
 * (the command write is what starts execution). On any failure the
 * error is recorded in the command and the finish tasklet is scheduled.
 * Called with host->lock held.
 */
static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	DBG("Sending cmd (%x)\n", cmd->opcode);

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inihibit for stop commands, even
	   though they might use busy signaling */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Controller never released "
				"inhibit bit(s).\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = MMC_ERR_FAILED;
			tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	/* Watchdog: sdhci_timeout_timer() fires if no interrupt arrives. */
	mod_timer(&host->timer, jiffies + 10 * HZ);

	host->cmd = cmd;

	sdhci_prepare_data(host, cmd->data);

	writel(cmd->arg, host->ioaddr + SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd->data);

	/* The hardware has no response type for "136-bit with busy". */
	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		printk(KERN_ERR "%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = MMC_ERR_INVALID;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	/* Map the MMC response flags onto the controller's encoding. */
	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;
	if (cmd->data)
		flags |= SDHCI_CMD_DATA;

	/* Writing the command register triggers command execution. */
	writew(SDHCI_MAKE_CMD(cmd->opcode, flags),
		host->ioaddr + SDHCI_COMMAND);
}
560
/*
 * Read back the response for the just-completed command. For 136-bit
 * responses the controller strips the CRC byte, so the raw register
 * contents must be shifted left one byte and stitched back together
 * across the four response words. Called from sdhci_cmd_irq() with
 * host->lock held.
 */
static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0;i < 4;i++) {
				host->cmd->resp[i] = readl(host->ioaddr +
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					/* Top byte comes from the word below. */
					host->cmd->resp[i] |=
						readb(host->ioaddr +
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			host->cmd->resp[0] = readl(host->ioaddr + SDHCI_RESPONSE);
		}
	}

	host->cmd->error = MMC_ERR_NONE;

	DBG("Ending cmd (%x)\n", host->cmd->opcode);

	/* With a data phase the request stays active; otherwise finish. */
	if (host->cmd->data)
		host->data = host->cmd->data;
	else
		tasklet_schedule(&host->finish_tasklet);

	host->cmd = NULL;
}
594
/*
 * Set the SD bus clock to (at most) the requested frequency in Hz.
 * Picks the smallest power-of-two divider of max_clk that does not
 * exceed the request, waits for the internal clock to stabilise, then
 * gates the clock out to the card. clock == 0 leaves the clock off.
 * Called with host->lock held.
 */
static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int div;
	u16 clk;
	unsigned long timeout;

	if (clock == host->clock)
		return;

	/* Always stop the clock before reprogramming the divider. */
	writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		goto out;

	/* Find the first divider that brings us at or below the target. */
	for (div = 1;div < 256;div * = 2) {
		if ((host->max_clk / div) <= clock)
			break;
	}
	/* Register encodes divider/2 (divider 1 is encoded as 0). */
	div >>= 1;

	clk = div << SDHCI_DIVIDER_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	writew(clk, host->ioaddr + SDHCI_CLOCK_CONTROL);

	/* Wait max 10 ms */
	timeout = 10;
	while (!((clk = readw(host->ioaddr + SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Internal clock never "
				"stabilised.\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	/* Internal clock is stable; enable it out to the card. */
	clk |= SDHCI_CLOCK_CARD_EN;
	writew(clk, host->ioaddr + SDHCI_CLOCK_CONTROL);

out:
	host->clock = clock;
}
639
/*
 * Set the card power rail. 'power' is the bit index into the MMC VDD
 * voltage mask (so 1 << power yields an MMC_VDD_* bit), or
 * (unsigned short)-1 to switch power off entirely.
 * Called with host->lock held.
 */
static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
	u8 pwr;

	if (host->power == power)
		return;

	if (power == (unsigned short)-1) {
		writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
		goto out;
	}

	/*
	 * Spec says that we should clear the power reg before setting
	 * a new value. Some controllers don't seem to like this though.
	 */
	if (!(host->chip->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
		writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);

	pwr = SDHCI_POWER_ON;

	/* Translate the MMC voltage bit into the controller encoding. */
	switch (1 << power) {
	case MMC_VDD_165_195:
		pwr |= SDHCI_POWER_180;
		break;
	case MMC_VDD_29_30:
	case MMC_VDD_30_31:
		pwr |= SDHCI_POWER_300;
		break;
	case MMC_VDD_32_33:
	case MMC_VDD_33_34:
		pwr |= SDHCI_POWER_330;
		break;
	default:
		/* ocr_avail only advertises the voltages handled above. */
		BUG();
	}

	writeb(pwr, host->ioaddr + SDHCI_POWER_CONTROL);

out:
	host->power = power;
}
682
683/*****************************************************************************\
684 * *
685 * MMC callbacks *
686 * *
687\*****************************************************************************/
688
/*
 * mmc_host_ops.request callback: start processing a new MMC request.
 * If no card is present the request is failed immediately with a
 * timeout error (the standard "no card" signal to the MMC core);
 * otherwise the first command is issued.
 */
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	/* Only one request may be in flight at a time. */
	WARN_ON(host->mrq != NULL);

	sdhci_activate_led(host);

	host->mrq = mrq;

	if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
		host->mrq->cmd->error = MMC_ERR_TIMEOUT;
		tasklet_schedule(&host->finish_tasklet);
	} else
		sdhci_send_command(host, mrq->cmd);

	/* Flush posted MMIO writes before dropping the lock. */
	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
713
/*
 * mmc_host_ops.set_ios callback: apply bus settings requested by the
 * MMC core — clock frequency, power state, bus width and timing mode.
 */
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host;
	unsigned long flags;
	u8 ctrl;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		/* Mask interrupt signals while the controller is reset. */
		writel(0, host->ioaddr + SDHCI_SIGNAL_ENABLE);
		sdhci_init(host);
	}

	sdhci_set_clock(host, ios->clock);

	/* -1 tells sdhci_set_power() to cut power entirely. */
	if (ios->power_mode == MMC_POWER_OFF)
		sdhci_set_power(host, -1);
	else
		sdhci_set_power(host, ios->vdd);

	ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		ctrl |= SDHCI_CTRL_4BITBUS;
	else
		ctrl &= ~SDHCI_CTRL_4BITBUS;

	if (ios->timing == MMC_TIMING_SD_HS)
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
757
758static int sdhci_get_ro(struct mmc_host *mmc)
759{
760 struct sdhci_host *host;
761 unsigned long flags;
762 int present;
763
764 host = mmc_priv(mmc);
765
766 spin_lock_irqsave(&host->lock, flags);
767
768 present = readl(host->ioaddr + SDHCI_PRESENT_STATE);
769
770 spin_unlock_irqrestore(&host->lock, flags);
771
772 return !(present & SDHCI_WRITE_PROTECT);
773}
774
/* Host operations handed to the MMC core via mmc->ops. */
static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.set_ios	= sdhci_set_ios,
	.get_ro		= sdhci_get_ro,
};
780
781/*****************************************************************************\
782 * *
783 * Tasklets *
784 * *
785\*****************************************************************************/
786
/*
 * Card insert/remove tasklet. If the card disappeared while a request
 * was in flight, reset the controller and fail the request, then let
 * the MMC core rescan the slot (debounced by 500 ms).
 */
static void sdhci_tasklet_card(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host*)param;

	spin_lock_irqsave(&host->lock, flags);

	if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
		if (host->mrq) {
			printk(KERN_ERR "%s: Card removed during transfer!\n",
				mmc_hostname(host->mmc));
			printk(KERN_ERR "%s: Resetting controller.\n",
				mmc_hostname(host->mmc));

			sdhci_reset(host, SDHCI_RESET_CMD);
			sdhci_reset(host, SDHCI_RESET_DATA);

			host->mrq->cmd->error = MMC_ERR_FAILED;
			tasklet_schedule(&host->finish_tasklet);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));
}
815
/*
 * Request-completion tasklet: cancel the watchdog timer, reset the
 * controller state machines if anything in the request failed, clear
 * the per-request bookkeeping and report completion to the MMC core.
 */
static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;
	struct mmc_request *mrq;

	host = (struct sdhci_host*)param;

	spin_lock_irqsave(&host->lock, flags);

	del_timer(&host->timer);

	mrq = host->mrq;

	DBG("Ending request, cmd (%x)\n", mrq->cmd->opcode);

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if ((mrq->cmd->error != MMC_ERR_NONE) ||
		(mrq->data && ((mrq->data->error != MMC_ERR_NONE) ||
		(mrq->data->stop && (mrq->data->stop->error != MMC_ERR_NONE))))) {

		/* Some controllers need this kick or reset won't work here */
		if (host->chip->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
			unsigned int clock;

			/* This is to force an update */
			clock = host->clock;
			host->clock = 0;
			sdhci_set_clock(host, clock);
		}

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_reset(host, SDHCI_RESET_CMD);
		sdhci_reset(host, SDHCI_RESET_DATA);
	}

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

	sdhci_deactivate_led(host);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	/* Must be called without the lock held; may start a new request. */
	mmc_request_done(host->mmc, mrq);
}
867
/*
 * Software watchdog (armed in sdhci_send_command for 10 s): fires when
 * the hardware never raised a completion interrupt. Marks whatever
 * phase was active (data, command, or the bare request) as timed out
 * and pushes the request toward completion.
 */
static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host*)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		printk(KERN_ERR "%s: Timeout waiting for hardware "
			"interrupt.\n", mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = MMC_ERR_TIMEOUT;
			/* Finishes the data phase; issues stop cmd if any. */
			sdhci_finish_data(host);
		} else {
			if (host->cmd)
				host->cmd->error = MMC_ERR_TIMEOUT;
			else
				host->mrq->cmd->error = MMC_ERR_TIMEOUT;

			tasklet_schedule(&host->finish_tasklet);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
898
899/*****************************************************************************\
900 * *
901 * Interrupt handling *
902 * *
903\*****************************************************************************/
904
/*
 * Handle the command-related portion of an interrupt (intmask already
 * filtered to SDHCI_INT_CMD_MASK bits). On success reads back the
 * response; on error maps the hardware status to an MMC error code,
 * testing in decreasing order of specificity.
 * Called from sdhci_irq() with host->lock held.
 */
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
{
	BUG_ON(intmask == 0);

	if (!host->cmd) {
		/* Spurious interrupt — log it but carry on. */
		printk(KERN_ERR "%s: Got command interrupt even though no "
			"command operation was in progress.\n",
			mmc_hostname(host->mmc));
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
	else {
		if (intmask & SDHCI_INT_TIMEOUT)
			host->cmd->error = MMC_ERR_TIMEOUT;
		else if (intmask & SDHCI_INT_CRC)
			host->cmd->error = MMC_ERR_BADCRC;
		else if (intmask & (SDHCI_INT_END_BIT | SDHCI_INT_INDEX))
			host->cmd->error = MMC_ERR_FAILED;
		else
			host->cmd->error = MMC_ERR_INVALID;

		tasklet_schedule(&host->finish_tasklet);
	}
}
932
/*
 * Handle the data-related portion of an interrupt (intmask already
 * filtered to SDHCI_INT_DATA_MASK bits): record data errors, service
 * PIO buffer-ready events, and finish the data phase at transfer end.
 * Called from sdhci_irq() with host->lock held.
 */
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	BUG_ON(intmask == 0);

	if (!host->data) {
		/*
		 * A data end interrupt is sent together with the response
		 * for the stop command.
		 */
		if (intmask & SDHCI_INT_DATA_END)
			return;

		printk(KERN_ERR "%s: Got data interrupt even though no "
			"data operation was in progress.\n",
			mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = MMC_ERR_TIMEOUT;
	else if (intmask & SDHCI_INT_DATA_CRC)
		host->data->error = MMC_ERR_BADCRC;
	else if (intmask & SDHCI_INT_DATA_END_BIT)
		host->data->error = MMC_ERR_FAILED;

	if (host->data->error != MMC_ERR_NONE)
		sdhci_finish_data(host);
	else {
		/* PIO mode: move data while the FIFO has room/content. */
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		if (intmask & SDHCI_INT_DATA_END)
			sdhci_finish_data(host);
	}
}
970
/*
 * Top-level (shared) interrupt handler. Reads the interrupt status,
 * acknowledges each class of event by writing its bits back to the
 * status register, and dispatches to the card-detect tasklet and the
 * command/data sub-handlers. 0xffffffff means the device is gone
 * (e.g. hot-unplugged bridge), so treat it like "not ours".
 */
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result;
	struct sdhci_host* host = dev_id;
	u32 intmask;

	spin_lock(&host->lock);

	intmask = readl(host->ioaddr + SDHCI_INT_STATUS);

	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	DBG("*** %s got interrupt: 0x%08x\n", host->slot_descr, intmask);

	if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		writel(intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE),
			host->ioaddr + SDHCI_INT_STATUS);
		tasklet_schedule(&host->card_tasklet);
	}

	intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);

	if (intmask & SDHCI_INT_CMD_MASK) {
		writel(intmask & SDHCI_INT_CMD_MASK,
			host->ioaddr + SDHCI_INT_STATUS);
		sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
	}

	if (intmask & SDHCI_INT_DATA_MASK) {
		writel(intmask & SDHCI_INT_DATA_MASK,
			host->ioaddr + SDHCI_INT_STATUS);
		sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
	}

	intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);

	if (intmask & SDHCI_INT_BUS_POWER) {
		printk(KERN_ERR "%s: Card is consuming too much power!\n",
			mmc_hostname(host->mmc));
		writel(SDHCI_INT_BUS_POWER, host->ioaddr + SDHCI_INT_STATUS);
	}

	intmask &= SDHCI_INT_BUS_POWER;

	/* Anything still set here is a status bit we don't understand. */
	if (intmask) {
		printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
			mmc_hostname(host->mmc), intmask);
		sdhci_dumpregs(host);

		writel(intmask, host->ioaddr + SDHCI_INT_STATUS);
	}

	result = IRQ_HANDLED;

	mmiowb();
out:
	spin_unlock(&host->lock);

	return result;
}
1034
1035/*****************************************************************************\
1036 * *
1037 * Suspend/resume *
1038 * *
1039\*****************************************************************************/
1040
1041#ifdef CONFIG_PM
1042
/*
 * PCI suspend hook: suspend every slot's MMC host (rolling back the
 * already-suspended ones on failure), release the IRQs, then put the
 * PCI device into the requested low-power state.
 */
static int sdhci_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct sdhci_chip *chip;
	int i, ret;

	chip = pci_get_drvdata(pdev);
	if (!chip)
		return 0;

	DBG("Suspending...\n");

	for (i = 0;i < chip->num_slots;i++) {
		if (!chip->hosts[i])
			continue;
		ret = mmc_suspend_host(chip->hosts[i]->mmc, state);
		if (ret) {
			/* Undo the slots suspended so far. */
			for (i--;i >= 0;i--)
				mmc_resume_host(chip->hosts[i]->mmc);
			return ret;
		}
	}

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);

	for (i = 0;i < chip->num_slots;i++) {
		if (!chip->hosts[i])
			continue;
		free_irq(chip->hosts[i]->irq, chip->hosts[i]);
	}

	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
1079
/*
 * PCI resume hook: restore PCI state, re-request each slot's IRQ,
 * reinitialise the controller registers and resume the MMC hosts.
 */
static int sdhci_resume (struct pci_dev *pdev)
{
	struct sdhci_chip *chip;
	int i, ret;

	chip = pci_get_drvdata(pdev);
	if (!chip)
		return 0;

	DBG("Resuming...\n");

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	for (i = 0;i < chip->num_slots;i++) {
		if (!chip->hosts[i])
			continue;
		/* Bus mastering is lost across suspend; re-enable for DMA. */
		if (chip->hosts[i]->flags & SDHCI_USE_DMA)
			pci_set_master(pdev);
		ret = request_irq(chip->hosts[i]->irq, sdhci_irq,
			IRQF_SHARED, chip->hosts[i]->slot_descr,
			chip->hosts[i]);
		if (ret)
			return ret;
		sdhci_init(chip->hosts[i]);
		mmiowb();
		ret = mmc_resume_host(chip->hosts[i]->mmc);
		if (ret)
			return ret;
	}

	return 0;
}
1116
1117#else /* CONFIG_PM */
1118
1119#define sdhci_suspend NULL
1120#define sdhci_resume NULL
1121
1122#endif /* CONFIG_PM */
1123
1124/*****************************************************************************\
1125 * *
1126 * Device probing/removal *
1127 * *
1128\*****************************************************************************/
1129
/*
 * Probe one controller slot: validate the PCI BAR and interface class,
 * map the register window, read the capabilities register, decide on
 * DMA vs PIO, fill in the mmc_host parameters and bring the host up.
 * Errors unwind through the goto ladder at the bottom.
 */
static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
{
	int ret;
	unsigned int version;
	struct sdhci_chip *chip;
	struct mmc_host *mmc;
	struct sdhci_host *host;

	u8 first_bar;
	unsigned int caps;

	chip = pci_get_drvdata(pdev);
	BUG_ON(!chip);

	/* The SDHCI PCI slot-info register tells us which BAR is slot 0. */
	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
	if (ret)
		return ret;

	first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;

	if (first_bar > 5) {
		printk(KERN_ERR DRIVER_NAME ": Invalid first BAR. Aborting.\n");
		return -ENODEV;
	}

	if (!(pci_resource_flags(pdev, first_bar + slot) & IORESOURCE_MEM)) {
		printk(KERN_ERR DRIVER_NAME ": BAR is not iomem. Aborting.\n");
		return -ENODEV;
	}

	/* The spec defines a 256-byte register window per slot. */
	if (pci_resource_len(pdev, first_bar + slot) != 0x100) {
		printk(KERN_ERR DRIVER_NAME ": Invalid iomem size. "
			"You may experience problems.\n");
	}

	/* Only the standard PIO/DMA programming interfaces are supported. */
	if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
		printk(KERN_ERR DRIVER_NAME ": Vendor specific interface. Aborting.\n");
		return -ENODEV;
	}

	if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
		printk(KERN_ERR DRIVER_NAME ": Unknown interface. Aborting.\n");
		return -ENODEV;
	}

	mmc = mmc_alloc_host(sizeof(struct sdhci_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->chip = chip;
	chip->hosts[slot] = host;

	host->bar = first_bar + slot;

	host->addr = pci_resource_start(pdev, host->bar);
	host->irq = pdev->irq;

	DBG("slot %d at 0x%08lx, irq %d\n", slot, host->addr, host->irq);

	snprintf(host->slot_descr, 20, "sdhci:slot%d", slot);

	ret = pci_request_region(pdev, host->bar, host->slot_descr);
	if (ret)
		goto free;

	host->ioaddr = ioremap_nocache(host->addr,
		pci_resource_len(pdev, host->bar));
	if (!host->ioaddr) {
		ret = -ENOMEM;
		goto release;
	}

	/* Start from a clean slate before touching any other register. */
	sdhci_reset(host, SDHCI_RESET_ALL);

	/* This driver only knows spec version 1.0 (encoded as 0). */
	version = readw(host->ioaddr + SDHCI_HOST_VERSION);
	version = (version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
	if (version != 0) {
		printk(KERN_ERR "%s: Unknown controller version (%d). "
			"You may experience problems.\n", host->slot_descr,
			version);
	}

	caps = readl(host->ioaddr + SDHCI_CAPABILITIES);

	/*
	 * DMA decision: module parameters override quirks, which override
	 * what the interface class and capabilities register report.
	 */
	if (debug_nodma)
		DBG("DMA forced off\n");
	else if (debug_forcedma) {
		DBG("DMA forced on\n");
		host->flags |= SDHCI_USE_DMA;
	} else if (chip->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_DMA;
	else if ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA)
		DBG("Controller doesn't have DMA interface\n");
	else if (!(caps & SDHCI_CAN_DO_DMA))
		DBG("Controller doesn't have DMA capability\n");
	else
		host->flags |= SDHCI_USE_DMA;

	if (host->flags & SDHCI_USE_DMA) {
		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
			printk(KERN_WARNING "%s: No suitable DMA available. "
				"Falling back to PIO.\n", host->slot_descr);
			host->flags &= ~SDHCI_USE_DMA;
		}
	}

	if (host->flags & SDHCI_USE_DMA)
		pci_set_master(pdev);
	else /* XXX: Hack to get MMC layer to avoid highmem */
		pdev->dma_mask = 0;

	/* Capabilities report the base clock in MHz; convert to Hz. */
	host->max_clk =
		(caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
	if (host->max_clk == 0) {
		printk(KERN_ERR "%s: Hardware doesn't specify base clock "
			"frequency.\n", host->slot_descr);
		ret = -ENODEV;
		goto unmap;
	}
	host->max_clk *= 1000000;

	host->timeout_clk =
		(caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
	if (host->timeout_clk == 0) {
		printk(KERN_ERR "%s: Hardware doesn't specify timeout clock "
			"frequency.\n", host->slot_descr);
		ret = -ENODEV;
		goto unmap;
	}
	if (caps & SDHCI_TIMEOUT_CLK_UNIT)
		host->timeout_clk *= 1000;

	/*
	 * Set host parameters.
	 */
	mmc->ops = &sdhci_ops;
	/* 256 is the largest divider the clock register can encode. */
	mmc->f_min = host->max_clk / 256;
	mmc->f_max = host->max_clk;
	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_BYTEBLOCK;

	if (caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

	mmc->ocr_avail = 0;
	if (caps & SDHCI_CAN_VDD_330)
		mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
	if (caps & SDHCI_CAN_VDD_300)
		mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
	if (caps & SDHCI_CAN_VDD_180)
		mmc->ocr_avail |= MMC_VDD_165_195;

	if (mmc->ocr_avail == 0) {
		printk(KERN_ERR "%s: Hardware doesn't report any "
			"support voltages.\n", host->slot_descr);
		ret = -ENODEV;
		goto unmap;
	}

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of segments. Hardware cannot do scatter lists.
	 */
	if (host->flags & SDHCI_USE_DMA)
		mmc->max_hw_segs = 1;
	else
		mmc->max_hw_segs = 16;
	mmc->max_phys_segs = 16;

	/*
	 * Maximum number of sectors in one transfer. Limited by DMA boundary
	 * size (512KiB).
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;
	if (mmc->max_blk_size >= 3) {
		printk(KERN_ERR "%s: Invalid maximum block size.\n",
			host->slot_descr);
		ret = -ENODEV;
		goto unmap;
	}
	mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = 65535;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->card_tasklet,
		sdhci_tasklet_card, (unsigned long)host);
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);

	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
		host->slot_descr, host);
	if (ret)
		goto untasklet;

	sdhci_init(host);

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif

	mmiowb();

	mmc_add_host(mmc);

	printk(KERN_INFO "%s: SDHCI at 0x%08lx irq %d %s\n", mmc_hostname(mmc),
		host->addr, host->irq,
		(host->flags & SDHCI_USE_DMA)?"DMA":"PIO");

	return 0;

untasklet:
	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);
unmap:
	iounmap(host->ioaddr);
release:
	pci_release_region(pdev, host->bar);
free:
	mmc_free_host(mmc);

	return ret;
}
1375
/*
 * Tear down one slot, undoing sdhci_probe_slot() in reverse order:
 * detach from the MMC core, quiesce the hardware, release IRQ, timer,
 * tasklets, mapping and PCI region, then free the host structure.
 */
static void sdhci_remove_slot(struct pci_dev *pdev, int slot)
{
	struct sdhci_chip *chip;
	struct mmc_host *mmc;
	struct sdhci_host *host;

	chip = pci_get_drvdata(pdev);
	host = chip->hosts[slot];
	mmc = host->mmc;

	chip->hosts[slot] = NULL;

	mmc_remove_host(mmc);

	/* Silence the hardware so no further interrupts arrive. */
	sdhci_reset(host, SDHCI_RESET_ALL);

	free_irq(host->irq, host);

	del_timer_sync(&host->timer);

	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);

	iounmap(host->ioaddr);

	pci_release_region(pdev, host->bar);

	mmc_free_host(mmc);
}
1405
/*
 * PCI probe entry point: discover how many slots the controller
 * exposes, allocate the per-chip structure (with its trailing host
 * pointer array), and probe each slot. A failure in any slot unwinds
 * the slots already probed.
 */
static int __devinit sdhci_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	int ret, i;
	u8 slots, rev;
	struct sdhci_chip *chip;

	BUG_ON(pdev == NULL);
	BUG_ON(ent == NULL);

	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev);

	printk(KERN_INFO DRIVER_NAME
		": SDHCI controller found at %s [%04x:%04x] (rev %x)\n",
		pci_name(pdev), (int)pdev->vendor, (int)pdev->device,
		(int)rev);

	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
	if (ret)
		return ret;

	/* The register encodes "number of slots minus one". */
	slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
	DBG("found %d slot(s)\n", slots);
	if (slots == 0)
		return -ENODEV;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	/* One allocation: chip struct plus its host pointer array. */
	chip = kzalloc(sizeof(struct sdhci_chip) +
		sizeof(struct sdhci_host*) * slots, GFP_KERNEL);
	if (!chip) {
		ret = -ENOMEM;
		goto err;
	}

	chip->pdev = pdev;
	chip->quirks = ent->driver_data;

	/* Module parameter overrides the id-table quirks entirely. */
	if (debug_quirks)
		chip->quirks = debug_quirks;

	chip->num_slots = slots;
	pci_set_drvdata(pdev, chip);

	for (i = 0;i < slots;i++) {
		ret = sdhci_probe_slot(pdev, i);
		if (ret) {
			/* Unwind the slots that probed successfully. */
			for (i--;i >= 0;i--)
				sdhci_remove_slot(pdev, i);
			goto free;
		}
	}

	return 0;

free:
	pci_set_drvdata(pdev, NULL);
	kfree(chip);

err:
	pci_disable_device(pdev);
	return ret;
}
1471
1472static void __devexit sdhci_remove(struct pci_dev *pdev)
1473{
1474 int i;
1475 struct sdhci_chip *chip;
1476
1477 chip = pci_get_drvdata(pdev);
1478
1479 if (chip) {
1480 for (i = 0;i < chip->num_slots;i++)
1481 sdhci_remove_slot(pdev, i);
1482
1483 pci_set_drvdata(pdev, NULL);
1484
1485 kfree(chip);
1486 }
1487
1488 pci_disable_device(pdev);
1489}
1490
/* PCI driver glue; pci_ids is declared earlier in this file. */
static struct pci_driver sdhci_driver = {
	.name = 	DRIVER_NAME,
	.id_table =	pci_ids,
	.probe = 	sdhci_probe,
	.remove =	__devexit_p(sdhci_remove),
	.suspend =	sdhci_suspend,
	.resume	=	sdhci_resume,
};
1499
1500/*****************************************************************************\
1501 * *
1502 * Driver init/exit *
1503 * *
1504\*****************************************************************************/
1505
/* Module init: announce the driver and register it with the PCI core. */
static int __init sdhci_drv_init(void)
{
	printk(KERN_INFO DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return pci_register_driver(&sdhci_driver);
}
1514
/* Module exit: unregister the PCI driver (removes all devices). */
static void __exit sdhci_drv_exit(void)
{
	DBG("Exiting\n");

	pci_unregister_driver(&sdhci_driver);
}
1521
module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

/* Debug knobs, read-only via sysfs; see MODULE_PARM_DESC below. */
module_param(debug_nodma, uint, 0444);
module_param(debug_forcedma, uint, 0444);
module_param(debug_quirks, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_nodma, "Forcefully disable DMA transfers. (default 0)");
MODULE_PARM_DESC(debug_forcedma, "Forcefully enable DMA transfers. (default 0)");
MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
new file mode 100644
index 000000000000..7400f4bc114f
--- /dev/null
+++ b/drivers/mmc/host/sdhci.h
@@ -0,0 +1,210 @@
1/*
2 * linux/drivers/mmc/sdhci.h - Secure Digital Host Controller Interface driver
3 *
4 * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 */
11
12/*
13 * PCI registers
14 */
15
16#define PCI_SDHCI_IFPIO 0x00
17#define PCI_SDHCI_IFDMA 0x01
18#define PCI_SDHCI_IFVENDOR 0x02
19
20#define PCI_SLOT_INFO 0x40 /* 8 bits */
21#define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7)
22#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07
23
24/*
25 * Controller registers
26 */
27
28#define SDHCI_DMA_ADDRESS 0x00
29
30#define SDHCI_BLOCK_SIZE 0x04
31#define SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF))
32
33#define SDHCI_BLOCK_COUNT 0x06
34
35#define SDHCI_ARGUMENT 0x08
36
37#define SDHCI_TRANSFER_MODE 0x0C
38#define SDHCI_TRNS_DMA 0x01
39#define SDHCI_TRNS_BLK_CNT_EN 0x02
40#define SDHCI_TRNS_ACMD12 0x04
41#define SDHCI_TRNS_READ 0x10
42#define SDHCI_TRNS_MULTI 0x20
43
44#define SDHCI_COMMAND 0x0E
45#define SDHCI_CMD_RESP_MASK 0x03
46#define SDHCI_CMD_CRC 0x08
47#define SDHCI_CMD_INDEX 0x10
48#define SDHCI_CMD_DATA 0x20
49
50#define SDHCI_CMD_RESP_NONE 0x00
51#define SDHCI_CMD_RESP_LONG 0x01
52#define SDHCI_CMD_RESP_SHORT 0x02
53#define SDHCI_CMD_RESP_SHORT_BUSY 0x03
54
55#define SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff))
56
57#define SDHCI_RESPONSE 0x10
58
59#define SDHCI_BUFFER 0x20
60
61#define SDHCI_PRESENT_STATE 0x24
62#define SDHCI_CMD_INHIBIT 0x00000001
63#define SDHCI_DATA_INHIBIT 0x00000002
64#define SDHCI_DOING_WRITE 0x00000100
65#define SDHCI_DOING_READ 0x00000200
66#define SDHCI_SPACE_AVAILABLE 0x00000400
67#define SDHCI_DATA_AVAILABLE 0x00000800
68#define SDHCI_CARD_PRESENT 0x00010000
69#define SDHCI_WRITE_PROTECT 0x00080000
70
71#define SDHCI_HOST_CONTROL 0x28
72#define SDHCI_CTRL_LED 0x01
73#define SDHCI_CTRL_4BITBUS 0x02
74#define SDHCI_CTRL_HISPD 0x04
75
76#define SDHCI_POWER_CONTROL 0x29
77#define SDHCI_POWER_ON 0x01
78#define SDHCI_POWER_180 0x0A
79#define SDHCI_POWER_300 0x0C
80#define SDHCI_POWER_330 0x0E
81
82#define SDHCI_BLOCK_GAP_CONTROL 0x2A
83
84#define SDHCI_WALK_UP_CONTROL 0x2B
85
86#define SDHCI_CLOCK_CONTROL 0x2C
87#define SDHCI_DIVIDER_SHIFT 8
88#define SDHCI_CLOCK_CARD_EN 0x0004
89#define SDHCI_CLOCK_INT_STABLE 0x0002
90#define SDHCI_CLOCK_INT_EN 0x0001
91
92#define SDHCI_TIMEOUT_CONTROL 0x2E
93
94#define SDHCI_SOFTWARE_RESET 0x2F
95#define SDHCI_RESET_ALL 0x01
96#define SDHCI_RESET_CMD 0x02
97#define SDHCI_RESET_DATA 0x04
98
99#define SDHCI_INT_STATUS 0x30
100#define SDHCI_INT_ENABLE 0x34
101#define SDHCI_SIGNAL_ENABLE 0x38
102#define SDHCI_INT_RESPONSE 0x00000001
103#define SDHCI_INT_DATA_END 0x00000002
104#define SDHCI_INT_DMA_END 0x00000008
105#define SDHCI_INT_SPACE_AVAIL 0x00000010
106#define SDHCI_INT_DATA_AVAIL 0x00000020
107#define SDHCI_INT_CARD_INSERT 0x00000040
108#define SDHCI_INT_CARD_REMOVE 0x00000080
109#define SDHCI_INT_CARD_INT 0x00000100
110#define SDHCI_INT_TIMEOUT 0x00010000
111#define SDHCI_INT_CRC 0x00020000
112#define SDHCI_INT_END_BIT 0x00040000
113#define SDHCI_INT_INDEX 0x00080000
114#define SDHCI_INT_DATA_TIMEOUT 0x00100000
115#define SDHCI_INT_DATA_CRC 0x00200000
116#define SDHCI_INT_DATA_END_BIT 0x00400000
117#define SDHCI_INT_BUS_POWER 0x00800000
118#define SDHCI_INT_ACMD12ERR 0x01000000
119
120#define SDHCI_INT_NORMAL_MASK 0x00007FFF
121#define SDHCI_INT_ERROR_MASK 0xFFFF8000
122
123#define SDHCI_INT_CMD_MASK (SDHCI_INT_RESPONSE | SDHCI_INT_TIMEOUT | \
124 SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX)
125#define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
126 SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
127 SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
128 SDHCI_INT_DATA_END_BIT)
129
130#define SDHCI_ACMD12_ERR 0x3C
131
132/* 3E-3F reserved */
133
134#define SDHCI_CAPABILITIES 0x40
135#define SDHCI_TIMEOUT_CLK_MASK 0x0000003F
136#define SDHCI_TIMEOUT_CLK_SHIFT 0
137#define SDHCI_TIMEOUT_CLK_UNIT 0x00000080
138#define SDHCI_CLOCK_BASE_MASK 0x00003F00
139#define SDHCI_CLOCK_BASE_SHIFT 8
140#define SDHCI_MAX_BLOCK_MASK 0x00030000
141#define SDHCI_MAX_BLOCK_SHIFT 16
142#define SDHCI_CAN_DO_HISPD 0x00200000
143#define SDHCI_CAN_DO_DMA 0x00400000
144#define SDHCI_CAN_VDD_330 0x01000000
145#define SDHCI_CAN_VDD_300 0x02000000
146#define SDHCI_CAN_VDD_180 0x04000000
147
148/* 44-47 reserved for more caps */
149
150#define SDHCI_MAX_CURRENT 0x48
151
152/* 4C-4F reserved for more max current */
153
154/* 50-FB reserved */
155
156#define SDHCI_SLOT_INT_STATUS 0xFC
157
158#define SDHCI_HOST_VERSION 0xFE
159#define SDHCI_VENDOR_VER_MASK 0xFF00
160#define SDHCI_VENDOR_VER_SHIFT 8
161#define SDHCI_SPEC_VER_MASK 0x00FF
162#define SDHCI_SPEC_VER_SHIFT 0
163
164struct sdhci_chip;
165
/* Per-slot state; lives in the private area of its mmc_host. */
struct sdhci_host {
	struct sdhci_chip	*chip;
	struct mmc_host		*mmc;		/* MMC structure */

	spinlock_t		lock;		/* Mutex */

	int			flags;		/* Host attributes */
#define SDHCI_USE_DMA		(1<<0)

	/* Note: sdhci.c stores these in Hz (caps value * 1000000),
	 * not MHz — see sdhci_probe_slot(). */
	unsigned int		max_clk;	/* Max possible freq (Hz) */
	unsigned int		timeout_clk;	/* Timeout freq (KHz) */

	unsigned int		clock;		/* Current clock (Hz) */
	unsigned short		power;		/* Current voltage */

	struct mmc_request	*mrq;		/* Current request */
	struct mmc_command	*cmd;		/* Current command */
	struct mmc_data		*data;		/* Current data request */

	/* PIO bookkeeping for sdhci_transfer_pio(). */
	struct scatterlist	*cur_sg;	/* We're working on this */
	int			num_sg;		/* Entries left */
	int			offset;		/* Offset into current sg */
	int			remain;		/* Bytes left in current */

	char			slot_descr[20];	/* Name for reservations */

	int			irq;		/* Device IRQ */
	int			bar;		/* PCI BAR index */
	unsigned long		addr;		/* Bus address */
	void __iomem *		ioaddr;		/* Mapped address */

	struct tasklet_struct	card_tasklet;	/* Tasklet structures */
	struct tasklet_struct	finish_tasklet;

	struct timer_list	timer;		/* Timer for timeouts */
};
202
/* Per-PCI-device state; allocated with room for num_slots hosts. */
struct sdhci_chip {
	struct pci_dev		*pdev;

	unsigned long		quirks;		/* SDHCI_QUIRK_* workarounds */

	int			num_slots;	/* Slots on controller */
	struct sdhci_host	*hosts[0];	/* Pointers to hosts */
};
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
new file mode 100644
index 000000000000..7511f961c67b
--- /dev/null
+++ b/drivers/mmc/host/tifm_sd.c
@@ -0,0 +1,1102 @@
1/*
2 * tifm_sd.c - TI FlashMedia driver
3 *
4 * Copyright (C) 2006 Alex Dubov <oakad@yahoo.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Special thanks to Brad Campbell for extensive testing of this driver.
11 *
12 */
13
14
15#include <linux/tifm.h>
16#include <linux/mmc/host.h>
17#include <linux/highmem.h>
18#include <linux/scatterlist.h>
19#include <asm/io.h>
20
21#define DRIVER_NAME "tifm_sd"
22#define DRIVER_VERSION "0.8"
23
/* Module parameters; 0644 makes them root-writable via sysfs. */
static int no_dma = 0;		/* force PIO through the buffer FIFO instead of DMA */
static int fixed_timeout = 0;	/* keep the reset-time data timeout, ignore per-request values */
module_param(no_dma, bool, 0644);
module_param(fixed_timeout, bool, 0644);
28
29/* Constants here are mostly from OMAP5912 datasheet */
30#define TIFM_MMCSD_RESET 0x0002
31#define TIFM_MMCSD_CLKMASK 0x03ff
32#define TIFM_MMCSD_POWER 0x0800
33#define TIFM_MMCSD_4BBUS 0x8000
34#define TIFM_MMCSD_RXDE 0x8000 /* rx dma enable */
35#define TIFM_MMCSD_TXDE 0x0080 /* tx dma enable */
36#define TIFM_MMCSD_BUFINT 0x0c00 /* set bits: AE, AF */
37#define TIFM_MMCSD_DPE 0x0020 /* data timeout counted in kilocycles */
38#define TIFM_MMCSD_INAB 0x0080 /* abort / initialize command */
39#define TIFM_MMCSD_READ 0x8000
40
41#define TIFM_MMCSD_ERRMASK 0x01e0 /* set bits: CCRC, CTO, DCRC, DTO */
42#define TIFM_MMCSD_EOC 0x0001 /* end of command phase */
43#define TIFM_MMCSD_CD 0x0002 /* card detect */
44#define TIFM_MMCSD_CB 0x0004 /* card enter busy state */
45#define TIFM_MMCSD_BRS 0x0008 /* block received/sent */
46#define TIFM_MMCSD_EOFB 0x0010 /* card exit busy state */
47#define TIFM_MMCSD_DTO 0x0020 /* data time-out */
48#define TIFM_MMCSD_DCRC 0x0040 /* data crc error */
49#define TIFM_MMCSD_CTO 0x0080 /* command time-out */
50#define TIFM_MMCSD_CCRC 0x0100 /* command crc error */
51#define TIFM_MMCSD_AF 0x0400 /* fifo almost full */
52#define TIFM_MMCSD_AE 0x0800 /* fifo almost empty */
53#define TIFM_MMCSD_OCRB 0x1000 /* OCR busy */
54#define TIFM_MMCSD_CIRQ 0x2000 /* card irq (cmd40/sdio) */
55#define TIFM_MMCSD_CERR 0x4000 /* card status error */
56
57#define TIFM_MMCSD_ODTO 0x0040 /* open drain / extended timeout */
58#define TIFM_MMCSD_CARD_RO 0x0200 /* card is read-only */
59
60#define TIFM_MMCSD_FIFO_SIZE 0x0020
61
62#define TIFM_MMCSD_RSP_R0 0x0000
63#define TIFM_MMCSD_RSP_R1 0x0100
64#define TIFM_MMCSD_RSP_R2 0x0200
65#define TIFM_MMCSD_RSP_R3 0x0300
66#define TIFM_MMCSD_RSP_R4 0x0400
67#define TIFM_MMCSD_RSP_R5 0x0500
68#define TIFM_MMCSD_RSP_R6 0x0600
69
70#define TIFM_MMCSD_RSP_BUSY 0x0800
71
72#define TIFM_MMCSD_CMD_BC 0x0000
73#define TIFM_MMCSD_CMD_BCR 0x1000
74#define TIFM_MMCSD_CMD_AC 0x2000
75#define TIFM_MMCSD_CMD_ADTC 0x3000
76
77#define TIFM_MMCSD_MAX_BLOCK_SIZE 0x0800UL
78
/* Software state bits kept in tifm_sd.cmd_flags */
enum {
	CMD_READY    = 0x0001,	/* command phase finished (EOC seen) */
	FIFO_READY   = 0x0002,	/* DMA/FIFO transfer finished */
	BRS_READY    = 0x0004,	/* block received/sent reported by host */
	SCMD_ACTIVE  = 0x0008,	/* stop command has been issued */
	SCMD_READY   = 0x0010,	/* stop command finished */
	CARD_BUSY    = 0x0020,	/* card signalled busy */
	DATA_CARRY   = 0x0040	/* odd byte held over in bounce_buf_data[0] */
};
88
/* Per-socket driver state, stored in the mmc_host private area. */
struct tifm_sd {
	struct tifm_dev       *dev;		/* underlying FlashMedia socket */

	unsigned short        eject:1,		/* socket going away; fail new requests */
			      open_drain:1,	/* issue commands in open drain mode */
			      no_dma:1;		/* PIO mode (copy of module parameter) */
	unsigned short        cmd_flags;	/* CMD_READY & friends, see enum above */

	unsigned int          clk_freq;		/* base clock selected in set_ios (Hz) */
	unsigned int          clk_div;		/* divider applied to clk_freq */
	unsigned long         timeout_jiffies;	/* watchdog for stuck requests */

	struct tasklet_struct finish_tasklet;	/* completes requests out of irq context */
	struct timer_list     timer;		/* fires tifm_sd_abort on timeout */
	struct mmc_request    *req;		/* request currently being processed */

	int                   sg_len;		/* mapped scatterlist entries */
	int                   sg_pos;		/* current scatterlist entry */
	unsigned int          block_pos;	/* byte position inside current entry */
	struct scatterlist    bounce_buf;	/* single-block DMA bounce buffer */
	unsigned char         bounce_buf_data[TIFM_MMCSD_MAX_BLOCK_SIZE];
};
111
112/* for some reason, host won't respond correctly to readw/writew */
/*
 * PIO-read cnt bytes from the data FIFO into the given page at offset off.
 * The FIFO yields 16 bits per readl; when cnt is odd the surplus high byte
 * is stashed in bounce_buf_data[0] with DATA_CARRY set, and consumed first
 * on the next call.  Caller must hold the page mapping constraints of
 * kmap_atomic (irqs effectively off in this context).
 */
/* for some reason, host won't respond correctly to readw/writew */
static void tifm_sd_read_fifo(struct tifm_sd *host, struct page *pg,
			      unsigned int off, unsigned int cnt)
{
	struct tifm_dev *sock = host->dev;
	unsigned char *buf;
	unsigned int pos = 0, val;

	buf = kmap_atomic(pg, KM_BIO_DST_IRQ) + off;
	if (host->cmd_flags & DATA_CARRY) {
		/* leftover byte from the previous (odd-sized) chunk */
		buf[pos++] = host->bounce_buf_data[0];
		host->cmd_flags &= ~DATA_CARRY;
	}

	while (pos < cnt) {
		val = readl(sock->addr + SOCK_MMCSD_DATA);
		buf[pos++] = val & 0xff;
		if (pos == cnt) {
			/* odd count: remember the high byte for next time */
			host->bounce_buf_data[0] = (val >> 8) & 0xff;
			host->cmd_flags |= DATA_CARRY;
			break;
		}
		buf[pos++] = (val >> 8) & 0xff;
	}
	kunmap_atomic(buf - off, KM_BIO_DST_IRQ);
}
138
/*
 * PIO-write cnt bytes from the given page at offset off into the data FIFO.
 * Mirror image of tifm_sd_read_fifo: two bytes go out per writel; an odd
 * trailing byte is kept in bounce_buf_data[0] with DATA_CARRY set and is
 * paired with the first byte of the next chunk.
 */
static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg,
			       unsigned int off, unsigned int cnt)
{
	struct tifm_dev *sock = host->dev;
	unsigned char *buf;
	unsigned int pos = 0, val;

	buf = kmap_atomic(pg, KM_BIO_SRC_IRQ) + off;
	if (host->cmd_flags & DATA_CARRY) {
		/* complete the 16-bit word started by the previous chunk */
		val = host->bounce_buf_data[0] | ((buf[pos++] << 8) & 0xff00);
		writel(val, sock->addr + SOCK_MMCSD_DATA);
		host->cmd_flags &= ~DATA_CARRY;
	}

	while (pos < cnt) {
		val = buf[pos++];
		if (pos == cnt) {
			/* odd count: hold the low byte until more data arrives */
			host->bounce_buf_data[0] = val & 0xff;
			host->cmd_flags |= DATA_CARRY;
			break;
		}
		val |= (buf[pos++] << 8) & 0xff00;
		writel(val, sock->addr + SOCK_MMCSD_DATA);
	}
	kunmap_atomic(buf - off, KM_BIO_SRC_IRQ);
}
165
166static void tifm_sd_transfer_data(struct tifm_sd *host)
167{
168 struct mmc_data *r_data = host->req->cmd->data;
169 struct scatterlist *sg = r_data->sg;
170 unsigned int off, cnt, t_size = TIFM_MMCSD_FIFO_SIZE * 2;
171 unsigned int p_off, p_cnt;
172 struct page *pg;
173
174 if (host->sg_pos == host->sg_len)
175 return;
176 while (t_size) {
177 cnt = sg[host->sg_pos].length - host->block_pos;
178 if (!cnt) {
179 host->block_pos = 0;
180 host->sg_pos++;
181 if (host->sg_pos == host->sg_len) {
182 if ((r_data->flags & MMC_DATA_WRITE)
183 && DATA_CARRY)
184 writel(host->bounce_buf_data[0],
185 host->dev->addr
186 + SOCK_MMCSD_DATA);
187
188 return;
189 }
190 cnt = sg[host->sg_pos].length;
191 }
192 off = sg[host->sg_pos].offset + host->block_pos;
193
194 pg = nth_page(sg[host->sg_pos].page, off >> PAGE_SHIFT);
195 p_off = offset_in_page(off);
196 p_cnt = PAGE_SIZE - p_off;
197 p_cnt = min(p_cnt, cnt);
198 p_cnt = min(p_cnt, t_size);
199
200 if (r_data->flags & MMC_DATA_READ)
201 tifm_sd_read_fifo(host, pg, p_off, p_cnt);
202 else if (r_data->flags & MMC_DATA_WRITE)
203 tifm_sd_write_fifo(host, pg, p_off, p_cnt);
204
205 t_size -= p_cnt;
206 host->block_pos += p_cnt;
207 }
208}
209
/*
 * Copy count bytes between two (possibly high) pages, mapping both with
 * kmap_atomic.  Used to shuttle data between the scatterlist and the
 * bounce buffer page.
 */
static void tifm_sd_copy_page(struct page *dst, unsigned int dst_off,
			      struct page *src, unsigned int src_off,
			      unsigned int count)
{
	unsigned char *src_buf = kmap_atomic(src, KM_BIO_SRC_IRQ) + src_off;
	unsigned char *dst_buf = kmap_atomic(dst, KM_BIO_DST_IRQ) + dst_off;

	memcpy(dst_buf, src_buf, count);

	/* unmap in reverse order of mapping */
	kunmap_atomic(dst_buf - dst_off, KM_BIO_DST_IRQ);
	kunmap_atomic(src_buf - src_off, KM_BIO_SRC_IRQ);
}
222
/*
 * Copy exactly one block (r_data->blksz bytes) between the scatterlist
 * and the bounce buffer, advancing host->sg_pos/block_pos.  Direction
 * follows r_data->flags: writes fill the bounce buffer, reads drain it.
 * Called with interrupts disabled (kmap_atomic requirement).
 */
static void tifm_sd_bounce_block(struct tifm_sd *host, struct mmc_data *r_data)
{
	struct scatterlist *sg = r_data->sg;
	unsigned int t_size = r_data->blksz;
	unsigned int off, cnt;
	unsigned int p_off, p_cnt;
	struct page *pg;

	dev_dbg(&host->dev->dev, "bouncing block\n");
	while (t_size) {
		cnt = sg[host->sg_pos].length - host->block_pos;
		if (!cnt) {
			host->block_pos = 0;
			host->sg_pos++;
			if (host->sg_pos == host->sg_len)
				return;
			cnt = sg[host->sg_pos].length;
		}
		off = sg[host->sg_pos].offset + host->block_pos;

		/* piece bounded by page end, sg entry and remaining block */
		pg = nth_page(sg[host->sg_pos].page, off >> PAGE_SHIFT);
		p_off = offset_in_page(off);
		p_cnt = PAGE_SIZE - p_off;
		p_cnt = min(p_cnt, cnt);
		p_cnt = min(p_cnt, t_size);

		if (r_data->flags & MMC_DATA_WRITE)
			tifm_sd_copy_page(host->bounce_buf.page,
					  r_data->blksz - t_size,
					  pg, p_off, p_cnt);
		else if (r_data->flags & MMC_DATA_READ)
			tifm_sd_copy_page(pg, p_off, host->bounce_buf.page,
					  r_data->blksz - t_size, p_cnt);

		t_size -= p_cnt;
		host->block_pos += p_cnt;
	}
}
261
/*
 * Program the DMA engine for the next chunk of the transfer.
 *
 * Whole blocks are DMAed straight from/to the scatterlist; a block that
 * straddles an sg entry boundary goes through the single-block bounce
 * buffer instead (DATA_CARRY marks a pending read bounce that must be
 * copied out first on the following call).
 *
 * Returns 1 when the whole request has been consumed and no DMA was
 * started, 0 when a DMA transfer was set up.  Called under sock->lock.
 */
static int tifm_sd_set_dma_data(struct tifm_sd *host, struct mmc_data *r_data)
{
	struct tifm_dev *sock = host->dev;
	unsigned int t_size = TIFM_DMA_TSIZE * r_data->blksz;
	unsigned int dma_len, dma_blk_cnt, dma_off;
	struct scatterlist *sg = NULL;
	unsigned long flags;

	if (host->sg_pos == host->sg_len)
		return 1;

	if (host->cmd_flags & DATA_CARRY) {
		/* previous chunk was a bounced read - copy it out now */
		host->cmd_flags &= ~DATA_CARRY;
		local_irq_save(flags);
		tifm_sd_bounce_block(host, r_data);
		local_irq_restore(flags);
		if (host->sg_pos == host->sg_len)
			return 1;
	}

	dma_len = sg_dma_len(&r_data->sg[host->sg_pos]) - host->block_pos;
	if (!dma_len) {
		host->block_pos = 0;
		host->sg_pos++;
		if (host->sg_pos == host->sg_len)
			return 1;
		dma_len = sg_dma_len(&r_data->sg[host->sg_pos]);
	}

	/* as many whole blocks as fit, capped at TIFM_DMA_TSIZE */
	if (dma_len < t_size) {
		dma_blk_cnt = dma_len / r_data->blksz;
		dma_off = host->block_pos;
		host->block_pos += dma_blk_cnt * r_data->blksz;
	} else {
		dma_blk_cnt = TIFM_DMA_TSIZE;
		dma_off = host->block_pos;
		host->block_pos += t_size;
	}

	if (dma_blk_cnt)
		sg = &r_data->sg[host->sg_pos];
	else if (dma_len) {
		/* partial block left - route it through the bounce buffer */
		if (r_data->flags & MMC_DATA_WRITE) {
			local_irq_save(flags);
			tifm_sd_bounce_block(host, r_data);
			local_irq_restore(flags);
		} else
			host->cmd_flags |= DATA_CARRY;

		sg = &host->bounce_buf;
		dma_off = 0;
		dma_blk_cnt = 1;
	} else
		return 1;

	dev_dbg(&sock->dev, "setting dma for %d blocks\n", dma_blk_cnt);
	writel(sg_dma_address(sg) + dma_off, sock->addr + SOCK_DMA_ADDRESS);
	if (r_data->flags & MMC_DATA_WRITE)
		writel((dma_blk_cnt << 8) | TIFM_DMA_TX | TIFM_DMA_EN,
		       sock->addr + SOCK_DMA_CONTROL);
	else
		writel((dma_blk_cnt << 8) | TIFM_DMA_EN,
		       sock->addr + SOCK_DMA_CONTROL);

	return 0;
}
328
329static unsigned int tifm_sd_op_flags(struct mmc_command *cmd)
330{
331 unsigned int rc = 0;
332
333 switch (mmc_resp_type(cmd)) {
334 case MMC_RSP_NONE:
335 rc |= TIFM_MMCSD_RSP_R0;
336 break;
337 case MMC_RSP_R1B:
338 rc |= TIFM_MMCSD_RSP_BUSY; // deliberate fall-through
339 case MMC_RSP_R1:
340 rc |= TIFM_MMCSD_RSP_R1;
341 break;
342 case MMC_RSP_R2:
343 rc |= TIFM_MMCSD_RSP_R2;
344 break;
345 case MMC_RSP_R3:
346 rc |= TIFM_MMCSD_RSP_R3;
347 break;
348 default:
349 BUG();
350 }
351
352 switch (mmc_cmd_type(cmd)) {
353 case MMC_CMD_BC:
354 rc |= TIFM_MMCSD_CMD_BC;
355 break;
356 case MMC_CMD_BCR:
357 rc |= TIFM_MMCSD_CMD_BCR;
358 break;
359 case MMC_CMD_AC:
360 rc |= TIFM_MMCSD_CMD_AC;
361 break;
362 case MMC_CMD_ADTC:
363 rc |= TIFM_MMCSD_CMD_ADTC;
364 break;
365 default:
366 BUG();
367 }
368 return rc;
369}
370
/*
 * Issue a single command to the controller: load the 32-bit argument
 * into the two 16-bit argument registers, then write opcode + flags to
 * the command register (which starts execution).  Called under
 * sock->lock.
 */
static void tifm_sd_exec(struct tifm_sd *host, struct mmc_command *cmd)
{
	struct tifm_dev *sock = host->dev;
	unsigned int cmd_mask = tifm_sd_op_flags(cmd);

	if (host->open_drain)
		cmd_mask |= TIFM_MMCSD_ODTO;

	if (cmd->data && (cmd->data->flags & MMC_DATA_READ))
		cmd_mask |= TIFM_MMCSD_READ;

	dev_dbg(&sock->dev, "executing opcode 0x%x, arg: 0x%x, mask: 0x%x\n",
		cmd->opcode, cmd->arg, cmd_mask);

	writel((cmd->arg >> 16) & 0xffff, sock->addr + SOCK_MMCSD_ARG_HIGH);
	writel(cmd->arg & 0xffff, sock->addr + SOCK_MMCSD_ARG_LOW);
	writel(cmd->opcode | cmd_mask, sock->addr + SOCK_MMCSD_COMMAND);
}
389
390static void tifm_sd_fetch_resp(struct mmc_command *cmd, struct tifm_dev *sock)
391{
392 cmd->resp[0] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x1c) << 16)
393 | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x18);
394 cmd->resp[1] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x14) << 16)
395 | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x10);
396 cmd->resp[2] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x0c) << 16)
397 | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x08);
398 cmd->resp[3] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x04) << 16)
399 | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x00);
400}
401
/*
 * Completion state machine: decide whether the current request is done.
 *
 * A request finishes when the command phase (CMD_READY), the data phase
 * (BRS_READY and, for DMA, FIFO_READY), any stop command and any card
 * busy period have all completed, or as soon as an error is recorded.
 * When everything is ready the finish tasklet is scheduled; otherwise
 * this returns and waits for the next interrupt.  Called under
 * sock->lock.
 */
static void tifm_sd_check_status(struct tifm_sd *host)
{
	struct tifm_dev *sock = host->dev;
	struct mmc_command *cmd = host->req->cmd;

	if (cmd->error != MMC_ERR_NONE)
		goto finish_request;

	if (!(host->cmd_flags & CMD_READY))
		return;

	if (cmd->data) {
		if (cmd->data->error != MMC_ERR_NONE) {
			/* let an already-issued stop command finish first */
			if ((host->cmd_flags & SCMD_ACTIVE)
			    && !(host->cmd_flags & SCMD_READY))
				return;

			goto finish_request;
		}

		if (!(host->cmd_flags & BRS_READY))
			return;

		/* in DMA mode the FIFO must have drained as well */
		if (!(host->no_dma || (host->cmd_flags & FIFO_READY)))
			return;

		if (cmd->data->flags & MMC_DATA_WRITE) {
			if (host->req->stop) {
				if (!(host->cmd_flags & SCMD_ACTIVE)) {
					/* issue the stop; watch for busy release */
					host->cmd_flags |= SCMD_ACTIVE;
					writel(TIFM_MMCSD_EOFB
					       | readl(sock->addr
						       + SOCK_MMCSD_INT_ENABLE),
					       sock->addr
					       + SOCK_MMCSD_INT_ENABLE);
					tifm_sd_exec(host, host->req->stop);
					return;
				} else {
					if (!(host->cmd_flags & SCMD_READY)
					    || (host->cmd_flags & CARD_BUSY))
						return;
					/* stop done, busy released - mask EOFB again */
					writel((~TIFM_MMCSD_EOFB)
					       & readl(sock->addr
						       + SOCK_MMCSD_INT_ENABLE),
					       sock->addr
					       + SOCK_MMCSD_INT_ENABLE);
				}
			} else {
				if (host->cmd_flags & CARD_BUSY)
					return;
				writel((~TIFM_MMCSD_EOFB)
				       & readl(sock->addr
					       + SOCK_MMCSD_INT_ENABLE),
				       sock->addr + SOCK_MMCSD_INT_ENABLE);
			}
		} else {
			if (host->req->stop) {
				if (!(host->cmd_flags & SCMD_ACTIVE)) {
					host->cmd_flags |= SCMD_ACTIVE;
					tifm_sd_exec(host, host->req->stop);
					return;
				} else {
					if (!(host->cmd_flags & SCMD_READY))
						return;
				}
			}
		}
	}
finish_request:
	tasklet_schedule(&host->finish_tasklet);
}
473
474/* Called from interrupt handler */
/*
 * Socket DMA/FIFO event callback, called from the tifm interrupt
 * handler.  On TIFM_FIFO_READY, queues the next DMA chunk; when
 * tifm_sd_set_dma_data() reports the transfer complete, records
 * FIFO_READY and re-runs the completion state machine.  The FIFO status
 * is acknowledged by writing it back.
 */
/* Called from interrupt handler */
static void tifm_sd_data_event(struct tifm_dev *sock)
{
	struct tifm_sd *host;
	unsigned int fifo_status = 0;
	struct mmc_data *r_data = NULL;

	spin_lock(&sock->lock);
	host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock));
	fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS);
	dev_dbg(&sock->dev, "data event: fifo_status %x, flags %x\n",
		fifo_status, host->cmd_flags);

	if (host->req) {
		r_data = host->req->cmd->data;

		if (r_data && (fifo_status & TIFM_FIFO_READY)) {
			if (tifm_sd_set_dma_data(host, r_data)) {
				host->cmd_flags |= FIFO_READY;
				tifm_sd_check_status(host);
			}
		}
	}

	/* acknowledge the events we just handled */
	writel(fifo_status, sock->addr + SOCK_DMA_FIFO_STATUS);
	spin_unlock(&sock->lock);
}
501
502/* Called from interrupt handler */
/*
 * MMC/SD controller event callback, called from the tifm interrupt
 * handler.  Translates hardware status bits into request state:
 * records command/data errors, fetches responses on EOC, handles PIO
 * buffer service (AE/AF/BRS), tracks card busy transitions, and runs
 * the completion state machine.  Status bits are acknowledged by
 * writing them back at the end.
 */
/* Called from interrupt handler */
static void tifm_sd_card_event(struct tifm_dev *sock)
{
	struct tifm_sd *host;
	unsigned int host_status = 0;
	int cmd_error = MMC_ERR_NONE;
	struct mmc_command *cmd = NULL;
	unsigned long flags;

	spin_lock(&sock->lock);
	host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock));
	host_status = readl(sock->addr + SOCK_MMCSD_STATUS);
	dev_dbg(&sock->dev, "host event: host_status %x, flags %x\n",
		host_status, host->cmd_flags);

	if (host->req) {
		cmd = host->req->cmd;

		if (host_status & TIFM_MMCSD_ERRMASK) {
			/* clear the error bits before classifying them */
			writel(host_status & TIFM_MMCSD_ERRMASK,
			       sock->addr + SOCK_MMCSD_STATUS);
			if (host_status & TIFM_MMCSD_CTO)
				cmd_error = MMC_ERR_TIMEOUT;
			else if (host_status & TIFM_MMCSD_CCRC)
				cmd_error = MMC_ERR_BADCRC;

			if (cmd->data) {
				if (host_status & TIFM_MMCSD_DTO)
					cmd->data->error = MMC_ERR_TIMEOUT;
				else if (host_status & TIFM_MMCSD_DCRC)
					cmd->data->error = MMC_ERR_BADCRC;
			}

			/* abort any in-flight DMA */
			writel(TIFM_FIFO_INT_SETALL,
			       sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
			writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL);

			if (host->req->stop) {
				if (host->cmd_flags & SCMD_ACTIVE) {
					/* error belongs to the stop command */
					host->req->stop->error = cmd_error;
					host->cmd_flags |= SCMD_READY;
				} else {
					/* still send stop to leave data state */
					cmd->error = cmd_error;
					host->cmd_flags |= SCMD_ACTIVE;
					tifm_sd_exec(host, host->req->stop);
					goto done;
				}
			} else
				cmd->error = cmd_error;
		} else {
			if (host_status & (TIFM_MMCSD_EOC | TIFM_MMCSD_CERR)) {
				if (!(host->cmd_flags & CMD_READY)) {
					host->cmd_flags |= CMD_READY;
					tifm_sd_fetch_resp(cmd, sock);
				} else if (host->cmd_flags & SCMD_ACTIVE) {
					host->cmd_flags |= SCMD_READY;
					tifm_sd_fetch_resp(host->req->stop,
							   sock);
				}
			}
			if (host_status & TIFM_MMCSD_BRS)
				host->cmd_flags |= BRS_READY;
		}

		if (host->no_dma && cmd->data) {
			/* ack almost-empty early so it can re-trigger */
			if (host_status & TIFM_MMCSD_AE)
				writel(host_status & TIFM_MMCSD_AE,
				       sock->addr + SOCK_MMCSD_STATUS);

			if (host_status & (TIFM_MMCSD_AE | TIFM_MMCSD_AF
					   | TIFM_MMCSD_BRS)) {
				local_irq_save(flags);
				tifm_sd_transfer_data(host);
				local_irq_restore(flags);
				host_status &= ~TIFM_MMCSD_AE;
			}
		}

		if (host_status & TIFM_MMCSD_EOFB)
			host->cmd_flags &= ~CARD_BUSY;
		else if (host_status & TIFM_MMCSD_CB)
			host->cmd_flags |= CARD_BUSY;

		tifm_sd_check_status(host);
	}
done:
	writel(host_status, sock->addr + SOCK_MMCSD_STATUS);
	spin_unlock(&sock->lock);
}
591
/*
 * Program the data timeout register from the request's timeout_ns /
 * timeout_clks, expressed in card clock cycles.  Values above 16 bits
 * switch the counter to kilocycle units (TIFM_MMCSD_DPE); if even that
 * overflows, 0 disables the timeout.  No-op when the fixed_timeout
 * module parameter is set.
 */
static void tifm_sd_set_data_timeout(struct tifm_sd *host,
				     struct mmc_data *data)
{
	struct tifm_dev *sock = host->dev;
	unsigned int data_timeout = data->timeout_clks;

	if (fixed_timeout)
		return;

	/* convert ns to card clock cycles using the current divided clock */
	data_timeout += data->timeout_ns /
			((1000000000UL / host->clk_freq) * host->clk_div);

	if (data_timeout < 0xffff) {
		writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO);
		writel((~TIFM_MMCSD_DPE)
		       & readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
		       sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
	} else {
		data_timeout = (data_timeout >> 10) + 1;
		if (data_timeout > 0xffff)
			data_timeout = 0; /* set to unlimited */
		writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO);
		writel(TIFM_MMCSD_DPE
		       | readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
		       sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
	}
}
619
/*
 * mmc_host_ops.request: start a new MMC request.
 *
 * Sets up the data phase first (timeout, PIO buffer interrupts or DMA
 * scatterlist mapping + FIFO programming), arms the watchdog timer,
 * lights the socket LED and issues the command.  On any setup failure
 * the request is completed immediately with MMC_ERR_TIMEOUT.
 */
static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tifm_sd *host = mmc_priv(mmc);
	struct tifm_dev *sock = host->dev;
	unsigned long flags;
	struct mmc_data *r_data = mrq->cmd->data;

	spin_lock_irqsave(&sock->lock, flags);
	if (host->eject) {
		spin_unlock_irqrestore(&sock->lock, flags);
		goto err_out;
	}

	if (host->req) {
		printk(KERN_ERR "%s : unfinished request detected\n",
		       sock->dev.bus_id);
		spin_unlock_irqrestore(&sock->lock, flags);
		goto err_out;
	}

	/* fresh per-request state */
	host->cmd_flags = 0;
	host->block_pos = 0;
	host->sg_pos = 0;

	if (r_data) {
		tifm_sd_set_data_timeout(host, r_data);

		/* without a stop command, track busy release via EOFB */
		if ((r_data->flags & MMC_DATA_WRITE) && !mrq->stop)
			writel(TIFM_MMCSD_EOFB
			       | readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
			       sock->addr + SOCK_MMCSD_INT_ENABLE);

		if (host->no_dma) {
			/* PIO: enable buffer interrupts, full-FIFO thresholds */
			writel(TIFM_MMCSD_BUFINT
			       | readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
			       sock->addr + SOCK_MMCSD_INT_ENABLE);
			writel(((TIFM_MMCSD_FIFO_SIZE - 1) << 8)
			       | (TIFM_MMCSD_FIFO_SIZE - 1),
			       sock->addr + SOCK_MMCSD_BUFFER_CONFIG);

			host->sg_len = r_data->sg_len;
		} else {
			/* DMA: map the bounce buffer and the scatterlist */
			sg_init_one(&host->bounce_buf, host->bounce_buf_data,
				    r_data->blksz);

			if(1 != tifm_map_sg(sock, &host->bounce_buf, 1,
					    r_data->flags & MMC_DATA_WRITE
					    ? PCI_DMA_TODEVICE
					    : PCI_DMA_FROMDEVICE)) {
				printk(KERN_ERR "%s : scatterlist map failed\n",
				       sock->dev.bus_id);
				spin_unlock_irqrestore(&sock->lock, flags);
				goto err_out;
			}
			host->sg_len = tifm_map_sg(sock, r_data->sg,
						   r_data->sg_len,
						   r_data->flags
						   & MMC_DATA_WRITE
						   ? PCI_DMA_TODEVICE
						   : PCI_DMA_FROMDEVICE);
			if (host->sg_len < 1) {
				printk(KERN_ERR "%s : scatterlist map failed\n",
				       sock->dev.bus_id);
				tifm_unmap_sg(sock, &host->bounce_buf, 1,
					      r_data->flags & MMC_DATA_WRITE
					      ? PCI_DMA_TODEVICE
					      : PCI_DMA_FROMDEVICE);
				spin_unlock_irqrestore(&sock->lock, flags);
				goto err_out;
			}

			/* arm the FIFO and first DMA chunk */
			writel(TIFM_FIFO_INT_SETALL,
			       sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
			writel(ilog2(r_data->blksz) - 2,
			       sock->addr + SOCK_FIFO_PAGE_SIZE);
			writel(TIFM_FIFO_ENABLE,
			       sock->addr + SOCK_FIFO_CONTROL);
			writel(TIFM_FIFO_INTMASK,
			       sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);

			if (r_data->flags & MMC_DATA_WRITE)
				writel(TIFM_MMCSD_TXDE,
				       sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
			else
				writel(TIFM_MMCSD_RXDE,
				       sock->addr + SOCK_MMCSD_BUFFER_CONFIG);

			tifm_sd_set_dma_data(host, r_data);
		}

		/* hardware registers hold count-minus-one values */
		writel(r_data->blocks - 1,
		       sock->addr + SOCK_MMCSD_NUM_BLOCKS);
		writel(r_data->blksz - 1,
		       sock->addr + SOCK_MMCSD_BLOCK_LEN);
	}

	host->req = mrq;
	mod_timer(&host->timer, jiffies + host->timeout_jiffies);
	writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL),
	       sock->addr + SOCK_CONTROL);
	tifm_sd_exec(host, mrq->cmd);
	spin_unlock_irqrestore(&sock->lock, flags);
	return;

err_out:
	mrq->cmd->error = MMC_ERR_TIMEOUT;
	mmc_request_done(mmc, mrq);
}
728
/*
 * Finish tasklet: tear down the completed request.  Cancels the
 * watchdog, undoes the PIO/DMA setup done in tifm_sd_request, computes
 * bytes_xfered from the hardware's remaining block/byte counters, turns
 * the LED off and hands the request back to the MMC core.
 */
static void tifm_sd_end_cmd(unsigned long data)
{
	struct tifm_sd *host = (struct tifm_sd*)data;
	struct tifm_dev *sock = host->dev;
	struct mmc_host *mmc = tifm_get_drvdata(sock);
	struct mmc_request *mrq;
	struct mmc_data *r_data = NULL;
	unsigned long flags;

	spin_lock_irqsave(&sock->lock, flags);

	del_timer(&host->timer);
	mrq = host->req;
	host->req = NULL;

	if (!mrq) {
		printk(KERN_ERR " %s : no request to complete?\n",
		       sock->dev.bus_id);
		spin_unlock_irqrestore(&sock->lock, flags);
		return;
	}

	r_data = mrq->cmd->data;
	if (r_data) {
		if (host->no_dma) {
			/* PIO: disable buffer service interrupts again */
			writel((~TIFM_MMCSD_BUFINT)
			       & readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
			       sock->addr + SOCK_MMCSD_INT_ENABLE);
		} else {
			tifm_unmap_sg(sock, &host->bounce_buf, 1,
				      (r_data->flags & MMC_DATA_WRITE)
				      ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
			tifm_unmap_sg(sock, r_data->sg, r_data->sg_len,
				      (r_data->flags & MMC_DATA_WRITE)
				      ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
		}

		/* counters count down from blocks-1 / blksz-1 */
		r_data->bytes_xfered = r_data->blocks
			- readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1;
		r_data->bytes_xfered *= r_data->blksz;
		r_data->bytes_xfered += r_data->blksz
			- readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) + 1;
	}

	writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL),
	       sock->addr + SOCK_CONTROL);

	spin_unlock_irqrestore(&sock->lock, flags);
	mmc_request_done(mmc, mrq);
}
779
/*
 * Watchdog timer handler: the request has been stuck longer than
 * timeout_jiffies.  Log diagnostics and eject the socket, which makes
 * the core remove and reinsert the device (completing the request on
 * the way down).
 */
static void tifm_sd_abort(unsigned long data)
{
	struct tifm_sd *host = (struct tifm_sd*)data;

	printk(KERN_ERR
	       "%s : card failed to respond for a long period of time "
	       "(%x, %x)\n",
	       host->dev->dev.bus_id, host->req->cmd->opcode, host->cmd_flags);

	tifm_eject(host->dev);
}
791
/*
 * mmc_host_ops.set_ios: apply bus width, clock and bus mode settings.
 *
 * The controller can derive the card clock from either a 20 MHz or a
 * 24 MHz base; both candidate dividers are computed and the combination
 * giving the highest frequency not above the requested one is chosen.
 * vdd / power handling is not implemented here (power is fixed around
 * probe/remove).
 */
static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tifm_sd *host = mmc_priv(mmc);
	struct tifm_dev *sock = host->dev;
	unsigned int clk_div1, clk_div2;
	unsigned long flags;

	spin_lock_irqsave(&sock->lock, flags);

	dev_dbg(&sock->dev, "ios: clock = %u, vdd = %x, bus_mode = %x, "
		"chip_select = %x, power_mode = %x, bus_width = %x\n",
		ios->clock, ios->vdd, ios->bus_mode, ios->chip_select,
		ios->power_mode, ios->bus_width);

	if (ios->bus_width == MMC_BUS_WIDTH_4) {
		writel(TIFM_MMCSD_4BBUS | readl(sock->addr + SOCK_MMCSD_CONFIG),
		       sock->addr + SOCK_MMCSD_CONFIG);
	} else {
		writel((~TIFM_MMCSD_4BBUS)
		       & readl(sock->addr + SOCK_MMCSD_CONFIG),
		       sock->addr + SOCK_MMCSD_CONFIG);
	}

	if (ios->clock) {
		/* candidate dividers from each base clock */
		clk_div1 = 20000000 / ios->clock;
		if (!clk_div1)
			clk_div1 = 1;

		clk_div2 = 24000000 / ios->clock;
		if (!clk_div2)
			clk_div2 = 1;

		/* round up so the result never exceeds the request */
		if ((20000000 / clk_div1) > ios->clock)
			clk_div1++;
		if ((24000000 / clk_div2) > ios->clock)
			clk_div2++;
		if ((20000000 / clk_div1) > (24000000 / clk_div2)) {
			host->clk_freq = 20000000;
			host->clk_div = clk_div1;
			writel((~TIFM_CTRL_FAST_CLK)
			       & readl(sock->addr + SOCK_CONTROL),
			       sock->addr + SOCK_CONTROL);
		} else {
			host->clk_freq = 24000000;
			host->clk_div = clk_div2;
			writel(TIFM_CTRL_FAST_CLK
			       | readl(sock->addr + SOCK_CONTROL),
			       sock->addr + SOCK_CONTROL);
		}
	} else {
		host->clk_div = 0;
	}
	host->clk_div &= TIFM_MMCSD_CLKMASK;
	writel(host->clk_div
	       | ((~TIFM_MMCSD_CLKMASK)
		  & readl(sock->addr + SOCK_MMCSD_CONFIG)),
	       sock->addr + SOCK_MMCSD_CONFIG);

	host->open_drain = (ios->bus_mode == MMC_BUSMODE_OPENDRAIN);

	/* chip_select : maybe later */
	//vdd
	//power is set before probe / after remove

	spin_unlock_irqrestore(&sock->lock, flags);
}
858
859static int tifm_sd_ro(struct mmc_host *mmc)
860{
861 int rc = 0;
862 struct tifm_sd *host = mmc_priv(mmc);
863 struct tifm_dev *sock = host->dev;
864 unsigned long flags;
865
866 spin_lock_irqsave(&sock->lock, flags);
867 if (TIFM_MMCSD_CARD_RO & readl(sock->addr + SOCK_PRESENT_STATE))
868 rc = 1;
869 spin_unlock_irqrestore(&sock->lock, flags);
870 return rc;
871}
872
/* Host operations exported to the MMC core. */
static const struct mmc_host_ops tifm_sd_ops = {
	.request = tifm_sd_request,
	.set_ios = tifm_sd_ios,
	.get_ro = tifm_sd_ro
};
878
/*
 * Reset and bring up the MMC/SD function of the socket: soft-reset the
 * controller, wait for it to come back, then issue the INAB
 * (initialize) command and wait for it to complete before enabling the
 * interrupt sources the driver cares about.  Both waits use
 * exponentially growing msleep intervals, reusing rc as the loop
 * variable (rc == 0 on success).  Returns 0 or -ENODEV.
 */
static int tifm_sd_initialize_host(struct tifm_sd *host)
{
	int rc;
	unsigned int host_status = 0;
	struct tifm_dev *sock = host->dev;

	writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
	mmiowb();
	host->clk_div = 61;
	host->clk_freq = 20000000;
	writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL);
	writel(host->clk_div | TIFM_MMCSD_POWER,
	       sock->addr + SOCK_MMCSD_CONFIG);

	/* wait up to 0.51 sec for reset */
	for (rc = 32; rc <= 256; rc <<= 1) {
		if (1 & readl(sock->addr + SOCK_MMCSD_SYSTEM_STATUS)) {
			rc = 0;
			break;
		}
		msleep(rc);
	}

	if (rc) {
		printk(KERN_ERR "%s : controller failed to reset\n",
		       sock->dev.bus_id);
		return -ENODEV;
	}

	writel(0, sock->addr + SOCK_MMCSD_NUM_BLOCKS);
	writel(host->clk_div | TIFM_MMCSD_POWER,
	       sock->addr + SOCK_MMCSD_CONFIG);
	writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG);

	// command timeout fixed to 64 clocks for now
	writel(64, sock->addr + SOCK_MMCSD_COMMAND_TO);
	writel(TIFM_MMCSD_INAB, sock->addr + SOCK_MMCSD_COMMAND);

	/* wait for the initialize command to finish without errors */
	for (rc = 16; rc <= 64; rc <<= 1) {
		host_status = readl(sock->addr + SOCK_MMCSD_STATUS);
		writel(host_status, sock->addr + SOCK_MMCSD_STATUS);
		if (!(host_status & TIFM_MMCSD_ERRMASK)
		    && (host_status & TIFM_MMCSD_EOC)) {
			rc = 0;
			break;
		}
		msleep(rc);
	}

	if (rc) {
		printk(KERN_ERR
		       "%s : card not ready - probe failed on initialization\n",
		       sock->dev.bus_id);
		return -ENODEV;
	}

	writel(TIFM_MMCSD_CERR | TIFM_MMCSD_BRS | TIFM_MMCSD_EOC
	       | TIFM_MMCSD_ERRMASK,
	       sock->addr + SOCK_MMCSD_INT_ENABLE);
	mmiowb();

	return 0;
}
942
/*
 * Socket probe: allocate the mmc_host, initialize driver state, describe
 * the host's capabilities to the MMC core, install the event callbacks,
 * initialize the hardware and register the host.  On failure the
 * mmc_host is freed and an errno is returned.
 */
static int tifm_sd_probe(struct tifm_dev *sock)
{
	struct mmc_host *mmc;
	struct tifm_sd *host;
	int rc = -EIO;

	if (!(TIFM_SOCK_STATE_OCCUPIED
	      & readl(sock->addr + SOCK_PRESENT_STATE))) {
		printk(KERN_WARNING "%s : card gone, unexpectedly\n",
		       sock->dev.bus_id);
		return rc;
	}

	mmc = mmc_alloc_host(sizeof(struct tifm_sd), &sock->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	host->no_dma = no_dma;
	tifm_set_drvdata(sock, mmc);
	host->dev = sock;
	host->timeout_jiffies = msecs_to_jiffies(1000);

	tasklet_init(&host->finish_tasklet, tifm_sd_end_cmd,
		     (unsigned long)host);
	setup_timer(&host->timer, tifm_sd_abort, (unsigned long)host);

	mmc->ops = &tifm_sd_ops;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE;
	mmc->f_min = 20000000 / 60;
	mmc->f_max = 24000000;

	/* transfer geometry limits advertised to the block layer */
	mmc->max_blk_count = 2048;
	mmc->max_hw_segs = mmc->max_blk_count;
	mmc->max_blk_size = min(TIFM_MMCSD_MAX_BLOCK_SIZE, PAGE_SIZE);
	mmc->max_seg_size = mmc->max_blk_count * mmc->max_blk_size;
	mmc->max_req_size = mmc->max_seg_size;
	mmc->max_phys_segs = mmc->max_hw_segs;

	sock->card_event = tifm_sd_card_event;
	sock->data_event = tifm_sd_data_event;
	rc = tifm_sd_initialize_host(host);

	if (!rc)
		rc = mmc_add_host(mmc);
	if (!rc)
		return 0;

	mmc_free_host(mmc);
	return rc;
}
995
/*
 * Socket removal: stop taking new requests (eject flag + irq disable),
 * wait out the finish tasklet, fail any request still in flight with
 * MMC_ERR_TIMEOUT, then unregister from the MMC core and power the
 * socket interface down.
 */
static void tifm_sd_remove(struct tifm_dev *sock)
{
	struct mmc_host *mmc = tifm_get_drvdata(sock);
	struct tifm_sd *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&sock->lock, flags);
	host->eject = 1;
	writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
	mmiowb();
	spin_unlock_irqrestore(&sock->lock, flags);

	tasklet_kill(&host->finish_tasklet);

	spin_lock_irqsave(&sock->lock, flags);
	if (host->req) {
		/* complete the stuck request as a timeout */
		writel(TIFM_FIFO_INT_SETALL,
		       sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
		writel(0, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
		host->req->cmd->error = MMC_ERR_TIMEOUT;
		if (host->req->stop)
			host->req->stop->error = MMC_ERR_TIMEOUT;
		tasklet_schedule(&host->finish_tasklet);
	}
	spin_unlock_irqrestore(&sock->lock, flags);
	mmc_remove_host(mmc);
	dev_dbg(&sock->dev, "after remove\n");

	/* The meaning of the bit majority in this constant is unknown. */
	writel(0xfff8 & readl(sock->addr + SOCK_CONTROL),
	       sock->addr + SOCK_CONTROL);

	mmc_free_host(mmc);
}
1030
1031#ifdef CONFIG_PM
1032
/*
 * Suspend: let the MMC core quiesce the card, then power the socket
 * interface down (same control-register write as in remove).
 */
static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)
{
	struct mmc_host *mmc = tifm_get_drvdata(sock);
	int rc;

	rc = mmc_suspend_host(mmc, state);
	/* The meaning of the bit majority in this constant is unknown. */
	writel(0xfff8 & readl(sock->addr + SOCK_CONTROL),
	       sock->addr + SOCK_CONTROL);
	return rc;
}
1044
/*
 * Resume: re-initialize the controller from scratch; if that fails the
 * socket is marked for ejection instead of resuming the card.
 */
static int tifm_sd_resume(struct tifm_dev *sock)
{
	struct mmc_host *mmc = tifm_get_drvdata(sock);
	struct tifm_sd *host = mmc_priv(mmc);
	int rc;

	rc = tifm_sd_initialize_host(host);
	dev_dbg(&sock->dev, "resume initialize %d\n", rc);

	if (rc)
		host->eject = 1;
	else
		rc = mmc_resume_host(mmc);

	return rc;
}
1061
1062#else
1063
1064#define tifm_sd_suspend NULL
1065#define tifm_sd_resume NULL
1066
1067#endif /* CONFIG_PM */
1068
/* Socket types handled by this driver (SD/MMC sockets only). */
static struct tifm_device_id tifm_sd_id_tbl[] = {
	{ TIFM_TYPE_SD }, { }
};
1072
/* Driver registration with the tifm bus. */
static struct tifm_driver tifm_sd_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE
	},
	.id_table = tifm_sd_id_tbl,
	.probe = tifm_sd_probe,
	.remove = tifm_sd_remove,
	.suspend = tifm_sd_suspend,
	.resume = tifm_sd_resume
};
1084
/* Module entry point: register with the tifm bus. */
static int __init tifm_sd_init(void)
{
	return tifm_register_driver(&tifm_sd_driver);
}
1089
/* Module exit point: unregister from the tifm bus. */
static void __exit tifm_sd_exit(void)
{
	tifm_unregister_driver(&tifm_sd_driver);
}
1094
1095MODULE_AUTHOR("Alex Dubov");
1096MODULE_DESCRIPTION("TI FlashMedia SD driver");
1097MODULE_LICENSE("GPL");
1098MODULE_DEVICE_TABLE(tifm, tifm_sd_id_tbl);
1099MODULE_VERSION(DRIVER_VERSION);
1100
1101module_init(tifm_sd_init);
1102module_exit(tifm_sd_exit);
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
new file mode 100644
index 000000000000..867ca6a69298
--- /dev/null
+++ b/drivers/mmc/host/wbsd.c
@@ -0,0 +1,2061 @@
1/*
2 * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver
3 *
4 * Copyright (C) 2004-2007 Pierre Ossman, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 *
12 * Warning!
13 *
14 * Changes to the FIFO system should be done with extreme care since
15 * the hardware is full of bugs related to the FIFO. Known issues are:
16 *
17 * - FIFO size field in FSR is always zero.
18 *
19 * - FIFO interrupts tend not to work as they should. Interrupts are
20 * triggered only for full/empty events, not for threshold values.
21 *
22 * - On APIC systems the FIFO empty interrupt is sometimes lost.
23 */
24
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/platform_device.h>
30#include <linux/interrupt.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/pnp.h>
34#include <linux/highmem.h>
35#include <linux/mmc/host.h>
36
37#include <asm/io.h>
38#include <asm/dma.h>
39#include <asm/scatterlist.h>
40
41#include "wbsd.h"
42
#define DRIVER_NAME "wbsd"

/* Debug helpers; DBGF additionally prefixes the calling function's name. */
#define DBG(x...) \
	pr_debug(DRIVER_NAME ": " x)
#define DBGF(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
49
50/*
51 * Device resources
52 */
53
#ifdef CONFIG_PNP

/* PnP IDs for the Winbond W83L51xD family handled by this driver. */
static const struct pnp_device_id pnp_dev_table[] = {
	{ "WEC0517", 0 },
	{ "WEC0518", 0 },
	{ "", 0 },
};

MODULE_DEVICE_TABLE(pnp, pnp_dev_table);

#endif /* CONFIG_PNP */
65
/* Super-I/O configuration ports and the unlock codes tried at each. */
static const int config_ports[] = { 0x2E, 0x4E };
static const int unlock_codes[] = { 0x83, 0x87 };

/* Chip IDs this driver knows how to drive (see wbsd_scan()). */
static const int valid_ids[] = {
	0x7112,
};

/*
 * Module parameters: PnP opt-out plus fallback I/O, IRQ and DMA
 * resources (presumably consumed by the non-PnP probe path, which is
 * outside this view). nopnp is forced on when PnP support is absent.
 */
#ifdef CONFIG_PNP
static unsigned int nopnp = 0;
#else
static const unsigned int nopnp = 1;
#endif
static unsigned int io = 0x248;
static unsigned int irq = 6;
static int dma = 2;
81
82/*
83 * Basic functions
84 */
85
/*
 * Unlock the super-I/O configuration registers; the unlock code must
 * be written twice in a row to the config port.
 */
static inline void wbsd_unlock_config(struct wbsd_host *host)
{
	BUG_ON(host->config == 0);

	outb(host->unlock_code, host->config);
	outb(host->unlock_code, host->config);
}
93
/* Re-lock the super-I/O configuration registers. */
static inline void wbsd_lock_config(struct wbsd_host *host)
{
	BUG_ON(host->config == 0);

	outb(LOCK_CODE, host->config);
}
100
/* Write a super-I/O config register: index at config port, data at +1. */
static inline void wbsd_write_config(struct wbsd_host *host, u8 reg, u8 value)
{
	BUG_ON(host->config == 0);

	outb(reg, host->config);
	outb(value, host->config + 1);
}
108
/* Read a super-I/O config register: index at config port, data at +1. */
static inline u8 wbsd_read_config(struct wbsd_host *host, u8 reg)
{
	BUG_ON(host->config == 0);

	outb(reg, host->config);
	return inb(host->config + 1);
}
116
/* Write an indexed chip register through the IDXR/DATAR pair. */
static inline void wbsd_write_index(struct wbsd_host *host, u8 index, u8 value)
{
	outb(index, host->base + WBSD_IDXR);
	outb(value, host->base + WBSD_DATAR);
}
122
/* Read an indexed chip register through the IDXR/DATAR pair. */
static inline u8 wbsd_read_index(struct wbsd_host *host, u8 index)
{
	outb(index, host->base + WBSD_IDXR);
	return inb(host->base + WBSD_DATAR);
}
128
129/*
130 * Common routines
131 */
132
/*
 * Bring the controller into a known state: reset the SD/MMC part and
 * the FIFO, power the slot down, program the longest timeout, sample
 * card presence and enable the interrupt sources this driver services.
 */
static void wbsd_init_device(struct wbsd_host *host)
{
	u8 setup, ier;

	/*
	 * Reset chip (SD/MMC part) and fifo.
	 */
	setup = wbsd_read_index(host, WBSD_IDX_SETUP);
	setup |= WBSD_FIFO_RESET | WBSD_SOFT_RESET;
	wbsd_write_index(host, WBSD_IDX_SETUP, setup);

	/*
	 * Set DAT3 to input
	 */
	setup &= ~WBSD_DAT3_H;
	wbsd_write_index(host, WBSD_IDX_SETUP, setup);
	host->flags &= ~WBSD_FIGNORE_DETECT;

	/*
	 * Read back default clock.
	 */
	host->clk = wbsd_read_index(host, WBSD_IDX_CLK);

	/*
	 * Power down port.
	 */
	outb(WBSD_POWER_N, host->base + WBSD_CSR);

	/*
	 * Set maximum timeout.
	 */
	wbsd_write_index(host, WBSD_IDX_TAAC, 0x7F);

	/*
	 * Test for card presence
	 */
	if (inb(host->base + WBSD_CSR) & WBSD_CARDPRESENT)
		host->flags |= WBSD_FCARD_PRESENT;
	else
		host->flags &= ~WBSD_FCARD_PRESENT;

	/*
	 * Enable interesting interrupts.
	 */
	ier = 0;
	ier |= WBSD_EINT_CARD;
	ier |= WBSD_EINT_FIFO_THRE;
	ier |= WBSD_EINT_CRC;
	ier |= WBSD_EINT_TIMEOUT;
	ier |= WBSD_EINT_TC;

	outb(ier, host->base + WBSD_EIR);

	/*
	 * Clear interrupts (reading the ISR acknowledges pending bits).
	 */
	inb(host->base + WBSD_ISR);
}
191
/*
 * Soft-reset the SD/MMC part of the chip after a fatal condition
 * (e.g. card removed mid-transfer). Deliberately loud.
 */
static void wbsd_reset(struct wbsd_host *host)
{
	u8 setup;

	printk(KERN_ERR "%s: Resetting chip\n", mmc_hostname(host->mmc));

	/*
	 * Soft reset of chip (SD/MMC part).
	 */
	setup = wbsd_read_index(host, WBSD_IDX_SETUP);
	setup |= WBSD_SOFT_RESET;
	wbsd_write_index(host, WBSD_IDX_SETUP, setup);
}
205
/*
 * Complete a request: wind down ISA DMA if it was used, clear the
 * current-request pointer and notify the MMC core. Called with
 * host->lock held; the lock is dropped around mmc_request_done()
 * because the core may immediately re-enter this driver with a new
 * request.
 */
static void wbsd_request_end(struct wbsd_host *host, struct mmc_request *mrq)
{
	unsigned long dmaflags;

	DBGF("Ending request, cmd (%x)\n", mrq->cmd->opcode);

	if (host->dma >= 0) {
		/*
		 * Release ISA DMA controller.
		 */
		dmaflags = claim_dma_lock();
		disable_dma(host->dma);
		clear_dma_ff(host->dma);
		release_dma_lock(dmaflags);

		/*
		 * Disable DMA on host.
		 */
		wbsd_write_index(host, WBSD_IDX_DMA, 0);
	}

	host->mrq = NULL;

	/*
	 * MMC layer might call back into the driver so first unlock.
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}
236
237/*
238 * Scatter/gather functions
239 */
240
/* Arm the PIO cursor at the first scatterlist entry of the request. */
static inline void wbsd_init_sg(struct wbsd_host *host, struct mmc_data *data)
{
	/*
	 * Get info. about SG list from data structure.
	 */
	host->cur_sg = data->sg;
	host->num_sg = data->sg_len;

	host->offset = 0;
	host->remain = host->cur_sg->length;
}
252
253static inline int wbsd_next_sg(struct wbsd_host *host)
254{
255 /*
256 * Skip to next SG entry.
257 */
258 host->cur_sg++;
259 host->num_sg--;
260
261 /*
262 * Any entries left?
263 */
264 if (host->num_sg > 0) {
265 host->offset = 0;
266 host->remain = host->cur_sg->length;
267 }
268
269 return host->num_sg;
270}
271
/* Kernel virtual address of the current scatterlist entry's data. */
static inline char *wbsd_sg_to_buffer(struct wbsd_host *host)
{
	return page_address(host->cur_sg->page) + host->cur_sg->offset;
}
276
277static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
278{
279 unsigned int len, i;
280 struct scatterlist *sg;
281 char *dmabuf = host->dma_buffer;
282 char *sgbuf;
283
284 sg = data->sg;
285 len = data->sg_len;
286
287 for (i = 0; i < len; i++) {
288 sgbuf = page_address(sg[i].page) + sg[i].offset;
289 memcpy(dmabuf, sgbuf, sg[i].length);
290 dmabuf += sg[i].length;
291 }
292}
293
294static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data)
295{
296 unsigned int len, i;
297 struct scatterlist *sg;
298 char *dmabuf = host->dma_buffer;
299 char *sgbuf;
300
301 sg = data->sg;
302 len = data->sg_len;
303
304 for (i = 0; i < len; i++) {
305 sgbuf = page_address(sg[i].page) + sg[i].offset;
306 memcpy(sgbuf, dmabuf, sg[i].length);
307 dmabuf += sg[i].length;
308 }
309}
310
311/*
312 * Command handling
313 */
314
/*
 * Fetch a short (48-bit) response from the response registers. Sets
 * MMC_ERR_INVALID if the chip latched a different response length than
 * the command expects.
 */
static inline void wbsd_get_short_reply(struct wbsd_host *host,
					struct mmc_command *cmd)
{
	/*
	 * Correct response type?
	 */
	if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT) {
		cmd->error = MMC_ERR_INVALID;
		return;
	}

	cmd->resp[0] = wbsd_read_index(host, WBSD_IDX_RESP12) << 24;
	cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP13) << 16;
	cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP14) << 8;
	cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP15) << 0;
	cmd->resp[1] = wbsd_read_index(host, WBSD_IDX_RESP16) << 24;
}
332
/*
 * Fetch a long (136-bit) response, four bytes per resp[] word, from
 * the response registers. Sets MMC_ERR_INVALID if the chip latched a
 * different response length than the command expects.
 */
static inline void wbsd_get_long_reply(struct wbsd_host *host,
	struct mmc_command *cmd)
{
	int i;

	/*
	 * Correct response type?
	 */
	if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG) {
		cmd->error = MMC_ERR_INVALID;
		return;
	}

	for (i = 0; i < 4; i++) {
		cmd->resp[i] =
			wbsd_read_index(host, WBSD_IDX_RESP1 + i * 4) << 24;
		cmd->resp[i] |=
			wbsd_read_index(host, WBSD_IDX_RESP2 + i * 4) << 16;
		cmd->resp[i] |=
			wbsd_read_index(host, WBSD_IDX_RESP3 + i * 4) << 8;
		cmd->resp[i] |=
			wbsd_read_index(host, WBSD_IDX_RESP4 + i * 4) << 0;
	}
}
357
/*
 * Issue one command to the card: push the opcode and the four argument
 * bytes (MSB first) into the command register, busy-wait until the
 * chip leaves the card-traffic state, then decode the interrupt status
 * accumulated in host->isr into an error code and fetch the response
 * if one is expected. Callers hold host->lock.
 */
static void wbsd_send_command(struct wbsd_host *host, struct mmc_command *cmd)
{
	int i;
	u8 status, isr;

	DBGF("Sending cmd (%x)\n", cmd->opcode);

	/*
	 * Clear accumulated ISR. The interrupt routine
	 * will fill this one with events that occur during
	 * transfer.
	 */
	host->isr = 0;

	/*
	 * Send the command (CRC calculated by host).
	 */
	outb(cmd->opcode, host->base + WBSD_CMDR);
	for (i = 3; i >= 0; i--)
		outb((cmd->arg >> (i * 8)) & 0xff, host->base + WBSD_CMDR);

	cmd->error = MMC_ERR_NONE;

	/*
	 * Wait for the request to complete.
	 */
	do {
		status = wbsd_read_index(host, WBSD_IDX_STATUS);
	} while (status & WBSD_CARDTRAFFIC);

	/*
	 * Do we expect a reply?
	 */
	if (cmd->flags & MMC_RSP_PRESENT) {
		/*
		 * Read back status.
		 */
		isr = host->isr;

		/* Card removed? */
		if (isr & WBSD_INT_CARD)
			cmd->error = MMC_ERR_TIMEOUT;
		/* Timeout? */
		else if (isr & WBSD_INT_TIMEOUT)
			cmd->error = MMC_ERR_TIMEOUT;
		/* CRC? */
		else if ((cmd->flags & MMC_RSP_CRC) && (isr & WBSD_INT_CRC))
			cmd->error = MMC_ERR_BADCRC;
		/* All ok */
		else {
			if (cmd->flags & MMC_RSP_136)
				wbsd_get_long_reply(host, cmd);
			else
				wbsd_get_short_reply(host, cmd);
		}
	}

	DBGF("Sent cmd (%x), res %d\n", cmd->opcode, cmd->error);
}
417
418/*
419 * Data functions
420 */
421
/*
 * PIO read path: drain the data FIFO into the scatter list. The FIFO
 * size field in FSR is broken (see file header), so the fill level is
 * guessed from the full/threshold flags. Returns as soon as the whole
 * scatter list has been consumed, even if the FIFO still holds data.
 */
static void wbsd_empty_fifo(struct wbsd_host *host)
{
	struct mmc_data *data = host->mrq->cmd->data;
	char *buffer;
	int i, fsr, fifo;

	/*
	 * Handle excessive data.
	 */
	if (host->num_sg == 0)
		return;

	buffer = wbsd_sg_to_buffer(host) + host->offset;

	/*
	 * Drain the fifo. This has a tendency to loop longer
	 * than the FIFO length (usually one block).
	 */
	while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY)) {
		/*
		 * The size field in the FSR is broken so we have to
		 * do some guessing.
		 */
		if (fsr & WBSD_FIFO_FULL)
			fifo = 16;
		else if (fsr & WBSD_FIFO_FUTHRE)
			fifo = 8;
		else
			fifo = 1;

		for (i = 0; i < fifo; i++) {
			*buffer = inb(host->base + WBSD_DFR);
			buffer++;
			host->offset++;
			host->remain--;

			data->bytes_xfered++;

			/*
			 * End of scatter list entry?
			 */
			if (host->remain == 0) {
				/*
				 * Get next entry. Check if last.
				 */
				if (!wbsd_next_sg(host))
					return;

				buffer = wbsd_sg_to_buffer(host);
			}
		}
	}

	/*
	 * This is a very dirty hack to solve a
	 * hardware problem. The chip doesn't trigger
	 * FIFO threshold interrupts properly.
	 */
	if ((data->blocks * data->blksz - data->bytes_xfered) < 16)
		tasklet_schedule(&host->fifo_tasklet);
}
483
/*
 * PIO write path: top up the data FIFO from the scatter list, guessing
 * the fill level from the empty/threshold flags (the FSR size field is
 * broken, see file header). Always reschedules the FIFO tasklet since
 * the hardware's FIFO-empty interrupt is unreliable.
 */
static void wbsd_fill_fifo(struct wbsd_host *host)
{
	struct mmc_data *data = host->mrq->cmd->data;
	char *buffer;
	int i, fsr, fifo;

	/*
	 * Check that we aren't being called after the
	 * entire buffer has been transferred.
	 */
	if (host->num_sg == 0)
		return;

	buffer = wbsd_sg_to_buffer(host) + host->offset;

	/*
	 * Fill the fifo. This has a tendency to loop longer
	 * than the FIFO length (usually one block).
	 */
	while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL)) {
		/*
		 * The size field in the FSR is broken so we have to
		 * do some guessing.
		 */
		if (fsr & WBSD_FIFO_EMPTY)
			fifo = 0;
		else if (fsr & WBSD_FIFO_EMTHRE)
			fifo = 8;
		else
			fifo = 15;

		for (i = 16; i > fifo; i--) {
			outb(*buffer, host->base + WBSD_DFR);
			buffer++;
			host->offset++;
			host->remain--;

			data->bytes_xfered++;

			/*
			 * End of scatter list entry?
			 */
			if (host->remain == 0) {
				/*
				 * Get next entry. Check if last.
				 */
				if (!wbsd_next_sg(host))
					return;

				buffer = wbsd_sg_to_buffer(host);
			}
		}
	}

	/*
	 * The controller stops sending interrupts for
	 * 'FIFO empty' under certain conditions. So we
	 * need to be a bit more pro-active.
	 */
	tasklet_schedule(&host->fifo_tasklet);
}
545
/*
 * Program the controller for a data transfer: timeouts (clamped to the
 * register width), block size including CRC space per data line, a
 * FIFO reset, then either ISA DMA setup or PIO FIFO thresholds. On
 * invalid parameters data->error is set and the caller aborts the
 * request.
 */
static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data)
{
	u16 blksize;
	u8 setup;
	unsigned long dmaflags;
	unsigned int size;

	DBGF("blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);
	DBGF("tsac %d ms nsac %d clk\n",
		data->timeout_ns / 1000000, data->timeout_clks);

	/*
	 * Calculate size.
	 */
	size = data->blocks * data->blksz;

	/*
	 * Check timeout values for overflow.
	 * (Yes, some cards cause this value to overflow).
	 */
	if (data->timeout_ns > 127000000)
		wbsd_write_index(host, WBSD_IDX_TAAC, 127);
	else {
		wbsd_write_index(host, WBSD_IDX_TAAC,
			data->timeout_ns / 1000000);
	}

	if (data->timeout_clks > 255)
		wbsd_write_index(host, WBSD_IDX_NSAC, 255);
	else
		wbsd_write_index(host, WBSD_IDX_NSAC, data->timeout_clks);

	/*
	 * Inform the chip of how large blocks will be
	 * sent. It needs this to determine when to
	 * calculate CRC.
	 *
	 * Space for CRC must be included in the size.
	 * Two bytes are needed for each data line.
	 */
	if (host->bus_width == MMC_BUS_WIDTH_1) {
		blksize = data->blksz + 2;

		wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
		wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
	} else if (host->bus_width == MMC_BUS_WIDTH_4) {
		blksize = data->blksz + 2 * 4;

		wbsd_write_index(host, WBSD_IDX_PBSMSB,
			((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH);
		wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
	} else {
		data->error = MMC_ERR_INVALID;
		return;
	}

	/*
	 * Clear the FIFO. This is needed even for DMA
	 * transfers since the chip still uses the FIFO
	 * internally.
	 */
	setup = wbsd_read_index(host, WBSD_IDX_SETUP);
	setup |= WBSD_FIFO_RESET;
	wbsd_write_index(host, WBSD_IDX_SETUP, setup);

	/*
	 * DMA transfer?
	 */
	if (host->dma >= 0) {
		/*
		 * The buffer for DMA is only 64 kB.
		 *
		 * NOTE(review): the BUG_ON duplicates the graceful check
		 * below; it can only fire on an MMC-core bug since
		 * max_req_size is set to 65536 in wbsd_alloc_mmc().
		 */
		BUG_ON(size > 0x10000);
		if (size > 0x10000) {
			data->error = MMC_ERR_INVALID;
			return;
		}

		/*
		 * Transfer data from the SG list to
		 * the DMA buffer.
		 */
		if (data->flags & MMC_DATA_WRITE)
			wbsd_sg_to_dma(host, data);

		/*
		 * Initialise the ISA DMA controller.
		 */
		dmaflags = claim_dma_lock();
		disable_dma(host->dma);
		clear_dma_ff(host->dma);
		if (data->flags & MMC_DATA_READ)
			set_dma_mode(host->dma, DMA_MODE_READ & ~0x40);
		else
			set_dma_mode(host->dma, DMA_MODE_WRITE & ~0x40);
		set_dma_addr(host->dma, host->dma_addr);
		set_dma_count(host->dma, size);

		enable_dma(host->dma);
		release_dma_lock(dmaflags);

		/*
		 * Enable DMA on the host.
		 */
		wbsd_write_index(host, WBSD_IDX_DMA, WBSD_DMA_ENABLE);
	} else {
		/*
		 * This flag is used to keep printk
		 * output to a minimum.
		 */
		host->firsterr = 1;

		/*
		 * Initialise the SG list.
		 */
		wbsd_init_sg(host, data);

		/*
		 * Turn off DMA.
		 */
		wbsd_write_index(host, WBSD_IDX_DMA, 0);

		/*
		 * Set up FIFO threshold levels (and fill
		 * buffer if doing a write).
		 */
		if (data->flags & MMC_DATA_READ) {
			wbsd_write_index(host, WBSD_IDX_FIFOEN,
				WBSD_FIFOEN_FULL | 8);
		} else {
			wbsd_write_index(host, WBSD_IDX_FIFOEN,
				WBSD_FIFOEN_EMPTY | 8);
			wbsd_fill_fifo(host);
		}
	}

	data->error = MMC_ERR_NONE;
}
685
/*
 * Finish the data phase: send the stop command if the request has one,
 * wait for the controller to leave block-transfer state, wind down DMA
 * and account the bytes actually transferred, then complete the
 * request.
 */
static void wbsd_finish_data(struct wbsd_host *host, struct mmc_data *data)
{
	unsigned long dmaflags;
	int count;
	u8 status;

	WARN_ON(host->mrq == NULL);

	/*
	 * Send a stop command if needed.
	 */
	if (data->stop)
		wbsd_send_command(host, data->stop);

	/*
	 * Wait for the controller to leave data
	 * transfer state.
	 */
	do {
		status = wbsd_read_index(host, WBSD_IDX_STATUS);
	} while (status & (WBSD_BLOCK_READ | WBSD_BLOCK_WRITE));

	/*
	 * DMA transfer?
	 */
	if (host->dma >= 0) {
		/*
		 * Disable DMA on the host.
		 */
		wbsd_write_index(host, WBSD_IDX_DMA, 0);

		/*
		 * Turn off ISA DMA controller.
		 */
		dmaflags = claim_dma_lock();
		disable_dma(host->dma);
		clear_dma_ff(host->dma);
		count = get_dma_residue(host->dma);
		release_dma_lock(dmaflags);

		/* Round the transferred count down to whole blocks. */
		data->bytes_xfered = host->mrq->data->blocks *
			host->mrq->data->blksz - count;
		data->bytes_xfered -= data->bytes_xfered % data->blksz;

		/*
		 * Any leftover data?
		 */
		if (count) {
			printk(KERN_ERR "%s: Incomplete DMA transfer. "
				"%d bytes left.\n",
				mmc_hostname(host->mmc), count);

			if (data->error == MMC_ERR_NONE)
				data->error = MMC_ERR_FAILED;
		} else {
			/*
			 * Transfer data from DMA buffer to
			 * SG list.
			 */
			if (data->flags & MMC_DATA_READ)
				wbsd_dma_to_sg(host, data);
		}

		/* On error, don't count the (possibly bad) last block. */
		if (data->error != MMC_ERR_NONE) {
			if (data->bytes_xfered)
				data->bytes_xfered -= data->blksz;
		}
	}

	DBGF("Ending data transfer (%d bytes)\n", data->bytes_xfered);

	wbsd_request_end(host, host->mrq);
}
759
760/*****************************************************************************\
761 * *
762 * MMC layer callbacks *
763 * *
764\*****************************************************************************/
765
/*
 * MMC request callback: submit one command with an optional data phase.
 *
 * Runs under host->lock with bottom halves disabled so the tasklets
 * cannot race us. Data commands are screened against the hardware's
 * hard-wired list of data opcodes; anything else is rejected up front
 * (the chip would otherwise never raise data interrupts). Data
 * transfers are completed later by the finish tasklet; everything else
 * completes here.
 */
static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct wbsd_host *host = mmc_priv(mmc);
	struct mmc_command *cmd;

	/*
	 * Disable tasklets to avoid a deadlock.
	 */
	spin_lock_bh(&host->lock);

	BUG_ON(host->mrq != NULL);

	cmd = mrq->cmd;

	host->mrq = mrq;

	/*
	 * If there is no card in the slot then
	 * timeout immediately.
	 */
	if (!(host->flags & WBSD_FCARD_PRESENT)) {
		cmd->error = MMC_ERR_TIMEOUT;
		goto done;
	}

	if (cmd->data) {
		/*
		 * The hardware is so delightfully stupid that it has a list
		 * of "data" commands. If a command isn't on this list, it'll
		 * just go back to the idle state and won't send any data
		 * interrupts.
		 */
		switch (cmd->opcode) {
		case 11:
		case 17:
		case 18:
		case 20:
		case 24:
		case 25:
		case 26:
		case 27:
		case 30:
		case 42:
		case 56:
			break;

		/* ACMDs. We don't keep track of state, so we just treat them
		 * like any other command. */
		case 51:
			break;

		default:
#ifdef CONFIG_MMC_DEBUG
			printk(KERN_WARNING "%s: Data command %d is not "
				"supported by this controller.\n",
				mmc_hostname(host->mmc), cmd->opcode);
#endif
			cmd->error = MMC_ERR_INVALID;

			goto done;
		};
	}

	/*
	 * Does the request include data?
	 */
	if (cmd->data) {
		wbsd_prepare_data(host, cmd->data);

		if (cmd->data->error != MMC_ERR_NONE)
			goto done;
	}

	wbsd_send_command(host, cmd);

	/*
	 * If this is a data transfer the request
	 * will be finished after the data has
	 * transferred.
	 */
	if (cmd->data && (cmd->error == MMC_ERR_NONE)) {
		/*
		 * Dirty fix for hardware bug.
		 */
		if (host->dma == -1)
			tasklet_schedule(&host->fifo_tasklet);

		spin_unlock_bh(&host->lock);

		return;
	}

done:
	wbsd_request_end(host, mrq);

	spin_unlock_bh(&host->lock);
}
863
/*
 * MMC set_ios callback: apply power state, clock rate, chip-select and
 * bus width. Power-off triggers a full re-init to clear any odd chip
 * state. While DAT3 is forced high for MMC chip-select, card detection
 * is suppressed and re-enabled shortly afterwards by ignore_timer.
 */
static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct wbsd_host *host = mmc_priv(mmc);
	u8 clk, setup, pwr;

	spin_lock_bh(&host->lock);

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF)
		wbsd_init_device(host);

	if (ios->clock >= 24000000)
		clk = WBSD_CLK_24M;
	else if (ios->clock >= 16000000)
		clk = WBSD_CLK_16M;
	else if (ios->clock >= 12000000)
		clk = WBSD_CLK_12M;
	else
		clk = WBSD_CLK_375K;

	/*
	 * Only write to the clock register when
	 * there is an actual change.
	 */
	if (clk != host->clk) {
		wbsd_write_index(host, WBSD_IDX_CLK, clk);
		host->clk = clk;
	}

	/*
	 * Power up card.
	 */
	if (ios->power_mode != MMC_POWER_OFF) {
		pwr = inb(host->base + WBSD_CSR);
		pwr &= ~WBSD_POWER_N;
		outb(pwr, host->base + WBSD_CSR);
	}

	/*
	 * MMC cards need to have pin 1 high during init.
	 * It wreaks havoc with the card detection though so
	 * that needs to be disabled.
	 */
	setup = wbsd_read_index(host, WBSD_IDX_SETUP);
	if (ios->chip_select == MMC_CS_HIGH) {
		BUG_ON(ios->bus_width != MMC_BUS_WIDTH_1);
		setup |= WBSD_DAT3_H;
		host->flags |= WBSD_FIGNORE_DETECT;
	} else {
		if (setup & WBSD_DAT3_H) {
			setup &= ~WBSD_DAT3_H;

			/*
			 * We cannot resume card detection immediately
			 * because of capacitance and delays in the chip.
			 */
			mod_timer(&host->ignore_timer, jiffies + HZ / 100);
		}
	}
	wbsd_write_index(host, WBSD_IDX_SETUP, setup);

	/*
	 * Store bus width for later. Will be used when
	 * setting up the data transfer.
	 */
	host->bus_width = ios->bus_width;

	spin_unlock_bh(&host->lock);
}
936
937static int wbsd_get_ro(struct mmc_host *mmc)
938{
939 struct wbsd_host *host = mmc_priv(mmc);
940 u8 csr;
941
942 spin_lock_bh(&host->lock);
943
944 csr = inb(host->base + WBSD_CSR);
945 csr |= WBSD_MSLED;
946 outb(csr, host->base + WBSD_CSR);
947
948 mdelay(1);
949
950 csr = inb(host->base + WBSD_CSR);
951 csr &= ~WBSD_MSLED;
952 outb(csr, host->base + WBSD_CSR);
953
954 spin_unlock_bh(&host->lock);
955
956 return csr & WBSD_WRPT;
957}
958
/* Host controller operations exported to the MMC core. */
static const struct mmc_host_ops wbsd_ops = {
	.request	= wbsd_request,
	.set_ios	= wbsd_set_ios,
	.get_ro		= wbsd_get_ro,
};
964
965/*****************************************************************************\
966 * *
967 * Interrupt handling *
968 * *
969\*****************************************************************************/
970
971/*
972 * Helper function to reset detection ignore
973 */
974
/*
 * Timer callback: lift the card-detection blackout installed by
 * wbsd_set_ios() (DAT3 chip-select handling) and re-run detection,
 * since the slot state may have changed while detection was ignored.
 */
static void wbsd_reset_ignore(unsigned long data)
{
	struct wbsd_host *host = (struct wbsd_host *)data;

	BUG_ON(host == NULL);

	DBG("Resetting card detection ignore\n");

	spin_lock_bh(&host->lock);

	host->flags &= ~WBSD_FIGNORE_DETECT;

	/*
	 * Card status might have changed during the
	 * blackout.
	 */
	tasklet_schedule(&host->card_tasklet);

	spin_unlock_bh(&host->lock);
}
995
996/*
997 * Tasklets
998 */
999
1000static inline struct mmc_data *wbsd_get_data(struct wbsd_host *host)
1001{
1002 WARN_ON(!host->mrq);
1003 if (!host->mrq)
1004 return NULL;
1005
1006 WARN_ON(!host->mrq->cmd);
1007 if (!host->mrq->cmd)
1008 return NULL;
1009
1010 WARN_ON(!host->mrq->cmd->data);
1011 if (!host->mrq->cmd->data)
1012 return NULL;
1013
1014 return host->mrq->cmd->data;
1015}
1016
/*
 * Card-detect tasklet: translate the CSR presence bit into flag changes
 * and notify the core via mmc_detect_change(). A removal during an
 * active request resets the chip and fails the request. Skipped
 * entirely while WBSD_FIGNORE_DETECT is set (DAT3 used as chip-select).
 */
static void wbsd_tasklet_card(unsigned long param)
{
	struct wbsd_host *host = (struct wbsd_host *)param;
	u8 csr;
	int delay = -1;

	spin_lock(&host->lock);

	if (host->flags & WBSD_FIGNORE_DETECT) {
		spin_unlock(&host->lock);
		return;
	}

	csr = inb(host->base + WBSD_CSR);
	WARN_ON(csr == 0xff);

	if (csr & WBSD_CARDPRESENT) {
		if (!(host->flags & WBSD_FCARD_PRESENT)) {
			DBG("Card inserted\n");
			host->flags |= WBSD_FCARD_PRESENT;

			/* Give the card time to settle before rescanning. */
			delay = 500;
		}
	} else if (host->flags & WBSD_FCARD_PRESENT) {
		DBG("Card removed\n");
		host->flags &= ~WBSD_FCARD_PRESENT;

		if (host->mrq) {
			printk(KERN_ERR "%s: Card removed during transfer!\n",
				mmc_hostname(host->mmc));
			wbsd_reset(host);

			host->mrq->cmd->error = MMC_ERR_FAILED;
			tasklet_schedule(&host->finish_tasklet);
		}

		delay = 0;
	}

	/*
	 * Unlock first since we might get a call back.
	 */

	spin_unlock(&host->lock);

	if (delay != -1)
		mmc_detect_change(host->mmc, msecs_to_jiffies(delay));
}
1065
/*
 * FIFO tasklet: run the PIO engine in the direction of the current
 * transfer and, once the scatter list is exhausted, disable FIFO
 * interrupts and hand over to the finish tasklet.
 */
static void wbsd_tasklet_fifo(unsigned long param)
{
	struct wbsd_host *host = (struct wbsd_host *)param;
	struct mmc_data *data;

	spin_lock(&host->lock);

	if (!host->mrq)
		goto end;

	data = wbsd_get_data(host);
	if (!data)
		goto end;

	if (data->flags & MMC_DATA_WRITE)
		wbsd_fill_fifo(host);
	else
		wbsd_empty_fifo(host);

	/*
	 * Done?
	 */
	if (host->num_sg == 0) {
		wbsd_write_index(host, WBSD_IDX_FIFOEN, 0);
		tasklet_schedule(&host->finish_tasklet);
	}

end:
	spin_unlock(&host->lock);
}
1096
/*
 * CRC tasklet: mark the current data transfer as failed with a CRC
 * error and let the finish tasklet complete the request.
 */
static void wbsd_tasklet_crc(unsigned long param)
{
	struct wbsd_host *host = (struct wbsd_host *)param;
	struct mmc_data *data;

	spin_lock(&host->lock);

	if (!host->mrq)
		goto end;

	data = wbsd_get_data(host);
	if (!data)
		goto end;

	DBGF("CRC error\n");

	data->error = MMC_ERR_BADCRC;

	tasklet_schedule(&host->finish_tasklet);

end:
	spin_unlock(&host->lock);
}
1120
/*
 * Timeout tasklet: mark the current data transfer as timed out and let
 * the finish tasklet complete the request.
 */
static void wbsd_tasklet_timeout(unsigned long param)
{
	struct wbsd_host *host = (struct wbsd_host *)param;
	struct mmc_data *data;

	spin_lock(&host->lock);

	if (!host->mrq)
		goto end;

	data = wbsd_get_data(host);
	if (!data)
		goto end;

	DBGF("Timeout\n");

	data->error = MMC_ERR_TIMEOUT;

	tasklet_schedule(&host->finish_tasklet);

end:
	spin_unlock(&host->lock);
}
1144
/*
 * Finish tasklet: complete the data phase of the current request.
 * (Command-only requests are completed directly in wbsd_request().)
 */
static void wbsd_tasklet_finish(unsigned long param)
{
	struct wbsd_host *host = (struct wbsd_host *)param;
	struct mmc_data *data;

	spin_lock(&host->lock);

	WARN_ON(!host->mrq);
	if (!host->mrq)
		goto end;

	data = wbsd_get_data(host);
	if (!data)
		goto end;

	wbsd_finish_data(host, data);

end:
	spin_unlock(&host->lock);
}
1165
1166/*
1167 * Interrupt handling
1168 */
1169
/*
 * Primary interrupt handler: accumulate ISR bits into host->isr for
 * wbsd_send_command() and defer the real work to tasklets. An ISR of
 * 0x00 or 0xff means the interrupt was not ours (shared line or
 * missing hardware).
 */
static irqreturn_t wbsd_irq(int irq, void *dev_id)
{
	struct wbsd_host *host = dev_id;
	int isr;

	isr = inb(host->base + WBSD_ISR);

	/*
	 * Was it actually our hardware that caused the interrupt?
	 */
	if (isr == 0xff || isr == 0x00)
		return IRQ_NONE;

	host->isr |= isr;

	/*
	 * Schedule tasklets as needed. Error events run at high
	 * priority so they preempt further FIFO processing.
	 */
	if (isr & WBSD_INT_CARD)
		tasklet_schedule(&host->card_tasklet);
	if (isr & WBSD_INT_FIFO_THRE)
		tasklet_schedule(&host->fifo_tasklet);
	if (isr & WBSD_INT_CRC)
		tasklet_hi_schedule(&host->crc_tasklet);
	if (isr & WBSD_INT_TIMEOUT)
		tasklet_hi_schedule(&host->timeout_tasklet);
	if (isr & WBSD_INT_TC)
		tasklet_schedule(&host->finish_tasklet);

	return IRQ_HANDLED;
}
1201
1202/*****************************************************************************\
1203 * *
1204 * Device initialisation and shutdown *
1205 * *
1206\*****************************************************************************/
1207
1208/*
1209 * Allocate/free MMC structure.
1210 */
1211
/*
 * Allocate the mmc_host plus our private wbsd_host and fill in the
 * controller's capabilities and transfer-size limits. DMA starts
 * disabled (-1) until wbsd_request_dma() succeeds.
 */
static int __devinit wbsd_alloc_mmc(struct device *dev)
{
	struct mmc_host *mmc;
	struct wbsd_host *host;

	/*
	 * Allocate MMC structure.
	 */
	mmc = mmc_alloc_host(sizeof(struct wbsd_host), dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->dma = -1;

	/*
	 * Set host parameters.
	 */
	mmc->ops = &wbsd_ops;
	mmc->f_min = 375000;
	mmc->f_max = 24000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_BYTEBLOCK;

	spin_lock_init(&host->lock);

	/*
	 * Set up timers
	 */
	init_timer(&host->ignore_timer);
	host->ignore_timer.data = (unsigned long)host;
	host->ignore_timer.function = wbsd_reset_ignore;

	/*
	 * Maximum number of segments. Worst case is one sector per segment
	 * so this will be 64kB/512.
	 */
	mmc->max_hw_segs = 128;
	mmc->max_phys_segs = 128;

	/*
	 * Maximum request size. Also limited by 64KiB buffer.
	 */
	mmc->max_req_size = 65536;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Maximum block size. We have 12 bits (= 4095) but have to subtract
	 * space for CRC. So the maximum is 4095 - 4*2 = 4087.
	 */
	mmc->max_blk_size = 4087;

	/*
	 * Maximum block count. There is no real limit so the maximum
	 * request size will be the only restriction.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	dev_set_drvdata(dev, mmc);

	return 0;
}
1281
/*
 * Tear down what wbsd_alloc_mmc() created. The ignore timer is killed
 * synchronously before the host memory is released.
 */
static void __devexit wbsd_free_mmc(struct device *dev)
{
	struct mmc_host *mmc;
	struct wbsd_host *host;

	mmc = dev_get_drvdata(dev);
	if (!mmc)
		return;

	host = mmc_priv(mmc);
	BUG_ON(host == NULL);

	del_timer_sync(&host->ignore_timer);

	mmc_free_host(mmc);

	dev_set_drvdata(dev, NULL);
}
1300
1301/*
1302 * Scan for known chip id:s
1303 */
1304
/*
 * Probe the standard super-I/O config ports with each known unlock
 * code, looking for a chip ID in valid_ids[]. On success host->config
 * and host->unlock_code stay set and the config region remains claimed
 * (released later by wbsd_release_regions()); on failure everything is
 * released and -ENODEV is returned.
 */
static int __devinit wbsd_scan(struct wbsd_host *host)
{
	int i, j, k;
	int id;

	/*
	 * Iterate through all ports, all codes to
	 * find hardware that is in our known list.
	 */
	for (i = 0; i < ARRAY_SIZE(config_ports); i++) {
		if (!request_region(config_ports[i], 2, DRIVER_NAME))
			continue;

		for (j = 0; j < ARRAY_SIZE(unlock_codes); j++) {
			id = 0xFFFF;

			host->config = config_ports[i];
			host->unlock_code = unlock_codes[j];

			wbsd_unlock_config(host);

			outb(WBSD_CONF_ID_HI, config_ports[i]);
			id = inb(config_ports[i] + 1) << 8;

			outb(WBSD_CONF_ID_LO, config_ports[i]);
			id |= inb(config_ports[i] + 1);

			wbsd_lock_config(host);

			for (k = 0; k < ARRAY_SIZE(valid_ids); k++) {
				if (id == valid_ids[k]) {
					host->chip_id = id;

					return 0;
				}
			}

			if (id != 0xFFFF) {
				DBG("Unknown hardware (id %x) found at %x\n",
					id, config_ports[i]);
			}
		}

		release_region(config_ports[i], 2);
	}

	host->config = 0;
	host->unlock_code = 0;

	return -ENODEV;
}
1356
1357/*
1358 * Allocate/free io port ranges
1359 */
1360
/*
 * Claim the chip's 8-byte I/O window at the given base. The base must
 * be 8-byte aligned since the register block spans base..base+7.
 */
static int __devinit wbsd_request_region(struct wbsd_host *host, int base)
{
	if (base & 0x7)
		return -EINVAL;

	if (!request_region(base, 8, DRIVER_NAME))
		return -EIO;

	host->base = base;

	return 0;
}
1373
/*
 * Release both I/O regions the driver may hold: the 8-byte register
 * window and the 2-byte super-I/O config port pair kept claimed by a
 * successful wbsd_scan().
 */
static void __devexit wbsd_release_regions(struct wbsd_host *host)
{
	if (host->base)
		release_region(host->base, 8);

	host->base = 0;

	if (host->config)
		release_region(host->config, 2);

	host->config = 0;
}
1386
1387/*
1388 * Allocate/free DMA port and buffer
1389 */
1390
1391static void __devinit wbsd_request_dma(struct wbsd_host *host, int dma)
1392{
1393 if (dma < 0)
1394 return;
1395
1396 if (request_dma(dma, DRIVER_NAME))
1397 goto err;
1398
1399 /*
1400 * We need to allocate a special buffer in
1401 * order for ISA to be able to DMA to it.
1402 */
1403 host->dma_buffer = kmalloc(WBSD_DMA_SIZE,
1404 GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN);
1405 if (!host->dma_buffer)
1406 goto free;
1407
1408 /*
1409 * Translate the address to a physical address.
1410 */
1411 host->dma_addr = dma_map_single(mmc_dev(host->mmc), host->dma_buffer,
1412 WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
1413
1414 /*
1415 * ISA DMA must be aligned on a 64k basis.
1416 */
1417 if ((host->dma_addr & 0xffff) != 0)
1418 goto kfree;
1419 /*
1420 * ISA cannot access memory above 16 MB.
1421 */
1422 else if (host->dma_addr >= 0x1000000)
1423 goto kfree;
1424
1425 host->dma = dma;
1426
1427 return;
1428
1429kfree:
1430 /*
1431 * If we've gotten here then there is some kind of alignment bug
1432 */
1433 BUG_ON(1);
1434
1435 dma_unmap_single(mmc_dev(host->mmc), host->dma_addr,
1436 WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
1437 host->dma_addr = (dma_addr_t)NULL;
1438
1439 kfree(host->dma_buffer);
1440 host->dma_buffer = NULL;
1441
1442free:
1443 free_dma(dma);
1444
1445err:
1446 printk(KERN_WARNING DRIVER_NAME ": Unable to allocate DMA %d. "
1447 "Falling back on FIFO.\n", dma);
1448}
1449
1450static void __devexit wbsd_release_dma(struct wbsd_host *host)
1451{
1452 if (host->dma_addr) {
1453 dma_unmap_single(mmc_dev(host->mmc), host->dma_addr,
1454 WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
1455 }
1456 kfree(host->dma_buffer);
1457 if (host->dma >= 0)
1458 free_dma(host->dma);
1459
1460 host->dma = -1;
1461 host->dma_buffer = NULL;
1462 host->dma_addr = (dma_addr_t)NULL;
1463}
1464
1465/*
1466 * Allocate/free IRQ.
1467 */
1468
1469static int __devinit wbsd_request_irq(struct wbsd_host *host, int irq)
1470{
1471 int ret;
1472
1473 /*
1474 * Allocate interrupt.
1475 */
1476
1477 ret = request_irq(irq, wbsd_irq, IRQF_SHARED, DRIVER_NAME, host);
1478 if (ret)
1479 return ret;
1480
1481 host->irq = irq;
1482
1483 /*
1484 * Set up tasklets.
1485 */
1486 tasklet_init(&host->card_tasklet, wbsd_tasklet_card,
1487 (unsigned long)host);
1488 tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo,
1489 (unsigned long)host);
1490 tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc,
1491 (unsigned long)host);
1492 tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout,
1493 (unsigned long)host);
1494 tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish,
1495 (unsigned long)host);
1496
1497 return 0;
1498}
1499
1500static void __devexit wbsd_release_irq(struct wbsd_host *host)
1501{
1502 if (!host->irq)
1503 return;
1504
1505 free_irq(host->irq, host);
1506
1507 host->irq = 0;
1508
1509 tasklet_kill(&host->card_tasklet);
1510 tasklet_kill(&host->fifo_tasklet);
1511 tasklet_kill(&host->crc_tasklet);
1512 tasklet_kill(&host->timeout_tasklet);
1513 tasklet_kill(&host->finish_tasklet);
1514}
1515
1516/*
1517 * Allocate all resources for the host.
1518 */
1519
1520static int __devinit wbsd_request_resources(struct wbsd_host *host,
1521 int base, int irq, int dma)
1522{
1523 int ret;
1524
1525 /*
1526 * Allocate I/O ports.
1527 */
1528 ret = wbsd_request_region(host, base);
1529 if (ret)
1530 return ret;
1531
1532 /*
1533 * Allocate interrupt.
1534 */
1535 ret = wbsd_request_irq(host, irq);
1536 if (ret)
1537 return ret;
1538
1539 /*
1540 * Allocate DMA.
1541 */
1542 wbsd_request_dma(host, dma);
1543
1544 return 0;
1545}
1546
1547/*
1548 * Release all resources for the host.
1549 */
1550
static void __devexit wbsd_release_resources(struct wbsd_host *host)
{
	/* Reverse order of acquisition: DMA, then IRQ, then I/O ports. */
	wbsd_release_dma(host);
	wbsd_release_irq(host);
	wbsd_release_regions(host);
}
1557
1558/*
1559 * Configure the resources the chip should use.
1560 */
1561
/*
 * Program the chip's PnP configuration registers with the resources
 * this driver claimed (base, IRQ, optional DMA), then enable and power
 * up the SD/MMC function.  The write sequence is order-sensitive:
 * reset first, select the logical device, program resources, enable.
 */
static void wbsd_chip_config(struct wbsd_host *host)
{
	wbsd_unlock_config(host);

	/*
	 * Reset the chip.
	 */
	wbsd_write_config(host, WBSD_CONF_SWRST, 1);
	wbsd_write_config(host, WBSD_CONF_SWRST, 0);

	/*
	 * Select SD/MMC function.
	 */
	wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);

	/*
	 * Set up card detection.
	 */
	wbsd_write_config(host, WBSD_CONF_PINS, WBSD_PINS_DETECT_GP11);

	/*
	 * Configure chip
	 */
	wbsd_write_config(host, WBSD_CONF_PORT_HI, host->base >> 8);
	wbsd_write_config(host, WBSD_CONF_PORT_LO, host->base & 0xff);

	wbsd_write_config(host, WBSD_CONF_IRQ, host->irq);

	/* DMA is optional; host->dma is -1 in FIFO mode. */
	if (host->dma >= 0)
		wbsd_write_config(host, WBSD_CONF_DRQ, host->dma);

	/*
	 * Enable and power up chip.
	 * (0x20 is the power value used by this driver; exact meaning is
	 * chip-specific — see the W83L51xD datasheet.)
	 */
	wbsd_write_config(host, WBSD_CONF_ENABLE, 1);
	wbsd_write_config(host, WBSD_CONF_POWER, 0x20);

	wbsd_lock_config(host);
}
1601
1602/*
1603 * Check that configured resources are correct.
1604 */
1605
1606static int wbsd_chip_validate(struct wbsd_host *host)
1607{
1608 int base, irq, dma;
1609
1610 wbsd_unlock_config(host);
1611
1612 /*
1613 * Select SD/MMC function.
1614 */
1615 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1616
1617 /*
1618 * Read configuration.
1619 */
1620 base = wbsd_read_config(host, WBSD_CONF_PORT_HI) << 8;
1621 base |= wbsd_read_config(host, WBSD_CONF_PORT_LO);
1622
1623 irq = wbsd_read_config(host, WBSD_CONF_IRQ);
1624
1625 dma = wbsd_read_config(host, WBSD_CONF_DRQ);
1626
1627 wbsd_lock_config(host);
1628
1629 /*
1630 * Validate against given configuration.
1631 */
1632 if (base != host->base)
1633 return 0;
1634 if (irq != host->irq)
1635 return 0;
1636 if ((dma != host->dma) && (host->dma != -1))
1637 return 0;
1638
1639 return 1;
1640}
1641
1642/*
1643 * Powers down the SD function
1644 */
1645
static void wbsd_chip_poweroff(struct wbsd_host *host)
{
	wbsd_unlock_config(host);

	/* Select the SD/MMC logical device, then disable it. */
	wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
	wbsd_write_config(host, WBSD_CONF_ENABLE, 0);

	wbsd_lock_config(host);
}
1655
1656/*****************************************************************************\
1657 * *
1658 * Devices setup and shutdown *
1659 * *
1660\*****************************************************************************/
1661
/*
 * Common setup path shared by the platform and PnP probes.
 * Allocates the mmc_host, scans for the hardware, claims resources,
 * configures the chip where needed and registers the host with the
 * MMC core.  @pnp selects PnP semantics: a failed hardware scan is
 * tolerated (with a warning) and the BIOS-programmed configuration is
 * trusted unless it fails validation.
 */
static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma,
	int pnp)
{
	struct wbsd_host *host = NULL;
	struct mmc_host *mmc = NULL;
	int ret;

	ret = wbsd_alloc_mmc(dev);
	if (ret)
		return ret;

	mmc = dev_get_drvdata(dev);
	host = mmc_priv(mmc);

	/*
	 * Scan for hardware.  With PnP, -ENODEV is not fatal: the PnP
	 * layer already claims the device exists, so only warn.
	 */
	ret = wbsd_scan(host);
	if (ret) {
		if (pnp && (ret == -ENODEV)) {
			printk(KERN_WARNING DRIVER_NAME
				": Unable to confirm device presence. You may "
				"experience lock-ups.\n");
		} else {
			wbsd_free_mmc(dev);
			return ret;
		}
	}

	/*
	 * Request resources.  On failure, release whatever subset was
	 * acquired before bailing out.
	 */
	ret = wbsd_request_resources(host, base, irq, dma);
	if (ret) {
		wbsd_release_resources(host);
		wbsd_free_mmc(dev);
		return ret;
	}

	/*
	 * See if chip needs to be configured.  Non-PnP always programs
	 * the chip; PnP only reprograms it when the BIOS configuration
	 * does not match what we claimed.
	 */
	if (pnp) {
		if ((host->config != 0) && !wbsd_chip_validate(host)) {
			printk(KERN_WARNING DRIVER_NAME
				": PnP active but chip not configured! "
				"You probably have a buggy BIOS. "
				"Configuring chip manually.\n");
			wbsd_chip_config(host);
		}
	} else
		wbsd_chip_config(host);

	/*
	 * Power Management stuff. No idea how this works.
	 * Not tested.
	 * (0xA0 written to the PME register; meaning per the chip
	 * datasheet — NOTE(review): semantics unverified.)
	 */
#ifdef CONFIG_PM
	if (host->config) {
		wbsd_unlock_config(host);
		wbsd_write_config(host, WBSD_CONF_PME, 0xA0);
		wbsd_lock_config(host);
	}
#endif
	/*
	 * Allow device to initialise itself properly.
	 */
	mdelay(5);

	/*
	 * Reset the chip into a known state.
	 */
	wbsd_init_device(host);

	mmc_add_host(mmc);

	/* Announce the host and the resources it ended up with. */
	printk(KERN_INFO "%s: W83L51xD", mmc_hostname(mmc));
	if (host->chip_id != 0)
		printk(" id %x", (int)host->chip_id);
	printk(" at 0x%x irq %d", (int)host->base, (int)host->irq);
	if (host->dma >= 0)
		printk(" dma %d", (int)host->dma);
	else
		printk(" FIFO");
	if (pnp)
		printk(" PnP");
	printk("\n");

	return 0;
}
1752
/*
 * Tear down a host previously set up by wbsd_init().  Safe to call
 * when no host was registered (drvdata is NULL).
 */
static void __devexit wbsd_shutdown(struct device *dev, int pnp)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct wbsd_host *host;

	if (!mmc)
		return;

	host = mmc_priv(mmc);

	/* Detach from the MMC core before touching the hardware. */
	mmc_remove_host(mmc);

	/*
	 * Power down the SD/MMC function.
	 * (Skipped for PnP — presumably the configuration is left to
	 * the firmware/PnP layer; NOTE(review): confirm.)
	 */
	if (!pnp)
		wbsd_chip_poweroff(host);

	wbsd_release_resources(host);

	wbsd_free_mmc(dev);
}
1775
1776/*
1777 * Non-PnP
1778 */
1779
/*
 * Non-PnP platform probe: resources come from the io/irq/dma module
 * parameters.
 */
static int __devinit wbsd_probe(struct platform_device *dev)
{
	/* Use the module parameters for resources */
	return wbsd_init(&dev->dev, io, irq, dma, 0);
}
1785
/* Non-PnP platform remove: full teardown, chip powered off. */
static int __devexit wbsd_remove(struct platform_device *dev)
{
	wbsd_shutdown(&dev->dev, 0);

	return 0;
}
1792
1793/*
1794 * PnP
1795 */
1796
1797#ifdef CONFIG_PNP
1798
1799static int __devinit
1800wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id)
1801{
1802 int io, irq, dma;
1803
1804 /*
1805 * Get resources from PnP layer.
1806 */
1807 io = pnp_port_start(pnpdev, 0);
1808 irq = pnp_irq(pnpdev, 0);
1809 if (pnp_dma_valid(pnpdev, 0))
1810 dma = pnp_dma(pnpdev, 0);
1811 else
1812 dma = -1;
1813
1814 DBGF("PnP resources: port %3x irq %d dma %d\n", io, irq, dma);
1815
1816 return wbsd_init(&pnpdev->dev, io, irq, dma, 1);
1817}
1818
/* PnP remove: teardown without touching the chip's power state. */
static void __devexit wbsd_pnp_remove(struct pnp_dev *dev)
{
	wbsd_shutdown(&dev->dev, 1);
}
1823
1824#endif /* CONFIG_PNP */
1825
1826/*
1827 * Power management
1828 */
1829
1830#ifdef CONFIG_PM
1831
/* Common suspend: hand the host to the MMC core for suspension. */
static int wbsd_suspend(struct wbsd_host *host, pm_message_t state)
{
	BUG_ON(host == NULL);

	return mmc_suspend_host(host->mmc, state);
}
1838
/*
 * Common resume: reset the chip to a known state, then let the MMC
 * core resume the host.
 */
static int wbsd_resume(struct wbsd_host *host)
{
	BUG_ON(host == NULL);

	wbsd_init_device(host);

	return mmc_resume_host(host->mmc);
}
1847
1848static int wbsd_platform_suspend(struct platform_device *dev,
1849 pm_message_t state)
1850{
1851 struct mmc_host *mmc = platform_get_drvdata(dev);
1852 struct wbsd_host *host;
1853 int ret;
1854
1855 if (mmc == NULL)
1856 return 0;
1857
1858 DBGF("Suspending...\n");
1859
1860 host = mmc_priv(mmc);
1861
1862 ret = wbsd_suspend(host, state);
1863 if (ret)
1864 return ret;
1865
1866 wbsd_chip_poweroff(host);
1867
1868 return 0;
1869}
1870
/*
 * Platform (non-PnP) resume: reprogram the chip's configuration
 * registers, give it a moment to settle, then resume via the common
 * path.
 */
static int wbsd_platform_resume(struct platform_device *dev)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);
	struct wbsd_host *host;

	if (!mmc)
		return 0;

	DBGF("Resuming...\n");

	host = mmc_priv(mmc);
	wbsd_chip_config(host);

	/* Give the device time to initialise itself properly. */
	mdelay(5);

	return wbsd_resume(host);
}
1892
1893#ifdef CONFIG_PNP
1894
1895static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state)
1896{
1897 struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev);
1898 struct wbsd_host *host;
1899
1900 if (mmc == NULL)
1901 return 0;
1902
1903 DBGF("Suspending...\n");
1904
1905 host = mmc_priv(mmc);
1906
1907 return wbsd_suspend(host, state);
1908}
1909
1910static int wbsd_pnp_resume(struct pnp_dev *pnp_dev)
1911{
1912 struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev);
1913 struct wbsd_host *host;
1914
1915 if (mmc == NULL)
1916 return 0;
1917
1918 DBGF("Resuming...\n");
1919
1920 host = mmc_priv(mmc);
1921
1922 /*
1923 * See if chip needs to be configured.
1924 */
1925 if (host->config != 0) {
1926 if (!wbsd_chip_validate(host)) {
1927 printk(KERN_WARNING DRIVER_NAME
1928 ": PnP active but chip not configured! "
1929 "You probably have a buggy BIOS. "
1930 "Configuring chip manually.\n");
1931 wbsd_chip_config(host);
1932 }
1933 }
1934
1935 /*
1936 * Allow device to initialise itself properly.
1937 */
1938 mdelay(5);
1939
1940 return wbsd_resume(host);
1941}
1942
1943#endif /* CONFIG_PNP */
1944
1945#else /* CONFIG_PM */
1946
1947#define wbsd_platform_suspend NULL
1948#define wbsd_platform_resume NULL
1949
1950#define wbsd_pnp_suspend NULL
1951#define wbsd_pnp_resume NULL
1952
1953#endif /* CONFIG_PM */
1954
/* Platform device created by wbsd_drv_init() in non-PnP mode. */
static struct platform_device *wbsd_device;

/* Non-PnP platform driver; suspend/resume are NULL without CONFIG_PM. */
static struct platform_driver wbsd_driver = {
	.probe		= wbsd_probe,
	.remove		= __devexit_p(wbsd_remove),

	.suspend	= wbsd_platform_suspend,
	.resume		= wbsd_platform_resume,
	.driver		= {
		.name	= DRIVER_NAME,
	},
};
1967
1968#ifdef CONFIG_PNP
1969
/* PnP driver; suspend/resume are NULL without CONFIG_PM. */
static struct pnp_driver wbsd_pnp_driver = {
	.name		= DRIVER_NAME,
	.id_table	= pnp_dev_table,
	.probe		= wbsd_pnp_probe,
	.remove		= __devexit_p(wbsd_pnp_remove),

	.suspend	= wbsd_pnp_suspend,
	.resume		= wbsd_pnp_resume,
};
1979
1980#endif /* CONFIG_PNP */
1981
1982/*
1983 * Module loading/unloading
1984 */
1985
/*
 * Module init.  Registers the PnP driver by default; with the "nopnp"
 * parameter set, registers a platform driver and creates one platform
 * device for it, using the io/irq/dma module parameters.
 */
static int __init wbsd_drv_init(void)
{
	int result;

	printk(KERN_INFO DRIVER_NAME
		": Winbond W83L51xD SD/MMC card interface driver\n");
	printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

#ifdef CONFIG_PNP

	if (!nopnp) {
		result = pnp_register_driver(&wbsd_pnp_driver);
		if (result < 0)
			return result;
	}
#endif /* CONFIG_PNP */

	if (nopnp) {
		result = platform_driver_register(&wbsd_driver);
		if (result < 0)
			return result;

		/* One synthetic device so wbsd_probe() actually runs. */
		wbsd_device = platform_device_alloc(DRIVER_NAME, -1);
		if (!wbsd_device) {
			platform_driver_unregister(&wbsd_driver);
			return -ENOMEM;
		}

		result = platform_device_add(wbsd_device);
		if (result) {
			/* put (not unregister): the device was never added. */
			platform_device_put(wbsd_device);
			platform_driver_unregister(&wbsd_driver);
			return result;
		}
	}

	return 0;
}
2024
/*
 * Module exit: mirror of wbsd_drv_init() — unregister whichever
 * driver flavour (PnP or platform) was registered.
 */
static void __exit wbsd_drv_exit(void)
{
#ifdef CONFIG_PNP

	if (!nopnp)
		pnp_unregister_driver(&wbsd_pnp_driver);

#endif /* CONFIG_PNP */

	if (nopnp) {
		/* Device first, then the driver it was bound to. */
		platform_device_unregister(wbsd_device);

		platform_driver_unregister(&wbsd_driver);
	}

	DBG("unloaded\n");
}
2042
module_init(wbsd_drv_init);
module_exit(wbsd_drv_exit);
/* Module parameters: read-only after load (mode 0444). */
#ifdef CONFIG_PNP
module_param(nopnp, uint, 0444);
#endif
module_param(io, uint, 0444);
module_param(irq, uint, 0444);
module_param(dma, int, 0444);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver");

#ifdef CONFIG_PNP
MODULE_PARM_DESC(nopnp, "Scan for device instead of relying on PNP. (default 0)");
#endif
MODULE_PARM_DESC(io, "I/O base to allocate. Must be 8 byte aligned. (default 0x248)");
MODULE_PARM_DESC(irq, "IRQ to allocate. (default 6)");
MODULE_PARM_DESC(dma, "DMA channel to allocate. -1 for no DMA. (default 2)");
diff --git a/drivers/mmc/host/wbsd.h b/drivers/mmc/host/wbsd.h
new file mode 100644
index 000000000000..873bda1e59b4
--- /dev/null
+++ b/drivers/mmc/host/wbsd.h
@@ -0,0 +1,185 @@
1/*
2 * linux/drivers/mmc/wbsd.h - Winbond W83L51xD SD/MMC driver
3 *
4 * Copyright (C) 2004-2007 Pierre Ossman, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 */
11
/* Value written to unlock the chip's configuration registers. */
#define LOCK_CODE		0xAA

/*
 * Configuration-space register indices (accessed through the config
 * port pair via wbsd_read_config()/wbsd_write_config()).
 */
#define WBSD_CONF_SWRST		0x02
#define WBSD_CONF_DEVICE	0x07
#define WBSD_CONF_ID_HI		0x20
#define WBSD_CONF_ID_LO		0x21
#define WBSD_CONF_POWER		0x22
#define WBSD_CONF_PME		0x23
#define WBSD_CONF_PMES		0x24

/* Per-logical-device resource registers. */
#define WBSD_CONF_ENABLE	0x30
#define WBSD_CONF_PORT_HI	0x60
#define WBSD_CONF_PORT_LO	0x61
#define WBSD_CONF_IRQ		0x70
#define WBSD_CONF_DRQ		0x74

#define WBSD_CONF_PINS		0xF0

/* Logical device number of the SD/MMC function. */
#define DEVICE_SD		0x03

/* Bits of the WBSD_CONF_PINS register (card-detect pin routing). */
#define WBSD_PINS_DAT3_HI	0x20
#define WBSD_PINS_DAT3_OUT	0x10
#define WBSD_PINS_GP11_HI	0x04
#define WBSD_PINS_DETECT_GP11	0x02
#define WBSD_PINS_DETECT_DAT3	0x01

/* SD interface register offsets from host->base. */
#define WBSD_CMDR		0x00
#define WBSD_DFR		0x01
#define WBSD_EIR		0x02
#define WBSD_ISR		0x03
#define WBSD_FSR		0x04
#define WBSD_IDXR		0x05
#define WBSD_DATAR		0x06
#define WBSD_CSR		0x07

/* Interrupt enable bits (EIR). */
#define WBSD_EINT_CARD		0x40
#define WBSD_EINT_FIFO_THRE	0x20
#define WBSD_EINT_CRC		0x10
#define WBSD_EINT_TIMEOUT	0x08
#define WBSD_EINT_PROGEND	0x04
#define WBSD_EINT_BUSYEND	0x02
#define WBSD_EINT_TC		0x01

/* Interrupt status bits (ISR); mirror the enable bits above. */
#define WBSD_INT_PENDING	0x80
#define WBSD_INT_CARD		0x40
#define WBSD_INT_FIFO_THRE	0x20
#define WBSD_INT_CRC		0x10
#define WBSD_INT_TIMEOUT	0x08
#define WBSD_INT_PROGEND	0x04
#define WBSD_INT_BUSYEND	0x02
#define WBSD_INT_TC		0x01

/* FIFO status bits (FSR). */
#define WBSD_FIFO_EMPTY		0x80
#define WBSD_FIFO_FULL		0x40
#define WBSD_FIFO_EMTHRE	0x20
#define WBSD_FIFO_FUTHRE	0x10
#define WBSD_FIFO_SZMASK	0x0F

/* Card status bits (CSR). */
#define WBSD_MSLED		0x20
#define WBSD_POWER_N		0x10
#define WBSD_WRPT		0x04
#define WBSD_CARDPRESENT	0x01

/* Indexed registers, selected through WBSD_IDXR / WBSD_DATAR. */
#define WBSD_IDX_CLK		0x01
#define WBSD_IDX_PBSMSB		0x02
#define WBSD_IDX_TAAC		0x03
#define WBSD_IDX_NSAC		0x04
#define WBSD_IDX_PBSLSB		0x05
#define WBSD_IDX_SETUP		0x06
#define WBSD_IDX_DMA		0x07
#define WBSD_IDX_FIFOEN		0x08
#define WBSD_IDX_STATUS		0x10
#define WBSD_IDX_RSPLEN		0x1E
#define WBSD_IDX_RESP0		0x1F
#define WBSD_IDX_RESP1		0x20
#define WBSD_IDX_RESP2		0x21
#define WBSD_IDX_RESP3		0x22
#define WBSD_IDX_RESP4		0x23
#define WBSD_IDX_RESP5		0x24
#define WBSD_IDX_RESP6		0x25
#define WBSD_IDX_RESP7		0x26
#define WBSD_IDX_RESP8		0x27
#define WBSD_IDX_RESP9		0x28
#define WBSD_IDX_RESP10		0x29
#define WBSD_IDX_RESP11		0x2A
#define WBSD_IDX_RESP12		0x2B
#define WBSD_IDX_RESP13		0x2C
#define WBSD_IDX_RESP14		0x2D
#define WBSD_IDX_RESP15		0x2E
#define WBSD_IDX_RESP16		0x2F
#define WBSD_IDX_CRCSTATUS	0x30
#define WBSD_IDX_ISR		0x3F

/* Clock speed selectors for WBSD_IDX_CLK. */
#define WBSD_CLK_375K		0x00
#define WBSD_CLK_12M		0x01
#define WBSD_CLK_16M		0x02
#define WBSD_CLK_24M		0x03

#define WBSD_DATA_WIDTH		0x01

/* WBSD_IDX_SETUP bits. */
#define WBSD_DAT3_H		0x08
#define WBSD_FIFO_RESET		0x04
#define WBSD_SOFT_RESET		0x02
#define WBSD_INC_INDEX		0x01

/* WBSD_IDX_DMA bits. */
#define WBSD_DMA_SINGLE		0x02
#define WBSD_DMA_ENABLE		0x01

/* WBSD_IDX_FIFOEN bits and threshold mask. */
#define WBSD_FIFOEN_EMPTY	0x20
#define WBSD_FIFOEN_FULL	0x10
#define WBSD_FIFO_THREMASK	0x0F

/* WBSD_IDX_STATUS bits. */
#define WBSD_BLOCK_READ		0x80
#define WBSD_BLOCK_WRITE	0x40
#define WBSD_BUSY		0x20
#define WBSD_CARDTRAFFIC	0x04
#define WBSD_SENDCMD		0x02
#define WBSD_RECVRES		0x01

/* Response length selectors for WBSD_IDX_RSPLEN. */
#define WBSD_RSP_SHORT		0x00
#define WBSD_RSP_LONG		0x01

/* CRC status tokens read from WBSD_IDX_CRCSTATUS. */
#define WBSD_CRC_MASK		0x1F
#define WBSD_CRC_OK		0x05 /* S010E (00101) */
#define WBSD_CRC_FAIL		0x0B /* S101E (01011) */

/* ISA DMA bounce buffer size; must cover one full transfer. */
#define WBSD_DMA_SIZE		65536
139
/*
 * Per-controller driver state, stored as the mmc_host's private data
 * (accessed via mmc_priv()).
 */
struct wbsd_host
{
	struct mmc_host*	mmc;		/* MMC structure */

	spinlock_t		lock;		/* Mutex */

	int			flags;		/* Driver states */

#define WBSD_FCARD_PRESENT	(1<<0)		/* Card is present */
#define WBSD_FIGNORE_DETECT	(1<<1)		/* Ignore card detection */

	struct mmc_request*	mrq;		/* Current request */

	u8			isr;		/* Accumulated ISR */

	struct scatterlist*	cur_sg;		/* Current SG entry */
	unsigned int		num_sg;		/* Number of entries left */

	unsigned int		offset;		/* Offset into current entry */
	unsigned int		remain;		/* Data left in current entry */

	char*			dma_buffer;	/* ISA DMA buffer */
	dma_addr_t		dma_addr;	/* Physical address for same */

	int			firsterr;	/* See fifo functions */

	u8			clk;		/* Current clock speed */
	unsigned char		bus_width;	/* Current bus width */

	int			config;		/* Config port (0 = none) */
	u8			unlock_code;	/* Code to unlock config */

	int			chip_id;	/* ID of controller */

	int			base;		/* I/O port base */
	int			irq;		/* Interrupt */
	int			dma;		/* DMA channel (-1 = FIFO mode) */

	struct tasklet_struct	card_tasklet;	/* Tasklet structures */
	struct tasklet_struct	fifo_tasklet;
	struct tasklet_struct	crc_tasklet;
	struct tasklet_struct	timeout_tasklet;
	struct tasklet_struct	finish_tasklet;

	struct timer_list	ignore_timer;	/* Ignore detection timer */
};