author	Will Newton <will.newton@gmail.com>	2011-01-02 01:11:59 -0500
committer	Chris Ball <cjb@laptop.org>	2011-01-08 23:52:24 -0500
commit	f95f3850f7a9e1d49ebc5b6e72e7cc3ec3685b0b (patch)
tree	2903746678fde809a1fcede6ce16cd9f45334214
parent	03d2bfc878e4dff9e596accc7b7eccf947804a3c (diff)
mmc: dw_mmc: Add Synopsys DesignWare mmc host driver.
This adds the mmc host driver for the Synopsys DesignWare mmc host controller, found in a number of embedded SoC designs.

Signed-off-by: Will Newton <will.newton@imgtec.com>
Reviewed-by: Matt Fleming <matt@console-pimps.org>
Reviewed-by: Chris Ball <cjb@laptop.org>
Signed-off-by: Chris Ball <cjb@laptop.org>
-rw-r--r--	drivers/mmc/host/Kconfig	16
-rw-r--r--	drivers/mmc/host/Makefile	1
-rw-r--r--	drivers/mmc/host/dw_mmc.c	1796
-rw-r--r--	drivers/mmc/host/dw_mmc.h	168
-rw-r--r--	include/linux/mmc/dw_mmc.h	217
5 files changed, 2198 insertions(+), 0 deletions(-)
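For context, the probe path in dw_mmc.c below expects the platform to register a platform device named "dw_mmc" carrying one memory resource, one IRQ and a struct dw_mci_board as platform_data: probing fails without an init callback and a bus_hz value, and select_slot is required when num_slots is greater than one. The board-file sketch below is an editor's illustration only; the dw_mci_board field names mirror how the driver dereferences pdata, while the init() signature, the register base and the IRQ number are assumptions, not part of this patch.

	/* Hypothetical board glue -- illustrative, not part of this patch. */
	#include <linux/platform_device.h>
	#include <linux/interrupt.h>
	#include <linux/mmc/dw_mmc.h>

	/* Assumed signature: board-specific card-detect and pad setup. */
	static int board_dw_mci_init(u32 slot_id, irq_handler_t cd_handler, void *data)
	{
		return 0;
	}

	static struct dw_mci_board board_dw_mci_pdata = {
		.num_slots	 = 1,
		.bus_hz		 = 50 * 1000 * 1000,	/* clock driving the controller */
		.detect_delay_ms = 200,			/* card-detect debounce */
		.quirks		 = 0,
		.init		 = board_dw_mci_init,
	};

	static struct resource board_dw_mci_resources[] = {
		{
			.start	= 0x12340000,		/* example register base */
			.end	= 0x12340fff,
			.flags	= IORESOURCE_MEM,
		},
		{
			.start	= 42,			/* example interrupt line */
			.end	= 42,
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct platform_device board_dw_mci_device = {
		.name		= "dw_mmc",		/* must match the platform driver name */
		.id		= -1,
		.resource	= board_dw_mci_resources,
		.num_resources	= ARRAY_SIZE(board_dw_mci_resources),
		.dev		= {
			.platform_data	= &board_dw_mci_pdata,
		},
	};

Board code would then call platform_device_register(&board_dw_mci_device) during machine init; the module_init() registration at the end of the driver takes care of binding.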
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 9f47d38dcc7f..5ec02a536c5d 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -479,6 +479,22 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
479	help
480	  If you say yes here SD-Cards may work on the EZkit.
481
482config MMC_DW
483 tristate "Synopsys DesignWare Memory Card Interface"
484 depends on ARM
485 help
486 This selects support for the Synopsys DesignWare Mobile Storage IP
 487	  block. This provides host support for SD and MMC interfaces, in both
488 PIO and external DMA modes.
489
490config MMC_DW_IDMAC
491 bool "Internal DMAC interface"
492 depends on MMC_DW
493 help
494 This selects support for the internal DMAC block within the Synopsys
 495	  DesignWare Mobile Storage IP block. This disables the external DMA
496 interface.
497
498config MMC_SH_MMCIF
499	tristate "SuperH Internal MMCIF support"
500	depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 6d1ff9e27368..e834fb223e9a 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
31obj-$(CONFIG_MMC_CB710)	+= cb710-mmc.o
32obj-$(CONFIG_MMC_VIA_SDMMC)	+= via-sdmmc.o
33obj-$(CONFIG_SDH_BFIN)	+= bfin_sdh.o
34obj-$(CONFIG_MMC_DW) += dw_mmc.o
35obj-$(CONFIG_MMC_SH_MMCIF)	+= sh_mmcif.o
36obj-$(CONFIG_MMC_JZ4740)	+= jz4740_mmc.o
37obj-$(CONFIG_MMC_USHC)	+= ushc.o
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
new file mode 100644
index 000000000000..2fcc82577c1b
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc.c
@@ -0,0 +1,1796 @@
1/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/blkdev.h>
15#include <linux/clk.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/ioport.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
25#include <linux/scatterlist.h>
26#include <linux/seq_file.h>
27#include <linux/slab.h>
28#include <linux/stat.h>
29#include <linux/delay.h>
30#include <linux/irq.h>
31#include <linux/mmc/host.h>
32#include <linux/mmc/mmc.h>
33#include <linux/mmc/dw_mmc.h>
34#include <linux/bitops.h>
35
36#include "dw_mmc.h"
37
38/* Common flag combinations */
39#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DTO | SDMMC_INT_DCRC | \
40 SDMMC_INT_HTO | SDMMC_INT_SBE | \
41 SDMMC_INT_EBE)
42#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
43 SDMMC_INT_RESP_ERR)
44#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
45 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
46#define DW_MCI_SEND_STATUS 1
47#define DW_MCI_RECV_STATUS 2
48#define DW_MCI_DMA_THRESHOLD 16
49
50#ifdef CONFIG_MMC_DW_IDMAC
51struct idmac_desc {
52 u32 des0; /* Control Descriptor */
53#define IDMAC_DES0_DIC BIT(1)
54#define IDMAC_DES0_LD BIT(2)
55#define IDMAC_DES0_FD BIT(3)
56#define IDMAC_DES0_CH BIT(4)
57#define IDMAC_DES0_ER BIT(5)
58#define IDMAC_DES0_CES BIT(30)
59#define IDMAC_DES0_OWN BIT(31)
60
61 u32 des1; /* Buffer sizes */
62#define IDMAC_SET_BUFFER1_SIZE(d, s) \
63 ((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff))
64
65 u32 des2; /* buffer 1 physical address */
66
67 u32 des3; /* buffer 2 physical address */
68};
69#endif /* CONFIG_MMC_DW_IDMAC */
70
71/**
72 * struct dw_mci_slot - MMC slot state
73 * @mmc: The mmc_host representing this slot.
74 * @host: The MMC controller this slot is using.
75 * @ctype: Card type for this slot.
76 * @mrq: mmc_request currently being processed or waiting to be
77 * processed, or NULL when the slot is idle.
78 * @queue_node: List node for placing this node in the @queue list of
79 * &struct dw_mci.
80 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
81 * @flags: Random state bits associated with the slot.
82 * @id: Number of this slot.
83 * @last_detect_state: Most recently observed card detect state.
84 */
85struct dw_mci_slot {
86 struct mmc_host *mmc;
87 struct dw_mci *host;
88
89 u32 ctype;
90
91 struct mmc_request *mrq;
92 struct list_head queue_node;
93
94 unsigned int clock;
95 unsigned long flags;
96#define DW_MMC_CARD_PRESENT 0
97#define DW_MMC_CARD_NEED_INIT 1
98 int id;
99 int last_detect_state;
100};
101
102#if defined(CONFIG_DEBUG_FS)
103static int dw_mci_req_show(struct seq_file *s, void *v)
104{
105 struct dw_mci_slot *slot = s->private;
106 struct mmc_request *mrq;
107 struct mmc_command *cmd;
108 struct mmc_command *stop;
109 struct mmc_data *data;
110
111 /* Make sure we get a consistent snapshot */
112 spin_lock_bh(&slot->host->lock);
113 mrq = slot->mrq;
114
115 if (mrq) {
116 cmd = mrq->cmd;
117 data = mrq->data;
118 stop = mrq->stop;
119
120 if (cmd)
121 seq_printf(s,
122 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
123 cmd->opcode, cmd->arg, cmd->flags,
124 cmd->resp[0], cmd->resp[1], cmd->resp[2],
 125			   cmd->resp[3], cmd->error);
126 if (data)
127 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
128 data->bytes_xfered, data->blocks,
129 data->blksz, data->flags, data->error);
130 if (stop)
131 seq_printf(s,
132 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
133 stop->opcode, stop->arg, stop->flags,
134 stop->resp[0], stop->resp[1], stop->resp[2],
 135			   stop->resp[3], stop->error);
136 }
137
138 spin_unlock_bh(&slot->host->lock);
139
140 return 0;
141}
142
143static int dw_mci_req_open(struct inode *inode, struct file *file)
144{
145 return single_open(file, dw_mci_req_show, inode->i_private);
146}
147
148static const struct file_operations dw_mci_req_fops = {
149 .owner = THIS_MODULE,
150 .open = dw_mci_req_open,
151 .read = seq_read,
152 .llseek = seq_lseek,
153 .release = single_release,
154};
155
156static int dw_mci_regs_show(struct seq_file *s, void *v)
 157{
 158	struct dw_mci *host = s->private;
 159	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
 160	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
 161	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
 162	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
 163	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
 164	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
 165	return 0;
 166}
167
168static int dw_mci_regs_open(struct inode *inode, struct file *file)
169{
170 return single_open(file, dw_mci_regs_show, inode->i_private);
171}
172
173static const struct file_operations dw_mci_regs_fops = {
174 .owner = THIS_MODULE,
175 .open = dw_mci_regs_open,
176 .read = seq_read,
177 .llseek = seq_lseek,
178 .release = single_release,
179};
180
181static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
182{
183 struct mmc_host *mmc = slot->mmc;
184 struct dw_mci *host = slot->host;
185 struct dentry *root;
186 struct dentry *node;
187
188 root = mmc->debugfs_root;
189 if (!root)
190 return;
191
192 node = debugfs_create_file("regs", S_IRUSR, root, host,
193 &dw_mci_regs_fops);
194 if (!node)
195 goto err;
196
197 node = debugfs_create_file("req", S_IRUSR, root, slot,
198 &dw_mci_req_fops);
199 if (!node)
200 goto err;
201
202 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
203 if (!node)
204 goto err;
205
206 node = debugfs_create_x32("pending_events", S_IRUSR, root,
207 (u32 *)&host->pending_events);
208 if (!node)
209 goto err;
210
211 node = debugfs_create_x32("completed_events", S_IRUSR, root,
212 (u32 *)&host->completed_events);
213 if (!node)
214 goto err;
215
216 return;
217
218err:
219 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
220}
221#endif /* defined(CONFIG_DEBUG_FS) */
222
223static void dw_mci_set_timeout(struct dw_mci *host)
224{
225 /* timeout (maximum) */
226 mci_writel(host, TMOUT, 0xffffffff);
227}
228
229static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
230{
231 struct mmc_data *data;
232 u32 cmdr;
233 cmd->error = -EINPROGRESS;
234
235 cmdr = cmd->opcode;
236
237 if (cmdr == MMC_STOP_TRANSMISSION)
238 cmdr |= SDMMC_CMD_STOP;
239 else
240 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
241
242 if (cmd->flags & MMC_RSP_PRESENT) {
243 /* We expect a response, so set this bit */
244 cmdr |= SDMMC_CMD_RESP_EXP;
245 if (cmd->flags & MMC_RSP_136)
246 cmdr |= SDMMC_CMD_RESP_LONG;
247 }
248
249 if (cmd->flags & MMC_RSP_CRC)
250 cmdr |= SDMMC_CMD_RESP_CRC;
251
252 data = cmd->data;
253 if (data) {
254 cmdr |= SDMMC_CMD_DAT_EXP;
255 if (data->flags & MMC_DATA_STREAM)
256 cmdr |= SDMMC_CMD_STRM_MODE;
257 if (data->flags & MMC_DATA_WRITE)
258 cmdr |= SDMMC_CMD_DAT_WR;
259 }
260
261 return cmdr;
262}
263
264static void dw_mci_start_command(struct dw_mci *host,
265 struct mmc_command *cmd, u32 cmd_flags)
266{
267 host->cmd = cmd;
268 dev_vdbg(&host->pdev->dev,
269 "start command: ARGR=0x%08x CMDR=0x%08x\n",
270 cmd->arg, cmd_flags);
271
272 mci_writel(host, CMDARG, cmd->arg);
273 wmb();
274
275 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
276}
277
278static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
279{
280 dw_mci_start_command(host, data->stop, host->stop_cmdr);
281}
282
283/* DMA interface functions */
284static void dw_mci_stop_dma(struct dw_mci *host)
285{
286 if (host->use_dma) {
287 host->dma_ops->stop(host);
288 host->dma_ops->cleanup(host);
289 } else {
290 /* Data transfer was stopped by the interrupt handler */
291 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
292 }
293}
294
295#ifdef CONFIG_MMC_DW_IDMAC
296static void dw_mci_dma_cleanup(struct dw_mci *host)
297{
298 struct mmc_data *data = host->data;
299
300 if (data)
301 dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
302 ((data->flags & MMC_DATA_WRITE)
303 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
304}
305
306static void dw_mci_idmac_stop_dma(struct dw_mci *host)
307{
308 u32 temp;
309
310 /* Disable and reset the IDMAC interface */
311 temp = mci_readl(host, CTRL);
312 temp &= ~SDMMC_CTRL_USE_IDMAC;
313 temp |= SDMMC_CTRL_DMA_RESET;
314 mci_writel(host, CTRL, temp);
315
316 /* Stop the IDMAC running */
317 temp = mci_readl(host, BMOD);
318 temp &= ~SDMMC_IDMAC_ENABLE;
319 mci_writel(host, BMOD, temp);
320}
321
322static void dw_mci_idmac_complete_dma(struct dw_mci *host)
323{
324 struct mmc_data *data = host->data;
325
326 dev_vdbg(&host->pdev->dev, "DMA complete\n");
327
328 host->dma_ops->cleanup(host);
329
330 /*
331 * If the card was removed, data will be NULL. No point in trying to
332 * send the stop command or waiting for NBUSY in this case.
333 */
334 if (data) {
335 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
336 tasklet_schedule(&host->tasklet);
337 }
338}
339
340static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
341 unsigned int sg_len)
342{
343 int i;
344 struct idmac_desc *desc = host->sg_cpu;
345
346 for (i = 0; i < sg_len; i++, desc++) {
347 unsigned int length = sg_dma_len(&data->sg[i]);
348 u32 mem_addr = sg_dma_address(&data->sg[i]);
349
350 /* Set the OWN bit and disable interrupts for this descriptor */
351 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
352
353 /* Buffer length */
354 IDMAC_SET_BUFFER1_SIZE(desc, length);
355
356 /* Physical address to DMA to/from */
357 desc->des2 = mem_addr;
358 }
359
360 /* Set first descriptor */
361 desc = host->sg_cpu;
362 desc->des0 |= IDMAC_DES0_FD;
363
364 /* Set last descriptor */
365 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
366 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
367 desc->des0 |= IDMAC_DES0_LD;
368
369 wmb();
370}
371
372static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
373{
374 u32 temp;
375
376 dw_mci_translate_sglist(host, host->data, sg_len);
377
378 /* Select IDMAC interface */
379 temp = mci_readl(host, CTRL);
380 temp |= SDMMC_CTRL_USE_IDMAC;
381 mci_writel(host, CTRL, temp);
382
383 wmb();
384
385 /* Enable the IDMAC */
386 temp = mci_readl(host, BMOD);
387 temp |= SDMMC_IDMAC_ENABLE;
388 mci_writel(host, BMOD, temp);
389
390 /* Start it running */
391 mci_writel(host, PLDMND, 1);
392}
393
394static int dw_mci_idmac_init(struct dw_mci *host)
395{
396 struct idmac_desc *p;
397 int i;
398
399 /* Number of descriptors in the ring buffer */
400 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
401
402 /* Forward link the descriptor list */
403 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
404 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
405
406 /* Set the last descriptor as the end-of-ring descriptor */
407 p->des3 = host->sg_dma;
408 p->des0 = IDMAC_DES0_ER;
409
410 /* Mask out interrupts - get Tx & Rx complete only */
411 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
412 SDMMC_IDMAC_INT_TI);
413
414 /* Set the descriptor base address */
415 mci_writel(host, DBADDR, host->sg_dma);
416 return 0;
417}
418
419static struct dw_mci_dma_ops dw_mci_idmac_ops = {
420 .init = dw_mci_idmac_init,
421 .start = dw_mci_idmac_start_dma,
422 .stop = dw_mci_idmac_stop_dma,
423 .complete = dw_mci_idmac_complete_dma,
424 .cleanup = dw_mci_dma_cleanup,
425};
426#endif /* CONFIG_MMC_DW_IDMAC */
427
428static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
429{
430 struct scatterlist *sg;
431 unsigned int i, direction, sg_len;
432 u32 temp;
433
434 /* If we don't have a channel, we can't do DMA */
435 if (!host->use_dma)
436 return -ENODEV;
437
438 /*
439 * We don't do DMA on "complex" transfers, i.e. with
440 * non-word-aligned buffers or lengths. Also, we don't bother
441 * with all the DMA setup overhead for short transfers.
442 */
443 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
444 return -EINVAL;
445 if (data->blksz & 3)
446 return -EINVAL;
447
448 for_each_sg(data->sg, sg, data->sg_len, i) {
449 if (sg->offset & 3 || sg->length & 3)
450 return -EINVAL;
451 }
452
453 if (data->flags & MMC_DATA_READ)
454 direction = DMA_FROM_DEVICE;
455 else
456 direction = DMA_TO_DEVICE;
457
458 sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
459 direction);
460
461 dev_vdbg(&host->pdev->dev,
462 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
463 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
464 sg_len);
465
466 /* Enable the DMA interface */
467 temp = mci_readl(host, CTRL);
468 temp |= SDMMC_CTRL_DMA_ENABLE;
469 mci_writel(host, CTRL, temp);
470
471 /* Disable RX/TX IRQs, let DMA handle it */
472 temp = mci_readl(host, INTMASK);
473 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
474 mci_writel(host, INTMASK, temp);
475
476 host->dma_ops->start(host, sg_len);
477
478 return 0;
479}
480
481static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
482{
483 u32 temp;
484
485 data->error = -EINPROGRESS;
486
487 WARN_ON(host->data);
488 host->sg = NULL;
489 host->data = data;
490
491 if (dw_mci_submit_data_dma(host, data)) {
492 host->sg = data->sg;
493 host->pio_offset = 0;
494 if (data->flags & MMC_DATA_READ)
495 host->dir_status = DW_MCI_RECV_STATUS;
496 else
497 host->dir_status = DW_MCI_SEND_STATUS;
498
499 temp = mci_readl(host, INTMASK);
500 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
501 mci_writel(host, INTMASK, temp);
502
503 temp = mci_readl(host, CTRL);
504 temp &= ~SDMMC_CTRL_DMA_ENABLE;
505 mci_writel(host, CTRL, temp);
506 }
507}
508
509static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
510{
511 struct dw_mci *host = slot->host;
512 unsigned long timeout = jiffies + msecs_to_jiffies(500);
513 unsigned int cmd_status = 0;
514
515 mci_writel(host, CMDARG, arg);
516 wmb();
517 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
518
519 while (time_before(jiffies, timeout)) {
520 cmd_status = mci_readl(host, CMD);
521 if (!(cmd_status & SDMMC_CMD_START))
522 return;
523 }
524 dev_err(&slot->mmc->class_dev,
525 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
526 cmd, arg, cmd_status);
527}
528
529static void dw_mci_setup_bus(struct dw_mci_slot *slot)
530{
531 struct dw_mci *host = slot->host;
532 u32 div;
533
534 if (slot->clock != host->current_speed) {
535 if (host->bus_hz % slot->clock)
536 /*
537 * move the + 1 after the divide to prevent
538 * over-clocking the card.
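			 *
			 * Editor's illustrative example (numbers not from this
			 * patch): with bus_hz = 100 MHz and a requested clock
			 * of 300 kHz, 100000000 / 300000 = 333, so
			 * div = (333 >> 1) + 1 = 167 and the card runs at
			 * 100 MHz / (2 * 167) ~= 299 kHz. Without the + 1 it
			 * would run at ~301 kHz, above the requested rate.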
539 */
540 div = ((host->bus_hz / slot->clock) >> 1) + 1;
541 else
542 div = (host->bus_hz / slot->clock) >> 1;
543
544 dev_info(&slot->mmc->class_dev,
545 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
546 " div = %d)\n", slot->id, host->bus_hz, slot->clock,
547 div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);
548
549 /* disable clock */
550 mci_writel(host, CLKENA, 0);
551 mci_writel(host, CLKSRC, 0);
552
553 /* inform CIU */
554 mci_send_cmd(slot,
555 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
556
557 /* set clock to desired speed */
558 mci_writel(host, CLKDIV, div);
559
560 /* inform CIU */
561 mci_send_cmd(slot,
562 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
563
564 /* enable clock */
565 mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE);
566
567 /* inform CIU */
568 mci_send_cmd(slot,
569 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
570
571 host->current_speed = slot->clock;
572 }
573
574 /* Set the current slot bus width */
575 mci_writel(host, CTYPE, slot->ctype);
576}
577
578static void dw_mci_start_request(struct dw_mci *host,
579 struct dw_mci_slot *slot)
580{
581 struct mmc_request *mrq;
582 struct mmc_command *cmd;
583 struct mmc_data *data;
584 u32 cmdflags;
585
586 mrq = slot->mrq;
587 if (host->pdata->select_slot)
588 host->pdata->select_slot(slot->id);
589
590 /* Slot specific timing and width adjustment */
591 dw_mci_setup_bus(slot);
592
593 host->cur_slot = slot;
594 host->mrq = mrq;
595
596 host->pending_events = 0;
597 host->completed_events = 0;
598 host->data_status = 0;
599
600 data = mrq->data;
601 if (data) {
602 dw_mci_set_timeout(host);
603 mci_writel(host, BYTCNT, data->blksz*data->blocks);
604 mci_writel(host, BLKSIZ, data->blksz);
605 }
606
607 cmd = mrq->cmd;
608 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
609
610 /* this is the first command, send the initialization clock */
611 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
612 cmdflags |= SDMMC_CMD_INIT;
613
614 if (data) {
615 dw_mci_submit_data(host, data);
616 wmb();
617 }
618
619 dw_mci_start_command(host, cmd, cmdflags);
620
621 if (mrq->stop)
622 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
623}
624
625static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
626 struct mmc_request *mrq)
627{
628 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
629 host->state);
630
631 spin_lock_bh(&host->lock);
632 slot->mrq = mrq;
633
634 if (host->state == STATE_IDLE) {
635 host->state = STATE_SENDING_CMD;
636 dw_mci_start_request(host, slot);
637 } else {
638 list_add_tail(&slot->queue_node, &host->queue);
639 }
640
641 spin_unlock_bh(&host->lock);
642}
643
644static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
645{
646 struct dw_mci_slot *slot = mmc_priv(mmc);
647 struct dw_mci *host = slot->host;
648
649 WARN_ON(slot->mrq);
650
651 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
652 mrq->cmd->error = -ENOMEDIUM;
653 mmc_request_done(mmc, mrq);
654 return;
655 }
656
657 /* We don't support multiple blocks of weird lengths. */
658 dw_mci_queue_request(host, slot, mrq);
659}
660
661static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
662{
663 struct dw_mci_slot *slot = mmc_priv(mmc);
664
665 /* set default 1 bit mode */
666 slot->ctype = SDMMC_CTYPE_1BIT;
667
668 switch (ios->bus_width) {
669 case MMC_BUS_WIDTH_1:
670 slot->ctype = SDMMC_CTYPE_1BIT;
671 break;
672 case MMC_BUS_WIDTH_4:
673 slot->ctype = SDMMC_CTYPE_4BIT;
674 break;
675 }
676
677 if (ios->clock) {
678 /*
679 * Use mirror of ios->clock to prevent race with mmc
680 * core ios update when finding the minimum.
681 */
682 slot->clock = ios->clock;
683 }
684
685 switch (ios->power_mode) {
686 case MMC_POWER_UP:
687 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
688 break;
689 default:
690 break;
691 }
692}
693
694static int dw_mci_get_ro(struct mmc_host *mmc)
695{
696 int read_only;
697 struct dw_mci_slot *slot = mmc_priv(mmc);
698 struct dw_mci_board *brd = slot->host->pdata;
699
700 /* Use platform get_ro function, else try on board write protect */
701 if (brd->get_ro)
702 read_only = brd->get_ro(slot->id);
703 else
704 read_only =
705 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
706
707 dev_dbg(&mmc->class_dev, "card is %s\n",
708 read_only ? "read-only" : "read-write");
709
710 return read_only;
711}
712
713static int dw_mci_get_cd(struct mmc_host *mmc)
714{
715 int present;
716 struct dw_mci_slot *slot = mmc_priv(mmc);
717 struct dw_mci_board *brd = slot->host->pdata;
718
719 /* Use platform get_cd function, else try onboard card detect */
720 if (brd->get_cd)
721 present = !brd->get_cd(slot->id);
722 else
723 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
724 == 0 ? 1 : 0;
725
726 if (present)
727 dev_dbg(&mmc->class_dev, "card is present\n");
728 else
729 dev_dbg(&mmc->class_dev, "card is not present\n");
730
731 return present;
732}
733
734static const struct mmc_host_ops dw_mci_ops = {
735 .request = dw_mci_request,
736 .set_ios = dw_mci_set_ios,
737 .get_ro = dw_mci_get_ro,
738 .get_cd = dw_mci_get_cd,
739};
740
741static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
742 __releases(&host->lock)
743 __acquires(&host->lock)
744{
745 struct dw_mci_slot *slot;
746 struct mmc_host *prev_mmc = host->cur_slot->mmc;
747
748 WARN_ON(host->cmd || host->data);
749
750 host->cur_slot->mrq = NULL;
751 host->mrq = NULL;
752 if (!list_empty(&host->queue)) {
753 slot = list_entry(host->queue.next,
754 struct dw_mci_slot, queue_node);
755 list_del(&slot->queue_node);
756 dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
757 mmc_hostname(slot->mmc));
758 host->state = STATE_SENDING_CMD;
759 dw_mci_start_request(host, slot);
760 } else {
761 dev_vdbg(&host->pdev->dev, "list empty\n");
762 host->state = STATE_IDLE;
763 }
764
765 spin_unlock(&host->lock);
766 mmc_request_done(prev_mmc, mrq);
767 spin_lock(&host->lock);
768}
769
770static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
771{
772 u32 status = host->cmd_status;
773
774 host->cmd_status = 0;
775
776 /* Read the response from the card (up to 16 bytes) */
777 if (cmd->flags & MMC_RSP_PRESENT) {
778 if (cmd->flags & MMC_RSP_136) {
779 cmd->resp[3] = mci_readl(host, RESP0);
780 cmd->resp[2] = mci_readl(host, RESP1);
781 cmd->resp[1] = mci_readl(host, RESP2);
782 cmd->resp[0] = mci_readl(host, RESP3);
783 } else {
784 cmd->resp[0] = mci_readl(host, RESP0);
785 cmd->resp[1] = 0;
786 cmd->resp[2] = 0;
787 cmd->resp[3] = 0;
788 }
789 }
790
791 if (status & SDMMC_INT_RTO)
792 cmd->error = -ETIMEDOUT;
793 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
794 cmd->error = -EILSEQ;
795 else if (status & SDMMC_INT_RESP_ERR)
796 cmd->error = -EIO;
797 else
798 cmd->error = 0;
799
800 if (cmd->error) {
801 /* newer ip versions need a delay between retries */
802 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
803 mdelay(20);
804
805 if (cmd->data) {
806 host->data = NULL;
807 dw_mci_stop_dma(host);
808 }
809 }
810}
811
812static void dw_mci_tasklet_func(unsigned long priv)
813{
814 struct dw_mci *host = (struct dw_mci *)priv;
815 struct mmc_data *data;
816 struct mmc_command *cmd;
817 enum dw_mci_state state;
818 enum dw_mci_state prev_state;
819 u32 status;
820
821 spin_lock(&host->lock);
822
823 state = host->state;
824 data = host->data;
825
826 do {
827 prev_state = state;
828
829 switch (state) {
830 case STATE_IDLE:
831 break;
832
833 case STATE_SENDING_CMD:
834 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
835 &host->pending_events))
836 break;
837
838 cmd = host->cmd;
839 host->cmd = NULL;
840 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
841 dw_mci_command_complete(host, host->mrq->cmd);
842 if (!host->mrq->data || cmd->error) {
843 dw_mci_request_end(host, host->mrq);
844 goto unlock;
845 }
846
847 prev_state = state = STATE_SENDING_DATA;
848 /* fall through */
849
850 case STATE_SENDING_DATA:
851 if (test_and_clear_bit(EVENT_DATA_ERROR,
852 &host->pending_events)) {
853 dw_mci_stop_dma(host);
854 if (data->stop)
855 send_stop_cmd(host, data);
856 state = STATE_DATA_ERROR;
857 break;
858 }
859
860 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
861 &host->pending_events))
862 break;
863
864 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
865 prev_state = state = STATE_DATA_BUSY;
866 /* fall through */
867
868 case STATE_DATA_BUSY:
869 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
870 &host->pending_events))
871 break;
872
873 host->data = NULL;
874 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
875 status = host->data_status;
876
877 if (status & DW_MCI_DATA_ERROR_FLAGS) {
878 if (status & SDMMC_INT_DTO) {
879 dev_err(&host->pdev->dev,
880 "data timeout error\n");
881 data->error = -ETIMEDOUT;
882 } else if (status & SDMMC_INT_DCRC) {
883 dev_err(&host->pdev->dev,
884 "data CRC error\n");
885 data->error = -EILSEQ;
886 } else {
887 dev_err(&host->pdev->dev,
888 "data FIFO error "
889 "(status=%08x)\n",
890 status);
891 data->error = -EIO;
892 }
893 } else {
894 data->bytes_xfered = data->blocks * data->blksz;
895 data->error = 0;
896 }
897
898 if (!data->stop) {
899 dw_mci_request_end(host, host->mrq);
900 goto unlock;
901 }
902
903 prev_state = state = STATE_SENDING_STOP;
904 if (!data->error)
905 send_stop_cmd(host, data);
906 /* fall through */
907
908 case STATE_SENDING_STOP:
909 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
910 &host->pending_events))
911 break;
912
913 host->cmd = NULL;
914 dw_mci_command_complete(host, host->mrq->stop);
915 dw_mci_request_end(host, host->mrq);
916 goto unlock;
917
918 case STATE_DATA_ERROR:
919 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
920 &host->pending_events))
921 break;
922
923 state = STATE_DATA_BUSY;
924 break;
925 }
926 } while (state != prev_state);
927
928 host->state = state;
929unlock:
930 spin_unlock(&host->lock);
931
932}
933
934static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
935{
936 u16 *pdata = (u16 *)buf;
937
938 WARN_ON(cnt % 2 != 0);
939
940 cnt = cnt >> 1;
941 while (cnt > 0) {
942 mci_writew(host, DATA, *pdata++);
943 cnt--;
944 }
945}
946
947static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
948{
949 u16 *pdata = (u16 *)buf;
950
951 WARN_ON(cnt % 2 != 0);
952
953 cnt = cnt >> 1;
954 while (cnt > 0) {
955 *pdata++ = mci_readw(host, DATA);
956 cnt--;
957 }
958}
959
960static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
961{
962 u32 *pdata = (u32 *)buf;
963
964 WARN_ON(cnt % 4 != 0);
965 WARN_ON((unsigned long)pdata & 0x3);
966
967 cnt = cnt >> 2;
968 while (cnt > 0) {
969 mci_writel(host, DATA, *pdata++);
970 cnt--;
971 }
972}
973
974static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
975{
976 u32 *pdata = (u32 *)buf;
977
978 WARN_ON(cnt % 4 != 0);
979 WARN_ON((unsigned long)pdata & 0x3);
980
981 cnt = cnt >> 2;
982 while (cnt > 0) {
983 *pdata++ = mci_readl(host, DATA);
984 cnt--;
985 }
986}
987
988static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
989{
990 u64 *pdata = (u64 *)buf;
991
992 WARN_ON(cnt % 8 != 0);
993
994 cnt = cnt >> 3;
995 while (cnt > 0) {
996 mci_writeq(host, DATA, *pdata++);
997 cnt--;
998 }
999}
1000
1001static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1002{
1003 u64 *pdata = (u64 *)buf;
1004
1005 WARN_ON(cnt % 8 != 0);
1006
1007 cnt = cnt >> 3;
1008 while (cnt > 0) {
1009 *pdata++ = mci_readq(host, DATA);
1010 cnt--;
1011 }
1012}
1013
1014static void dw_mci_read_data_pio(struct dw_mci *host)
1015{
1016 struct scatterlist *sg = host->sg;
1017 void *buf = sg_virt(sg);
1018 unsigned int offset = host->pio_offset;
1019 struct mmc_data *data = host->data;
1020 int shift = host->data_shift;
1021 u32 status;
1022 unsigned int nbytes = 0, len, old_len, count = 0;
1023
1024 do {
1025 len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
1026 if (count == 0)
1027 old_len = len;
1028
1029 if (offset + len <= sg->length) {
1030 host->pull_data(host, (void *)(buf + offset), len);
1031
1032 offset += len;
1033 nbytes += len;
1034
1035 if (offset == sg->length) {
1036 flush_dcache_page(sg_page(sg));
1037 host->sg = sg = sg_next(sg);
1038 if (!sg)
1039 goto done;
1040
1041 offset = 0;
1042 buf = sg_virt(sg);
1043 }
1044 } else {
1045 unsigned int remaining = sg->length - offset;
1046 host->pull_data(host, (void *)(buf + offset),
1047 remaining);
1048 nbytes += remaining;
1049
1050 flush_dcache_page(sg_page(sg));
1051 host->sg = sg = sg_next(sg);
1052 if (!sg)
1053 goto done;
1054
1055 offset = len - remaining;
1056 buf = sg_virt(sg);
1057 host->pull_data(host, buf, offset);
1058 nbytes += offset;
1059 }
1060
1061 status = mci_readl(host, MINTSTS);
1062 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1063 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1064 host->data_status = status;
1065 data->bytes_xfered += nbytes;
1066 smp_wmb();
1067
1068 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1069
1070 tasklet_schedule(&host->tasklet);
1071 return;
1072 }
1073 count++;
 1074	} while (status & SDMMC_INT_RXDR); /* if the RXDR is ready, read again */
1075 len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
1076 host->pio_offset = offset;
1077 data->bytes_xfered += nbytes;
1078 return;
1079
1080done:
1081 data->bytes_xfered += nbytes;
1082 smp_wmb();
1083 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1084}
1085
1086static void dw_mci_write_data_pio(struct dw_mci *host)
1087{
1088 struct scatterlist *sg = host->sg;
1089 void *buf = sg_virt(sg);
1090 unsigned int offset = host->pio_offset;
1091 struct mmc_data *data = host->data;
1092 int shift = host->data_shift;
1093 u32 status;
1094 unsigned int nbytes = 0, len;
1095
1096 do {
1097 len = SDMMC_FIFO_SZ -
1098 (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
1099 if (offset + len <= sg->length) {
1100 host->push_data(host, (void *)(buf + offset), len);
1101
1102 offset += len;
1103 nbytes += len;
1104 if (offset == sg->length) {
1105 host->sg = sg = sg_next(sg);
1106 if (!sg)
1107 goto done;
1108
1109 offset = 0;
1110 buf = sg_virt(sg);
1111 }
1112 } else {
1113 unsigned int remaining = sg->length - offset;
1114
1115 host->push_data(host, (void *)(buf + offset),
1116 remaining);
1117 nbytes += remaining;
1118
1119 host->sg = sg = sg_next(sg);
1120 if (!sg)
1121 goto done;
1122
1123 offset = len - remaining;
1124 buf = sg_virt(sg);
1125 host->push_data(host, (void *)buf, offset);
1126 nbytes += offset;
1127 }
1128
1129 status = mci_readl(host, MINTSTS);
1130 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1131 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1132 host->data_status = status;
1133 data->bytes_xfered += nbytes;
1134
1135 smp_wmb();
1136
1137 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1138
1139 tasklet_schedule(&host->tasklet);
1140 return;
1141 }
1142 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
1143
1144 host->pio_offset = offset;
1145 data->bytes_xfered += nbytes;
1146
1147 return;
1148
1149done:
1150 data->bytes_xfered += nbytes;
1151 smp_wmb();
1152 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1153}
1154
1155static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1156{
1157 if (!host->cmd_status)
1158 host->cmd_status = status;
1159
1160 smp_wmb();
1161
1162 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1163 tasklet_schedule(&host->tasklet);
1164}
1165
1166static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1167{
1168 struct dw_mci *host = dev_id;
1169 u32 status, pending;
1170 unsigned int pass_count = 0;
1171
1172 do {
1173 status = mci_readl(host, RINTSTS);
1174 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
1175
1176 /*
1177 * DTO fix - version 2.10a and below, and only if internal DMA
1178 * is configured.
1179 */
1180 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
1181 if (!pending &&
1182 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
1183 pending |= SDMMC_INT_DATA_OVER;
1184 }
1185
1186 if (!pending)
1187 break;
1188
1189 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
1190 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
1191 host->cmd_status = status;
1192 smp_wmb();
1193 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1194 tasklet_schedule(&host->tasklet);
1195 }
1196
1197 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
1198 /* if there is an error report DATA_ERROR */
1199 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
1200 host->data_status = status;
1201 smp_wmb();
1202 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1203 tasklet_schedule(&host->tasklet);
1204 }
1205
1206 if (pending & SDMMC_INT_DATA_OVER) {
1207 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1208 if (!host->data_status)
1209 host->data_status = status;
1210 smp_wmb();
1211 if (host->dir_status == DW_MCI_RECV_STATUS) {
1212 if (host->sg != NULL)
1213 dw_mci_read_data_pio(host);
1214 }
1215 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1216 tasklet_schedule(&host->tasklet);
1217 }
1218
1219 if (pending & SDMMC_INT_RXDR) {
1220 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1221 if (host->sg)
1222 dw_mci_read_data_pio(host);
1223 }
1224
1225 if (pending & SDMMC_INT_TXDR) {
1226 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1227 if (host->sg)
1228 dw_mci_write_data_pio(host);
1229 }
1230
1231 if (pending & SDMMC_INT_CMD_DONE) {
1232 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
1233 dw_mci_cmd_interrupt(host, status);
1234 }
1235
1236 if (pending & SDMMC_INT_CD) {
1237 mci_writel(host, RINTSTS, SDMMC_INT_CD);
1238 tasklet_schedule(&host->card_tasklet);
1239 }
1240
1241 } while (pass_count++ < 5);
1242
1243#ifdef CONFIG_MMC_DW_IDMAC
1244 /* Handle DMA interrupts */
1245 pending = mci_readl(host, IDSTS);
1246 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1247 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1248 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
1249 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1250 host->dma_ops->complete(host);
1251 }
1252#endif
1253
1254 return IRQ_HANDLED;
1255}
1256
1257static void dw_mci_tasklet_card(unsigned long data)
1258{
1259 struct dw_mci *host = (struct dw_mci *)data;
1260 int i;
1261
1262 for (i = 0; i < host->num_slots; i++) {
1263 struct dw_mci_slot *slot = host->slot[i];
1264 struct mmc_host *mmc = slot->mmc;
1265 struct mmc_request *mrq;
1266 int present;
1267 u32 ctrl;
1268
1269 present = dw_mci_get_cd(mmc);
1270 while (present != slot->last_detect_state) {
1271 spin_lock(&host->lock);
1272
1273 dev_dbg(&slot->mmc->class_dev, "card %s\n",
1274 present ? "inserted" : "removed");
1275
1276 /* Card change detected */
1277 slot->last_detect_state = present;
1278
1279 /* Power up slot */
1280 if (present != 0) {
1281 if (host->pdata->setpower)
1282 host->pdata->setpower(slot->id,
1283 mmc->ocr_avail);
1284
1285 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1286 }
1287
1288 /* Clean up queue if present */
1289 mrq = slot->mrq;
1290 if (mrq) {
1291 if (mrq == host->mrq) {
1292 host->data = NULL;
1293 host->cmd = NULL;
1294
1295 switch (host->state) {
1296 case STATE_IDLE:
1297 break;
1298 case STATE_SENDING_CMD:
1299 mrq->cmd->error = -ENOMEDIUM;
1300 if (!mrq->data)
1301 break;
1302 /* fall through */
1303 case STATE_SENDING_DATA:
1304 mrq->data->error = -ENOMEDIUM;
1305 dw_mci_stop_dma(host);
1306 break;
1307 case STATE_DATA_BUSY:
1308 case STATE_DATA_ERROR:
1309 if (mrq->data->error == -EINPROGRESS)
1310 mrq->data->error = -ENOMEDIUM;
1311 if (!mrq->stop)
1312 break;
1313 /* fall through */
1314 case STATE_SENDING_STOP:
1315 mrq->stop->error = -ENOMEDIUM;
1316 break;
1317 }
1318
1319 dw_mci_request_end(host, mrq);
1320 } else {
1321 list_del(&slot->queue_node);
1322 mrq->cmd->error = -ENOMEDIUM;
1323 if (mrq->data)
1324 mrq->data->error = -ENOMEDIUM;
1325 if (mrq->stop)
1326 mrq->stop->error = -ENOMEDIUM;
1327
1328 spin_unlock(&host->lock);
1329 mmc_request_done(slot->mmc, mrq);
1330 spin_lock(&host->lock);
1331 }
1332 }
1333
1334 /* Power down slot */
1335 if (present == 0) {
1336 if (host->pdata->setpower)
1337 host->pdata->setpower(slot->id, 0);
1338 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1339
1340 /*
1341 * Clear down the FIFO - doing so generates a
1342 * block interrupt, hence setting the
1343 * scatter-gather pointer to NULL.
1344 */
1345 host->sg = NULL;
1346
1347 ctrl = mci_readl(host, CTRL);
1348 ctrl |= SDMMC_CTRL_FIFO_RESET;
1349 mci_writel(host, CTRL, ctrl);
1350
1351#ifdef CONFIG_MMC_DW_IDMAC
1352 ctrl = mci_readl(host, BMOD);
1353 ctrl |= 0x01; /* Software reset of DMA */
1354 mci_writel(host, BMOD, ctrl);
1355#endif
1356
1357 }
1358
1359 spin_unlock(&host->lock);
1360 present = dw_mci_get_cd(mmc);
1361 }
1362
1363 mmc_detect_change(slot->mmc,
1364 msecs_to_jiffies(host->pdata->detect_delay_ms));
1365 }
1366}
1367
1368static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1369{
1370 struct mmc_host *mmc;
1371 struct dw_mci_slot *slot;
1372
1373 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->pdev->dev);
1374 if (!mmc)
1375 return -ENOMEM;
1376
1377 slot = mmc_priv(mmc);
1378 slot->id = id;
1379 slot->mmc = mmc;
1380 slot->host = host;
1381
1382 mmc->ops = &dw_mci_ops;
1383 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
1384 mmc->f_max = host->bus_hz;
1385
1386 if (host->pdata->get_ocr)
1387 mmc->ocr_avail = host->pdata->get_ocr(id);
1388 else
1389 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1390
1391 /*
1392 * Start with slot power disabled, it will be enabled when a card
1393 * is detected.
1394 */
1395 if (host->pdata->setpower)
1396 host->pdata->setpower(id, 0);
1397
1398 mmc->caps = 0;
1399 if (host->pdata->get_bus_wd)
1400 if (host->pdata->get_bus_wd(slot->id) >= 4)
1401 mmc->caps |= MMC_CAP_4_BIT_DATA;
1402
1403 if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
1404 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
1405
1406#ifdef CONFIG_MMC_DW_IDMAC
1407 mmc->max_segs = host->ring_size;
1408 mmc->max_blk_size = 65536;
1409 mmc->max_blk_count = host->ring_size;
1410 mmc->max_seg_size = 0x1000;
1411 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
1412#else
1413 if (host->pdata->blk_settings) {
1414 mmc->max_segs = host->pdata->blk_settings->max_segs;
1415 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
1416 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
1417 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
1418 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
1419 } else {
1420 /* Useful defaults if platform data is unset. */
1421 mmc->max_segs = 64;
1422 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
1423 mmc->max_blk_count = 512;
1424 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1425 mmc->max_seg_size = mmc->max_req_size;
1426 }
1427#endif /* CONFIG_MMC_DW_IDMAC */
1428
1429 if (dw_mci_get_cd(mmc))
1430 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1431 else
1432 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1433
1434 host->slot[id] = slot;
1435 mmc_add_host(mmc);
1436
1437#if defined(CONFIG_DEBUG_FS)
1438 dw_mci_init_debugfs(slot);
1439#endif
1440
1441 /* Card initially undetected */
1442 slot->last_detect_state = 0;
1443
1444 return 0;
1445}
1446
1447static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
1448{
1449 /* Shutdown detect IRQ */
1450 if (slot->host->pdata->exit)
1451 slot->host->pdata->exit(id);
1452
1453 /* Debugfs stuff is cleaned up by mmc core */
1454 mmc_remove_host(slot->mmc);
1455 slot->host->slot[id] = NULL;
1456 mmc_free_host(slot->mmc);
1457}
1458
1459static void dw_mci_init_dma(struct dw_mci *host)
1460{
1461 /* Alloc memory for sg translation */
1462 host->sg_cpu = dma_alloc_coherent(&host->pdev->dev, PAGE_SIZE,
1463 &host->sg_dma, GFP_KERNEL);
1464 if (!host->sg_cpu) {
1465 dev_err(&host->pdev->dev, "%s: could not alloc DMA memory\n",
1466 __func__);
1467 goto no_dma;
1468 }
1469
1470 /* Determine which DMA interface to use */
1471#ifdef CONFIG_MMC_DW_IDMAC
1472 host->dma_ops = &dw_mci_idmac_ops;
1473 dev_info(&host->pdev->dev, "Using internal DMA controller.\n");
1474#endif
1475
1476 if (!host->dma_ops)
1477 goto no_dma;
1478
1479 if (host->dma_ops->init) {
1480 if (host->dma_ops->init(host)) {
1481 dev_err(&host->pdev->dev, "%s: Unable to initialize "
1482 "DMA Controller.\n", __func__);
1483 goto no_dma;
1484 }
1485 } else {
1486 dev_err(&host->pdev->dev, "DMA initialization not found.\n");
1487 goto no_dma;
1488 }
1489
1490 host->use_dma = 1;
1491 return;
1492
1493no_dma:
1494 dev_info(&host->pdev->dev, "Using PIO mode.\n");
1495 host->use_dma = 0;
1496 return;
1497}
1498
1499static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
1500{
1501 unsigned long timeout = jiffies + msecs_to_jiffies(500);
1502 unsigned int ctrl;
1503
1504 mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
1505 SDMMC_CTRL_DMA_RESET));
1506
1507 /* wait till resets clear */
1508 do {
1509 ctrl = mci_readl(host, CTRL);
1510 if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
1511 SDMMC_CTRL_DMA_RESET)))
1512 return true;
1513 } while (time_before(jiffies, timeout));
1514
1515 dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
1516
1517 return false;
1518}
1519
1520static int dw_mci_probe(struct platform_device *pdev)
1521{
1522 struct dw_mci *host;
1523 struct resource *regs;
1524 struct dw_mci_board *pdata;
1525 int irq, ret, i, width;
1526 u32 fifo_size;
1527
1528 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1529 if (!regs)
1530 return -ENXIO;
1531
1532 irq = platform_get_irq(pdev, 0);
1533 if (irq < 0)
1534 return irq;
1535
1536 host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL);
1537 if (!host)
1538 return -ENOMEM;
1539
1540 host->pdev = pdev;
1541 host->pdata = pdata = pdev->dev.platform_data;
1542 if (!pdata || !pdata->init) {
1543 dev_err(&pdev->dev,
1544 "Platform data must supply init function\n");
1545 ret = -ENODEV;
1546 goto err_freehost;
1547 }
1548
1549 if (!pdata->select_slot && pdata->num_slots > 1) {
1550 dev_err(&pdev->dev,
1551 "Platform data must supply select_slot function\n");
1552 ret = -ENODEV;
1553 goto err_freehost;
1554 }
1555
1556 if (!pdata->bus_hz) {
1557 dev_err(&pdev->dev,
1558 "Platform data must supply bus speed\n");
1559 ret = -ENODEV;
1560 goto err_freehost;
1561 }
1562
1563 host->bus_hz = pdata->bus_hz;
1564 host->quirks = pdata->quirks;
1565
1566 spin_lock_init(&host->lock);
1567 INIT_LIST_HEAD(&host->queue);
1568
1569 ret = -ENOMEM;
1570 host->regs = ioremap(regs->start, regs->end - regs->start + 1);
1571 if (!host->regs)
1572 goto err_freehost;
1573
1574 host->dma_ops = pdata->dma_ops;
1575 dw_mci_init_dma(host);
1576
1577 /*
1578 * Get the host data width - this assumes that HCON has been set with
1579 * the correct values.
1580 */
1581 i = (mci_readl(host, HCON) >> 7) & 0x7;
1582 if (!i) {
1583 host->push_data = dw_mci_push_data16;
1584 host->pull_data = dw_mci_pull_data16;
1585 width = 16;
1586 host->data_shift = 1;
1587 } else if (i == 2) {
1588 host->push_data = dw_mci_push_data64;
1589 host->pull_data = dw_mci_pull_data64;
1590 width = 64;
1591 host->data_shift = 3;
1592 } else {
1593 /* Check for a reserved value, and warn if it is */
1594 WARN((i != 1),
1595 "HCON reports a reserved host data width!\n"
1596 "Defaulting to 32-bit access.\n");
1597 host->push_data = dw_mci_push_data32;
1598 host->pull_data = dw_mci_pull_data32;
1599 width = 32;
1600 host->data_shift = 2;
1601 }
1602
1603 /* Reset all blocks */
1604 if (!mci_wait_reset(&pdev->dev, host)) {
1605 ret = -ENODEV;
1606 goto err_dmaunmap;
1607 }
1608
1609 /* Clear the interrupts for the host controller */
1610 mci_writel(host, RINTSTS, 0xFFFFFFFF);
1611 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
1612
1613 /* Put in max timeout */
1614 mci_writel(host, TMOUT, 0xFFFFFFFF);
1615
1616 /*
1617 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
1618 * Tx Mark = fifo_size / 2 DMA Size = 8
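	 *
	 * Editor's illustrative example: a 32-entry FIFO gives
	 * RxMark = 15 and TxMark = 16, i.e. FIFOTH is written as
	 * (0x2 << 28) | (15 << 16) | 16.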
1619 */
1620 fifo_size = mci_readl(host, FIFOTH);
1621 fifo_size = (fifo_size >> 16) & 0x7ff;
1622 mci_writel(host, FIFOTH, ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
1623 ((fifo_size/2) << 0)));
1624
1625 /* disable clock to CIU */
1626 mci_writel(host, CLKENA, 0);
1627 mci_writel(host, CLKSRC, 0);
1628
1629 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
1630 tasklet_init(&host->card_tasklet,
1631 dw_mci_tasklet_card, (unsigned long)host);
1632
1633 ret = request_irq(irq, dw_mci_interrupt, 0, "dw-mci", host);
1634 if (ret)
1635 goto err_dmaunmap;
1636
1637 platform_set_drvdata(pdev, host);
1638
1639 if (host->pdata->num_slots)
1640 host->num_slots = host->pdata->num_slots;
1641 else
1642 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
1643
1644 /* We need at least one slot to succeed */
1645 for (i = 0; i < host->num_slots; i++) {
1646 ret = dw_mci_init_slot(host, i);
1647 if (ret) {
1648 ret = -ENODEV;
1649 goto err_init_slot;
1650 }
1651 }
1652
1653 /*
1654 * Enable interrupts for command done, data over, data empty, card det,
1655 * receive ready and error such as transmit, receive timeout, crc error
1656 */
1657 mci_writel(host, RINTSTS, 0xFFFFFFFF);
1658 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
1659 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
1660 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
1661 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
1662
1663 dev_info(&pdev->dev, "DW MMC controller at irq %d, "
1664 "%d bit host data width\n", irq, width);
1665 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
1666 dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n");
1667
1668 return 0;
1669
1670err_init_slot:
1671 /* De-init any initialized slots */
1672 while (i > 0) {
1673 if (host->slot[i])
1674 dw_mci_cleanup_slot(host->slot[i], i);
1675 i--;
1676 }
1677 free_irq(irq, host);
1678
1679err_dmaunmap:
1680 if (host->use_dma && host->dma_ops->exit)
1681 host->dma_ops->exit(host);
1682 dma_free_coherent(&host->pdev->dev, PAGE_SIZE,
1683 host->sg_cpu, host->sg_dma);
1684 iounmap(host->regs);
1685
1686err_freehost:
1687 kfree(host);
1688 return ret;
1689}
1690
1691static int __exit dw_mci_remove(struct platform_device *pdev)
1692{
1693 struct dw_mci *host = platform_get_drvdata(pdev);
1694 int i;
1695
1696 mci_writel(host, RINTSTS, 0xFFFFFFFF);
1697 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
1698
1699 platform_set_drvdata(pdev, NULL);
1700
1701 for (i = 0; i < host->num_slots; i++) {
1702 dev_dbg(&pdev->dev, "remove slot %d\n", i);
1703 if (host->slot[i])
1704 dw_mci_cleanup_slot(host->slot[i], i);
1705 }
1706
1707 /* disable clock to CIU */
1708 mci_writel(host, CLKENA, 0);
1709 mci_writel(host, CLKSRC, 0);
1710
1711 free_irq(platform_get_irq(pdev, 0), host);
1712 dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
1713
1714 if (host->use_dma && host->dma_ops->exit)
1715 host->dma_ops->exit(host);
1716
1717 iounmap(host->regs);
1718
1719 kfree(host);
1720 return 0;
1721}
1722
1723#ifdef CONFIG_PM
1724/*
1725 * TODO: we should probably disable the clock to the card in the suspend path.
1726 */
1727static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
1728{
1729 int i, ret;
1730 struct dw_mci *host = platform_get_drvdata(pdev);
1731
1732 for (i = 0; i < host->num_slots; i++) {
1733 struct dw_mci_slot *slot = host->slot[i];
1734 if (!slot)
1735 continue;
1736 ret = mmc_suspend_host(slot->mmc);
1737 if (ret < 0) {
1738 while (--i >= 0) {
1739 slot = host->slot[i];
1740 if (slot)
1741 mmc_resume_host(host->slot[i]->mmc);
1742 }
1743 return ret;
1744 }
1745 }
1746
1747 return 0;
1748}
1749
1750static int dw_mci_resume(struct platform_device *pdev)
1751{
1752 int i, ret;
1753 struct dw_mci *host = platform_get_drvdata(pdev);
1754
1755 for (i = 0; i < host->num_slots; i++) {
1756 struct dw_mci_slot *slot = host->slot[i];
1757 if (!slot)
1758 continue;
1759 ret = mmc_resume_host(host->slot[i]->mmc);
1760 if (ret < 0)
1761 return ret;
1762 }
1763
1764 return 0;
1765}
1766#else
1767#define dw_mci_suspend NULL
1768#define dw_mci_resume NULL
1769#endif /* CONFIG_PM */
1770
1771static struct platform_driver dw_mci_driver = {
1772 .remove = __exit_p(dw_mci_remove),
1773 .suspend = dw_mci_suspend,
1774 .resume = dw_mci_resume,
1775 .driver = {
1776 .name = "dw_mmc",
1777 },
1778};
1779
1780static int __init dw_mci_init(void)
1781{
1782 return platform_driver_probe(&dw_mci_driver, dw_mci_probe);
1783}
1784
1785static void __exit dw_mci_exit(void)
1786{
1787 platform_driver_unregister(&dw_mci_driver);
1788}
1789
1790module_init(dw_mci_init);
1791module_exit(dw_mci_exit);
1792
1793MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
1794MODULE_AUTHOR("NXP Semiconductor VietNam");
1795MODULE_AUTHOR("Imagination Technologies Ltd");
1796MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
new file mode 100644
index 000000000000..5dd55a75233d
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc.h
@@ -0,0 +1,168 @@
1/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef _DW_MMC_H_
15#define _DW_MMC_H_
16
17#define SDMMC_CTRL 0x000
18#define SDMMC_PWREN 0x004
19#define SDMMC_CLKDIV 0x008
20#define SDMMC_CLKSRC 0x00c
21#define SDMMC_CLKENA 0x010
22#define SDMMC_TMOUT 0x014
23#define SDMMC_CTYPE 0x018
24#define SDMMC_BLKSIZ 0x01c
25#define SDMMC_BYTCNT 0x020
26#define SDMMC_INTMASK 0x024
27#define SDMMC_CMDARG 0x028
28#define SDMMC_CMD 0x02c
29#define SDMMC_RESP0 0x030
30#define SDMMC_RESP1 0x034
31#define SDMMC_RESP2 0x038
32#define SDMMC_RESP3 0x03c
33#define SDMMC_MINTSTS 0x040
34#define SDMMC_RINTSTS 0x044
35#define SDMMC_STATUS 0x048
36#define SDMMC_FIFOTH 0x04c
37#define SDMMC_CDETECT 0x050
38#define SDMMC_WRTPRT 0x054
39#define SDMMC_GPIO 0x058
40#define SDMMC_TCBCNT 0x05c
41#define SDMMC_TBBCNT 0x060
42#define SDMMC_DEBNCE 0x064
43#define SDMMC_USRID 0x068
44#define SDMMC_VERID 0x06c
45#define SDMMC_HCON 0x070
46#define SDMMC_BMOD 0x080
47#define SDMMC_PLDMND 0x084
48#define SDMMC_DBADDR 0x088
49#define SDMMC_IDSTS 0x08c
50#define SDMMC_IDINTEN 0x090
51#define SDMMC_DSCADDR 0x094
52#define SDMMC_BUFADDR 0x098
53#define SDMMC_DATA 0x100
54#define SDMMC_DATA_ADR 0x100
55
56/* shift bit field */
57#define _SBF(f, v) ((v) << (f))
58
59/* Control register defines */
60#define SDMMC_CTRL_USE_IDMAC BIT(25)
61#define SDMMC_CTRL_CEATA_INT_EN BIT(11)
62#define SDMMC_CTRL_SEND_AS_CCSD BIT(10)
63#define SDMMC_CTRL_SEND_CCSD BIT(9)
64#define SDMMC_CTRL_ABRT_READ_DATA BIT(8)
65#define SDMMC_CTRL_SEND_IRQ_RESP BIT(7)
66#define SDMMC_CTRL_READ_WAIT BIT(6)
67#define SDMMC_CTRL_DMA_ENABLE BIT(5)
68#define SDMMC_CTRL_INT_ENABLE BIT(4)
69#define SDMMC_CTRL_DMA_RESET BIT(2)
70#define SDMMC_CTRL_FIFO_RESET BIT(1)
71#define SDMMC_CTRL_RESET BIT(0)
72/* Clock Enable register defines */
73#define SDMMC_CLKEN_LOW_PWR BIT(16)
74#define SDMMC_CLKEN_ENABLE BIT(0)
75/* time-out register defines */
76#define SDMMC_TMOUT_DATA(n) _SBF(8, (n))
77#define SDMMC_TMOUT_DATA_MSK 0xFFFFFF00
78#define SDMMC_TMOUT_RESP(n) ((n) & 0xFF)
79#define SDMMC_TMOUT_RESP_MSK 0xFF
80/* card-type register defines */
81#define SDMMC_CTYPE_8BIT BIT(16)
82#define SDMMC_CTYPE_4BIT BIT(0)
83#define SDMMC_CTYPE_1BIT 0
84/* Interrupt status & mask register defines */
85#define SDMMC_INT_SDIO BIT(16)
86#define SDMMC_INT_EBE BIT(15)
87#define SDMMC_INT_ACD BIT(14)
88#define SDMMC_INT_SBE BIT(13)
89#define SDMMC_INT_HLE BIT(12)
90#define SDMMC_INT_FRUN BIT(11)
91#define SDMMC_INT_HTO BIT(10)
92#define SDMMC_INT_DTO BIT(9)
93#define SDMMC_INT_RTO BIT(8)
94#define SDMMC_INT_DCRC BIT(7)
95#define SDMMC_INT_RCRC BIT(6)
96#define SDMMC_INT_RXDR BIT(5)
97#define SDMMC_INT_TXDR BIT(4)
98#define SDMMC_INT_DATA_OVER BIT(3)
99#define SDMMC_INT_CMD_DONE BIT(2)
100#define SDMMC_INT_RESP_ERR BIT(1)
101#define SDMMC_INT_CD BIT(0)
102#define SDMMC_INT_ERROR 0xbfc2
103/* Command register defines */
104#define SDMMC_CMD_START BIT(31)
105#define SDMMC_CMD_CCS_EXP BIT(23)
106#define SDMMC_CMD_CEATA_RD BIT(22)
107#define SDMMC_CMD_UPD_CLK BIT(21)
108#define SDMMC_CMD_INIT BIT(15)
109#define SDMMC_CMD_STOP BIT(14)
110#define SDMMC_CMD_PRV_DAT_WAIT BIT(13)
111#define SDMMC_CMD_SEND_STOP BIT(12)
112#define SDMMC_CMD_STRM_MODE BIT(11)
113#define SDMMC_CMD_DAT_WR BIT(10)
114#define SDMMC_CMD_DAT_EXP BIT(9)
115#define SDMMC_CMD_RESP_CRC BIT(8)
116#define SDMMC_CMD_RESP_LONG BIT(7)
117#define SDMMC_CMD_RESP_EXP BIT(6)
118#define SDMMC_CMD_INDX(n) ((n) & 0x1F)
119/* Status register defines */
120#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FF)
121#define SDMMC_FIFO_SZ 32
122/* Internal DMAC interrupt defines */
123#define SDMMC_IDMAC_INT_AI BIT(9)
124#define SDMMC_IDMAC_INT_NI BIT(8)
125#define SDMMC_IDMAC_INT_CES BIT(5)
126#define SDMMC_IDMAC_INT_DU BIT(4)
127#define SDMMC_IDMAC_INT_FBE BIT(2)
128#define SDMMC_IDMAC_INT_RI BIT(1)
129#define SDMMC_IDMAC_INT_TI BIT(0)
130/* Internal DMAC bus mode bits */
131#define SDMMC_IDMAC_ENABLE BIT(7)
132#define SDMMC_IDMAC_FB BIT(1)
133#define SDMMC_IDMAC_SWRESET BIT(0)
134
135/* Register access macros */
136#define mci_readl(dev, reg) \
137 __raw_readl(dev->regs + SDMMC_##reg)
138#define mci_writel(dev, reg, value) \
139 __raw_writel((value), dev->regs + SDMMC_##reg)
140
141/* 16-bit FIFO access macros */
142#define mci_readw(dev, reg) \
143 __raw_readw(dev->regs + SDMMC_##reg)
144#define mci_writew(dev, reg, value) \
145 __raw_writew((value), dev->regs + SDMMC_##reg)
146
147/* 64-bit FIFO access macros */
148#ifdef readq
149#define mci_readq(dev, reg) \
150 __raw_readq(dev->regs + SDMMC_##reg)
151#define mci_writeq(dev, reg, value) \
152 __raw_writeq((value), dev->regs + SDMMC_##reg)
153#else
154/*
155 * Dummy readq implementation for architectures that don't define it.
156 *
157 * We would assume that none of these architectures would configure
158 * the IP block with a 64bit FIFO width, so this code will never be
159 * executed on those machines. Defining these macros here keeps the
160 * rest of the code free from ifdefs.
161 */
162#define mci_readq(dev, reg) \
163 (*(volatile u64 __force *)(dev->regs + SDMMC_##reg))
164#define mci_writeq(dev, reg, value) \
165 (*(volatile u64 __force *)(dev->regs + SDMMC_##reg) = value)
166#endif
167
168#endif /* _DW_MMC_H_ */
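A note on the register access macros defined above: mci_readl()/mci_writel() paste the SDMMC_ prefix onto the register token, so callers pass the bare register name rather than the full offset macro. A minimal usage sketch (editor's illustration, not part of the patch; "host" is any pointer whose regs member holds the ioremapped base, such as struct dw_mci):

	u32 status;

	status = mci_readl(host, STATUS);	/* __raw_readl(host->regs + SDMMC_STATUS) */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);	/* clear all raw interrupt status bits */
	mci_writel(host, INTMASK, 0);		/* mask every interrupt source */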
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
new file mode 100644
index 000000000000..16b0261763ed
--- /dev/null
+++ b/include/linux/mmc/dw_mmc.h
@@ -0,0 +1,217 @@
1/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef _LINUX_MMC_DW_MMC_H_
15#define _LINUX_MMC_DW_MMC_H_
16
17#define MAX_MCI_SLOTS 2
18
19enum dw_mci_state {
20 STATE_IDLE = 0,
21 STATE_SENDING_CMD,
22 STATE_SENDING_DATA,
23 STATE_DATA_BUSY,
24 STATE_SENDING_STOP,
25 STATE_DATA_ERROR,
26};
27
28enum {
29 EVENT_CMD_COMPLETE = 0,
30 EVENT_XFER_COMPLETE,
31 EVENT_DATA_COMPLETE,
32 EVENT_DATA_ERROR,
33 EVENT_XFER_ERROR
34};
35
36struct mmc_data;
37
38/**
39 * struct dw_mci - MMC controller state shared between all slots
40 * @lock: Spinlock protecting the queue and associated data.
41 * @regs: Pointer to MMIO registers.
42 * @sg: Scatterlist entry currently being processed by PIO code, if any.
43 * @pio_offset: Offset into the current scatterlist entry.
44 * @cur_slot: The slot which is currently using the controller.
45 * @mrq: The request currently being processed on @cur_slot,
46 * or NULL if the controller is idle.
47 * @cmd: The command currently being sent to the card, or NULL.
48 * @data: The data currently being transferred, or NULL if no data
49 * transfer is in progress.
 50 * @use_dma: Whether the DMA channel has been initialized.
51 * @sg_dma: Bus address of DMA buffer.
52 * @sg_cpu: Virtual address of DMA buffer.
53 * @dma_ops: Pointer to platform-specific DMA callbacks.
54 * @cmd_status: Snapshot of SR taken upon completion of the current
55 * command. Only valid when EVENT_CMD_COMPLETE is pending.
56 * @data_status: Snapshot of SR taken upon completion of the current
57 * data transfer. Only valid when EVENT_DATA_COMPLETE or
58 * EVENT_DATA_ERROR is pending.
59 * @stop_cmdr: Value to be loaded into CMDR when the stop command is
60 * to be sent.
61 * @dir_status: Direction of current transfer.
62 * @tasklet: Tasklet running the request state machine.
63 * @card_tasklet: Tasklet handling card detect.
64 * @pending_events: Bitmask of events flagged by the interrupt handler
65 * to be processed by the tasklet.
66 * @completed_events: Bitmask of events which the state machine has
67 * processed.
 68 * @state: Current state of the request-processing state machine.
69 * @queue: List of slots waiting for access to the controller.
 70 * @bus_hz: The rate of the bus interface clock in Hz. This forms the
 71 * basis for MMC bus rate and timeout calculations.
72 * @current_speed: Configured rate of the controller.
73 * @num_slots: Number of slots available.
74 * @pdev: Platform device associated with the MMC controller.
75 * @pdata: Platform data associated with the MMC controller.
76 * @slot: Slots sharing this MMC controller.
77 * @data_shift: log2 of FIFO item size.
78 * @push_data: Pointer to FIFO push function.
79 * @pull_data: Pointer to FIFO pull function.
80 * @quirks: Set of quirks that apply to specific versions of the IP.
81 *
82 * Locking
83 * =======
84 *
85 * @lock is a softirq-safe spinlock protecting @queue as well as
86 * @cur_slot, @mrq and @state. These must always be updated
87 * at the same time while holding @lock.
88 *
89 * The @mrq field of struct dw_mci_slot is also protected by @lock,
90 * and must always be written at the same time as the slot is added to
91 * @queue.
92 *
93 * @pending_events and @completed_events are accessed using atomic bit
94 * operations, so they don't need any locking.
95 *
96 * None of the fields touched by the interrupt handler need any
97 * locking. However, ordering is important: Before EVENT_DATA_ERROR or
98 * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
99 * interrupts must be disabled and @data_status updated with a
100 * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
 101 * command-done interrupt must be disabled and @cmd_status updated with
 102 * a snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
103 * bytes_xfered field of @data must be written. This is ensured by
104 * using barriers.
105 */
106struct dw_mci {
107 spinlock_t lock;
108 void __iomem *regs;
109
110 struct scatterlist *sg;
111 unsigned int pio_offset;
112
113 struct dw_mci_slot *cur_slot;
114 struct mmc_request *mrq;
115 struct mmc_command *cmd;
116 struct mmc_data *data;
117
 118 /* DMA interface members */
119 int use_dma;
120
121 dma_addr_t sg_dma;
122 void *sg_cpu;
123 struct dw_mci_dma_ops *dma_ops;
124#ifdef CONFIG_MMC_DW_IDMAC
125 unsigned int ring_size;
126#else
127 struct dw_mci_dma_data *dma_data;
128#endif
129 u32 cmd_status;
130 u32 data_status;
131 u32 stop_cmdr;
132 u32 dir_status;
133 struct tasklet_struct tasklet;
134 struct tasklet_struct card_tasklet;
135 unsigned long pending_events;
136 unsigned long completed_events;
137 enum dw_mci_state state;
138 struct list_head queue;
139
140 u32 bus_hz;
141 u32 current_speed;
142 u32 num_slots;
143 struct platform_device *pdev;
144 struct dw_mci_board *pdata;
145 struct dw_mci_slot *slot[MAX_MCI_SLOTS];
146
147 /* FIFO push and pull */
148 int data_shift;
149 void (*push_data)(struct dw_mci *host, void *buf, int cnt);
150 void (*pull_data)(struct dw_mci *host, void *buf, int cnt);
151
152 /* Workaround flags */
153 u32 quirks;
154};
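To make the locking and ordering rules from the kernel-doc above concrete, here is a minimal sketch (hypothetical helpers, not part of the patch): @cur_slot, @mrq and @state are only changed together under @lock, while the interrupt handler records a status snapshot and then signals the tasklet with an atomic bit operation.

static void example_claim_controller(struct dw_mci *host,
				     struct dw_mci_slot *slot,
				     struct mmc_request *mrq)
{
	spin_lock_bh(&host->lock);		/* softirq-safe spinlock */
	host->cur_slot = slot;
	host->mrq = mrq;
	host->state = STATE_SENDING_CMD;
	spin_unlock_bh(&host->lock);
}

/* In interrupt context: no lock taken, only ordering and atomic bit ops. */
static void example_signal_cmd_done(struct dw_mci *host, u32 status)
{
	host->cmd_status = status;	/* snapshot written first ... */
	smp_wmb();			/* ... made visible before the event */
	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}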
155
156/* DMA ops for Internal/External DMAC interface */
157struct dw_mci_dma_ops {
158 /* DMA Ops */
159 int (*init)(struct dw_mci *host);
160 void (*start)(struct dw_mci *host, unsigned int sg_len);
161 void (*complete)(struct dw_mci *host);
162 void (*stop)(struct dw_mci *host);
163 void (*cleanup)(struct dw_mci *host);
164 void (*exit)(struct dw_mci *host);
165};
166
167/* IP Quirks/flags. */
168/* No special quirks or flags to cater for */
169#define DW_MCI_QUIRK_NONE 0
170/* DTO fix for command transmission with IDMAC configured */
171#define DW_MCI_QUIRK_IDMAC_DTO 1
172/* delay needed between retries on some 2.11a implementations */
173#define DW_MCI_QUIRK_RETRY_DELAY 2
 174/* High Speed Capable - Supports HS cards (up to 50 MHz) */
175#define DW_MCI_QUIRK_HIGHSPEED 4
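Since these values are distinct bits, a board can combine them in its platform data and the driver can test them individually, for example (illustrative only):

	.quirks = DW_MCI_QUIRK_IDMAC_DTO | DW_MCI_QUIRK_HIGHSPEED,

	if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
		mdelay(10);		/* arbitrary example delay */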
176
177
178struct dma_pdata;
179
180struct block_settings {
181 unsigned short max_segs; /* see blk_queue_max_segments */
182 unsigned int max_blk_size; /* maximum size of one mmc block */
 183 unsigned int max_blk_count; /* maximum number of blocks in one req */
 184 unsigned int max_req_size; /* maximum number of bytes in one req */
185 unsigned int max_seg_size; /* see blk_queue_max_segment_size */
186};
187
188/* Board platform data */
189struct dw_mci_board {
190 u32 num_slots;
191
192 u32 quirks; /* Workaround / Quirk flags */
193 unsigned int bus_hz; /* Bus speed */
194
 195 /* delay in ms before detecting cards after interrupt */
196 u32 detect_delay_ms;
197
 198 int (*init)(u32 slot_id, irq_handler_t, void *);
199 int (*get_ro)(u32 slot_id);
200 int (*get_cd)(u32 slot_id);
201 int (*get_ocr)(u32 slot_id);
202 int (*get_bus_wd)(u32 slot_id);
203 /*
204 * Enable power to selected slot and set voltage to desired level.
 205 * Voltage levels are specified using the MMC_VDD_xxx defines
 206 * in linux/mmc/host.h.
207 */
208 void (*setpower)(u32 slot_id, u32 volt);
209 void (*exit)(u32 slot_id);
210 void (*select_slot)(u32 slot_id);
211
212 struct dw_mci_dma_ops *dma_ops;
213 struct dma_pdata *data;
214 struct block_settings *blk_settings;
215};
216
217#endif /* _LINUX_MMC_DW_MMC_H_ */
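Finally, a hedged sketch of what a board file might supply through struct dw_mci_board; every name and value here is hypothetical, and only the field names and callback prototypes come from the header above.

/* Hypothetical board support sketch, not part of the patch. */
static int example_get_cd(u32 slot_id)
{
	return 1;	/* card always present on this (imaginary) board */
}

static int example_get_bus_wd(u32 slot_id)
{
	return 4;	/* 4-bit bus width on every slot */
}

static struct dw_mci_board example_pdata = {
	.num_slots	 = 1,
	.quirks		 = DW_MCI_QUIRK_HIGHSPEED,
	.bus_hz		 = 50 * 1000 * 1000,	/* 50 MHz bus clock */
	.detect_delay_ms = 200,
	.get_cd		 = example_get_cd,
	.get_bus_wd	 = example_get_bus_wd,
};

The driver reaches this structure through the @pdata member of struct dw_mci described earlier.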