author    Linus Torvalds <torvalds@linux-foundation.org>  2011-01-06 19:50:35 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-01-06 19:50:35 -0500
commit    3c0cb7c31c206aaedb967e44b98442bbeb17a6c4 (patch)
tree      3ecba45d7ffae4fba4a5aafaef4af5b0b1105bde /drivers/mmc
parent    f70f5b9dc74ca7d0a64c4ead3fb28da09dc1b234 (diff)
parent    404a02cbd2ae8bf256a2fa1169bdfe86bb5ebb34 (diff)
Merge branch 'devel' of master.kernel.org:/home/rmk/linux-2.6-arm
* 'devel' of master.kernel.org:/home/rmk/linux-2.6-arm: (416 commits)
  ARM: DMA: add support for DMA debugging
  ARM: PL011: add DMA burst threshold support for ST variants
  ARM: PL011: Add support for transmit DMA
  ARM: PL011: Ensure IRQs are disabled in UART interrupt handler
  ARM: PL011: Separate hardware FIFO size from TTY FIFO size
  ARM: PL011: Allow better handling of vendor data
  ARM: PL011: Ensure error flags are clear at startup
  ARM: PL011: include revision number in boot-time port printk
  ARM: vexpress: add sched_clock() for Versatile Express
  ARM i.MX53: Make MX53 EVK bootable
  ARM i.MX53: Some bug fix about MX53 MSL code
  ARM: 6607/1: sa1100: Update platform device registration
  ARM: 6606/1: sa1100: Fix platform device registration
  ARM i.MX51: rename IPU irqs
  ARM i.MX51: Add ipu clock support
  ARM: imx/mx27_3ds: Add PMIC support
  ARM: DMA: Replace page_to_dma()/dma_to_page() with pfn_to_dma()/dma_to_pfn()
  mx51: fix usb clock support
  MX51: Add support for usb host 2
  arch/arm/plat-mxc/ehci.c: fix errors/typos
  ...
Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/host/mmci.c  207
-rw-r--r--  drivers/mmc/host/mmci.h    9
2 files changed, 186 insertions(+), 30 deletions(-)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 87b4fc6c98c2..563022825667 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -19,6 +19,7 @@
 #include <linux/highmem.h>
 #include <linux/log2.h>
 #include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
 #include <linux/amba/bus.h>
 #include <linux/clk.h>
 #include <linux/scatterlist.h>
@@ -45,6 +46,12 @@ static unsigned int fmax = 515633;
  * is asserted (likewise for RX)
  * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
  * is asserted (likewise for RX)
+ * @broken_blockend: the MCI_DATABLOCKEND is broken on the hardware
+ *		and will not work at all.
+ * @broken_blockend_dma: the MCI_DATABLOCKEND is broken on the hardware when
+ *		using DMA.
+ * @sdio: variant supports SDIO
+ * @st_clkdiv: true if using a ST-specific clock divider algorithm
  */
 struct variant_data {
 	unsigned int		clkreg;
@@ -52,6 +59,10 @@ struct variant_data {
 	unsigned int		datalength_bits;
 	unsigned int		fifosize;
 	unsigned int		fifohalfsize;
+	bool			broken_blockend;
+	bool			broken_blockend_dma;
+	bool			sdio;
+	bool			st_clkdiv;
 };
 
 static struct variant_data variant_arm = {
@@ -65,6 +76,8 @@ static struct variant_data variant_u300 = {
 	.fifohalfsize		= 8 * 4,
 	.clkreg_enable		= 1 << 13, /* HWFCEN */
 	.datalength_bits	= 16,
+	.broken_blockend_dma	= true,
+	.sdio			= true,
 };
 
 static struct variant_data variant_ux500 = {
@@ -73,7 +86,11 @@ static struct variant_data variant_ux500 = {
 	.clkreg			= MCI_CLK_ENABLE,
 	.clkreg_enable		= 1 << 14, /* HWFCEN */
 	.datalength_bits	= 24,
+	.broken_blockend	= true,
+	.sdio			= true,
+	.st_clkdiv		= true,
 };
+
 /*
  * This must be called with host->lock held
  */
@@ -86,7 +103,22 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
 	if (desired >= host->mclk) {
 		clk = MCI_CLK_BYPASS;
 		host->cclk = host->mclk;
+	} else if (variant->st_clkdiv) {
+		/*
+		 * DB8500 TRM says f = mclk / (clkdiv + 2)
+		 * => clkdiv = (mclk / f) - 2
+		 * Round the divider up so we don't exceed the max
+		 * frequency
+		 */
+		clk = DIV_ROUND_UP(host->mclk, desired) - 2;
+		if (clk >= 256)
+			clk = 255;
+		host->cclk = host->mclk / (clk + 2);
 	} else {
+		/*
+		 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
+		 * => clkdiv = mclk / (2 * f) - 1
+		 */
 		clk = host->mclk / (2 * desired) - 1;
 		if (clk >= 256)
 			clk = 255;
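As an aside, the two divider formulas in the hunk above can be sanity-checked outside the driver. The sketch below is plain C with made-up mclk/desired values (not taken from the patch); it only reproduces the arithmetic of the ST and PL180 clock schemes.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int mclk = 100000000;		/* hypothetical 100 MHz block clock */
	unsigned int desired = 25000000;	/* hypothetical 25 MHz target */
	unsigned int clk;

	/* ST (DB8500) scheme: f = mclk / (clkdiv + 2) */
	clk = DIV_ROUND_UP(mclk, desired) - 2;
	if (clk >= 256)
		clk = 255;
	printf("ST:    clkdiv=%u cclk=%u Hz\n", clk, mclk / (clk + 2));

	/* PL180 scheme: f = mclk / (2 * (clkdiv + 1)) */
	clk = mclk / (2 * desired) - 1;
	if (clk >= 256)
		clk = 255;
	printf("PL180: clkdiv=%u cclk=%u Hz\n", clk, mclk / (2 * (clk + 1)));

	return 0;
}

With these numbers both schemes land exactly on 25 MHz; for a target that does not divide evenly, the DIV_ROUND_UP in the ST path keeps cclk at or below the requested frequency.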
@@ -129,10 +161,26 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
 	spin_lock(&host->lock);
 }
 
+static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
+{
+	void __iomem *base = host->base;
+
+	if (host->singleirq) {
+		unsigned int mask0 = readl(base + MMCIMASK0);
+
+		mask0 &= ~MCI_IRQ1MASK;
+		mask0 |= mask;
+
+		writel(mask0, base + MMCIMASK0);
+	}
+
+	writel(mask, base + MMCIMASK1);
+}
+
 static void mmci_stop_data(struct mmci_host *host)
 {
 	writel(0, host->base + MMCIDATACTRL);
-	writel(0, host->base + MMCIMASK1);
+	mmci_set_mask1(host, 0);
 	host->data = NULL;
 }
 
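The masking arithmetic in the new mmci_set_mask1() helper can also be exercised on its own. In the sketch below the bit positions follow mmci.h, but the register value is invented; the point is only how IRQ1-class sources get folded into MASK0 on single-IRQ hardware.

#include <stdio.h>

#define MCI_TXFIFOHALFEMPTYMASK	(1 << 14)
#define MCI_RXFIFOHALFFULLMASK	(1 << 15)
#define MCI_RXDATAAVLBLMASK	(1 << 21)
#define MCI_IRQ1MASK \
	(MCI_RXFIFOHALFFULLMASK | MCI_RXDATAAVLBLMASK | MCI_TXFIFOHALFEMPTYMASK)

int main(void)
{
	unsigned int mask0 = 0x000005ff;		/* invented current MMCIMASK0 value */
	unsigned int mask = MCI_RXDATAAVLBLMASK;	/* would normally go to MMCIMASK1 */

	/* single-IRQ case: drop the old IRQ1 class, then fold in the new mask */
	mask0 &= ~MCI_IRQ1MASK;
	mask0 |= mask;

	printf("combined MMCIMASK0 = 0x%08x\n", mask0);
	return 0;
}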
@@ -162,6 +210,8 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 	host->data = data;
 	host->size = data->blksz * data->blocks;
 	host->data_xfered = 0;
+	host->blockend = false;
+	host->dataend = false;
 
 	mmci_init_sg(host, data);
 
@@ -196,9 +246,14 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 		irqmask = MCI_TXFIFOHALFEMPTYMASK;
 	}
 
+	/* The ST Micro variants has a special bit to enable SDIO */
+	if (variant->sdio && host->mmc->card)
+		if (mmc_card_sdio(host->mmc->card))
+			datactrl |= MCI_ST_DPSM_SDIOEN;
+
 	writel(datactrl, base + MMCIDATACTRL);
 	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
-	writel(irqmask, base + MMCIMASK1);
+	mmci_set_mask1(host, irqmask);
 }
 
 static void
@@ -233,20 +288,9 @@ static void
 mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 	      unsigned int status)
 {
-	if (status & MCI_DATABLOCKEND) {
-		host->data_xfered += data->blksz;
-#ifdef CONFIG_ARCH_U300
-		/*
-		 * On the U300 some signal or other is
-		 * badly routed so that a data write does
-		 * not properly terminate with a MCI_DATAEND
-		 * status flag. This quirk will make writes
-		 * work again.
-		 */
-		if (data->flags & MMC_DATA_WRITE)
-			status |= MCI_DATAEND;
-#endif
-	}
+	struct variant_data *variant = host->variant;
+
+	/* First check for errors */
 	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
 		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
 		if (status & MCI_DATACRCFAIL)
@@ -255,7 +299,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 			data->error = -ETIMEDOUT;
 		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
 			data->error = -EIO;
-		status |= MCI_DATAEND;
+
+		/* Force-complete the transaction */
+		host->blockend = true;
+		host->dataend = true;
 
 		/*
 		 * We hit an error condition. Ensure that any data
@@ -273,9 +320,64 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 			local_irq_restore(flags);
 		}
 	}
-	if (status & MCI_DATAEND) {
+
+	/*
+	 * On ARM variants in PIO mode, MCI_DATABLOCKEND
+	 * is always sent first, and we increase the
+	 * transfered number of bytes for that IRQ. Then
+	 * MCI_DATAEND follows and we conclude the transaction.
+	 *
+	 * On the Ux500 single-IRQ variant MCI_DATABLOCKEND
+	 * doesn't seem to immediately clear from the status,
+	 * so we can't use it keep count when only one irq is
+	 * used because the irq will hit for other reasons, and
+	 * then the flag is still up. So we use the MCI_DATAEND
+	 * IRQ at the end of the entire transfer because
+	 * MCI_DATABLOCKEND is broken.
+	 *
+	 * In the U300, the IRQs can arrive out-of-order,
+	 * e.g. MCI_DATABLOCKEND sometimes arrives after MCI_DATAEND,
+	 * so for this case we use the flags "blockend" and
+	 * "dataend" to make sure both IRQs have arrived before
+	 * concluding the transaction. (This does not apply
+	 * to the Ux500 which doesn't fire MCI_DATABLOCKEND
+	 * at all.) In DMA mode it suffers from the same problem
+	 * as the Ux500.
+	 */
+	if (status & MCI_DATABLOCKEND) {
+		/*
+		 * Just being a little over-cautious, we do not
+		 * use this progressive update if the hardware blockend
+		 * flag is unreliable: since it can stay high between
+		 * IRQs it will corrupt the transfer counter.
+		 */
+		if (!variant->broken_blockend)
+			host->data_xfered += data->blksz;
+		host->blockend = true;
+	}
+
+	if (status & MCI_DATAEND)
+		host->dataend = true;
+
+	/*
+	 * On variants with broken blockend we shall only wait for dataend,
+	 * on others we must sync with the blockend signal since they can
+	 * appear out-of-order.
+	 */
+	if (host->dataend && (host->blockend || variant->broken_blockend)) {
 		mmci_stop_data(host);
 
+		/* Reset these flags */
+		host->blockend = false;
+		host->dataend = false;
+
+		/*
+		 * Variants with broken blockend flags need to handle the
+		 * end of the entire transfer here.
+		 */
+		if (variant->broken_blockend && !data->error)
+			host->data_xfered += data->blksz * data->blocks;
+
 		if (!data->stop) {
 			mmci_request_end(host, data->mrq);
 		} else {
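The long comment in the hunk above boils down to a small completion rule: count bytes per block only when the BLOCKEND flag is trustworthy, and finish the transfer on DATAEND alone when it is not. Reduced to a pure function (illustrative only, not driver code), the rule looks like this:

#include <stdbool.h>
#include <stdio.h>

/* Finish the data transfer? Mirrors the condition used in mmci_data_irq(). */
static bool transfer_done(bool dataend, bool blockend, bool broken_blockend)
{
	return dataend && (blockend || broken_blockend);
}

int main(void)
{
	/* DATAEND seen, BLOCKEND not yet: keep waiting (reliable-blockend hardware) */
	printf("%d\n", transfer_done(true, false, false));	/* 0 */
	/* Ux500-style broken BLOCKEND: DATAEND alone is enough */
	printf("%d\n", transfer_done(true, false, true));	/* 1 */
	/* U300 out-of-order case, once both flags have been latched */
	printf("%d\n", transfer_done(true, true, false));	/* 1 */
	return 0;
}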
@@ -356,7 +458,32 @@ static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int rem
 		 variant->fifosize : variant->fifohalfsize;
 	count = min(remain, maxcnt);
 
-	writesl(base + MMCIFIFO, ptr, count >> 2);
+	/*
+	 * The ST Micro variant for SDIO transfer sizes
+	 * less then 8 bytes should have clock H/W flow
+	 * control disabled.
+	 */
+	if (variant->sdio &&
+	    mmc_card_sdio(host->mmc->card)) {
+		if (count < 8)
+			writel(readl(host->base + MMCICLOCK) &
+				~variant->clkreg_enable,
+				host->base + MMCICLOCK);
+		else
+			writel(readl(host->base + MMCICLOCK) |
+				variant->clkreg_enable,
+				host->base + MMCICLOCK);
+	}
+
+	/*
+	 * SDIO especially may want to send something that is
+	 * not divisible by 4 (as opposed to card sectors
+	 * etc), and the FIFO only accept full 32-bit writes.
+	 * So compensate by adding +3 on the count, a single
+	 * byte become a 32bit write, 7 bytes will be two
+	 * 32bit writes etc.
+	 */
+	writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);
 
 	ptr += count;
 	remain -= count;
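The (count + 3) >> 2 rounding added above is simply a round-up to whole 32-bit FIFO words; a quick standalone check (illustrative byte counts only):

#include <stdio.h>

int main(void)
{
	static const unsigned int counts[] = { 1, 4, 7, 8, 512 };
	unsigned int i;

	for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++)
		printf("%3u bytes -> %3u FIFO word writes\n",
		       counts[i], (counts[i] + 3) >> 2);
	return 0;	/* prints 1, 1, 2, 2, 128 */
}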
@@ -437,7 +564,7 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
 	 * "any data available" mode.
 	 */
 	if (status & MCI_RXACTIVE && host->size < variant->fifosize)
-		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);
+		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
 
 	/*
 	 * If we run out of data, disable the data IRQs; this
@@ -446,7 +573,7 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
 	 * stops us racing with our data end IRQ.
 	 */
 	if (host->size == 0) {
-		writel(0, base + MMCIMASK1);
+		mmci_set_mask1(host, 0);
 		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
 	}
 
@@ -469,6 +596,14 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
 		struct mmc_data *data;
 
 		status = readl(host->base + MMCISTATUS);
+
+		if (host->singleirq) {
+			if (status & readl(host->base + MMCIMASK1))
+				mmci_pio_irq(irq, dev_id);
+
+			status &= ~MCI_IRQ1MASK;
+		}
+
 		status &= readl(host->base + MMCIMASK0);
 		writel(status, host->base + MMCICLEAR);
 
@@ -635,6 +770,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 	struct variant_data *variant = id->data;
 	struct mmci_host *host;
 	struct mmc_host *mmc;
+	unsigned int mask;
 	int ret;
 
 	/* must have platform data */
@@ -806,20 +942,30 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 	if (ret)
 		goto unmap;
 
-	ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
-	if (ret)
-		goto irq0_free;
+	if (dev->irq[1] == NO_IRQ)
+		host->singleirq = true;
+	else {
+		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
+				  DRIVER_NAME " (pio)", host);
+		if (ret)
+			goto irq0_free;
+	}
 
-	writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+	mask = MCI_IRQENABLE;
+	/* Don't use the datablockend flag if it's broken */
+	if (variant->broken_blockend)
+		mask &= ~MCI_DATABLOCKEND;
 
-	amba_set_drvdata(dev, mmc);
+	writel(mask, host->base + MMCIMASK0);
 
-	mmc_add_host(mmc);
+	amba_set_drvdata(dev, mmc);
 
-	dev_info(&dev->dev, "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
-		 mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
+	dev_info(&dev->dev, "%s: PL%03x rev%u at 0x%08llx irq %d,%d\n",
+		 mmc_hostname(mmc), amba_part(dev), amba_rev(dev),
 		 (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);
 
+	mmc_add_host(mmc);
+
 	return 0;
 
  irq0_free:
@@ -864,7 +1010,8 @@ static int __devexit mmci_remove(struct amba_device *dev)
 		writel(0, host->base + MMCIDATACTRL);
 
 		free_irq(dev->irq[0], host);
-		free_irq(dev->irq[1], host);
+		if (!host->singleirq)
+			free_irq(dev->irq[1], host);
 
 		if (host->gpio_wp != -ENOSYS)
 			gpio_free(host->gpio_wp);
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 4ae887fc0189..df06f01aac89 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -139,6 +139,11 @@
 	MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK|	\
 	MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATABLOCKENDMASK)
 
+/* These interrupts are directed to IRQ1 when two IRQ lines are available */
+#define MCI_IRQ1MASK \
+	(MCI_RXFIFOHALFFULLMASK | MCI_RXDATAAVLBLMASK | \
+	 MCI_TXFIFOHALFEMPTYMASK)
+
 #define NR_SG		16
 
 struct clk;
@@ -154,6 +159,7 @@ struct mmci_host {
 	int			gpio_cd;
 	int			gpio_wp;
 	int			gpio_cd_irq;
+	bool			singleirq;
 
 	unsigned int		data_xfered;
 
@@ -171,6 +177,9 @@ struct mmci_host {
 	struct timer_list	timer;
 	unsigned int		oldstat;
 
+	bool			blockend;
+	bool			dataend;
+
 	/* pio stuff */
 	struct sg_mapping_iter	sg_miter;
 	unsigned int		size;