author		Linus Torvalds <torvalds@linux-foundation.org>	2011-01-25 18:04:18 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-25 18:04:18 -0500
commit		6663050edd9c2e8b1e1f55c09459144d84c045f0 (patch)
tree		f683d34d517465d62dcf2fd311f8f6e5d7e736b0 /drivers
parent		3af03655e885ba7f48ca6318e231a7086a51082e (diff)
parent		2f8e7285606bcdf8f574bff633675eabcee83d5e (diff)
Merge branch 'fixes' of master.kernel.org:/home/rmk/linux-2.6-arm
* 'fixes' of master.kernel.org:/home/rmk/linux-2.6-arm:
  ALSA: AACI: fix timeout duration
  ALSA: AACI: fix timeout condition checking
  ARM: 6636/1: ep93xx: default multiplexed gpio ports to gpio mode
  ARM: 6637/1: Make the argument to virt_to_phys() "const volatile"
  ARM: twd: ensure timer reload is reprogrammed on entry to periodic mode
  ARM: 6635/2: Configure reference clock for Versatile Express timers
  ARM: versatile: name configuration options after actual board names
  ARM: realview: name configuration options after actual board names
  ARM: realview,vexpress: fix section mismatch warning for pen_release
  ARM: 6632/3: mmci: stop using the blockend interrupts
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/mmc/host/mmci.c	98
-rw-r--r--	drivers/mmc/host/mmci.h	5
2 files changed, 21 insertions, 82 deletions
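For quick reference, the mmci change merged here ("mmci: stop using the blockend interrupts") drops the per-block MCI_DATABLOCKEND bookkeeping and instead derives the transferred byte count from the MMCIDATACNT register at the moment an error interrupt fires. The following is only an illustrative standalone sketch of that accounting, not driver code: the function name and plain parameters are stand-ins for the register read and the mmc_data fields; see the real hunks below for the exact in-kernel logic.

#include <stdint.h>

/*
 * Sketch only: mirrors how the patched mmci_data_irq() computes the
 * number of bytes that completed before an error, without relying on
 * blockend interrupts. "datacnt" stands in for a read of MMCIDATACNT,
 * which counts the 32-bit words remaining in the current transfer.
 */
static uint32_t mmci_bytes_transferred(uint32_t datacnt,
                                       uint32_t blksz, uint32_t blocks)
{
        uint32_t remain  = datacnt << 2;            /* words -> bytes */
        uint32_t success = blksz * blocks - remain; /* bytes completed */

        return success;
}

In the hunks below, this "success" value feeds host->data_xfered directly for timeouts and FIFO under/overruns, while the CRC-failure branch discounts the last (untrusted) block.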
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 563022825667..2de12fe155da 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -46,10 +46,6 @@ static unsigned int fmax = 515633;
  *	      is asserted (likewise for RX)
  * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
  *		  is asserted (likewise for RX)
- * @broken_blockend: the MCI_DATABLOCKEND is broken on the hardware
- *		and will not work at all.
- * @broken_blockend_dma: the MCI_DATABLOCKEND is broken on the hardware when
- *		using DMA.
  * @sdio: variant supports SDIO
  * @st_clkdiv: true if using a ST-specific clock divider algorithm
  */
@@ -59,8 +55,6 @@ struct variant_data {
 	unsigned int		datalength_bits;
 	unsigned int		fifosize;
 	unsigned int		fifohalfsize;
-	bool			broken_blockend;
-	bool			broken_blockend_dma;
 	bool			sdio;
 	bool			st_clkdiv;
 };
@@ -76,7 +70,6 @@ static struct variant_data variant_u300 = {
 	.fifohalfsize		= 8 * 4,
 	.clkreg_enable		= 1 << 13, /* HWFCEN */
 	.datalength_bits	= 16,
-	.broken_blockend_dma	= true,
 	.sdio			= true,
 };
 
@@ -86,7 +79,6 @@ static struct variant_data variant_ux500 = {
 	.clkreg			= MCI_CLK_ENABLE,
 	.clkreg_enable		= 1 << 14, /* HWFCEN */
 	.datalength_bits	= 24,
-	.broken_blockend	= true,
 	.sdio			= true,
 	.st_clkdiv		= true,
 };
@@ -210,8 +202,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 	host->data = data;
 	host->size = data->blksz * data->blocks;
 	host->data_xfered = 0;
-	host->blockend = false;
-	host->dataend = false;
 
 	mmci_init_sg(host, data);
 
@@ -288,21 +278,26 @@ static void
 mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 	      unsigned int status)
 {
-	struct variant_data *variant = host->variant;
-
 	/* First check for errors */
 	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+		u32 remain, success;
+
+		/* Calculate how far we are into the transfer */
+		remain = readl(host->base + MMCIDATACNT) << 2;
+		success = data->blksz * data->blocks - remain;
+
 		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
-		if (status & MCI_DATACRCFAIL)
+		if (status & MCI_DATACRCFAIL) {
+			/* Last block was not successful */
+			host->data_xfered = ((success / data->blksz) - 1 * data->blksz);
 			data->error = -EILSEQ;
-		else if (status & MCI_DATATIMEOUT)
+		} else if (status & MCI_DATATIMEOUT) {
+			host->data_xfered = success;
 			data->error = -ETIMEDOUT;
-		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
+		} else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+			host->data_xfered = success;
 			data->error = -EIO;
-
-		/* Force-complete the transaction */
-		host->blockend = true;
-		host->dataend = true;
+		}
 
 		/*
 		 * We hit an error condition. Ensure that any data
@@ -321,61 +316,14 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 		}
 	}
 
-	/*
-	 * On ARM variants in PIO mode, MCI_DATABLOCKEND
-	 * is always sent first, and we increase the
-	 * transfered number of bytes for that IRQ. Then
-	 * MCI_DATAEND follows and we conclude the transaction.
-	 *
-	 * On the Ux500 single-IRQ variant MCI_DATABLOCKEND
-	 * doesn't seem to immediately clear from the status,
-	 * so we can't use it keep count when only one irq is
-	 * used because the irq will hit for other reasons, and
-	 * then the flag is still up. So we use the MCI_DATAEND
-	 * IRQ at the end of the entire transfer because
-	 * MCI_DATABLOCKEND is broken.
-	 *
-	 * In the U300, the IRQs can arrive out-of-order,
-	 * e.g. MCI_DATABLOCKEND sometimes arrives after MCI_DATAEND,
-	 * so for this case we use the flags "blockend" and
-	 * "dataend" to make sure both IRQs have arrived before
-	 * concluding the transaction. (This does not apply
-	 * to the Ux500 which doesn't fire MCI_DATABLOCKEND
-	 * at all.) In DMA mode it suffers from the same problem
-	 * as the Ux500.
-	 */
-	if (status & MCI_DATABLOCKEND) {
-		/*
-		 * Just being a little over-cautious, we do not
-		 * use this progressive update if the hardware blockend
-		 * flag is unreliable: since it can stay high between
-		 * IRQs it will corrupt the transfer counter.
-		 */
-		if (!variant->broken_blockend)
-			host->data_xfered += data->blksz;
-		host->blockend = true;
-	}
-
-	if (status & MCI_DATAEND)
-		host->dataend = true;
+	if (status & MCI_DATABLOCKEND)
+		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
 
-	/*
-	 * On variants with broken blockend we shall only wait for dataend,
-	 * on others we must sync with the blockend signal since they can
-	 * appear out-of-order.
-	 */
-	if (host->dataend && (host->blockend || variant->broken_blockend)) {
+	if (status & MCI_DATAEND) {
 		mmci_stop_data(host);
 
-		/* Reset these flags */
-		host->blockend = false;
-		host->dataend = false;
-
-		/*
-		 * Variants with broken blockend flags need to handle the
-		 * end of the entire transfer here.
-		 */
-		if (variant->broken_blockend && !data->error)
+		if (!data->error)
+			/* The error clause is handled above, success! */
 			host->data_xfered += data->blksz * data->blocks;
 
 		if (!data->stop) {
@@ -770,7 +718,6 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 	struct variant_data *variant = id->data;
 	struct mmci_host *host;
 	struct mmc_host *mmc;
-	unsigned int mask;
 	int ret;
 
 	/* must have platform data */
@@ -951,12 +898,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 			goto irq0_free;
 	}
 
-	mask = MCI_IRQENABLE;
-	/* Don't use the datablockend flag if it's broken */
-	if (variant->broken_blockend)
-		mask &= ~MCI_DATABLOCKEND;
-
-	writel(mask, host->base + MMCIMASK0);
+	writel(MCI_IRQENABLE, host->base + MMCIMASK0);
 
 	amba_set_drvdata(dev, mmc);
 
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index df06f01aac89..c1df7b82d36c 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -137,7 +137,7 @@
 #define MCI_IRQENABLE	\
 	(MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK|	\
 	MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK|	\
-	MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATABLOCKENDMASK)
+	MCI_CMDRESPENDMASK|MCI_CMDSENTMASK)
 
 /* These interrupts are directed to IRQ1 when two IRQ lines are available */
 #define MCI_IRQ1MASK \
@@ -177,9 +177,6 @@ struct mmci_host {
 	struct timer_list	timer;
 	unsigned int		oldstat;
 
-	bool			blockend;
-	bool			dataend;
-
 	/* pio stuff */
 	struct sg_mapping_iter	sg_miter;
 	unsigned int		size;