diff options
author | Linus Walleij <linus.walleij@stericsson.com> | 2010-10-19 08:41:24 -0400 |
---|---|---|
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2010-11-10 08:12:59 -0500 |
commit | f20f8f21e0402c785c342547f7e49eafc42cfb52 (patch) | |
tree | ce9f9b37de7c065a406273d597bc31e114effdfd /drivers/mmc/host/mmci.c | |
parent | 2686b4b408c25349aee7b35558722d5730d67224 (diff) |
ARM: 6399/3: mmci: handle broken MCI_DATABLOCKEND hardware
On the U300 the MCI_DATAEND and MCI_DATABLOCKEND IRQs can arrive
out-of-order. Replace an ugly #ifdef hack with a proper runtime
solution which models what is really happening.
In the U300 DMA mode and on all Ux500 models, the MCI_DATABLOCKEND
flag isn't properly cleared in hardware following an ACK, leading
to all kinds of weird behaviour when the flag is still up in
subsequent interrupts, so we add two flags indicating the
error and handle this at runtime.
Cc: Rabin Vincent <rabin.vincent@stericsson.com>
Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'drivers/mmc/host/mmci.c')
-rw-r--r-- | drivers/mmc/host/mmci.c | 93 |
1 files changed, 77 insertions, 16 deletions
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index ed700a5b03ae..976c9d0e8080 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c | |||
@@ -45,6 +45,10 @@ static unsigned int fmax = 515633; | |||
45 | * is asserted (likewise for RX) | 45 | * is asserted (likewise for RX) |
46 | * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY | 46 | * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY |
47 | * is asserted (likewise for RX) | 47 | * is asserted (likewise for RX) |
48 | * @broken_blockend: the MCI_DATABLOCKEND is broken on the hardware | ||
49 | * and will not work at all. | ||
50 | * @broken_blockend_dma: the MCI_DATABLOCKEND is broken on the hardware when | ||
51 | * using DMA. | ||
48 | */ | 52 | */ |
49 | struct variant_data { | 53 | struct variant_data { |
50 | unsigned int clkreg; | 54 | unsigned int clkreg; |
@@ -52,6 +56,8 @@ struct variant_data { | |||
52 | unsigned int datalength_bits; | 56 | unsigned int datalength_bits; |
53 | unsigned int fifosize; | 57 | unsigned int fifosize; |
54 | unsigned int fifohalfsize; | 58 | unsigned int fifohalfsize; |
59 | bool broken_blockend; | ||
60 | bool broken_blockend_dma; | ||
55 | }; | 61 | }; |
56 | 62 | ||
57 | static struct variant_data variant_arm = { | 63 | static struct variant_data variant_arm = { |
@@ -65,6 +71,7 @@ static struct variant_data variant_u300 = { | |||
65 | .fifohalfsize = 8 * 4, | 71 | .fifohalfsize = 8 * 4, |
66 | .clkreg_enable = 1 << 13, /* HWFCEN */ | 72 | .clkreg_enable = 1 << 13, /* HWFCEN */ |
67 | .datalength_bits = 16, | 73 | .datalength_bits = 16, |
74 | .broken_blockend_dma = true, | ||
68 | }; | 75 | }; |
69 | 76 | ||
70 | static struct variant_data variant_ux500 = { | 77 | static struct variant_data variant_ux500 = { |
@@ -73,6 +80,7 @@ static struct variant_data variant_ux500 = { | |||
73 | .clkreg = MCI_CLK_ENABLE, | 80 | .clkreg = MCI_CLK_ENABLE, |
74 | .clkreg_enable = 1 << 14, /* HWFCEN */ | 81 | .clkreg_enable = 1 << 14, /* HWFCEN */ |
75 | .datalength_bits = 24, | 82 | .datalength_bits = 24, |
83 | .broken_blockend = true, | ||
76 | }; | 84 | }; |
77 | /* | 85 | /* |
78 | * This must be called with host->lock held | 86 | * This must be called with host->lock held |
@@ -178,6 +186,8 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) | |||
178 | host->data = data; | 186 | host->data = data; |
179 | host->size = data->blksz * data->blocks; | 187 | host->size = data->blksz * data->blocks; |
180 | host->data_xfered = 0; | 188 | host->data_xfered = 0; |
189 | host->blockend = false; | ||
190 | host->dataend = false; | ||
181 | 191 | ||
182 | mmci_init_sg(host, data); | 192 | mmci_init_sg(host, data); |
183 | 193 | ||
@@ -249,20 +259,9 @@ static void | |||
249 | mmci_data_irq(struct mmci_host *host, struct mmc_data *data, | 259 | mmci_data_irq(struct mmci_host *host, struct mmc_data *data, |
250 | unsigned int status) | 260 | unsigned int status) |
251 | { | 261 | { |
252 | if (status & MCI_DATABLOCKEND) { | 262 | struct variant_data *variant = host->variant; |
253 | host->data_xfered += data->blksz; | 263 | |
254 | #ifdef CONFIG_ARCH_U300 | 264 | /* First check for errors */ |
255 | /* | ||
256 | * On the U300 some signal or other is | ||
257 | * badly routed so that a data write does | ||
258 | * not properly terminate with a MCI_DATAEND | ||
259 | * status flag. This quirk will make writes | ||
260 | * work again. | ||
261 | */ | ||
262 | if (data->flags & MMC_DATA_WRITE) | ||
263 | status |= MCI_DATAEND; | ||
264 | #endif | ||
265 | } | ||
266 | if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) { | 265 | if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) { |
267 | dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status); | 266 | dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status); |
268 | if (status & MCI_DATACRCFAIL) | 267 | if (status & MCI_DATACRCFAIL) |
@@ -271,7 +270,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data, | |||
271 | data->error = -ETIMEDOUT; | 270 | data->error = -ETIMEDOUT; |
272 | else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN)) | 271 | else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN)) |
273 | data->error = -EIO; | 272 | data->error = -EIO; |
274 | status |= MCI_DATAEND; | 273 | |
274 | /* Force-complete the transaction */ | ||
275 | host->blockend = true; | ||
276 | host->dataend = true; | ||
275 | 277 | ||
276 | /* | 278 | /* |
277 | * We hit an error condition. Ensure that any data | 279 | * We hit an error condition. Ensure that any data |
@@ -289,9 +291,64 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data, | |||
289 | local_irq_restore(flags); | 291 | local_irq_restore(flags); |
290 | } | 292 | } |
291 | } | 293 | } |
292 | if (status & MCI_DATAEND) { | 294 | |
295 | /* | ||
296 | * On ARM variants in PIO mode, MCI_DATABLOCKEND | ||
297 | * is always sent first, and we increase the | ||
298 | * transfered number of bytes for that IRQ. Then | ||
299 | * MCI_DATAEND follows and we conclude the transaction. | ||
300 | * | ||
301 | * On the Ux500 single-IRQ variant MCI_DATABLOCKEND | ||
302 | * doesn't seem to immediately clear from the status, | ||
303 | * so we can't use it keep count when only one irq is | ||
304 | * used because the irq will hit for other reasons, and | ||
305 | * then the flag is still up. So we use the MCI_DATAEND | ||
306 | * IRQ at the end of the entire transfer because | ||
307 | * MCI_DATABLOCKEND is broken. | ||
308 | * | ||
309 | * In the U300, the IRQs can arrive out-of-order, | ||
310 | * e.g. MCI_DATABLOCKEND sometimes arrives after MCI_DATAEND, | ||
311 | * so for this case we use the flags "blockend" and | ||
312 | * "dataend" to make sure both IRQs have arrived before | ||
313 | * concluding the transaction. (This does not apply | ||
314 | * to the Ux500 which doesn't fire MCI_DATABLOCKEND | ||
315 | * at all.) In DMA mode it suffers from the same problem | ||
316 | * as the Ux500. | ||
317 | */ | ||
318 | if (status & MCI_DATABLOCKEND) { | ||
319 | /* | ||
320 | * Just being a little over-cautious, we do not | ||
321 | * use this progressive update if the hardware blockend | ||
322 | * flag is unreliable: since it can stay high between | ||
323 | * IRQs it will corrupt the transfer counter. | ||
324 | */ | ||
325 | if (!variant->broken_blockend) | ||
326 | host->data_xfered += data->blksz; | ||
327 | host->blockend = true; | ||
328 | } | ||
329 | |||
330 | if (status & MCI_DATAEND) | ||
331 | host->dataend = true; | ||
332 | |||
333 | /* | ||
334 | * On variants with broken blockend we shall only wait for dataend, | ||
335 | * on others we must sync with the blockend signal since they can | ||
336 | * appear out-of-order. | ||
337 | */ | ||
338 | if (host->dataend && (host->blockend || variant->broken_blockend)) { | ||
293 | mmci_stop_data(host); | 339 | mmci_stop_data(host); |
294 | 340 | ||
341 | /* Reset these flags */ | ||
342 | host->blockend = false; | ||
343 | host->dataend = false; | ||
344 | |||
345 | /* | ||
346 | * Variants with broken blockend flags need to handle the | ||
347 | * end of the entire transfer here. | ||
348 | */ | ||
349 | if (variant->broken_blockend && !data->error) | ||
350 | host->data_xfered += data->blksz * data->blocks; | ||
351 | |||
295 | if (!data->stop) { | 352 | if (!data->stop) { |
296 | mmci_request_end(host, data->mrq); | 353 | mmci_request_end(host, data->mrq); |
297 | } else { | 354 | } else { |
@@ -841,6 +898,10 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) | |||
841 | } | 898 | } |
842 | 899 | ||
843 | mask = MCI_IRQENABLE; | 900 | mask = MCI_IRQENABLE; |
901 | /* Don't use the datablockend flag if it's broken */ | ||
902 | if (variant->broken_blockend) | ||
903 | mask &= ~MCI_DATABLOCKEND; | ||
904 | |||
844 | writel(mask, host->base + MMCIMASK0); | 905 | writel(mask, host->base + MMCIMASK0); |
845 | 906 | ||
846 | amba_set_drvdata(dev, mmc); | 907 | amba_set_drvdata(dev, mmc); |