Diffstat (limited to 'drivers/mmc/host/mmci.c')
-rw-r--r--  drivers/mmc/host/mmci.c  443
1 file changed, 322 insertions(+), 121 deletions(-)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 563022825667..5bbb87d10251 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -2,7 +2,7 @@
  * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
  *
  * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- * Copyright (C) 2010 ST-Ericsson AB.
+ * Copyright (C) 2010 ST-Ericsson SA
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -14,6 +14,7 @@
 #include <linux/ioport.h>
 #include <linux/device.h>
 #include <linux/interrupt.h>
+#include <linux/kernel.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/highmem.h>
@@ -24,8 +25,10 @@
 #include <linux/clk.h>
 #include <linux/scatterlist.h>
 #include <linux/gpio.h>
-#include <linux/amba/mmci.h>
 #include <linux/regulator/consumer.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/amba/mmci.h>
 
 #include <asm/div64.h>
 #include <asm/io.h>
@@ -46,10 +49,6 @@ static unsigned int fmax = 515633;
  * is asserted (likewise for RX)
  * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
  * is asserted (likewise for RX)
- * @broken_blockend: the MCI_DATABLOCKEND is broken on the hardware
- * and will not work at all.
- * @broken_blockend_dma: the MCI_DATABLOCKEND is broken on the hardware when
- * using DMA.
  * @sdio: variant supports SDIO
  * @st_clkdiv: true if using a ST-specific clock divider algorithm
  */
@@ -59,8 +58,6 @@ struct variant_data {
 	unsigned int		datalength_bits;
 	unsigned int		fifosize;
 	unsigned int		fifohalfsize;
-	bool			broken_blockend;
-	bool			broken_blockend_dma;
 	bool			sdio;
 	bool			st_clkdiv;
 };
@@ -76,7 +73,6 @@ static struct variant_data variant_u300 = {
 	.fifohalfsize		= 8 * 4,
 	.clkreg_enable		= 1 << 13, /* HWFCEN */
 	.datalength_bits	= 16,
-	.broken_blockend_dma	= true,
 	.sdio			= true,
 };
 
@@ -86,7 +82,6 @@ static struct variant_data variant_ux500 = {
 	.clkreg			= MCI_CLK_ENABLE,
 	.clkreg_enable		= 1 << 14, /* HWFCEN */
 	.datalength_bits	= 24,
-	.broken_blockend	= true,
 	.sdio			= true,
 	.st_clkdiv		= true,
 };
@@ -149,9 +144,6 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
 	host->mrq = NULL;
 	host->cmd = NULL;
 
-	if (mrq->data)
-		mrq->data->bytes_xfered = host->data_xfered;
-
 	/*
 	 * Need to drop the host lock here; mmc_request_done may call
 	 * back into the driver...
@@ -196,6 +188,248 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
 	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
 }
 
+/*
+ * All the DMA operation mode stuff goes inside this ifdef.
+ * This assumes that you have a generic DMA device interface,
+ * no custom DMA interfaces are supported.
+ */
+#ifdef CONFIG_DMA_ENGINE
+static void __devinit mmci_dma_setup(struct mmci_host *host)
+{
+	struct mmci_platform_data *plat = host->plat;
+	const char *rxname, *txname;
+	dma_cap_mask_t mask;
+
+	if (!plat || !plat->dma_filter) {
+		dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
+		return;
+	}
+
+	/* Try to acquire a generic DMA engine slave channel */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/*
+	 * If only an RX channel is specified, the driver will
+	 * attempt to use it bidirectionally, however if it is
+	 * specified but cannot be located, DMA will be disabled.
+	 */
+	if (plat->dma_rx_param) {
+		host->dma_rx_channel = dma_request_channel(mask,
+							   plat->dma_filter,
+							   plat->dma_rx_param);
+		/* E.g. if no DMA hardware is present */
+		if (!host->dma_rx_channel)
+			dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
+	}
+
+	if (plat->dma_tx_param) {
+		host->dma_tx_channel = dma_request_channel(mask,
+							   plat->dma_filter,
+							   plat->dma_tx_param);
+		if (!host->dma_tx_channel)
+			dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
+	} else {
+		host->dma_tx_channel = host->dma_rx_channel;
+	}
+
+	if (host->dma_rx_channel)
+		rxname = dma_chan_name(host->dma_rx_channel);
+	else
+		rxname = "none";
+
+	if (host->dma_tx_channel)
+		txname = dma_chan_name(host->dma_tx_channel);
+	else
+		txname = "none";
+
+	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
+		 rxname, txname);
+
+	/*
+	 * Limit the maximum segment size in any SG entry according to
+	 * the parameters of the DMA engine device.
+	 */
+	if (host->dma_tx_channel) {
+		struct device *dev = host->dma_tx_channel->device->dev;
+		unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+		if (max_seg_size < host->mmc->max_seg_size)
+			host->mmc->max_seg_size = max_seg_size;
+	}
+	if (host->dma_rx_channel) {
+		struct device *dev = host->dma_rx_channel->device->dev;
+		unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+		if (max_seg_size < host->mmc->max_seg_size)
+			host->mmc->max_seg_size = max_seg_size;
+	}
+}
+
+/*
+ * This is used in __devinit or __devexit so inline it
+ * so it can be discarded.
+ */
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+	struct mmci_platform_data *plat = host->plat;
+
+	if (host->dma_rx_channel)
+		dma_release_channel(host->dma_rx_channel);
+	if (host->dma_tx_channel && plat->dma_tx_param)
+		dma_release_channel(host->dma_tx_channel);
+	host->dma_rx_channel = host->dma_tx_channel = NULL;
+}
+
+static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+	struct dma_chan *chan = host->dma_current;
+	enum dma_data_direction dir;
+	u32 status;
+	int i;
+
+	/* Wait up to 1ms for the DMA to complete */
+	for (i = 0; ; i++) {
+		status = readl(host->base + MMCISTATUS);
+		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
+			break;
+		udelay(10);
+	}
+
+	/*
+	 * Check to see whether we still have some data left in the FIFO -
+	 * this catches DMA controllers which are unable to monitor the
+	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
+	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
+	 */
+	if (status & MCI_RXDATAAVLBLMASK) {
+		dmaengine_terminate_all(chan);
+		if (!data->error)
+			data->error = -EIO;
+	}
+
+	if (data->flags & MMC_DATA_WRITE) {
+		dir = DMA_TO_DEVICE;
+	} else {
+		dir = DMA_FROM_DEVICE;
+	}
+
+	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+
+	/*
+	 * Use of DMA with scatter-gather is impossible.
+	 * Give up with DMA and switch back to PIO mode.
+	 */
+	if (status & MCI_RXDATAAVLBLMASK) {
+		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
+		mmci_dma_release(host);
+	}
+}
+
+static void mmci_dma_data_error(struct mmci_host *host)
+{
+	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+	dmaengine_terminate_all(host->dma_current);
+}
+
+static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+	struct variant_data *variant = host->variant;
+	struct dma_slave_config conf = {
+		.src_addr = host->phybase + MMCIFIFO,
+		.dst_addr = host->phybase + MMCIFIFO,
+		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
+		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
+	};
+	struct mmc_data *data = host->data;
+	struct dma_chan *chan;
+	struct dma_device *device;
+	struct dma_async_tx_descriptor *desc;
+	int nr_sg;
+
+	host->dma_current = NULL;
+
+	if (data->flags & MMC_DATA_READ) {
+		conf.direction = DMA_FROM_DEVICE;
+		chan = host->dma_rx_channel;
+	} else {
+		conf.direction = DMA_TO_DEVICE;
+		chan = host->dma_tx_channel;
+	}
+
+	/* If there's no DMA channel, fall back to PIO */
+	if (!chan)
+		return -EINVAL;
+
+	/* If less than or equal to the fifo size, don't bother with DMA */
+	if (host->size <= variant->fifosize)
+		return -EINVAL;
+
+	device = chan->device;
+	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
+	if (nr_sg == 0)
+		return -EINVAL;
+
+	dmaengine_slave_config(chan, &conf);
+	desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
+					    conf.direction, DMA_CTRL_ACK);
+	if (!desc)
+		goto unmap_exit;
+
+	/* Okay, go for it. */
+	host->dma_current = chan;
+
+	dev_vdbg(mmc_dev(host->mmc),
+		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
+		 data->sg_len, data->blksz, data->blocks, data->flags);
+	dmaengine_submit(desc);
+	dma_async_issue_pending(chan);
+
+	datactrl |= MCI_DPSM_DMAENABLE;
+
+	/* Trigger the DMA transfer */
+	writel(datactrl, host->base + MMCIDATACTRL);
+
+	/*
+	 * Let the MMCI say when the data is ended and it's time
+	 * to fire next DMA request. When that happens, MMCI will
+	 * call mmci_data_end()
+	 */
+	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
+	       host->base + MMCIMASK0);
+	return 0;
+
+unmap_exit:
+	dmaengine_terminate_all(chan);
+	dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+	return -ENOMEM;
+}
+#else
+/* Blank functions if the DMA engine is not available */
+static inline void mmci_dma_setup(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+}
+
+static inline void mmci_dma_data_error(struct mmci_host *host)
+{
+}
+
+static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+	return -ENOSYS;
+}
+#endif
+
 static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 {
 	struct variant_data *variant = host->variant;
@@ -209,11 +443,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 
 	host->data = data;
 	host->size = data->blksz * data->blocks;
-	host->data_xfered = 0;
-	host->blockend = false;
-	host->dataend = false;
-
-	mmci_init_sg(host, data);
+	data->bytes_xfered = 0;
 
 	clks = (unsigned long long)data->timeout_ns * host->cclk;
 	do_div(clks, 1000000000UL);
@@ -228,15 +458,29 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 	BUG_ON(1 << blksz_bits != data->blksz);
 
 	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
-	if (data->flags & MMC_DATA_READ) {
+
+	if (data->flags & MMC_DATA_READ)
 		datactrl |= MCI_DPSM_DIRECTION;
+
+	/*
+	 * Attempt to use DMA operation mode, if this
+	 * should fail, fall back to PIO mode
+	 */
+	if (!mmci_dma_start_data(host, datactrl))
+		return;
+
+	/* IRQ mode, map the SG list for CPU reading/writing */
+	mmci_init_sg(host, data);
+
+	if (data->flags & MMC_DATA_READ) {
 		irqmask = MCI_RXFIFOHALFFULLMASK;
 
 		/*
-		 * If we have less than a FIFOSIZE of bytes to transfer,
-		 * trigger a PIO interrupt as soon as any data is available.
+		 * If we have less than the fifo 'half-full' threshold to
+		 * transfer, trigger a PIO interrupt as soon as any data
+		 * is available.
 		 */
-		if (host->size < variant->fifosize)
+		if (host->size < variant->fifohalfsize)
 			irqmask |= MCI_RXDATAAVLBLMASK;
 	} else {
 		/*
@@ -288,95 +532,55 @@ static void
 mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 	      unsigned int status)
 {
-	struct variant_data *variant = host->variant;
-
 	/* First check for errors */
 	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
-		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
-		if (status & MCI_DATACRCFAIL)
-			data->error = -EILSEQ;
-		else if (status & MCI_DATATIMEOUT)
-			data->error = -ETIMEDOUT;
-		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
-			data->error = -EIO;
+		u32 remain, success;
 
-		/* Force-complete the transaction */
-		host->blockend = true;
-		host->dataend = true;
+		/* Terminate the DMA transfer */
+		if (dma_inprogress(host))
+			mmci_dma_data_error(host);
 
 		/*
-		 * We hit an error condition. Ensure that any data
-		 * partially written to a page is properly coherent.
+		 * Calculate how far we are into the transfer. Note that
+		 * the data counter gives the number of bytes transferred
+		 * on the MMC bus, not on the host side. On reads, this
+		 * can be as much as a FIFO-worth of data ahead. This
+		 * matters for FIFO overruns only.
 		 */
-		if (data->flags & MMC_DATA_READ) {
-			struct sg_mapping_iter *sg_miter = &host->sg_miter;
-			unsigned long flags;
-
-			local_irq_save(flags);
-			if (sg_miter_next(sg_miter)) {
-				flush_dcache_page(sg_miter->page);
-				sg_miter_stop(sg_miter);
-			}
-			local_irq_restore(flags);
+		remain = readl(host->base + MMCIDATACNT);
+		success = data->blksz * data->blocks - remain;
+
+		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
+			status, success);
+		if (status & MCI_DATACRCFAIL) {
+			/* Last block was not successful */
+			success -= 1;
+			data->error = -EILSEQ;
+		} else if (status & MCI_DATATIMEOUT) {
+			data->error = -ETIMEDOUT;
+		} else if (status & MCI_TXUNDERRUN) {
+			data->error = -EIO;
+		} else if (status & MCI_RXOVERRUN) {
+			if (success > host->variant->fifosize)
+				success -= host->variant->fifosize;
+			else
+				success = 0;
+			data->error = -EIO;
 		}
+		data->bytes_xfered = round_down(success, data->blksz);
 	}
 
-	/*
-	 * On ARM variants in PIO mode, MCI_DATABLOCKEND
-	 * is always sent first, and we increase the
-	 * transfered number of bytes for that IRQ. Then
-	 * MCI_DATAEND follows and we conclude the transaction.
-	 *
-	 * On the Ux500 single-IRQ variant MCI_DATABLOCKEND
-	 * doesn't seem to immediately clear from the status,
-	 * so we can't use it keep count when only one irq is
-	 * used because the irq will hit for other reasons, and
-	 * then the flag is still up. So we use the MCI_DATAEND
-	 * IRQ at the end of the entire transfer because
-	 * MCI_DATABLOCKEND is broken.
-	 *
-	 * In the U300, the IRQs can arrive out-of-order,
-	 * e.g. MCI_DATABLOCKEND sometimes arrives after MCI_DATAEND,
-	 * so for this case we use the flags "blockend" and
-	 * "dataend" to make sure both IRQs have arrived before
-	 * concluding the transaction. (This does not apply
-	 * to the Ux500 which doesn't fire MCI_DATABLOCKEND
-	 * at all.) In DMA mode it suffers from the same problem
-	 * as the Ux500.
-	 */
-	if (status & MCI_DATABLOCKEND) {
-		/*
-		 * Just being a little over-cautious, we do not
-		 * use this progressive update if the hardware blockend
-		 * flag is unreliable: since it can stay high between
-		 * IRQs it will corrupt the transfer counter.
-		 */
-		if (!variant->broken_blockend)
-			host->data_xfered += data->blksz;
-		host->blockend = true;
-	}
+	if (status & MCI_DATABLOCKEND)
+		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
 
-	if (status & MCI_DATAEND)
-		host->dataend = true;
-
-	/*
-	 * On variants with broken blockend we shall only wait for dataend,
-	 * on others we must sync with the blockend signal since they can
-	 * appear out-of-order.
-	 */
-	if (host->dataend && (host->blockend || variant->broken_blockend)) {
+	if (status & MCI_DATAEND || data->error) {
+		if (dma_inprogress(host))
+			mmci_dma_unmap(host, data);
 		mmci_stop_data(host);
 
-		/* Reset these flags */
-		host->blockend = false;
-		host->dataend = false;
-
-		/*
-		 * Variants with broken blockend flags need to handle the
-		 * end of the entire transfer here.
-		 */
-		if (variant->broken_blockend && !data->error)
-			host->data_xfered += data->blksz * data->blocks;
+		if (!data->error)
+			/* The error clause is handled above, success! */
+			data->bytes_xfered = data->blksz * data->blocks;
 
 		if (!data->stop) {
 			mmci_request_end(host, data->mrq);
@@ -394,15 +598,15 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
 
 	host->cmd = NULL;
 
-	cmd->resp[0] = readl(base + MMCIRESPONSE0);
-	cmd->resp[1] = readl(base + MMCIRESPONSE1);
-	cmd->resp[2] = readl(base + MMCIRESPONSE2);
-	cmd->resp[3] = readl(base + MMCIRESPONSE3);
-
 	if (status & MCI_CMDTIMEOUT) {
 		cmd->error = -ETIMEDOUT;
 	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
 		cmd->error = -EILSEQ;
+	} else {
+		cmd->resp[0] = readl(base + MMCIRESPONSE0);
+		cmd->resp[1] = readl(base + MMCIRESPONSE1);
+		cmd->resp[2] = readl(base + MMCIRESPONSE2);
+		cmd->resp[3] = readl(base + MMCIRESPONSE3);
 	}
 
 	if (!cmd->data || cmd->error) {
@@ -549,9 +753,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
 		if (remain)
 			break;
 
-		if (status & MCI_RXACTIVE)
-			flush_dcache_page(sg_miter->page);
-
 		status = readl(base + MMCISTATUS);
 	} while (1);
 
@@ -560,10 +761,10 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
 	local_irq_restore(flags);
 
 	/*
-	 * If we're nearing the end of the read, switch to
-	 * "any data available" mode.
+	 * If we have less than the fifo 'half-full' threshold to transfer,
+	 * trigger a PIO interrupt as soon as any data is available.
 	 */
-	if (status & MCI_RXACTIVE && host->size < variant->fifosize)
+	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
 		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
 
 	/*
@@ -764,13 +965,13 @@ static const struct mmc_host_ops mmci_ops = {
 	.get_cd		= mmci_get_cd,
 };
 
-static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
+static int __devinit mmci_probe(struct amba_device *dev,
+	const struct amba_id *id)
 {
 	struct mmci_platform_data *plat = dev->dev.platform_data;
 	struct variant_data *variant = id->data;
 	struct mmci_host *host;
 	struct mmc_host *mmc;
-	unsigned int mask;
 	int ret;
 
 	/* must have platform data */
@@ -828,6 +1029,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
 			host->mclk);
 	}
+	host->phybase = dev->res.start;
 	host->base = ioremap(dev->res.start, resource_size(&dev->res));
 	if (!host->base) {
 		ret = -ENOMEM;
@@ -951,18 +1153,16 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 		goto irq0_free;
 	}
 
-	mask = MCI_IRQENABLE;
-	/* Don't use the datablockend flag if it's broken */
-	if (variant->broken_blockend)
-		mask &= ~MCI_DATABLOCKEND;
-
-	writel(mask, host->base + MMCIMASK0);
+	writel(MCI_IRQENABLE, host->base + MMCIMASK0);
 
 	amba_set_drvdata(dev, mmc);
 
-	dev_info(&dev->dev, "%s: PL%03x rev%u at 0x%08llx irq %d,%d\n",
-		 mmc_hostname(mmc), amba_part(dev), amba_rev(dev),
-		 (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);
+	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
+		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
+		 amba_rev(dev), (unsigned long long)dev->res.start,
+		 dev->irq[0], dev->irq[1]);
+
+	mmci_dma_setup(host);
 
 	mmc_add_host(mmc);
 
@@ -1009,6 +1209,7 @@ static int __devexit mmci_remove(struct amba_device *dev)
 		writel(0, host->base + MMCICOMMAND);
 		writel(0, host->base + MMCIDATACTRL);
 
+		mmci_dma_release(host);
 		free_irq(dev->irq[0], host);
 		if (!host->singleirq)
 			free_irq(dev->irq[1], host);
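
The DMA path added above is only exercised when the platform hands the driver a dmaengine channel filter and per-direction request parameters through struct mmci_platform_data (the plat->dma_filter, plat->dma_rx_param and plat->dma_tx_param fields read by mmci_dma_setup()). A minimal board-file sketch of that wiring follows; the PL08x filter function, the channel-name cookies and the ocr_mask value are illustrative assumptions, not part of this patch:

/*
 * Hypothetical board support sketch (not part of this patch): supply the
 * DMA filter and request parameters that mmci_dma_setup() looks for.
 * pl08x_filter_id() and the "mmci_rx"/"mmci_tx" channel names stand in
 * for whatever the platform's dmaengine driver actually provides.
 */
#include <linux/amba/mmci.h>
#include <linux/amba/pl08x.h>
#include <linux/mmc/host.h>

static struct mmci_platform_data board_mmci_data = {
	.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
	/* Used by dma_request_channel() when mmci_dma_setup() probes for channels */
	.dma_filter	= pl08x_filter_id,
	/* Opaque per-direction cookies handed back to the filter */
	.dma_rx_param	= "mmci_rx",
	.dma_tx_param	= "mmci_tx",
};

If either channel lookup fails, or no platform data is provided, mmci_dma_setup() leaves the channel pointers NULL and mmci_dma_start_data() returns -EINVAL, so mmci_start_data() falls back to the existing PIO/IRQ path.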