author		Russell King <rmk+kernel@arm.linux.org.uk>	2011-01-11 14:35:53 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2011-02-04 08:25:49 -0500
commit		c8ebae37034c0ead62eb4df8ef88e999ddb8d5cf (patch)
tree		c9925f03a9c627d7408ef483d7920e25a927e633	/drivers/mmc/host/mmci.c
parent		51d4375dd72f352594f1a4f1d7598bf9a75b8dfe (diff)
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.

Add dmaengine based support for DMA to the MMCI driver, using the
Primecell DMA engine interface. The changes over Linus' driver are:

- rename txsize_threshold to dmasize_threshold, as this reflects the
  purpose more.

- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.

- clean up requesting of dma channels.

- don't release a single channel twice when it's shared between tx and rx.

- get rid of 'dma_enable' bool - instead check whether the channel is NULL.

- detect incomplete DMA at the end of a transfer. Some DMA controllers
  (eg, PL08x) are unable to be configured for scatter DMA and also listen
  to all four DMA request signals [BREQ,SREQ,LBREQ,LSREQ] from the MMCI.
  They can do one or other but not both. As MMCI uses LBREQ/LSREQ for
  the final burst/words, PL08x does not transfer the last few words.

- map and unmap DMA buffers using the DMA engine struct device, not the
  MMCI struct device - the DMA engine is doing the DMA transfer, not us.

- avoid double-unmapping of the DMA buffers on MMCI data errors.

- don't check for negative values from the dmaengine tx submission
  function - Dan says this must never fail.

- use new dmaengine helper functions rather than using the ugly function
  pointers directly.

- allow DMA code to be fully optimized away using dma_inprogress() which
  is defined to constant 0 if DMA engine support is disabled.

- request maximum segment size from the DMA engine struct device and
  set this appropriately.

- removed checking of buffer alignment - the DMA engine should deal with
  its own restrictions on buffer alignment, not the individual DMA engine
  users.

- removed setting DMAREQCTL - this confuses some DMA controllers as it
  causes LBREQ to be asserted for the last seven transfers, rather than
  six SREQ and one LSREQ.

- removed burst setting - the DMA controller should not burst past the
  transfer size required to complete the DMA operation.

Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
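Note: dma_inprogress() and the new host fields used in the diff below (phybase,
dma_current, dma_rx_channel, dma_tx_channel) are declared in mmci.h, which falls
outside the diffstat shown here (it is limited to mmci.c). The following is only
a hedged sketch of what that header side of the change looks like; the field
types and exact placement are assumptions, not the literal header diff:

	/* Sketch only: the real declarations live in drivers/mmc/host/mmci.h */
	struct mmci_host {
		/* ... existing fields ... */
		unsigned long		phybase;	/* physical base, used for the FIFO DMA address */
	#ifdef CONFIG_DMA_ENGINE
		/* DMA engine channels; NULL when DMA is unavailable */
		struct dma_chan		*dma_current;
		struct dma_chan		*dma_rx_channel;
		struct dma_chan		*dma_tx_channel;

	#define dma_inprogress(host)	((host)->dma_current)
	#else
	#define dma_inprogress(host)	(0)
	#endif
		/* ... */
	};

With the !CONFIG_DMA_ENGINE definition expanding to constant 0, the
"if (dma_inprogress(host))" checks in mmci_data_irq() below become dead code the
compiler can drop, which is how the DMA support is "fully optimized away".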
Diffstat (limited to 'drivers/mmc/host/mmci.c')
-rw-r--r--	drivers/mmc/host/mmci.c	282
1 files changed, 274 insertions, 8 deletions
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index db2a358143d6..8a29c9f4a812 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -2,7 +2,7 @@
  * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
  *
  * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- * Copyright (C) 2010 ST-Ericsson AB.
+ * Copyright (C) 2010 ST-Ericsson SA
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -25,8 +25,10 @@
 #include <linux/clk.h>
 #include <linux/scatterlist.h>
 #include <linux/gpio.h>
-#include <linux/amba/mmci.h>
 #include <linux/regulator/consumer.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/amba/mmci.h>
 
 #include <asm/div64.h>
 #include <asm/io.h>
@@ -186,6 +188,248 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
 	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
 }
 
+/*
+ * All the DMA operation mode stuff goes inside this ifdef.
+ * This assumes that you have a generic DMA device interface,
+ * no custom DMA interfaces are supported.
+ */
+#ifdef CONFIG_DMA_ENGINE
+static void __devinit mmci_dma_setup(struct mmci_host *host)
+{
+	struct mmci_platform_data *plat = host->plat;
+	const char *rxname, *txname;
+	dma_cap_mask_t mask;
+
+	if (!plat || !plat->dma_filter) {
+		dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
+		return;
+	}
+
+	/* Try to acquire a generic DMA engine slave channel */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/*
+	 * If only an RX channel is specified, the driver will
+	 * attempt to use it bidirectionally, however if it is
+	 * is specified but cannot be located, DMA will be disabled.
+	 */
+	if (plat->dma_rx_param) {
+		host->dma_rx_channel = dma_request_channel(mask,
+							   plat->dma_filter,
+							   plat->dma_rx_param);
+		/* E.g if no DMA hardware is present */
+		if (!host->dma_rx_channel)
+			dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
+	}
+
+	if (plat->dma_tx_param) {
+		host->dma_tx_channel = dma_request_channel(mask,
+							   plat->dma_filter,
+							   plat->dma_tx_param);
+		if (!host->dma_tx_channel)
+			dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
+	} else {
+		host->dma_tx_channel = host->dma_rx_channel;
+	}
+
+	if (host->dma_rx_channel)
+		rxname = dma_chan_name(host->dma_rx_channel);
+	else
+		rxname = "none";
+
+	if (host->dma_tx_channel)
+		txname = dma_chan_name(host->dma_tx_channel);
+	else
+		txname = "none";
+
+	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
+		 rxname, txname);
+
+	/*
+	 * Limit the maximum segment size in any SG entry according to
+	 * the parameters of the DMA engine device.
+	 */
+	if (host->dma_tx_channel) {
+		struct device *dev = host->dma_tx_channel->device->dev;
+		unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+		if (max_seg_size < host->mmc->max_seg_size)
+			host->mmc->max_seg_size = max_seg_size;
+	}
+	if (host->dma_rx_channel) {
+		struct device *dev = host->dma_rx_channel->device->dev;
+		unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+		if (max_seg_size < host->mmc->max_seg_size)
+			host->mmc->max_seg_size = max_seg_size;
+	}
+}
+
+/*
+ * This is used in __devinit or __devexit so inline it
+ * so it can be discarded.
+ */
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+	struct mmci_platform_data *plat = host->plat;
+
+	if (host->dma_rx_channel)
+		dma_release_channel(host->dma_rx_channel);
+	if (host->dma_tx_channel && plat->dma_tx_param)
+		dma_release_channel(host->dma_tx_channel);
+	host->dma_rx_channel = host->dma_tx_channel = NULL;
+}
+
+static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+	struct dma_chan *chan = host->dma_current;
+	enum dma_data_direction dir;
+	u32 status;
+	int i;
+
+	/* Wait up to 1ms for the DMA to complete */
+	for (i = 0; ; i++) {
+		status = readl(host->base + MMCISTATUS);
+		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
+			break;
+		udelay(10);
+	}
+
+	/*
+	 * Check to see whether we still have some data left in the FIFO -
+	 * this catches DMA controllers which are unable to monitor the
+	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
+	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
+	 */
+	if (status & MCI_RXDATAAVLBLMASK) {
+		dmaengine_terminate_all(chan);
+		if (!data->error)
+			data->error = -EIO;
+	}
+
+	if (data->flags & MMC_DATA_WRITE) {
+		dir = DMA_TO_DEVICE;
+	} else {
+		dir = DMA_FROM_DEVICE;
+	}
+
+	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+
+	/*
+	 * Use of DMA with scatter-gather is impossible.
+	 * Give up with DMA and switch back to PIO mode.
+	 */
+	if (status & MCI_RXDATAAVLBLMASK) {
+		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
+		mmci_dma_release(host);
+	}
+}
+
+static void mmci_dma_data_error(struct mmci_host *host)
+{
+	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+	dmaengine_terminate_all(host->dma_current);
+}
+
+static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+	struct variant_data *variant = host->variant;
+	struct dma_slave_config conf = {
+		.src_addr = host->phybase + MMCIFIFO,
+		.dst_addr = host->phybase + MMCIFIFO,
+		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
+		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
+	};
+	struct mmc_data *data = host->data;
+	struct dma_chan *chan;
+	struct dma_device *device;
+	struct dma_async_tx_descriptor *desc;
+	int nr_sg;
+
+	host->dma_current = NULL;
+
+	if (data->flags & MMC_DATA_READ) {
+		conf.direction = DMA_FROM_DEVICE;
+		chan = host->dma_rx_channel;
+	} else {
+		conf.direction = DMA_TO_DEVICE;
+		chan = host->dma_tx_channel;
+	}
+
+	/* If there's no DMA channel, fall back to PIO */
+	if (!chan)
+		return -EINVAL;
+
+	/* If less than or equal to the fifo size, don't bother with DMA */
+	if (host->size <= variant->fifosize)
+		return -EINVAL;
+
+	device = chan->device;
+	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
+	if (nr_sg == 0)
+		return -EINVAL;
+
+	dmaengine_slave_config(chan, &conf);
+	desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
+					    conf.direction, DMA_CTRL_ACK);
+	if (!desc)
+		goto unmap_exit;
+
+	/* Okay, go for it. */
+	host->dma_current = chan;
+
+	dev_vdbg(mmc_dev(host->mmc),
+		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
+		 data->sg_len, data->blksz, data->blocks, data->flags);
+	dmaengine_submit(desc);
+	dma_async_issue_pending(chan);
+
+	datactrl |= MCI_DPSM_DMAENABLE;
+
+	/* Trigger the DMA transfer */
+	writel(datactrl, host->base + MMCIDATACTRL);
+
+	/*
+	 * Let the MMCI say when the data is ended and it's time
+	 * to fire next DMA request. When that happens, MMCI will
+	 * call mmci_data_end()
+	 */
+	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
+	       host->base + MMCIMASK0);
+	return 0;
+
+unmap_exit:
+	dmaengine_terminate_all(chan);
+	dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+	return -ENOMEM;
+}
+#else
+/* Blank functions if the DMA engine is not available */
+static inline void mmci_dma_setup(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+}
+
+static inline void mmci_dma_data_error(struct mmci_host *host)
+{
+}
+
+static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+	return -ENOSYS;
+}
+#endif
+
 static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 {
 	struct variant_data *variant = host->variant;
@@ -201,8 +445,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 	host->size = data->blksz * data->blocks;
 	data->bytes_xfered = 0;
 
-	mmci_init_sg(host, data);
-
 	clks = (unsigned long long)data->timeout_ns * host->cclk;
 	do_div(clks, 1000000000UL);
 
@@ -216,8 +458,21 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 	BUG_ON(1 << blksz_bits != data->blksz);
 
 	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
-	if (data->flags & MMC_DATA_READ) {
+
+	if (data->flags & MMC_DATA_READ)
 		datactrl |= MCI_DPSM_DIRECTION;
+
+	/*
+	 * Attempt to use DMA operation mode, if this
+	 * should fail, fall back to PIO mode
+	 */
+	if (!mmci_dma_start_data(host, datactrl))
+		return;
+
+	/* IRQ mode, map the SG list for CPU reading/writing */
+	mmci_init_sg(host, data);
+
+	if (data->flags & MMC_DATA_READ) {
 		irqmask = MCI_RXFIFOHALFFULLMASK;
 
 		/*
@@ -281,6 +536,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
 		u32 remain, success;
 
+		/* Terminate the DMA transfer */
+		if (dma_inprogress(host))
+			mmci_dma_data_error(host);
+
 		/*
 		 * Calculate how far we are into the transfer. Note that
 		 * the data counter gives the number of bytes transferred
@@ -315,6 +574,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
 
 	if (status & MCI_DATAEND || data->error) {
+		if (dma_inprogress(host))
+			mmci_dma_unmap(host, data);
 		mmci_stop_data(host);
 
 		if (!data->error)
@@ -767,6 +1028,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
 			host->mclk);
 	}
+	host->phybase = dev->res.start;
 	host->base = ioremap(dev->res.start, resource_size(&dev->res));
 	if (!host->base) {
 		ret = -ENOMEM;
@@ -894,9 +1156,12 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 
 	amba_set_drvdata(dev, mmc);
 
-	dev_info(&dev->dev, "%s: PL%03x rev%u at 0x%08llx irq %d,%d\n",
-		 mmc_hostname(mmc), amba_part(dev), amba_rev(dev),
-		 (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);
+	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
+		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
+		 amba_rev(dev), (unsigned long long)dev->res.start,
+		 dev->irq[0], dev->irq[1]);
+
+	mmci_dma_setup(host);
 
 	mmc_add_host(mmc);
 
@@ -943,6 +1208,7 @@ static int __devexit mmci_remove(struct amba_device *dev)
 		writel(0, host->base + MMCICOMMAND);
 		writel(0, host->base + MMCIDATACTRL);
 
+		mmci_dma_release(host);
 		free_irq(dev->irq[0], host);
 		if (!host->singleirq)
 			free_irq(dev->irq[1], host);
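For context, a board or platform file hands the DMA channel information to this
driver through mmci_platform_data (the plat->dma_filter / dma_rx_param /
dma_tx_param references in mmci_dma_setup() above). Below is a minimal, hedged
sketch of that wiring, assuming a PL08x DMA controller; "mmci_rx"/"mmci_tx" are
placeholder channel names for illustration, not values taken from this commit:

	#include <linux/amba/mmci.h>
	#include <linux/amba/pl08x.h>	/* assumption: platform uses a PL08x controller */

	static struct mmci_platform_data mmci_plat_data = {
		/* ... ocr_mask, status callback, gpio_cd/gpio_wp, etc. ... */
		.dma_filter	= pl08x_filter_id,	/* dmaengine filter matching PL08x channels */
		.dma_rx_param	= "mmci_rx",		/* opaque cookie handed back to the filter */
		.dma_tx_param	= "mmci_tx",
	};

If dma_tx_param is left NULL, mmci_dma_setup() reuses the RX channel for both
directions, and mmci_dma_release() takes care not to release that shared channel
twice.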