author		Guennadi Liakhovetski <g.liakhovetski@gmx.de>	2010-11-24 05:05:22 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2010-11-25 02:26:46 -0500
commit		a782d688e9c6f9ca9a7a9a28e8e2876969ddef53 (patch)
tree		c8b13a19a1397bbbda255a59ce8a26518991d2e5 /drivers/mmc/host/sh_mmcif.c
parent		e47bf32aa8de06ec72e18b4fbbd880caeedb0088 (diff)
mmc: sh_mmcif: add DMA support
The MMCIF controller on sh-mobile platforms can use the DMA controller
for data transfers. Interface to the SH dmaengine driver to enable DMA.
We also have to lower the maximum number of segments to match the number
of DMA descriptors on SuperH; this does not significantly affect the
driver's PIO performance.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
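Usage sketch: to engage DMA, platform code points the new "dma" member of
struct sh_mmcif_plat_data at a pair of dmaengine slave descriptors, which
the driver hands to dma_request_channel() through sh_mmcif_filter(). A
minimal board-file sketch follows; the SHDMA_SLAVE_MMCIF_TX/RX slave IDs,
the ocr/caps values, and the variable names are illustrative placeholders
that depend on the SoC and board, and are not part of this patch.

        /* Hypothetical board wiring; slave IDs come from the SoC's DMAC setup */
        static struct sh_mmcif_dma sh_mmcif_dma = {
                .chan_priv_tx = {
                        .slave_id = SHDMA_SLAVE_MMCIF_TX,  /* placeholder ID */
                },
                .chan_priv_rx = {
                        .slave_id = SHDMA_SLAVE_MMCIF_RX,  /* placeholder ID */
                },
        };

        static struct sh_mmcif_plat_data sh_mmcif_plat = {
                .ocr  = MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34,
                .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
                .dma  = &sh_mmcif_dma,  /* leave NULL to stay in PIO mode */
        };

If .dma is left NULL, sh_mmcif_request_dma() requests no channels and the
driver transfers data by PIO exactly as before.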
Diffstat (limited to 'drivers/mmc/host/sh_mmcif.c')
-rw-r--r--	drivers/mmc/host/sh_mmcif.c	| 246
1 file changed, 241 insertions(+), 5 deletions(-)
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index b2f261cdaec1..d09a2b38eeeb 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -20,12 +20,14 @@
 #include <linux/completion.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 #include <linux/mmc/card.h>
 #include <linux/mmc/core.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
 #include <linux/mmc/sdio.h>
 #include <linux/mmc/sh_mmcif.h>
+#include <linux/pagemap.h>
 #include <linux/platform_device.h>
 
 #define DRIVER_NAME "sh_mmcif"
@@ -162,8 +164,13 @@ struct sh_mmcif_host {
         long timeout;
         void __iomem *addr;
         struct completion intr_wait;
-};
 
+        /* DMA support */
+        struct dma_chan *chan_rx;
+        struct dma_chan *chan_tx;
+        struct completion dma_complete;
+        unsigned int dma_sglen;
+};
 
 static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
                                    unsigned int reg, u32 val)
@@ -177,6 +184,208 @@ static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
         writel(~val & readl(host->addr + reg), host->addr + reg);
 }
 
+#ifdef CONFIG_SH_MMCIF_DMA
+static void mmcif_dma_complete(void *arg)
+{
+        struct sh_mmcif_host *host = arg;
+        dev_dbg(&host->pd->dev, "Command completed\n");
+
+        if (WARN(!host->data, "%s: NULL data in DMA completion!\n",
+                 dev_name(&host->pd->dev)))
+                return;
+
+        if (host->data->flags & MMC_DATA_READ)
+                dma_unmap_sg(&host->pd->dev, host->data->sg, host->dma_sglen,
+                             DMA_FROM_DEVICE);
+        else
+                dma_unmap_sg(&host->pd->dev, host->data->sg, host->dma_sglen,
+                             DMA_TO_DEVICE);
+
+        complete(&host->dma_complete);
+}
+
+static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
+{
+        struct scatterlist *sg = host->data->sg;
+        struct dma_async_tx_descriptor *desc = NULL;
+        struct dma_chan *chan = host->chan_rx;
+        dma_cookie_t cookie = -EINVAL;
+        int ret;
+
+        ret = dma_map_sg(&host->pd->dev, sg, host->data->sg_len, DMA_FROM_DEVICE);
+        if (ret > 0) {
+                host->dma_sglen = ret;
+                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+                        DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+        }
+
+        if (desc) {
+                desc->callback = mmcif_dma_complete;
+                desc->callback_param = host;
+                cookie = desc->tx_submit(desc);
+                if (cookie < 0) {
+                        desc = NULL;
+                        ret = cookie;
+                } else {
+                        sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
+                        chan->device->device_issue_pending(chan);
+                }
+        }
+        dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
+                __func__, host->data->sg_len, ret, cookie);
+
+        if (!desc) {
+                /* DMA failed, fall back to PIO */
+                if (ret >= 0)
+                        ret = -EIO;
+                host->chan_rx = NULL;
+                host->dma_sglen = 0;
+                dma_release_channel(chan);
+                /* Free the Tx channel too */
+                chan = host->chan_tx;
+                if (chan) {
+                        host->chan_tx = NULL;
+                        dma_release_channel(chan);
+                }
+                dev_warn(&host->pd->dev,
+                         "DMA failed: %d, falling back to PIO\n", ret);
+                sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
+        }
+
+        dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
+                desc, cookie, host->data->sg_len);
+}
+
+static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
+{
+        struct scatterlist *sg = host->data->sg;
+        struct dma_async_tx_descriptor *desc = NULL;
+        struct dma_chan *chan = host->chan_tx;
+        dma_cookie_t cookie = -EINVAL;
+        int ret;
+
+        ret = dma_map_sg(&host->pd->dev, sg, host->data->sg_len, DMA_TO_DEVICE);
+        if (ret > 0) {
+                host->dma_sglen = ret;
+                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+                        DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+        }
+
+        if (desc) {
+                desc->callback = mmcif_dma_complete;
+                desc->callback_param = host;
+                cookie = desc->tx_submit(desc);
+                if (cookie < 0) {
+                        desc = NULL;
+                        ret = cookie;
+                } else {
+                        sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
+                        chan->device->device_issue_pending(chan);
+                }
+        }
+        dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
+                __func__, host->data->sg_len, ret, cookie);
+
+        if (!desc) {
+                /* DMA failed, fall back to PIO */
+                if (ret >= 0)
+                        ret = -EIO;
+                host->chan_tx = NULL;
+                host->dma_sglen = 0;
+                dma_release_channel(chan);
+                /* Free the Rx channel too */
+                chan = host->chan_rx;
+                if (chan) {
+                        host->chan_rx = NULL;
+                        dma_release_channel(chan);
+                }
+                dev_warn(&host->pd->dev,
+                         "DMA failed: %d, falling back to PIO\n", ret);
+                sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
+        }
+
+        dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
+                desc, cookie);
+}
+
+static bool sh_mmcif_filter(struct dma_chan *chan, void *arg)
+{
+        dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
+        chan->private = arg;
+        return true;
+}
+
+static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
+                                 struct sh_mmcif_plat_data *pdata)
+{
+        host->dma_sglen = 0;
+
+        /* We can only either use DMA for both Tx and Rx or not use it at all */
+        if (pdata->dma) {
+                dma_cap_mask_t mask;
+
+                dma_cap_zero(mask);
+                dma_cap_set(DMA_SLAVE, mask);
+
+                host->chan_tx = dma_request_channel(mask, sh_mmcif_filter,
+                                                    &pdata->dma->chan_priv_tx);
+                dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
+                        host->chan_tx);
+
+                if (!host->chan_tx)
+                        return;
+
+                host->chan_rx = dma_request_channel(mask, sh_mmcif_filter,
+                                                    &pdata->dma->chan_priv_rx);
+                dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
+                        host->chan_rx);
+
+                if (!host->chan_rx) {
+                        dma_release_channel(host->chan_tx);
+                        host->chan_tx = NULL;
+                        return;
+                }
+
+                init_completion(&host->dma_complete);
+        }
+}
+
+static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
+{
+        sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
+        /* Descriptors are freed automatically */
+        if (host->chan_tx) {
+                struct dma_chan *chan = host->chan_tx;
+                host->chan_tx = NULL;
+                dma_release_channel(chan);
+        }
+        if (host->chan_rx) {
+                struct dma_chan *chan = host->chan_rx;
+                host->chan_rx = NULL;
+                dma_release_channel(chan);
+        }
+
+        host->dma_sglen = 0;
+}
+#else
+static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
+{
+}
+
+static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
+{
+}
+
+static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
+                                 struct sh_mmcif_plat_data *pdata)
+{
+        /* host->chan_tx, host->chan_rx and host->dma_sglen are all zero */
+}
+
+static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
+{
+}
+#endif
 
 static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
 {
@@ -564,7 +773,20 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
         }
         sh_mmcif_get_response(host, cmd);
         if (host->data) {
-                ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
+                if (!host->dma_sglen) {
+                        ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
+                } else {
+                        long time =
+                                wait_for_completion_interruptible_timeout(&host->dma_complete,
+                                                                          host->timeout);
+                        if (!time)
+                                ret = -ETIMEDOUT;
+                        else if (time < 0)
+                                ret = time;
+                        sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
+                                        BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
+                        host->dma_sglen = 0;
+                }
                 if (ret < 0)
                         mrq->data->bytes_xfered = 0;
                 else
@@ -622,6 +844,15 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
                 break;
         }
         host->data = mrq->data;
+        if (mrq->data) {
+                if (mrq->data->flags & MMC_DATA_READ) {
+                        if (host->chan_rx)
+                                sh_mmcif_start_dma_rx(host);
+                } else {
+                        if (host->chan_tx)
+                                sh_mmcif_start_dma_tx(host);
+                }
+        }
         sh_mmcif_start_cmd(host, mrq, mrq->cmd);
         host->data = NULL;
 
@@ -806,14 +1037,18 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
         mmc->caps = MMC_CAP_MMC_HIGHSPEED;
         if (pd->caps)
                 mmc->caps |= pd->caps;
-        mmc->max_segs = 128;
+        mmc->max_segs = 32;
         mmc->max_blk_size = 512;
-        mmc->max_blk_count = 65535;
-        mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+        mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
+        mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
         mmc->max_seg_size = mmc->max_req_size;
 
         sh_mmcif_sync_reset(host);
         platform_set_drvdata(pdev, host);
+
+        /* See if we also get DMA */
+        sh_mmcif_request_dma(host, pd);
+
         mmc_add_host(mmc);
 
         ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
@@ -852,6 +1087,7 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
         int irq[2];
 
         mmc_remove_host(host->mmc);
+        sh_mmcif_release_dma(host);
 
         if (host->addr)
                 iounmap(host->addr);