author		Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit		c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree		ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/mmc/host/mmci.c
parent		ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent		6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'drivers/mmc/host/mmci.c')
-rw-r--r--	drivers/mmc/host/mmci.c	633
1 file changed, 538 insertions(+), 95 deletions(-)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 840b301b5671..fe140724a02e 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -2,7 +2,7 @@
  * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
  *
  * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- * Copyright (C) 2010 ST-Ericsson AB.
+ * Copyright (C) 2010 ST-Ericsson SA
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -14,17 +14,21 @@
 #include <linux/ioport.h>
 #include <linux/device.h>
 #include <linux/interrupt.h>
+#include <linux/kernel.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/highmem.h>
 #include <linux/log2.h>
 #include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
 #include <linux/amba/bus.h>
 #include <linux/clk.h>
 #include <linux/scatterlist.h>
 #include <linux/gpio.h>
-#include <linux/amba/mmci.h>
 #include <linux/regulator/consumer.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/amba/mmci.h>
 
 #include <asm/div64.h>
 #include <asm/io.h>
@@ -41,27 +45,66 @@ static unsigned int fmax = 515633;
  * @clkreg: default value for MCICLOCK register
  * @clkreg_enable: enable value for MMCICLOCK register
  * @datalength_bits: number of bits in the MMCIDATALENGTH register
+ * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
+ *	      is asserted (likewise for RX)
+ * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
+ *		  is asserted (likewise for RX)
+ * @sdio: variant supports SDIO
+ * @st_clkdiv: true if using a ST-specific clock divider algorithm
+ * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
  */
 struct variant_data {
 	unsigned int		clkreg;
 	unsigned int		clkreg_enable;
 	unsigned int		datalength_bits;
+	unsigned int		fifosize;
+	unsigned int		fifohalfsize;
+	bool			sdio;
+	bool			st_clkdiv;
+	bool			blksz_datactrl16;
 };
 
 static struct variant_data variant_arm = {
+	.fifosize		= 16 * 4,
+	.fifohalfsize		= 8 * 4,
+	.datalength_bits	= 16,
+};
+
+static struct variant_data variant_arm_extended_fifo = {
+	.fifosize		= 128 * 4,
+	.fifohalfsize		= 64 * 4,
 	.datalength_bits	= 16,
 };
 
 static struct variant_data variant_u300 = {
-	.clkreg_enable		= 1 << 13, /* HWFCEN */
+	.fifosize		= 16 * 4,
+	.fifohalfsize		= 8 * 4,
+	.clkreg_enable		= MCI_ST_U300_HWFCEN,
 	.datalength_bits	= 16,
+	.sdio			= true,
 };
 
 static struct variant_data variant_ux500 = {
+	.fifosize		= 30 * 4,
+	.fifohalfsize		= 8 * 4,
 	.clkreg			= MCI_CLK_ENABLE,
-	.clkreg_enable		= 1 << 14, /* HWFCEN */
+	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
 	.datalength_bits	= 24,
+	.sdio			= true,
+	.st_clkdiv		= true,
 };
+
+static struct variant_data variant_ux500v2 = {
+	.fifosize		= 30 * 4,
+	.fifohalfsize		= 8 * 4,
+	.clkreg			= MCI_CLK_ENABLE,
+	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
+	.datalength_bits	= 24,
+	.sdio			= true,
+	.st_clkdiv		= true,
+	.blksz_datactrl16	= true,
+};
+
 /*
  * This must be called with host->lock held
  */
@@ -73,8 +116,25 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
 	if (desired) {
 		if (desired >= host->mclk) {
 			clk = MCI_CLK_BYPASS;
+			if (variant->st_clkdiv)
+				clk |= MCI_ST_UX500_NEG_EDGE;
 			host->cclk = host->mclk;
+		} else if (variant->st_clkdiv) {
+			/*
+			 * DB8500 TRM says f = mclk / (clkdiv + 2)
+			 * => clkdiv = (mclk / f) - 2
+			 * Round the divider up so we don't exceed the max
+			 * frequency
+			 */
+			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
+			if (clk >= 256)
+				clk = 255;
+			host->cclk = host->mclk / (clk + 2);
 		} else {
+			/*
+			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
+			 * => clkdiv = mclk / (2 * f) - 1
+			 */
 			clk = host->mclk / (2 * desired) - 1;
 			if (clk >= 256)
 				clk = 255;
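The two divider formulas differ in their rounding: the PL180 path truncates, so the programmed clock can land above the requested frequency, while the ST path rounds the divider up so it never does. A standalone sketch of the arithmetic (the 100 MHz mclk and 24 MHz target are illustrative values, not taken from the driver):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int mclk = 100000000, desired = 24000000, clk;

		/* ST (DB8500): f = mclk / (clkdiv + 2), divider rounded up */
		clk = DIV_ROUND_UP(mclk, desired) - 2;	/* ceil(100/24) - 2 = 3 */
		printf("st:    clkdiv=%u cclk=%u\n", clk, mclk / (clk + 2));	/* 20 MHz */

		/* PL180: f = mclk / (2 * (clkdiv + 1)), truncating */
		clk = mclk / (2 * desired) - 1;		/* 100/48 - 1 = 1 */
		printf("pl180: clkdiv=%u cclk=%u\n", clk, mclk / (2 * (clk + 1)));	/* 25 MHz, overshoots */
		return 0;
	}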
@@ -105,9 +165,6 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
 	host->mrq = NULL;
 	host->cmd = NULL;
 
-	if (mrq->data)
-		mrq->data->bytes_xfered = host->data_xfered;
-
 	/*
 	 * Need to drop the host lock here; mmc_request_done may call
 	 * back into the driver...
@@ -117,10 +174,26 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
 	spin_lock(&host->lock);
 }
 
+static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
+{
+	void __iomem *base = host->base;
+
+	if (host->singleirq) {
+		unsigned int mask0 = readl(base + MMCIMASK0);
+
+		mask0 &= ~MCI_IRQ1MASK;
+		mask0 |= mask;
+
+		writel(mask0, base + MMCIMASK0);
+	}
+
+	writel(mask, base + MMCIMASK1);
+}
+
 static void mmci_stop_data(struct mmci_host *host)
 {
 	writel(0, host->base + MMCIDATACTRL);
-	writel(0, host->base + MMCIMASK1);
+	mmci_set_mask1(host, 0);
 	host->data = NULL;
 }
 
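On single-IRQ platforms mmci_set_mask1() keeps the MASK1-routed status bits mirrored in MASK0, so PIO events raise the one combined interrupt line. A minimal sketch of the read-modify-write it performs, assuming an illustrative MCI_IRQ1MASK value (the real bit layout lives in mmci.h):

	#include <stdio.h>

	#define MCI_IRQ1MASK 0x003000fc	/* assumption for illustration only */

	int main(void)
	{
		unsigned int mask0 = 0x018003ff;	/* example current MASK0  */
		unsigned int mask1 = 0x00000030;	/* new PIO interrupt mask */

		mask0 &= ~MCI_IRQ1MASK;	/* clear previously folded PIO bits */
		mask0 |= mask1;		/* fold in the new ones             */
		printf("MASK0 = 0x%08x\n", mask0);	/* 0x01800333 here */
		return 0;
	}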
@@ -136,8 +209,251 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
 	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
 }
 
+/*
+ * All the DMA operation mode stuff goes inside this ifdef.
+ * This assumes that you have a generic DMA device interface,
+ * no custom DMA interfaces are supported.
+ */
+#ifdef CONFIG_DMA_ENGINE
+static void __devinit mmci_dma_setup(struct mmci_host *host)
+{
+	struct mmci_platform_data *plat = host->plat;
+	const char *rxname, *txname;
+	dma_cap_mask_t mask;
+
+	if (!plat || !plat->dma_filter) {
+		dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
+		return;
+	}
+
+	/* Try to acquire a generic DMA engine slave channel */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/*
+	 * If only an RX channel is specified, the driver will
+	 * attempt to use it bidirectionally, however if it
+	 * is specified but cannot be located, DMA will be disabled.
+	 */
+	if (plat->dma_rx_param) {
+		host->dma_rx_channel = dma_request_channel(mask,
+							   plat->dma_filter,
+							   plat->dma_rx_param);
+		/* E.g. if no DMA hardware is present */
+		if (!host->dma_rx_channel)
+			dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
+	}
+
+	if (plat->dma_tx_param) {
+		host->dma_tx_channel = dma_request_channel(mask,
+							   plat->dma_filter,
+							   plat->dma_tx_param);
+		if (!host->dma_tx_channel)
+			dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
+	} else {
+		host->dma_tx_channel = host->dma_rx_channel;
+	}
+
+	if (host->dma_rx_channel)
+		rxname = dma_chan_name(host->dma_rx_channel);
+	else
+		rxname = "none";
+
+	if (host->dma_tx_channel)
+		txname = dma_chan_name(host->dma_tx_channel);
+	else
+		txname = "none";
+
+	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
+		 rxname, txname);
+
+	/*
+	 * Limit the maximum segment size in any SG entry according to
+	 * the parameters of the DMA engine device.
+	 */
+	if (host->dma_tx_channel) {
+		struct device *dev = host->dma_tx_channel->device->dev;
+		unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+		if (max_seg_size < host->mmc->max_seg_size)
+			host->mmc->max_seg_size = max_seg_size;
+	}
+	if (host->dma_rx_channel) {
+		struct device *dev = host->dma_rx_channel->device->dev;
+		unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+		if (max_seg_size < host->mmc->max_seg_size)
+			host->mmc->max_seg_size = max_seg_size;
+	}
+}
+
+/*
+ * This is used in __devinit or __devexit so inline it
+ * so it can be discarded.
+ */
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+	struct mmci_platform_data *plat = host->plat;
+
+	if (host->dma_rx_channel)
+		dma_release_channel(host->dma_rx_channel);
+	if (host->dma_tx_channel && plat->dma_tx_param)
+		dma_release_channel(host->dma_tx_channel);
+	host->dma_rx_channel = host->dma_tx_channel = NULL;
+}
+
+static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+	struct dma_chan *chan = host->dma_current;
+	enum dma_data_direction dir;
+	u32 status;
+	int i;
+
+	/* Wait up to 1ms for the DMA to complete */
+	for (i = 0; ; i++) {
+		status = readl(host->base + MMCISTATUS);
+		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
+			break;
+		udelay(10);
+	}
+
+	/*
+	 * Check to see whether we still have some data left in the FIFO -
+	 * this catches DMA controllers which are unable to monitor the
+	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
+	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
+	 */
+	if (status & MCI_RXDATAAVLBLMASK) {
+		dmaengine_terminate_all(chan);
+		if (!data->error)
+			data->error = -EIO;
+	}
+
+	if (data->flags & MMC_DATA_WRITE) {
+		dir = DMA_TO_DEVICE;
+	} else {
+		dir = DMA_FROM_DEVICE;
+	}
+
+	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+
+	/*
+	 * Use of DMA with scatter-gather is impossible.
+	 * Give up with DMA and switch back to PIO mode.
+	 */
+	if (status & MCI_RXDATAAVLBLMASK) {
+		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
+		mmci_dma_release(host);
+	}
+}
+
+static void mmci_dma_data_error(struct mmci_host *host)
+{
+	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+	dmaengine_terminate_all(host->dma_current);
+}
+
+static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+	struct variant_data *variant = host->variant;
+	struct dma_slave_config conf = {
+		.src_addr = host->phybase + MMCIFIFO,
+		.dst_addr = host->phybase + MMCIFIFO,
+		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
+		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
+	};
+	struct mmc_data *data = host->data;
+	struct dma_chan *chan;
+	struct dma_device *device;
+	struct dma_async_tx_descriptor *desc;
+	int nr_sg;
+
+	host->dma_current = NULL;
+
+	if (data->flags & MMC_DATA_READ) {
+		conf.direction = DMA_FROM_DEVICE;
+		chan = host->dma_rx_channel;
+	} else {
+		conf.direction = DMA_TO_DEVICE;
+		chan = host->dma_tx_channel;
+	}
+
+	/* If there's no DMA channel, fall back to PIO */
+	if (!chan)
+		return -EINVAL;
+
+	/* If less than or equal to the fifo size, don't bother with DMA */
+	if (host->size <= variant->fifosize)
+		return -EINVAL;
+
+	device = chan->device;
+	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
+	if (nr_sg == 0)
+		return -EINVAL;
+
+	dmaengine_slave_config(chan, &conf);
+	desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
+					    conf.direction, DMA_CTRL_ACK);
+	if (!desc)
+		goto unmap_exit;
+
+	/* Okay, go for it. */
+	host->dma_current = chan;
+
+	dev_vdbg(mmc_dev(host->mmc),
+		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
+		 data->sg_len, data->blksz, data->blocks, data->flags);
+	dmaengine_submit(desc);
+	dma_async_issue_pending(chan);
+
+	datactrl |= MCI_DPSM_DMAENABLE;
+
+	/* Trigger the DMA transfer */
+	writel(datactrl, host->base + MMCIDATACTRL);
+
+	/*
+	 * Let the MMCI say when the data is ended and it's time
+	 * to fire next DMA request. When that happens, MMCI will
+	 * call mmci_data_end()
+	 */
+	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
+	       host->base + MMCIMASK0);
+	return 0;
+
+unmap_exit:
+	dmaengine_terminate_all(chan);
+	dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+	return -ENOMEM;
+}
+#else
+/* Blank functions if the DMA engine is not available */
+static inline void mmci_dma_setup(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+}
+
+static inline void mmci_dma_data_error(struct mmci_host *host)
+{
+}
+
+static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+	return -ENOSYS;
+}
+#endif
+
 static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 {
+	struct variant_data *variant = host->variant;
 	unsigned int datactrl, timeout, irqmask;
 	unsigned long long clks;
 	void __iomem *base;
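The DMA path above only engages when the platform supplies a filter callback and channel parameters through struct mmci_platform_data. A hypothetical board-file fragment showing the shape of that data (pl08x_filter_id is the PL08x dmaengine driver's filter; the channel name strings are assumptions for illustration, and any filter that selects the right channel from dma_rx_param/dma_tx_param will do):

	#include <linux/amba/mmci.h>
	#include <linux/amba/pl08x.h>
	#include <linux/mmc/host.h>

	static struct mmci_platform_data mmci_plat_data = {
		.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
		.dma_filter	= pl08x_filter_id,
		.dma_rx_param	= "mmci_rx",	/* hypothetical channel name */
		.dma_tx_param	= "mmci_tx",	/* hypothetical channel name */
	};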
@@ -148,9 +464,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 
 	host->data = data;
 	host->size = data->blksz * data->blocks;
-	host->data_xfered = 0;
-
-	mmci_init_sg(host, data);
+	data->bytes_xfered = 0;
 
 	clks = (unsigned long long)data->timeout_ns * host->cclk;
 	do_div(clks, 1000000000UL);
@@ -164,16 +478,33 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 	blksz_bits = ffs(data->blksz) - 1;
 	BUG_ON(1 << blksz_bits != data->blksz);
 
-	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
-	if (data->flags & MMC_DATA_READ) {
+	if (variant->blksz_datactrl16)
+		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
+	else
+		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
+
+	if (data->flags & MMC_DATA_READ)
 		datactrl |= MCI_DPSM_DIRECTION;
+
+	/*
+	 * Attempt to use DMA operation mode, if this
+	 * should fail, fall back to PIO mode
+	 */
+	if (!mmci_dma_start_data(host, datactrl))
+		return;
+
+	/* IRQ mode, map the SG list for CPU reading/writing */
+	mmci_init_sg(host, data);
+
+	if (data->flags & MMC_DATA_READ) {
 		irqmask = MCI_RXFIFOHALFFULLMASK;
 
 		/*
-		 * If we have less than a FIFOSIZE of bytes to transfer,
-		 * trigger a PIO interrupt as soon as any data is available.
+		 * If we have less than the fifo 'half-full' threshold to
+		 * transfer, trigger a PIO interrupt as soon as any data
+		 * is available.
 		 */
-		if (host->size < MCI_FIFOSIZE)
+		if (host->size < variant->fifohalfsize)
 			irqmask |= MCI_RXDATAAVLBLMASK;
 	} else {
 		/*
@@ -183,9 +514,14 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 		irqmask = MCI_TXFIFOHALFEMPTYMASK;
 	}
 
+	/* The ST Micro variants have a special bit to enable SDIO */
+	if (variant->sdio && host->mmc->card)
+		if (mmc_card_sdio(host->mmc->card))
+			datactrl |= MCI_ST_DPSM_SDIOEN;
+
 	writel(datactrl, base + MMCIDATACTRL);
 	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
-	writel(irqmask, base + MMCIMASK1);
+	mmci_set_mask1(host, irqmask);
 }
 
 static void
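Worked example of the two block-size encodings selected in mmci_start_data() above, for a 512-byte block (MCI_DPSM_ENABLE is bit 0 in the driver's register layout; the values here are computed by hand, not read from the hardware headers):

	#include <stdio.h>

	#define MCI_DPSM_ENABLE 0x1

	int main(void)
	{
		unsigned int blksz = 512;
		unsigned int blksz_bits = __builtin_ffs(blksz) - 1;	/* 9 */

		/* classic encoding: log2(blksz) in bits 4..7 */
		printf("0x%08x\n", MCI_DPSM_ENABLE | (blksz_bits << 4));	/* 0x00000091 */
		/* ux500v2 (blksz_datactrl16): raw blksz in bits 16..30 */
		printf("0x%08x\n", MCI_DPSM_ENABLE | (blksz << 16));		/* 0x02000001 */
		return 0;
	}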
@@ -220,49 +556,58 @@ static void
 mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 	      unsigned int status)
 {
-	if (status & MCI_DATABLOCKEND) {
-		host->data_xfered += data->blksz;
-#ifdef CONFIG_ARCH_U300
+	/* First check for errors */
+	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+		u32 remain, success;
+
+		/* Terminate the DMA transfer */
+		if (dma_inprogress(host))
+			mmci_dma_data_error(host);
+
 		/*
-		 * On the U300 some signal or other is
-		 * badly routed so that a data write does
-		 * not properly terminate with a MCI_DATAEND
-		 * status flag. This quirk will make writes
-		 * work again.
+		 * Calculate how far we are into the transfer. Note that
+		 * the data counter gives the number of bytes transferred
+		 * on the MMC bus, not on the host side. On reads, this
+		 * can be as much as a FIFO-worth of data ahead. This
+		 * matters for FIFO overruns only.
 		 */
-		if (data->flags & MMC_DATA_WRITE)
-			status |= MCI_DATAEND;
-#endif
-	}
-	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
-		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
-		if (status & MCI_DATACRCFAIL)
+		remain = readl(host->base + MMCIDATACNT);
+		success = data->blksz * data->blocks - remain;
+
+		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
+			status, success);
+		if (status & MCI_DATACRCFAIL) {
+			/* Last block was not successful */
+			success -= 1;
 			data->error = -EILSEQ;
-		else if (status & MCI_DATATIMEOUT)
+		} else if (status & MCI_DATATIMEOUT) {
 			data->error = -ETIMEDOUT;
-		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
+		} else if (status & MCI_STARTBITERR) {
+			data->error = -ECOMM;
+		} else if (status & MCI_TXUNDERRUN) {
+			data->error = -EIO;
+		} else if (status & MCI_RXOVERRUN) {
+			if (success > host->variant->fifosize)
+				success -= host->variant->fifosize;
+			else
+				success = 0;
 			data->error = -EIO;
-		status |= MCI_DATAEND;
-
-		/*
-		 * We hit an error condition.  Ensure that any data
-		 * partially written to a page is properly coherent.
-		 */
-		if (data->flags & MMC_DATA_READ) {
-			struct sg_mapping_iter *sg_miter = &host->sg_miter;
-			unsigned long flags;
-
-			local_irq_save(flags);
-			if (sg_miter_next(sg_miter)) {
-				flush_dcache_page(sg_miter->page);
-				sg_miter_stop(sg_miter);
-			}
-			local_irq_restore(flags);
 		}
+		data->bytes_xfered = round_down(success, data->blksz);
 	}
-	if (status & MCI_DATAEND) {
+
+	if (status & MCI_DATABLOCKEND)
+		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
+
+	if (status & MCI_DATAEND || data->error) {
+		if (dma_inprogress(host))
+			mmci_dma_unmap(host, data);
 		mmci_stop_data(host);
 
+		if (!data->error)
+			/* The error clause is handled above, success! */
+			data->bytes_xfered = data->blksz * data->blocks;
+
 		if (!data->stop) {
 			mmci_request_end(host, data->mrq);
 		} else {
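Worked example of the transfer accounting above, assuming an 8-block transfer of 512-byte blocks that fails with a CRC error while the hardware data counter still reports 1000 bytes remaining (the modulo round_down is a sketch of the kernel helper, which is defined for power-of-two alignment):

	#include <stdio.h>

	#define round_down(x, y) ((x) - ((x) % (y)))	/* sketch of the kernel macro */

	int main(void)
	{
		unsigned int blksz = 512, blocks = 8;
		unsigned int remain = 1000;			/* read from MMCIDATACNT */
		unsigned int success = blksz * blocks - remain;	/* 4096 - 1000 = 3096    */

		success -= 1;	/* MCI_DATACRCFAIL: last block was not successful */
		printf("bytes_xfered = %u\n", round_down(success, blksz));	/* 3072 = 6 blocks */
		return 0;
	}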
@@ -279,15 +624,15 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
 
 	host->cmd = NULL;
 
-	cmd->resp[0] = readl(base + MMCIRESPONSE0);
-	cmd->resp[1] = readl(base + MMCIRESPONSE1);
-	cmd->resp[2] = readl(base + MMCIRESPONSE2);
-	cmd->resp[3] = readl(base + MMCIRESPONSE3);
-
 	if (status & MCI_CMDTIMEOUT) {
 		cmd->error = -ETIMEDOUT;
 	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
 		cmd->error = -EILSEQ;
+	} else {
+		cmd->resp[0] = readl(base + MMCIRESPONSE0);
+		cmd->resp[1] = readl(base + MMCIRESPONSE1);
+		cmd->resp[2] = readl(base + MMCIRESPONSE2);
+		cmd->resp[3] = readl(base + MMCIRESPONSE3);
 	}
 
 	if (!cmd->data || cmd->error) {
@@ -332,16 +677,43 @@ static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int rema
 
 static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
 {
+	struct variant_data *variant = host->variant;
 	void __iomem *base = host->base;
 	char *ptr = buffer;
 
 	do {
 		unsigned int count, maxcnt;
 
-		maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
+		maxcnt = status & MCI_TXFIFOEMPTY ?
+			 variant->fifosize : variant->fifohalfsize;
 		count = min(remain, maxcnt);
 
-		writesl(base + MMCIFIFO, ptr, count >> 2);
+		/*
+		 * The ST Micro variant for SDIO transfer sizes
+		 * less than 8 bytes should have clock H/W flow
+		 * control disabled.
+		 */
+		if (variant->sdio &&
+		    mmc_card_sdio(host->mmc->card)) {
+			if (count < 8)
+				writel(readl(host->base + MMCICLOCK) &
+				       ~variant->clkreg_enable,
+				       host->base + MMCICLOCK);
+			else
+				writel(readl(host->base + MMCICLOCK) |
+				       variant->clkreg_enable,
+				       host->base + MMCICLOCK);
+		}
+
+		/*
+		 * SDIO especially may want to send something that is
+		 * not divisible by 4 (as opposed to card sectors
+		 * etc), and the FIFO only accepts full 32-bit writes.
+		 * So compensate by adding +3 on the count: a single
+		 * byte becomes a 32-bit write, 7 bytes will be two
+		 * 32-bit writes, etc.
+		 */
+		writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);
 
 		ptr += count;
 		remain -= count;
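The +3 rounding above means any residue of 1-3 bytes still produces one full FIFO word write. A quick check of the arithmetic:

	#include <stdio.h>

	int main(void)
	{
		/* 1..4 bytes -> 1 word, 5..8 bytes -> 2 words */
		for (unsigned int count = 1; count <= 8; count++)
			printf("%u byte(s) -> %u word write(s)\n",
			       count, (count + 3) >> 2);
		return 0;
	}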
@@ -362,6 +734,7 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
 {
 	struct mmci_host *host = dev_id;
 	struct sg_mapping_iter *sg_miter = &host->sg_miter;
+	struct variant_data *variant = host->variant;
 	void __iomem *base = host->base;
 	unsigned long flags;
 	u32 status;
@@ -406,9 +779,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
 		if (remain)
 			break;
 
-		if (status & MCI_RXACTIVE)
-			flush_dcache_page(sg_miter->page);
-
 		status = readl(base + MMCISTATUS);
 	} while (1);
 
@@ -417,11 +787,11 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
 	local_irq_restore(flags);
 
 	/*
-	 * If we're nearing the end of the read, switch to
-	 * "any data available" mode.
+	 * If we have less than the fifo 'half-full' threshold to transfer,
+	 * trigger a PIO interrupt as soon as any data is available.
 	 */
-	if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE)
-		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);
+	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
+		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
 
 	/*
 	 * If we run out of data, disable the data IRQs; this
@@ -430,7 +800,7 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
 	 * stops us racing with our data end IRQ.
 	 */
 	if (host->size == 0) {
-		writel(0, base + MMCIMASK1);
+		mmci_set_mask1(host, 0);
 		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
 	}
 
@@ -453,6 +823,14 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
 		struct mmc_data *data;
 
 		status = readl(host->base + MMCISTATUS);
+
+		if (host->singleirq) {
+			if (status & readl(host->base + MMCIMASK1))
+				mmci_pio_irq(irq, dev_id);
+
+			status &= ~MCI_IRQ1MASK;
+		}
+
 		status &= readl(host->base + MMCIMASK0);
 		writel(status, host->base + MMCICLEAR);
 
@@ -507,19 +885,27 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 	struct mmci_host *host = mmc_priv(mmc);
 	u32 pwr = 0;
 	unsigned long flags;
+	int ret;
 
 	switch (ios->power_mode) {
 	case MMC_POWER_OFF:
-		if(host->vcc &&
-		   regulator_is_enabled(host->vcc))
-			regulator_disable(host->vcc);
+		if (host->vcc)
+			ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
 		break;
 	case MMC_POWER_UP:
-#ifdef CONFIG_REGULATOR
-		if (host->vcc)
-			/* This implicitly enables the regulator */
-			mmc_regulator_set_ocr(host->vcc, ios->vdd);
-#endif
+		if (host->vcc) {
+			ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
+			if (ret) {
+				dev_err(mmc_dev(mmc), "unable to set OCR\n");
+				/*
+				 * The .set_ios() function in the mmc_host_ops
+				 * struct returns void, and failing to set the
+				 * power should be rare so we print an error
+				 * and return here.
+				 */
+				return;
+			}
+		}
 		if (host->plat->vdd_handler)
 			pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
 						       ios->power_mode);
@@ -564,18 +950,23 @@ static int mmci_get_ro(struct mmc_host *mmc)
 	if (host->gpio_wp == -ENOSYS)
 		return -ENOSYS;
 
-	return gpio_get_value(host->gpio_wp);
+	return gpio_get_value_cansleep(host->gpio_wp);
 }
 
 static int mmci_get_cd(struct mmc_host *mmc)
 {
 	struct mmci_host *host = mmc_priv(mmc);
+	struct mmci_platform_data *plat = host->plat;
 	unsigned int status;
 
-	if (host->gpio_cd == -ENOSYS)
-		status = host->plat->status(mmc_dev(host->mmc));
-	else
-		status = !gpio_get_value(host->gpio_cd);
+	if (host->gpio_cd == -ENOSYS) {
+		if (!plat->status)
+			return 1; /* Assume always present */
+
+		status = plat->status(mmc_dev(host->mmc));
+	} else
+		status = !!gpio_get_value_cansleep(host->gpio_cd)
+			^ plat->cd_invert;
 
 	/*
 	 * Use positive logic throughout - status is zero for no card,
@@ -584,6 +975,15 @@ static int mmci_get_cd(struct mmc_host *mmc)
 	return status;
 }
 
+static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
+{
+	struct mmci_host *host = dev_id;
+
+	mmc_detect_change(host->mmc, msecs_to_jiffies(500));
+
+	return IRQ_HANDLED;
+}
+
 static const struct mmc_host_ops mmci_ops = {
 	.request	= mmci_request,
 	.set_ios	= mmci_set_ios,
@@ -591,7 +991,8 @@ static const struct mmc_host_ops mmci_ops = {
 	.get_cd		= mmci_get_cd,
 };
 
-static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
+static int __devinit mmci_probe(struct amba_device *dev,
+	const struct amba_id *id)
 {
 	struct mmci_platform_data *plat = dev->dev.platform_data;
 	struct variant_data *variant = id->data;
@@ -620,6 +1021,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 
 	host->gpio_wp = -ENOSYS;
 	host->gpio_cd = -ENOSYS;
+	host->gpio_cd_irq = -1;
 
 	host->hw_designer = amba_manf(dev);
 	host->hw_revision = amba_rev(dev);
@@ -653,6 +1055,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
 			host->mclk);
 	}
+	host->phybase = dev->res.start;
 	host->base = ioremap(dev->res.start, resource_size(&dev->res));
 	if (!host->base) {
 		ret = -ENOMEM;
@@ -699,13 +1102,11 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 	if (host->vcc == NULL)
 		mmc->ocr_avail = plat->ocr_mask;
 	mmc->caps = plat->capabilities;
-	mmc->caps |= MMC_CAP_NEEDS_POLL;
 
 	/*
 	 * We can do SGIO
 	 */
-	mmc->max_hw_segs = 16;
-	mmc->max_phys_segs = NR_SG;
+	mmc->max_segs = NR_SG;
 
 	/*
 	 * Since only a certain number of bits are valid in the data length
@@ -744,6 +1145,20 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 			host->gpio_cd = plat->gpio_cd;
 		else if (ret != -ENOSYS)
 			goto err_gpio_cd;
+
+		/*
+		 * A gpio pin that will detect cards when inserted and removed
+		 * will most likely want to trigger on the edges if it is
+		 * 0 when ejected and 1 when inserted (or mutatis mutandis
+		 * for the inverted case) so we request triggers on both
+		 * edges.
+		 */
+		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
+					      mmci_cd_irq,
+					      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+					      DRIVER_NAME " (cd)", host);
+		if (ret >= 0)
+			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
 	}
 	if (gpio_is_valid(plat->gpio_wp)) {
 		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
@@ -755,23 +1170,35 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 			goto err_gpio_wp;
 	}
 
+	if ((host->plat->status || host->gpio_cd != -ENOSYS)
+	    && host->gpio_cd_irq < 0)
+		mmc->caps |= MMC_CAP_NEEDS_POLL;
+
 	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
 	if (ret)
 		goto unmap;
 
-	ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
-	if (ret)
-		goto irq0_free;
+	if (dev->irq[1] == NO_IRQ)
+		host->singleirq = true;
+	else {
+		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
+				  DRIVER_NAME " (pio)", host);
+		if (ret)
+			goto irq0_free;
+	}
 
 	writel(MCI_IRQENABLE, host->base + MMCIMASK0);
 
 	amba_set_drvdata(dev, mmc);
 
-	mmc_add_host(mmc);
+	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
+		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
+		 amba_rev(dev), (unsigned long long)dev->res.start,
+		 dev->irq[0], dev->irq[1]);
 
-	dev_info(&dev->dev, "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
-		 mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
-		 (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);
+	mmci_dma_setup(host);
+
+	mmc_add_host(mmc);
 
 	return 0;
 
@@ -781,6 +1208,8 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 	if (host->gpio_wp != -ENOSYS)
 		gpio_free(host->gpio_wp);
 err_gpio_wp:
+	if (host->gpio_cd_irq >= 0)
+		free_irq(host->gpio_cd_irq, host);
 	if (host->gpio_cd != -ENOSYS)
 		gpio_free(host->gpio_cd);
 err_gpio_cd:
@@ -814,11 +1243,15 @@ static int __devexit mmci_remove(struct amba_device *dev)
 		writel(0, host->base + MMCICOMMAND);
 		writel(0, host->base + MMCIDATACTRL);
 
+		mmci_dma_release(host);
 		free_irq(dev->irq[0], host);
-		free_irq(dev->irq[1], host);
+		if (!host->singleirq)
+			free_irq(dev->irq[1], host);
 
 		if (host->gpio_wp != -ENOSYS)
 			gpio_free(host->gpio_wp);
+		if (host->gpio_cd_irq >= 0)
+			free_irq(host->gpio_cd_irq, host);
 		if (host->gpio_cd != -ENOSYS)
 			gpio_free(host->gpio_cd);
 
@@ -826,8 +1259,8 @@ static int __devexit mmci_remove(struct amba_device *dev)
 		clk_disable(host->clk);
 		clk_put(host->clk);
 
-		if (regulator_is_enabled(host->vcc))
-			regulator_disable(host->vcc);
+		if (host->vcc)
+			mmc_regulator_set_ocr(mmc, host->vcc, 0);
 		regulator_put(host->vcc);
 
 		mmc_free_host(mmc);
@@ -878,10 +1311,15 @@ static int mmci_resume(struct amba_device *dev)
 static struct amba_id mmci_ids[] = {
 	{
 		.id	= 0x00041180,
-		.mask	= 0x000fffff,
+		.mask	= 0xff0fffff,
 		.data	= &variant_arm,
 	},
 	{
+		.id	= 0x01041180,
+		.mask	= 0xff0fffff,
+		.data	= &variant_arm_extended_fifo,
+	},
+	{
 		.id	= 0x00041181,
 		.mask	= 0x000fffff,
 		.data	= &variant_arm,
@@ -899,9 +1337,14 @@ static struct amba_id mmci_ids[] = {
 	},
 	{
 		.id	= 0x00480180,
-		.mask	= 0x00ffffff,
+		.mask	= 0xf0ffffff,
 		.data	= &variant_ux500,
 	},
+	{
+		.id	= 0x10480180,
+		.mask	= 0xf0ffffff,
+		.data	= &variant_ux500v2,
+	},
 	{ 0, 0 },
 };
 
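The widened masks matter because the AMBA bus core picks the first table entry for which (periphid & mask) == id; with the old 0x000fffff mask the new 0x01041180 and 0x10480180 parts would have matched the older variants. A minimal sketch of the matching (table values copied from above, everything else illustrative):

	#include <stdio.h>

	struct id { unsigned int id, mask; const char *name; };

	int main(void)
	{
		static const struct id ids[] = {
			{ 0x00041180, 0xff0fffff, "variant_arm" },
			{ 0x01041180, 0xff0fffff, "variant_arm_extended_fifo" },
			{ 0x00480180, 0xf0ffffff, "variant_ux500" },
			{ 0x10480180, 0xf0ffffff, "variant_ux500v2" },
		};
		unsigned int periphid = 0x10480180;	/* example ux500v2 part */

		for (unsigned int i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
			if ((periphid & ids[i].mask) == ids[i].id) {
				printf("matched %s\n", ids[i].name);
				break;	/* first match wins */
			}
		return 0;
	}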