 drivers/mmc/host/mmci.c     | 207
 drivers/mmc/host/mmci.h     |   9
 drivers/serial/amba-pl011.c | 592
 include/linux/amba/serial.h |  22
 4 files changed, 769 insertions(+), 61 deletions(-)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 87b4fc6c98c2..563022825667 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -19,6 +19,7 @@
 #include <linux/highmem.h>
 #include <linux/log2.h>
 #include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
 #include <linux/amba/bus.h>
 #include <linux/clk.h>
 #include <linux/scatterlist.h>
@@ -45,6 +46,12 @@ static unsigned int fmax = 515633;
  *	      is asserted (likewise for RX)
  * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
  *		  is asserted (likewise for RX)
+ * @broken_blockend: the MCI_DATABLOCKEND is broken on the hardware
+ *		and will not work at all.
+ * @broken_blockend_dma: the MCI_DATABLOCKEND is broken on the hardware when
+ *		using DMA.
+ * @sdio: variant supports SDIO
+ * @st_clkdiv: true if using a ST-specific clock divider algorithm
  */
 struct variant_data {
 	unsigned int		clkreg;
@@ -52,6 +59,10 @@ struct variant_data {
 	unsigned int		datalength_bits;
 	unsigned int		fifosize;
 	unsigned int		fifohalfsize;
+	bool			broken_blockend;
+	bool			broken_blockend_dma;
+	bool			sdio;
+	bool			st_clkdiv;
 };
 
 static struct variant_data variant_arm = {
@@ -65,6 +76,8 @@ static struct variant_data variant_u300 = {
 	.fifohalfsize		= 8 * 4,
 	.clkreg_enable		= 1 << 13, /* HWFCEN */
 	.datalength_bits	= 16,
+	.broken_blockend_dma	= true,
+	.sdio			= true,
 };
 
 static struct variant_data variant_ux500 = {
@@ -73,7 +86,11 @@ static struct variant_data variant_ux500 = {
 	.clkreg			= MCI_CLK_ENABLE,
 	.clkreg_enable		= 1 << 14, /* HWFCEN */
 	.datalength_bits	= 24,
+	.broken_blockend	= true,
+	.sdio			= true,
+	.st_clkdiv		= true,
 };
+
 /*
  * This must be called with host->lock held
  */
@@ -86,7 +103,22 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
 	if (desired >= host->mclk) {
 		clk = MCI_CLK_BYPASS;
 		host->cclk = host->mclk;
+	} else if (variant->st_clkdiv) {
+		/*
+		 * DB8500 TRM says f = mclk / (clkdiv + 2)
+		 * => clkdiv = (mclk / f) - 2
+		 * Round the divider up so we don't exceed the max
+		 * frequency
+		 */
+		clk = DIV_ROUND_UP(host->mclk, desired) - 2;
+		if (clk >= 256)
+			clk = 255;
+		host->cclk = host->mclk / (clk + 2);
 	} else {
+		/*
+		 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
+		 * => clkdiv = mclk / (2 * f) - 1
+		 */
 		clk = host->mclk / (2 * desired) - 1;
 		if (clk >= 256)
 			clk = 255;
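A quick sanity check of the two divider formulas above (the clock values are examples, not taken from the patch): with mclk = 100 MHz and a desired card clock of 400 kHz, both paths land on exactly 400 kHz, and the ST path rounds the divider up so it never overshoots the requested frequency. A minimal user-space sketch:

    /*
     * Sanity-check sketch for the two clock divider formulas above.
     * Not driver code; mclk and desired are example values.
     */
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int mclk = 100000000, desired = 400000, clk;

        /* ST (DB8500): f = mclk / (clkdiv + 2), divider rounded up */
        clk = DIV_ROUND_UP(mclk, desired) - 2;
        if (clk >= 256)
            clk = 255;
        printf("st:  clkdiv=%u cclk=%u\n", clk, mclk / (clk + 2));        /* 248, 400000 */

        /* PL180: f = mclk / (2 * (clkdiv + 1)) */
        clk = mclk / (2 * desired) - 1;
        if (clk >= 256)
            clk = 255;
        printf("arm: clkdiv=%u cclk=%u\n", clk, mclk / (2 * (clk + 1)));  /* 124, 400000 */
        return 0;
    }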
@@ -129,10 +161,26 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
 	spin_lock(&host->lock);
 }
 
+static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
+{
+	void __iomem *base = host->base;
+
+	if (host->singleirq) {
+		unsigned int mask0 = readl(base + MMCIMASK0);
+
+		mask0 &= ~MCI_IRQ1MASK;
+		mask0 |= mask;
+
+		writel(mask0, base + MMCIMASK0);
+	}
+
+	writel(mask, base + MMCIMASK1);
+}
+
 static void mmci_stop_data(struct mmci_host *host)
 {
 	writel(0, host->base + MMCIDATACTRL);
-	writel(0, host->base + MMCIMASK1);
+	mmci_set_mask1(host, 0);
 	host->data = NULL;
 }
 
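The helper above is the heart of the single-IRQ support: when only IRQ0 is wired up, the bits that would normally be unmasked on the dedicated FIFO interrupt line (MASK1) are mirrored into MASK0 as well, so the one line also fires for FIFO events. A standalone illustration of the bit arithmetic, with placeholder bit values (the real mask layout lives in mmci.h):

    /*
     * Illustration only (not driver code): folding the IRQ1-class bits
     * into MASK0 on a single-IRQ controller. The bit values below are
     * placeholders, not the real MMCIMASK0 layout.
     */
    #include <stdio.h>

    #define IRQ1MASK 0x003c0000u  /* stands in for MCI_IRQ1MASK */

    static unsigned int fold_mask1(unsigned int mask0, unsigned int mask1)
    {
        mask0 &= ~IRQ1MASK;  /* drop the stale FIFO bits */
        mask0 |= mask1;      /* mirror in the new ones */
        return mask0;
    }

    int main(void)
    {
        /* e.g. enabling an "RX data available" bit (placeholder value) */
        printf("MASK0 -> %#x\n", fold_mask1(0x2a0u, 0x00200000u));
        return 0;
    }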
@@ -162,6 +210,8 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 	host->data = data;
 	host->size = data->blksz * data->blocks;
 	host->data_xfered = 0;
+	host->blockend = false;
+	host->dataend = false;
 
 	mmci_init_sg(host, data);
 
@@ -196,9 +246,14 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 		irqmask = MCI_TXFIFOHALFEMPTYMASK;
 	}
 
+	/* The ST Micro variant has a special bit to enable SDIO */
+	if (variant->sdio && host->mmc->card)
+		if (mmc_card_sdio(host->mmc->card))
+			datactrl |= MCI_ST_DPSM_SDIOEN;
+
 	writel(datactrl, base + MMCIDATACTRL);
 	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
-	writel(irqmask, base + MMCIMASK1);
+	mmci_set_mask1(host, irqmask);
 }
 
 static void
@@ -233,20 +288,9 @@ static void
 mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 	      unsigned int status)
 {
-	if (status & MCI_DATABLOCKEND) {
-		host->data_xfered += data->blksz;
-#ifdef CONFIG_ARCH_U300
-		/*
-		 * On the U300 some signal or other is
-		 * badly routed so that a data write does
-		 * not properly terminate with a MCI_DATAEND
-		 * status flag. This quirk will make writes
-		 * work again.
-		 */
-		if (data->flags & MMC_DATA_WRITE)
-			status |= MCI_DATAEND;
-#endif
-	}
+	struct variant_data *variant = host->variant;
+
+	/* First check for errors */
 	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
 		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
 		if (status & MCI_DATACRCFAIL)
@@ -255,7 +299,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 			data->error = -ETIMEDOUT;
 		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
 			data->error = -EIO;
-		status |= MCI_DATAEND;
+
+		/* Force-complete the transaction */
+		host->blockend = true;
+		host->dataend = true;
 
 		/*
 		 * We hit an error condition. Ensure that any data
@@ -273,9 +320,64 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 			local_irq_restore(flags);
 		}
 	}
-	if (status & MCI_DATAEND) {
+
+	/*
+	 * On ARM variants in PIO mode, MCI_DATABLOCKEND
+	 * is always sent first, and we increase the
+	 * transferred number of bytes for that IRQ.  Then
+	 * MCI_DATAEND follows and we conclude the transaction.
+	 *
+	 * On the Ux500 single-IRQ variant MCI_DATABLOCKEND
+	 * doesn't seem to immediately clear from the status,
+	 * so we can't use it to keep count when only one IRQ is
+	 * used, because the IRQ will hit for other reasons and
+	 * the flag will still be up.  So we use the MCI_DATAEND
+	 * IRQ at the end of the entire transfer because
+	 * MCI_DATABLOCKEND is broken.
+	 *
+	 * On the U300, the IRQs can arrive out-of-order,
+	 * e.g. MCI_DATABLOCKEND sometimes arrives after MCI_DATAEND,
+	 * so for this case we use the flags "blockend" and
+	 * "dataend" to make sure both IRQs have arrived before
+	 * concluding the transaction.  (This does not apply
+	 * to the Ux500, which doesn't fire MCI_DATABLOCKEND
+	 * at all.)  In DMA mode it suffers from the same problem
+	 * as the Ux500.
+	 */
+	if (status & MCI_DATABLOCKEND) {
+		/*
+		 * Just being a little over-cautious, we do not
+		 * use this progressive update if the hardware blockend
+		 * flag is unreliable: since it can stay high between
+		 * IRQs it will corrupt the transfer counter.
+		 */
+		if (!variant->broken_blockend)
+			host->data_xfered += data->blksz;
+		host->blockend = true;
+	}
+
+	if (status & MCI_DATAEND)
+		host->dataend = true;
+
+	/*
+	 * On variants with broken blockend we shall only wait for dataend;
+	 * on others we must sync with the blockend signal since they can
+	 * appear out-of-order.
+	 */
+	if (host->dataend && (host->blockend || variant->broken_blockend)) {
 		mmci_stop_data(host);
 
+		/* Reset these flags */
+		host->blockend = false;
+		host->dataend = false;
+
+		/*
+		 * Variants with broken blockend flags need to handle the
+		 * end of the entire transfer here.
+		 */
+		if (variant->broken_blockend && !data->error)
+			host->data_xfered += data->blksz * data->blocks;
+
 		if (!data->stop) {
 			mmci_request_end(host, data->mrq);
 		} else {
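The hunk above replaces the single MCI_DATAEND test with a two-flag handshake, and the completion rule compresses into one predicate. A small sketch of its truth table for the three interesting cases:

    /*
     * Illustration of the completion rule introduced above: conclude
     * once DATAEND has fired and, unless the hardware blockend flag is
     * known-broken, BLOCKEND as well (the IRQs may arrive in either
     * order on the U300).
     */
    #include <stdbool.h>
    #include <stdio.h>

    static bool transfer_done(bool dataend, bool blockend, bool broken_blockend)
    {
        return dataend && (blockend || broken_blockend);
    }

    int main(void)
    {
        /* U300 out-of-order case: DATAEND first, BLOCKEND later */
        printf("%d\n", transfer_done(true, false, false)); /* 0: keep waiting */
        printf("%d\n", transfer_done(true, true,  false)); /* 1: conclude */
        /* Ux500: BLOCKEND never fires, but it is flagged broken */
        printf("%d\n", transfer_done(true, false, true));  /* 1: conclude */
        return 0;
    }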
@@ -356,7 +458,32 @@ static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int rem
 		variant->fifosize : variant->fifohalfsize;
 	count = min(remain, maxcnt);
 
-	writesl(base + MMCIFIFO, ptr, count >> 2);
+	/*
+	 * The ST Micro variant for SDIO transfer sizes
+	 * less than 8 bytes should have clock H/W flow
+	 * control disabled.
+	 */
+	if (variant->sdio &&
+	    mmc_card_sdio(host->mmc->card)) {
+		if (count < 8)
+			writel(readl(host->base + MMCICLOCK) &
+			       ~variant->clkreg_enable,
+			       host->base + MMCICLOCK);
+		else
+			writel(readl(host->base + MMCICLOCK) |
+			       variant->clkreg_enable,
+			       host->base + MMCICLOCK);
+	}
+
+	/*
+	 * SDIO especially may want to send something that is
+	 * not divisible by 4 (as opposed to card sectors
+	 * etc), and the FIFO only accepts full 32-bit writes.
+	 * So compensate by adding +3 on the count: a single
+	 * byte becomes one 32-bit write, 7 bytes become two
+	 * 32-bit writes, etc.
+	 */
+	writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);
 
 	ptr += count;
 	remain -= count;
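The (count + 3) >> 2 expression above is simply a ceiling division by 4, i.e. the number of 32-bit FIFO writes needed to cover count bytes. A tiny check of the arithmetic:

    /* Quick check of the word-count rounding used above. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int count;

        for (count = 1; count <= 8; count++)
            printf("%u byte(s) -> %u write(s)\n", count, (count + 3) >> 2);
        /* 1..4 bytes -> 1 write, 5..8 bytes -> 2 writes, and so on */
        return 0;
    }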
@@ -437,7 +564,7 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
 	 * "any data available" mode.
 	 */
 	if (status & MCI_RXACTIVE && host->size < variant->fifosize)
-		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);
+		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
 
 	/*
 	 * If we run out of data, disable the data IRQs; this
@@ -446,7 +573,7 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
 	 * stops us racing with our data end IRQ.
 	 */
 	if (host->size == 0) {
-		writel(0, base + MMCIMASK1);
+		mmci_set_mask1(host, 0);
 		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
 	}
 
@@ -469,6 +596,14 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
 	struct mmc_data *data;
 
 	status = readl(host->base + MMCISTATUS);
+
+	if (host->singleirq) {
+		if (status & readl(host->base + MMCIMASK1))
+			mmci_pio_irq(irq, dev_id);
+
+		status &= ~MCI_IRQ1MASK;
+	}
+
 	status &= readl(host->base + MMCIMASK0);
 	writel(status, host->base + MMCICLEAR);
 
@@ -635,6 +770,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 	struct variant_data *variant = id->data;
 	struct mmci_host *host;
 	struct mmc_host *mmc;
+	unsigned int mask;
 	int ret;
 
 	/* must have platform data */
@@ -806,20 +942,30 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 	if (ret)
 		goto unmap;
 
-	ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
-	if (ret)
-		goto irq0_free;
+	if (dev->irq[1] == NO_IRQ)
+		host->singleirq = true;
+	else {
+		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
+				  DRIVER_NAME " (pio)", host);
+		if (ret)
+			goto irq0_free;
+	}
 
-	writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+	mask = MCI_IRQENABLE;
+	/* Don't use the datablockend flag if it's broken */
+	if (variant->broken_blockend)
+		mask &= ~MCI_DATABLOCKEND;
 
-	amba_set_drvdata(dev, mmc);
+	writel(mask, host->base + MMCIMASK0);
 
-	mmc_add_host(mmc);
+	amba_set_drvdata(dev, mmc);
 
-	dev_info(&dev->dev, "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
-		mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
+	dev_info(&dev->dev, "%s: PL%03x rev%u at 0x%08llx irq %d,%d\n",
+		mmc_hostname(mmc), amba_part(dev), amba_rev(dev),
 		(unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);
 
+	mmc_add_host(mmc);
+
 	return 0;
 
 irq0_free:
@@ -864,7 +1010,8 @@ static int __devexit mmci_remove(struct amba_device *dev)
 	writel(0, host->base + MMCIDATACTRL);
 
 	free_irq(dev->irq[0], host);
-	free_irq(dev->irq[1], host);
+	if (!host->singleirq)
+		free_irq(dev->irq[1], host);
 
 	if (host->gpio_wp != -ENOSYS)
 		gpio_free(host->gpio_wp);
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 4ae887fc0189..df06f01aac89 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -139,6 +139,11 @@
 	MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK|	\
 	MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATABLOCKENDMASK)
 
+/* These interrupts are directed to IRQ1 when two IRQ lines are available */
+#define MCI_IRQ1MASK \
+	(MCI_RXFIFOHALFFULLMASK | MCI_RXDATAAVLBLMASK | \
+	 MCI_TXFIFOHALFEMPTYMASK)
+
 #define NR_SG		16
 
 struct clk;
@@ -154,6 +159,7 @@ struct mmci_host {
 	int			gpio_cd;
 	int			gpio_wp;
 	int			gpio_cd_irq;
+	bool			singleirq;
 
 	unsigned int		data_xfered;
 
@@ -171,6 +177,9 @@ struct mmci_host {
 	struct timer_list	timer;
 	unsigned int		oldstat;
 
+	bool			blockend;
+	bool			dataend;
+
 	/* pio stuff */
 	struct sg_mapping_iter	sg_miter;
 	unsigned int		size;
diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c
index 6ca7a44f29c2..e76d7d000128 100644
--- a/drivers/serial/amba-pl011.c
+++ b/drivers/serial/amba-pl011.c
@@ -7,6 +7,7 @@
  *
  * Copyright 1999 ARM Limited
  * Copyright (C) 2000 Deep Blue Solutions Ltd.
+ * Copyright (C) 2010 ST-Ericsson SA
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -48,6 +49,9 @@
 #include <linux/amba/serial.h>
 #include <linux/clk.h>
 #include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
 
 #include <asm/io.h>
 #include <asm/sizes.h>
@@ -63,21 +67,6 @@
 #define UART_DR_ERROR		(UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
 #define UART_DUMMY_DR_RX	(1 << 16)
 
-/*
- * We wrap our port structure around the generic uart_port.
- */
-struct uart_amba_port {
-	struct uart_port	port;
-	struct clk		*clk;
-	unsigned int		im;	/* interrupt mask */
-	unsigned int		old_status;
-	unsigned int		ifls;	/* vendor-specific */
-	unsigned int		lcrh_tx;	/* vendor-specific */
-	unsigned int		lcrh_rx;	/* vendor-specific */
-	bool			oversampling;	/* vendor-specific */
-	bool			autorts;
-};
-
 /* There is by now at least one vendor with differing details, so handle it */
 struct vendor_data {
 	unsigned int		ifls;
@@ -85,6 +74,7 @@ struct vendor_data {
 	unsigned int		lcrh_tx;
 	unsigned int		lcrh_rx;
 	bool			oversampling;
+	bool			dma_threshold;
 };
 
 static struct vendor_data vendor_arm = {
@@ -93,6 +83,7 @@ static struct vendor_data vendor_arm = {
 	.lcrh_tx		= UART011_LCRH,
 	.lcrh_rx		= UART011_LCRH,
 	.oversampling		= false,
+	.dma_threshold		= false,
 };
 
 static struct vendor_data vendor_st = {
@@ -101,22 +92,535 @@ static struct vendor_data vendor_st = {
 	.lcrh_tx		= ST_UART011_LCRH_TX,
 	.lcrh_rx		= ST_UART011_LCRH_RX,
 	.oversampling		= true,
+	.dma_threshold		= true,
+};
+
+/* Deals with DMA transactions */
+struct pl011_dmatx_data {
+	struct dma_chan		*chan;
+	struct scatterlist	sg;
+	char			*buf;
+	bool			queued;
 };
 
+/*
+ * We wrap our port structure around the generic uart_port.
+ */
+struct uart_amba_port {
+	struct uart_port	port;
+	struct clk		*clk;
+	const struct vendor_data *vendor;
+	unsigned int		dmacr;		/* dma control reg */
+	unsigned int		im;		/* interrupt mask */
+	unsigned int		old_status;
+	unsigned int		fifosize;	/* vendor-specific */
+	unsigned int		lcrh_tx;	/* vendor-specific */
+	unsigned int		lcrh_rx;	/* vendor-specific */
+	bool			autorts;
+	char			type[12];
+#ifdef CONFIG_DMA_ENGINE
+	/* DMA stuff */
+	bool			using_dma;
+	struct pl011_dmatx_data	dmatx;
+#endif
+};
+
+/*
+ * All the DMA operation mode stuff goes inside this ifdef.
+ * This assumes that you have a generic DMA device interface;
+ * no custom DMA interfaces are supported.
+ */
+#ifdef CONFIG_DMA_ENGINE
+
+#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
+
+static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
+{
+	/* DMA is the sole user of the platform data right now */
+	struct amba_pl011_data *plat = uap->port.dev->platform_data;
+	struct dma_slave_config tx_conf = {
+		.dst_addr = uap->port.mapbase + UART01x_DR,
+		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+		.direction = DMA_TO_DEVICE,
+		.dst_maxburst = uap->fifosize >> 1,
+	};
+	struct dma_chan *chan;
+	dma_cap_mask_t mask;
+
+	/* We need platform data */
+	if (!plat || !plat->dma_filter) {
+		dev_info(uap->port.dev, "no DMA platform data\n");
+		return;
+	}
+
+	/* Try to acquire a generic DMA engine slave channel */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	chan = dma_request_channel(mask, plat->dma_filter, plat->dma_tx_param);
+	if (!chan) {
+		dev_err(uap->port.dev, "no TX DMA channel!\n");
+		return;
+	}
+
+	dmaengine_slave_config(chan, &tx_conf);
+	uap->dmatx.chan = chan;
+
+	dev_info(uap->port.dev, "DMA channel TX %s\n",
+		 dma_chan_name(uap->dmatx.chan));
+}
+
+#ifndef MODULE
+/*
+ * Stack up the UARTs and let the above initcall be done at device
+ * initcall time, because the serial driver is called as an arch
+ * initcall, and at this time the DMA subsystem is not yet registered.
+ * At this point the driver will switch over to using DMA where desired.
+ */
+struct dma_uap {
+	struct list_head node;
+	struct uart_amba_port *uap;
+};
+
+static LIST_HEAD(pl011_dma_uarts);
+
+static int __init pl011_dma_initcall(void)
+{
+	struct list_head *node, *tmp;
+
+	list_for_each_safe(node, tmp, &pl011_dma_uarts) {
+		struct dma_uap *dmau = list_entry(node, struct dma_uap, node);
+		pl011_dma_probe_initcall(dmau->uap);
+		list_del(node);
+		kfree(dmau);
+	}
+	return 0;
+}
+
+device_initcall(pl011_dma_initcall);
+
+static void pl011_dma_probe(struct uart_amba_port *uap)
+{
+	struct dma_uap *dmau = kzalloc(sizeof(struct dma_uap), GFP_KERNEL);
+	if (dmau) {
+		dmau->uap = uap;
+		list_add_tail(&dmau->node, &pl011_dma_uarts);
+	}
+}
+#else
+static void pl011_dma_probe(struct uart_amba_port *uap)
+{
+	pl011_dma_probe_initcall(uap);
+}
+#endif
+
+static void pl011_dma_remove(struct uart_amba_port *uap)
+{
+	/* TODO: remove the initcall if it has not yet executed */
+	if (uap->dmatx.chan)
+		dma_release_channel(uap->dmatx.chan);
+}
+
+
+/* Forward declare this for the refill routine */
+static int pl011_dma_tx_refill(struct uart_amba_port *uap);
+
+/*
+ * The current DMA TX buffer has been sent.
+ * Try to queue up another DMA buffer.
+ */
+static void pl011_dma_tx_callback(void *data)
+{
+	struct uart_amba_port *uap = data;
+	struct pl011_dmatx_data *dmatx = &uap->dmatx;
+	unsigned long flags;
+	u16 dmacr;
+
+	spin_lock_irqsave(&uap->port.lock, flags);
+	if (uap->dmatx.queued)
+		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
+			     DMA_TO_DEVICE);
+
+	dmacr = uap->dmacr;
+	uap->dmacr = dmacr & ~UART011_TXDMAE;
+	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+
+	/*
+	 * If TX DMA was disabled, it means that we've stopped the DMA for
+	 * some reason (eg, XOFF received, or we want to send an X-char.)
+	 *
+	 * Note: we need to be careful here of a potential race between DMA
+	 * and the rest of the driver - if the driver disables TX DMA while
+	 * a TX buffer is completing, we must update the TX queued status to
+	 * get further refills (hence we check dmacr).
+	 */
+	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
+	    uart_circ_empty(&uap->port.state->xmit)) {
+		uap->dmatx.queued = false;
+		spin_unlock_irqrestore(&uap->port.lock, flags);
+		return;
+	}
+
+	if (pl011_dma_tx_refill(uap) <= 0) {
+		/*
+		 * We didn't queue a DMA buffer for some reason, but we
+		 * have data pending to be sent.  Re-enable the TX IRQ.
+		 */
+		uap->im |= UART011_TXIM;
+		writew(uap->im, uap->port.membase + UART011_IMSC);
+	}
+	spin_unlock_irqrestore(&uap->port.lock, flags);
+}
+
+/*
+ * Try to refill the TX DMA buffer.
+ * Locking: called with port lock held and IRQs disabled.
+ * Returns:
+ *   1 if we queued up a TX DMA buffer.
+ *   0 if we didn't want to handle this by DMA
+ *  <0 on error
+ */
+static int pl011_dma_tx_refill(struct uart_amba_port *uap)
+{
+	struct pl011_dmatx_data *dmatx = &uap->dmatx;
+	struct dma_chan *chan = dmatx->chan;
+	struct dma_device *dma_dev = chan->device;
+	struct dma_async_tx_descriptor *desc;
+	struct circ_buf *xmit = &uap->port.state->xmit;
+	unsigned int count;
+
+	/*
+	 * Try to avoid the overhead involved in using DMA if the
+	 * transaction fits in the first half of the FIFO, by using
+	 * the standard interrupt handling.  This ensures that we
+	 * issue a uart_write_wakeup() at the appropriate time.
+	 */
+	count = uart_circ_chars_pending(xmit);
+	if (count < (uap->fifosize >> 1)) {
+		uap->dmatx.queued = false;
+		return 0;
+	}
+
+	/*
+	 * Bodge: don't send the last character by DMA, as this
+	 * will prevent XON from notifying us to restart DMA.
+	 */
+	count -= 1;
+
+	/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
+	if (count > PL011_DMA_BUFFER_SIZE)
+		count = PL011_DMA_BUFFER_SIZE;
+
+	if (xmit->tail < xmit->head)
+		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
+	else {
+		size_t first = UART_XMIT_SIZE - xmit->tail;
+		size_t second = xmit->head;
+
+		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
+		if (second)
+			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
+	}
+
+	dmatx->sg.length = count;
+
+	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
+		uap->dmatx.queued = false;
+		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
+		return -EBUSY;
+	}
+
+	desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_TO_DEVICE,
+					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc) {
+		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
+		uap->dmatx.queued = false;
+		/*
+		 * If DMA cannot be used right now, we complete this
+		 * transaction via IRQ and let the TTY layer retry.
+		 */
+		dev_dbg(uap->port.dev, "TX DMA busy\n");
+		return -EBUSY;
+	}
+
+	/* Some data to go along to the callback */
+	desc->callback = pl011_dma_tx_callback;
+	desc->callback_param = uap;
+
+	/* All errors should happen at prepare time */
+	dmaengine_submit(desc);
+
+	/* Fire the DMA transaction */
+	dma_dev->device_issue_pending(chan);
+
+	uap->dmacr |= UART011_TXDMAE;
+	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+	uap->dmatx.queued = true;
+
+	/*
+	 * Now we know that DMA will fire, so advance the ring buffer
+	 * with the stuff we just dispatched.
+	 */
+	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+	uap->port.icount.tx += count;
+
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(&uap->port);
+
+	return 1;
+}
+
+/*
+ * We received a transmit interrupt without a pending X-char but with
+ * pending characters.
+ * Locking: called with port lock held and IRQs disabled.
+ * Returns:
+ *   false if we want to use PIO to transmit
+ *   true if we queued a DMA buffer
+ */
+static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
+{
+	if (!uap->using_dma)
+		return false;
+
+	/*
+	 * If we already have a TX buffer queued, but received a
+	 * TX interrupt, it will be because we've just sent an X-char.
+	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
+	 */
+	if (uap->dmatx.queued) {
+		uap->dmacr |= UART011_TXDMAE;
+		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+		uap->im &= ~UART011_TXIM;
+		writew(uap->im, uap->port.membase + UART011_IMSC);
+		return true;
+	}
+
+	/*
+	 * We don't have a TX buffer queued, so try to queue one.
+	 * If we successfully queued a buffer, mask the TX IRQ.
+	 */
+	if (pl011_dma_tx_refill(uap) > 0) {
+		uap->im &= ~UART011_TXIM;
+		writew(uap->im, uap->port.membase + UART011_IMSC);
+		return true;
+	}
+	return false;
+}
+
+/*
+ * Stop the DMA transmit (eg, due to received XOFF).
+ * Locking: called with port lock held and IRQs disabled.
+ */
+static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
+{
+	if (uap->dmatx.queued) {
+		uap->dmacr &= ~UART011_TXDMAE;
+		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+	}
+}
+
+/*
+ * Try to start a DMA transmit, or in the case of an XON/OFF
+ * character queued for send, try to get that character out ASAP.
+ * Locking: called with port lock held and IRQs disabled.
+ * Returns:
+ *   false if we want the TX IRQ to be enabled
+ *   true if we have a buffer queued
+ */
+static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
+{
+	u16 dmacr;
+
+	if (!uap->using_dma)
+		return false;
+
+	if (!uap->port.x_char) {
+		/* no X-char, try to push chars out in DMA mode */
+		bool ret = true;
+
+		if (!uap->dmatx.queued) {
+			if (pl011_dma_tx_refill(uap) > 0) {
+				uap->im &= ~UART011_TXIM;
+				ret = true;
+			} else {
+				uap->im |= UART011_TXIM;
+				ret = false;
+			}
+			writew(uap->im, uap->port.membase + UART011_IMSC);
+		} else if (!(uap->dmacr & UART011_TXDMAE)) {
+			uap->dmacr |= UART011_TXDMAE;
+			writew(uap->dmacr,
+			       uap->port.membase + UART011_DMACR);
+		}
+		return ret;
+	}
+
+	/*
+	 * We have an X-char to send.  Disable DMA to prevent it loading
+	 * the TX FIFO, and then see if we can stuff it into the FIFO.
+	 */
+	dmacr = uap->dmacr;
+	uap->dmacr &= ~UART011_TXDMAE;
+	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+
+	if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) {
+		/*
+		 * No space in the FIFO, so enable the transmit interrupt
+		 * so we know when there is space.  Note that once we've
+		 * loaded the character, we should just re-enable DMA.
+		 */
+		return false;
+	}
+
+	writew(uap->port.x_char, uap->port.membase + UART01x_DR);
+	uap->port.icount.tx++;
+	uap->port.x_char = 0;
+
+	/* Success - restore the DMA state */
+	uap->dmacr = dmacr;
+	writew(dmacr, uap->port.membase + UART011_DMACR);
+
+	return true;
+}
+
+/*
+ * Flush the transmit buffer.
+ * Locking: called with port lock held and IRQs disabled.
+ */
+static void pl011_dma_flush_buffer(struct uart_port *port)
+{
+	struct uart_amba_port *uap = (struct uart_amba_port *)port;
+
+	if (!uap->using_dma)
+		return;
+
+	/* Avoid deadlock with the DMA engine callback */
+	spin_unlock(&uap->port.lock);
+	dmaengine_terminate_all(uap->dmatx.chan);
+	spin_lock(&uap->port.lock);
+	if (uap->dmatx.queued) {
+		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
+			     DMA_TO_DEVICE);
+		uap->dmatx.queued = false;
+		uap->dmacr &= ~UART011_TXDMAE;
+		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+	}
+}
+
+
+static void pl011_dma_startup(struct uart_amba_port *uap)
+{
+	if (!uap->dmatx.chan)
+		return;
+
+	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
+	if (!uap->dmatx.buf) {
+		dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
+		uap->port.fifosize = uap->fifosize;
+		return;
+	}
+
+	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
+
+	/* The DMA buffer is now the FIFO the TTY subsystem can use */
+	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
+	uap->using_dma = true;
+
+	/* Turn on DMA error (RX/TX will be enabled on demand) */
+	uap->dmacr |= UART011_DMAONERR;
+	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+
+	/*
+	 * The ST Micro variant has some specific DMA burst threshold
+	 * compensation.  Set this to 16 bytes, so bursts will only
+	 * be issued above/below 16 bytes.
+	 */
+	if (uap->vendor->dma_threshold)
+		writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
+		       uap->port.membase + ST_UART011_DMAWM);
+}
+
+static void pl011_dma_shutdown(struct uart_amba_port *uap)
+{
+	if (!uap->using_dma)
+		return;
+
+	/* Disable RX and TX DMA */
+	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
+		barrier();
+
+	spin_lock_irq(&uap->port.lock);
+	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
+	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+	spin_unlock_irq(&uap->port.lock);
+
+	/* In theory, this should already be done by pl011_dma_flush_buffer */
+	dmaengine_terminate_all(uap->dmatx.chan);
+	if (uap->dmatx.queued) {
+		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
+			     DMA_TO_DEVICE);
+		uap->dmatx.queued = false;
+	}
+
+	kfree(uap->dmatx.buf);
+
+	uap->using_dma = false;
+}
+
+#else
+/* Blank functions if the DMA engine is not available */
+static inline void pl011_dma_probe(struct uart_amba_port *uap)
+{
+}
+
+static inline void pl011_dma_remove(struct uart_amba_port *uap)
+{
+}
+
+static inline void pl011_dma_startup(struct uart_amba_port *uap)
+{
+}
+
+static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
+{
+}
+
+static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
+{
+	return false;
+}
+
+static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
+{
+}
+
+static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
+{
+	return false;
+}
+
+#define pl011_dma_flush_buffer	NULL
+#endif
+
+
 static void pl011_stop_tx(struct uart_port *port)
 {
 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
 
 	uap->im &= ~UART011_TXIM;
 	writew(uap->im, uap->port.membase + UART011_IMSC);
+	pl011_dma_tx_stop(uap);
 }
 
 static void pl011_start_tx(struct uart_port *port)
 {
 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
 
-	uap->im |= UART011_TXIM;
-	writew(uap->im, uap->port.membase + UART011_IMSC);
+	if (!pl011_dma_tx_start(uap)) {
+		uap->im |= UART011_TXIM;
+		writew(uap->im, uap->port.membase + UART011_IMSC);
+	}
 }
 
 static void pl011_stop_rx(struct uart_port *port)
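One subtlety in pl011_dma_tx_refill() above is linearising the circular TX buffer into the DMA bounce buffer in at most two memcpy() calls. A standalone sketch of that copy, assuming the usual power-of-two UART_XMIT_SIZE of 4096:

    /*
     * Sketch of the wrap-around copy used by pl011_dma_tx_refill():
     * the occupied part of a ring buffer (tail..head) is flattened
     * into a linear buffer.  XMIT_SIZE stands in for UART_XMIT_SIZE.
     */
    #include <stdio.h>
    #include <string.h>

    #define XMIT_SIZE 4096  /* assumed power-of-two ring size */

    static size_t linearise(char *dst, const char *ring, size_t tail, size_t head)
    {
        if (tail < head) {
            /* Contiguous region: one copy is enough. */
            memcpy(dst, &ring[tail], head - tail);
            return head - tail;
        } else {
            /* Wrapped: copy tail..end of ring, then start..head. */
            size_t first = XMIT_SIZE - tail;

            memcpy(dst, &ring[tail], first);
            memcpy(dst + first, &ring[0], head);
            return first + head;
        }
    }

    int main(void)
    {
        static char ring[XMIT_SIZE], flat[XMIT_SIZE];

        memset(ring, 'x', sizeof(ring));
        /* e.g. 100 bytes at the end of the ring plus 50 at the start */
        printf("%zu bytes copied\n", linearise(flat, ring, XMIT_SIZE - 100, 50));
        return 0;
    }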
@@ -203,7 +707,11 @@ static void pl011_tx_chars(struct uart_amba_port *uap)
 		return;
 	}
 
-	count = uap->port.fifosize >> 1;
+	/* If we are using DMA mode, try to send some characters. */
+	if (pl011_dma_tx_irq(uap))
+		return;
+
+	count = uap->fifosize >> 1;
 	do {
 		writew(xmit->buf[xmit->tail], uap->port.membase + UART01x_DR);
 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
@@ -246,10 +754,11 @@ static void pl011_modem_status(struct uart_amba_port *uap)
 static irqreturn_t pl011_int(int irq, void *dev_id)
 {
 	struct uart_amba_port *uap = dev_id;
+	unsigned long flags;
 	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
 	int handled = 0;
 
-	spin_lock(&uap->port.lock);
+	spin_lock_irqsave(&uap->port.lock, flags);
 
 	status = readw(uap->port.membase + UART011_MIS);
 	if (status) {
@@ -274,7 +783,7 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
 		handled = 1;
 	}
 
-	spin_unlock(&uap->port.lock);
+	spin_unlock_irqrestore(&uap->port.lock, flags);
 
 	return IRQ_RETVAL(handled);
 }
@@ -396,7 +905,7 @@ static int pl011_startup(struct uart_port *port)
 	if (retval)
 		goto clk_dis;
 
-	writew(uap->ifls, uap->port.membase + UART011_IFLS);
+	writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);
 
 	/*
 	 * Provoke TX FIFO interrupt into asserting.
@@ -423,11 +932,18 @@ static int pl011_startup(struct uart_port *port)
 	cr = UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
 	writew(cr, uap->port.membase + UART011_CR);
 
+	/* Clear pending error interrupts */
+	writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
+	       uap->port.membase + UART011_ICR);
+
 	/*
 	 * initialise the old status of the modem signals
 	 */
 	uap->old_status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
 
+	/* Startup DMA */
+	pl011_dma_startup(uap);
+
 	/*
 	 * Finally, enable interrupts
 	 */
@@ -467,6 +983,8 @@ static void pl011_shutdown(struct uart_port *port)
 	writew(0xffff, uap->port.membase + UART011_ICR);
 	spin_unlock_irq(&uap->port.lock);
 
+	pl011_dma_shutdown(uap);
+
 	/*
 	 * Free the interrupt
 	 */
@@ -498,13 +1016,18 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
 	unsigned int lcr_h, old_cr;
 	unsigned long flags;
-	unsigned int baud, quot;
+	unsigned int baud, quot, clkdiv;
+
+	if (uap->vendor->oversampling)
+		clkdiv = 8;
+	else
+		clkdiv = 16;
 
 	/*
 	 * Ask the core to calculate the divisor for us.
 	 */
 	baud = uart_get_baud_rate(port, termios, old, 0,
-				  port->uartclk/(uap->oversampling ? 8 : 16));
+				  port->uartclk / clkdiv);
 
 	if (baud > port->uartclk/16)
 		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
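For the non-oversampled case the driver encodes the baud divisor as a fixed-point value, quot = uartclk * 4 / baud, split into an integer part (ibrd, quot >> 6) and a fractional part (fbrd, quot & 0x3f); pl011_console_get_options() below inverts exactly this with *baud = uartclk * 4 / (64 * ibrd + fbrd). A worked example assuming a 24 MHz uartclk:

    /*
     * Worked example of the PL011 baud divisor encoding; the 24 MHz
     * clock is an assumed example value, not taken from the patch.
     */
    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
        unsigned int uartclk = 24000000, baud = 115200;
        /* normal (divide-by-16) mode: quot = uartclk * 4 / baud */
        unsigned int quot = DIV_ROUND_CLOSEST(uartclk * 4, baud);

        printf("quot=%u ibrd=%u fbrd=%u\n", quot, quot >> 6, quot & 0x3f); /* 833, 13, 1 */
        printf("baud back: %u\n", uartclk * 4 / quot);                     /* ~115246 */
        return 0;
    }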
@@ -532,7 +1055,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
 		if (!(termios->c_cflag & PARODD))
 			lcr_h |= UART01x_LCRH_EPS;
 	}
-	if (port->fifosize > 1)
+	if (uap->fifosize > 1)
 		lcr_h |= UART01x_LCRH_FEN;
 
 	spin_lock_irqsave(&port->lock, flags);
@@ -588,8 +1111,8 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
 		uap->autorts = false;
 	}
 
-	if (uap->oversampling) {
-		if (baud > port->uartclk/16)
+	if (uap->vendor->oversampling) {
+		if (baud > port->uartclk / 16)
 			old_cr |= ST_UART011_CR_OVSFACT;
 		else
 			old_cr &= ~ST_UART011_CR_OVSFACT;
@@ -622,7 +1145,8 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
 
 static const char *pl011_type(struct uart_port *port)
 {
-	return port->type == PORT_AMBA ? "AMBA/PL011" : NULL;
+	struct uart_amba_port *uap = (struct uart_amba_port *)port;
+	return uap->port.type == PORT_AMBA ? uap->type : NULL;
 }
 
 /*
@@ -679,6 +1203,7 @@ static struct uart_ops amba_pl011_pops = {
 	.break_ctl	= pl011_break_ctl,
 	.startup	= pl011_startup,
 	.shutdown	= pl011_shutdown,
+	.flush_buffer	= pl011_dma_flush_buffer,
 	.set_termios	= pl011_set_termios,
 	.type		= pl011_type,
 	.release_port	= pl010_release_port,
@@ -761,7 +1286,7 @@ pl011_console_get_options(struct uart_amba_port *uap, int *baud,
 
 		*baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);
 
-		if (uap->oversampling) {
+		if (uap->vendor->oversampling) {
 			if (readw(uap->port.membase + UART011_CR)
 			    & ST_UART011_CR_OVSFACT)
 				*baud *= 2;
@@ -858,19 +1383,22 @@ static int pl011_probe(struct amba_device *dev, struct amba_id *id)
 		goto unmap;
 	}
 
-	uap->ifls = vendor->ifls;
+	uap->vendor = vendor;
 	uap->lcrh_rx = vendor->lcrh_rx;
 	uap->lcrh_tx = vendor->lcrh_tx;
-	uap->oversampling = vendor->oversampling;
+	uap->fifosize = vendor->fifosize;
 	uap->port.dev = &dev->dev;
 	uap->port.mapbase = dev->res.start;
 	uap->port.membase = base;
 	uap->port.iotype = UPIO_MEM;
 	uap->port.irq = dev->irq[0];
-	uap->port.fifosize = vendor->fifosize;
+	uap->port.fifosize = uap->fifosize;
 	uap->port.ops = &amba_pl011_pops;
 	uap->port.flags = UPF_BOOT_AUTOCONF;
 	uap->port.line = i;
+	pl011_dma_probe(uap);
+
+	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
 
 	amba_ports[i] = uap;
 
@@ -879,6 +1407,7 @@ static int pl011_probe(struct amba_device *dev, struct amba_id *id)
 	if (ret) {
 		amba_set_drvdata(dev, NULL);
 		amba_ports[i] = NULL;
+		pl011_dma_remove(uap);
 		clk_put(uap->clk);
 	unmap:
 		iounmap(base);
@@ -902,6 +1431,7 @@ static int pl011_remove(struct amba_device *dev)
 		if (amba_ports[i] == uap)
 			amba_ports[i] = NULL;
 
+	pl011_dma_remove(uap);
 	iounmap(uap->port.membase);
 	clk_put(uap->clk);
 	kfree(uap);
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index 6021588ba0a8..5479fdc849e9 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -113,6 +113,21 @@
 #define UART01x_LCRH_PEN	0x02
 #define UART01x_LCRH_BRK	0x01
 
+#define ST_UART011_DMAWM_RX_1	(0 << 3)
+#define ST_UART011_DMAWM_RX_2	(1 << 3)
+#define ST_UART011_DMAWM_RX_4	(2 << 3)
+#define ST_UART011_DMAWM_RX_8	(3 << 3)
+#define ST_UART011_DMAWM_RX_16	(4 << 3)
+#define ST_UART011_DMAWM_RX_32	(5 << 3)
+#define ST_UART011_DMAWM_RX_48	(6 << 3)
+#define ST_UART011_DMAWM_TX_1	0
+#define ST_UART011_DMAWM_TX_2	1
+#define ST_UART011_DMAWM_TX_4	2
+#define ST_UART011_DMAWM_TX_8	3
+#define ST_UART011_DMAWM_TX_16	4
+#define ST_UART011_DMAWM_TX_32	5
+#define ST_UART011_DMAWM_TX_48	6
+
 #define UART010_IIR_RTIS	0x08
 #define UART010_IIR_TIS	0x04
 #define UART010_IIR_RIS	0x02
@@ -180,6 +195,13 @@ struct amba_device; /* in uncompress this is included but amba/bus.h is not */
 struct amba_pl010_data {
 	void (*set_mctrl)(struct amba_device *dev, void __iomem *base, unsigned int mctrl);
 };
+
+struct dma_chan;
+struct amba_pl011_data {
+	bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+	void *dma_rx_param;
+	void *dma_tx_param;
+};
 #endif
 
 #endif
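The new amba_pl011_data is how a platform hands the driver a dmaengine channel: pl011_dma_probe_initcall() above passes dma_filter and dma_tx_param straight to dma_request_channel() (only the TX parameter is consumed by this patch; dma_rx_param is reserved). A hypothetical board-file sketch; the filter function, the chan_id matching scheme, and the request-line numbers are invented for illustration:

    /*
     * Hypothetical board code feeding amba_pl011_data to the driver.
     * Only the struct layout comes from the patch; everything else
     * here is an assumption for illustration.
     */
    #include <linux/amba/serial.h>
    #include <linux/dmaengine.h>
    #include <linux/types.h>

    /* Invented: pick the channel whose id matches the encoded request line. */
    static bool board_dma_filter(struct dma_chan *chan, void *filter_param)
    {
        return chan->chan_id == (int)(uintptr_t)filter_param;
    }

    static struct amba_pl011_data uart0_plat_data = {
        .dma_filter   = board_dma_filter,
        .dma_rx_param = (void *)13,  /* invented RX request line */
        .dma_tx_param = (void *)14,  /* invented TX request line */
    };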