author	Russell King <rmk+kernel@arm.linux.org.uk>	2010-12-22 12:24:39 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2011-01-05 11:58:48 -0500
commit	68b65f7305e54b822b2483c60de7d7b017526a92 (patch)
tree	c67c4d2791f45f48a042b3bdcf16b342f6b2163a /drivers
parent	963cc981af620c7c07b5f6d1ab998b639e90ecb1 (diff)
ARM: PL011: Add support for transmit DMA
Add DMA engine support for transmit to the PL011 driver.  Based on a
patch from Linus Walleij, with the following changes:

- remove RX DMA support.  As PL011 doesn't give us receive timeout
  interrupts, we only get notified of received data when the RX DMA
  has completed.  This rather sucks for interactive use of the TTY.

- remove abuse of completions.  Completions are supposed to be for
  events, not to tell what condition buffers are in.  Replace it with
  a simple 'queued' bool.

- fix locking - it is only safe to access the circular buffer with the
  port lock held.

- only map the DMA buffer when required - if we're ever behind an
  IOMMU this helps keep IOMMU usage down, and also ensures that we're
  legal when we change the scatterlist entry length.

- fix XON/XOFF sending - we must send XON/XOFF characters out as soon
  as possible - waiting for up to 4095 characters in the DMA buffer to
  be sent first is not acceptable.

- fix XON/XOFF receive handling - we need to stop DMA when instructed
  to by the TTY layer, and restart it again when instructed to.  There
  is a subtle problem here: we must not completely empty the circular
  buffer with DMA, otherwise we will not be notified of XON.

- change the 'enable_dma' flag into a 'using DMA' flag, and track
  whether we can use TX DMA by whether the channel pointer is
  non-NULL.  This gives us more control over whether we use DMA in
  the driver.

- we don't need to have the TX DMA buffer continually allocated for
  each port - instead, allocate it when the port starts up, and free
  it when it's shut down.  Update the 'using DMA' flag if we get the
  buffer, and adjust the TTY FIFO size appropriately.

- if we're going to use PIO to send characters, use the existing IRQ
  based functionality rather than reimplementing it.  This also
  ensures we call uart_write_wakeup() at the appropriate time,
  otherwise we'll stall.

- use DMA engine helper functions for type safety.

- fix init when built as a module - we can't have two initcall
  functions, so we must settle on one.  This means we can eliminate
  the deferred DMA initialization.

- there is no need to terminate transfers on a failed prep_slave_sg()
  call - nothing has been set up, so nothing needs to be terminated.
  This avoids a potential deadlock in the DMA engine code
  (tasklet->callback->failed prepare->terminate->tasklet_disable,
  which then ends up waiting for the tasklet to finish running.)

- Dan says that the submission callback should not return an error:
  | dma_submit_error() is something I should have removed after commit
  | a0587bcf "ioat1: move descriptor allocation from submit to prep" all
  | errors should be notified by prep failing to return a descriptor
  | handle.  Negative dma_cookie_t values are only returned by the
  | dma_async_memcpy* calls which translate a prep failure into -ENOMEM.
  So remove the error handling at that point.  This also solves the
  potential deadlock mentioned in the previous comment.

Acked-by: Linus Walleij <linus.walleij@stericsson.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
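For reference, the whole TX submission path reduces to the standard
dmaengine slave pattern sketched below.  This is an illustrative
condensation only - sketch_tx_submit() and its arguments are
placeholder names, not symbols from this patch - but it shows why, per
Dan's comment above, the only failure point is prep_slave_sg()
returning a NULL descriptor:

	static int sketch_tx_submit(struct dma_chan *chan, struct scatterlist *sg,
				    dma_async_tx_callback done, void *arg)
	{
		struct dma_device *dma_dev = chan->device;
		struct dma_async_tx_descriptor *desc;

		/* all errors surface here, as a NULL descriptor */
		desc = dma_dev->device_prep_slave_sg(chan, sg, 1, DMA_TO_DEVICE,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc)
			return -EBUSY;	/* caller falls back to IRQ-driven PIO */

		desc->callback = done;		/* runs when the buffer is sent */
		desc->callback_param = arg;

		dmaengine_submit(desc);		/* cannot fail; no cookie check */
		dma_dev->device_issue_pending(chan);
		return 0;
	}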
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/serial/amba-pl011.c | 508
1 file changed, 506 insertions(+), 2 deletions(-)
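The driver only enables TX DMA when the platform supplies a channel
filter via amba_pl011_data (see pl011_dma_probe_initcall() in the diff
below).  A hypothetical board-file sketch of how that might be wired
up - uart0_dma_filter and the channel id are made-up for illustration;
only the dma_filter/dma_tx_param fields come from the patch:

	#include <linux/amba/serial.h>
	#include <linux/dmaengine.h>

	/* match the DMA channel hard-wired to UART0 TX; controller-specific */
	static bool uart0_dma_filter(struct dma_chan *chan, void *param)
	{
		return chan->chan_id == (unsigned int)(unsigned long)param;
	}

	static struct amba_pl011_data uart0_plat_data = {
		.dma_filter	= uart0_dma_filter,
		.dma_tx_param	= (void *)4,	/* hypothetical channel id */
	};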
diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c
index f741a8b51400..ab025dc52fa4 100644
--- a/drivers/serial/amba-pl011.c
+++ b/drivers/serial/amba-pl011.c
@@ -7,6 +7,7 @@
  *
  * Copyright 1999 ARM Limited
  * Copyright (C) 2000 Deep Blue Solutions Ltd.
+ * Copyright (C) 2010 ST-Ericsson SA
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -48,6 +49,9 @@
 #include <linux/amba/serial.h>
 #include <linux/clk.h>
 #include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
 
 #include <asm/io.h>
 #include <asm/sizes.h>
@@ -88,6 +92,14 @@ static struct vendor_data vendor_st = {
 	.oversampling = true,
 };
 
+/* Deals with DMA transactions */
+struct pl011_dmatx_data {
+	struct dma_chan *chan;
+	struct scatterlist sg;
+	char *buf;
+	bool queued;
+};
+
 /*
  * We wrap our port structure around the generic uart_port.
  */
@@ -95,6 +107,7 @@ struct uart_amba_port {
 	struct uart_port port;
 	struct clk *clk;
 	const struct vendor_data *vendor;
+	unsigned int dmacr;		/* dma control reg */
 	unsigned int im;		/* interrupt mask */
 	unsigned int old_status;
 	unsigned int fifosize;		/* vendor-specific */
@@ -102,22 +115,500 @@ struct uart_amba_port {
 	unsigned int lcrh_rx;		/* vendor-specific */
 	bool autorts;
 	char type[12];
+#ifdef CONFIG_DMA_ENGINE
+	/* DMA stuff */
+	bool using_dma;
+	struct pl011_dmatx_data dmatx;
+#endif
+};
+
+/*
+ * All the DMA operation mode stuff goes inside this ifdef.
+ * This assumes that you have a generic DMA device interface,
+ * no custom DMA interfaces are supported.
+ */
+#ifdef CONFIG_DMA_ENGINE
+
+#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
+
+static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
+{
+	/* DMA is the sole user of the platform data right now */
+	struct amba_pl011_data *plat = uap->port.dev->platform_data;
+	struct dma_slave_config tx_conf = {
+		.dst_addr = uap->port.mapbase + UART01x_DR,
+		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+		.direction = DMA_TO_DEVICE,
+		.dst_maxburst = uap->fifosize >> 1,
+	};
+	struct dma_chan *chan;
+	dma_cap_mask_t mask;
+
+	/* We need platform data */
+	if (!plat || !plat->dma_filter) {
+		dev_info(uap->port.dev, "no DMA platform data\n");
+		return;
+	}
+
+	/* Try to acquire a generic DMA engine slave channel */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	chan = dma_request_channel(mask, plat->dma_filter, plat->dma_tx_param);
+	if (!chan) {
+		dev_err(uap->port.dev, "no TX DMA channel!\n");
+		return;
+	}
+
+	dmaengine_slave_config(chan, &tx_conf);
+	uap->dmatx.chan = chan;
+
+	dev_info(uap->port.dev, "DMA channel TX %s\n",
+		 dma_chan_name(uap->dmatx.chan));
+}
+
+#ifndef MODULE
+/*
+ * Stack up the UARTs and let the above initcall be done at device
+ * initcall time, because the serial driver is called as an arch
+ * initcall, and at this time the DMA subsystem is not yet registered.
+ * At this point the driver will switch over to using DMA where desired.
+ */
+struct dma_uap {
+	struct list_head node;
+	struct uart_amba_port *uap;
 };
 
+static LIST_HEAD(pl011_dma_uarts);
+
+static int __init pl011_dma_initcall(void)
+{
+	struct list_head *node, *tmp;
+
+	list_for_each_safe(node, tmp, &pl011_dma_uarts) {
+		struct dma_uap *dmau = list_entry(node, struct dma_uap, node);
+		pl011_dma_probe_initcall(dmau->uap);
+		list_del(node);
+		kfree(dmau);
+	}
+	return 0;
+}
+
+device_initcall(pl011_dma_initcall);
+
+static void pl011_dma_probe(struct uart_amba_port *uap)
+{
+	struct dma_uap *dmau = kzalloc(sizeof(struct dma_uap), GFP_KERNEL);
+	if (dmau) {
+		dmau->uap = uap;
+		list_add_tail(&dmau->node, &pl011_dma_uarts);
+	}
+}
+#else
+static void pl011_dma_probe(struct uart_amba_port *uap)
+{
+	pl011_dma_probe_initcall(uap);
+}
+#endif
+
+static void pl011_dma_remove(struct uart_amba_port *uap)
+{
+	/* TODO: remove the initcall if it has not yet executed */
+	if (uap->dmatx.chan)
+		dma_release_channel(uap->dmatx.chan);
+}
+
+
+/* Forward declare this for the refill routine */
+static int pl011_dma_tx_refill(struct uart_amba_port *uap);
+
+/*
+ * The current DMA TX buffer has been sent.
+ * Try to queue up another DMA buffer.
+ */
+static void pl011_dma_tx_callback(void *data)
+{
+	struct uart_amba_port *uap = data;
+	struct pl011_dmatx_data *dmatx = &uap->dmatx;
+	unsigned long flags;
+	u16 dmacr;
+
+	spin_lock_irqsave(&uap->port.lock, flags);
+	if (uap->dmatx.queued)
+		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
+			     DMA_TO_DEVICE);
+
+	dmacr = uap->dmacr;
+	uap->dmacr = dmacr & ~UART011_TXDMAE;
+	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+
+	/*
+	 * If TX DMA was disabled, it means that we've stopped the DMA for
+	 * some reason (eg, XOFF received, or we want to send an X-char.)
+	 *
+	 * Note: we need to be careful here of a potential race between DMA
+	 * and the rest of the driver - if the driver disables TX DMA while
+	 * a TX buffer is completing, we must update the tx queued status to
+	 * get further refills (hence we check dmacr).
+	 */
+	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
+	    uart_circ_empty(&uap->port.state->xmit)) {
+		uap->dmatx.queued = false;
+		spin_unlock_irqrestore(&uap->port.lock, flags);
+		return;
+	}
+
+	if (pl011_dma_tx_refill(uap) <= 0) {
+		/*
+		 * We didn't queue a DMA buffer for some reason, but we
+		 * have data pending to be sent.  Re-enable the TX IRQ.
+		 */
+		uap->im |= UART011_TXIM;
+		writew(uap->im, uap->port.membase + UART011_IMSC);
+	}
+	spin_unlock_irqrestore(&uap->port.lock, flags);
+}
+
+/*
+ * Try to refill the TX DMA buffer.
+ * Locking: called with port lock held and IRQs disabled.
+ * Returns:
+ *   1 if we queued up a TX DMA buffer.
+ *   0 if we didn't want to handle this by DMA
+ *  <0 on error
+ */
+static int pl011_dma_tx_refill(struct uart_amba_port *uap)
+{
+	struct pl011_dmatx_data *dmatx = &uap->dmatx;
+	struct dma_chan *chan = dmatx->chan;
+	struct dma_device *dma_dev = chan->device;
+	struct dma_async_tx_descriptor *desc;
+	struct circ_buf *xmit = &uap->port.state->xmit;
+	unsigned int count;
+
+	/*
+	 * Try to avoid the overhead involved in using DMA if the
+	 * transaction fits in the first half of the FIFO, by using
+	 * the standard interrupt handling.  This ensures that we
+	 * issue a uart_write_wakeup() at the appropriate time.
+	 */
+	count = uart_circ_chars_pending(xmit);
+	if (count < (uap->fifosize >> 1)) {
+		uap->dmatx.queued = false;
+		return 0;
+	}
+
+	/*
+	 * Bodge: don't send the last character by DMA, as this
+	 * will prevent XON from notifying us to restart DMA.
+	 */
+	count -= 1;
+
+	/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
+	if (count > PL011_DMA_BUFFER_SIZE)
+		count = PL011_DMA_BUFFER_SIZE;
+
+	if (xmit->tail < xmit->head)
+		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
+	else {
+		size_t first = UART_XMIT_SIZE - xmit->tail;
+		size_t second = xmit->head;
+
+		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
+		if (second)
+			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
+	}
+
+	dmatx->sg.length = count;
+
+	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
+		uap->dmatx.queued = false;
+		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
+		return -EBUSY;
+	}
+
+	desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_TO_DEVICE,
+					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc) {
+		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
+		uap->dmatx.queued = false;
+		/*
+		 * If DMA cannot be used right now, we complete this
+		 * transaction via IRQ and let the TTY layer retry.
+		 */
+		dev_dbg(uap->port.dev, "TX DMA busy\n");
+		return -EBUSY;
+	}
+
+	/* Some data to go along to the callback */
+	desc->callback = pl011_dma_tx_callback;
+	desc->callback_param = uap;
+
+	/* All errors should happen at prepare time */
+	dmaengine_submit(desc);
+
+	/* Fire the DMA transaction */
+	dma_dev->device_issue_pending(chan);
+
+	uap->dmacr |= UART011_TXDMAE;
+	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+	uap->dmatx.queued = true;
+
+	/*
+	 * Now we know that DMA will fire, so advance the ring buffer
+	 * with the stuff we just dispatched.
+	 */
+	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+	uap->port.icount.tx += count;
+
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(&uap->port);
+
+	return 1;
+}
+
+/*
+ * We received a transmit interrupt without a pending X-char but with
+ * pending characters.
+ * Locking: called with port lock held and IRQs disabled.
+ * Returns:
+ *   false if we want to use PIO to transmit
+ *   true if we queued a DMA buffer
+ */
+static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
+{
+	if (!uap->using_dma)
+		return false;
+
+	/*
+	 * If we already have a TX buffer queued, but received a
+	 * TX interrupt, it will be because we've just sent an X-char.
+	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
+	 */
+	if (uap->dmatx.queued) {
+		uap->dmacr |= UART011_TXDMAE;
+		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+		uap->im &= ~UART011_TXIM;
+		writew(uap->im, uap->port.membase + UART011_IMSC);
+		return true;
+	}
+
+	/*
+	 * We don't have a TX buffer queued, so try to queue one.
+	 * If we successfully queued a buffer, mask the TX IRQ.
+	 */
+	if (pl011_dma_tx_refill(uap) > 0) {
+		uap->im &= ~UART011_TXIM;
+		writew(uap->im, uap->port.membase + UART011_IMSC);
+		return true;
+	}
+	return false;
+}
+
+/*
+ * Stop the DMA transmit (eg, due to received XOFF).
+ * Locking: called with port lock held and IRQs disabled.
+ */
+static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
+{
+	if (uap->dmatx.queued) {
+		uap->dmacr &= ~UART011_TXDMAE;
+		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+	}
+}
+
+/*
+ * Try to start a DMA transmit, or in the case of an XON/XOFF
+ * character queued for send, try to get that character out ASAP.
+ * Locking: called with port lock held and IRQs disabled.
+ * Returns:
+ *   false if we want the TX IRQ to be enabled
+ *   true if we have a buffer queued
+ */
+static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
+{
+	u16 dmacr;
+
+	if (!uap->using_dma)
+		return false;
+
+	if (!uap->port.x_char) {
+		/* no X-char, try to push chars out in DMA mode */
+		bool ret = true;
+
+		if (!uap->dmatx.queued) {
+			if (pl011_dma_tx_refill(uap) > 0) {
+				uap->im &= ~UART011_TXIM;
+				ret = true;
+			} else {
+				uap->im |= UART011_TXIM;
+				ret = false;
+			}
+			writew(uap->im, uap->port.membase + UART011_IMSC);
+		} else if (!(uap->dmacr & UART011_TXDMAE)) {
+			uap->dmacr |= UART011_TXDMAE;
+			writew(uap->dmacr,
+			       uap->port.membase + UART011_DMACR);
+		}
+		return ret;
+	}
+
+	/*
+	 * We have an X-char to send.  Disable DMA to prevent it loading
+	 * the TX fifo, and then see if we can stuff it into the FIFO.
+	 */
+	dmacr = uap->dmacr;
+	uap->dmacr &= ~UART011_TXDMAE;
+	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+
+	if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) {
+		/*
+		 * No space in the FIFO, so enable the transmit interrupt
+		 * so we know when there is space.  Note that once we've
+		 * loaded the character, we should just re-enable DMA.
+		 */
+		return false;
+	}
+
+	writew(uap->port.x_char, uap->port.membase + UART01x_DR);
+	uap->port.icount.tx++;
+	uap->port.x_char = 0;
+
+	/* Success - restore the DMA state */
+	uap->dmacr = dmacr;
+	writew(dmacr, uap->port.membase + UART011_DMACR);
+
+	return true;
+}
+
+/*
+ * Flush the transmit buffer.
+ * Locking: called with port lock held and IRQs disabled.
+ */
+static void pl011_dma_flush_buffer(struct uart_port *port)
+{
+	struct uart_amba_port *uap = (struct uart_amba_port *)port;
+
+	if (!uap->using_dma)
+		return;
+
+	/* Avoid deadlock with the DMA engine callback */
+	spin_unlock(&uap->port.lock);
+	dmaengine_terminate_all(uap->dmatx.chan);
+	spin_lock(&uap->port.lock);
+	if (uap->dmatx.queued) {
+		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
+			     DMA_TO_DEVICE);
+		uap->dmatx.queued = false;
+		uap->dmacr &= ~UART011_TXDMAE;
+		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+	}
+}
+
+
+static void pl011_dma_startup(struct uart_amba_port *uap)
+{
+	if (!uap->dmatx.chan)
+		return;
+
+	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
+	if (!uap->dmatx.buf) {
+		dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
+		uap->port.fifosize = uap->fifosize;
+		return;
+	}
+
+	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
+
+	/* The DMA buffer is now the FIFO the TTY subsystem can use */
+	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
+	uap->using_dma = true;
+
+	/* Turn on DMA error (RX/TX will be enabled on demand) */
+	uap->dmacr |= UART011_DMAONERR;
+	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+}
+
+static void pl011_dma_shutdown(struct uart_amba_port *uap)
+{
+	if (!uap->using_dma)
+		return;
+
+	/* Disable RX and TX DMA */
+	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
+		barrier();
+
+	spin_lock_irq(&uap->port.lock);
+	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
+	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+	spin_unlock_irq(&uap->port.lock);
+
+	/* In theory, this should already be done by pl011_dma_flush_buffer */
+	dmaengine_terminate_all(uap->dmatx.chan);
+	if (uap->dmatx.queued) {
+		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
+			     DMA_TO_DEVICE);
+		uap->dmatx.queued = false;
+	}
+
+	kfree(uap->dmatx.buf);
+
+	uap->using_dma = false;
+}
+
+#else
+/* Blank functions if the DMA engine is not available */
+static inline void pl011_dma_probe(struct uart_amba_port *uap)
+{
+}
+
+static inline void pl011_dma_remove(struct uart_amba_port *uap)
+{
+}
+
+static inline void pl011_dma_startup(struct uart_amba_port *uap)
+{
+}
+
+static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
+{
+}
+
+static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
+{
+	return false;
+}
+
+static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
+{
+}
+
+static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
+{
+	return false;
+}
+
+#define pl011_dma_flush_buffer NULL
+#endif
+
+
 static void pl011_stop_tx(struct uart_port *port)
 {
 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
 
 	uap->im &= ~UART011_TXIM;
 	writew(uap->im, uap->port.membase + UART011_IMSC);
+	pl011_dma_tx_stop(uap);
 }
 
 static void pl011_start_tx(struct uart_port *port)
 {
 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
 
-	uap->im |= UART011_TXIM;
-	writew(uap->im, uap->port.membase + UART011_IMSC);
+	if (!pl011_dma_tx_start(uap)) {
+		uap->im |= UART011_TXIM;
+		writew(uap->im, uap->port.membase + UART011_IMSC);
+	}
 }
 
 static void pl011_stop_rx(struct uart_port *port)
@@ -204,6 +695,10 @@ static void pl011_tx_chars(struct uart_amba_port *uap)
 		return;
 	}
 
+	/* If we are using DMA mode, try to send some characters. */
+	if (pl011_dma_tx_irq(uap))
+		return;
+
 	count = uap->fifosize >> 1;
 	do {
 		writew(xmit->buf[xmit->tail], uap->port.membase + UART01x_DR);
@@ -434,6 +929,9 @@ static int pl011_startup(struct uart_port *port)
 	 */
 	uap->old_status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
 
+	/* Startup DMA */
+	pl011_dma_startup(uap);
+
 	/*
 	 * Finally, enable interrupts
 	 */
@@ -473,6 +971,8 @@ static void pl011_shutdown(struct uart_port *port)
 	writew(0xffff, uap->port.membase + UART011_ICR);
 	spin_unlock_irq(&uap->port.lock);
 
+	pl011_dma_shutdown(uap);
+
 	/*
 	 * Free the interrupt
 	 */
@@ -691,6 +1191,7 @@ static struct uart_ops amba_pl011_pops = {
 	.break_ctl	= pl011_break_ctl,
 	.startup	= pl011_startup,
 	.shutdown	= pl011_shutdown,
+	.flush_buffer	= pl011_dma_flush_buffer,
 	.set_termios	= pl011_set_termios,
 	.type		= pl011_type,
 	.release_port	= pl010_release_port,
@@ -883,6 +1384,7 @@ static int pl011_probe(struct amba_device *dev, struct amba_id *id)
 	uap->port.ops = &amba_pl011_pops;
 	uap->port.flags = UPF_BOOT_AUTOCONF;
 	uap->port.line = i;
+	pl011_dma_probe(uap);
 
 	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
 
@@ -893,6 +1395,7 @@ static int pl011_probe(struct amba_device *dev, struct amba_id *id)
 	if (ret) {
 		amba_set_drvdata(dev, NULL);
 		amba_ports[i] = NULL;
+		pl011_dma_remove(uap);
 		clk_put(uap->clk);
  unmap:
 		iounmap(base);
@@ -916,6 +1419,7 @@ static int pl011_remove(struct amba_device *dev)
 	if (amba_ports[i] == uap)
 		amba_ports[i] = NULL;
 
+	pl011_dma_remove(uap);
 	iounmap(uap->port.membase);
 	clk_put(uap->clk);
 	kfree(uap);