author		Linus Walleij <linus.walleij@linaro.org>	2011-02-24 07:21:08 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2011-03-10 05:07:22 -0500
commit		ead76f329f777c7301e0a5456a0a1c7a081570bd (patch)
tree		71a2954490057806b4ce87a7d368f11f62b16e85 /drivers/tty
parent		a5abba989deceb731047425812d268daf7536575 (diff)
ARM: 6763/1: pl011: add optional RX DMA to PL011 v2
This adds an optional RX DMA codepath for the devices that support this by
using the appropriate burst sizes instead of pulling single bytes. Includes
portions of code written by Russell King during a PL08x hacking session.

This has been tested on U300 and Ux500.

Tested-by: Jerzy Kasenberg <jerzy.kasenberg@tieto.com>
Tested-by: Grzegorz Sygieda <grzegorz.sygieda@tieto.com>
Tested-by: Marcin Mielczarczyk <marcin.mielczarczyk@tieto.com>
Signed-off-by: Per Forlin <per.friden@stericsson.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
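Note: the new RX path is only taken when the board's platform data supplies an RX DMA request in addition to the TX one (see plat->dma_rx_param in the probe code below). A minimal, hypothetical board-file sketch; the channel names and the PL08x filter are illustrative assumptions, not part of this patch:

	static struct amba_pl011_data uart0_plat_data = {
		.dma_filter	= pl08x_filter_id,	/* assumed board-specific dmaengine filter */
		.dma_tx_param	= "uart0_tx",		/* hypothetical TX request name */
		.dma_rx_param	= "uart0_rx",		/* supplying this enables the RX DMA codepath */
	};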
Diffstat (limited to 'drivers/tty')
-rw-r--r--	drivers/tty/serial/amba-pl011.c	454
1 file changed, 434 insertions(+), 20 deletions(-)
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index e76d7d000128..cb45136f6867 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -96,6 +96,22 @@ static struct vendor_data vendor_st = {
 };
 
 /* Deals with DMA transactions */
+
+struct pl011_sgbuf {
+	struct scatterlist sg;
+	char *buf;
+};
+
+struct pl011_dmarx_data {
+	struct dma_chan		*chan;
+	struct completion	complete;
+	bool			use_buf_b;
+	struct pl011_sgbuf	sgbuf_a;
+	struct pl011_sgbuf	sgbuf_b;
+	dma_cookie_t		cookie;
+	bool			running;
+};
+
 struct pl011_dmatx_data {
 	struct dma_chan		*chan;
 	struct scatterlist	sg;
@@ -120,7 +136,9 @@ struct uart_amba_port {
 	char			type[12];
 #ifdef CONFIG_DMA_ENGINE
 	/* DMA stuff */
-	bool			using_dma;
+	bool			using_tx_dma;
+	bool			using_rx_dma;
+	struct pl011_dmarx_data dmarx;
 	struct pl011_dmatx_data	dmatx;
 #endif
 };
@@ -134,6 +152,31 @@ struct uart_amba_port {
 
 #define PL011_DMA_BUFFER_SIZE PAGE_SIZE
 
+static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
+	enum dma_data_direction dir)
+{
+	sg->buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
+	if (!sg->buf)
+		return -ENOMEM;
+
+	sg_init_one(&sg->sg, sg->buf, PL011_DMA_BUFFER_SIZE);
+
+	if (dma_map_sg(chan->device->dev, &sg->sg, 1, dir) != 1) {
+		kfree(sg->buf);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
+	enum dma_data_direction dir)
+{
+	if (sg->buf) {
+		dma_unmap_sg(chan->device->dev, &sg->sg, 1, dir);
+		kfree(sg->buf);
+	}
+}
+
 static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
 {
 	/* DMA is the sole user of the platform data right now */
@@ -153,7 +196,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
 		return;
 	}
 
-	/* Try to acquire a generic DMA engine slave channel */
+	/* Try to acquire a generic DMA engine slave TX channel */
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);
 
@@ -168,6 +211,28 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
 
 	dev_info(uap->port.dev, "DMA channel TX %s\n",
 		 dma_chan_name(uap->dmatx.chan));
+
+	/* Optionally make use of an RX channel as well */
+	if (plat->dma_rx_param) {
+		struct dma_slave_config rx_conf = {
+			.src_addr = uap->port.mapbase + UART01x_DR,
+			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+			.direction = DMA_FROM_DEVICE,
+			.src_maxburst = uap->fifosize >> 1,
+		};
+
+		chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
+		if (!chan) {
+			dev_err(uap->port.dev, "no RX DMA channel!\n");
+			return;
+		}
+
+		dmaengine_slave_config(chan, &rx_conf);
+		uap->dmarx.chan = chan;
+
+		dev_info(uap->port.dev, "DMA channel RX %s\n",
+			 dma_chan_name(uap->dmarx.chan));
+	}
 }
 
 #ifndef MODULE
@@ -219,9 +284,10 @@ static void pl011_dma_remove(struct uart_amba_port *uap)
 	/* TODO: remove the initcall if it has not yet executed */
 	if (uap->dmatx.chan)
 		dma_release_channel(uap->dmatx.chan);
+	if (uap->dmarx.chan)
+		dma_release_channel(uap->dmarx.chan);
 }
 
-
 /* Forward declare this for the refill routine */
 static int pl011_dma_tx_refill(struct uart_amba_port *uap);
 
@@ -380,7 +446,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
  */
 static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
 {
-	if (!uap->using_dma)
+	if (!uap->using_tx_dma)
 		return false;
 
 	/*
@@ -432,7 +498,7 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
 {
 	u16 dmacr;
 
-	if (!uap->using_dma)
+	if (!uap->using_tx_dma)
 		return false;
 
 	if (!uap->port.x_char) {
@@ -492,7 +558,7 @@ static void pl011_dma_flush_buffer(struct uart_port *port)
 {
 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
 
-	if (!uap->using_dma)
+	if (!uap->using_tx_dma)
 		return;
 
 	/* Avoid deadlock with the DMA engine callback */
@@ -508,9 +574,260 @@ static void pl011_dma_flush_buffer(struct uart_port *port)
 	}
 }
 
+static void pl011_dma_rx_callback(void *data);
+
+static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
+{
+	struct dma_chan *rxchan = uap->dmarx.chan;
+	struct dma_device *dma_dev;
+	struct pl011_dmarx_data *dmarx = &uap->dmarx;
+	struct dma_async_tx_descriptor *desc;
+	struct pl011_sgbuf *sgbuf;
+
+	if (!rxchan)
+		return -EIO;
+
+	/* Start the RX DMA job */
+	sgbuf = uap->dmarx.use_buf_b ?
+		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+	dma_dev = rxchan->device;
+	desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1,
+					DMA_FROM_DEVICE,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	/*
+	 * If the DMA engine is busy and cannot prepare a
+	 * channel, no big deal, the driver will fall back
+	 * to interrupt mode as a result of this error code.
+	 */
+	if (!desc) {
+		uap->dmarx.running = false;
+		dmaengine_terminate_all(rxchan);
+		return -EBUSY;
+	}
+
+	/* Some data to go along to the callback */
+	desc->callback = pl011_dma_rx_callback;
+	desc->callback_param = uap;
+	dmarx->cookie = dmaengine_submit(desc);
+	dma_async_issue_pending(rxchan);
+
+	uap->dmacr |= UART011_RXDMAE;
+	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+	uap->dmarx.running = true;
+
+	uap->im &= ~UART011_RXIM;
+	writew(uap->im, uap->port.membase + UART011_IMSC);
+
+	return 0;
+}
+
+/*
+ * This is called when either the DMA job is complete, or
+ * the FIFO timeout interrupt occurred. This must be called
+ * with the port spinlock uap->port.lock held.
+ */
+static void pl011_dma_rx_chars(struct uart_amba_port *uap,
+			       u32 pending, bool use_buf_b,
+			       bool readfifo)
+{
+	struct tty_struct *tty = uap->port.state->port.tty;
+	struct pl011_sgbuf *sgbuf = use_buf_b ?
+		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+	struct device *dev = uap->dmarx.chan->device->dev;
+	unsigned int status, ch, flag;
+	int dma_count = 0;
+	u32 fifotaken = 0; /* only used for vdbg() */
+
+	/* Pick everything from the DMA first */
+	if (pending) {
+		/* Sync in buffer */
+		dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
+
+		/*
+		 * First take all chars in the DMA pipe, then look in the FIFO.
+		 * Note that tty_insert_flip_buf() tries to take as many chars
+		 * as it can.
+		 */
+		dma_count = tty_insert_flip_string(uap->port.state->port.tty,
+						   sgbuf->buf, pending);
+
+		/* Return buffer to device */
+		dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
+
+		uap->port.icount.rx += dma_count;
+		if (dma_count < pending)
+			dev_warn(uap->port.dev,
+				 "couldn't insert all characters (TTY is full?)\n");
+	}
+
+	/*
+	 * Only continue with trying to read the FIFO if all DMA chars have
+	 * been taken first.
+	 */
+	if (dma_count == pending && readfifo) {
+		/* Clear any error flags */
+		writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
+		       uap->port.membase + UART011_ICR);
+
+		/*
+		 * If we read all the DMA'd characters, and we had an
+		 * incomplete buffer, that could be due to an rx error,
+		 * or maybe we just timed out. Read any pending chars
+		 * and check the error status.
+		 */
+		while (1) {
+			status = readw(uap->port.membase + UART01x_FR);
+			if (status & UART01x_FR_RXFE)
+				break;
+
+			/* Take chars from the FIFO and update status */
+			ch = readw(uap->port.membase + UART01x_DR) |
+				UART_DUMMY_DR_RX;
+			flag = TTY_NORMAL;
+			uap->port.icount.rx++;
+			fifotaken++;
+
+			/*
+			 * Error conditions will only occur in the FIFO,
+			 * these will trigger an immediate interrupt and
+			 * stop the DMA job, so we will always find the
+			 * error in the FIFO, never in the DMA buffer.
+			 */
+			if (unlikely(ch & UART_DR_ERROR)) {
+				if (ch & UART011_DR_BE) {
+					ch &= ~(UART011_DR_FE | UART011_DR_PE);
+					uap->port.icount.brk++;
+					if (uart_handle_break(&uap->port))
+						continue;
+				} else if (ch & UART011_DR_PE)
+					uap->port.icount.parity++;
+				else if (ch & UART011_DR_FE)
+					uap->port.icount.frame++;
+				if (ch & UART011_DR_OE)
+					uap->port.icount.overrun++;
+
+				ch &= uap->port.read_status_mask;
+
+				if (ch & UART011_DR_BE)
+					flag = TTY_BREAK;
+				else if (ch & UART011_DR_PE)
+					flag = TTY_PARITY;
+				else if (ch & UART011_DR_FE)
+					flag = TTY_FRAME;
+			}
+
+			if (uart_handle_sysrq_char(&uap->port, ch & 255))
+				continue;
+
+			uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
+		}
+	}
+
+	spin_unlock(&uap->port.lock);
+	dev_vdbg(uap->port.dev,
+		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
+		 dma_count, fifotaken);
+	tty_flip_buffer_push(tty);
+	spin_lock(&uap->port.lock);
+}
+
+static void pl011_dma_rx_irq(struct uart_amba_port *uap)
+{
+	struct pl011_dmarx_data *dmarx = &uap->dmarx;
+	struct dma_chan *rxchan = dmarx->chan;
+	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
+		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
+	size_t pending;
+	struct dma_tx_state state;
+	enum dma_status dmastat;
+
+	/*
+	 * Pause the transfer so we can trust the current counter,
+	 * do this before we pause the PL011 block, else we may
+	 * overflow the FIFO.
+	 */
+	if (dmaengine_pause(rxchan))
+		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
+	dmastat = rxchan->device->device_tx_status(rxchan,
+						   dmarx->cookie, &state);
+	if (dmastat != DMA_PAUSED)
+		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
+
+	/* Disable RX DMA - incoming data will wait in the FIFO */
+	uap->dmacr &= ~UART011_RXDMAE;
+	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+	uap->dmarx.running = false;
+
+	pending = sgbuf->sg.length - state.residue;
+	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
+	/* Then we terminate the transfer - we now know our residue */
+	dmaengine_terminate_all(rxchan);
+
+	/*
+	 * This will take the chars we have so far and insert
+	 * into the framework.
+	 */
+	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);
+
+	/* Switch buffer & re-trigger DMA job */
+	dmarx->use_buf_b = !dmarx->use_buf_b;
+	if (pl011_dma_rx_trigger_dma(uap)) {
+		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
+			"fall back to interrupt mode\n");
+		uap->im |= UART011_RXIM;
+		writew(uap->im, uap->port.membase + UART011_IMSC);
+	}
+}
+
+static void pl011_dma_rx_callback(void *data)
+{
+	struct uart_amba_port *uap = data;
+	struct pl011_dmarx_data *dmarx = &uap->dmarx;
+	bool lastbuf = dmarx->use_buf_b;
+	int ret;
+
+	/*
+	 * This completion interrupt occurs typically when the
+	 * RX buffer is totally stuffed but no timeout has yet
+	 * occurred. When that happens, we just want the RX
+	 * routine to flush out the secondary DMA buffer while
+	 * we immediately trigger the next DMA job.
+	 */
+	spin_lock_irq(&uap->port.lock);
+	uap->dmarx.running = false;
+	dmarx->use_buf_b = !lastbuf;
+	ret = pl011_dma_rx_trigger_dma(uap);
+
+	pl011_dma_rx_chars(uap, PL011_DMA_BUFFER_SIZE, lastbuf, false);
+	spin_unlock_irq(&uap->port.lock);
+	/*
+	 * Do this check after we picked the DMA chars so we don't
+	 * get some IRQ immediately from RX.
+	 */
+	if (ret) {
+		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
+			"fall back to interrupt mode\n");
+		uap->im |= UART011_RXIM;
+		writew(uap->im, uap->port.membase + UART011_IMSC);
+	}
+}
+
+/*
+ * Stop accepting received characters, when we're shutting down or
+ * suspending this port.
+ * Locking: called with port lock held and IRQs disabled.
+ */
+static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
+{
+	/* FIXME. Just disable the DMA enable */
+	uap->dmacr &= ~UART011_RXDMAE;
+	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+}
 
 static void pl011_dma_startup(struct uart_amba_port *uap)
 {
+	int ret;
+
 	if (!uap->dmatx.chan)
 		return;
 
@@ -525,8 +842,33 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
 
 	/* The DMA buffer is now the FIFO the TTY subsystem can use */
 	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
-	uap->using_dma = true;
+	uap->using_tx_dma = true;
+
+	if (!uap->dmarx.chan)
+		goto skip_rx;
+
+	/* Allocate and map DMA RX buffers */
+	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+			       DMA_FROM_DEVICE);
+	if (ret) {
+		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
+			"RX buffer A", ret);
+		goto skip_rx;
+	}
+
+	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
+			       DMA_FROM_DEVICE);
+	if (ret) {
+		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
+			"RX buffer B", ret);
+		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+				 DMA_FROM_DEVICE);
+		goto skip_rx;
+	}
+
+	uap->using_rx_dma = true;
 
+skip_rx:
 	/* Turn on DMA error (RX/TX will be enabled on demand) */
 	uap->dmacr |= UART011_DMAONERR;
 	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
@@ -539,11 +881,17 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
 	if (uap->vendor->dma_threshold)
 		writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
 		       uap->port.membase + ST_UART011_DMAWM);
+
+	if (uap->using_rx_dma) {
+		if (pl011_dma_rx_trigger_dma(uap))
+			dev_dbg(uap->port.dev, "could not trigger initial "
+				"RX DMA job, fall back to interrupt mode\n");
+	}
 }
 
 static void pl011_dma_shutdown(struct uart_amba_port *uap)
 {
-	if (!uap->using_dma)
+	if (!(uap->using_tx_dma || uap->using_rx_dma))
 		return;
 
 	/* Disable RX and TX DMA */
@@ -555,19 +903,39 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
 	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
 	spin_unlock_irq(&uap->port.lock);
 
-	/* In theory, this should already be done by pl011_dma_flush_buffer */
-	dmaengine_terminate_all(uap->dmatx.chan);
-	if (uap->dmatx.queued) {
-		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
-			     DMA_TO_DEVICE);
-		uap->dmatx.queued = false;
+	if (uap->using_tx_dma) {
+		/* In theory, this should already be done by pl011_dma_flush_buffer */
+		dmaengine_terminate_all(uap->dmatx.chan);
+		if (uap->dmatx.queued) {
+			dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
+				     DMA_TO_DEVICE);
+			uap->dmatx.queued = false;
+		}
+
+		kfree(uap->dmatx.buf);
+		uap->using_tx_dma = false;
+	}
+
+	if (uap->using_rx_dma) {
+		dmaengine_terminate_all(uap->dmarx.chan);
+		/* Clean up the RX DMA */
+		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
+		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
+		uap->using_rx_dma = false;
 	}
+}
 
-	kfree(uap->dmatx.buf);
+static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
+{
+	return uap->using_rx_dma;
+}
 
-	uap->using_dma = false;
+static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
+{
+	return uap->using_rx_dma && uap->dmarx.running;
 }
 
+
 #else
 /* Blank functions if the DMA engine is not available */
 static inline void pl011_dma_probe(struct uart_amba_port *uap)
@@ -600,6 +968,29 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
 	return false;
 }
 
+static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
+{
+}
+
+static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
+{
+}
+
+static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
+{
+	return -EIO;
+}
+
+static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
+{
+	return false;
+}
+
+static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
+{
+	return false;
+}
+
 #define pl011_dma_flush_buffer NULL
 #endif
 
@@ -630,6 +1021,8 @@ static void pl011_stop_rx(struct uart_port *port)
 	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
 		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
 	writew(uap->im, uap->port.membase + UART011_IMSC);
+
+	pl011_dma_rx_stop(uap);
 }
 
 static void pl011_enable_ms(struct uart_port *port)
@@ -688,6 +1081,19 @@ static void pl011_rx_chars(struct uart_amba_port *uap)
 	}
 	spin_unlock(&uap->port.lock);
 	tty_flip_buffer_push(tty);
+	/*
+	 * If we were temporarily out of DMA mode for a while,
+	 * attempt to switch back to DMA mode again.
+	 */
+	if (pl011_dma_rx_available(uap)) {
+		if (pl011_dma_rx_trigger_dma(uap)) {
+			dev_dbg(uap->port.dev, "could not trigger RX DMA job "
+				"fall back to interrupt mode again\n");
+			uap->im |= UART011_RXIM;
+		} else
+			uap->im &= ~UART011_RXIM;
+		writew(uap->im, uap->port.membase + UART011_IMSC);
+	}
 	spin_lock(&uap->port.lock);
 }
 
@@ -767,8 +1173,12 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
 			  UART011_RXIS),
 		       uap->port.membase + UART011_ICR);
 
-		if (status & (UART011_RTIS|UART011_RXIS))
-			pl011_rx_chars(uap);
+		if (status & (UART011_RTIS|UART011_RXIS)) {
+			if (pl011_dma_rx_running(uap))
+				pl011_dma_rx_irq(uap);
+			else
+				pl011_rx_chars(uap);
+		}
 		if (status & (UART011_DSRMIS|UART011_DCDMIS|
 			      UART011_CTSMIS|UART011_RIMIS))
 			pl011_modem_status(uap);
@@ -945,10 +1355,14 @@ static int pl011_startup(struct uart_port *port)
 	pl011_dma_startup(uap);
 
 	/*
-	 * Finally, enable interrupts
+	 * Finally, enable interrupts, only timeouts when using DMA
+	 * if initial RX DMA job failed, start in interrupt mode
+	 * as well.
 	 */
 	spin_lock_irq(&uap->port.lock);
-	uap->im = UART011_RXIM | UART011_RTIM;
+	uap->im = UART011_RTIM;
+	if (!pl011_dma_rx_running(uap))
+		uap->im |= UART011_RXIM;
 	writew(uap->im, uap->port.membase + UART011_IMSC);
 	spin_unlock_irq(&uap->port.lock);
 