author		Huang Shijie <b32955@freescale.com>	2013-07-08 05:14:18 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-07-26 18:44:11 -0400
commit		b4cdc8f61beb2a55c8c3d22dfcaf5f34a919fe9b
tree		453d7ce7d95b2ac99588ac7f5848932cf95b0cdf /drivers/tty/serial/imx.c
parent		09bd00f6e9a434727e4bfe93b0498c5d893c1906
serial: imx: add DMA support for imx6q
We only enable DMA support when the following conditions are met:

[1] The UART port supports hardware flow control (CTS/RTS).
    (Some UART ports do not have CTS/RTS pins.)
[2] The application enables CTS/RTS.
[3] The SoC is an imx6q. Due to the SDMA firmware's limitations, we do
    not support DMA on anything but the imx6q platform.
[4] The UART is not used as a console.

Signed-off-by: Huang Shijie <b32955@freescale.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/tty/serial/imx.c')
-rw-r--r--	drivers/tty/serial/imx.c	405
1 file changed, 400 insertions(+), 5 deletions(-)
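
In code, the four conditions collapse into one check inside imx_set_termios()
(see the final hunks below). A minimal sketch of that gating logic, using the
names from this patch and eliding the rest of the function:

	/* Sketch: DMA is set up only when all four conditions hold.
	 * The have_rtscts branch of imx_set_termios() covers [1] and [2],
	 * is_imx6q_uart() covers [3], and !uart_console() covers [4].
	 */
	if (sport->have_rtscts &&		/* [1] + [2]: CTS/RTS usable and enabled */
	    is_imx6q_uart(sport) &&		/* [3]: imx6q only */
	    !uart_console(&sport->port) &&	/* [4]: not a console */
	    !sport->dma_is_inited)		/* init only once */
		imx_uart_dma_init(sport);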
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 79d1943c2e79..90655b875bab 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -48,9 +48,11 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/io.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/irq.h>
 #include <linux/platform_data/serial-imx.h>
+#include <linux/platform_data/dma-imx.h>
 
 /* Register definitions */
 #define URXD0 0x0 /* Receiver Register */
@@ -82,6 +84,7 @@
 #define UCR1_ADBR	(1<<14)	/* Auto detect baud rate */
 #define UCR1_TRDYEN	(1<<13)	/* Transmitter ready interrupt enable */
 #define UCR1_IDEN	(1<<12)	/* Idle condition interrupt */
+#define UCR1_ICD_REG(x)	(((x) & 3) << 10) /* idle condition detect */
 #define UCR1_RRDYEN	(1<<9)	/* Recv ready interrupt enable */
 #define UCR1_RDMAEN	(1<<8)	/* Recv ready DMA enable */
 #define UCR1_IREN	(1<<7)	/* Infrared interface enable */
@@ -90,6 +93,7 @@
 #define UCR1_SNDBRK	(1<<4)	/* Send break */
 #define UCR1_TDMAEN	(1<<3)	/* Transmitter ready DMA enable */
 #define IMX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, i.mx1 only */
+#define UCR1_ATDMAEN	(1<<2)	/* Aging DMA Timer Enable */
 #define UCR1_DOZE	(1<<1)	/* Doze */
 #define UCR1_UARTEN	(1<<0)	/* UART enabled */
 #define UCR2_ESCI	(1<<15)	/* Escape seq interrupt enable */
@@ -125,6 +129,7 @@
 #define UCR4_ENIRI	(1<<8)	/* Serial infrared interrupt enable */
 #define UCR4_WKEN	(1<<7)	/* Wake interrupt enable */
 #define UCR4_REF16	(1<<6)	/* Ref freq 16 MHz */
+#define UCR4_IDDMAEN	(1<<6)	/* DMA IDLE Condition Detected */
 #define UCR4_IRSC	(1<<5)	/* IR special case */
 #define UCR4_TCEN	(1<<3)	/* Transmit complete interrupt enable */
 #define UCR4_BKEN	(1<<2)	/* Break condition interrupt enable */
@@ -209,6 +214,19 @@ struct imx_port {
 	struct clk		*clk_ipg;
 	struct clk		*clk_per;
 	const struct imx_uart_data *devdata;
+
+	/* DMA fields */
+	unsigned int		dma_is_inited:1;
+	unsigned int		dma_is_enabled:1;
+	unsigned int		dma_is_rxing:1;
+	unsigned int		dma_is_txing:1;
+	struct dma_chan		*dma_chan_rx, *dma_chan_tx;
+	struct scatterlist	rx_sgl, tx_sgl[2];
+	void			*rx_buf;
+	unsigned int		rx_bytes, tx_bytes;
+	struct work_struct	tsk_dma_rx, tsk_dma_tx;
+	unsigned int		dma_tx_nents;
+	wait_queue_head_t	dma_wait;
 };
 
 struct imx_port_ucrs {
@@ -399,6 +417,13 @@ static void imx_stop_tx(struct uart_port *port)
 		return;
 	}
 
+	/*
+	 * We may be in SMP context, so if the DMA TX thread is running
+	 * on another CPU, we have to wait for it to finish.
+	 */
+	if (sport->dma_is_enabled && sport->dma_is_txing)
+		return;
+
 	temp = readl(sport->port.membase + UCR1);
 	writel(temp & ~UCR1_TXMPTYEN, sport->port.membase + UCR1);
 }
@@ -411,6 +436,13 @@ static void imx_stop_rx(struct uart_port *port)
 	struct imx_port *sport = (struct imx_port *)port;
 	unsigned long temp;
 
+	/*
+	 * We may be in SMP context, so if the DMA RX thread is running
+	 * on another CPU, we have to wait for it to finish.
+	 */
+	if (sport->dma_is_enabled && sport->dma_is_rxing)
+		return;
+
 	temp = readl(sport->port.membase + UCR2);
 	writel(temp & ~UCR2_RXEN, sport->port.membase + UCR2);
 }
@@ -446,6 +478,95 @@ static inline void imx_transmit_buffer(struct imx_port *sport)
 		imx_stop_tx(&sport->port);
 }
 
+static void dma_tx_callback(void *data)
+{
+	struct imx_port *sport = data;
+	struct scatterlist *sgl = &sport->tx_sgl[0];
+	struct circ_buf *xmit = &sport->port.state->xmit;
+	unsigned long flags;
+
+	dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+
+	sport->dma_is_txing = 0;
+
+	/* update the stats */
+	spin_lock_irqsave(&sport->port.lock, flags);
+	xmit->tail = (xmit->tail + sport->tx_bytes) & (UART_XMIT_SIZE - 1);
+	sport->port.icount.tx += sport->tx_bytes;
+	spin_unlock_irqrestore(&sport->port.lock, flags);
+
+	dev_dbg(sport->port.dev, "we finish the TX DMA.\n");
+
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(&sport->port);
+
+	if (waitqueue_active(&sport->dma_wait)) {
+		wake_up(&sport->dma_wait);
+		dev_dbg(sport->port.dev, "exit in %s.\n", __func__);
+		return;
+	}
+
+	schedule_work(&sport->tsk_dma_tx);
+}
+
+static void dma_tx_work(struct work_struct *w)
+{
+	struct imx_port *sport = container_of(w, struct imx_port, tsk_dma_tx);
+	struct circ_buf *xmit = &sport->port.state->xmit;
+	struct scatterlist *sgl = sport->tx_sgl;
+	struct dma_async_tx_descriptor *desc;
+	struct dma_chan *chan = sport->dma_chan_tx;
+	struct device *dev = sport->port.dev;
+	enum dma_status status;
+	unsigned long flags;
+	int ret;
+
+	status = chan->device->device_tx_status(chan, (dma_cookie_t)0, NULL);
+	if (status == DMA_IN_PROGRESS)
+		return;
+
+	spin_lock_irqsave(&sport->port.lock, flags);
+	sport->tx_bytes = uart_circ_chars_pending(xmit);
+	if (sport->tx_bytes == 0) {
+		spin_unlock_irqrestore(&sport->port.lock, flags);
+		return;
+	}
+
+	if (xmit->tail > xmit->head) {
+		sport->dma_tx_nents = 2;
+		sg_init_table(sgl, 2);
+		sg_set_buf(sgl, xmit->buf + xmit->tail,
+				UART_XMIT_SIZE - xmit->tail);
+		sg_set_buf(sgl + 1, xmit->buf, xmit->head);
+	} else {
+		sport->dma_tx_nents = 1;
+		sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
+	}
+	spin_unlock_irqrestore(&sport->port.lock, flags);
+
+	ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+	if (ret == 0) {
+		dev_err(dev, "DMA mapping error for TX.\n");
+		return;
+	}
+	desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents,
+					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+	if (!desc) {
+		dev_err(dev, "We cannot prepare for the TX slave dma!\n");
+		return;
+	}
+	desc->callback = dma_tx_callback;
+	desc->callback_param = sport;
+
+	dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n",
+			uart_circ_chars_pending(xmit));
+	/* fire it */
+	sport->dma_is_txing = 1;
+	dmaengine_submit(desc);
+	dma_async_issue_pending(chan);
+	return;
+}
+
 /*
  * interrupts disabled on entry
  */
@@ -472,8 +593,10 @@ static void imx_start_tx(struct uart_port *port)
 		temp |= UCR4_OREN;
 		writel(temp, sport->port.membase + UCR4);
 
-	temp = readl(sport->port.membase + UCR1);
-	writel(temp | UCR1_TXMPTYEN, sport->port.membase + UCR1);
+	if (!sport->dma_is_enabled) {
+		temp = readl(sport->port.membase + UCR1);
+		writel(temp | UCR1_TXMPTYEN, sport->port.membase + UCR1);
+	}
 
 	if (USE_IRDA(sport)) {
 		temp = readl(sport->port.membase + UCR1);
@@ -485,6 +608,15 @@ static void imx_start_tx(struct uart_port *port)
 		writel(temp, sport->port.membase + UCR4);
 	}
 
+	if (sport->dma_is_enabled) {
+		/*
+		 * We may be in interrupt context, so schedule a
+		 * work_struct to do the real job.
+		 */
+		schedule_work(&sport->tsk_dma_tx);
+		return;
+	}
+
 	if (readl(sport->port.membase + uts_reg(sport)) & UTS_TXEMPTY)
 		imx_transmit_buffer(sport);
 }
@@ -600,6 +732,28 @@ out:
 	return IRQ_HANDLED;
 }
 
+/*
+ * If the RXFIFO is filled with some data, start a DMA operation
+ * to receive it.
+ */
+static void imx_dma_rxint(struct imx_port *sport)
+{
+	unsigned long temp;
+
+	temp = readl(sport->port.membase + USR2);
+	if ((temp & USR2_RDR) && !sport->dma_is_rxing) {
+		sport->dma_is_rxing = 1;
+
+		/* disable the Receiver Ready Interrupt */
+		temp = readl(sport->port.membase + UCR1);
+		temp &= ~(UCR1_RRDYEN);
+		writel(temp, sport->port.membase + UCR1);
+
+		/* tell the DMA to receive the data. */
+		schedule_work(&sport->tsk_dma_rx);
+	}
+}
+
 static irqreturn_t imx_int(int irq, void *dev_id)
 {
 	struct imx_port *sport = dev_id;
@@ -608,8 +762,12 @@ static irqreturn_t imx_int(int irq, void *dev_id)
 
 	sts = readl(sport->port.membase + USR1);
 
-	if (sts & USR1_RRDY)
-		imx_rxint(irq, dev_id);
+	if (sts & USR1_RRDY) {
+		if (sport->dma_is_enabled)
+			imx_dma_rxint(sport);
+		else
+			imx_rxint(irq, dev_id);
+	}
 
 	if (sts & USR1_TRDY &&
 	    readl(sport->port.membase + UCR1) & UCR1_TXMPTYEN)
@@ -666,7 +824,8 @@ static void imx_set_mctrl(struct uart_port *port, unsigned int mctrl)
 	temp = readl(sport->port.membase + UCR2) & ~UCR2_CTS;
 
 	if (mctrl & TIOCM_RTS)
-		temp |= UCR2_CTS;
+		if (!sport->dma_is_enabled)
+			temp |= UCR2_CTS;
 
 	writel(temp, sport->port.membase + UCR2);
 }
@@ -705,6 +864,226 @@ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
 	return 0;
 }
 
+#define RX_BUF_SIZE	(PAGE_SIZE)
+static int start_rx_dma(struct imx_port *sport);
+static void dma_rx_work(struct work_struct *w)
+{
+	struct imx_port *sport = container_of(w, struct imx_port, tsk_dma_rx);
+	struct tty_port *port = &sport->port.state->port;
+
+	if (sport->rx_bytes) {
+		tty_insert_flip_string(port, sport->rx_buf, sport->rx_bytes);
+		tty_flip_buffer_push(port);
+		sport->rx_bytes = 0;
+	}
+
+	if (sport->dma_is_rxing)
+		start_rx_dma(sport);
+}
+
+static void imx_rx_dma_done(struct imx_port *sport)
+{
+	unsigned long temp;
+
+	/* Enable this interrupt when the RXFIFO is empty. */
+	temp = readl(sport->port.membase + UCR1);
+	temp |= UCR1_RRDYEN;
+	writel(temp, sport->port.membase + UCR1);
+
+	sport->dma_is_rxing = 0;
+
+	/* Is the shutdown waiting for us? */
+	if (waitqueue_active(&sport->dma_wait))
+		wake_up(&sport->dma_wait);
+}
+
+/*
+ * There are three kinds of RX DMA interrupts (such as on the MX6Q):
+ * [1] the RX DMA buffer is full.
+ * [2] the Aging timer expires (after waiting for 8 bytes).
+ * [3] the Idle Condition Detect fires (enabled by UCR4_IDDMAEN).
+ *
+ * [2] triggers when a character has been sitting in the FIFO for a
+ * while, whereas [3] can wait for 32 frames when the RX line is in
+ * the IDLE state and the RxFIFO is empty.
+ */
+static void dma_rx_callback(void *data)
+{
+	struct imx_port *sport = data;
+	struct dma_chan *chan = sport->dma_chan_rx;
+	struct scatterlist *sgl = &sport->rx_sgl;
+	struct dma_tx_state state;
+	enum dma_status status;
+	unsigned int count;
+
+	/* unmap it first */
+	dma_unmap_sg(sport->port.dev, sgl, 1, DMA_FROM_DEVICE);
+
+	status = chan->device->device_tx_status(chan, (dma_cookie_t)0, &state);
+	count = RX_BUF_SIZE - state.residue;
+	dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
+
+	if (count) {
+		sport->rx_bytes = count;
+		schedule_work(&sport->tsk_dma_rx);
+	} else
+		imx_rx_dma_done(sport);
+}
+
+static int start_rx_dma(struct imx_port *sport)
+{
+	struct scatterlist *sgl = &sport->rx_sgl;
+	struct dma_chan *chan = sport->dma_chan_rx;
+	struct device *dev = sport->port.dev;
+	struct dma_async_tx_descriptor *desc;
+	int ret;
+
+	sg_init_one(sgl, sport->rx_buf, RX_BUF_SIZE);
+	ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE);
+	if (ret == 0) {
+		dev_err(dev, "DMA mapping error for RX.\n");
+		return -EINVAL;
+	}
+	desc = dmaengine_prep_slave_sg(chan, sgl, 1, DMA_DEV_TO_MEM,
+					DMA_PREP_INTERRUPT);
+	if (!desc) {
+		dev_err(dev, "We cannot prepare for the RX slave dma!\n");
+		return -EINVAL;
+	}
+	desc->callback = dma_rx_callback;
+	desc->callback_param = sport;
+
+	dev_dbg(dev, "RX: prepare for the DMA.\n");
+	dmaengine_submit(desc);
+	dma_async_issue_pending(chan);
+	return 0;
+}
+
+static void imx_uart_dma_exit(struct imx_port *sport)
+{
+	if (sport->dma_chan_rx) {
+		dma_release_channel(sport->dma_chan_rx);
+		sport->dma_chan_rx = NULL;
+
+		kfree(sport->rx_buf);
+		sport->rx_buf = NULL;
+	}
+
+	if (sport->dma_chan_tx) {
+		dma_release_channel(sport->dma_chan_tx);
+		sport->dma_chan_tx = NULL;
+	}
+
+	sport->dma_is_inited = 0;
+}
+
+static int imx_uart_dma_init(struct imx_port *sport)
+{
+	struct dma_slave_config slave_config;
+	struct device *dev = sport->port.dev;
+	int ret;
+
+	/* Prepare for RX: */
+	sport->dma_chan_rx = dma_request_slave_channel(dev, "rx");
+	if (!sport->dma_chan_rx) {
+		dev_dbg(dev, "cannot get the RX DMA channel.\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	slave_config.direction = DMA_DEV_TO_MEM;
+	slave_config.src_addr = sport->port.mapbase + URXD0;
+	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	slave_config.src_maxburst = RXTL;
+	ret = dmaengine_slave_config(sport->dma_chan_rx, &slave_config);
+	if (ret) {
+		dev_err(dev, "error in RX dma configuration.\n");
+		goto err;
+	}
+
+	sport->rx_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!sport->rx_buf) {
+		dev_err(dev, "cannot alloc DMA buffer.\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+	sport->rx_bytes = 0;
+
+	/* Prepare for TX: */
+	sport->dma_chan_tx = dma_request_slave_channel(dev, "tx");
+	if (!sport->dma_chan_tx) {
+		dev_err(dev, "cannot get the TX DMA channel!\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	slave_config.direction = DMA_MEM_TO_DEV;
+	slave_config.dst_addr = sport->port.mapbase + URTX0;
+	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	slave_config.dst_maxburst = TXTL;
+	ret = dmaengine_slave_config(sport->dma_chan_tx, &slave_config);
+	if (ret) {
+		dev_err(dev, "error in TX dma configuration.\n");
+		goto err;
+	}
+
+	sport->dma_is_inited = 1;
+
+	return 0;
+err:
+	imx_uart_dma_exit(sport);
+	return ret;
+}
+
+static void imx_enable_dma(struct imx_port *sport)
+{
+	unsigned long temp;
+	struct tty_port *port = &sport->port.state->port;
+
+	port->low_latency = 1;
+	INIT_WORK(&sport->tsk_dma_tx, dma_tx_work);
+	INIT_WORK(&sport->tsk_dma_rx, dma_rx_work);
+	init_waitqueue_head(&sport->dma_wait);
+
+	/* set UCR1 */
+	temp = readl(sport->port.membase + UCR1);
+	temp |= UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN |
+		/* wait for 32 idle frames for IDDMA interrupt */
+		UCR1_ICD_REG(3);
+	writel(temp, sport->port.membase + UCR1);
+
+	/* set UCR4 */
+	temp = readl(sport->port.membase + UCR4);
+	temp |= UCR4_IDDMAEN;
+	writel(temp, sport->port.membase + UCR4);
+
+	sport->dma_is_enabled = 1;
+}
+
+static void imx_disable_dma(struct imx_port *sport)
+{
+	unsigned long temp;
+	struct tty_port *port = &sport->port.state->port;
+
+	/* clear UCR1 */
+	temp = readl(sport->port.membase + UCR1);
+	temp &= ~(UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN);
+	writel(temp, sport->port.membase + UCR1);
+
+	/* clear UCR2 */
+	temp = readl(sport->port.membase + UCR2);
+	temp &= ~(UCR2_CTSC | UCR2_CTS);
+	writel(temp, sport->port.membase + UCR2);
+
+	/* clear UCR4 */
+	temp = readl(sport->port.membase + UCR4);
+	temp &= ~UCR4_IDDMAEN;
+	writel(temp, sport->port.membase + UCR4);
+
+	sport->dma_is_enabled = 0;
+	port->low_latency = 0;
+}
+
 /* half the RX buffer size */
 #define CTSTL 16
 
@@ -869,6 +1248,15 @@ static void imx_shutdown(struct uart_port *port)
 	unsigned long temp;
 	unsigned long flags;
 
+	if (sport->dma_is_enabled) {
+		/* We have to wait for the DMA to finish. */
+		wait_event(sport->dma_wait,
+			!sport->dma_is_rxing && !sport->dma_is_txing);
+		imx_stop_rx(port);
+		imx_disable_dma(sport);
+		imx_uart_dma_exit(sport);
+	}
+
 	spin_lock_irqsave(&sport->port.lock, flags);
 	temp = readl(sport->port.membase + UCR2);
 	temp &= ~(UCR2_TXEN);
@@ -955,6 +1343,11 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
 	if (sport->have_rtscts) {
 		ucr2 &= ~UCR2_IRTS;
 		ucr2 |= UCR2_CTSC;
+
+		/* Can we enable the DMA support? */
+		if (is_imx6q_uart(sport) && !uart_console(port)
+			&& !sport->dma_is_inited)
+			imx_uart_dma_init(sport);
 	} else {
 		termios->c_cflag &= ~CRTSCTS;
 	}
@@ -1073,6 +1466,8 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
 	if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
 		imx_enable_ms(&sport->port);
 
+	if (sport->dma_is_inited && !sport->dma_is_enabled)
+		imx_enable_dma(sport);
 	spin_unlock_irqrestore(&sport->port.lock, flags);
 }
 