author		Huang Shijie <b32955@freescale.com>	2013-10-15 03:23:40 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-10-16 21:25:30 -0400
commit		7cb92fd2a0515ea2ae905bf6c90a84aed2b78ffb
tree		03787b19a1187f87a3d12b99b098fd40a98018d7 /drivers/tty
parent		f0ef8834b280ebb6c271f155ea040bf4af6c1881
serial: imx: optimization: remove the workqueues for DMA
I worried that the delay of sdma_run_channel0() might be too long for interrupt context, so I added workqueues for the RX/TX DMA. But testing with a Bluetooth device shows that the delay of sdma_run_channel0() is only about 8us (measured on an imx6dl sabreauto board). I think this delay is acceptable.

This patch removes the RX/TX workqueues for DMA, which makes the code clearer.

Signed-off-by: Huang Shijie <b32955@freescale.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
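For context, the pattern this patch removes is the common one of deferring work from an interrupt handler to process context via the system workqueue. A minimal sketch of that deferral pattern follows; the foo_* names are hypothetical and illustrative, not the driver's actual code:

	#include <linux/interrupt.h>
	#include <linux/workqueue.h>

	/* Hypothetical per-port state carrying the deferred work item. */
	struct foo_port {
		struct work_struct tx_work;
	};

	/* Runs later in process context, where sleeping is allowed. */
	static void foo_tx_work(struct work_struct *w)
	{
		struct foo_port *port = container_of(w, struct foo_port, tx_work);
		/* ... set up and kick the DMA transfer ... */
	}

	static irqreturn_t foo_irq(int irq, void *dev_id)
	{
		struct foo_port *port = dev_id;

		/* Defer the real work; this adds scheduling latency per transfer. */
		schedule_work(&port->tx_work);
		return IRQ_HANDLED;
	}

	static void foo_setup(struct foo_port *port)
	{
		INIT_WORK(&port->tx_work, foo_tx_work);
	}

Assuming the DMA start path neither sleeps nor blocks for long, which the ~8us measurement above supports, calling it directly from interrupt context avoids the scheduling latency and the extra per-port state. That is what the diff below does with imx_dma_tx() and start_rx_dma().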
Diffstat (limited to 'drivers/tty')
-rw-r--r--	drivers/tty/serial/imx.c	54
1 file changed, 10 insertions(+), 44 deletions(-)
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 13c2c2d09cac..bb84f8d4c54e 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -223,8 +223,7 @@ struct imx_port {
 	struct dma_chan		*dma_chan_rx, *dma_chan_tx;
 	struct scatterlist	rx_sgl, tx_sgl[2];
 	void			*rx_buf;
-	unsigned int		rx_bytes, tx_bytes;
-	struct work_struct	tsk_dma_rx, tsk_dma_tx;
+	unsigned int		tx_bytes;
 	unsigned int		dma_tx_nents;
 	wait_queue_head_t	dma_wait;
 };
@@ -505,32 +504,23 @@ static void dma_tx_callback(void *data)
 		dev_dbg(sport->port.dev, "exit in %s.\n", __func__);
 		return;
 	}
-
-	schedule_work(&sport->tsk_dma_tx);
 }
 
-static void dma_tx_work(struct work_struct *w)
+static void imx_dma_tx(struct imx_port *sport)
 {
-	struct imx_port *sport = container_of(w, struct imx_port, tsk_dma_tx);
 	struct circ_buf *xmit = &sport->port.state->xmit;
 	struct scatterlist *sgl = sport->tx_sgl;
 	struct dma_async_tx_descriptor *desc;
 	struct dma_chan *chan = sport->dma_chan_tx;
 	struct device *dev = sport->port.dev;
 	enum dma_status status;
-	unsigned long flags;
 	int ret;
 
 	status = dmaengine_tx_status(chan, (dma_cookie_t)0, NULL);
 	if (DMA_IN_PROGRESS == status)
 		return;
 
-	spin_lock_irqsave(&sport->port.lock, flags);
 	sport->tx_bytes = uart_circ_chars_pending(xmit);
-	if (sport->tx_bytes == 0) {
-		spin_unlock_irqrestore(&sport->port.lock, flags);
-		return;
-	}
 
 	if (xmit->tail > xmit->head && xmit->head > 0) {
 		sport->dma_tx_nents = 2;
@@ -542,7 +532,6 @@ static void dma_tx_work(struct work_struct *w)
 		sport->dma_tx_nents = 1;
 		sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
 	}
-	spin_unlock_irqrestore(&sport->port.lock, flags);
 
 	ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
 	if (ret == 0) {
@@ -609,11 +598,7 @@ static void imx_start_tx(struct uart_port *port)
 	}
 
 	if (sport->dma_is_enabled) {
-		/*
-		 * We may in the interrupt context, so arise a work_struct to
-		 * do the real job.
-		 */
-		schedule_work(&sport->tsk_dma_tx);
+		imx_dma_tx(sport);
 		return;
 	}
 
@@ -732,6 +717,7 @@ out:
 	return IRQ_HANDLED;
 }
 
+static int start_rx_dma(struct imx_port *sport);
 /*
  * If the RXFIFO is filled with some data, and then we
  * arise a DMA operation to receive them.
@@ -750,7 +736,7 @@ static void imx_dma_rxint(struct imx_port *sport)
 		writel(temp, sport->port.membase + UCR1);
 
 		/* tell the DMA to receive the data. */
-		schedule_work(&sport->tsk_dma_rx);
+		start_rx_dma(sport);
 	}
 }
 
@@ -872,22 +858,6 @@ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
 }
 
 #define RX_BUF_SIZE	(PAGE_SIZE)
-static int start_rx_dma(struct imx_port *sport);
-static void dma_rx_work(struct work_struct *w)
-{
-	struct imx_port *sport = container_of(w, struct imx_port, tsk_dma_rx);
-	struct tty_port *port = &sport->port.state->port;
-
-	if (sport->rx_bytes) {
-		tty_insert_flip_string(port, sport->rx_buf, sport->rx_bytes);
-		tty_flip_buffer_push(port);
-		sport->rx_bytes = 0;
-	}
-
-	if (sport->dma_is_rxing)
-		start_rx_dma(sport);
-}
-
 static void imx_rx_dma_done(struct imx_port *sport)
 {
 	unsigned long temp;
@@ -919,6 +889,7 @@ static void dma_rx_callback(void *data)
 	struct imx_port *sport = data;
 	struct dma_chan *chan = sport->dma_chan_rx;
 	struct scatterlist *sgl = &sport->rx_sgl;
+	struct tty_port *port = &sport->port.state->port;
 	struct dma_tx_state state;
 	enum dma_status status;
 	unsigned int count;
@@ -931,8 +902,10 @@ static void dma_rx_callback(void *data)
 	dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
 
 	if (count) {
-		sport->rx_bytes = count;
-		schedule_work(&sport->tsk_dma_rx);
+		tty_insert_flip_string(port, sport->rx_buf, count);
+		tty_flip_buffer_push(port);
+
+		start_rx_dma(sport);
 	} else
 		imx_rx_dma_done(sport);
 }
@@ -1014,7 +987,6 @@ static int imx_uart_dma_init(struct imx_port *sport)
 		ret = -ENOMEM;
 		goto err;
 	}
-	sport->rx_bytes = 0;
 
 	/* Prepare for TX : */
 	sport->dma_chan_tx = dma_request_slave_channel(dev, "tx");
@@ -1045,11 +1017,7 @@ err:
 static void imx_enable_dma(struct imx_port *sport)
 {
 	unsigned long temp;
-	struct tty_port *port = &sport->port.state->port;
 
-	port->low_latency = 1;
-	INIT_WORK(&sport->tsk_dma_tx, dma_tx_work);
-	INIT_WORK(&sport->tsk_dma_rx, dma_rx_work);
 	init_waitqueue_head(&sport->dma_wait);
 
 	/* set UCR1 */
@@ -1070,7 +1038,6 @@ static void imx_enable_dma(struct imx_port *sport)
 static void imx_disable_dma(struct imx_port *sport)
 {
 	unsigned long temp;
-	struct tty_port *port = &sport->port.state->port;
 
 	/* clear UCR1 */
 	temp = readl(sport->port.membase + UCR1);
@@ -1088,7 +1055,6 @@ static void imx_disable_dma(struct imx_port *sport)
 	writel(temp, sport->port.membase + UCR4);
 
 	sport->dma_is_enabled = 0;
-	port->low_latency = 0;
 }
 
 /* half the RX buffer size */