diff options
author | Huang Shijie <b32955@freescale.com> | 2014-05-23 00:40:40 -0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2014-05-28 15:41:32 -0400 |
commit | e2f2786606d49d3aae545c61c04757a64cf7e5f0 (patch) | |
tree | 6ac161033ca9b36ddaa3a87bbf3a956f967be000 /drivers/tty | |
parent | 8eccd0cd2106fbe0acc6bec3701e69e171353f25 (diff) |
serial: imx: remove the DMA wait queue
The DMA wait queue makes the code very complicated:
For RX, the @->stop_rx hook does not really stop the RX;
For TX, the @->stop_tx hook does not really stop the TX.
The above makes imx_shutdown have to wait for the RX/TX DMA to finish.
In order to make code more simple, this patch removes the DMA wait queue.
By calling dmaengine_terminate_all, this patch makes the RX stop
immediately after we call the @->stop_rx hook, and likewise for the TX.
Signed-off-by: Huang Shijie <b32955@freescale.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/tty')
-rw-r--r-- | drivers/tty/serial/imx.c | 42 |
1 files changed, 14 insertions, 28 deletions
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index d373fe83da52..cdaeeeee6cec 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c | |||
@@ -225,7 +225,6 @@ struct imx_port { | |||
225 | void *rx_buf; | 225 | void *rx_buf; |
226 | unsigned int tx_bytes; | 226 | unsigned int tx_bytes; |
227 | unsigned int dma_tx_nents; | 227 | unsigned int dma_tx_nents; |
228 | wait_queue_head_t dma_wait; | ||
229 | }; | 228 | }; |
230 | 229 | ||
231 | struct imx_port_ucrs { | 230 | struct imx_port_ucrs { |
@@ -416,12 +415,10 @@ static void imx_stop_tx(struct uart_port *port) | |||
416 | return; | 415 | return; |
417 | } | 416 | } |
418 | 417 | ||
419 | /* | 418 | if (sport->dma_is_enabled && sport->dma_is_txing) { |
420 | * We are maybe in the SMP context, so if the DMA TX thread is running | 419 | dmaengine_terminate_all(sport->dma_chan_tx); |
421 | * on other cpu, we have to wait for it to finish. | 420 | sport->dma_is_txing = 0; |
422 | */ | 421 | } |
423 | if (sport->dma_is_enabled && sport->dma_is_txing) | ||
424 | return; | ||
425 | 422 | ||
426 | temp = readl(sport->port.membase + UCR1); | 423 | temp = readl(sport->port.membase + UCR1); |
427 | writel(temp & ~UCR1_TXMPTYEN, sport->port.membase + UCR1); | 424 | writel(temp & ~UCR1_TXMPTYEN, sport->port.membase + UCR1); |
@@ -435,12 +432,10 @@ static void imx_stop_rx(struct uart_port *port) | |||
435 | struct imx_port *sport = (struct imx_port *)port; | 432 | struct imx_port *sport = (struct imx_port *)port; |
436 | unsigned long temp; | 433 | unsigned long temp; |
437 | 434 | ||
438 | /* | 435 | if (sport->dma_is_enabled && sport->dma_is_rxing) { |
439 | * We are maybe in the SMP context, so if the DMA TX thread is running | 436 | dmaengine_terminate_all(sport->dma_chan_rx); |
440 | * on other cpu, we have to wait for it to finish. | 437 | sport->dma_is_rxing = 0; |
441 | */ | 438 | } |
442 | if (sport->dma_is_enabled && sport->dma_is_rxing) | ||
443 | return; | ||
444 | 439 | ||
445 | temp = readl(sport->port.membase + UCR2); | 440 | temp = readl(sport->port.membase + UCR2); |
446 | writel(temp & ~UCR2_RXEN, sport->port.membase + UCR2); | 441 | writel(temp & ~UCR2_RXEN, sport->port.membase + UCR2); |
@@ -497,12 +492,6 @@ static void dma_tx_callback(void *data) | |||
497 | dev_dbg(sport->port.dev, "we finish the TX DMA.\n"); | 492 | dev_dbg(sport->port.dev, "we finish the TX DMA.\n"); |
498 | 493 | ||
499 | uart_write_wakeup(&sport->port); | 494 | uart_write_wakeup(&sport->port); |
500 | |||
501 | if (waitqueue_active(&sport->dma_wait)) { | ||
502 | wake_up(&sport->dma_wait); | ||
503 | dev_dbg(sport->port.dev, "exit in %s.\n", __func__); | ||
504 | return; | ||
505 | } | ||
506 | } | 495 | } |
507 | 496 | ||
508 | static void imx_dma_tx(struct imx_port *sport) | 497 | static void imx_dma_tx(struct imx_port *sport) |
@@ -875,10 +864,6 @@ static void imx_rx_dma_done(struct imx_port *sport) | |||
875 | writel(temp, sport->port.membase + UCR1); | 864 | writel(temp, sport->port.membase + UCR1); |
876 | 865 | ||
877 | sport->dma_is_rxing = 0; | 866 | sport->dma_is_rxing = 0; |
878 | |||
879 | /* Is the shutdown waiting for us? */ | ||
880 | if (waitqueue_active(&sport->dma_wait)) | ||
881 | wake_up(&sport->dma_wait); | ||
882 | } | 867 | } |
883 | 868 | ||
884 | /* | 869 | /* |
@@ -1025,8 +1010,6 @@ static void imx_enable_dma(struct imx_port *sport) | |||
1025 | { | 1010 | { |
1026 | unsigned long temp; | 1011 | unsigned long temp; |
1027 | 1012 | ||
1028 | init_waitqueue_head(&sport->dma_wait); | ||
1029 | |||
1030 | /* set UCR1 */ | 1013 | /* set UCR1 */ |
1031 | temp = readl(sport->port.membase + UCR1); | 1014 | temp = readl(sport->port.membase + UCR1); |
1032 | temp |= UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN | | 1015 | temp |= UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN | |
@@ -1218,10 +1201,13 @@ static void imx_shutdown(struct uart_port *port) | |||
1218 | unsigned long flags; | 1201 | unsigned long flags; |
1219 | 1202 | ||
1220 | if (sport->dma_is_enabled) { | 1203 | if (sport->dma_is_enabled) { |
1221 | /* We have to wait for the DMA to finish. */ | 1204 | /* |
1222 | wait_event(sport->dma_wait, | 1205 | * The upper layer may does not call the @->stop_tx and |
1223 | !sport->dma_is_rxing && !sport->dma_is_txing); | 1206 | * @->stop_rx, so we call them ourselves. |
1207 | */ | ||
1208 | imx_stop_tx(port); | ||
1224 | imx_stop_rx(port); | 1209 | imx_stop_rx(port); |
1210 | |||
1225 | imx_disable_dma(sport); | 1211 | imx_disable_dma(sport); |
1226 | imx_uart_dma_exit(sport); | 1212 | imx_uart_dma_exit(sport); |
1227 | } | 1213 | } |