diff options
author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2014-05-29 22:30:54 -0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2014-05-29 22:30:54 -0400 |
commit | 9ce4f8f3f45443922c98e25133b8c9790fc7949a (patch) | |
tree | 0c6411105aa4b61b76b039ebf45dd74abcd1f596 | |
parent | bd71a1c08807966636daf52138119108c12c6061 (diff) |
Revert "serial: imx: remove the DMA wait queue"
This reverts commit e2f2786606d49d3aae545c61c04757a64cf7e5f0.
Huang reports that this patch is broken and should be reverted.
Cc: Huang Shijie <b32955@freescale.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r-- | drivers/tty/serial/imx.c | 42 |
1 file changed, 28 insertions, 14 deletions
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 3b706add81a9..e2f93874989b 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c | |||
@@ -225,6 +225,7 @@ struct imx_port { | |||
225 | void *rx_buf; | 225 | void *rx_buf; |
226 | unsigned int tx_bytes; | 226 | unsigned int tx_bytes; |
227 | unsigned int dma_tx_nents; | 227 | unsigned int dma_tx_nents; |
228 | wait_queue_head_t dma_wait; | ||
228 | }; | 229 | }; |
229 | 230 | ||
230 | struct imx_port_ucrs { | 231 | struct imx_port_ucrs { |
@@ -415,10 +416,12 @@ static void imx_stop_tx(struct uart_port *port) | |||
415 | return; | 416 | return; |
416 | } | 417 | } |
417 | 418 | ||
418 | if (sport->dma_is_enabled && sport->dma_is_txing) { | 419 | /* |
419 | dmaengine_terminate_all(sport->dma_chan_tx); | 420 | * We are maybe in the SMP context, so if the DMA TX thread is running |
420 | sport->dma_is_txing = 0; | 421 | * on other cpu, we have to wait for it to finish. |
421 | } | 422 | */ |
423 | if (sport->dma_is_enabled && sport->dma_is_txing) | ||
424 | return; | ||
422 | 425 | ||
423 | temp = readl(sport->port.membase + UCR1); | 426 | temp = readl(sport->port.membase + UCR1); |
424 | writel(temp & ~UCR1_TXMPTYEN, sport->port.membase + UCR1); | 427 | writel(temp & ~UCR1_TXMPTYEN, sport->port.membase + UCR1); |
@@ -432,10 +435,12 @@ static void imx_stop_rx(struct uart_port *port) | |||
432 | struct imx_port *sport = (struct imx_port *)port; | 435 | struct imx_port *sport = (struct imx_port *)port; |
433 | unsigned long temp; | 436 | unsigned long temp; |
434 | 437 | ||
435 | if (sport->dma_is_enabled && sport->dma_is_rxing) { | 438 | /* |
436 | dmaengine_terminate_all(sport->dma_chan_rx); | 439 | * We are maybe in the SMP context, so if the DMA RX thread is running |
437 | sport->dma_is_rxing = 0; | 440 | * on other cpu, we have to wait for it to finish. |
438 | } | 441 | */ |
442 | if (sport->dma_is_enabled && sport->dma_is_rxing) | ||
443 | return; | ||
439 | 444 | ||
440 | temp = readl(sport->port.membase + UCR2); | 445 | temp = readl(sport->port.membase + UCR2); |
441 | writel(temp & ~UCR2_RXEN, sport->port.membase + UCR2); | 446 | writel(temp & ~UCR2_RXEN, sport->port.membase + UCR2); |
@@ -496,6 +501,12 @@ static void dma_tx_callback(void *data) | |||
496 | dev_dbg(sport->port.dev, "we finish the TX DMA.\n"); | 501 | dev_dbg(sport->port.dev, "we finish the TX DMA.\n"); |
497 | 502 | ||
498 | uart_write_wakeup(&sport->port); | 503 | uart_write_wakeup(&sport->port); |
504 | |||
505 | if (waitqueue_active(&sport->dma_wait)) { | ||
506 | wake_up(&sport->dma_wait); | ||
507 | dev_dbg(sport->port.dev, "exit in %s.\n", __func__); | ||
508 | return; | ||
509 | } | ||
499 | } | 510 | } |
500 | 511 | ||
501 | static void imx_dma_tx(struct imx_port *sport) | 512 | static void imx_dma_tx(struct imx_port *sport) |
@@ -868,6 +879,10 @@ static void imx_rx_dma_done(struct imx_port *sport) | |||
868 | writel(temp, sport->port.membase + UCR1); | 879 | writel(temp, sport->port.membase + UCR1); |
869 | 880 | ||
870 | sport->dma_is_rxing = 0; | 881 | sport->dma_is_rxing = 0; |
882 | |||
883 | /* Is the shutdown waiting for us? */ | ||
884 | if (waitqueue_active(&sport->dma_wait)) | ||
885 | wake_up(&sport->dma_wait); | ||
871 | } | 886 | } |
872 | 887 | ||
873 | /* | 888 | /* |
@@ -1014,6 +1029,8 @@ static void imx_enable_dma(struct imx_port *sport) | |||
1014 | { | 1029 | { |
1015 | unsigned long temp; | 1030 | unsigned long temp; |
1016 | 1031 | ||
1032 | init_waitqueue_head(&sport->dma_wait); | ||
1033 | |||
1017 | /* set UCR1 */ | 1034 | /* set UCR1 */ |
1018 | temp = readl(sport->port.membase + UCR1); | 1035 | temp = readl(sport->port.membase + UCR1); |
1019 | temp |= UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN | | 1036 | temp |= UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN | |
@@ -1205,13 +1222,10 @@ static void imx_shutdown(struct uart_port *port) | |||
1205 | unsigned long flags; | 1222 | unsigned long flags; |
1206 | 1223 | ||
1207 | if (sport->dma_is_enabled) { | 1224 | if (sport->dma_is_enabled) { |
1208 | /* | 1225 | /* We have to wait for the DMA to finish. */ |
1209 | * The upper layer may does not call the @->stop_tx and | 1226 | wait_event(sport->dma_wait, |
1210 | * @->stop_rx, so we call them ourselves. | 1227 | !sport->dma_is_rxing && !sport->dma_is_txing); |
1211 | */ | ||
1212 | imx_stop_tx(port); | ||
1213 | imx_stop_rx(port); | 1228 | imx_stop_rx(port); |
1214 | |||
1215 | imx_disable_dma(sport); | 1229 | imx_disable_dma(sport); |
1216 | imx_uart_dma_exit(sport); | 1230 | imx_uart_dma_exit(sport); |
1217 | } | 1231 | } |