path: root/drivers/tty/serial/sirfsoc_uart.c
author	Qipan Li <Qipan.Li@csr.com>	2015-07-13 20:52:22 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2015-07-23 18:32:04 -0400
commit	1d26c9ff420f647df4a7a3e9a28736b9cff6359a (patch)
tree	a5c9695dc19810e0e6857f559f5a10e0a8712a39 /drivers/tty/serial/sirfsoc_uart.c
parent	e9bb4b510046d41e3e5a4adc56c76ec8d6812962 (diff)
serial: sirf: workaround rx process to avoid possible data loss
When the UART works in DMA mode and the bytes left in the RX FIFO are fewer than one DMA transfer unit, the DMA engine cannot move them into the RX DMA buffer, so the driver needs a way to fetch them and flush them into the tty buffer in time. In that case we want the UART to switch from DMA mode to PIO mode, fetch and flush bytes into the tty layer buffer until the RX FIFO becomes empty, and then switch back from PIO mode to DMA mode (record this as method 1).

Method 1 corrupts the next receive. For example, if the PIO part of method 1 fetched and pushed bytes X1...X3 while the RX FIFO newly received bytes Y1...Y4, the UART triggers a DMA unit transfer whose content is X1...X3Y1 and the RX FIFO status reads empty, so X1X2X3 are pushed twice (once by PIO and once by DMA) and Y2Y3Y4 are lost. Adding an RX FIFO reset before the UART switches back to DMA mode resolves that issue (record [method 1 + FIFO reset] as method 2).

Before this commit the driver used method 2, but method 2 carries a risk of data loss: if the UART shift register receives a complete byte and transfers it into the RX FIFO just before the FIFO reset, that byte is lost.

UART and USP have similar bits, CLEAR_RX_ADDR_EN (UART) / FRADDR_CLR_EN (USP): when the controller detects the I/O mode changing to DMA mode, it clears the two low bits of the read pointer (rx_fifo_addr[1:0]). With this bit enabled plus method 1 (record as method 3), experiments show that in the example above the DMA unit's content is X1...X3Y1 while Y2Y3Y4 remain in the RX FIFO, so we only need to push the bytes in the RX DMA buffer.

Note that the workaround works only when the UART receive DMA channel uses SINGLE DMA mode.

Signed-off-by: Qipan Li <Qipan.Li@csr.com>
Signed-off-by: Barry Song <Baohua.Song@csr.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
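To make the bookkeeping concrete, here is a minimal standalone C sketch (not driver code; the buffer size, names, and the hard-coded FIFO level are assumptions made for the example) of the counting this patch introduces, showing why the timer poll must compare the RX FIFO level against pio_fetch_cnt instead of zero after a PIO fallback:

/*
 * Standalone illustration only, not driver code. Buffer size, names and
 * the hard-coded FIFO level are assumptions made for the example.
 */
#include <stdio.h>

#define RX_DMA_BUF_SIZE 64		/* power of two, like the driver's ring */

static unsigned char dma_buf[RX_DMA_BUF_SIZE];
static int head;
static int pio_fetch_cnt;		/* bytes pulled out of the FIFO by PIO */

/* PIO fallback: copy at most max_cnt leftover FIFO bytes into the DMA ring */
static void pio_fallback(const unsigned char *fifo, int level, int max_cnt)
{
	for (int i = 0; i < level && max_cnt-- > 0; i++) {
		dma_buf[head] = fifo[i];
		head = (head + 1) & (RX_DMA_BUF_SIZE - 1);
		pio_fetch_cnt++;
	}
}

int main(void)
{
	const unsigned char leftover[3] = { 'X', 'X', 'X' };	/* X1...X3 */

	/* Fewer bytes than one DMA unit are stuck in the FIFO: fetch by PIO. */
	pio_fallback(leftover, 3, 3);

	/*
	 * Per the commit, after switching back to DMA mode the FIFO level
	 * still reads back as the number of bytes just fetched by PIO, so a
	 * "level > 0" test would fetch X1...X3 a second time. Comparing
	 * against pio_fetch_cnt avoids the double push.
	 */
	int fifo_level = 3;
	if (fifo_level > pio_fetch_cnt)
		printf("stale data would be fetched again\n");
	else
		printf("nothing new in the FIFO, no double push\n");
	return 0;
}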
Diffstat (limited to 'drivers/tty/serial/sirfsoc_uart.c')
-rw-r--r--	drivers/tty/serial/sirfsoc_uart.c | 80
1 file changed, 62 insertions(+), 18 deletions(-)
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index 653cdd5fb508..8cac7ac497e8 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -886,9 +886,13 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
 	else
 		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_IO_MODE);
 	if (sirfport->rx_dma_chan)
-		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_DMA_MODE);
+		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
+			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
+			~SIRFUART_IO_MODE);
 	else
-		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_IO_MODE);
+		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
+			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
+			SIRFUART_IO_MODE);
 	sirfport->rx_period_time = 20000000;
 	/* Reset Rx/Tx FIFO Threshold level for proper baudrate */
 	if (set_baud < 1000000)
@@ -958,9 +962,9 @@ static int sirfsoc_uart_startup(struct uart_port *port)
 	wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, SIRFUART_FIFO_THD(port));
 	if (sirfport->rx_dma_chan)
 		wr_regl(port, ureg->sirfsoc_rx_fifo_level_chk,
-			SIRFUART_RX_FIFO_CHK_SC(port->line, 0x4) |
-			SIRFUART_RX_FIFO_CHK_LC(port->line, 0xe) |
-			SIRFUART_RX_FIFO_CHK_HC(port->line, 0x1b));
+			SIRFUART_RX_FIFO_CHK_SC(port->line, 0x1) |
+			SIRFUART_RX_FIFO_CHK_LC(port->line, 0x2) |
+			SIRFUART_RX_FIFO_CHK_HC(port->line, 0x4));
 	if (sirfport->tx_dma_chan) {
 		sirfport->tx_dma_state = TX_DMA_IDLE;
 		wr_regl(port, ureg->sirfsoc_tx_fifo_level_chk,
@@ -981,10 +985,21 @@ static int sirfsoc_uart_startup(struct uart_port *port)
981 goto init_rx_err; 985 goto init_rx_err;
982 } 986 }
983 } 987 }
988 if (sirfport->uart_reg->uart_type == SIRF_REAL_UART &&
989 sirfport->rx_dma_chan)
990 wr_regl(port, ureg->sirfsoc_swh_dma_io,
991 SIRFUART_CLEAR_RX_ADDR_EN);
992 if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
993 sirfport->rx_dma_chan)
994 wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
995 rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
996 SIRFSOC_USP_FRADDR_CLR_EN);
984 enable_irq(port->irq); 997 enable_irq(port->irq);
985 if (sirfport->rx_dma_chan && !sirfport->is_hrt_enabled) { 998 if (sirfport->rx_dma_chan && !sirfport->is_hrt_enabled) {
986 sirfport->is_hrt_enabled = true; 999 sirfport->is_hrt_enabled = true;
987 sirfport->rx_period_time = 20000000; 1000 sirfport->rx_period_time = 20000000;
1001 sirfport->rx_last_pos = -1;
1002 sirfport->pio_fetch_cnt = 0;
988 sirfport->rx_dma_items.xmit.tail = 1003 sirfport->rx_dma_items.xmit.tail =
989 sirfport->rx_dma_items.xmit.head = 0; 1004 sirfport->rx_dma_items.xmit.head = 0;
990 hrtimer_start(&sirfport->hrt, 1005 hrtimer_start(&sirfport->hrt,
@@ -1003,6 +1018,9 @@ static void sirfsoc_uart_shutdown(struct uart_port *port)
 {
 	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+	struct circ_buf *xmit;
+
+	xmit = &sirfport->rx_dma_items.xmit;
 	if (!sirfport->is_atlas7)
 		wr_regl(port, ureg->sirfsoc_int_en_reg, 0);
 	else
@@ -1019,8 +1037,10 @@ static void sirfsoc_uart_shutdown(struct uart_port *port)
 	if (sirfport->tx_dma_chan)
 		sirfport->tx_dma_state = TX_DMA_IDLE;
 	if (sirfport->rx_dma_chan && sirfport->is_hrt_enabled) {
-		while ((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
-			SIRFUART_RX_FIFO_MASK) > 0)
+		while (((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
+			SIRFUART_RX_FIFO_MASK) > sirfport->pio_fetch_cnt) &&
+			!CIRC_CNT(xmit->head, xmit->tail,
+			SIRFSOC_RX_DMA_BUF_SIZE))
 			;
 		sirfport->is_hrt_enabled = false;
 		hrtimer_cancel(&sirfport->hrt);
@@ -1169,6 +1189,8 @@ static enum hrtimer_restart
 	struct tty_struct *tty;
 	struct sirfsoc_register *ureg;
 	struct circ_buf *xmit;
+	struct sirfsoc_fifo_status *ufifo_st;
+	int max_pio_cnt;
 
 	sirfport = container_of(hrt, struct sirfsoc_uart_port, hrt);
 	port = &sirfport->port;
@@ -1176,9 +1198,16 @@ static enum hrtimer_restart
 	tty = port->state->port.tty;
 	ureg = &sirfport->uart_reg->uart_reg;
 	xmit = &sirfport->rx_dma_items.xmit;
+	ufifo_st = &sirfport->uart_reg->fifo_status;
+
 	dmaengine_tx_status(sirfport->rx_dma_chan,
 			sirfport->rx_dma_items.cookie, &tx_state);
-	xmit->head = SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
+	if (SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue !=
+		sirfport->rx_last_pos) {
+		xmit->head = SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
+		sirfport->rx_last_pos = xmit->head;
+		sirfport->pio_fetch_cnt = 0;
+	}
 	count = CIRC_CNT_TO_END(xmit->head, xmit->tail,
 			SIRFSOC_RX_DMA_BUF_SIZE);
 	while (count > 0) {
@@ -1200,23 +1229,38 @@ static enum hrtimer_restart
 	 */
 	if (!inserted && !count &&
 		((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
-		SIRFUART_RX_FIFO_MASK) > 0)) {
+		SIRFUART_RX_FIFO_MASK) > sirfport->pio_fetch_cnt)) {
+		dmaengine_pause(sirfport->rx_dma_chan);
 		/* switch to pio mode */
 		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
 			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
 			SIRFUART_IO_MODE);
-		while ((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
-			SIRFUART_RX_FIFO_MASK) > 0) {
-			if (sirfsoc_uart_pio_rx_chars(port, 16) > 0)
-				tty_flip_buffer_push(tty->port);
+		/*
+		 * UART controller SWH_DMA_IO register have CLEAR_RX_ADDR_EN
+		 * When found changing I/O to DMA mode, it clears
+		 * two low bits of read point;
+		 * USP have similar FRADDR_CLR_EN bit in USP_RX_DMA_IO_CTRL.
+		 * Fetch data out from rxfifo into DMA buffer in PIO mode,
+		 * while switch back to DMA mode, the data fetched will override
+		 * by DMA, as hardware have a strange behaviour:
+		 * after switch back to DMA mode, check rxfifo status it will
+		 * be the number PIO fetched, so record the fetched data count
+		 * to avoid the repeated fetch
+		 */
+		max_pio_cnt = 3;
+		while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
+			ufifo_st->ff_empty(port)) && max_pio_cnt--) {
+			xmit->buf[xmit->head] =
+				rd_regl(port, ureg->sirfsoc_rx_fifo_data);
+			xmit->head = (xmit->head + 1) &
+				(SIRFSOC_RX_DMA_BUF_SIZE - 1);
+			sirfport->pio_fetch_cnt++;
 		}
-		wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
-		wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
-		wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
 		/* switch back to dma mode */
 		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
 			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
 			~SIRFUART_IO_MODE);
+		dmaengine_resume(sirfport->rx_dma_chan);
 	}
 next_hrt:
 	hrtimer_forward_now(hrt, ns_to_ktime(sirfport->rx_period_time));
@@ -1239,7 +1283,7 @@ static int sirfsoc_uart_probe(struct platform_device *pdev)
 	struct resource *res;
 	int ret;
 	struct dma_slave_config slv_cfg = {
-		.src_maxburst = 2,
+		.src_maxburst = 1,
 	};
 	struct dma_slave_config tx_slv_cfg = {
 		.dst_maxburst = 2,
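
The final hunk lowers src_maxburst from 2 to 1 so the RX channel issues single transfers, matching the note that the workaround only works when the receive DMA channel uses SINGLE DMA mode. As a hedged sketch of where that setting takes effect (the helper, channel, and FIFO address names below are hypothetical, not the driver's own), a slave channel is typically programmed through dmaengine_slave_config():

#include <linux/dmaengine.h>

/* Hypothetical helper: rx_chan and fifo_phys stand in for the driver's own. */
static int rx_dma_setup(struct dma_chan *rx_chan, dma_addr_t fifo_phys)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_phys,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 1,	/* single transfers, as the workaround requires */
	};

	/* Hand the burst/width settings to the DMA controller driver. */
	return dmaengine_slave_config(rx_chan, &cfg);
}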