author    Qipan Li <Qipan.Li@csr.com>  2013-08-18 23:47:53 -0400
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2013-08-19 20:13:22 -0400
commit    8316d04c42b94e94c8e54027d7c77ebe098ab5fa (patch)
tree      365541927dac54b6d2f151366e59d999f5aa8fe7
parent    15cdcb12cbcbd6abf16d6b6a52e04d452b464e3b (diff)
serial: sirf: add DMA support using dmaengine APIs
If we get valid DMA channels from the device tree, switch to the dmaengine
API for rx/tx. Because the DMA hardware requires addresses and lengths to be
4-byte aligned, this driver still uses PIO for the unaligned bytes and DMA
for the aligned part.

For rx, to keep the dmaengine always active we use double buffering: two
dma_descs are issued up front, and for each buffer we track two states:
1. DMA transfer done: updated in the rx DMA finish callback
2. DMA buffer inserted into the tty: updated in the rx DMA finish tasklet
   and the rx timeout tasklet
A dma_desc is re-issued only once both 1 and 2 have finished.

For tx, since the exact length of every transfer is known, no such double
buffering is needed.

Signed-off-by: Qipan Li <Qipan.Li@csr.com>
Signed-off-by: Barry Song <Baohua.Song@csr.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
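The 4-byte alignment constraint above is the heart of the tx path. As a rough
illustration only — a standalone sketch, not the driver code — one transfer
splits into a PIO head up to the next 4-byte boundary, a DMA body rounded down
to a multiple of 4, and a PIO tail; pio_xfer() and dma_xfer() are hypothetical
stand-ins for the FIFO and dmaengine paths:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-ins for the FIFO (PIO) and dmaengine paths */
static void pio_xfer(const uint8_t *buf, size_t len)
{
	printf("PIO %zu bytes at %p\n", len, (const void *)buf);
}

static void dma_xfer(const uint8_t *buf, size_t len)
{
	printf("DMA %zu bytes at %p\n", len, (const void *)buf);
}

/* cut one transfer into PIO head / DMA body / PIO tail */
static void split_xfer(const uint8_t *buf, size_t len)
{
	/* bytes needed to reach the next 4-byte boundary */
	size_t head = (4 - ((uintptr_t)buf & 0x3)) & 0x3;
	size_t body;

	if (head > len)
		head = len;
	pio_xfer(buf, head);			/* unaligned head via PIO */
	buf += head;
	len -= head;

	body = len & ~(size_t)0x3;		/* 4-byte-aligned middle via DMA */
	dma_xfer(buf, body);

	pio_xfer(buf + body, len - body);	/* 1..3 trailing bytes via PIO */
}

int main(void)
{
	uint8_t ring[64];

	/* head/body/tail sizes depend on ring's actual alignment */
	split_xfer(ring + 1, 11);
	return 0;
}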
-rw-r--r--  drivers/tty/serial/sirfsoc_uart.c | 608
-rw-r--r--  drivers/tty/serial/sirfsoc_uart.h |  63
2 files changed, 615 insertions(+), 56 deletions(-)
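The rx double-buffering described in the commit message reduces to a small
amount of bookkeeping. Here is a minimal sketch under the two-buffer
assumption, not the driver code: push_to_tty() and resubmit_desc() are
hypothetical stand-ins for tty_insert_flip_string() and descriptor
re-submission, and the rx_lock/tasklet plumbing is omitted:

#include <stdio.h>

/*
 * Two receive buffers cycle through a ring. The DMA-complete callback
 * only advances 'issued'; a later tasklet pushes each finished buffer
 * to the tty, re-arms its descriptor, and advances 'completed'. A
 * buffer is recycled only after BOTH steps have happened, which is the
 * 1-and-2 condition from the commit message.
 */
#define RX_LOOP_BUF_CNT 2

struct rx_ring {
	int issued;	/* step 1: buffers whose DMA transfer finished */
	int completed;	/* step 2: buffers pushed to the tty and re-armed */
};

/* hypothetical helpers */
static void push_to_tty(int index)   { printf("tty <- buf %d\n", index); }
static void resubmit_desc(int index) { printf("rearm buf %d\n", index); }

/* would run in the rx DMA finish callback */
static void on_dma_complete(struct rx_ring *r)
{
	r->issued = (r->issued + 1) % RX_LOOP_BUF_CNT;
}

/* would run in the rx finish / rx timeout tasklets */
static void on_tasklet(struct rx_ring *r)
{
	while (r->completed != r->issued) {
		push_to_tty(r->completed);
		resubmit_desc(r->completed);
		r->completed = (r->completed + 1) % RX_LOOP_BUF_CNT;
	}
}

int main(void)
{
	struct rx_ring r = { 0, 0 };

	on_dma_complete(&r);	/* buffer 0 done in hardware */
	on_tasklet(&r);		/* buffer 0 drained and re-issued */
	return 0;
}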
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index d37609dfcf76..b8d7eb351d83 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -21,6 +21,10 @@
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/of_gpio.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/sirfsoc_dma.h>
 #include <asm/irq.h>
 #include <asm/mach/irq.h>
 
@@ -32,6 +36,9 @@ static unsigned int
 sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count);
 static struct uart_driver sirfsoc_uart_drv;
 
+static void sirfsoc_uart_tx_dma_complete_callback(void *param);
+static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port);
+static void sirfsoc_uart_rx_dma_complete_callback(void *param);
 static const struct sirfsoc_baudrate_to_regv baudrate_to_regv[] = {
 	{4000000, 2359296},
 	{3500000, 1310721},
@@ -158,16 +165,115 @@ static void sirfsoc_uart_stop_tx(struct uart_port *port)
 	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
 	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
-	unsigned int regv;
 
-	if (!sirfport->is_marco) {
-		regv = rd_regl(port, ureg->sirfsoc_int_en_reg);
-		wr_regl(port, ureg->sirfsoc_int_en_reg,
-			regv & ~uint_en->sirfsoc_txfifo_empty_en);
-	} else
-		wr_regl(port, SIRFUART_INT_EN_CLR,
-			uint_en->sirfsoc_txfifo_empty_en);
-
+	if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
+		if (sirfport->tx_dma_state == TX_DMA_RUNNING) {
+			dmaengine_pause(sirfport->tx_dma_chan);
+			sirfport->tx_dma_state = TX_DMA_PAUSE;
+		} else {
+			if (!sirfport->is_marco)
+				wr_regl(port, ureg->sirfsoc_int_en_reg,
+					rd_regl(port, ureg->sirfsoc_int_en_reg) &
+					~uint_en->sirfsoc_txfifo_empty_en);
+			else
+				wr_regl(port, SIRFUART_INT_EN_CLR,
+					uint_en->sirfsoc_txfifo_empty_en);
+		}
+	} else {
+		if (!sirfport->is_marco)
+			wr_regl(port, ureg->sirfsoc_int_en_reg,
+				rd_regl(port, ureg->sirfsoc_int_en_reg) &
+				~uint_en->sirfsoc_txfifo_empty_en);
+		else
+			wr_regl(port, SIRFUART_INT_EN_CLR,
+				uint_en->sirfsoc_txfifo_empty_en);
+	}
+}
+
+static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port *sirfport)
+{
+	struct uart_port *port = &sirfport->port;
+	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
+	struct circ_buf *xmit = &port->state->xmit;
+	unsigned long tran_size;
+	unsigned long tran_start;
+	unsigned long pio_tx_size;
+
+	tran_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+	tran_start = (unsigned long)(xmit->buf + xmit->tail);
+	if (uart_circ_empty(xmit) || uart_tx_stopped(port) ||
+		!tran_size)
+		return;
+	if (sirfport->tx_dma_state == TX_DMA_PAUSE) {
+		dmaengine_resume(sirfport->tx_dma_chan);
+		return;
+	}
+	if (sirfport->tx_dma_state == TX_DMA_RUNNING)
+		return;
+	if (!sirfport->is_marco)
+		wr_regl(port, ureg->sirfsoc_int_en_reg,
+			rd_regl(port, ureg->sirfsoc_int_en_reg)&
+			~(uint_en->sirfsoc_txfifo_empty_en));
+	else
+		wr_regl(port, SIRFUART_INT_EN_CLR,
+			uint_en->sirfsoc_txfifo_empty_en);
+	/*
+	 * DMA requires buffer address and buffer length are both aligned with
+	 * 4 bytes, so we use PIO for
+	 * 1. if address is not aligned with 4bytes, use PIO for the first 1~3
+	 * bytes, and move to DMA for the left part aligned with 4bytes
+	 * 2. if buffer length is not aligned with 4bytes, use DMA for aligned
+	 * part first, move to PIO for the left 1~3 bytes
+	 */
+	if (tran_size < 4 || BYTES_TO_ALIGN(tran_start)) {
+		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
+		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
+			rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)|
+			SIRFUART_IO_MODE);
+		if (BYTES_TO_ALIGN(tran_start)) {
+			pio_tx_size = sirfsoc_uart_pio_tx_chars(sirfport,
+				BYTES_TO_ALIGN(tran_start));
+			tran_size -= pio_tx_size;
+		}
+		if (tran_size < 4)
+			sirfsoc_uart_pio_tx_chars(sirfport, tran_size);
+		if (!sirfport->is_marco)
+			wr_regl(port, ureg->sirfsoc_int_en_reg,
+				rd_regl(port, ureg->sirfsoc_int_en_reg)|
+				uint_en->sirfsoc_txfifo_empty_en);
+		else
+			wr_regl(port, ureg->sirfsoc_int_en_reg,
+				uint_en->sirfsoc_txfifo_empty_en);
+		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
+	} else {
+		/* tx transfer mode switch into dma mode */
+		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
+		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
+			rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)&
+			~SIRFUART_IO_MODE);
+		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
+		tran_size &= ~(0x3);
+
+		sirfport->tx_dma_addr = dma_map_single(port->dev,
+			xmit->buf + xmit->tail,
+			tran_size, DMA_TO_DEVICE);
+		sirfport->tx_dma_desc = dmaengine_prep_slave_single(
+			sirfport->tx_dma_chan, sirfport->tx_dma_addr,
+			tran_size, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+		if (!sirfport->tx_dma_desc) {
+			dev_err(port->dev, "DMA prep slave single fail\n");
+			return;
+		}
+		sirfport->tx_dma_desc->callback =
+			sirfsoc_uart_tx_dma_complete_callback;
+		sirfport->tx_dma_desc->callback_param = (void *)sirfport;
+		sirfport->transfer_size = tran_size;
+
+		dmaengine_submit(sirfport->tx_dma_desc);
+		dma_async_issue_pending(sirfport->tx_dma_chan);
+		sirfport->tx_dma_state = TX_DMA_RUNNING;
+	}
 }
 
 static void sirfsoc_uart_start_tx(struct uart_port *port)
@@ -175,17 +281,19 @@ static void sirfsoc_uart_start_tx(struct uart_port *port)
 	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
 	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
-	unsigned long regv;
-
-	sirfsoc_uart_pio_tx_chars(sirfport, 1);
-	wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
-	if (!sirfport->is_marco) {
-		regv = rd_regl(port, ureg->sirfsoc_int_en_reg);
-		wr_regl(port, ureg->sirfsoc_int_en_reg, regv |
-			uint_en->sirfsoc_txfifo_empty_en);
-	} else
-		wr_regl(port, ureg->sirfsoc_int_en_reg,
-			uint_en->sirfsoc_txfifo_empty_en);
+	if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
+		sirfsoc_uart_tx_with_dma(sirfport);
+	else {
+		sirfsoc_uart_pio_tx_chars(sirfport, 1);
+		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
+		if (!sirfport->is_marco)
+			wr_regl(port, ureg->sirfsoc_int_en_reg,
+				rd_regl(port, ureg->sirfsoc_int_en_reg)|
+				uint_en->sirfsoc_txfifo_empty_en);
+		else
+			wr_regl(port, ureg->sirfsoc_int_en_reg,
+				uint_en->sirfsoc_txfifo_empty_en);
+	}
 }
 
 static void sirfsoc_uart_stop_rx(struct uart_port *port)
@@ -193,15 +301,28 @@ static void sirfsoc_uart_stop_rx(struct uart_port *port)
 	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
 	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
-	unsigned long reg;
+
 	wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
-	if (!sirfport->is_marco) {
-		reg = rd_regl(port, ureg->sirfsoc_int_en_reg);
-		wr_regl(port, ureg->sirfsoc_int_en_reg,
-			reg & ~(SIRFUART_RX_IO_INT_EN(port, uint_en)));
-	} else
-		wr_regl(port, SIRFUART_INT_EN_CLR,
-			SIRFUART_RX_IO_INT_EN(port, uint_en));
+	if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
+		if (!sirfport->is_marco)
+			wr_regl(port, ureg->sirfsoc_int_en_reg,
+				rd_regl(port, ureg->sirfsoc_int_en_reg) &
+				~(SIRFUART_RX_DMA_INT_EN(port, uint_en) |
+				uint_en->sirfsoc_rx_done_en));
+		else
+			wr_regl(port, SIRFUART_INT_EN_CLR,
+				SIRFUART_RX_DMA_INT_EN(port, uint_en)|
+				uint_en->sirfsoc_rx_done_en);
+		dmaengine_terminate_all(sirfport->rx_dma_chan);
+	} else {
+		if (!sirfport->is_marco)
+			wr_regl(port, ureg->sirfsoc_int_en_reg,
+				rd_regl(port, ureg->sirfsoc_int_en_reg)&
+				~(SIRFUART_RX_IO_INT_EN(port, uint_en)));
+		else
+			wr_regl(port, SIRFUART_INT_EN_CLR,
+				SIRFUART_RX_IO_INT_EN(port, uint_en));
+	}
 }
 
 static void sirfsoc_uart_disable_ms(struct uart_port *port)
@@ -298,6 +419,7 @@ sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
 		break;
 	}
 
+	sirfport->rx_io_count += rx_count;
 	port->icount.rx += rx_count;
 	tty_flip_buffer_push(&port->state->port);
 
@@ -327,6 +449,166 @@ sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count)
 	return num_tx;
 }
 
+static void sirfsoc_uart_tx_dma_complete_callback(void *param)
+{
+	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
+	struct uart_port *port = &sirfport->port;
+	struct circ_buf *xmit = &port->state->xmit;
+	unsigned long flags;
+
+	xmit->tail = (xmit->tail + sirfport->transfer_size) &
+				(UART_XMIT_SIZE - 1);
+	port->icount.tx += sirfport->transfer_size;
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(port);
+	if (sirfport->tx_dma_addr)
+		dma_unmap_single(port->dev, sirfport->tx_dma_addr,
+				sirfport->transfer_size, DMA_TO_DEVICE);
+	spin_lock_irqsave(&sirfport->tx_lock, flags);
+	sirfport->tx_dma_state = TX_DMA_IDLE;
+	sirfsoc_uart_tx_with_dma(sirfport);
+	spin_unlock_irqrestore(&sirfport->tx_lock, flags);
+}
+
+static void sirfsoc_uart_insert_rx_buf_to_tty(
+		struct sirfsoc_uart_port *sirfport, int count)
+{
+	struct uart_port *port = &sirfport->port;
+	struct tty_port *tport = &port->state->port;
+	int inserted;
+
+	inserted = tty_insert_flip_string(tport,
+		sirfport->rx_dma_items[sirfport->rx_completed].xmit.buf, count);
+	port->icount.rx += inserted;
+	tty_flip_buffer_push(tport);
+}
+
+static void sirfsoc_rx_submit_one_dma_desc(struct uart_port *port, int index)
+{
+	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+
+	sirfport->rx_dma_items[index].xmit.tail =
+		sirfport->rx_dma_items[index].xmit.head = 0;
+	sirfport->rx_dma_items[index].desc =
+		dmaengine_prep_slave_single(sirfport->rx_dma_chan,
+		sirfport->rx_dma_items[index].dma_addr, SIRFSOC_RX_DMA_BUF_SIZE,
+		DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
+	if (!sirfport->rx_dma_items[index].desc) {
+		dev_err(port->dev, "DMA slave single fail\n");
+		return;
+	}
+	sirfport->rx_dma_items[index].desc->callback =
+		sirfsoc_uart_rx_dma_complete_callback;
+	sirfport->rx_dma_items[index].desc->callback_param = sirfport;
+	sirfport->rx_dma_items[index].cookie =
+		dmaengine_submit(sirfport->rx_dma_items[index].desc);
+	dma_async_issue_pending(sirfport->rx_dma_chan);
+}
+
+static void sirfsoc_rx_tmo_process_tl(unsigned long param)
+{
+	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
+	struct uart_port *port = &sirfport->port;
+	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
+	struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
+	unsigned int count;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sirfport->rx_lock, flags);
+	while (sirfport->rx_completed != sirfport->rx_issued) {
+		sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
+					SIRFSOC_RX_DMA_BUF_SIZE);
+		sirfsoc_rx_submit_one_dma_desc(port, sirfport->rx_completed++);
+		sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
+	}
+	count = CIRC_CNT(sirfport->rx_dma_items[sirfport->rx_issued].xmit.head,
+		sirfport->rx_dma_items[sirfport->rx_issued].xmit.tail,
+		SIRFSOC_RX_DMA_BUF_SIZE);
+	if (count > 0)
+		sirfsoc_uart_insert_rx_buf_to_tty(sirfport, count);
+	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
+			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
+			SIRFUART_IO_MODE);
+	sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
+	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+	if (sirfport->rx_io_count == 4) {
+		spin_lock_irqsave(&sirfport->rx_lock, flags);
+		sirfport->rx_io_count = 0;
+		wr_regl(port, ureg->sirfsoc_int_st_reg,
+				uint_st->sirfsoc_rx_done);
+		if (!sirfport->is_marco)
+			wr_regl(port, ureg->sirfsoc_int_en_reg,
+				rd_regl(port, ureg->sirfsoc_int_en_reg) &
+				~(uint_en->sirfsoc_rx_done_en));
+		else
+			wr_regl(port, SIRFUART_INT_EN_CLR,
+					uint_en->sirfsoc_rx_done_en);
+		spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+
+		sirfsoc_uart_start_next_rx_dma(port);
+	} else {
+		spin_lock_irqsave(&sirfport->rx_lock, flags);
+		wr_regl(port, ureg->sirfsoc_int_st_reg,
+				uint_st->sirfsoc_rx_done);
+		if (!sirfport->is_marco)
+			wr_regl(port, ureg->sirfsoc_int_en_reg,
+				rd_regl(port, ureg->sirfsoc_int_en_reg) |
+				(uint_en->sirfsoc_rx_done_en));
+		else
+			wr_regl(port, ureg->sirfsoc_int_en_reg,
+					uint_en->sirfsoc_rx_done_en);
+		spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+	}
+}
+
+static void sirfsoc_uart_handle_rx_tmo(struct sirfsoc_uart_port *sirfport)
+{
+	struct uart_port *port = &sirfport->port;
+	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
+	struct dma_tx_state tx_state;
+	spin_lock(&sirfport->rx_lock);
+
+	dmaengine_tx_status(sirfport->rx_dma_chan,
+		sirfport->rx_dma_items[sirfport->rx_issued].cookie, &tx_state);
+	dmaengine_terminate_all(sirfport->rx_dma_chan);
+	sirfport->rx_dma_items[sirfport->rx_issued].xmit.head =
+			SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
+	if (!sirfport->is_marco)
+		wr_regl(port, ureg->sirfsoc_int_en_reg,
+			rd_regl(port, ureg->sirfsoc_int_en_reg) &
+			~(uint_en->sirfsoc_rx_timeout_en));
+	else
+		wr_regl(port, SIRFUART_INT_EN_CLR,
+				uint_en->sirfsoc_rx_timeout_en);
+	spin_unlock(&sirfport->rx_lock);
+	tasklet_schedule(&sirfport->rx_tmo_process_tasklet);
+}
+
+static void sirfsoc_uart_handle_rx_done(struct sirfsoc_uart_port *sirfport)
+{
+	struct uart_port *port = &sirfport->port;
+	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
+	struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
+
+	sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
+	if (sirfport->rx_io_count == 4) {
+		sirfport->rx_io_count = 0;
+		if (!sirfport->is_marco)
+			wr_regl(port, ureg->sirfsoc_int_en_reg,
+				rd_regl(port, ureg->sirfsoc_int_en_reg) &
+				~(uint_en->sirfsoc_rx_done_en));
+		else
+			wr_regl(port, SIRFUART_INT_EN_CLR,
+					uint_en->sirfsoc_rx_done_en);
+		wr_regl(port, ureg->sirfsoc_int_st_reg,
+				uint_st->sirfsoc_rx_timeout);
+		sirfsoc_uart_start_next_rx_dma(port);
+	}
+}
+
 static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
 {
 	unsigned long intr_status;
@@ -343,6 +625,7 @@ static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
 	spin_lock(&port->lock);
 	intr_status = rd_regl(port, ureg->sirfsoc_int_st_reg);
 	wr_regl(port, ureg->sirfsoc_int_st_reg, intr_status);
+	intr_status &= rd_regl(port, ureg->sirfsoc_int_en_reg);
 	if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT(port, uint_st)))) {
 		if (intr_status & uint_st->sirfsoc_rxd_brk) {
 			port->icount.brk++;
@@ -367,7 +650,8 @@ static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
 	}
 recv_char:
 	if ((sirfport->uart_reg->uart_type == SIRF_REAL_UART) &&
-		(intr_status & SIRFUART_CTS_INT_ST(uint_st))) {
+		(intr_status & SIRFUART_CTS_INT_ST(uint_st)) &&
+		!sirfport->tx_dma_state) {
 		cts_status = rd_regl(port, ureg->sirfsoc_afc_ctrl) &
 					SIRFUART_AFC_CTS_STATUS;
 		if (cts_status != 0)
@@ -377,41 +661,111 @@ recv_char:
 		uart_handle_cts_change(port, cts_status);
 		wake_up_interruptible(&state->port.delta_msr_wait);
 	}
-	if (intr_status & SIRFUART_RX_IO_INT_ST(uint_st))
-		sirfsoc_uart_pio_rx_chars(port, SIRFSOC_UART_IO_RX_MAX_CNT);
+	if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
+		if (intr_status & uint_st->sirfsoc_rx_timeout)
+			sirfsoc_uart_handle_rx_tmo(sirfport);
+		if (intr_status & uint_st->sirfsoc_rx_done)
+			sirfsoc_uart_handle_rx_done(sirfport);
+	} else {
+		if (intr_status & SIRFUART_RX_IO_INT_ST(uint_st))
+			sirfsoc_uart_pio_rx_chars(port,
+					SIRFSOC_UART_IO_RX_MAX_CNT);
+	}
 	if (intr_status & uint_st->sirfsoc_txfifo_empty) {
-		if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
-			spin_unlock(&port->lock);
-			return IRQ_HANDLED;
-		} else {
-			sirfsoc_uart_pio_tx_chars(sirfport,
+		if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
+			sirfsoc_uart_tx_with_dma(sirfport);
+		else {
+			if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+				spin_unlock(&port->lock);
+				return IRQ_HANDLED;
+			} else {
+				sirfsoc_uart_pio_tx_chars(sirfport,
 					SIRFSOC_UART_IO_TX_REASONABLE_CNT);
-			if ((uart_circ_empty(xmit)) &&
-				(rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
-				ufifo_st->ff_empty(port->line)))
-				sirfsoc_uart_stop_tx(port);
+				if ((uart_circ_empty(xmit)) &&
+				(rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
+				ufifo_st->ff_empty(port->line)))
+					sirfsoc_uart_stop_tx(port);
+			}
 		}
 	}
 	spin_unlock(&port->lock);
 	return IRQ_HANDLED;
 }
 
-static void sirfsoc_uart_start_rx(struct uart_port *port)
+static void sirfsoc_uart_rx_dma_complete_tl(unsigned long param)
+{
+	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
+	struct uart_port *port = &sirfport->port;
+	unsigned long flags;
+	spin_lock_irqsave(&sirfport->rx_lock, flags);
+	while (sirfport->rx_completed != sirfport->rx_issued) {
+		sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
+					SIRFSOC_RX_DMA_BUF_SIZE);
+		sirfsoc_rx_submit_one_dma_desc(port, sirfport->rx_completed++);
+		sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
+	}
+	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+}
+
+static void sirfsoc_uart_rx_dma_complete_callback(void *param)
+{
+	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
+	spin_lock(&sirfport->rx_lock);
+	sirfport->rx_issued++;
+	sirfport->rx_issued %= SIRFSOC_RX_LOOP_BUF_CNT;
+	spin_unlock(&sirfport->rx_lock);
+	tasklet_schedule(&sirfport->rx_dma_complete_tasklet);
+}
+
+/* submit rx dma task into dmaengine */
+static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
 {
 	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
 	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
-	unsigned long regv;
-	if (!sirfport->is_marco) {
-		regv = rd_regl(port, ureg->sirfsoc_int_en_reg);
-		wr_regl(port, ureg->sirfsoc_int_en_reg, regv |
-			SIRFUART_RX_IO_INT_EN(port, uint_en));
-	} else
+	unsigned long flags;
+	int i;
+	spin_lock_irqsave(&sirfport->rx_lock, flags);
+	sirfport->rx_io_count = 0;
+	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
+		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
+		~SIRFUART_IO_MODE);
+	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+	for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
+		sirfsoc_rx_submit_one_dma_desc(port, i);
+	sirfport->rx_completed = sirfport->rx_issued = 0;
+	spin_lock_irqsave(&sirfport->rx_lock, flags);
+	if (!sirfport->is_marco)
 		wr_regl(port, ureg->sirfsoc_int_en_reg,
-			SIRFUART_RX_IO_INT_EN(port, uint_en));
+			rd_regl(port, ureg->sirfsoc_int_en_reg) |
+			SIRFUART_RX_DMA_INT_EN(port, uint_en));
+	else
+		wr_regl(port, ureg->sirfsoc_int_en_reg,
+			SIRFUART_RX_DMA_INT_EN(port, uint_en));
+	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+}
+
+static void sirfsoc_uart_start_rx(struct uart_port *port)
+{
+	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
+
+	sirfport->rx_io_count = 0;
 	wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
 	wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
 	wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
+	if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
+		sirfsoc_uart_start_next_rx_dma(port);
+	else {
+		if (!sirfport->is_marco)
+			wr_regl(port, ureg->sirfsoc_int_en_reg,
+				rd_regl(port, ureg->sirfsoc_int_en_reg) |
+				SIRFUART_RX_IO_INT_EN(port, uint_en));
+		else
+			wr_regl(port, ureg->sirfsoc_int_en_reg,
+				SIRFUART_RX_IO_INT_EN(port, uint_en));
+	}
 }
 
 static unsigned int
@@ -488,10 +842,9 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
 	unsigned long flags;
 	unsigned long ic;
 	unsigned int clk_div_reg = 0;
-	unsigned long temp_reg_val, ioclk_rate;
+	unsigned long txfifo_op_reg, ioclk_rate;
 	unsigned long rx_time_out;
 	int threshold_div;
-	int temp;
 	u32 data_bit_len, stop_bit_len, len_val;
 	unsigned long sample_div_reg = 0xf;
 	ioclk_rate = port->uartclk;
@@ -606,10 +959,10 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
 	/* set receive timeout && data bits len */
 	rx_time_out = SIRFSOC_UART_RX_TIMEOUT(set_baud, 20000);
 	rx_time_out = SIRFUART_RECV_TIMEOUT_VALUE(rx_time_out);
-	temp_reg_val = rd_regl(port, ureg->sirfsoc_tx_fifo_op);
+	txfifo_op_reg = rd_regl(port, ureg->sirfsoc_tx_fifo_op);
 	wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
 	wr_regl(port, ureg->sirfsoc_tx_fifo_op,
-			(temp_reg_val & ~SIRFUART_FIFO_START));
+			(txfifo_op_reg & ~SIRFUART_FIFO_START));
 	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
 		config_reg |= SIRFUART_RECV_TIMEOUT(port, rx_time_out);
 		wr_regl(port, ureg->sirfsoc_line_ctrl, config_reg);
@@ -631,24 +984,118 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
 			(SIRFUART_RECV_TIMEOUT(port, rx_time_out)) |
 			(sample_div_reg & 0x3f) << 16);
 	}
-	wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_IO_MODE);
-	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_IO_MODE);
+	if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
+		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_DMA_MODE);
+	else
+		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_IO_MODE);
+	if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
+		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_DMA_MODE);
+	else
+		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_IO_MODE);
 	/* Reset Rx/Tx FIFO Threshold level for proper baudrate */
 	if (set_baud < 1000000)
 		threshold_div = 1;
 	else
 		threshold_div = 2;
-	temp = SIRFUART_FIFO_THD(port);
-	wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl, temp / threshold_div);
-	wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, temp / threshold_div);
-	temp_reg_val |= SIRFUART_FIFO_START;
-	wr_regl(port, ureg->sirfsoc_tx_fifo_op, temp_reg_val);
+	wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl,
+				SIRFUART_FIFO_THD(port) / threshold_div);
+	wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl,
+				SIRFUART_FIFO_THD(port) / threshold_div);
+	txfifo_op_reg |= SIRFUART_FIFO_START;
+	wr_regl(port, ureg->sirfsoc_tx_fifo_op, txfifo_op_reg);
 	uart_update_timeout(port, termios->c_cflag, set_baud);
 	sirfsoc_uart_start_rx(port);
 	wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_TX_EN | SIRFUART_RX_EN);
 	spin_unlock_irqrestore(&port->lock, flags);
 }
 
+static unsigned int sirfsoc_uart_init_tx_dma(struct uart_port *port)
+{
+	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+	dma_cap_mask_t dma_mask;
+	struct dma_slave_config tx_slv_cfg = {
+		.dst_maxburst = 2,
+	};
+
+	dma_cap_zero(dma_mask);
+	dma_cap_set(DMA_SLAVE, dma_mask);
+	sirfport->tx_dma_chan = dma_request_channel(dma_mask,
+		(dma_filter_fn)sirfsoc_dma_filter_id,
+		(void *)sirfport->tx_dma_no);
+	if (!sirfport->tx_dma_chan) {
+		dev_err(port->dev, "Uart Request Dma Channel Fail %d\n",
+					sirfport->tx_dma_no);
+		return -EPROBE_DEFER;
+	}
+	dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg);
+
+	return 0;
+}
+
+static unsigned int sirfsoc_uart_init_rx_dma(struct uart_port *port)
+{
+	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+	dma_cap_mask_t dma_mask;
+	int ret;
+	int i, j;
+	struct dma_slave_config slv_cfg = {
+		.src_maxburst = 2,
+	};
+
+	dma_cap_zero(dma_mask);
+	dma_cap_set(DMA_SLAVE, dma_mask);
+	sirfport->rx_dma_chan = dma_request_channel(dma_mask,
+			(dma_filter_fn)sirfsoc_dma_filter_id,
+			(void *)sirfport->rx_dma_no);
+	if (!sirfport->rx_dma_chan) {
+		dev_err(port->dev, "Uart Request Dma Channel Fail %d\n",
+				sirfport->rx_dma_no);
+		ret = -EPROBE_DEFER;
+		goto request_err;
+	}
+	for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++) {
+		sirfport->rx_dma_items[i].xmit.buf =
+			dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
+			&sirfport->rx_dma_items[i].dma_addr, GFP_KERNEL);
+		if (!sirfport->rx_dma_items[i].xmit.buf) {
+			dev_err(port->dev, "Uart alloc bufa failed\n");
+			ret = -ENOMEM;
+			goto alloc_coherent_err;
+		}
+		sirfport->rx_dma_items[i].xmit.head =
+			sirfport->rx_dma_items[i].xmit.tail = 0;
+	}
+	dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg);
+
+	return 0;
+alloc_coherent_err:
+	for (j = 0; j < i; j++)
+		dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
+				sirfport->rx_dma_items[j].xmit.buf,
+				sirfport->rx_dma_items[j].dma_addr);
+	dma_release_channel(sirfport->rx_dma_chan);
+request_err:
+	return ret;
+}
+
+static void sirfsoc_uart_uninit_tx_dma(struct sirfsoc_uart_port *sirfport)
+{
+	dmaengine_terminate_all(sirfport->tx_dma_chan);
+	dma_release_channel(sirfport->tx_dma_chan);
+}
+
+static void sirfsoc_uart_uninit_rx_dma(struct sirfsoc_uart_port *sirfport)
+{
+	int i;
+	struct uart_port *port = &sirfport->port;
+	dmaengine_terminate_all(sirfport->rx_dma_chan);
+	dma_release_channel(sirfport->rx_dma_chan);
+	for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
+		dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
+				sirfport->rx_dma_items[i].xmit.buf,
+				sirfport->rx_dma_items[i].dma_addr);
+}
+
 static int sirfsoc_uart_startup(struct uart_port *port)
 {
 	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
@@ -688,6 +1135,23 @@ static int sirfsoc_uart_startup(struct uart_port *port)
 	wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl, SIRFUART_FIFO_THD(port));
 	wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, SIRFUART_FIFO_THD(port));
 
+	if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
+		ret = sirfsoc_uart_init_rx_dma(port);
+		if (ret)
+			goto init_rx_err;
+		wr_regl(port, ureg->sirfsoc_rx_fifo_level_chk,
+				SIRFUART_RX_FIFO_CHK_SC(port->line, 0x4) |
+				SIRFUART_RX_FIFO_CHK_LC(port->line, 0xe) |
+				SIRFUART_RX_FIFO_CHK_HC(port->line, 0x1b));
+	}
+	if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
+		sirfsoc_uart_init_tx_dma(port);
+		sirfport->tx_dma_state = TX_DMA_IDLE;
+		wr_regl(port, ureg->sirfsoc_tx_fifo_level_chk,
+				SIRFUART_TX_FIFO_CHK_SC(port->line, 0x1b) |
+				SIRFUART_TX_FIFO_CHK_LC(port->line, 0xe) |
+				SIRFUART_TX_FIFO_CHK_HC(port->line, 0x4));
+	}
 	sirfport->ms_enabled = false;
 	if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
 		sirfport->hw_flow_ctrl) {
@@ -728,6 +1192,12 @@ static void sirfsoc_uart_shutdown(struct uart_port *port)
 		gpio_set_value(sirfport->rts_gpio, 1);
 		free_irq(gpio_to_irq(sirfport->cts_gpio), sirfport);
 	}
+	if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
+		sirfsoc_uart_uninit_rx_dma(sirfport);
+	if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
+		sirfsoc_uart_uninit_tx_dma(sirfport);
+		sirfport->tx_dma_state = TX_DMA_IDLE;
+	}
 }
 
 static const char *sirfsoc_uart_type(struct uart_port *port)
@@ -801,6 +1271,9 @@ sirfsoc_uart_console_setup(struct console *co, char *options)
 	uart_parse_options(options, &baud, &parity, &bits, &flow);
 	port->cons = co;
 
+	/* default console tx/rx transfer using io mode */
+	sirfport->rx_dma_no = UNVALID_DMA_CHAN;
+	sirfport->tx_dma_no = UNVALID_DMA_CHAN;
 	return uart_set_options(port, co, baud, parity, bits, flow);
 }
 
806 1279
@@ -888,10 +1361,27 @@ static int sirfsoc_uart_probe(struct platform_device *pdev)
 
 	sirfport->hw_flow_ctrl = of_property_read_bool(pdev->dev.of_node,
 		"sirf,uart-has-rtscts");
-	if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-uart"))
+	if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-uart")) {
 		sirfport->uart_reg->uart_type = SIRF_REAL_UART;
+		if (of_property_read_u32(pdev->dev.of_node,
+				"sirf,uart-dma-rx-channel",
+				&sirfport->rx_dma_no))
+			sirfport->rx_dma_no = UNVALID_DMA_CHAN;
+		if (of_property_read_u32(pdev->dev.of_node,
+				"sirf,uart-dma-tx-channel",
+				&sirfport->tx_dma_no))
+			sirfport->tx_dma_no = UNVALID_DMA_CHAN;
+	}
 	if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart")) {
 		sirfport->uart_reg->uart_type = SIRF_USP_UART;
+		if (of_property_read_u32(pdev->dev.of_node,
+				"sirf,usp-dma-rx-channel",
+				&sirfport->rx_dma_no))
+			sirfport->rx_dma_no = UNVALID_DMA_CHAN;
+		if (of_property_read_u32(pdev->dev.of_node,
+				"sirf,usp-dma-tx-channel",
+				&sirfport->tx_dma_no))
+			sirfport->tx_dma_no = UNVALID_DMA_CHAN;
 		if (!sirfport->hw_flow_ctrl)
 			goto usp_no_flow_control;
 		if (of_find_property(pdev->dev.of_node, "cts-gpios", NULL))
@@ -946,6 +1436,12 @@ usp_no_flow_control:
 		ret = -EFAULT;
 		goto err;
 	}
+	spin_lock_init(&sirfport->rx_lock);
+	spin_lock_init(&sirfport->tx_lock);
+	tasklet_init(&sirfport->rx_dma_complete_tasklet,
+		sirfsoc_uart_rx_dma_complete_tl, (unsigned long)sirfport);
+	tasklet_init(&sirfport->rx_tmo_process_tasklet,
+		sirfsoc_rx_tmo_process_tl, (unsigned long)sirfport);
 	port->mapbase = res->start;
 	port->membase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
 	if (!port->membase) {
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
index e87035a9bbcb..173e00f84c67 100644
--- a/drivers/tty/serial/sirfsoc_uart.h
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -338,6 +338,12 @@ struct sirfsoc_uart_register sirfsoc_uart = {
 					uint_st->sirfsoc_rxfifo_thd |\
 					uint_st->sirfsoc_rxfifo_full)
 #define SIRFUART_CTS_INT_ST(uint_st)	(uint_st->sirfsoc_cts)
+#define SIRFUART_RX_DMA_INT_EN(port, uint_en)	\
+				(uint_en->sirfsoc_rx_timeout_en |\
+				uint_en->sirfsoc_frm_err_en |\
+				uint_en->sirfsoc_rx_oflow_en |\
+				uint_en->sirfsoc_rxd_brk_en |\
+				((port->line > 2) ? 0 : uint_en->sirfsoc_parity_err_en))
 /* Generic Definitions */
 #define SIRFSOC_UART_NAME			"ttySiRF"
 #define SIRFSOC_UART_MAJOR			0
@@ -356,12 +362,52 @@ struct sirfsoc_uart_register sirfsoc_uart = {
 #define SIRF_SAMPLE_DIV_MASK		0x3f0000
 #define SIRF_BAUD_RATE_SUPPORT_NR	18
 
+/* Uart Common Use Macro*/
+#define SIRFSOC_RX_DMA_BUF_SIZE	256
+#define BYTES_TO_ALIGN(dma_addr)	((unsigned long)(dma_addr) & 0x3)
+#define LOOP_DMA_BUFA_FILL	1
+#define LOOP_DMA_BUFB_FILL	2
+#define TX_TRAN_PIO	1
+#define TX_TRAN_DMA	2
+/* Uart Fifo Level Chk */
+#define SIRFUART_TX_FIFO_SC_OFFSET	0
+#define SIRFUART_TX_FIFO_LC_OFFSET	10
+#define SIRFUART_TX_FIFO_HC_OFFSET	20
+#define SIRFUART_TX_FIFO_CHK_SC(line, value)	((((line) == 1) ? (value & 0x3) :\
+		(value & 0x1f)) << SIRFUART_TX_FIFO_SC_OFFSET)
+#define SIRFUART_TX_FIFO_CHK_LC(line, value)	((((line) == 1) ? (value & 0x3) :\
+		(value & 0x1f)) << SIRFUART_TX_FIFO_LC_OFFSET)
+#define SIRFUART_TX_FIFO_CHK_HC(line, value)	((((line) == 1) ? (value & 0x3) :\
+		(value & 0x1f)) << SIRFUART_TX_FIFO_HC_OFFSET)
+
+#define SIRFUART_RX_FIFO_CHK_SC SIRFUART_TX_FIFO_CHK_SC
+#define SIRFUART_RX_FIFO_CHK_LC SIRFUART_TX_FIFO_CHK_LC
+#define SIRFUART_RX_FIFO_CHK_HC SIRFUART_TX_FIFO_CHK_HC
+/* Indicate how many buffers used */
+#define SIRFSOC_RX_LOOP_BUF_CNT		2
+
+/* Indicate if DMA channel valid */
+#define IS_DMA_CHAN_VALID(x)	((x) != -1)
+#define UNVALID_DMA_CHAN	-1
 /* For Fast Baud Rate Calculation */
 struct sirfsoc_baudrate_to_regv {
 	unsigned int baud_rate;
 	unsigned int reg_val;
 };
 
+enum sirfsoc_tx_state {
+	TX_DMA_IDLE,
+	TX_DMA_RUNNING,
+	TX_DMA_PAUSE,
+};
+
+struct sirfsoc_loop_buffer {
+	struct circ_buf		xmit;
+	dma_cookie_t		cookie;
+	struct dma_async_tx_descriptor	*desc;
+	dma_addr_t		dma_addr;
+};
+
 struct sirfsoc_uart_port {
 	bool			hw_flow_ctrl;
 	bool			ms_enabled;
@@ -371,8 +417,25 @@ struct sirfsoc_uart_port {
 	/* for SiRFmarco, there are SET/CLR for UART_INT_EN */
 	bool			is_marco;
 	struct sirfsoc_uart_register *uart_reg;
+	int			rx_dma_no;
+	int			tx_dma_no;
+	struct dma_chan		*rx_dma_chan;
+	struct dma_chan		*tx_dma_chan;
+	dma_addr_t		tx_dma_addr;
+	struct dma_async_tx_descriptor	*tx_dma_desc;
+	spinlock_t		rx_lock;
+	spinlock_t		tx_lock;
+	struct tasklet_struct	rx_dma_complete_tasklet;
+	struct tasklet_struct	rx_tmo_process_tasklet;
+	unsigned int		rx_io_count;
+	unsigned long		transfer_size;
+	enum sirfsoc_tx_state	tx_dma_state;
 	unsigned int		cts_gpio;
 	unsigned int		rts_gpio;
+
+	struct sirfsoc_loop_buffer	rx_dma_items[SIRFSOC_RX_LOOP_BUF_CNT];
+	int			rx_completed;
+	int			rx_issued;
 };
 
 /* Hardware Flow Control */