Diffstat (limited to 'drivers/tty/serial/sirfsoc_uart.c')
-rw-r--r--	drivers/tty/serial/sirfsoc_uart.c	341
1 file changed, 125 insertions(+), 216 deletions(-)
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index 6b1c92c1c27f..b6116413ca0d 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -36,8 +36,6 @@ sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count);
 static struct uart_driver sirfsoc_uart_drv;
 
 static void sirfsoc_uart_tx_dma_complete_callback(void *param);
-static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port);
-static void sirfsoc_uart_rx_dma_complete_callback(void *param);
 static const struct sirfsoc_baudrate_to_regv baudrate_to_regv[] = {
 	{4000000, 2359296},
 	{3500000, 1310721},
@@ -465,144 +463,6 @@ static void sirfsoc_uart_tx_dma_complete_callback(void *param)
 	spin_unlock_irqrestore(&port->lock, flags);
 }
 
-static void sirfsoc_uart_insert_rx_buf_to_tty(
-		struct sirfsoc_uart_port *sirfport, int count)
-{
-	struct uart_port *port = &sirfport->port;
-	struct tty_port *tport = &port->state->port;
-	int inserted;
-
-	inserted = tty_insert_flip_string(tport,
-		sirfport->rx_dma_items[sirfport->rx_completed].xmit.buf, count);
-	port->icount.rx += inserted;
-}
-
-static void sirfsoc_rx_submit_one_dma_desc(struct uart_port *port, int index)
-{
-	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
-
-	sirfport->rx_dma_items[index].xmit.tail =
-		sirfport->rx_dma_items[index].xmit.head = 0;
-	sirfport->rx_dma_items[index].desc =
-		dmaengine_prep_slave_single(sirfport->rx_dma_chan,
-		sirfport->rx_dma_items[index].dma_addr, SIRFSOC_RX_DMA_BUF_SIZE,
-		DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
-	if (IS_ERR_OR_NULL(sirfport->rx_dma_items[index].desc)) {
-		dev_err(port->dev, "DMA slave single fail\n");
-		return;
-	}
-	sirfport->rx_dma_items[index].desc->callback =
-		sirfsoc_uart_rx_dma_complete_callback;
-	sirfport->rx_dma_items[index].desc->callback_param = sirfport;
-	sirfport->rx_dma_items[index].cookie =
-		dmaengine_submit(sirfport->rx_dma_items[index].desc);
-	dma_async_issue_pending(sirfport->rx_dma_chan);
-}
-
-static void sirfsoc_rx_tmo_process_tl(unsigned long param)
-{
-	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
-	struct uart_port *port = &sirfport->port;
-	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
-	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
-	struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
-	unsigned int count;
-	struct dma_tx_state tx_state;
-	unsigned long flags;
-	int i = 0;
-
-	spin_lock_irqsave(&port->lock, flags);
-	while (DMA_COMPLETE == dmaengine_tx_status(sirfport->rx_dma_chan,
-			sirfport->rx_dma_items[sirfport->rx_completed].cookie,
-			&tx_state)) {
-		sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
-				SIRFSOC_RX_DMA_BUF_SIZE);
-		sirfport->rx_completed++;
-		sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
-		i++;
-		if (i > SIRFSOC_RX_LOOP_BUF_CNT)
-			break;
-	}
-	count = CIRC_CNT(sirfport->rx_dma_items[sirfport->rx_issued].xmit.head,
-		sirfport->rx_dma_items[sirfport->rx_issued].xmit.tail,
-		SIRFSOC_RX_DMA_BUF_SIZE);
-	if (count > 0)
-		sirfsoc_uart_insert_rx_buf_to_tty(sirfport, count);
-	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
-		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
-		SIRFUART_IO_MODE);
-	sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
-	if (sirfport->rx_io_count == 4) {
-		sirfport->rx_io_count = 0;
-		wr_regl(port, ureg->sirfsoc_int_st_reg,
-			uint_st->sirfsoc_rx_done);
-		if (!sirfport->is_atlas7)
-			wr_regl(port, ureg->sirfsoc_int_en_reg,
-				rd_regl(port, ureg->sirfsoc_int_en_reg) &
-				~(uint_en->sirfsoc_rx_done_en));
-		else
-			wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
-				uint_en->sirfsoc_rx_done_en);
-		sirfsoc_uart_start_next_rx_dma(port);
-	} else {
-		wr_regl(port, ureg->sirfsoc_int_st_reg,
-			uint_st->sirfsoc_rx_done);
-		if (!sirfport->is_atlas7)
-			wr_regl(port, ureg->sirfsoc_int_en_reg,
-				rd_regl(port, ureg->sirfsoc_int_en_reg) |
-				(uint_en->sirfsoc_rx_done_en));
-		else
-			wr_regl(port, ureg->sirfsoc_int_en_reg,
-				uint_en->sirfsoc_rx_done_en);
-	}
-	spin_unlock_irqrestore(&port->lock, flags);
-	tty_flip_buffer_push(&port->state->port);
-}
-
-static void sirfsoc_uart_handle_rx_tmo(struct sirfsoc_uart_port *sirfport)
-{
-	struct uart_port *port = &sirfport->port;
-	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
-	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
-	struct dma_tx_state tx_state;
-	dmaengine_tx_status(sirfport->rx_dma_chan,
-		sirfport->rx_dma_items[sirfport->rx_issued].cookie, &tx_state);
-	dmaengine_terminate_all(sirfport->rx_dma_chan);
-	sirfport->rx_dma_items[sirfport->rx_issued].xmit.head =
-		SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
-	if (!sirfport->is_atlas7)
-		wr_regl(port, ureg->sirfsoc_int_en_reg,
-			rd_regl(port, ureg->sirfsoc_int_en_reg) &
-			~(uint_en->sirfsoc_rx_timeout_en));
-	else
-		wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
-			uint_en->sirfsoc_rx_timeout_en);
-	tasklet_schedule(&sirfport->rx_tmo_process_tasklet);
-}
-
-static void sirfsoc_uart_handle_rx_done(struct sirfsoc_uart_port *sirfport)
-{
-	struct uart_port *port = &sirfport->port;
-	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
-	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
-	struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
-
-	sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
-	if (sirfport->rx_io_count == 4) {
-		sirfport->rx_io_count = 0;
-		if (!sirfport->is_atlas7)
-			wr_regl(port, ureg->sirfsoc_int_en_reg,
-				rd_regl(port, ureg->sirfsoc_int_en_reg) &
-				~(uint_en->sirfsoc_rx_done_en));
-		else
-			wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
-				uint_en->sirfsoc_rx_done_en);
-		wr_regl(port, ureg->sirfsoc_int_st_reg,
-			uint_st->sirfsoc_rx_timeout);
-		sirfsoc_uart_start_next_rx_dma(port);
-	}
-}
-
 static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
 {
 	unsigned long intr_status;
@@ -659,12 +519,8 @@ recv_char:
 		uart_handle_cts_change(port, cts_status);
 		wake_up_interruptible(&state->port.delta_msr_wait);
 	}
-	if (sirfport->rx_dma_chan) {
-		if (intr_status & uint_st->sirfsoc_rx_timeout)
-			sirfsoc_uart_handle_rx_tmo(sirfport);
-		if (intr_status & uint_st->sirfsoc_rx_done)
-			sirfsoc_uart_handle_rx_done(sirfport);
-	} else if (intr_status & SIRFUART_RX_IO_INT_ST(uint_st)) {
+	if (!sirfport->rx_dma_chan &&
+		(intr_status & SIRFUART_RX_IO_INT_ST(uint_st))) {
 		/*
 		 * chip will trigger continuous RX_TIMEOUT interrupt
 		 * in RXFIFO empty and not trigger if RXFIFO recevice
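The surviving PIO branch depends on the behaviour that comment describes: once the RX FIFO drains, no further RX_TIMEOUT interrupt arrives, so the handler has to empty the FIFO in one pass by CPU reads. A minimal sketch of such a PIO drain loop; rx_fifo_empty() and rx_fifo_read() are hypothetical stand-ins for the driver's SoC-specific rd_regl() accesses, not real helpers in this file:

#include <linux/serial_core.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Hypothetical FIFO accessors standing in for rd_regl()-based reads. */
static bool rx_fifo_empty(struct uart_port *port);
static unsigned char rx_fifo_read(struct uart_port *port);

/* Drain the RX FIFO in one pass: no new RX_TIMEOUT fires once it is empty. */
static unsigned int pio_rx_chars(struct uart_port *port, unsigned int max_count)
{
	struct tty_port *tport = &port->state->port;
	unsigned int count = 0;

	while (!rx_fifo_empty(port) && count < max_count) {
		tty_insert_flip_char(tport, rx_fifo_read(port), TTY_NORMAL);
		port->icount.rx++;
		count++;
	}
	if (count)
		tty_flip_buffer_push(tport);
	return count;
}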
@@ -734,47 +590,8 @@ recv_char:
 	return IRQ_HANDLED;
 }
 
-static void sirfsoc_uart_rx_dma_complete_tl(unsigned long param)
-{
-	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
-	struct uart_port *port = &sirfport->port;
-	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
-	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
-	struct dma_tx_state tx_state;
-	unsigned long flags;
-	int i = 0;
-
-	spin_lock_irqsave(&port->lock, flags);
-	while (DMA_COMPLETE == dmaengine_tx_status(sirfport->rx_dma_chan,
-			sirfport->rx_dma_items[sirfport->rx_completed].cookie,
-			&tx_state)) {
-		sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
-				SIRFSOC_RX_DMA_BUF_SIZE);
-		if (rd_regl(port, ureg->sirfsoc_int_en_reg) &
-				uint_en->sirfsoc_rx_timeout_en)
-			sirfsoc_rx_submit_one_dma_desc(port,
-					sirfport->rx_completed++);
-		else
-			sirfport->rx_completed++;
-		sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
-		i++;
-		if (i > SIRFSOC_RX_LOOP_BUF_CNT)
-			break;
-	}
-	spin_unlock_irqrestore(&port->lock, flags);
-	tty_flip_buffer_push(&port->state->port);
-}
-
 static void sirfsoc_uart_rx_dma_complete_callback(void *param)
 {
-	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sirfport->port.lock, flags);
-	sirfport->rx_issued++;
-	sirfport->rx_issued %= SIRFSOC_RX_LOOP_BUF_CNT;
-	tasklet_schedule(&sirfport->rx_dma_complete_tasklet);
-	spin_unlock_irqrestore(&sirfport->port.lock, flags);
 }
 
 /* submit rx dma task into dmaengine */
@@ -783,14 +600,27 @@ static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
 	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
 	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
-	int i;
 	sirfport->rx_io_count = 0;
 	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
 		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
 		~SIRFUART_IO_MODE);
-	for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
-		sirfsoc_rx_submit_one_dma_desc(port, i);
-	sirfport->rx_completed = sirfport->rx_issued = 0;
+	sirfport->rx_dma_items.xmit.tail =
+		sirfport->rx_dma_items.xmit.head = 0;
+	sirfport->rx_dma_items.desc =
+		dmaengine_prep_dma_cyclic(sirfport->rx_dma_chan,
+		sirfport->rx_dma_items.dma_addr, SIRFSOC_RX_DMA_BUF_SIZE,
+		SIRFSOC_RX_DMA_BUF_SIZE / 2,
+		DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
+	if (IS_ERR_OR_NULL(sirfport->rx_dma_items.desc)) {
+		dev_err(port->dev, "DMA slave single fail\n");
+		return;
+	}
+	sirfport->rx_dma_items.desc->callback =
+		sirfsoc_uart_rx_dma_complete_callback;
+	sirfport->rx_dma_items.desc->callback_param = sirfport;
+	sirfport->rx_dma_items.cookie =
+		dmaengine_submit(sirfport->rx_dma_items.desc);
+	dma_async_issue_pending(sirfport->rx_dma_chan);
 	if (!sirfport->is_atlas7)
 		wr_regl(port, ureg->sirfsoc_int_en_reg,
 			rd_regl(port, ureg->sirfsoc_int_en_reg) |
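This hunk is the heart of the change: the pool of SIRFSOC_RX_LOOP_BUF_CNT single-shot descriptors gives way to one cyclic descriptor over a single coherent buffer, with an interrupt raised every half buffer. A minimal sketch of that dmaengine pattern under assumed names (start_cyclic_rx() and RX_RING_SIZE are illustrative, not the driver's):

#include <linux/dmaengine.h>

#define RX_RING_SIZE 4096	/* illustrative; stands in for SIRFSOC_RX_DMA_BUF_SIZE */

/* Arm a free-running cyclic RX transfer into a coherent ring buffer. */
static int start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf_dma,
			   dma_async_tx_callback cb, void *cb_arg,
			   dma_cookie_t *cookie)
{
	struct dma_async_tx_descriptor *desc;

	/* Two periods per ring: the engine interrupts at every half buffer. */
	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, RX_RING_SIZE,
					 RX_RING_SIZE / 2, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;
	desc->callback = cb;
	desc->callback_param = cb_arg;
	*cookie = dmaengine_submit(desc);
	/* Nothing moves until the pending queue is kicked. */
	dma_async_issue_pending(chan);
	return 0;
}

Because a cyclic transfer never completes, the driver no longer resubmits descriptors from the completion path; progress is instead read back from the residue reported by dmaengine_tx_status(), as the hrtimer callback further down shows.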
@@ -1059,6 +889,7 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
 		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_DMA_MODE);
 	else
 		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_IO_MODE);
+	sirfport->rx_period_time = 20000000;
 	/* Reset Rx/Tx FIFO Threshold level for proper baudrate */
 	if (set_baud < 1000000)
 		threshold_div = 1;
@@ -1110,6 +941,9 @@ static int sirfsoc_uart_startup(struct uart_port *port)
 	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
 		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
 		SIRFUART_IO_MODE);
+	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
+		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
+		~SIRFUART_RX_DMA_FLUSH);
 	wr_regl(port, ureg->sirfsoc_tx_dma_io_len, 0);
 	wr_regl(port, ureg->sirfsoc_rx_dma_io_len, 0);
 	wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_RX_EN | SIRFUART_TX_EN);
@@ -1147,8 +981,16 @@ static int sirfsoc_uart_startup(struct uart_port *port)
 			goto init_rx_err;
 		}
 	}
-
 	enable_irq(port->irq);
+	if (sirfport->rx_dma_chan && !sirfport->is_hrt_enabled) {
+		sirfport->is_hrt_enabled = true;
+		sirfport->rx_period_time = 20000000;
+		sirfport->rx_dma_items.xmit.tail =
+			sirfport->rx_dma_items.xmit.head = 0;
+		hrtimer_start(&sirfport->hrt,
+			ns_to_ktime(sirfport->rx_period_time),
+			HRTIMER_MODE_REL);
+	}
 
 	return 0;
 init_rx_err:
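With no per-buffer completion interrupts left to drive RX, startup arms a relative hrtimer with a 20 ms period (20000000 ns). A condensed sketch of the init-then-arm pattern; struct rx_poller and rx_poll_callback are assumed names for illustration (the driver splits this between probe and startup):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct rx_poller {			/* illustrative container */
	struct hrtimer hrt;
	u64 period_ns;
};

static enum hrtimer_restart rx_poll_callback(struct hrtimer *hrt);

/* Init once, then arm; the callback re-arms itself on every expiry. */
static void arm_rx_poll(struct rx_poller *rp)
{
	hrtimer_init(&rp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rp->hrt.function = rx_poll_callback;
	rp->period_ns = 20 * NSEC_PER_MSEC;	/* 20000000 ns, as in the hunk */
	hrtimer_start(&rp->hrt, ns_to_ktime(rp->period_ns), HRTIMER_MODE_REL);
}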
@@ -1176,6 +1018,13 @@ static void sirfsoc_uart_shutdown(struct uart_port *port)
 	}
 	if (sirfport->tx_dma_chan)
 		sirfport->tx_dma_state = TX_DMA_IDLE;
+	if (sirfport->rx_dma_chan && sirfport->is_hrt_enabled) {
+		while ((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
+			SIRFUART_RX_FIFO_MASK) > 0)
+			;
+		sirfport->is_hrt_enabled = false;
+		hrtimer_cancel(&sirfport->hrt);
+	}
 }
 
 static const char *sirfsoc_uart_type(struct uart_port *port)
@@ -1310,6 +1159,70 @@ static struct uart_driver sirfsoc_uart_drv = {
 #endif
 };
 
+static enum hrtimer_restart
+	sirfsoc_uart_rx_dma_hrtimer_callback(struct hrtimer *hrt)
+{
+	struct sirfsoc_uart_port *sirfport;
+	struct uart_port *port;
+	int count, inserted;
+	struct dma_tx_state tx_state;
+	struct tty_struct *tty;
+	struct sirfsoc_register *ureg;
+	struct circ_buf *xmit;
+
+	sirfport = container_of(hrt, struct sirfsoc_uart_port, hrt);
+	port = &sirfport->port;
+	inserted = 0;
+	tty = port->state->port.tty;
+	ureg = &sirfport->uart_reg->uart_reg;
+	xmit = &sirfport->rx_dma_items.xmit;
+	dmaengine_tx_status(sirfport->rx_dma_chan,
+			sirfport->rx_dma_items.cookie, &tx_state);
+	xmit->head = SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
+	count = CIRC_CNT_TO_END(xmit->head, xmit->tail,
+			SIRFSOC_RX_DMA_BUF_SIZE);
+	while (count > 0) {
+		inserted = tty_insert_flip_string(tty->port,
+			(const unsigned char *)&xmit->buf[xmit->tail], count);
+		if (!inserted)
+			goto next_hrt;
+		port->icount.rx += inserted;
+		xmit->tail = (xmit->tail + inserted) &
+				(SIRFSOC_RX_DMA_BUF_SIZE - 1);
+		count = CIRC_CNT_TO_END(xmit->head, xmit->tail,
+				SIRFSOC_RX_DMA_BUF_SIZE);
+		tty_flip_buffer_push(tty->port);
+	}
+	/*
+	 * if RX DMA buffer data have all push into tty buffer, and there is
+	 * only little data(less than a dma transfer unit) left in rxfifo,
+	 * fetch it out in pio mode and switch back to dma immediately
+	 */
+	if (!inserted && !count &&
+		((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
+		SIRFUART_RX_FIFO_MASK) > 0)) {
+		/* switch to pio mode */
+		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
+			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
+			SIRFUART_IO_MODE);
+		while ((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
+			SIRFUART_RX_FIFO_MASK) > 0) {
+			if (sirfsoc_uart_pio_rx_chars(port, 16) > 0)
+				tty_flip_buffer_push(tty->port);
+		}
+		wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
+		wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
+		wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
+		/* switch back to dma mode */
+		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
+			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
+			~SIRFUART_IO_MODE);
+	}
+next_hrt:
+	hrtimer_forward_now(hrt, ns_to_ktime(sirfport->rx_period_time));
+	return HRTIMER_RESTART;
+}
+
 static struct of_device_id sirfsoc_uart_ids[] = {
 	{ .compatible = "sirf,prima2-uart", .data = &sirfsoc_uart,},
 	{ .compatible = "sirf,atlas7-uart", .data = &sirfsoc_uart},
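The new callback above now does the real RX work: it derives the DMA write head from the residue reported by dmaengine_tx_status(), drains the readable span of the ring into the tty, and re-arms itself. A condensed, self-contained sketch of that pattern, continuing the rx_poller assumption from the earlier note (the struct and its fields are illustrative, not the driver's):

#include <linux/circ_buf.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/tty_flip.h>

#define RX_RING_SIZE 4096		/* illustrative power-of-two ring size */

struct rx_poller {
	struct hrtimer hrt;
	u64 period_ns;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	struct circ_buf ring;		/* ring.buf is the coherent DMA buffer */
	struct tty_port *tport;
};

static enum hrtimer_restart rx_poll_callback(struct hrtimer *hrt)
{
	struct rx_poller *rp = container_of(hrt, struct rx_poller, hrt);
	struct circ_buf *ring = &rp->ring;
	struct dma_tx_state state;
	int count, n;

	/* The cyclic transfer never completes; its residue is the write head. */
	dmaengine_tx_status(rp->chan, rp->cookie, &state);
	ring->head = RX_RING_SIZE - state.residue;
	/* CIRC_CNT_TO_END yields only the run up to the buffer end; the
	 * next iteration picks up the wrapped remainder from index 0. */
	while ((count = CIRC_CNT_TO_END(ring->head, ring->tail,
					RX_RING_SIZE)) > 0) {
		n = tty_insert_flip_string(rp->tport,
			(const unsigned char *)ring->buf + ring->tail, count);
		if (!n)
			break;		/* tty buffer full: retry next tick */
		ring->tail = (ring->tail + n) & (RX_RING_SIZE - 1);
	}
	tty_flip_buffer_push(rp->tport);
	hrtimer_forward_now(hrt, ns_to_ktime(rp->period_ns));
	return HRTIMER_RESTART;
}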
@@ -1325,7 +1238,6 @@ static int sirfsoc_uart_probe(struct platform_device *pdev)
 	struct uart_port *port;
 	struct resource *res;
 	int ret;
-	int i, j;
 	struct dma_slave_config slv_cfg = {
 		.src_maxburst = 2,
 	};
@@ -1413,12 +1325,9 @@ usp_no_flow_control:
 		ret = -EFAULT;
 		goto err;
 	}
-	tasklet_init(&sirfport->rx_dma_complete_tasklet,
-		sirfsoc_uart_rx_dma_complete_tl, (unsigned long)sirfport);
-	tasklet_init(&sirfport->rx_tmo_process_tasklet,
-		sirfsoc_rx_tmo_process_tl, (unsigned long)sirfport);
 	port->mapbase = res->start;
-	port->membase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	port->membase = devm_ioremap(&pdev->dev,
+			res->start, resource_size(res));
 	if (!port->membase) {
 		dev_err(&pdev->dev, "Cannot remap resource.\n");
 		ret = -ENOMEM;
@@ -1450,30 +1359,32 @@ usp_no_flow_control:
 	}
 
 	sirfport->rx_dma_chan = dma_request_slave_channel(port->dev, "rx");
-	for (i = 0; sirfport->rx_dma_chan && i < SIRFSOC_RX_LOOP_BUF_CNT; i++) {
-		sirfport->rx_dma_items[i].xmit.buf =
-			dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
-			&sirfport->rx_dma_items[i].dma_addr, GFP_KERNEL);
-		if (!sirfport->rx_dma_items[i].xmit.buf) {
-			dev_err(port->dev, "Uart alloc bufa failed\n");
-			ret = -ENOMEM;
-			goto alloc_coherent_err;
-		}
-		sirfport->rx_dma_items[i].xmit.head =
-			sirfport->rx_dma_items[i].xmit.tail = 0;
+	sirfport->rx_dma_items.xmit.buf =
+		dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
+		&sirfport->rx_dma_items.dma_addr, GFP_KERNEL);
+	if (!sirfport->rx_dma_items.xmit.buf) {
+		dev_err(port->dev, "Uart alloc bufa failed\n");
+		ret = -ENOMEM;
+		goto alloc_coherent_err;
 	}
+	sirfport->rx_dma_items.xmit.head =
+		sirfport->rx_dma_items.xmit.tail = 0;
 	if (sirfport->rx_dma_chan)
 		dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg);
 	sirfport->tx_dma_chan = dma_request_slave_channel(port->dev, "tx");
 	if (sirfport->tx_dma_chan)
 		dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg);
+	if (sirfport->rx_dma_chan) {
+		hrtimer_init(&sirfport->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		sirfport->hrt.function = sirfsoc_uart_rx_dma_hrtimer_callback;
+		sirfport->is_hrt_enabled = false;
+	}
 
 	return 0;
 alloc_coherent_err:
-	for (j = 0; j < i; j++)
-		dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
-			sirfport->rx_dma_items[j].xmit.buf,
-			sirfport->rx_dma_items[j].dma_addr);
+	dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
+			sirfport->rx_dma_items.xmit.buf,
+			sirfport->rx_dma_items.dma_addr);
 	dma_release_channel(sirfport->rx_dma_chan);
 err:
 	return ret;
@@ -1485,13 +1396,11 @@ static int sirfsoc_uart_remove(struct platform_device *pdev)
 	struct uart_port *port = &sirfport->port;
 	uart_remove_one_port(&sirfsoc_uart_drv, port);
 	if (sirfport->rx_dma_chan) {
-		int i;
 		dmaengine_terminate_all(sirfport->rx_dma_chan);
 		dma_release_channel(sirfport->rx_dma_chan);
-		for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
-			dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
-				sirfport->rx_dma_items[i].xmit.buf,
-				sirfport->rx_dma_items[i].dma_addr);
+		dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
+			sirfport->rx_dma_items.xmit.buf,
+			sirfport->rx_dma_items.dma_addr);
 	}
 	if (sirfport->tx_dma_chan) {
 		dmaengine_terminate_all(sirfport->tx_dma_chan);