diff options
author | Elen Song <elen.song@atmel.com> | 2013-07-22 04:30:26 -0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2013-07-29 16:03:29 -0400 |
commit | 08f738be88bb7a0163afd810a19b9cb13c79808f (patch) | |
tree | 521da5c1b71338e9b44020ae7e4561b06cf9ceff | |
parent | a930e52875e96709cc01e4babcb8351687c5a58e (diff) |
serial: at91: add tx dma support
Request a slave DMA channel for TX DMA use. TX DMA sets up a single transfer;
when the transfer completes, the atmel_complete_tx_dma callback performs the completion handling.
Signed-off-by: Elen Song <elen.song@atmel.com>
Signed-off-by: Ludovic Desroches <ludovic.desroches@atmel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r-- | drivers/tty/serial/atmel_serial.c | 210 |
1 files changed, 206 insertions, 4 deletions
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index b56123d3affe..13c1d3170119 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c | |||
@@ -143,9 +143,15 @@ struct atmel_uart_port { | |||
143 | short pdc_rx_idx; /* current PDC RX buffer */ | 143 | short pdc_rx_idx; /* current PDC RX buffer */ |
144 | struct atmel_dma_buffer pdc_rx[2]; /* PDC receier */ | 144 | struct atmel_dma_buffer pdc_rx[2]; /* PDC receier */ |
145 | 145 | ||
146 | bool use_dma_tx; /* enable DMA transmitter */ | ||
146 | bool use_pdc_tx; /* enable PDC transmitter */ | 147 | bool use_pdc_tx; /* enable PDC transmitter */ |
147 | struct atmel_dma_buffer pdc_tx; /* PDC transmitter */ | 148 | struct atmel_dma_buffer pdc_tx; /* PDC transmitter */ |
148 | 149 | ||
150 | spinlock_t lock_tx; /* port lock */ | ||
151 | struct dma_chan *chan_tx; | ||
152 | struct dma_async_tx_descriptor *desc_tx; | ||
153 | dma_cookie_t cookie_tx; | ||
154 | struct scatterlist sg_tx; | ||
149 | struct tasklet_struct tasklet; | 155 | struct tasklet_struct tasklet; |
150 | unsigned int irq_status; | 156 | unsigned int irq_status; |
151 | unsigned int irq_status_prev; | 157 | unsigned int irq_status_prev; |
@@ -211,6 +217,13 @@ static bool atmel_use_pdc_tx(struct uart_port *port) | |||
211 | } | 217 | } |
212 | #endif | 218 | #endif |
213 | 219 | ||
/*
 * Report whether this port is configured to use the DMA engine
 * (as opposed to the PDC or PIO) for transmission.
 */
static bool atmel_use_dma_tx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	return atmel_port->use_dma_tx;
}
226 | |||
214 | /* Enable or disable the rs485 support */ | 227 | /* Enable or disable the rs485 support */ |
215 | void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf) | 228 | void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf) |
216 | { | 229 | { |
@@ -569,6 +582,182 @@ static void atmel_tx_chars(struct uart_port *port) | |||
569 | UART_PUT_IER(port, atmel_port->tx_done_mask); | 582 | UART_PUT_IER(port, atmel_port->tx_done_mask); |
570 | } | 583 | } |
571 | 584 | ||
585 | static void atmel_complete_tx_dma(void *arg) | ||
586 | { | ||
587 | struct atmel_uart_port *atmel_port = arg; | ||
588 | struct uart_port *port = &atmel_port->uart; | ||
589 | struct circ_buf *xmit = &port->state->xmit; | ||
590 | struct dma_chan *chan = atmel_port->chan_tx; | ||
591 | unsigned long flags; | ||
592 | |||
593 | spin_lock_irqsave(&port->lock, flags); | ||
594 | |||
595 | if (chan) | ||
596 | dmaengine_terminate_all(chan); | ||
597 | xmit->tail += sg_dma_len(&atmel_port->sg_tx); | ||
598 | xmit->tail &= UART_XMIT_SIZE - 1; | ||
599 | |||
600 | port->icount.tx += sg_dma_len(&atmel_port->sg_tx); | ||
601 | |||
602 | spin_lock_irq(&atmel_port->lock_tx); | ||
603 | async_tx_ack(atmel_port->desc_tx); | ||
604 | atmel_port->cookie_tx = -EINVAL; | ||
605 | atmel_port->desc_tx = NULL; | ||
606 | spin_unlock_irq(&atmel_port->lock_tx); | ||
607 | |||
608 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | ||
609 | uart_write_wakeup(port); | ||
610 | |||
611 | /* Do we really need this? */ | ||
612 | if (!uart_circ_empty(xmit)) | ||
613 | tasklet_schedule(&atmel_port->tasklet); | ||
614 | |||
615 | spin_unlock_irqrestore(&port->lock, flags); | ||
616 | } | ||
617 | |||
/*
 * Tear down the TX DMA channel: stop any in-flight transfer, hand the
 * channel back to the dmaengine core, unmap the xmit buffer, and reset
 * the port's TX DMA bookkeeping so a later prepare can start clean.
 */
static void atmel_release_tx_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct dma_chan *chan = atmel_port->chan_tx;

	if (chan) {
		/* Abort first so the channel is idle before release. */
		dmaengine_terminate_all(chan);
		dma_release_channel(chan);
		dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1,
				DMA_MEM_TO_DEV);
	}

	atmel_port->desc_tx = NULL;
	atmel_port->chan_tx = NULL;
	atmel_port->cookie_tx = -EINVAL;
}
634 | |||
/*
 * Called from the tasklet, with the TXRDY interrupt disabled.
 *
 * Queue a single DMA transfer covering the contiguous part of the
 * circular xmit buffer (from tail up to head or to the end of the
 * buffer, whichever comes first). The wrapped-around remainder, if any,
 * is sent on a later invocation, rescheduled by the completion callback.
 */
static void atmel_tx_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct circ_buf *xmit = &port->state->xmit;
	struct dma_chan *chan = atmel_port->chan_tx;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sg = &atmel_port->sg_tx;

	/* Make sure we have an idle channel */
	if (atmel_port->desc_tx != NULL)
		return;

	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
		/*
		 * DMA is idle now.
		 * Port xmit buffer is already mapped,
		 * and it is one page... Just adjust
		 * offsets and lengths. Since it is a circular buffer,
		 * we have to transmit till the end, and then the rest.
		 * Take the port lock to get a
		 * consistent xmit buffer state.
		 */
		sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
		/* Rebase the bus address of the mapped page to the tail. */
		sg_dma_address(sg) = (sg_dma_address(sg) &
					~(UART_XMIT_SIZE - 1))
					+ sg->offset;
		/* Only the contiguous run up to the buffer end. */
		sg_dma_len(sg) = CIRC_CNT_TO_END(xmit->head,
						xmit->tail,
						UART_XMIT_SIZE);
		BUG_ON(!sg_dma_len(sg));

		desc = dmaengine_prep_slave_sg(chan,
						sg,
						1,
						DMA_MEM_TO_DEV,
						DMA_PREP_INTERRUPT |
						DMA_CTRL_ACK);
		if (!desc) {
			dev_err(port->dev, "Failed to send via dma!\n");
			return;
		}

		dma_sync_sg_for_device(port->dev, sg, 1, DMA_MEM_TO_DEV);

		atmel_port->desc_tx = desc;
		desc->callback = atmel_complete_tx_dma;
		desc->callback_param = atmel_port;
		atmel_port->cookie_tx = dmaengine_submit(desc);

	} else {
		if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
			/* DMA done, stop TX, start RX for RS485 */
			atmel_start_rx(port);
		}
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
}
697 | |||
698 | static int atmel_prepare_tx_dma(struct uart_port *port) | ||
699 | { | ||
700 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); | ||
701 | dma_cap_mask_t mask; | ||
702 | struct dma_slave_config config; | ||
703 | int ret, nent; | ||
704 | |||
705 | dma_cap_zero(mask); | ||
706 | dma_cap_set(DMA_SLAVE, mask); | ||
707 | |||
708 | atmel_port->chan_tx = dma_request_slave_channel(port->dev, "tx"); | ||
709 | if (atmel_port->chan_tx == NULL) | ||
710 | goto chan_err; | ||
711 | dev_info(port->dev, "using %s for tx DMA transfers\n", | ||
712 | dma_chan_name(atmel_port->chan_tx)); | ||
713 | |||
714 | spin_lock_init(&atmel_port->lock_tx); | ||
715 | sg_init_table(&atmel_port->sg_tx, 1); | ||
716 | /* UART circular tx buffer is an aligned page. */ | ||
717 | BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK); | ||
718 | sg_set_page(&atmel_port->sg_tx, | ||
719 | virt_to_page(port->state->xmit.buf), | ||
720 | UART_XMIT_SIZE, | ||
721 | (int)port->state->xmit.buf & ~PAGE_MASK); | ||
722 | nent = dma_map_sg(port->dev, | ||
723 | &atmel_port->sg_tx, | ||
724 | 1, | ||
725 | DMA_MEM_TO_DEV); | ||
726 | |||
727 | if (!nent) { | ||
728 | dev_dbg(port->dev, "need to release resource of dma\n"); | ||
729 | goto chan_err; | ||
730 | } else { | ||
731 | dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__, | ||
732 | sg_dma_len(&atmel_port->sg_tx), | ||
733 | port->state->xmit.buf, | ||
734 | sg_dma_address(&atmel_port->sg_tx)); | ||
735 | } | ||
736 | |||
737 | /* Configure the slave DMA */ | ||
738 | memset(&config, 0, sizeof(config)); | ||
739 | config.direction = DMA_MEM_TO_DEV; | ||
740 | config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
741 | config.dst_addr = port->mapbase + ATMEL_US_THR; | ||
742 | |||
743 | ret = dmaengine_device_control(atmel_port->chan_tx, | ||
744 | DMA_SLAVE_CONFIG, | ||
745 | (unsigned long)&config); | ||
746 | if (ret) { | ||
747 | dev_err(port->dev, "DMA tx slave configuration failed\n"); | ||
748 | goto chan_err; | ||
749 | } | ||
750 | |||
751 | return 0; | ||
752 | |||
753 | chan_err: | ||
754 | dev_err(port->dev, "TX channel not available, switch to pio\n"); | ||
755 | atmel_port->use_dma_tx = 0; | ||
756 | if (atmel_port->chan_tx) | ||
757 | atmel_release_tx_dma(port); | ||
758 | return -EINVAL; | ||
759 | } | ||
760 | |||
572 | /* | 761 | /* |
573 | * receive interrupt handler. | 762 | * receive interrupt handler. |
574 | */ | 763 | */ |
@@ -997,7 +1186,11 @@ static void atmel_set_ops(struct uart_port *port) | |||
997 | atmel_port->release_rx = NULL; | 1186 | atmel_port->release_rx = NULL; |
998 | } | 1187 | } |
999 | 1188 | ||
1000 | if (atmel_use_pdc_tx(port)) { | 1189 | if (atmel_use_dma_tx(port)) { |
1190 | atmel_port->prepare_tx = &atmel_prepare_tx_dma; | ||
1191 | atmel_port->schedule_tx = &atmel_tx_dma; | ||
1192 | atmel_port->release_tx = &atmel_release_tx_dma; | ||
1193 | } else if (atmel_use_pdc_tx(port)) { | ||
1001 | atmel_port->prepare_tx = &atmel_prepare_tx_pdc; | 1194 | atmel_port->prepare_tx = &atmel_prepare_tx_pdc; |
1002 | atmel_port->schedule_tx = &atmel_tx_pdc; | 1195 | atmel_port->schedule_tx = &atmel_tx_pdc; |
1003 | atmel_port->release_tx = &atmel_release_tx_pdc; | 1196 | atmel_port->release_tx = &atmel_release_tx_pdc; |
@@ -1488,10 +1681,18 @@ static void atmel_of_init_port(struct atmel_uart_port *atmel_port, | |||
1488 | else | 1681 | else |
1489 | atmel_port->use_pdc_rx = false; | 1682 | atmel_port->use_pdc_rx = false; |
1490 | 1683 | ||
1491 | if (of_get_property(np, "atmel,use-dma-tx", NULL)) | 1684 | if (of_get_property(np, "atmel,use-dma-tx", NULL)) { |
1492 | atmel_port->use_pdc_tx = true; | 1685 | if (of_get_property(np, "dmas", NULL)) { |
1493 | else | 1686 | atmel_port->use_dma_tx = true; |
1687 | atmel_port->use_pdc_tx = false; | ||
1688 | } else { | ||
1689 | atmel_port->use_dma_tx = false; | ||
1690 | atmel_port->use_pdc_tx = true; | ||
1691 | } | ||
1692 | } else { | ||
1693 | atmel_port->use_dma_tx = false; | ||
1494 | atmel_port->use_pdc_tx = false; | 1694 | atmel_port->use_pdc_tx = false; |
1695 | } | ||
1495 | 1696 | ||
1496 | /* rs485 properties */ | 1697 | /* rs485 properties */ |
1497 | if (of_property_read_u32_array(np, "rs485-rts-delay", | 1698 | if (of_property_read_u32_array(np, "rs485-rts-delay", |
@@ -1525,6 +1726,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port, | |||
1525 | } else { | 1726 | } else { |
1526 | atmel_port->use_pdc_rx = pdata->use_dma_rx; | 1727 | atmel_port->use_pdc_rx = pdata->use_dma_rx; |
1527 | atmel_port->use_pdc_tx = pdata->use_dma_tx; | 1728 | atmel_port->use_pdc_tx = pdata->use_dma_tx; |
1729 | atmel_port->use_dma_tx = false; | ||
1528 | atmel_port->rs485 = pdata->rs485; | 1730 | atmel_port->rs485 = pdata->rs485; |
1529 | } | 1731 | } |
1530 | 1732 | ||