Diffstat (limited to 'drivers/tty')
 drivers/tty/serial/amba-pl010.c |   2 +-
 drivers/tty/serial/amba-pl011.c | 513 ++++++++++++++++++++++++++++++++------
 2 files changed, 452 insertions(+), 63 deletions(-)
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 2904aa044126..d742dd2c525c 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -676,7 +676,7 @@ static struct uart_driver amba_reg = {
 	.cons = AMBA_CONSOLE,
 };
 
-static int pl010_probe(struct amba_device *dev, struct amba_id *id)
+static int pl010_probe(struct amba_device *dev, const struct amba_id *id)
 {
 	struct uart_amba_port *uap;
 	void __iomem *base;
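Note: the probe() signature change in both drivers tracks the AMBA bus core
constifying 'struct amba_id', so that primecell ID tables can be marked const
and placed in read-only data. A minimal sketch of a driver built against the
const API (the driver name, ID value and probe body here are hypothetical):

    #include <linux/amba/bus.h>
    #include <linux/module.h>

    static int demo_probe(struct amba_device *dev, const struct amba_id *id)
    {
            /* id->data carries per-variant data, as vendor_data does below */
            dev_info(&dev->dev, "bound, variant data at %p\n", id->data);
            return 0;
    }

    static int demo_remove(struct amba_device *dev)
    {
            return 0;
    }

    /* Hypothetical table: match the peripheral ID read from the primecell */
    static const struct amba_id demo_ids[] = {
            { .id = 0x00041011, .mask = 0x000fffff },
            { 0, 0 },
    };

    static struct amba_driver demo_driver = {
            .drv            = { .name = "demo-uart" },
            .id_table       = demo_ids,
            .probe          = demo_probe,
            .remove         = demo_remove,
    };

The driver would then be registered with amba_driver_register(&demo_driver)
from its module init hook, as amba-pl010.c and amba-pl011.c do.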
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index e76d7d000128..57731e870085 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -96,6 +96,22 @@ static struct vendor_data vendor_st = {
 };
 
 /* Deals with DMA transactions */
+
+struct pl011_sgbuf {
+	struct scatterlist sg;
+	char *buf;
+};
+
+struct pl011_dmarx_data {
+	struct dma_chan *chan;
+	struct completion complete;
+	bool use_buf_b;
+	struct pl011_sgbuf sgbuf_a;
+	struct pl011_sgbuf sgbuf_b;
+	dma_cookie_t cookie;
+	bool running;
+};
+
 struct pl011_dmatx_data {
 	struct dma_chan *chan;
 	struct scatterlist sg;
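Note: sgbuf_a and sgbuf_b form a ping-pong pair: 'use_buf_b' names the buffer
armed for the next DMA transfer while the other is drained into the TTY. The
selection idiom that recurs throughout the patch could be captured in a helper
like this (hypothetical, not part of the patch):

    static inline struct pl011_sgbuf *pl011_sgbuf_next(struct pl011_dmarx_data *dmarx)
    {
            /* The buffer owned (or about to be owned) by the DMA engine */
            return dmarx->use_buf_b ? &dmarx->sgbuf_b : &dmarx->sgbuf_a;
    }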
@@ -120,12 +136,70 @@ struct uart_amba_port {
 	char type[12];
 #ifdef CONFIG_DMA_ENGINE
 	/* DMA stuff */
-	bool using_dma;
+	bool using_tx_dma;
+	bool using_rx_dma;
+	struct pl011_dmarx_data dmarx;
 	struct pl011_dmatx_data dmatx;
 #endif
 };
 
 /*
+ * Reads up to 256 characters from the FIFO or until it's empty and
+ * inserts them into the TTY layer. Returns the number of characters
+ * read from the FIFO.
+ */
+static int pl011_fifo_to_tty(struct uart_amba_port *uap)
+{
+	u16 status, ch;
+	unsigned int flag, max_count = 256;
+	int fifotaken = 0;
+
+	while (max_count--) {
+		status = readw(uap->port.membase + UART01x_FR);
+		if (status & UART01x_FR_RXFE)
+			break;
+
+		/* Take chars from the FIFO and update status */
+		ch = readw(uap->port.membase + UART01x_DR) |
+			UART_DUMMY_DR_RX;
+		flag = TTY_NORMAL;
+		uap->port.icount.rx++;
+		fifotaken++;
+
+		if (unlikely(ch & UART_DR_ERROR)) {
+			if (ch & UART011_DR_BE) {
+				ch &= ~(UART011_DR_FE | UART011_DR_PE);
+				uap->port.icount.brk++;
+				if (uart_handle_break(&uap->port))
+					continue;
+			} else if (ch & UART011_DR_PE)
+				uap->port.icount.parity++;
+			else if (ch & UART011_DR_FE)
+				uap->port.icount.frame++;
+			if (ch & UART011_DR_OE)
+				uap->port.icount.overrun++;
+
+			ch &= uap->port.read_status_mask;
+
+			if (ch & UART011_DR_BE)
+				flag = TTY_BREAK;
+			else if (ch & UART011_DR_PE)
+				flag = TTY_PARITY;
+			else if (ch & UART011_DR_FE)
+				flag = TTY_FRAME;
+		}
+
+		if (uart_handle_sysrq_char(&uap->port, ch & 255))
+			continue;
+
+		uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
+	}
+
+	return fifotaken;
+}
+
+
+/*
  * All the DMA operation mode stuff goes inside this ifdef.
  * This assumes that you have a generic DMA device interface,
  * no custom DMA interfaces are supported.
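Note: pl011_fifo_to_tty() relies on a PL011 data-register read returning the
receive error flags in the bits above the data byte, and on UART_DUMMY_DR_RX
(a driver-private bit defined earlier in this file, outside the hardware
register width) being OR'ed in so that a valid NUL character is never mistaken
for "no character" by the ignore-character logic. Roughly, per the definitions
in include/linux/amba/serial.h:

    /*
     * Layout of a UARTDR read:
     *   [7:0]  received data
     *   [8]    UART011_DR_FE  framing error
     *   [9]    UART011_DR_PE  parity error
     *   [10]   UART011_DR_BE  break
     *   [11]   UART011_DR_OE  overrun
     * UART_DR_ERROR is the OR of the four error bits, which is why the single
     * 'ch & UART_DR_ERROR' test above covers every error path.
     */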
@@ -134,6 +208,31 @@ struct uart_amba_port {
 
 #define PL011_DMA_BUFFER_SIZE PAGE_SIZE
 
+static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
+	enum dma_data_direction dir)
+{
+	sg->buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
+	if (!sg->buf)
+		return -ENOMEM;
+
+	sg_init_one(&sg->sg, sg->buf, PL011_DMA_BUFFER_SIZE);
+
+	if (dma_map_sg(chan->device->dev, &sg->sg, 1, dir) != 1) {
+		kfree(sg->buf);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
+	enum dma_data_direction dir)
+{
+	if (sg->buf) {
+		dma_unmap_sg(chan->device->dev, &sg->sg, 1, dir);
+		kfree(sg->buf);
+	}
+}
+
 static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
 {
 	/* DMA is the sole user of the platform data right now */
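Note: dma_map_sg() returns the number of scatterlist entries actually mapped
(0 on failure); since pl011_sgbuf_init() maps exactly one entry, anything but
1 is treated as an error. A sketch of the intended lifecycle of one bounce
buffer (error handling elided; the real sequencing is in pl011_dma_startup()
and pl011_dma_shutdown() further down, and 'chan' is assumed to be a live
dma_chan pointer):

    struct pl011_sgbuf rx = { };

    if (pl011_sgbuf_init(chan, &rx, DMA_FROM_DEVICE) == 0) {
            /* rx.sg is now device-mapped: hand &rx.sg to
             * device_prep_slave_sg(), then sync with
             * dma_sync_sg_for_cpu() before reading rx.buf */
            pl011_sgbuf_free(chan, &rx, DMA_FROM_DEVICE);   /* unmap + kfree */
    }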
@@ -153,7 +252,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
 		return;
 	}
 
-	/* Try to acquire a generic DMA engine slave channel */
+	/* Try to acquire a generic DMA engine slave TX channel */
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);
 
@@ -168,6 +267,28 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
 
 	dev_info(uap->port.dev, "DMA channel TX %s\n",
 		 dma_chan_name(uap->dmatx.chan));
+
+	/* Optionally make use of an RX channel as well */
+	if (plat->dma_rx_param) {
+		struct dma_slave_config rx_conf = {
+			.src_addr = uap->port.mapbase + UART01x_DR,
+			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+			.direction = DMA_FROM_DEVICE,
+			.src_maxburst = uap->fifosize >> 1,
+		};
+
+		chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
+		if (!chan) {
+			dev_err(uap->port.dev, "no RX DMA channel!\n");
+			return;
+		}
+
+		dmaengine_slave_config(chan, &rx_conf);
+		uap->dmarx.chan = chan;
+
+		dev_info(uap->port.dev, "DMA channel RX %s\n",
+			 dma_chan_name(uap->dmarx.chan));
+	}
 }
 
 #ifndef MODULE
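Note: the RX channel is configured as a device-to-memory slave transfer: the
source is the fixed bus address of the data register, read one byte at a time,
with a burst size of half the FIFO depth so the DMA burst request matches the
receive watermark. The filter function and its parameter come from the board
through 'struct amba_pl011_data'; a hypothetical board-file fragment (the
filter logic and request-line numbers are made up for illustration):

    #include <linux/amba/pl011.h>

    static bool board_dma_filter(struct dma_chan *chan, void *param)
    {
            /* Claim the channel whose id matches our request line */
            return chan->chan_id == (unsigned long)param;
    }

    static struct amba_pl011_data uart0_plat_data = {
            .dma_filter     = board_dma_filter,
            .dma_rx_param   = (void *)3,    /* hypothetical RX request line */
            .dma_tx_param   = (void *)4,    /* hypothetical TX request line */
    };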
@@ -219,9 +340,10 @@ static void pl011_dma_remove(struct uart_amba_port *uap)
 	/* TODO: remove the initcall if it has not yet executed */
 	if (uap->dmatx.chan)
 		dma_release_channel(uap->dmatx.chan);
+	if (uap->dmarx.chan)
+		dma_release_channel(uap->dmarx.chan);
 }
 
-
 /* Forward declare this for the refill routine */
 static int pl011_dma_tx_refill(struct uart_amba_port *uap);
 
@@ -380,7 +502,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
  */
 static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
 {
-	if (!uap->using_dma)
+	if (!uap->using_tx_dma)
 		return false;
 
 	/*
@@ -432,7 +554,7 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
 {
 	u16 dmacr;
 
-	if (!uap->using_dma)
+	if (!uap->using_tx_dma)
 		return false;
 
 	if (!uap->port.x_char) {
@@ -492,7 +614,7 @@ static void pl011_dma_flush_buffer(struct uart_port *port)
 {
 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
 
-	if (!uap->using_dma)
+	if (!uap->using_tx_dma)
 		return;
 
 	/* Avoid deadlock with the DMA engine callback */
@@ -508,9 +630,219 @@ static void pl011_dma_flush_buffer(struct uart_port *port)
 	}
 }
 
+static void pl011_dma_rx_callback(void *data);
+
+static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
+{
+	struct dma_chan *rxchan = uap->dmarx.chan;
+	struct dma_device *dma_dev;
+	struct pl011_dmarx_data *dmarx = &uap->dmarx;
+	struct dma_async_tx_descriptor *desc;
+	struct pl011_sgbuf *sgbuf;
+
+	if (!rxchan)
+		return -EIO;
+
+	/* Start the RX DMA job */
+	sgbuf = uap->dmarx.use_buf_b ?
+		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+	dma_dev = rxchan->device;
+	desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1,
+					DMA_FROM_DEVICE,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	/*
+	 * If the DMA engine is busy and cannot prepare a
+	 * channel, no big deal, the driver will fall back
+	 * to interrupt mode as a result of this error code.
+	 */
+	if (!desc) {
+		uap->dmarx.running = false;
+		dmaengine_terminate_all(rxchan);
+		return -EBUSY;
+	}
+
+	/* Some data to go along to the callback */
+	desc->callback = pl011_dma_rx_callback;
+	desc->callback_param = uap;
+	dmarx->cookie = dmaengine_submit(desc);
+	dma_async_issue_pending(rxchan);
+
+	uap->dmacr |= UART011_RXDMAE;
+	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+	uap->dmarx.running = true;
+
+	uap->im &= ~UART011_RXIM;
+	writew(uap->im, uap->port.membase + UART011_IMSC);
+
+	return 0;
+}
+
+/*
+ * This is called when either the DMA job is complete, or
+ * the FIFO timeout interrupt occurred. This must be called
+ * with the port spinlock uap->port.lock held.
+ */
+static void pl011_dma_rx_chars(struct uart_amba_port *uap,
+			       u32 pending, bool use_buf_b,
+			       bool readfifo)
+{
+	struct tty_struct *tty = uap->port.state->port.tty;
+	struct pl011_sgbuf *sgbuf = use_buf_b ?
+		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+	struct device *dev = uap->dmarx.chan->device->dev;
+	int dma_count = 0;
+	u32 fifotaken = 0; /* only used for vdbg() */
+
+	/* Pick everything from the DMA first */
+	if (pending) {
+		/* Sync in buffer */
+		dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
+
+		/*
+		 * First take all chars in the DMA pipe, then look in the FIFO.
+		 * Note that tty_insert_flip_buf() tries to take as many chars
+		 * as it can.
+		 */
+		dma_count = tty_insert_flip_string(uap->port.state->port.tty,
+						   sgbuf->buf, pending);
+
+		/* Return buffer to device */
+		dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
+
+		uap->port.icount.rx += dma_count;
+		if (dma_count < pending)
+			dev_warn(uap->port.dev,
+				 "couldn't insert all characters (TTY is full?)\n");
+	}
+
+	/*
+	 * Only continue with trying to read the FIFO if all DMA chars have
+	 * been taken first.
+	 */
+	if (dma_count == pending && readfifo) {
+		/* Clear any error flags */
+		writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
+		       uap->port.membase + UART011_ICR);
+
+		/*
+		 * If we read all the DMA'd characters, and we had an
+		 * incomplete buffer, that could be due to an rx error, or
+		 * maybe we just timed out. Read any pending chars and check
+		 * the error status.
+		 *
+		 * Error conditions will only occur in the FIFO, these will
+		 * trigger an immediate interrupt and stop the DMA job, so we
+		 * will always find the error in the FIFO, never in the DMA
+		 * buffer.
+		 */
+		fifotaken = pl011_fifo_to_tty(uap);
+	}
+
+	spin_unlock(&uap->port.lock);
+	dev_vdbg(uap->port.dev,
+		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
+		 dma_count, fifotaken);
+	tty_flip_buffer_push(tty);
+	spin_lock(&uap->port.lock);
+}
+
+static void pl011_dma_rx_irq(struct uart_amba_port *uap)
+{
+	struct pl011_dmarx_data *dmarx = &uap->dmarx;
+	struct dma_chan *rxchan = dmarx->chan;
+	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
+		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
+	size_t pending;
+	struct dma_tx_state state;
+	enum dma_status dmastat;
+
+	/*
+	 * Pause the transfer so we can trust the current counter,
+	 * do this before we pause the PL011 block, else we may
+	 * overflow the FIFO.
+	 */
+	if (dmaengine_pause(rxchan))
+		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
+	dmastat = rxchan->device->device_tx_status(rxchan,
+						   dmarx->cookie, &state);
+	if (dmastat != DMA_PAUSED)
+		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
+
+	/* Disable RX DMA - incoming data will wait in the FIFO */
+	uap->dmacr &= ~UART011_RXDMAE;
+	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+	uap->dmarx.running = false;
+
+	pending = sgbuf->sg.length - state.residue;
+	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
+	/* Then we terminate the transfer - we now know our residue */
+	dmaengine_terminate_all(rxchan);
+
+	/*
+	 * This will take the chars we have so far and insert
+	 * into the framework.
+	 */
+	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);
+
+	/* Switch buffer & re-trigger DMA job */
+	dmarx->use_buf_b = !dmarx->use_buf_b;
+	if (pl011_dma_rx_trigger_dma(uap)) {
+		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
+			"fall back to interrupt mode\n");
+		uap->im |= UART011_RXIM;
+		writew(uap->im, uap->port.membase + UART011_IMSC);
+	}
+}
+
+static void pl011_dma_rx_callback(void *data)
+{
+	struct uart_amba_port *uap = data;
+	struct pl011_dmarx_data *dmarx = &uap->dmarx;
+	bool lastbuf = dmarx->use_buf_b;
+	int ret;
+
+	/*
+	 * This completion interrupt occurs typically when the
+	 * RX buffer is totally stuffed but no timeout has yet
+	 * occurred. When that happens, we just want the RX
+	 * routine to flush out the secondary DMA buffer while
+	 * we immediately trigger the next DMA job.
+	 */
+	spin_lock_irq(&uap->port.lock);
+	uap->dmarx.running = false;
+	dmarx->use_buf_b = !lastbuf;
+	ret = pl011_dma_rx_trigger_dma(uap);
+
+	pl011_dma_rx_chars(uap, PL011_DMA_BUFFER_SIZE, lastbuf, false);
+	spin_unlock_irq(&uap->port.lock);
+	/*
+	 * Do this check after we picked the DMA chars so we don't
+	 * get some IRQ immediately from RX.
+	 */
+	if (ret) {
+		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
+			"fall back to interrupt mode\n");
+		uap->im |= UART011_RXIM;
+		writew(uap->im, uap->port.membase + UART011_IMSC);
+	}
+}
+
+/*
+ * Stop accepting received characters, when we're shutting down or
+ * suspending this port.
+ * Locking: called with port lock held and IRQs disabled.
+ */
+static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
+{
+	/* FIXME. Just disable the DMA enable */
+	uap->dmacr &= ~UART011_RXDMAE;
+	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+}
 
 static void pl011_dma_startup(struct uart_amba_port *uap)
 {
+	int ret;
+
 	if (!uap->dmatx.chan)
 		return;
 
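Note: the pause/status/terminate dance in pl011_dma_rx_irq() above is the
heart of the receive path: only a paused channel reports a trustworthy
residue, and the bytes that actually landed in memory are the scatterlist
length minus that residue. Condensed into one helper (a sketch; like the code
above, it assumes the DMA controller driver implements pause and residue
reporting):

    /* Returns how many bytes of a slave RX transfer have completed */
    static size_t pl011_dma_rx_count(struct dma_chan *chan,
                                     dma_cookie_t cookie, size_t buflen)
    {
            struct dma_tx_state state;

            if (dmaengine_pause(chan))
                    return 0;       /* counter cannot be trusted */
            chan->device->device_tx_status(chan, cookie, &state);
            return buflen - state.residue;
    }

The subsequent dmaengine_terminate_all() is what actually stops the job; the
BUG_ON() guards against an engine reporting a residue beyond the buffer.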
@@ -525,8 +857,33 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
 
 	/* The DMA buffer is now the FIFO the TTY subsystem can use */
 	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
-	uap->using_dma = true;
+	uap->using_tx_dma = true;
+
+	if (!uap->dmarx.chan)
+		goto skip_rx;
+
+	/* Allocate and map DMA RX buffers */
+	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+			       DMA_FROM_DEVICE);
+	if (ret) {
+		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
+			"RX buffer A", ret);
+		goto skip_rx;
+	}
+
+	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
+			       DMA_FROM_DEVICE);
+	if (ret) {
+		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
+			"RX buffer B", ret);
+		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+				 DMA_FROM_DEVICE);
+		goto skip_rx;
+	}
 
+	uap->using_rx_dma = true;
+
+skip_rx:
 	/* Turn on DMA error (RX/TX will be enabled on demand) */
 	uap->dmacr |= UART011_DMAONERR;
 	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
@@ -539,11 +896,17 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
 	if (uap->vendor->dma_threshold)
 		writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
 		       uap->port.membase + ST_UART011_DMAWM);
+
+	if (uap->using_rx_dma) {
+		if (pl011_dma_rx_trigger_dma(uap))
+			dev_dbg(uap->port.dev, "could not trigger initial "
+				"RX DMA job, fall back to interrupt mode\n");
+	}
 }
 
 static void pl011_dma_shutdown(struct uart_amba_port *uap)
 {
-	if (!uap->using_dma)
+	if (!(uap->using_tx_dma || uap->using_rx_dma))
 		return;
 
 	/* Disable RX and TX DMA */
@@ -555,19 +918,39 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
 	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
 	spin_unlock_irq(&uap->port.lock);
 
-	/* In theory, this should already be done by pl011_dma_flush_buffer */
-	dmaengine_terminate_all(uap->dmatx.chan);
-	if (uap->dmatx.queued) {
-		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
-			     DMA_TO_DEVICE);
-		uap->dmatx.queued = false;
+	if (uap->using_tx_dma) {
+		/* In theory, this should already be done by pl011_dma_flush_buffer */
+		dmaengine_terminate_all(uap->dmatx.chan);
+		if (uap->dmatx.queued) {
+			dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
+				     DMA_TO_DEVICE);
+			uap->dmatx.queued = false;
+		}
+
+		kfree(uap->dmatx.buf);
+		uap->using_tx_dma = false;
 	}
 
-	kfree(uap->dmatx.buf);
+	if (uap->using_rx_dma) {
+		dmaengine_terminate_all(uap->dmarx.chan);
+		/* Clean up the RX DMA */
+		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
+		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
+		uap->using_rx_dma = false;
+	}
+}
 
-	uap->using_dma = false;
+static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
+{
+	return uap->using_rx_dma;
 }
 
+static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
+{
+	return uap->using_rx_dma && uap->dmarx.running;
+}
+
+
 #else
 /* Blank functions if the DMA engine is not available */
 static inline void pl011_dma_probe(struct uart_amba_port *uap)
@@ -600,6 +983,29 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
 	return false;
 }
 
+static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
+{
+}
+
+static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
+{
+}
+
+static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
+{
+	return -EIO;
+}
+
+static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
+{
+	return false;
+}
+
+static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
+{
+	return false;
+}
+
 #define pl011_dma_flush_buffer	NULL
 #endif
 
@@ -630,6 +1036,8 @@ static void pl011_stop_rx(struct uart_port *port)
 	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
 		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
 	writew(uap->im, uap->port.membase + UART011_IMSC);
+
+	pl011_dma_rx_stop(uap);
 }
 
 static void pl011_enable_ms(struct uart_port *port)
@@ -643,51 +1051,24 @@
 static void pl011_rx_chars(struct uart_amba_port *uap)
 {
 	struct tty_struct *tty = uap->port.state->port.tty;
-	unsigned int status, ch, flag, max_count = 256;
-
-	status = readw(uap->port.membase + UART01x_FR);
-	while ((status & UART01x_FR_RXFE) == 0 && max_count--) {
-		ch = readw(uap->port.membase + UART01x_DR) | UART_DUMMY_DR_RX;
-		flag = TTY_NORMAL;
-		uap->port.icount.rx++;
-
-		/*
-		 * Note that the error handling code is
-		 * out of the main execution path
-		 */
-		if (unlikely(ch & UART_DR_ERROR)) {
-			if (ch & UART011_DR_BE) {
-				ch &= ~(UART011_DR_FE | UART011_DR_PE);
-				uap->port.icount.brk++;
-				if (uart_handle_break(&uap->port))
-					goto ignore_char;
-			} else if (ch & UART011_DR_PE)
-				uap->port.icount.parity++;
-			else if (ch & UART011_DR_FE)
-				uap->port.icount.frame++;
-			if (ch & UART011_DR_OE)
-				uap->port.icount.overrun++;
-
-			ch &= uap->port.read_status_mask;
-
-			if (ch & UART011_DR_BE)
-				flag = TTY_BREAK;
-			else if (ch & UART011_DR_PE)
-				flag = TTY_PARITY;
-			else if (ch & UART011_DR_FE)
-				flag = TTY_FRAME;
-		}
 
-		if (uart_handle_sysrq_char(&uap->port, ch & 255))
-			goto ignore_char;
-
-		uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
+	pl011_fifo_to_tty(uap);
 
-	ignore_char:
-		status = readw(uap->port.membase + UART01x_FR);
-	}
 	spin_unlock(&uap->port.lock);
 	tty_flip_buffer_push(tty);
+	/*
+	 * If we were temporarily out of DMA mode for a while,
+	 * attempt to switch back to DMA mode again.
+	 */
+	if (pl011_dma_rx_available(uap)) {
+		if (pl011_dma_rx_trigger_dma(uap)) {
+			dev_dbg(uap->port.dev, "could not trigger RX DMA job "
+				"fall back to interrupt mode again\n");
+			uap->im |= UART011_RXIM;
+		} else
+			uap->im &= ~UART011_RXIM;
+		writew(uap->im, uap->port.membase + UART011_IMSC);
+	}
 	spin_lock(&uap->port.lock);
 }
 
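Note: RX therefore alternates between two modes: DMA mode (UART011_RXDMAE set,
UART011_RXIM masked, with the receive timeout UART011_RTIM still delivered)
and interrupt mode whenever a DMA job cannot be prepared; pl011_rx_chars()
above is where the driver opportunistically switches back. The invariant that
the scattered register writes maintain, folded into one hypothetical helper:

    /* The two RX modes differ only in the RXDMAE and RXIM bits */
    static void pl011_rx_set_dma_mode(struct uart_amba_port *uap, bool dma)
    {
            if (dma) {
                    uap->dmacr |= UART011_RXDMAE;
                    uap->im &= ~UART011_RXIM;
            } else {
                    uap->dmacr &= ~UART011_RXDMAE;
                    uap->im |= UART011_RXIM;
            }
            writew(uap->dmacr, uap->port.membase + UART011_DMACR);
            writew(uap->im, uap->port.membase + UART011_IMSC);
    }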
@@ -767,8 +1148,12 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
 					  UART011_RXIS),
 			       uap->port.membase + UART011_ICR);
 
-			if (status & (UART011_RTIS|UART011_RXIS))
-				pl011_rx_chars(uap);
+			if (status & (UART011_RTIS|UART011_RXIS)) {
+				if (pl011_dma_rx_running(uap))
+					pl011_dma_rx_irq(uap);
+				else
+					pl011_rx_chars(uap);
+			}
 			if (status & (UART011_DSRMIS|UART011_DCDMIS|
 				      UART011_CTSMIS|UART011_RIMIS))
 				pl011_modem_status(uap);
@@ -945,10 +1330,14 @@ static int pl011_startup(struct uart_port *port)
 	pl011_dma_startup(uap);
 
 	/*
-	 * Finally, enable interrupts
+	 * Finally, enable interrupts, only timeouts when using DMA
+	 * if initial RX DMA job failed, start in interrupt mode
+	 * as well.
 	 */
 	spin_lock_irq(&uap->port.lock);
-	uap->im = UART011_RXIM | UART011_RTIM;
+	uap->im = UART011_RTIM;
+	if (!pl011_dma_rx_running(uap))
+		uap->im |= UART011_RXIM;
 	writew(uap->im, uap->port.membase + UART011_IMSC);
 	spin_unlock_irq(&uap->port.lock);
 
@@ -1349,7 +1738,7 @@ static struct uart_driver amba_reg = {
 	.cons = AMBA_CONSOLE,
 };
 
-static int pl011_probe(struct amba_device *dev, struct amba_id *id)
+static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
 {
 	struct uart_amba_port *uap;
 	struct vendor_data *vendor = id->data;