 drivers/tty/serial/amba-pl011.c | 157
 include/linux/amba/serial.h     |   3
 2 files changed, 141 insertions(+), 19 deletions(-)
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 3ea5408fcbeb..b031abf43a7a 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -29,6 +29,7 @@
  * and hooked into this driver.
  */
 
+
 #if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
 #define SUPPORT_SYSRQ
 #endif
@@ -117,6 +118,12 @@ struct pl011_dmarx_data {
 	struct pl011_sgbuf sgbuf_b;
 	dma_cookie_t cookie;
 	bool running;
+	struct timer_list timer;
+	unsigned int last_residue;
+	unsigned long last_jiffies;
+	bool auto_poll_rate;
+	unsigned int poll_rate;
+	unsigned int poll_timeout;
 };
 
 struct pl011_dmatx_data {
@@ -223,16 +230,18 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
 static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
 	enum dma_data_direction dir)
 {
-	sg->buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
+	dma_addr_t dma_addr;
+
+	sg->buf = dma_alloc_coherent(chan->device->dev,
+		PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
 	if (!sg->buf)
 		return -ENOMEM;
 
-	sg_init_one(&sg->sg, sg->buf, PL011_DMA_BUFFER_SIZE);
+	sg_init_table(&sg->sg, 1);
+	sg_set_page(&sg->sg, phys_to_page(dma_addr),
+		PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
+	sg_dma_address(&sg->sg) = dma_addr;
 
-	if (dma_map_sg(chan->device->dev, &sg->sg, 1, dir) != 1) {
-		kfree(sg->buf);
-		return -EINVAL;
-	}
 	return 0;
 }
 
@@ -240,8 +249,9 @@ static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
 	enum dma_data_direction dir)
 {
 	if (sg->buf) {
-		dma_unmap_sg(chan->device->dev, &sg->sg, 1, dir);
-		kfree(sg->buf);
+		dma_free_coherent(chan->device->dev,
+			PL011_DMA_BUFFER_SIZE, sg->buf,
+			sg_dma_address(&sg->sg));
 	}
 }
 
@@ -300,6 +310,29 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
 		dmaengine_slave_config(chan, &rx_conf);
 		uap->dmarx.chan = chan;
 
+		if (plat->dma_rx_poll_enable) {
+			/* Set poll rate if specified. */
+			if (plat->dma_rx_poll_rate) {
+				uap->dmarx.auto_poll_rate = false;
+				uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
+			} else {
+				/*
+				 * 100 ms defaults to poll rate if not
+				 * specified. This will be adjusted with
+				 * the baud rate at set_termios.
+				 */
+				uap->dmarx.auto_poll_rate = true;
+				uap->dmarx.poll_rate = 100;
+			}
+			/* 3 secs defaults poll_timeout if not specified. */
+			if (plat->dma_rx_poll_timeout)
+				uap->dmarx.poll_timeout =
+					plat->dma_rx_poll_timeout;
+			else
+				uap->dmarx.poll_timeout = 3000;
+		} else
+			uap->dmarx.auto_poll_rate = false;
+
 		dev_info(uap->port.dev, "DMA channel RX %s\n",
 			 dma_chan_name(uap->dmarx.chan));
 	}
@@ -701,24 +734,30 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
 	struct tty_port *port = &uap->port.state->port;
 	struct pl011_sgbuf *sgbuf = use_buf_b ?
 		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
-	struct device *dev = uap->dmarx.chan->device->dev;
 	int dma_count = 0;
 	u32 fifotaken = 0; /* only used for vdbg() */
 
-	/* Pick everything from the DMA first */
+	struct pl011_dmarx_data *dmarx = &uap->dmarx;
+	int dmataken = 0;
+
+	if (uap->dmarx.poll_rate) {
+		/* The data can be taken by polling */
+		dmataken = sgbuf->sg.length - dmarx->last_residue;
+		/* Recalculate the pending size */
+		if (pending >= dmataken)
+			pending -= dmataken;
+	}
+
+	/* Pick the remain data from the DMA */
 	if (pending) {
-		/* Sync in buffer */
-		dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
 
 		/*
 		 * First take all chars in the DMA pipe, then look in the FIFO.
 		 * Note that tty_insert_flip_buf() tries to take as many chars
 		 * as it can.
 		 */
-		dma_count = tty_insert_flip_string(port, sgbuf->buf, pending);
-
-		/* Return buffer to device */
-		dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
+		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
+				pending);
 
 		uap->port.icount.rx += dma_count;
 		if (dma_count < pending)
@@ -726,6 +765,10 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
 			"couldn't insert all characters (TTY is full?)\n");
 	}
 
+	/* Reset the last_residue for Rx DMA poll */
+	if (uap->dmarx.poll_rate)
+		dmarx->last_residue = sgbuf->sg.length;
+
 	/*
 	 * Only continue with trying to read the FIFO if all DMA chars have
 	 * been taken first.
@@ -865,6 +908,57 @@ static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
 	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
 }
 
+/*
+ * Timer handler for Rx DMA polling.
+ * Every polling, It checks the residue in the dma buffer and transfer
+ * data to the tty. Also, last_residue is updated for the next polling.
+ */
+static void pl011_dma_rx_poll(unsigned long args)
+{
+	struct uart_amba_port *uap = (struct uart_amba_port *)args;
+	struct tty_port *port = &uap->port.state->port;
+	struct pl011_dmarx_data *dmarx = &uap->dmarx;
+	struct dma_chan *rxchan = uap->dmarx.chan;
+	unsigned long flags = 0;
+	unsigned int dmataken = 0;
+	unsigned int size = 0;
+	struct pl011_sgbuf *sgbuf;
+	int dma_count;
+	struct dma_tx_state state;
+
+	sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
+	if (likely(state.residue < dmarx->last_residue)) {
+		dmataken = sgbuf->sg.length - dmarx->last_residue;
+		size = dmarx->last_residue - state.residue;
+		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
+				size);
+		if (dma_count == size)
+			dmarx->last_residue = state.residue;
+		dmarx->last_jiffies = jiffies;
+	}
+	tty_flip_buffer_push(port);
+
+	/*
+	 * If no data is received in poll_timeout, the driver will fall back
+	 * to interrupt mode. We will retrigger DMA at the first interrupt.
+	 */
+	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
+			> uap->dmarx.poll_timeout) {
+
+		spin_lock_irqsave(&uap->port.lock, flags);
+		pl011_dma_rx_stop(uap);
+		spin_unlock_irqrestore(&uap->port.lock, flags);
+
+		uap->dmarx.running = false;
+		dmaengine_terminate_all(rxchan);
+		del_timer(&uap->dmarx.timer);
+	} else {
+		mod_timer(&uap->dmarx.timer,
+			jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
+	}
+}
+
 static void pl011_dma_startup(struct uart_amba_port *uap)
 {
 	int ret;
@@ -927,6 +1021,16 @@ skip_rx:
 		if (pl011_dma_rx_trigger_dma(uap))
 			dev_dbg(uap->port.dev, "could not trigger initial "
 				"RX DMA job, fall back to interrupt mode\n");
+		if (uap->dmarx.poll_rate) {
+			init_timer(&(uap->dmarx.timer));
+			uap->dmarx.timer.function = pl011_dma_rx_poll;
+			uap->dmarx.timer.data = (unsigned long)uap;
+			mod_timer(&uap->dmarx.timer,
+				jiffies +
+				msecs_to_jiffies(uap->dmarx.poll_rate));
+			uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
+			uap->dmarx.last_jiffies = jiffies;
+		}
 	}
 }
 
@@ -962,6 +1066,8 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
 		/* Clean up the RX DMA */
 		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
 		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
+		if (uap->dmarx.poll_rate)
+			del_timer_sync(&uap->dmarx.timer);
 		uap->using_rx_dma = false;
 	}
 }
@@ -976,7 +1082,6 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
 	return uap->using_rx_dma && uap->dmarx.running;
 }
 
-
 #else
 /* Blank functions if the DMA engine is not available */
 static inline void pl011_dma_probe(struct uart_amba_port *uap)
@@ -1088,8 +1193,18 @@ static void pl011_rx_chars(struct uart_amba_port *uap)
 			dev_dbg(uap->port.dev, "could not trigger RX DMA job "
 				"fall back to interrupt mode again\n");
 			uap->im |= UART011_RXIM;
-		} else
+		} else {
 			uap->im &= ~UART011_RXIM;
+			/* Start Rx DMA poll */
+			if (uap->dmarx.poll_rate) {
+				uap->dmarx.last_jiffies = jiffies;
+				uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
+				mod_timer(&uap->dmarx.timer,
+					jiffies +
+					msecs_to_jiffies(uap->dmarx.poll_rate));
+			}
+		}
+
 		writew(uap->im, uap->port.membase + UART011_IMSC);
 	}
 	spin_lock(&uap->port.lock);
@@ -1164,7 +1279,6 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
 	unsigned int dummy_read;
 
 	spin_lock_irqsave(&uap->port.lock, flags);
-
 	status = readw(uap->port.membase + UART011_MIS);
 	if (status) {
 		do {
@@ -1551,6 +1665,11 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
 	 */
 	baud = uart_get_baud_rate(port, termios, old, 0,
 				  port->uartclk / clkdiv);
+	/*
+	 * Adjust RX DMA polling rate with baud rate if not specified.
+	 */
+	if (uap->dmarx.auto_poll_rate)
+		uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
 
 	if (baud > port->uartclk/16)
 		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
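
To make the residue bookkeeping above easier to follow (both in pl011_dma_rx_chars() and in the new pl011_dma_rx_poll() handler): the DMA engine reports how many bytes of the Rx buffer it has not yet written (the residue), so the bytes already pushed to the TTY are the buffer length minus last_residue, and the bytes that arrived since the previous poll are last_residue minus the current residue. Below is a minimal standalone sketch in userspace C with invented sample numbers; it is not driver code, and the 4096-byte buffer length is only an illustrative value.

#include <stdio.h>

int main(void)
{
	unsigned int buf_len = 4096;		/* stands in for sgbuf->sg.length */
	unsigned int last_residue = 4096;	/* reset to the full length each DMA cycle */
	unsigned int residue[] = { 3900, 3650, 3650 };	/* residues three polls might see */

	for (int i = 0; i < 3; i++) {
		if (residue[i] < last_residue) {
			/* bytes already handed to the TTY on earlier polls */
			unsigned int dmataken = buf_len - last_residue;
			/* new bytes that arrived since the previous poll */
			unsigned int size = last_residue - residue[i];

			printf("poll %d: push %u bytes starting at offset %u\n",
			       i, size, dmataken);
			/* the real handler advances this only if the TTY accepted all bytes */
			last_residue = residue[i];
		} else {
			printf("poll %d: no new data\n", i);
		}
	}
	return 0;
}

Running this prints that poll 0 pushes 196 bytes at offset 0, poll 1 pushes 250 bytes at offset 196, and poll 2 finds no new data, mirroring how the poll handler walks forward through the DMA buffer until pl011_dma_rx_chars() resets last_residue.
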
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index f612c783170f..62d9303c2837 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -203,6 +203,9 @@ struct amba_pl011_data {
 	bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
 	void *dma_rx_param;
 	void *dma_tx_param;
+	bool dma_rx_poll_enable;
+	unsigned int dma_rx_poll_rate;
+	unsigned int dma_rx_poll_timeout;
 	void (*init) (void);
 	void (*exit) (void);
 };
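
For reference, a hedged sketch of how board platform data might use the three new fields added to struct amba_pl011_data. The names board_uart_dma_filter and board_uart0_data are hypothetical, and the values only illustrate the defaults described in the driver hunks above (100 ms auto-adjusted poll rate, 3000 ms fallback timeout).

#include <linux/amba/serial.h>
#include <linux/dmaengine.h>

/* Hypothetical DMA-channel filter; a real board would match its request line here. */
static bool board_uart_dma_filter(struct dma_chan *chan, void *filter_param)
{
	return false;
}

static struct amba_pl011_data board_uart0_data = {
	.dma_filter		= board_uart_dma_filter,
	.dma_rx_poll_enable	= true,	/* poll the Rx DMA buffer from a timer */
	.dma_rx_poll_rate	= 0,	/* 0: start at 100 ms, then derive from baud in set_termios */
	.dma_rx_poll_timeout	= 3000,	/* ms of idle Rx before falling back to interrupt mode */
};

Leaving dma_rx_poll_rate at zero selects the auto_poll_rate path in pl011_dma_probe_initcall(), so the rate is recomputed as DIV_ROUND_UP(10000000, baud) whenever the baud rate changes.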