| author | Guennadi Liakhovetski <g.liakhovetski@gmx.de> | 2010-03-01 21:39:15 -0500 |
|---|---|---|
| committer | Paul Mundt <lethal@linux-sh.org> | 2010-03-01 21:39:15 -0500 |
| commit | 73a19e4c0301908ce6346715fd08a74308451f5a | |
| tree | 252aa89c1e04f1febb71e5133a78e16bf730479c | |
| parent | c014906a870ce70e009def0c9d170ccabeb0be63 | |
serial: sh-sci: Add DMA support.
Support using DMA for sending and receiving data over SCI(F) interfaces of
various SH SoCs.
Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
| -rw-r--r-- | drivers/serial/Kconfig | 4 |
| -rw-r--r-- | drivers/serial/sh-sci.c | 618 |
| -rw-r--r-- | include/linux/serial_sci.h | 6 |
3 files changed, 582 insertions, 46 deletions
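With this patch the driver only uses DMA when CONFIG_SERIAL_SH_SCI_DMA is enabled and the board's platform data names a DMA controller plus a pair of slave channels; otherwise it stays in PIO mode. Below is an illustrative board-side sketch of the new `plat_sci_port` fields — the `dma_device` controller device, the IRQ numbers and the base address are placeholders for the board's real resources; only the field names and the `SHDMA_SLAVE_SCIF0_*` slave IDs come from the patch itself.

```c
/*
 * Illustrative board setup (not part of this patch): wiring the new DMA
 * fields of struct plat_sci_port.  dma_device, the IRQ numbers and the
 * mapbase are placeholders for the board's real resources.
 */
#include <linux/serial_sci.h>

static struct plat_sci_port scif0_platform_data = {
	.mapbase	= 0xffe00000,
	.flags		= UPF_BOOT_AUTOCONF,
	.type		= PORT_SCIF,
	.irqs		= { 80, 80, 80, 80 },	/* ERI, RXI, TXI, BRI */
	/* New in this patch: leaving dma_dev NULL keeps the driver in PIO mode */
	.dma_dev	= &dma_device.dev,	/* the board's SH_DMAE controller */
	.dma_slave_tx	= SHDMA_SLAVE_SCIF0_TX,
	.dma_slave_rx	= SHDMA_SLAVE_SCIF0_RX,
};
```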
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 888a0ce91c4b..11ebe862457b 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
| @@ -1009,6 +1009,10 @@ config SERIAL_SH_SCI_CONSOLE | |||
| 1009 | depends on SERIAL_SH_SCI=y | 1009 | depends on SERIAL_SH_SCI=y |
| 1010 | select SERIAL_CORE_CONSOLE | 1010 | select SERIAL_CORE_CONSOLE |
| 1011 | 1011 | ||
| 1012 | config SERIAL_SH_SCI_DMA | ||
| 1013 | bool "DMA support" | ||
| 1014 | depends on SERIAL_SH_SCI && SH_DMAE && EXPERIMENTAL | ||
| 1015 | |||
| 1012 | config SERIAL_PNX8XXX | 1016 | config SERIAL_PNX8XXX |
| 1013 | bool "Enable PNX8XXX SoCs' UART Support" | 1017 | bool "Enable PNX8XXX SoCs' UART Support" |
| 1014 | depends on MIPS && (SOC_PNX8550 || SOC_PNX833X) | 1018 | depends on MIPS && (SOC_PNX8550 || SOC_PNX833X) |
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 42f3333c4ad0..f3841cd8fc5d 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
| @@ -48,6 +48,9 @@ | |||
| 48 | #include <linux/ctype.h> | 48 | #include <linux/ctype.h> |
| 49 | #include <linux/err.h> | 49 | #include <linux/err.h> |
| 50 | #include <linux/list.h> | 50 | #include <linux/list.h> |
| 51 | #include <linux/dmaengine.h> | ||
| 52 | #include <linux/scatterlist.h> | ||
| 53 | #include <linux/timer.h> | ||
| 51 | 54 | ||
| 52 | #ifdef CONFIG_SUPERH | 55 | #ifdef CONFIG_SUPERH |
| 53 | #include <asm/sh_bios.h> | 56 | #include <asm/sh_bios.h> |
| @@ -84,6 +87,27 @@ struct sci_port { | |||
| 84 | struct clk *dclk; | 87 | struct clk *dclk; |
| 85 | 88 | ||
| 86 | struct list_head node; | 89 | struct list_head node; |
| 90 | struct dma_chan *chan_tx; | ||
| 91 | struct dma_chan *chan_rx; | ||
| 92 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | ||
| 93 | struct device *dma_dev; | ||
| 94 | enum sh_dmae_slave_chan_id slave_tx; | ||
| 95 | enum sh_dmae_slave_chan_id slave_rx; | ||
| 96 | struct dma_async_tx_descriptor *desc_tx; | ||
| 97 | struct dma_async_tx_descriptor *desc_rx[2]; | ||
| 98 | dma_cookie_t cookie_tx; | ||
| 99 | dma_cookie_t cookie_rx[2]; | ||
| 100 | dma_cookie_t active_rx; | ||
| 101 | struct scatterlist sg_tx; | ||
| 102 | unsigned int sg_len_tx; | ||
| 103 | struct scatterlist sg_rx[2]; | ||
| 104 | size_t buf_len_rx; | ||
| 105 | struct sh_dmae_slave param_tx; | ||
| 106 | struct sh_dmae_slave param_rx; | ||
| 107 | struct work_struct work_tx; | ||
| 108 | struct work_struct work_rx; | ||
| 109 | struct timer_list rx_timer; | ||
| 110 | #endif | ||
| 87 | }; | 111 | }; |
| 88 | 112 | ||
| 89 | struct sh_sci_priv { | 113 | struct sh_sci_priv { |
| @@ -269,29 +293,44 @@ static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) | |||
| 269 | defined(CONFIG_CPU_SUBTYPE_SH7780) || \ | 293 | defined(CONFIG_CPU_SUBTYPE_SH7780) || \ |
| 270 | defined(CONFIG_CPU_SUBTYPE_SH7785) || \ | 294 | defined(CONFIG_CPU_SUBTYPE_SH7785) || \ |
| 271 | defined(CONFIG_CPU_SUBTYPE_SH7786) | 295 | defined(CONFIG_CPU_SUBTYPE_SH7786) |
| 272 | static inline int scif_txroom(struct uart_port *port) | 296 | static int scif_txfill(struct uart_port *port) |
| 273 | { | 297 | { |
| 274 | return SCIF_TXROOM_MAX - (sci_in(port, SCTFDR) & 0xff); | 298 | return sci_in(port, SCTFDR) & 0xff; |
| 275 | } | 299 | } |
| 276 | 300 | ||
| 277 | static inline int scif_rxroom(struct uart_port *port) | 301 | static int scif_txroom(struct uart_port *port) |
| 302 | { | ||
| 303 | return SCIF_TXROOM_MAX - scif_txfill(port); | ||
| 304 | } | ||
| 305 | |||
| 306 | static int scif_rxfill(struct uart_port *port) | ||
| 278 | { | 307 | { |
| 279 | return sci_in(port, SCRFDR) & 0xff; | 308 | return sci_in(port, SCRFDR) & 0xff; |
| 280 | } | 309 | } |
| 281 | #elif defined(CONFIG_CPU_SUBTYPE_SH7763) | 310 | #elif defined(CONFIG_CPU_SUBTYPE_SH7763) |
| 282 | static inline int scif_txroom(struct uart_port *port) | 311 | static int scif_txfill(struct uart_port *port) |
| 283 | { | 312 | { |
| 284 | if ((port->mapbase == 0xffe00000) || | 313 | if (port->mapbase == 0xffe00000 || |
| 285 | (port->mapbase == 0xffe08000)) { | 314 | port->mapbase == 0xffe08000) |
| 286 | /* SCIF0/1*/ | 315 | /* SCIF0/1*/ |
| 287 | return SCIF_TXROOM_MAX - (sci_in(port, SCTFDR) & 0xff); | 316 | return sci_in(port, SCTFDR) & 0xff; |
| 288 | } else { | 317 | else |
| 289 | /* SCIF2 */ | 318 | /* SCIF2 */ |
| 290 | return SCIF2_TXROOM_MAX - (sci_in(port, SCFDR) >> 8); | 319 | return sci_in(port, SCFDR) >> 8; |
| 291 | } | 320 | } |
| 321 | |||
| 322 | static int scif_txroom(struct uart_port *port) | ||
| 323 | { | ||
| 324 | if (port->mapbase == 0xffe00000 || | ||
| 325 | port->mapbase == 0xffe08000) | ||
| 326 | /* SCIF0/1*/ | ||
| 327 | return SCIF_TXROOM_MAX - scif_txfill(port); | ||
| 328 | else | ||
| 329 | /* SCIF2 */ | ||
| 330 | return SCIF2_TXROOM_MAX - scif_txfill(port); | ||
| 292 | } | 331 | } |
| 293 | 332 | ||
| 294 | static inline int scif_rxroom(struct uart_port *port) | 333 | static int scif_rxfill(struct uart_port *port) |
| 295 | { | 334 | { |
| 296 | if ((port->mapbase == 0xffe00000) || | 335 | if ((port->mapbase == 0xffe00000) || |
| 297 | (port->mapbase == 0xffe08000)) { | 336 | (port->mapbase == 0xffe08000)) { |
| @@ -303,23 +342,33 @@ static inline int scif_rxroom(struct uart_port *port) | |||
| 303 | } | 342 | } |
| 304 | } | 343 | } |
| 305 | #else | 344 | #else |
| 306 | static inline int scif_txroom(struct uart_port *port) | 345 | static int scif_txfill(struct uart_port *port) |
| 307 | { | 346 | { |
| 308 | return SCIF_TXROOM_MAX - (sci_in(port, SCFDR) >> 8); | 347 | return sci_in(port, SCFDR) >> 8; |
| 309 | } | 348 | } |
| 310 | 349 | ||
| 311 | static inline int scif_rxroom(struct uart_port *port) | 350 | static int scif_txroom(struct uart_port *port) |
| 351 | { | ||
| 352 | return SCIF_TXROOM_MAX - scif_txfill(port); | ||
| 353 | } | ||
| 354 | |||
| 355 | static int scif_rxfill(struct uart_port *port) | ||
| 312 | { | 356 | { |
| 313 | return sci_in(port, SCFDR) & SCIF_RFDC_MASK; | 357 | return sci_in(port, SCFDR) & SCIF_RFDC_MASK; |
| 314 | } | 358 | } |
| 315 | #endif | 359 | #endif |
| 316 | 360 | ||
| 317 | static inline int sci_txroom(struct uart_port *port) | 361 | static int sci_txfill(struct uart_port *port) |
| 318 | { | 362 | { |
| 319 | return (sci_in(port, SCxSR) & SCI_TDRE) != 0; | 363 | return !(sci_in(port, SCxSR) & SCI_TDRE); |
| 320 | } | 364 | } |
| 321 | 365 | ||
| 322 | static inline int sci_rxroom(struct uart_port *port) | 366 | static int sci_txroom(struct uart_port *port) |
| 367 | { | ||
| 368 | return !sci_txfill(port); | ||
| 369 | } | ||
| 370 | |||
| 371 | static int sci_rxfill(struct uart_port *port) | ||
| 323 | { | 372 | { |
| 324 | return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0; | 373 | return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0; |
| 325 | } | 374 | } |
| @@ -406,9 +455,9 @@ static inline void sci_receive_chars(struct uart_port *port) | |||
| 406 | 455 | ||
| 407 | while (1) { | 456 | while (1) { |
| 408 | if (port->type == PORT_SCI) | 457 | if (port->type == PORT_SCI) |
| 409 | count = sci_rxroom(port); | 458 | count = sci_rxfill(port); |
| 410 | else | 459 | else |
| 411 | count = scif_rxroom(port); | 460 | count = scif_rxfill(port); |
| 412 | 461 | ||
| 413 | /* Don't copy more bytes than there is room for in the buffer */ | 462 | /* Don't copy more bytes than there is room for in the buffer */ |
| 414 | count = tty_buffer_request_room(tty, count); | 463 | count = tty_buffer_request_room(tty, count); |
| @@ -453,10 +502,10 @@ static inline void sci_receive_chars(struct uart_port *port) | |||
| 453 | } | 502 | } |
| 454 | 503 | ||
| 455 | /* Store data and status */ | 504 | /* Store data and status */ |
| 456 | if (status&SCxSR_FER(port)) { | 505 | if (status & SCxSR_FER(port)) { |
| 457 | flag = TTY_FRAME; | 506 | flag = TTY_FRAME; |
| 458 | dev_notice(port->dev, "frame error\n"); | 507 | dev_notice(port->dev, "frame error\n"); |
| 459 | } else if (status&SCxSR_PER(port)) { | 508 | } else if (status & SCxSR_PER(port)) { |
| 460 | flag = TTY_PARITY; | 509 | flag = TTY_PARITY; |
| 461 | dev_notice(port->dev, "parity error\n"); | 510 | dev_notice(port->dev, "parity error\n"); |
| 462 | } else | 511 | } else |
| @@ -618,13 +667,39 @@ static inline int sci_handle_breaks(struct uart_port *port) | |||
| 618 | return copied; | 667 | return copied; |
| 619 | } | 668 | } |
| 620 | 669 | ||
| 621 | static irqreturn_t sci_rx_interrupt(int irq, void *port) | 670 | static irqreturn_t sci_rx_interrupt(int irq, void *ptr) |
| 622 | { | 671 | { |
| 672 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | ||
| 673 | struct uart_port *port = ptr; | ||
| 674 | struct sci_port *s = to_sci_port(port); | ||
| 675 | |||
| 676 | if (s->chan_rx) { | ||
| 677 | unsigned long tout; | ||
| 678 | u16 scr = sci_in(port, SCSCR); | ||
| 679 | u16 ssr = sci_in(port, SCxSR); | ||
| 680 | |||
| 681 | /* Disable future Rx interrupts */ | ||
| 682 | sci_out(port, SCSCR, scr & ~SCI_CTRL_FLAGS_RIE); | ||
| 683 | /* Clear current interrupt */ | ||
| 684 | sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port))); | ||
| 685 | /* Calculate delay for 1.5 DMA buffers */ | ||
| 686 | tout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 / | ||
| 687 | port->fifosize / 2; | ||
| 688 | dev_dbg(port->dev, "Rx IRQ: setup timeout in %u ms\n", | ||
| 689 | tout * 1000 / HZ); | ||
| 690 | if (tout < 2) | ||
| 691 | tout = 2; | ||
| 692 | mod_timer(&s->rx_timer, jiffies + tout); | ||
| 693 | |||
| 694 | return IRQ_HANDLED; | ||
| 695 | } | ||
| 696 | #endif | ||
| 697 | |||
| 623 | /* I think sci_receive_chars has to be called irrespective | 698 | /* I think sci_receive_chars has to be called irrespective |
| 624 | * of whether the I_IXOFF is set, otherwise, how is the interrupt | 699 | * of whether the I_IXOFF is set, otherwise, how is the interrupt |
| 625 | * to be disabled? | 700 | * to be disabled? |
| 626 | */ | 701 | */ |
| 627 | sci_receive_chars(port); | 702 | sci_receive_chars(ptr); |
| 628 | 703 | ||
| 629 | return IRQ_HANDLED; | 704 | return IRQ_HANDLED; |
| 630 | } | 705 | } |
| @@ -680,6 +755,7 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) | |||
| 680 | { | 755 | { |
| 681 | unsigned short ssr_status, scr_status, err_enabled; | 756 | unsigned short ssr_status, scr_status, err_enabled; |
| 682 | struct uart_port *port = ptr; | 757 | struct uart_port *port = ptr; |
| 758 | struct sci_port *s = to_sci_port(port); | ||
| 683 | irqreturn_t ret = IRQ_NONE; | 759 | irqreturn_t ret = IRQ_NONE; |
| 684 | 760 | ||
| 685 | ssr_status = sci_in(port, SCxSR); | 761 | ssr_status = sci_in(port, SCxSR); |
| @@ -687,10 +763,15 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) | |||
| 687 | err_enabled = scr_status & (SCI_CTRL_FLAGS_REIE | SCI_CTRL_FLAGS_RIE); | 763 | err_enabled = scr_status & (SCI_CTRL_FLAGS_REIE | SCI_CTRL_FLAGS_RIE); |
| 688 | 764 | ||
| 689 | /* Tx Interrupt */ | 765 | /* Tx Interrupt */ |
| 690 | if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCI_CTRL_FLAGS_TIE)) | 766 | if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCI_CTRL_FLAGS_TIE) && |
| 767 | !s->chan_tx) | ||
| 691 | ret = sci_tx_interrupt(irq, ptr); | 768 | ret = sci_tx_interrupt(irq, ptr); |
| 692 | /* Rx Interrupt */ | 769 | /* |
| 693 | if ((ssr_status & SCxSR_RDxF(port)) && (scr_status & SCI_CTRL_FLAGS_RIE)) | 770 | * Rx Interrupt: if we're using DMA, the DMA controller clears RDF / |
| 771 | * DR flags | ||
| 772 | */ | ||
| 773 | if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) && | ||
| 774 | (scr_status & SCI_CTRL_FLAGS_RIE)) | ||
| 694 | ret = sci_rx_interrupt(irq, ptr); | 775 | ret = sci_rx_interrupt(irq, ptr); |
| 695 | /* Error Interrupt */ | 776 | /* Error Interrupt */ |
| 696 | if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled) | 777 | if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled) |
| @@ -699,6 +780,10 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) | |||
| 699 | if ((ssr_status & SCxSR_BRK(port)) && err_enabled) | 780 | if ((ssr_status & SCxSR_BRK(port)) && err_enabled) |
| 700 | ret = sci_br_interrupt(irq, ptr); | 781 | ret = sci_br_interrupt(irq, ptr); |
| 701 | 782 | ||
| 783 | WARN_ONCE(ret == IRQ_NONE, | ||
| 784 | "%s: %d IRQ %d, status %x, control %x\n", __func__, | ||
| 785 | irq, port->line, ssr_status, scr_status); | ||
| 786 | |||
| 702 | return ret; | 787 | return ret; |
| 703 | } | 788 | } |
| 704 | 789 | ||
| @@ -800,7 +885,9 @@ static void sci_free_irq(struct sci_port *port) | |||
| 800 | static unsigned int sci_tx_empty(struct uart_port *port) | 885 | static unsigned int sci_tx_empty(struct uart_port *port) |
| 801 | { | 886 | { |
| 802 | unsigned short status = sci_in(port, SCxSR); | 887 | unsigned short status = sci_in(port, SCxSR); |
| 803 | return status & SCxSR_TEND(port) ? TIOCSER_TEMT : 0; | 888 | unsigned short in_tx_fifo = scif_txfill(port); |
| 889 | |||
| 890 | return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0; | ||
| 804 | } | 891 | } |
| 805 | 892 | ||
| 806 | static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl) | 893 | static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl) |
| @@ -812,16 +899,299 @@ static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl) | |||
| 812 | 899 | ||
| 813 | static unsigned int sci_get_mctrl(struct uart_port *port) | 900 | static unsigned int sci_get_mctrl(struct uart_port *port) |
| 814 | { | 901 | { |
| 815 | /* This routine is used for geting signals of: DTR, DCD, DSR, RI, | 902 | /* This routine is used for getting signals of: DTR, DCD, DSR, RI, |
| 816 | and CTS/RTS */ | 903 | and CTS/RTS */ |
| 817 | 904 | ||
| 818 | return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR; | 905 | return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR; |
| 819 | } | 906 | } |
| 820 | 907 | ||
| 908 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | ||
| 909 | static void sci_dma_tx_complete(void *arg) | ||
| 910 | { | ||
| 911 | struct sci_port *s = arg; | ||
| 912 | struct uart_port *port = &s->port; | ||
| 913 | struct circ_buf *xmit = &port->state->xmit; | ||
| 914 | unsigned long flags; | ||
| 915 | |||
| 916 | dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); | ||
| 917 | |||
| 918 | spin_lock_irqsave(&port->lock, flags); | ||
| 919 | |||
| 920 | xmit->tail += s->sg_tx.length; | ||
| 921 | xmit->tail &= UART_XMIT_SIZE - 1; | ||
| 922 | |||
| 923 | port->icount.tx += s->sg_tx.length; | ||
| 924 | |||
| 925 | async_tx_ack(s->desc_tx); | ||
| 926 | s->cookie_tx = -EINVAL; | ||
| 927 | s->desc_tx = NULL; | ||
| 928 | |||
| 929 | spin_unlock_irqrestore(&port->lock, flags); | ||
| 930 | |||
| 931 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | ||
| 932 | uart_write_wakeup(port); | ||
| 933 | |||
| 934 | if (uart_circ_chars_pending(xmit)) | ||
| 935 | schedule_work(&s->work_tx); | ||
| 936 | } | ||
| 937 | |||
| 938 | /* Locking: called with port lock held */ | ||
| 939 | static int sci_dma_rx_push(struct sci_port *s, struct tty_struct *tty, | ||
| 940 | size_t count) | ||
| 941 | { | ||
| 942 | struct uart_port *port = &s->port; | ||
| 943 | int i, active, room; | ||
| 944 | |||
| 945 | room = tty_buffer_request_room(tty, count); | ||
| 946 | |||
| 947 | if (s->active_rx == s->cookie_rx[0]) { | ||
| 948 | active = 0; | ||
| 949 | } else if (s->active_rx == s->cookie_rx[1]) { | ||
| 950 | active = 1; | ||
| 951 | } else { | ||
| 952 | dev_err(port->dev, "cookie %d not found!\n", s->active_rx); | ||
| 953 | return 0; | ||
| 954 | } | ||
| 955 | |||
| 956 | if (room < count) | ||
| 957 | dev_warn(port->dev, "Rx overrun: dropping %u bytes\n", | ||
| 958 | count - room); | ||
| 959 | if (!room) | ||
| 960 | return room; | ||
| 961 | |||
| 962 | for (i = 0; i < room; i++) | ||
| 963 | tty_insert_flip_char(tty, ((u8 *)sg_virt(&s->sg_rx[active]))[i], | ||
| 964 | TTY_NORMAL); | ||
| 965 | |||
| 966 | port->icount.rx += room; | ||
| 967 | |||
| 968 | return room; | ||
| 969 | } | ||
| 970 | |||
| 971 | static void sci_dma_rx_complete(void *arg) | ||
| 972 | { | ||
| 973 | struct sci_port *s = arg; | ||
| 974 | struct uart_port *port = &s->port; | ||
| 975 | struct tty_struct *tty = port->state->port.tty; | ||
| 976 | unsigned long flags; | ||
| 977 | int count; | ||
| 978 | |||
| 979 | dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); | ||
| 980 | |||
| 981 | spin_lock_irqsave(&port->lock, flags); | ||
| 982 | |||
| 983 | count = sci_dma_rx_push(s, tty, s->buf_len_rx); | ||
| 984 | |||
| 985 | mod_timer(&s->rx_timer, jiffies + msecs_to_jiffies(5)); | ||
| 986 | |||
| 987 | spin_unlock_irqrestore(&port->lock, flags); | ||
| 988 | |||
| 989 | if (count) | ||
| 990 | tty_flip_buffer_push(tty); | ||
| 991 | |||
| 992 | schedule_work(&s->work_rx); | ||
| 993 | } | ||
| 994 | |||
| 995 | static void sci_start_rx(struct uart_port *port); | ||
| 996 | static void sci_start_tx(struct uart_port *port); | ||
| 997 | |||
| 998 | static void sci_rx_dma_release(struct sci_port *s, bool enable_pio) | ||
| 999 | { | ||
| 1000 | struct dma_chan *chan = s->chan_rx; | ||
| 1001 | struct uart_port *port = &s->port; | ||
| 1002 | unsigned long flags; | ||
| 1003 | |||
| 1004 | s->chan_rx = NULL; | ||
| 1005 | s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL; | ||
| 1006 | dma_release_channel(chan); | ||
| 1007 | dma_free_coherent(port->dev, s->buf_len_rx * 2, | ||
| 1008 | sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0])); | ||
| 1009 | if (enable_pio) | ||
| 1010 | sci_start_rx(port); | ||
| 1011 | } | ||
| 1012 | |||
| 1013 | static void sci_tx_dma_release(struct sci_port *s, bool enable_pio) | ||
| 1014 | { | ||
| 1015 | struct dma_chan *chan = s->chan_tx; | ||
| 1016 | struct uart_port *port = &s->port; | ||
| 1017 | unsigned long flags; | ||
| 1018 | |||
| 1019 | s->chan_tx = NULL; | ||
| 1020 | s->cookie_tx = -EINVAL; | ||
| 1021 | dma_release_channel(chan); | ||
| 1022 | if (enable_pio) | ||
| 1023 | sci_start_tx(port); | ||
| 1024 | } | ||
| 1025 | |||
| 1026 | static void sci_submit_rx(struct sci_port *s) | ||
| 1027 | { | ||
| 1028 | struct dma_chan *chan = s->chan_rx; | ||
| 1029 | int i; | ||
| 1030 | |||
| 1031 | for (i = 0; i < 2; i++) { | ||
| 1032 | struct scatterlist *sg = &s->sg_rx[i]; | ||
| 1033 | struct dma_async_tx_descriptor *desc; | ||
| 1034 | |||
| 1035 | desc = chan->device->device_prep_slave_sg(chan, | ||
| 1036 | sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT); | ||
| 1037 | |||
| 1038 | if (desc) { | ||
| 1039 | s->desc_rx[i] = desc; | ||
| 1040 | desc->callback = sci_dma_rx_complete; | ||
| 1041 | desc->callback_param = s; | ||
| 1042 | s->cookie_rx[i] = desc->tx_submit(desc); | ||
| 1043 | } | ||
| 1044 | |||
| 1045 | if (!desc || s->cookie_rx[i] < 0) { | ||
| 1046 | if (i) { | ||
| 1047 | async_tx_ack(s->desc_rx[0]); | ||
| 1048 | s->cookie_rx[0] = -EINVAL; | ||
| 1049 | } | ||
| 1050 | if (desc) { | ||
| 1051 | async_tx_ack(desc); | ||
| 1052 | s->cookie_rx[i] = -EINVAL; | ||
| 1053 | } | ||
| 1054 | dev_warn(s->port.dev, | ||
| 1055 | "failed to re-start DMA, using PIO\n"); | ||
| 1056 | sci_rx_dma_release(s, true); | ||
| 1057 | return; | ||
| 1058 | } | ||
| 1059 | } | ||
| 1060 | |||
| 1061 | s->active_rx = s->cookie_rx[0]; | ||
| 1062 | |||
| 1063 | dma_async_issue_pending(chan); | ||
| 1064 | } | ||
| 1065 | |||
| 1066 | static void work_fn_rx(struct work_struct *work) | ||
| 1067 | { | ||
| 1068 | struct sci_port *s = container_of(work, struct sci_port, work_rx); | ||
| 1069 | struct uart_port *port = &s->port; | ||
| 1070 | struct dma_async_tx_descriptor *desc; | ||
| 1071 | int new; | ||
| 1072 | |||
| 1073 | if (s->active_rx == s->cookie_rx[0]) { | ||
| 1074 | new = 0; | ||
| 1075 | } else if (s->active_rx == s->cookie_rx[1]) { | ||
| 1076 | new = 1; | ||
| 1077 | } else { | ||
| 1078 | dev_err(port->dev, "cookie %d not found!\n", s->active_rx); | ||
| 1079 | return; | ||
| 1080 | } | ||
| 1081 | desc = s->desc_rx[new]; | ||
| 1082 | |||
| 1083 | if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) != | ||
| 1084 | DMA_SUCCESS) { | ||
| 1085 | /* Handle incomplete DMA receive */ | ||
| 1086 | struct tty_struct *tty = port->state->port.tty; | ||
| 1087 | struct dma_chan *chan = s->chan_rx; | ||
| 1088 | struct sh_desc *sh_desc = container_of(desc, struct sh_desc, | ||
| 1089 | async_tx); | ||
| 1090 | unsigned long flags; | ||
| 1091 | int count; | ||
| 1092 | |||
| 1093 | chan->device->device_terminate_all(chan); | ||
| 1094 | dev_dbg(port->dev, "Read %u bytes with cookie %d\n", | ||
| 1095 | sh_desc->partial, sh_desc->cookie); | ||
| 1096 | |||
| 1097 | spin_lock_irqsave(&port->lock, flags); | ||
| 1098 | count = sci_dma_rx_push(s, tty, sh_desc->partial); | ||
| 1099 | spin_unlock_irqrestore(&port->lock, flags); | ||
| 1100 | |||
| 1101 | if (count) | ||
| 1102 | tty_flip_buffer_push(tty); | ||
| 1103 | |||
| 1104 | sci_submit_rx(s); | ||
| 1105 | |||
| 1106 | return; | ||
| 1107 | } | ||
| 1108 | |||
| 1109 | s->cookie_rx[new] = desc->tx_submit(desc); | ||
| 1110 | if (s->cookie_rx[new] < 0) { | ||
| 1111 | dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n"); | ||
| 1112 | sci_rx_dma_release(s, true); | ||
| 1113 | return; | ||
| 1114 | } | ||
| 1115 | |||
| 1116 | dev_dbg(port->dev, "%s: cookie %d #%d\n", __func__, | ||
| 1117 | s->cookie_rx[new], new); | ||
| 1118 | |||
| 1119 | s->active_rx = s->cookie_rx[!new]; | ||
| 1120 | } | ||
| 1121 | |||
| 1122 | static void work_fn_tx(struct work_struct *work) | ||
| 1123 | { | ||
| 1124 | struct sci_port *s = container_of(work, struct sci_port, work_tx); | ||
| 1125 | struct dma_async_tx_descriptor *desc; | ||
| 1126 | struct dma_chan *chan = s->chan_tx; | ||
| 1127 | struct uart_port *port = &s->port; | ||
| 1128 | struct circ_buf *xmit = &port->state->xmit; | ||
| 1129 | struct scatterlist *sg = &s->sg_tx; | ||
| 1130 | |||
| 1131 | /* | ||
| 1132 | * DMA is idle now. | ||
| 1133 | * Port xmit buffer is already mapped, and it is one page... Just adjust | ||
| 1134 | * offsets and lengths. Since it is a circular buffer, we have to | ||
| 1135 | * transmit till the end, and then the rest. Take the port lock to get a | ||
| 1136 | * consistent xmit buffer state. | ||
| 1137 | */ | ||
| 1138 | spin_lock_irq(&port->lock); | ||
| 1139 | sg->offset = xmit->tail & (UART_XMIT_SIZE - 1); | ||
| 1140 | sg->dma_address = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) + | ||
| 1141 | sg->offset; | ||
| 1142 | sg->length = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE), | ||
| 1143 | CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE)); | ||
| 1144 | sg->dma_length = sg->length; | ||
| 1145 | spin_unlock_irq(&port->lock); | ||
| 1146 | |||
| 1147 | BUG_ON(!sg->length); | ||
| 1148 | |||
| 1149 | desc = chan->device->device_prep_slave_sg(chan, | ||
| 1150 | sg, s->sg_len_tx, DMA_TO_DEVICE, | ||
| 1151 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
| 1152 | if (!desc) { | ||
| 1153 | /* switch to PIO */ | ||
| 1154 | sci_tx_dma_release(s, true); | ||
| 1155 | return; | ||
| 1156 | } | ||
| 1157 | |||
| 1158 | dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE); | ||
| 1159 | |||
| 1160 | spin_lock_irq(&port->lock); | ||
| 1161 | s->desc_tx = desc; | ||
| 1162 | desc->callback = sci_dma_tx_complete; | ||
| 1163 | desc->callback_param = s; | ||
| 1164 | spin_unlock_irq(&port->lock); | ||
| 1165 | s->cookie_tx = desc->tx_submit(desc); | ||
| 1166 | if (s->cookie_tx < 0) { | ||
| 1167 | dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n"); | ||
| 1168 | /* switch to PIO */ | ||
| 1169 | sci_tx_dma_release(s, true); | ||
| 1170 | return; | ||
| 1171 | } | ||
| 1172 | |||
| 1173 | dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", __func__, | ||
| 1174 | xmit->buf, xmit->tail, xmit->head, s->cookie_tx); | ||
| 1175 | |||
| 1176 | dma_async_issue_pending(chan); | ||
| 1177 | } | ||
| 1178 | #endif | ||
| 1179 | |||
| 821 | static void sci_start_tx(struct uart_port *port) | 1180 | static void sci_start_tx(struct uart_port *port) |
| 822 | { | 1181 | { |
| 823 | unsigned short ctrl; | 1182 | unsigned short ctrl; |
| 824 | 1183 | ||
| 1184 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | ||
| 1185 | struct sci_port *s = to_sci_port(port); | ||
| 1186 | |||
| 1187 | if (s->chan_tx) { | ||
| 1188 | if (!uart_circ_empty(&s->port.state->xmit) && s->cookie_tx < 0) | ||
| 1189 | schedule_work(&s->work_tx); | ||
| 1190 | |||
| 1191 | return; | ||
| 1192 | } | ||
| 1193 | #endif | ||
| 1194 | |||
| 825 | /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */ | 1195 | /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */ |
| 826 | ctrl = sci_in(port, SCSCR); | 1196 | ctrl = sci_in(port, SCSCR); |
| 827 | ctrl |= SCI_CTRL_FLAGS_TIE; | 1197 | ctrl |= SCI_CTRL_FLAGS_TIE; |
| @@ -838,13 +1208,12 @@ static void sci_stop_tx(struct uart_port *port) | |||
| 838 | sci_out(port, SCSCR, ctrl); | 1208 | sci_out(port, SCSCR, ctrl); |
| 839 | } | 1209 | } |
| 840 | 1210 | ||
| 841 | static void sci_start_rx(struct uart_port *port, unsigned int tty_start) | 1211 | static void sci_start_rx(struct uart_port *port) |
| 842 | { | 1212 | { |
| 843 | unsigned short ctrl; | 1213 | unsigned short ctrl = SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE; |
| 844 | 1214 | ||
| 845 | /* Set RIE (Receive Interrupt Enable) bit in SCSCR */ | 1215 | /* Set RIE (Receive Interrupt Enable) bit in SCSCR */ |
| 846 | ctrl = sci_in(port, SCSCR); | 1216 | ctrl |= sci_in(port, SCSCR); |
| 847 | ctrl |= SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE; | ||
| 848 | sci_out(port, SCSCR, ctrl); | 1217 | sci_out(port, SCSCR, ctrl); |
| 849 | } | 1218 | } |
| 850 | 1219 | ||
| @@ -868,16 +1237,154 @@ static void sci_break_ctl(struct uart_port *port, int break_state) | |||
| 868 | /* Nothing here yet .. */ | 1237 | /* Nothing here yet .. */ |
| 869 | } | 1238 | } |
| 870 | 1239 | ||
| 1240 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | ||
| 1241 | static bool filter(struct dma_chan *chan, void *slave) | ||
| 1242 | { | ||
| 1243 | struct sh_dmae_slave *param = slave; | ||
| 1244 | |||
| 1245 | dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__, | ||
| 1246 | param->slave_id); | ||
| 1247 | |||
| 1248 | if (param->dma_dev == chan->device->dev) { | ||
| 1249 | chan->private = param; | ||
| 1250 | return true; | ||
| 1251 | } else { | ||
| 1252 | return false; | ||
| 1253 | } | ||
| 1254 | } | ||
| 1255 | |||
| 1256 | static void rx_timer_fn(unsigned long arg) | ||
| 1257 | { | ||
| 1258 | struct sci_port *s = (struct sci_port *)arg; | ||
| 1259 | struct uart_port *port = &s->port; | ||
| 1260 | |||
| 1261 | u16 scr = sci_in(port, SCSCR); | ||
| 1262 | sci_out(port, SCSCR, scr | SCI_CTRL_FLAGS_RIE); | ||
| 1263 | dev_dbg(port->dev, "DMA Rx timed out\n"); | ||
| 1264 | schedule_work(&s->work_rx); | ||
| 1265 | } | ||
| 1266 | |||
| 1267 | static void sci_request_dma(struct uart_port *port) | ||
| 1268 | { | ||
| 1269 | struct sci_port *s = to_sci_port(port); | ||
| 1270 | struct sh_dmae_slave *param; | ||
| 1271 | struct dma_chan *chan; | ||
| 1272 | dma_cap_mask_t mask; | ||
| 1273 | int nent; | ||
| 1274 | |||
| 1275 | dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__, | ||
| 1276 | port->line, s->dma_dev); | ||
| 1277 | |||
| 1278 | if (!s->dma_dev) | ||
| 1279 | return; | ||
| 1280 | |||
| 1281 | dma_cap_zero(mask); | ||
| 1282 | dma_cap_set(DMA_SLAVE, mask); | ||
| 1283 | |||
| 1284 | param = &s->param_tx; | ||
| 1285 | |||
| 1286 | /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */ | ||
| 1287 | param->slave_id = s->slave_tx; | ||
| 1288 | param->dma_dev = s->dma_dev; | ||
| 1289 | |||
| 1290 | s->cookie_tx = -EINVAL; | ||
| 1291 | chan = dma_request_channel(mask, filter, param); | ||
| 1292 | dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan); | ||
| 1293 | if (chan) { | ||
| 1294 | s->chan_tx = chan; | ||
| 1295 | sg_init_table(&s->sg_tx, 1); | ||
| 1296 | /* UART circular tx buffer is an aligned page. */ | ||
| 1297 | BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK); | ||
| 1298 | sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf), | ||
| 1299 | UART_XMIT_SIZE, (int)port->state->xmit.buf & ~PAGE_MASK); | ||
| 1300 | nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE); | ||
| 1301 | if (!nent) | ||
| 1302 | sci_tx_dma_release(s, false); | ||
| 1303 | else | ||
| 1304 | dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__, | ||
| 1305 | sg_dma_len(&s->sg_tx), | ||
| 1306 | port->state->xmit.buf, sg_dma_address(&s->sg_tx)); | ||
| 1307 | |||
| 1308 | s->sg_len_tx = nent; | ||
| 1309 | |||
| 1310 | INIT_WORK(&s->work_tx, work_fn_tx); | ||
| 1311 | } | ||
| 1312 | |||
| 1313 | param = &s->param_rx; | ||
| 1314 | |||
| 1315 | /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */ | ||
| 1316 | param->slave_id = s->slave_rx; | ||
| 1317 | param->dma_dev = s->dma_dev; | ||
| 1318 | |||
| 1319 | chan = dma_request_channel(mask, filter, param); | ||
| 1320 | dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan); | ||
| 1321 | if (chan) { | ||
| 1322 | dma_addr_t dma[2]; | ||
| 1323 | void *buf[2]; | ||
| 1324 | int i; | ||
| 1325 | |||
| 1326 | s->chan_rx = chan; | ||
| 1327 | |||
| 1328 | s->buf_len_rx = 2 * max(16, (int)port->fifosize); | ||
| 1329 | buf[0] = dma_alloc_coherent(port->dev, s->buf_len_rx * 2, | ||
| 1330 | &dma[0], GFP_KERNEL); | ||
| 1331 | |||
| 1332 | if (!buf[0]) { | ||
| 1333 | dev_warn(port->dev, | ||
| 1334 | "failed to allocate dma buffer, using PIO\n"); | ||
| 1335 | sci_rx_dma_release(s, true); | ||
| 1336 | return; | ||
| 1337 | } | ||
| 1338 | |||
| 1339 | buf[1] = buf[0] + s->buf_len_rx; | ||
| 1340 | dma[1] = dma[0] + s->buf_len_rx; | ||
| 1341 | |||
| 1342 | for (i = 0; i < 2; i++) { | ||
| 1343 | struct scatterlist *sg = &s->sg_rx[i]; | ||
| 1344 | |||
| 1345 | sg_init_table(sg, 1); | ||
| 1346 | sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx, | ||
| 1347 | (int)buf[i] & ~PAGE_MASK); | ||
| 1348 | sg->dma_address = dma[i]; | ||
| 1349 | sg->dma_length = sg->length; | ||
| 1350 | } | ||
| 1351 | |||
| 1352 | INIT_WORK(&s->work_rx, work_fn_rx); | ||
| 1353 | setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s); | ||
| 1354 | |||
| 1355 | sci_submit_rx(s); | ||
| 1356 | } | ||
| 1357 | } | ||
| 1358 | |||
| 1359 | static void sci_free_dma(struct uart_port *port) | ||
| 1360 | { | ||
| 1361 | struct sci_port *s = to_sci_port(port); | ||
| 1362 | |||
| 1363 | if (!s->dma_dev) | ||
| 1364 | return; | ||
| 1365 | |||
| 1366 | if (s->chan_tx) | ||
| 1367 | sci_tx_dma_release(s, false); | ||
| 1368 | if (s->chan_rx) | ||
| 1369 | sci_rx_dma_release(s, false); | ||
| 1370 | } | ||
| 1371 | #endif | ||
| 1372 | |||
| 871 | static int sci_startup(struct uart_port *port) | 1373 | static int sci_startup(struct uart_port *port) |
| 872 | { | 1374 | { |
| 873 | struct sci_port *s = to_sci_port(port); | 1375 | struct sci_port *s = to_sci_port(port); |
| 874 | 1376 | ||
| 1377 | dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); | ||
| 1378 | |||
| 875 | if (s->enable) | 1379 | if (s->enable) |
| 876 | s->enable(port); | 1380 | s->enable(port); |
| 877 | 1381 | ||
| 878 | sci_request_irq(s); | 1382 | sci_request_irq(s); |
| 1383 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | ||
| 1384 | sci_request_dma(port); | ||
| 1385 | #endif | ||
| 879 | sci_start_tx(port); | 1386 | sci_start_tx(port); |
| 880 | sci_start_rx(port, 1); | 1387 | sci_start_rx(port); |
| 881 | 1388 | ||
| 882 | return 0; | 1389 | return 0; |
| 883 | } | 1390 | } |
| @@ -886,8 +1393,13 @@ static void sci_shutdown(struct uart_port *port) | |||
| 886 | { | 1393 | { |
| 887 | struct sci_port *s = to_sci_port(port); | 1394 | struct sci_port *s = to_sci_port(port); |
| 888 | 1395 | ||
| 1396 | dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); | ||
| 1397 | |||
| 889 | sci_stop_rx(port); | 1398 | sci_stop_rx(port); |
| 890 | sci_stop_tx(port); | 1399 | sci_stop_tx(port); |
| 1400 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | ||
| 1401 | sci_free_dma(port); | ||
| 1402 | #endif | ||
| 891 | sci_free_irq(s); | 1403 | sci_free_irq(s); |
| 892 | 1404 | ||
| 893 | if (s->disable) | 1405 | if (s->disable) |
| @@ -937,6 +1449,9 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, | |||
| 937 | 1449 | ||
| 938 | sci_out(port, SCSMR, smr_val); | 1450 | sci_out(port, SCSMR, smr_val); |
| 939 | 1451 | ||
| 1452 | dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t, | ||
| 1453 | SCSCR_INIT(port)); | ||
| 1454 | |||
| 940 | if (t > 0) { | 1455 | if (t > 0) { |
| 941 | if (t >= 256) { | 1456 | if (t >= 256) { |
| 942 | sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1); | 1457 | sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1); |
| @@ -954,7 +1469,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, | |||
| 954 | sci_out(port, SCSCR, SCSCR_INIT(port)); | 1469 | sci_out(port, SCSCR, SCSCR_INIT(port)); |
| 955 | 1470 | ||
| 956 | if ((termios->c_cflag & CREAD) != 0) | 1471 | if ((termios->c_cflag & CREAD) != 0) |
| 957 | sci_start_rx(port, 0); | 1472 | sci_start_rx(port); |
| 958 | } | 1473 | } |
| 959 | 1474 | ||
| 960 | static const char *sci_type(struct uart_port *port) | 1475 | static const char *sci_type(struct uart_port *port) |
| @@ -1049,19 +1564,21 @@ static void __devinit sci_init_single(struct platform_device *dev, | |||
| 1049 | unsigned int index, | 1564 | unsigned int index, |
| 1050 | struct plat_sci_port *p) | 1565 | struct plat_sci_port *p) |
| 1051 | { | 1566 | { |
| 1052 | sci_port->port.ops = &sci_uart_ops; | 1567 | struct uart_port *port = &sci_port->port; |
| 1053 | sci_port->port.iotype = UPIO_MEM; | 1568 | |
| 1054 | sci_port->port.line = index; | 1569 | port->ops = &sci_uart_ops; |
| 1570 | port->iotype = UPIO_MEM; | ||
| 1571 | port->line = index; | ||
| 1055 | 1572 | ||
| 1056 | switch (p->type) { | 1573 | switch (p->type) { |
| 1057 | case PORT_SCIFA: | 1574 | case PORT_SCIFA: |
| 1058 | sci_port->port.fifosize = 64; | 1575 | port->fifosize = 64; |
| 1059 | break; | 1576 | break; |
| 1060 | case PORT_SCIF: | 1577 | case PORT_SCIF: |
| 1061 | sci_port->port.fifosize = 16; | 1578 | port->fifosize = 16; |
| 1062 | break; | 1579 | break; |
| 1063 | default: | 1580 | default: |
| 1064 | sci_port->port.fifosize = 1; | 1581 | port->fifosize = 1; |
| 1065 | break; | 1582 | break; |
| 1066 | } | 1583 | } |
| 1067 | 1584 | ||
| @@ -1070,19 +1587,28 @@ static void __devinit sci_init_single(struct platform_device *dev, | |||
| 1070 | sci_port->dclk = clk_get(&dev->dev, "peripheral_clk"); | 1587 | sci_port->dclk = clk_get(&dev->dev, "peripheral_clk"); |
| 1071 | sci_port->enable = sci_clk_enable; | 1588 | sci_port->enable = sci_clk_enable; |
| 1072 | sci_port->disable = sci_clk_disable; | 1589 | sci_port->disable = sci_clk_disable; |
| 1073 | sci_port->port.dev = &dev->dev; | 1590 | port->dev = &dev->dev; |
| 1074 | } | 1591 | } |
| 1075 | 1592 | ||
| 1076 | sci_port->break_timer.data = (unsigned long)sci_port; | 1593 | sci_port->break_timer.data = (unsigned long)sci_port; |
| 1077 | sci_port->break_timer.function = sci_break_timer; | 1594 | sci_port->break_timer.function = sci_break_timer; |
| 1078 | init_timer(&sci_port->break_timer); | 1595 | init_timer(&sci_port->break_timer); |
| 1079 | 1596 | ||
| 1080 | sci_port->port.mapbase = p->mapbase; | 1597 | port->mapbase = p->mapbase; |
| 1081 | sci_port->port.membase = p->membase; | 1598 | port->membase = p->membase; |
| 1082 | 1599 | ||
| 1083 | sci_port->port.irq = p->irqs[SCIx_TXI_IRQ]; | 1600 | port->irq = p->irqs[SCIx_TXI_IRQ]; |
| 1084 | sci_port->port.flags = p->flags; | 1601 | port->flags = p->flags; |
| 1085 | sci_port->type = sci_port->port.type = p->type; | 1602 | sci_port->type = port->type = p->type; |
| 1603 | |||
| 1604 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | ||
| 1605 | sci_port->dma_dev = p->dma_dev; | ||
| 1606 | sci_port->slave_tx = p->dma_slave_tx; | ||
| 1607 | sci_port->slave_rx = p->dma_slave_rx; | ||
| 1608 | |||
| 1609 | dev_dbg(port->dev, "%s: DMA device %p, tx %d, rx %d\n", __func__, | ||
| 1610 | p->dma_dev, p->dma_slave_tx, p->dma_slave_rx); | ||
| 1611 | #endif | ||
| 1086 | 1612 | ||
| 1087 | memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs)); | 1613 | memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs)); |
| 1088 | } | 1614 | } |
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index 1c297ddc9d5a..1b177d29a7f0 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
| @@ -2,6 +2,7 @@ | |||
| 2 | #define __LINUX_SERIAL_SCI_H | 2 | #define __LINUX_SERIAL_SCI_H |
| 3 | 3 | ||
| 4 | #include <linux/serial_core.h> | 4 | #include <linux/serial_core.h> |
| 5 | #include <asm/dmaengine.h> | ||
| 5 | 6 | ||
| 6 | /* | 7 | /* |
| 7 | * Generic header for SuperH SCI(F) (used by sh/sh64/h8300 and related parts) | 8 | * Generic header for SuperH SCI(F) (used by sh/sh64/h8300 and related parts) |
| @@ -16,6 +17,8 @@ enum { | |||
| 16 | SCIx_NR_IRQS, | 17 | SCIx_NR_IRQS, |
| 17 | }; | 18 | }; |
| 18 | 19 | ||
| 20 | struct device; | ||
| 21 | |||
| 19 | /* | 22 | /* |
| 20 | * Platform device specific platform_data struct | 23 | * Platform device specific platform_data struct |
| 21 | */ | 24 | */ |
| @@ -26,6 +29,9 @@ struct plat_sci_port { | |||
| 26 | unsigned int type; /* SCI / SCIF / IRDA */ | 29 | unsigned int type; /* SCI / SCIF / IRDA */ |
| 27 | upf_t flags; /* UPF_* flags */ | 30 | upf_t flags; /* UPF_* flags */ |
| 28 | char *clk; /* clock string */ | 31 | char *clk; /* clock string */ |
| 32 | struct device *dma_dev; | ||
| 33 | enum sh_dmae_slave_chan_id dma_slave_tx; | ||
| 34 | enum sh_dmae_slave_chan_id dma_slave_rx; | ||
| 29 | }; | 35 | }; |
| 30 | 36 | ||
| 31 | #endif /* __LINUX_SERIAL_SCI_H */ | 37 | #endif /* __LINUX_SERIAL_SCI_H */ |