Diffstat (limited to 'drivers/serial/sh-sci.c')
-rw-r--r--	drivers/serial/sh-sci.c	197
1 file changed, 132 insertions(+), 65 deletions(-)
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 8eb094c1f61b..4f73fb756745 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -83,16 +83,16 @@ struct sci_port {
 
 	/* Interface clock */
 	struct clk *iclk;
-	/* Data clock */
-	struct clk *dclk;
+	/* Function clock */
+	struct clk *fclk;
 
 	struct list_head node;
 	struct dma_chan *chan_tx;
 	struct dma_chan *chan_rx;
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
 	struct device *dma_dev;
-	enum sh_dmae_slave_chan_id slave_tx;
-	enum sh_dmae_slave_chan_id slave_rx;
+	unsigned int slave_tx;
+	unsigned int slave_rx;
 	struct dma_async_tx_descriptor *desc_tx;
 	struct dma_async_tx_descriptor *desc_rx[2];
 	dma_cookie_t cookie_tx;
@@ -107,6 +107,7 @@ struct sci_port {
 	struct work_struct work_tx;
 	struct work_struct work_rx;
 	struct timer_list rx_timer;
+	unsigned int rx_timeout;
 #endif
 };
 
@@ -150,7 +151,11 @@ static int sci_poll_get_char(struct uart_port *port)
 			handle_error(port);
 			continue;
 		}
-	} while (!(status & SCxSR_RDxF(port)));
+		break;
+	} while (1);
+
+	if (!(status & SCxSR_RDxF(port)))
+		return NO_POLL_CHAR;
 
 	c = sci_in(port, SCxRDR);
 
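The reworked loop above no longer spins until a character arrives: it only loops while an error condition is being cleared, then returns NO_POLL_CHAR when RDxF is not set, matching the non-blocking contract of the uart_ops poll_get_char hook (used by kgdb over serial). A minimal user-space model of that control flow, with the status register and flag values stubbed out as assumptions:

#include <stdio.h>

#define NO_POLL_CHAR	0x00ff0100	/* sentinel value as in serial_core.h */

/* Toy register model standing in for sci_in(port, SCxSR) (assumption). */
static unsigned int status_reg;
static int rx_byte = -1;
#define FLAG_ERRORS	0x10		/* stand-in for SCxSR_ERRORS(port) */
#define FLAG_RDXF	0x02		/* stand-in for SCxSR_RDxF(port) */

static int poll_get_char(void)
{
	unsigned int status;

	do {
		status = status_reg;
		if (status & FLAG_ERRORS) {
			status_reg &= ~FLAG_ERRORS;	/* "handle_error()" */
			continue;
		}
		break;
	} while (1);

	if (!(status & FLAG_RDXF))
		return NO_POLL_CHAR;	/* nothing pending, do not block */

	return rx_byte;
}

int main(void)
{
	printf("idle:    %#x\n", poll_get_char());	/* NO_POLL_CHAR */
	status_reg = FLAG_RDXF;
	rx_byte = 'A';
	printf("pending: %c\n", poll_get_char());	/* A */
	return 0;
}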
@@ -674,22 +679,22 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
 	struct sci_port *s = to_sci_port(port);
 
 	if (s->chan_rx) {
-		unsigned long tout;
 		u16 scr = sci_in(port, SCSCR);
 		u16 ssr = sci_in(port, SCxSR);
 
 		/* Disable future Rx interrupts */
-		sci_out(port, SCSCR, scr & ~SCI_CTRL_FLAGS_RIE);
+		if (port->type == PORT_SCIFA) {
+			disable_irq_nosync(irq);
+			scr |= 0x4000;
+		} else {
+			scr &= ~SCI_CTRL_FLAGS_RIE;
+		}
+		sci_out(port, SCSCR, scr);
 		/* Clear current interrupt */
 		sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
-		/* Calculate delay for 1.5 DMA buffers */
-		tout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
-			port->fifosize / 2;
-		dev_dbg(port->dev, "Rx IRQ: setup timeout in %lu ms\n",
-			tout * 1000 / HZ);
-		if (tout < 2)
-			tout = 2;
-		mod_timer(&s->rx_timer, jiffies + tout);
+		dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
+			jiffies, s->rx_timeout);
+		mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
 
 		return IRQ_HANDLED;
 	}
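The raw 0x4000 and 0x8000 SCSCR values used in the PORT_SCIFA branches here and in the hunks below appear to be the SCIFA-only DMA transfer request enable bits (later sh-sci headers call them SCSCR_RDRQE and SCSCR_TDRQE); the patch itself only uses the numeric values, so treat the names as an assumption. On SCIFA the handler also masks the interrupt line with disable_irq_nosync() and re-enables it from rx_timer_fn() further down, instead of simply clearing RIE. A small stand-alone sketch of the read-modify-write pattern, with the bit names and values stated as assumptions:

#include <stdio.h>
#include <stdint.h>

/* Assumed names for the raw constants used in the patch (SCIFA only). */
#define SCSCR_TDRQE	0x8000	/* Tx DMA transfer request enable */
#define SCSCR_RDRQE	0x4000	/* Rx DMA transfer request enable */
#define SCSCR_RIE	0x0040	/* SCI_CTRL_FLAGS_RIE, Rx interrupt enable */

static uint16_t scscr = SCSCR_RIE;	/* toy register, not real hardware */

/* Quiesce Rx events: the SCIFA path raises the DMA request bit, every
 * other port type clears RIE, mirroring the branch in the hunk above. */
static void rx_quiesce(int is_scifa)
{
	uint16_t scr = scscr;		/* sci_in(port, SCSCR) */

	if (is_scifa)
		scr |= SCSCR_RDRQE;
	else
		scr &= ~SCSCR_RIE;
	scscr = scr;			/* sci_out(port, SCSCR, scr) */
}

int main(void)
{
	rx_quiesce(1);
	printf("SCIFA: SCSCR = %#x\n", scscr);	/* 0x4040 */
	scscr = SCSCR_RIE;
	rx_quiesce(0);
	printf("SCIF:  SCSCR = %#x\n", scscr);	/* 0 */
	return 0;
}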
@@ -799,7 +804,7 @@ static int sci_notifier(struct notifier_block *self,
 	    (phase == CPUFREQ_RESUMECHANGE)) {
 		spin_lock_irqsave(&priv->lock, flags);
 		list_for_each_entry(sci_port, &priv->ports, node)
-			sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
+			sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
 		spin_unlock_irqrestore(&priv->lock, flags);
 	}
 
@@ -810,21 +815,17 @@ static void sci_clk_enable(struct uart_port *port)
 {
 	struct sci_port *sci_port = to_sci_port(port);
 
-	clk_enable(sci_port->dclk);
-	sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
-
-	if (sci_port->iclk)
-		clk_enable(sci_port->iclk);
+	clk_enable(sci_port->iclk);
+	sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
+	clk_enable(sci_port->fclk);
 }
 
 static void sci_clk_disable(struct uart_port *port)
 {
 	struct sci_port *sci_port = to_sci_port(port);
 
-	if (sci_port->iclk)
-		clk_disable(sci_port->iclk);
-
-	clk_disable(sci_port->dclk);
+	clk_disable(sci_port->fclk);
+	clk_disable(sci_port->iclk);
 }
 
 static int sci_request_irq(struct sci_port *port)
@@ -913,22 +914,26 @@ static void sci_dma_tx_complete(void *arg)
 
 	spin_lock_irqsave(&port->lock, flags);
 
-	xmit->tail += s->sg_tx.length;
+	xmit->tail += sg_dma_len(&s->sg_tx);
 	xmit->tail &= UART_XMIT_SIZE - 1;
 
-	port->icount.tx += s->sg_tx.length;
+	port->icount.tx += sg_dma_len(&s->sg_tx);
 
 	async_tx_ack(s->desc_tx);
 	s->cookie_tx = -EINVAL;
 	s->desc_tx = NULL;
 
-	spin_unlock_irqrestore(&port->lock, flags);
-
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
 		uart_write_wakeup(port);
 
-	if (uart_circ_chars_pending(xmit))
+	if (!uart_circ_empty(xmit)) {
 		schedule_work(&s->work_tx);
+	} else if (port->type == PORT_SCIFA) {
+		u16 ctrl = sci_in(port, SCSCR);
+		sci_out(port, SCSCR, ctrl & ~SCI_CTRL_FLAGS_TIE);
+	}
+
+	spin_unlock_irqrestore(&port->lock, flags);
 }
 
 /* Locking: called with port lock held */
@@ -972,13 +977,13 @@ static void sci_dma_rx_complete(void *arg)
 	unsigned long flags;
 	int count;
 
-	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
+	dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx);
 
 	spin_lock_irqsave(&port->lock, flags);
 
 	count = sci_dma_rx_push(s, tty, s->buf_len_rx);
 
-	mod_timer(&s->rx_timer, jiffies + msecs_to_jiffies(5));
+	mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
 
 	spin_unlock_irqrestore(&port->lock, flags);
 
@@ -1050,6 +1055,8 @@ static void sci_submit_rx(struct sci_port *s)
 			sci_rx_dma_release(s, true);
 			return;
 		}
+		dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
+			s->cookie_rx[i], i);
 	}
 
 	s->active_rx = s->cookie_rx[0];
@@ -1084,7 +1091,7 @@ static void work_fn_rx(struct work_struct *work)
 		unsigned long flags;
 		int count;
 
-		chan->device->device_terminate_all(chan);
+		chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
 		dev_dbg(port->dev, "Read %u bytes with cookie %d\n",
 			sh_desc->partial, sh_desc->cookie);
 
@@ -1107,10 +1114,10 @@ static void work_fn_rx(struct work_struct *work)
 		return;
 	}
 
-	dev_dbg(port->dev, "%s: cookie %d #%d\n", __func__,
-		s->cookie_rx[new], new);
-
 	s->active_rx = s->cookie_rx[!new];
+
+	dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__,
+		s->cookie_rx[new], new, s->active_rx);
 }
 
 static void work_fn_tx(struct work_struct *work)
@@ -1131,14 +1138,13 @@ static void work_fn_tx(struct work_struct *work)
 	 */
 	spin_lock_irq(&port->lock);
 	sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
-	sg->dma_address = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
+	sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
 		sg->offset;
-	sg->length = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
+	sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
 		CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
-	sg->dma_length = sg->length;
 	spin_unlock_irq(&port->lock);
 
-	BUG_ON(!sg->length);
+	BUG_ON(!sg_dma_len(sg));
 
 	desc = chan->device->device_prep_slave_sg(chan,
 			sg, s->sg_len_tx, DMA_TO_DEVICE,
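The sg_dma_len() value computed above is min(CIRC_CNT, CIRC_CNT_TO_END): everything pending in the circular xmit buffer, capped at the contiguous run before the buffer wraps, because the single scatterlist entry describes one linear region. A stand-alone check of that arithmetic, using the CIRC_* macros as defined in include/linux/circ_buf.h (GCC statement expression, as in the kernel header) and a hypothetical 16-byte buffer in place of UART_XMIT_SIZE:

#include <stdio.h>

/* From include/linux/circ_buf.h (buffer size must be a power of two). */
#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))
#define CIRC_CNT_TO_END(head, tail, size) \
	({ int end = (size) - (tail); \
	   int n = ((head) + end) & ((size) - 1); \
	   n < end ? n : end; })

static void show(int head, int tail, int size)
{
	int cnt = CIRC_CNT(head, tail, size);
	int to_end = CIRC_CNT_TO_END(head, tail, size);
	int dma_len = cnt < to_end ? cnt : to_end;	/* min(), as above */

	printf("head=%2d tail=%2d: pending=%d contiguous=%d dma_len=%d\n",
	       head, tail, cnt, to_end, dma_len);
}

int main(void)
{
	const int size = 16;		/* hypothetical UART_XMIT_SIZE */

	show(8, 3, size);	/* unwrapped: 5 bytes, all contiguous */
	show(2, 12, size);	/* wrapped: 6 pending, only 4 before the end */
	return 0;
}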
@@ -1173,23 +1179,28 @@ static void work_fn_tx(struct work_struct *work)
 
 static void sci_start_tx(struct uart_port *port)
 {
+	struct sci_port *s = to_sci_port(port);
 	unsigned short ctrl;
 
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
-	struct sci_port *s = to_sci_port(port);
-
-	if (s->chan_tx) {
-		if (!uart_circ_empty(&s->port.state->xmit) && s->cookie_tx < 0)
-			schedule_work(&s->work_tx);
-
-		return;
+	if (port->type == PORT_SCIFA) {
+		u16 new, scr = sci_in(port, SCSCR);
+		if (s->chan_tx)
+			new = scr | 0x8000;
+		else
+			new = scr & ~0x8000;
+		if (new != scr)
+			sci_out(port, SCSCR, new);
 	}
+	if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
+	    s->cookie_tx < 0)
+		schedule_work(&s->work_tx);
 #endif
-
-	/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
-	ctrl = sci_in(port, SCSCR);
-	ctrl |= SCI_CTRL_FLAGS_TIE;
-	sci_out(port, SCSCR, ctrl);
+	if (!s->chan_tx || port->type == PORT_SCIFA) {
+		/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
+		ctrl = sci_in(port, SCSCR);
+		sci_out(port, SCSCR, ctrl | SCI_CTRL_FLAGS_TIE);
+	}
 }
 
 static void sci_stop_tx(struct uart_port *port)
@@ -1198,6 +1209,8 @@ static void sci_stop_tx(struct uart_port *port)
 
 	/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
 	ctrl = sci_in(port, SCSCR);
+	if (port->type == PORT_SCIFA)
+		ctrl &= ~0x8000;
 	ctrl &= ~SCI_CTRL_FLAGS_TIE;
 	sci_out(port, SCSCR, ctrl);
 }
@@ -1208,6 +1221,8 @@ static void sci_start_rx(struct uart_port *port)
 
 	/* Set RIE (Receive Interrupt Enable) bit in SCSCR */
 	ctrl |= sci_in(port, SCSCR);
+	if (port->type == PORT_SCIFA)
+		ctrl &= ~0x4000;
 	sci_out(port, SCSCR, ctrl);
 }
 
@@ -1217,6 +1232,8 @@ static void sci_stop_rx(struct uart_port *port)
 
 	/* Clear RIE (Receive Interrupt Enable) bit in SCSCR */
 	ctrl = sci_in(port, SCSCR);
+	if (port->type == PORT_SCIFA)
+		ctrl &= ~0x4000;
 	ctrl &= ~(SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE);
 	sci_out(port, SCSCR, ctrl);
 }
@@ -1251,8 +1268,12 @@ static void rx_timer_fn(unsigned long arg)
 {
 	struct sci_port *s = (struct sci_port *)arg;
 	struct uart_port *port = &s->port;
-
 	u16 scr = sci_in(port, SCSCR);
+
+	if (port->type == PORT_SCIFA) {
+		scr &= ~0x4000;
+		enable_irq(s->irqs[1]);
+	}
 	sci_out(port, SCSCR, scr | SCI_CTRL_FLAGS_RIE);
 	dev_dbg(port->dev, "DMA Rx timed out\n");
 	schedule_work(&s->work_rx);
@@ -1339,8 +1360,7 @@ static void sci_request_dma(struct uart_port *port)
 			sg_init_table(sg, 1);
 			sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
 				    (int)buf[i] & ~PAGE_MASK);
-			sg->dma_address = dma[i];
-			sg->dma_length = sg->length;
+			sg_dma_address(sg) = dma[i];
 		}
 
 		INIT_WORK(&s->work_rx, work_fn_rx);
@@ -1403,8 +1423,12 @@ static void sci_shutdown(struct uart_port *port)
 static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
 			    struct ktermios *old)
 {
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+	struct sci_port *s = to_sci_port(port);
+#endif
 	unsigned int status, baud, smr_val, max_baud;
 	int t = -1;
+	u16 scfcr = 0;
 
 	/*
 	 * earlyprintk comes here early on with port->uartclk set to zero.
@@ -1427,7 +1451,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
 	sci_out(port, SCSCR, 0x00);	/* TE=0, RE=0, CKE1=0 */
 
 	if (port->type != PORT_SCI)
-		sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
+		sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);
 
 	smr_val = sci_in(port, SCSMR) & 3;
 	if ((termios->c_cflag & CSIZE) == CS7)
@@ -1458,10 +1482,32 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
 	}
 
 	sci_init_pins(port, termios->c_cflag);
-	sci_out(port, SCFCR, (termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0);
+	sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));
 
 	sci_out(port, SCSCR, SCSCR_INIT(port));
 
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+	/*
+	 * Calculate delay for 1.5 DMA buffers: see
+	 * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
+	 * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
+	 * calculates 1 jiffie for the data plus 5 jiffies for the "slop(e)."
+	 * Then below we calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO
+	 * sizes), but it has been found out experimentally, that this is not
+	 * enough: the driver too often needlessly runs on a DMA timeout. 20ms
+	 * as a minimum seem to work perfectly.
+	 */
+	if (s->chan_rx) {
+		s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
+			port->fifosize / 2;
+		dev_dbg(port->dev,
+			"DMA Rx t-out %ums, tty t-out %u jiffies\n",
+			s->rx_timeout * 1000 / HZ, port->timeout);
+		if (s->rx_timeout < msecs_to_jiffies(20))
+			s->rx_timeout = msecs_to_jiffies(20);
+	}
+#endif
+
 	if ((termios->c_cflag & CREAD) != 0)
 		sci_start_rx(port);
 }
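Working through the numbers in the new comment: with HZ = 250, CS8 (10 bits per frame) at 115200 baud and a 64-byte FIFO, uart_update_timeout() gives port->timeout = 1 + HZ/50 = 6 jiffies, and with the Rx DMA buffer sized at twice the FIFO (as sci_request_dma() sets it up, an assumption restated here) the formula yields 3 jiffies, i.e. 12 ms, before the 20 ms floor is applied. A stand-alone version of the arithmetic under those assumptions:

#include <stdio.h>

int main(void)
{
	/* Assumptions from the comment above: HZ=250, 10 bits per frame
	 * (CS8 plus start/stop), 115200 baud, 64-byte FIFO, and an Rx DMA
	 * buffer of 2 * fifosize as set up in sci_request_dma(). */
	const unsigned int hz = 250, baud = 115200;
	const unsigned int bits_per_frame = 10, fifosize = 64;
	const unsigned int buf_len_rx = 2 * fifosize;

	/* serial_core.c::uart_update_timeout(): FIFO drain time + 20 ms slop */
	unsigned int timeout = hz * bits_per_frame * fifosize / baud + hz / 50;

	/* The driver's formula: character time for 1.5 Rx DMA buffers */
	unsigned int rx_timeout = (timeout - hz / 50) * buf_len_rx * 3 /
			fifosize / 2;
	unsigned int floor = 20 * hz / 1000;	/* msecs_to_jiffies(20) */

	printf("port->timeout = %u jiffies\n", timeout);		/* 6 */
	printf("rx_timeout    = %u jiffies (%u ms)\n",
	       rx_timeout, rx_timeout * 1000 / hz);			/* 3 (12 ms) */
	printf("after floor   = %u jiffies\n",
	       rx_timeout < floor ? floor : rx_timeout);		/* 5 (20 ms) */
	return 0;
}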
@@ -1553,10 +1599,10 @@ static struct uart_ops sci_uart_ops = {
 #endif
 };
 
-static void __devinit sci_init_single(struct platform_device *dev,
+static int __devinit sci_init_single(struct platform_device *dev,
 				      struct sci_port *sci_port,
 				      unsigned int index,
 				      struct plat_sci_port *p)
 {
 	struct uart_port *port = &sci_port->port;
 
@@ -1577,8 +1623,23 @@ static void __devinit sci_init_single(struct platform_device *dev,
 	}
 
 	if (dev) {
-		sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL;
-		sci_port->dclk = clk_get(&dev->dev, "peripheral_clk");
+		sci_port->iclk = clk_get(&dev->dev, "sci_ick");
+		if (IS_ERR(sci_port->iclk)) {
+			sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
+			if (IS_ERR(sci_port->iclk)) {
+				dev_err(&dev->dev, "can't get iclk\n");
+				return PTR_ERR(sci_port->iclk);
+			}
+		}
+
+		/*
+		 * The function clock is optional, ignore it if we can't
+		 * find it.
+		 */
+		sci_port->fclk = clk_get(&dev->dev, "sci_fck");
+		if (IS_ERR(sci_port->fclk))
+			sci_port->fclk = NULL;
+
 		sci_port->enable = sci_clk_enable;
 		sci_port->disable = sci_clk_disable;
 		port->dev = &dev->dev;
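The clock lookup now tries the clkdev name "sci_ick" first, falls back to the legacy "peripheral_clk", and only then fails the probe, while the function clock "sci_fck" stays optional. A hedged sketch of the same lookup order pulled out into a helper (the helper name is invented for illustration, not part of the patch):

/* Sketch only: mirrors the lookup order above. */
static int sci_port_get_clocks(struct sci_port *sci_port,
			       struct platform_device *dev)
{
	/* Interface clock: prefer the clkdev entry, fall back to the
	 * global "peripheral_clk", and give up if neither exists. */
	sci_port->iclk = clk_get(&dev->dev, "sci_ick");
	if (IS_ERR(sci_port->iclk)) {
		sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
		if (IS_ERR(sci_port->iclk)) {
			dev_err(&dev->dev, "can't get iclk\n");
			return PTR_ERR(sci_port->iclk);
		}
	}

	/* Function clock: optional, NULL simply means "not there". */
	sci_port->fclk = clk_get(&dev->dev, "sci_fck");
	if (IS_ERR(sci_port->fclk))
		sci_port->fclk = NULL;

	return 0;
}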
@@ -1605,6 +1666,7 @@ static void __devinit sci_init_single(struct platform_device *dev,
 #endif
 
 	memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
+	return 0;
 }
 
 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
@@ -1754,8 +1816,11 @@ static int sci_remove(struct platform_device *dev)
 	cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
 
 	spin_lock_irqsave(&priv->lock, flags);
-	list_for_each_entry(p, &priv->ports, node)
+	list_for_each_entry(p, &priv->ports, node) {
 		uart_remove_one_port(&sci_uart_driver, &p->port);
+		clk_put(p->iclk);
+		clk_put(p->fclk);
+	}
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	kfree(priv);
@@ -1781,7 +1846,9 @@ static int __devinit sci_probe_single(struct platform_device *dev,
 		return 0;
 	}
 
-	sci_init_single(dev, sciport, index, p);
+	ret = sci_init_single(dev, sciport, index, p);
+	if (ret)
+		return ret;
 
 	ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
 	if (ret)