path: root/drivers/serial/sh-sci.c
Diffstat (limited to 'drivers/serial/sh-sci.c')
-rw-r--r--  drivers/serial/sh-sci.c  202
1 file changed, 135 insertions(+), 67 deletions(-)
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 8eb094c1f61b..5f90fcd7d107 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -83,16 +83,16 @@ struct sci_port {
 
 	/* Interface clock */
 	struct clk *iclk;
-	/* Data clock */
-	struct clk *dclk;
+	/* Function clock */
+	struct clk *fclk;
 
 	struct list_head node;
 	struct dma_chan *chan_tx;
 	struct dma_chan *chan_rx;
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
 	struct device *dma_dev;
-	enum sh_dmae_slave_chan_id slave_tx;
-	enum sh_dmae_slave_chan_id slave_rx;
+	unsigned int slave_tx;
+	unsigned int slave_rx;
 	struct dma_async_tx_descriptor *desc_tx;
 	struct dma_async_tx_descriptor *desc_rx[2];
 	dma_cookie_t cookie_tx;
@@ -107,6 +107,7 @@ struct sci_port {
 	struct work_struct work_tx;
 	struct work_struct work_rx;
 	struct timer_list rx_timer;
+	unsigned int rx_timeout;
 #endif
 };
 
@@ -150,7 +151,11 @@ static int sci_poll_get_char(struct uart_port *port)
 			handle_error(port);
 			continue;
 		}
-	} while (!(status & SCxSR_RDxF(port)));
+		break;
+	} while (1);
+
+	if (!(status & SCxSR_RDxF(port)))
+		return NO_POLL_CHAR;
 
 	c = sci_in(port, SCxRDR);
 
@@ -674,22 +679,22 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
 	struct sci_port *s = to_sci_port(port);
 
 	if (s->chan_rx) {
-		unsigned long tout;
 		u16 scr = sci_in(port, SCSCR);
 		u16 ssr = sci_in(port, SCxSR);
 
 		/* Disable future Rx interrupts */
-		sci_out(port, SCSCR, scr & ~SCI_CTRL_FLAGS_RIE);
+		if (port->type == PORT_SCIFA) {
+			disable_irq_nosync(irq);
+			scr |= 0x4000;
+		} else {
+			scr &= ~SCI_CTRL_FLAGS_RIE;
+		}
+		sci_out(port, SCSCR, scr);
 		/* Clear current interrupt */
 		sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
-		/* Calculate delay for 1.5 DMA buffers */
-		tout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
-			port->fifosize / 2;
-		dev_dbg(port->dev, "Rx IRQ: setup timeout in %lu ms\n",
-			tout * 1000 / HZ);
-		if (tout < 2)
-			tout = 2;
-		mod_timer(&s->rx_timer, jiffies + tout);
+		dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
+			jiffies, s->rx_timeout);
+		mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
 
 		return IRQ_HANDLED;
 	}
@@ -799,7 +804,7 @@ static int sci_notifier(struct notifier_block *self,
 	    (phase == CPUFREQ_RESUMECHANGE)) {
 		spin_lock_irqsave(&priv->lock, flags);
 		list_for_each_entry(sci_port, &priv->ports, node)
-			sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
+			sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
 		spin_unlock_irqrestore(&priv->lock, flags);
 	}
 
@@ -810,21 +815,17 @@ static void sci_clk_enable(struct uart_port *port)
 {
 	struct sci_port *sci_port = to_sci_port(port);
 
-	clk_enable(sci_port->dclk);
-	sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
-
-	if (sci_port->iclk)
-		clk_enable(sci_port->iclk);
+	clk_enable(sci_port->iclk);
+	sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
+	clk_enable(sci_port->fclk);
 }
 
 static void sci_clk_disable(struct uart_port *port)
 {
 	struct sci_port *sci_port = to_sci_port(port);
 
-	if (sci_port->iclk)
-		clk_disable(sci_port->iclk);
-
-	clk_disable(sci_port->dclk);
+	clk_disable(sci_port->fclk);
+	clk_disable(sci_port->iclk);
 }
 
 static int sci_request_irq(struct sci_port *port)
@@ -913,22 +914,26 @@ static void sci_dma_tx_complete(void *arg)
 
 	spin_lock_irqsave(&port->lock, flags);
 
-	xmit->tail += s->sg_tx.length;
+	xmit->tail += sg_dma_len(&s->sg_tx);
 	xmit->tail &= UART_XMIT_SIZE - 1;
 
-	port->icount.tx += s->sg_tx.length;
+	port->icount.tx += sg_dma_len(&s->sg_tx);
 
 	async_tx_ack(s->desc_tx);
 	s->cookie_tx = -EINVAL;
 	s->desc_tx = NULL;
 
-	spin_unlock_irqrestore(&port->lock, flags);
-
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
 		uart_write_wakeup(port);
 
-	if (uart_circ_chars_pending(xmit))
+	if (!uart_circ_empty(xmit)) {
 		schedule_work(&s->work_tx);
+	} else if (port->type == PORT_SCIFA) {
+		u16 ctrl = sci_in(port, SCSCR);
+		sci_out(port, SCSCR, ctrl & ~SCI_CTRL_FLAGS_TIE);
+	}
+
+	spin_unlock_irqrestore(&port->lock, flags);
 }
 
 /* Locking: called with port lock held */
@@ -972,13 +977,13 @@ static void sci_dma_rx_complete(void *arg)
 	unsigned long flags;
 	int count;
 
-	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
+	dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx);
 
 	spin_lock_irqsave(&port->lock, flags);
 
 	count = sci_dma_rx_push(s, tty, s->buf_len_rx);
 
-	mod_timer(&s->rx_timer, jiffies + msecs_to_jiffies(5));
+	mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
 
 	spin_unlock_irqrestore(&port->lock, flags);
 
@@ -999,8 +1004,9 @@ static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
 	s->chan_rx = NULL;
 	s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
 	dma_release_channel(chan);
-	dma_free_coherent(port->dev, s->buf_len_rx * 2,
-			  sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
+	if (sg_dma_address(&s->sg_rx[0]))
+		dma_free_coherent(port->dev, s->buf_len_rx * 2,
+				  sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
 	if (enable_pio)
 		sci_start_rx(port);
 }
@@ -1050,6 +1056,8 @@ static void sci_submit_rx(struct sci_port *s)
 			sci_rx_dma_release(s, true);
 			return;
 		}
+		dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
+			s->cookie_rx[i], i);
 	}
 
 	s->active_rx = s->cookie_rx[0];
@@ -1084,7 +1092,7 @@ static void work_fn_rx(struct work_struct *work)
 		unsigned long flags;
 		int count;
 
-		chan->device->device_terminate_all(chan);
+		chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
 		dev_dbg(port->dev, "Read %u bytes with cookie %d\n",
 			sh_desc->partial, sh_desc->cookie);
 
@@ -1107,10 +1115,10 @@ static void work_fn_rx(struct work_struct *work)
 		return;
 	}
 
-	dev_dbg(port->dev, "%s: cookie %d #%d\n", __func__,
-		s->cookie_rx[new], new);
-
 	s->active_rx = s->cookie_rx[!new];
+
+	dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__,
+		s->cookie_rx[new], new, s->active_rx);
 }
 
 static void work_fn_tx(struct work_struct *work)
@@ -1131,14 +1139,13 @@ static void work_fn_tx(struct work_struct *work)
 	 */
 	spin_lock_irq(&port->lock);
 	sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
-	sg->dma_address = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
+	sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
 		sg->offset;
-	sg->length = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
+	sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
 		CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
-	sg->dma_length = sg->length;
 	spin_unlock_irq(&port->lock);
 
-	BUG_ON(!sg->length);
+	BUG_ON(!sg_dma_len(sg));
 
 	desc = chan->device->device_prep_slave_sg(chan,
 			sg, s->sg_len_tx, DMA_TO_DEVICE,
@@ -1173,23 +1180,28 @@ static void work_fn_tx(struct work_struct *work)
 
 static void sci_start_tx(struct uart_port *port)
 {
+	struct sci_port *s = to_sci_port(port);
 	unsigned short ctrl;
 
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
-	struct sci_port *s = to_sci_port(port);
-
-	if (s->chan_tx) {
-		if (!uart_circ_empty(&s->port.state->xmit) && s->cookie_tx < 0)
-			schedule_work(&s->work_tx);
-
-		return;
+	if (port->type == PORT_SCIFA) {
+		u16 new, scr = sci_in(port, SCSCR);
+		if (s->chan_tx)
+			new = scr | 0x8000;
+		else
+			new = scr & ~0x8000;
+		if (new != scr)
+			sci_out(port, SCSCR, new);
 	}
+	if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
+	    s->cookie_tx < 0)
+		schedule_work(&s->work_tx);
 #endif
-
-	/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
-	ctrl = sci_in(port, SCSCR);
-	ctrl |= SCI_CTRL_FLAGS_TIE;
-	sci_out(port, SCSCR, ctrl);
+	if (!s->chan_tx || port->type == PORT_SCIFA) {
+		/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
+		ctrl = sci_in(port, SCSCR);
+		sci_out(port, SCSCR, ctrl | SCI_CTRL_FLAGS_TIE);
+	}
 }
 
 static void sci_stop_tx(struct uart_port *port)
@@ -1198,6 +1210,8 @@ static void sci_stop_tx(struct uart_port *port)
 
 	/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
 	ctrl = sci_in(port, SCSCR);
+	if (port->type == PORT_SCIFA)
+		ctrl &= ~0x8000;
 	ctrl &= ~SCI_CTRL_FLAGS_TIE;
 	sci_out(port, SCSCR, ctrl);
 }
@@ -1208,6 +1222,8 @@ static void sci_start_rx(struct uart_port *port)
 
 	/* Set RIE (Receive Interrupt Enable) bit in SCSCR */
 	ctrl |= sci_in(port, SCSCR);
+	if (port->type == PORT_SCIFA)
+		ctrl &= ~0x4000;
 	sci_out(port, SCSCR, ctrl);
 }
 
@@ -1217,6 +1233,8 @@ static void sci_stop_rx(struct uart_port *port)
 
 	/* Clear RIE (Receive Interrupt Enable) bit in SCSCR */
 	ctrl = sci_in(port, SCSCR);
+	if (port->type == PORT_SCIFA)
+		ctrl &= ~0x4000;
 	ctrl &= ~(SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE);
 	sci_out(port, SCSCR, ctrl);
 }
@@ -1251,8 +1269,12 @@ static void rx_timer_fn(unsigned long arg)
 {
 	struct sci_port *s = (struct sci_port *)arg;
 	struct uart_port *port = &s->port;
-
 	u16 scr = sci_in(port, SCSCR);
+
+	if (port->type == PORT_SCIFA) {
+		scr &= ~0x4000;
+		enable_irq(s->irqs[1]);
+	}
 	sci_out(port, SCSCR, scr | SCI_CTRL_FLAGS_RIE);
 	dev_dbg(port->dev, "DMA Rx timed out\n");
 	schedule_work(&s->work_rx);
@@ -1339,8 +1361,7 @@ static void sci_request_dma(struct uart_port *port)
 			sg_init_table(sg, 1);
 			sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
 				    (int)buf[i] & ~PAGE_MASK);
-			sg->dma_address = dma[i];
-			sg->dma_length = sg->length;
+			sg_dma_address(sg) = dma[i];
 		}
 
 		INIT_WORK(&s->work_rx, work_fn_rx);
@@ -1403,8 +1424,12 @@ static void sci_shutdown(struct uart_port *port)
 static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
 			    struct ktermios *old)
 {
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+	struct sci_port *s = to_sci_port(port);
+#endif
 	unsigned int status, baud, smr_val, max_baud;
 	int t = -1;
+	u16 scfcr = 0;
 
 	/*
 	 * earlyprintk comes here early on with port->uartclk set to zero.
@@ -1427,7 +1452,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
 	sci_out(port, SCSCR, 0x00);	/* TE=0, RE=0, CKE1=0 */
 
 	if (port->type != PORT_SCI)
-		sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
+		sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);
 
 	smr_val = sci_in(port, SCSMR) & 3;
 	if ((termios->c_cflag & CSIZE) == CS7)
@@ -1458,10 +1483,32 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
 	}
 
 	sci_init_pins(port, termios->c_cflag);
-	sci_out(port, SCFCR, (termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0);
+	sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));
 
 	sci_out(port, SCSCR, SCSCR_INIT(port));
 
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+	/*
+	 * Calculate delay for 1.5 DMA buffers: see
+	 * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
+	 * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
+	 * calculates 1 jiffie for the data plus 5 jiffies for the "slop(e)."
+	 * Then below we calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO
+	 * sizes), but it has been found out experimentally, that this is not
+	 * enough: the driver too often needlessly runs on a DMA timeout. 20ms
+	 * as a minimum seem to work perfectly.
+	 */
+	if (s->chan_rx) {
+		s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
+			port->fifosize / 2;
+		dev_dbg(port->dev,
+			"DMA Rx t-out %ums, tty t-out %u jiffies\n",
+			s->rx_timeout * 1000 / HZ, port->timeout);
+		if (s->rx_timeout < msecs_to_jiffies(20))
+			s->rx_timeout = msecs_to_jiffies(20);
+	}
+#endif
+
 	if ((termios->c_cflag & CREAD) != 0)
 		sci_start_rx(port);
 }
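
The timeout comment added in the hunk above compresses a fair amount of arithmetic into prose. The standalone sketch below works through those numbers for the configuration the comment cites (HZ=250, 115200 baud, CS8 as 10 bits per character, 64-byte FIFO); the 128-byte buf_len_rx is an assumption (twice the FIFO depth, matching how the driver sizes its two DMA Rx buffers elsewhere), and the sketch is illustrative only, not part of the patch.

/* Worked example of the rx_timeout calculation quoted in the comment above.
 * All values are the example configuration from that comment; buf_len_rx is
 * an assumption (2 * fifosize). This is not kernel code.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int HZ = 250;		/* example tick rate from the comment */
	const unsigned int baud = 115200;
	const unsigned int bits = 10;		/* CS8: start + 8 data + stop */
	const unsigned int fifosize = 64;
	const unsigned int buf_len_rx = 128;	/* assumed: 2 * fifosize */

	/* serial_core.c::uart_update_timeout(): FIFO drain time plus HZ/50 "slop" */
	unsigned int timeout = bits * fifosize * HZ / baud + HZ / 50;

	/* sci_set_termios() above: scale the drain time to 1.5 DMA buffers */
	unsigned int rx_timeout = (timeout - HZ / 50) * buf_len_rx * 3 / fifosize / 2;

	printf("port->timeout = %u jiffies\n", timeout);	/* 1 + 5 = 6 */
	printf("s->rx_timeout = %u jiffies (%u ms)\n",
	       rx_timeout, rx_timeout * 1000 / HZ);		/* 3 jiffies = 12 ms */
	/* The patch then clamps rx_timeout to at least msecs_to_jiffies(20). */
	return 0;
}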
@@ -1553,10 +1600,10 @@ static struct uart_ops sci_uart_ops = {
 #endif
 };
 
-static void __devinit sci_init_single(struct platform_device *dev,
+static int __devinit sci_init_single(struct platform_device *dev,
 				      struct sci_port *sci_port,
 				      unsigned int index,
 				      struct plat_sci_port *p)
 {
 	struct uart_port *port = &sci_port->port;
 
@@ -1577,8 +1624,23 @@ static void __devinit sci_init_single(struct platform_device *dev,
 	}
 
 	if (dev) {
-		sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL;
-		sci_port->dclk = clk_get(&dev->dev, "peripheral_clk");
+		sci_port->iclk = clk_get(&dev->dev, "sci_ick");
+		if (IS_ERR(sci_port->iclk)) {
+			sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
+			if (IS_ERR(sci_port->iclk)) {
+				dev_err(&dev->dev, "can't get iclk\n");
+				return PTR_ERR(sci_port->iclk);
+			}
+		}
+
+		/*
+		 * The function clock is optional, ignore it if we can't
+		 * find it.
+		 */
+		sci_port->fclk = clk_get(&dev->dev, "sci_fck");
+		if (IS_ERR(sci_port->fclk))
+			sci_port->fclk = NULL;
+
 		sci_port->enable = sci_clk_enable;
 		sci_port->disable = sci_clk_disable;
 		port->dev = &dev->dev;
@@ -1605,6 +1667,7 @@ static void __devinit sci_init_single(struct platform_device *dev,
 #endif
 
 	memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
+	return 0;
 }
 
 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
@@ -1754,8 +1817,11 @@ static int sci_remove(struct platform_device *dev)
 	cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
 
 	spin_lock_irqsave(&priv->lock, flags);
-	list_for_each_entry(p, &priv->ports, node)
+	list_for_each_entry(p, &priv->ports, node) {
 		uart_remove_one_port(&sci_uart_driver, &p->port);
+		clk_put(p->iclk);
+		clk_put(p->fclk);
+	}
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	kfree(priv);
@@ -1781,7 +1847,9 @@ static int __devinit sci_probe_single(struct platform_device *dev,
 		return 0;
 	}
 
-	sci_init_single(dev, sciport, index, p);
+	ret = sci_init_single(dev, sciport, index, p);
+	if (ret)
+		return ret;
 
 	ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
 	if (ret)