Diffstat (limited to 'drivers')
 drivers/clocksource/Makefile    |   2
 drivers/clocksource/sh_cmt.c    | 116
 drivers/clocksource/sh_mtu2.c   | 357
 drivers/clocksource/sh_tmu.c    | 461
 drivers/i2c/busses/i2c-sh7760.c |   2
 drivers/rtc/Kconfig             |   2
 drivers/serial/sh-sci.c         | 388
 drivers/serial/sh-sci.h         |  42
 drivers/sh/intc.c               |  11
 drivers/video/hitfb.c           |   4
 10 files changed, 1180 insertions(+), 205 deletions(-)
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 1efb2879a94f..eef216f7f61d 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -3,3 +3,5 @@ obj-$(CONFIG_X86_CYCLONE_TIMER) += cyclone.o
 obj-$(CONFIG_X86_PM_TIMER)	+= acpi_pm.o
 obj-$(CONFIG_SCx200HR_TIMER)	+= scx200_hrt.o
 obj-$(CONFIG_SH_TIMER_CMT)	+= sh_cmt.o
+obj-$(CONFIG_SH_TIMER_MTU2)	+= sh_mtu2.o
+obj-$(CONFIG_SH_TIMER_TMU)	+= sh_tmu.o
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 1c92c39a53aa..cf56a2af5fe1 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -18,7 +18,6 @@
  */
 
 #include <linux/init.h>
-#include <linux/bootmem.h>
 #include <linux/platform_device.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
@@ -29,7 +28,7 @@
 #include <linux/err.h>
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
-#include <linux/sh_cmt.h>
+#include <linux/sh_timer.h>
 
 struct sh_cmt_priv {
 	void __iomem *mapbase;
@@ -47,6 +46,7 @@ struct sh_cmt_priv {
 	unsigned long rate;
 	spinlock_t lock;
 	struct clock_event_device ced;
+	struct clocksource cs;
 	unsigned long total_cycles;
 };
 
@@ -59,7 +59,7 @@ static DEFINE_SPINLOCK(sh_cmt_lock);
 
 static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr)
 {
-	struct sh_cmt_config *cfg = p->pdev->dev.platform_data;
+	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
 	void __iomem *base = p->mapbase;
 	unsigned long offs;
 
@@ -83,7 +83,7 @@ static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr)
 static inline void sh_cmt_write(struct sh_cmt_priv *p, int reg_nr,
 				unsigned long value)
 {
-	struct sh_cmt_config *cfg = p->pdev->dev.platform_data;
+	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
 	void __iomem *base = p->mapbase;
 	unsigned long offs;
 
@@ -110,23 +110,28 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
 					int *has_wrapped)
 {
 	unsigned long v1, v2, v3;
+	int o1, o2;
+
+	o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;
 
 	/* Make sure the timer value is stable. Stolen from acpi_pm.c */
 	do {
+		o2 = o1;
 		v1 = sh_cmt_read(p, CMCNT);
 		v2 = sh_cmt_read(p, CMCNT);
 		v3 = sh_cmt_read(p, CMCNT);
-	} while (unlikely((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1)
-		  || (v3 > v1 && v3 < v2)));
+		o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;
+	} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
+		  || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));
 
-	*has_wrapped = sh_cmt_read(p, CMCSR) & p->overflow_bit;
+	*has_wrapped = o1;
 	return v2;
 }
 
 
 static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
 {
-	struct sh_cmt_config *cfg = p->pdev->dev.platform_data;
+	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
 	unsigned long flags, value;
 
 	/* start stop register shared by multiple timer channels */
@@ -144,7 +149,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
 
 static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
 {
-	struct sh_cmt_config *cfg = p->pdev->dev.platform_data;
+	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
 	int ret;
 
 	/* enable clock */
@@ -153,16 +158,18 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
 		pr_err("sh_cmt: cannot enable clock \"%s\"\n", cfg->clk);
 		return ret;
 	}
-	*rate = clk_get_rate(p->clk) / 8;
 
 	/* make sure channel is disabled */
 	sh_cmt_start_stop_ch(p, 0);
 
 	/* configure channel, periodic mode and maximum timeout */
-	if (p->width == 16)
-		sh_cmt_write(p, CMCSR, 0);
-	else
+	if (p->width == 16) {
+		*rate = clk_get_rate(p->clk) / 512;
+		sh_cmt_write(p, CMCSR, 0x43);
+	} else {
+		*rate = clk_get_rate(p->clk) / 8;
 		sh_cmt_write(p, CMCSR, 0x01a4);
+	}
 
 	sh_cmt_write(p, CMCOR, 0xffffffff);
 	sh_cmt_write(p, CMCNT, 0);
@@ -376,6 +383,68 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
 	spin_unlock_irqrestore(&p->lock, flags);
 }
 
+static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
+{
+	return container_of(cs, struct sh_cmt_priv, cs);
+}
+
+static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
+{
+	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+	unsigned long flags, raw;
+	unsigned long value;
+	int has_wrapped;
+
+	spin_lock_irqsave(&p->lock, flags);
+	value = p->total_cycles;
+	raw = sh_cmt_get_counter(p, &has_wrapped);
+
+	if (unlikely(has_wrapped))
+		raw += p->match_value;
+	spin_unlock_irqrestore(&p->lock, flags);
+
+	return value + raw;
+}
+
+static int sh_cmt_clocksource_enable(struct clocksource *cs)
+{
+	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+	int ret;
+
+	p->total_cycles = 0;
+
+	ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
+	if (ret)
+		return ret;
+
+	/* TODO: calculate good shift from rate and counter bit width */
+	cs->shift = 0;
+	cs->mult = clocksource_hz2mult(p->rate, cs->shift);
+	return 0;
+}
+
+static void sh_cmt_clocksource_disable(struct clocksource *cs)
+{
+	sh_cmt_stop(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE);
+}
+
+static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
+				       char *name, unsigned long rating)
+{
+	struct clocksource *cs = &p->cs;
+
+	cs->name = name;
+	cs->rating = rating;
+	cs->read = sh_cmt_clocksource_read;
+	cs->enable = sh_cmt_clocksource_enable;
+	cs->disable = sh_cmt_clocksource_disable;
+	cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
+	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+	pr_info("sh_cmt: %s used as clock source\n", cs->name);
+	clocksource_register(cs);
+	return 0;
+}
+
 static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced)
 {
 	return container_of(ced, struct sh_cmt_priv, ced);
@@ -468,9 +537,9 @@ static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
 	clockevents_register_device(ced);
 }
 
-int sh_cmt_register(struct sh_cmt_priv *p, char *name,
+static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
 		    unsigned long clockevent_rating,
 		    unsigned long clocksource_rating)
 {
 	if (p->width == (sizeof(p->max_match_value) * 8))
 		p->max_match_value = ~0;
@@ -483,12 +552,15 @@ int sh_cmt_register(struct sh_cmt_priv *p, char *name,
 	if (clockevent_rating)
 		sh_cmt_register_clockevent(p, name, clockevent_rating);
 
+	if (clocksource_rating)
+		sh_cmt_register_clocksource(p, name, clocksource_rating);
+
 	return 0;
 }
 
 static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
 {
-	struct sh_cmt_config *cfg = pdev->dev.platform_data;
+	struct sh_timer_config *cfg = pdev->dev.platform_data;
 	struct resource *res;
 	int irq, ret;
 	ret = -ENXIO;
@@ -545,7 +617,7 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
 	if (resource_size(res) == 6) {
 		p->width = 16;
 		p->overflow_bit = 0x80;
-		p->clear_bits = ~0xc0;
+		p->clear_bits = ~0x80;
 	} else {
 		p->width = 32;
 		p->overflow_bit = 0x8000;
@@ -566,8 +638,14 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
 static int __devinit sh_cmt_probe(struct platform_device *pdev)
 {
 	struct sh_cmt_priv *p = platform_get_drvdata(pdev);
+	struct sh_timer_config *cfg = pdev->dev.platform_data;
 	int ret;
 
+	if (p) {
+		pr_info("sh_cmt: %s kept as earlytimer\n", cfg->name);
+		return 0;
+	}
+
 	p = kmalloc(sizeof(*p), GFP_KERNEL);
 	if (p == NULL) {
 		dev_err(&pdev->dev, "failed to allocate driver data\n");
@@ -577,7 +655,6 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
 	ret = sh_cmt_setup(p, pdev);
 	if (ret) {
 		kfree(p);
-
 		platform_set_drvdata(pdev, NULL);
 	}
 	return ret;
@@ -606,6 +683,7 @@ static void __exit sh_cmt_exit(void)
 	platform_driver_unregister(&sh_cmt_device_driver);
 }
 
+early_platform_init("earlytimer", &sh_cmt_device_driver);
 module_init(sh_cmt_init);
 module_exit(sh_cmt_exit);
 
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
new file mode 100644
index 000000000000..d1ae75454d10
--- /dev/null
+++ b/drivers/clocksource/sh_mtu2.c
@@ -0,0 +1,357 @@
1/*
2 * SuperH Timer Support - MTU2
3 *
4 * Copyright (C) 2009 Magnus Damm
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/init.h>
21#include <linux/platform_device.h>
22#include <linux/spinlock.h>
23#include <linux/interrupt.h>
24#include <linux/ioport.h>
25#include <linux/delay.h>
26#include <linux/io.h>
27#include <linux/clk.h>
28#include <linux/irq.h>
29#include <linux/err.h>
30#include <linux/clockchips.h>
31#include <linux/sh_timer.h>
32
33struct sh_mtu2_priv {
34 void __iomem *mapbase;
35 struct clk *clk;
36 struct irqaction irqaction;
37 struct platform_device *pdev;
38 unsigned long rate;
39 unsigned long periodic;
40 struct clock_event_device ced;
41};
42
43static DEFINE_SPINLOCK(sh_mtu2_lock);
44
45#define TSTR -1 /* shared register */
46#define TCR 0 /* channel register */
47#define TMDR 1 /* channel register */
48#define TIOR 2 /* channel register */
49#define TIER 3 /* channel register */
50#define TSR 4 /* channel register */
51#define TCNT 5 /* channel register */
52#define TGR 6 /* channel register */
53
54static unsigned long mtu2_reg_offs[] = {
55 [TCR] = 0,
56 [TMDR] = 1,
57 [TIOR] = 2,
58 [TIER] = 4,
59 [TSR] = 5,
60 [TCNT] = 6,
61 [TGR] = 8,
62};
63
64static inline unsigned long sh_mtu2_read(struct sh_mtu2_priv *p, int reg_nr)
65{
66 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
67 void __iomem *base = p->mapbase;
68 unsigned long offs;
69
70 if (reg_nr == TSTR)
71 return ioread8(base + cfg->channel_offset);
72
73 offs = mtu2_reg_offs[reg_nr];
74
75 if ((reg_nr == TCNT) || (reg_nr == TGR))
76 return ioread16(base + offs);
77 else
78 return ioread8(base + offs);
79}
80
81static inline void sh_mtu2_write(struct sh_mtu2_priv *p, int reg_nr,
82 unsigned long value)
83{
84 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
85 void __iomem *base = p->mapbase;
86 unsigned long offs;
87
88 if (reg_nr == TSTR) {
89 iowrite8(value, base + cfg->channel_offset);
90 return;
91 }
92
93 offs = mtu2_reg_offs[reg_nr];
94
95 if ((reg_nr == TCNT) || (reg_nr == TGR))
96 iowrite16(value, base + offs);
97 else
98 iowrite8(value, base + offs);
99}
100
101static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
102{
103 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
104 unsigned long flags, value;
105
106 /* start stop register shared by multiple timer channels */
107 spin_lock_irqsave(&sh_mtu2_lock, flags);
108 value = sh_mtu2_read(p, TSTR);
109
110 if (start)
111 value |= 1 << cfg->timer_bit;
112 else
113 value &= ~(1 << cfg->timer_bit);
114
115 sh_mtu2_write(p, TSTR, value);
116 spin_unlock_irqrestore(&sh_mtu2_lock, flags);
117}
118
119static int sh_mtu2_enable(struct sh_mtu2_priv *p)
120{
121 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
122 int ret;
123
124 /* enable clock */
125 ret = clk_enable(p->clk);
126 if (ret) {
127 pr_err("sh_mtu2: cannot enable clock \"%s\"\n", cfg->clk);
128 return ret;
129 }
130
131 /* make sure channel is disabled */
132 sh_mtu2_start_stop_ch(p, 0);
133
134 p->rate = clk_get_rate(p->clk) / 64;
135 p->periodic = (p->rate + HZ/2) / HZ;
136
137 /* "Periodic Counter Operation" */
138 sh_mtu2_write(p, TCR, 0x23); /* TGRA clear, divide clock by 64 */
139 sh_mtu2_write(p, TIOR, 0);
140 sh_mtu2_write(p, TGR, p->periodic);
141 sh_mtu2_write(p, TCNT, 0);
142 sh_mtu2_write(p, TMDR, 0);
143 sh_mtu2_write(p, TIER, 0x01);
144
145 /* enable channel */
146 sh_mtu2_start_stop_ch(p, 1);
147
148 return 0;
149}
150
151static void sh_mtu2_disable(struct sh_mtu2_priv *p)
152{
153 /* disable channel */
154 sh_mtu2_start_stop_ch(p, 0);
155
156 /* stop clock */
157 clk_disable(p->clk);
158}
159
160static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
161{
162 struct sh_mtu2_priv *p = dev_id;
163
164 /* acknowledge interrupt */
165 sh_mtu2_read(p, TSR);
166 sh_mtu2_write(p, TSR, 0xfe);
167
168 /* notify clockevent layer */
169 p->ced.event_handler(&p->ced);
170 return IRQ_HANDLED;
171}
172
173static struct sh_mtu2_priv *ced_to_sh_mtu2(struct clock_event_device *ced)
174{
175 return container_of(ced, struct sh_mtu2_priv, ced);
176}
177
178static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
179 struct clock_event_device *ced)
180{
181 struct sh_mtu2_priv *p = ced_to_sh_mtu2(ced);
182 int disabled = 0;
183
184 /* deal with old setting first */
185 switch (ced->mode) {
186 case CLOCK_EVT_MODE_PERIODIC:
187 sh_mtu2_disable(p);
188 disabled = 1;
189 break;
190 default:
191 break;
192 }
193
194 switch (mode) {
195 case CLOCK_EVT_MODE_PERIODIC:
196 pr_info("sh_mtu2: %s used for periodic clock events\n",
197 ced->name);
198 sh_mtu2_enable(p);
199 break;
200 case CLOCK_EVT_MODE_UNUSED:
201 if (!disabled)
202 sh_mtu2_disable(p);
203 break;
204 case CLOCK_EVT_MODE_SHUTDOWN:
205 default:
206 break;
207 }
208}
209
210static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
211 char *name, unsigned long rating)
212{
213 struct clock_event_device *ced = &p->ced;
214 int ret;
215
216 memset(ced, 0, sizeof(*ced));
217
218 ced->name = name;
219 ced->features = CLOCK_EVT_FEAT_PERIODIC;
220 ced->rating = rating;
221 ced->cpumask = cpumask_of(0);
222 ced->set_mode = sh_mtu2_clock_event_mode;
223
224 ret = setup_irq(p->irqaction.irq, &p->irqaction);
225 if (ret) {
226 pr_err("sh_mtu2: failed to request irq %d\n",
227 p->irqaction.irq);
228 return;
229 }
230
231 pr_info("sh_mtu2: %s used for clock events\n", ced->name);
232 clockevents_register_device(ced);
233}
234
235static int sh_mtu2_register(struct sh_mtu2_priv *p, char *name,
236 unsigned long clockevent_rating)
237{
238 if (clockevent_rating)
239 sh_mtu2_register_clockevent(p, name, clockevent_rating);
240
241 return 0;
242}
243
244static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
245{
246 struct sh_timer_config *cfg = pdev->dev.platform_data;
247 struct resource *res;
248 int irq, ret;
249 ret = -ENXIO;
250
251 memset(p, 0, sizeof(*p));
252 p->pdev = pdev;
253
254 if (!cfg) {
255 dev_err(&p->pdev->dev, "missing platform data\n");
256 goto err0;
257 }
258
259 platform_set_drvdata(pdev, p);
260
261 res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
262 if (!res) {
263 dev_err(&p->pdev->dev, "failed to get I/O memory\n");
264 goto err0;
265 }
266
267 irq = platform_get_irq(p->pdev, 0);
268 if (irq < 0) {
269 dev_err(&p->pdev->dev, "failed to get irq\n");
270 goto err0;
271 }
272
273 /* map memory, let mapbase point to our channel */
274 p->mapbase = ioremap_nocache(res->start, resource_size(res));
275 if (p->mapbase == NULL) {
276 pr_err("sh_mtu2: failed to remap I/O memory\n");
277 goto err0;
278 }
279
280 /* setup data for setup_irq() (too early for request_irq()) */
281 p->irqaction.name = cfg->name;
282 p->irqaction.handler = sh_mtu2_interrupt;
283 p->irqaction.dev_id = p;
284 p->irqaction.irq = irq;
285 p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
286 p->irqaction.mask = CPU_MASK_NONE;
287
288 /* get hold of clock */
289 p->clk = clk_get(&p->pdev->dev, cfg->clk);
290 if (IS_ERR(p->clk)) {
291 pr_err("sh_mtu2: cannot get clock \"%s\"\n", cfg->clk);
292 ret = PTR_ERR(p->clk);
293 goto err1;
294 }
295
296 return sh_mtu2_register(p, cfg->name, cfg->clockevent_rating);
297 err1:
298 iounmap(p->mapbase);
299 err0:
300 return ret;
301}
302
303static int __devinit sh_mtu2_probe(struct platform_device *pdev)
304{
305 struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
306 struct sh_timer_config *cfg = pdev->dev.platform_data;
307 int ret;
308
309 if (p) {
310 pr_info("sh_mtu2: %s kept as earlytimer\n", cfg->name);
311 return 0;
312 }
313
314 p = kmalloc(sizeof(*p), GFP_KERNEL);
315 if (p == NULL) {
316 dev_err(&pdev->dev, "failed to allocate driver data\n");
317 return -ENOMEM;
318 }
319
320 ret = sh_mtu2_setup(p, pdev);
321 if (ret) {
322 kfree(p);
323 platform_set_drvdata(pdev, NULL);
324 }
325 return ret;
326}
327
328static int __devexit sh_mtu2_remove(struct platform_device *pdev)
329{
330 return -EBUSY; /* cannot unregister clockevent */
331}
332
333static struct platform_driver sh_mtu2_device_driver = {
334 .probe = sh_mtu2_probe,
335 .remove = __devexit_p(sh_mtu2_remove),
336 .driver = {
337 .name = "sh_mtu2",
338 }
339};
340
341static int __init sh_mtu2_init(void)
342{
343 return platform_driver_register(&sh_mtu2_device_driver);
344}
345
346static void __exit sh_mtu2_exit(void)
347{
348 platform_driver_unregister(&sh_mtu2_device_driver);
349}
350
351early_platform_init("earlytimer", &sh_mtu2_device_driver);
352module_init(sh_mtu2_init);
353module_exit(sh_mtu2_exit);
354
355MODULE_AUTHOR("Magnus Damm");
356MODULE_DESCRIPTION("SuperH MTU2 Timer Driver");
357MODULE_LICENSE("GPL v2");
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
new file mode 100644
index 000000000000..d6ea4398bf62
--- /dev/null
+++ b/drivers/clocksource/sh_tmu.c
@@ -0,0 +1,461 @@
1/*
2 * SuperH Timer Support - TMU
3 *
4 * Copyright (C) 2009 Magnus Damm
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/init.h>
21#include <linux/platform_device.h>
22#include <linux/spinlock.h>
23#include <linux/interrupt.h>
24#include <linux/ioport.h>
25#include <linux/delay.h>
26#include <linux/io.h>
27#include <linux/clk.h>
28#include <linux/irq.h>
29#include <linux/err.h>
30#include <linux/clocksource.h>
31#include <linux/clockchips.h>
32#include <linux/sh_timer.h>
33
34struct sh_tmu_priv {
35 void __iomem *mapbase;
36 struct clk *clk;
37 struct irqaction irqaction;
38 struct platform_device *pdev;
39 unsigned long rate;
40 unsigned long periodic;
41 struct clock_event_device ced;
42 struct clocksource cs;
43};
44
45static DEFINE_SPINLOCK(sh_tmu_lock);
46
47#define TSTR -1 /* shared register */
48#define TCOR 0 /* channel register */
49#define TCNT 1 /* channel register */
50#define TCR 2 /* channel register */
51
52static inline unsigned long sh_tmu_read(struct sh_tmu_priv *p, int reg_nr)
53{
54 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
55 void __iomem *base = p->mapbase;
56 unsigned long offs;
57
58 if (reg_nr == TSTR)
59 return ioread8(base - cfg->channel_offset);
60
61 offs = reg_nr << 2;
62
63 if (reg_nr == TCR)
64 return ioread16(base + offs);
65 else
66 return ioread32(base + offs);
67}
68
69static inline void sh_tmu_write(struct sh_tmu_priv *p, int reg_nr,
70 unsigned long value)
71{
72 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
73 void __iomem *base = p->mapbase;
74 unsigned long offs;
75
76 if (reg_nr == TSTR) {
77 iowrite8(value, base - cfg->channel_offset);
78 return;
79 }
80
81 offs = reg_nr << 2;
82
83 if (reg_nr == TCR)
84 iowrite16(value, base + offs);
85 else
86 iowrite32(value, base + offs);
87}
88
89static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
90{
91 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
92 unsigned long flags, value;
93
94 /* start stop register shared by multiple timer channels */
95 spin_lock_irqsave(&sh_tmu_lock, flags);
96 value = sh_tmu_read(p, TSTR);
97
98 if (start)
99 value |= 1 << cfg->timer_bit;
100 else
101 value &= ~(1 << cfg->timer_bit);
102
103 sh_tmu_write(p, TSTR, value);
104 spin_unlock_irqrestore(&sh_tmu_lock, flags);
105}
106
107static int sh_tmu_enable(struct sh_tmu_priv *p)
108{
109 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
110 int ret;
111
112 /* enable clock */
113 ret = clk_enable(p->clk);
114 if (ret) {
115 pr_err("sh_tmu: cannot enable clock \"%s\"\n", cfg->clk);
116 return ret;
117 }
118
119 /* make sure channel is disabled */
120 sh_tmu_start_stop_ch(p, 0);
121
122 /* maximum timeout */
123 sh_tmu_write(p, TCOR, 0xffffffff);
124 sh_tmu_write(p, TCNT, 0xffffffff);
125
126 /* configure channel to parent clock / 4, irq off */
127 p->rate = clk_get_rate(p->clk) / 4;
128 sh_tmu_write(p, TCR, 0x0000);
129
130 /* enable channel */
131 sh_tmu_start_stop_ch(p, 1);
132
133 return 0;
134}
135
136static void sh_tmu_disable(struct sh_tmu_priv *p)
137{
138 /* disable channel */
139 sh_tmu_start_stop_ch(p, 0);
140
141 /* stop clock */
142 clk_disable(p->clk);
143}
144
145static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
146 int periodic)
147{
148 /* stop timer */
149 sh_tmu_start_stop_ch(p, 0);
150
151 /* acknowledge interrupt */
152 sh_tmu_read(p, TCR);
153
154 /* enable interrupt */
155 sh_tmu_write(p, TCR, 0x0020);
156
157 /* reload delta value in case of periodic timer */
158 if (periodic)
159 sh_tmu_write(p, TCOR, delta);
160 else
161 sh_tmu_write(p, TCOR, 0);
162
163 sh_tmu_write(p, TCNT, delta);
164
165 /* start timer */
166 sh_tmu_start_stop_ch(p, 1);
167}
168
169static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
170{
171 struct sh_tmu_priv *p = dev_id;
172
173 /* disable or acknowledge interrupt */
174 if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT)
175 sh_tmu_write(p, TCR, 0x0000);
176 else
177 sh_tmu_write(p, TCR, 0x0020);
178
179 /* notify clockevent layer */
180 p->ced.event_handler(&p->ced);
181 return IRQ_HANDLED;
182}
183
184static struct sh_tmu_priv *cs_to_sh_tmu(struct clocksource *cs)
185{
186 return container_of(cs, struct sh_tmu_priv, cs);
187}
188
189static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
190{
191 struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
192
193 return sh_tmu_read(p, TCNT) ^ 0xffffffff;
194}
195
196static int sh_tmu_clocksource_enable(struct clocksource *cs)
197{
198 struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
199 int ret;
200
201 ret = sh_tmu_enable(p);
202 if (ret)
203 return ret;
204
205 /* TODO: calculate good shift from rate and counter bit width */
206 cs->shift = 10;
207 cs->mult = clocksource_hz2mult(p->rate, cs->shift);
208 return 0;
209}
210
211static void sh_tmu_clocksource_disable(struct clocksource *cs)
212{
213 sh_tmu_disable(cs_to_sh_tmu(cs));
214}
215
216static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
217 char *name, unsigned long rating)
218{
219 struct clocksource *cs = &p->cs;
220
221 cs->name = name;
222 cs->rating = rating;
223 cs->read = sh_tmu_clocksource_read;
224 cs->enable = sh_tmu_clocksource_enable;
225 cs->disable = sh_tmu_clocksource_disable;
226 cs->mask = CLOCKSOURCE_MASK(32);
227 cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
228 pr_info("sh_tmu: %s used as clock source\n", cs->name);
229 clocksource_register(cs);
230 return 0;
231}
232
233static struct sh_tmu_priv *ced_to_sh_tmu(struct clock_event_device *ced)
234{
235 return container_of(ced, struct sh_tmu_priv, ced);
236}
237
238static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic)
239{
240 struct clock_event_device *ced = &p->ced;
241
242 sh_tmu_enable(p);
243
244 /* TODO: calculate good shift from rate and counter bit width */
245
246 ced->shift = 32;
247 ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
248 ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced);
249 ced->min_delta_ns = 5000;
250
251 if (periodic) {
252 p->periodic = (p->rate + HZ/2) / HZ;
253 sh_tmu_set_next(p, p->periodic, 1);
254 }
255}
256
257static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
258 struct clock_event_device *ced)
259{
260 struct sh_tmu_priv *p = ced_to_sh_tmu(ced);
261 int disabled = 0;
262
263 /* deal with old setting first */
264 switch (ced->mode) {
265 case CLOCK_EVT_MODE_PERIODIC:
266 case CLOCK_EVT_MODE_ONESHOT:
267 sh_tmu_disable(p);
268 disabled = 1;
269 break;
270 default:
271 break;
272 }
273
274 switch (mode) {
275 case CLOCK_EVT_MODE_PERIODIC:
276 pr_info("sh_tmu: %s used for periodic clock events\n",
277 ced->name);
278 sh_tmu_clock_event_start(p, 1);
279 break;
280 case CLOCK_EVT_MODE_ONESHOT:
281 pr_info("sh_tmu: %s used for oneshot clock events\n",
282 ced->name);
283 sh_tmu_clock_event_start(p, 0);
284 break;
285 case CLOCK_EVT_MODE_UNUSED:
286 if (!disabled)
287 sh_tmu_disable(p);
288 break;
289 case CLOCK_EVT_MODE_SHUTDOWN:
290 default:
291 break;
292 }
293}
294
295static int sh_tmu_clock_event_next(unsigned long delta,
296 struct clock_event_device *ced)
297{
298 struct sh_tmu_priv *p = ced_to_sh_tmu(ced);
299
300 BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
301
302 /* program new delta value */
303 sh_tmu_set_next(p, delta, 0);
304 return 0;
305}
306
307static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
308 char *name, unsigned long rating)
309{
310 struct clock_event_device *ced = &p->ced;
311 int ret;
312
313 memset(ced, 0, sizeof(*ced));
314
315 ced->name = name;
316 ced->features = CLOCK_EVT_FEAT_PERIODIC;
317 ced->features |= CLOCK_EVT_FEAT_ONESHOT;
318 ced->rating = rating;
319 ced->cpumask = cpumask_of(0);
320 ced->set_next_event = sh_tmu_clock_event_next;
321 ced->set_mode = sh_tmu_clock_event_mode;
322
323 ret = setup_irq(p->irqaction.irq, &p->irqaction);
324 if (ret) {
325 pr_err("sh_tmu: failed to request irq %d\n",
326 p->irqaction.irq);
327 return;
328 }
329
330 pr_info("sh_tmu: %s used for clock events\n", ced->name);
331 clockevents_register_device(ced);
332}
333
334static int sh_tmu_register(struct sh_tmu_priv *p, char *name,
335 unsigned long clockevent_rating,
336 unsigned long clocksource_rating)
337{
338 if (clockevent_rating)
339 sh_tmu_register_clockevent(p, name, clockevent_rating);
340 else if (clocksource_rating)
341 sh_tmu_register_clocksource(p, name, clocksource_rating);
342
343 return 0;
344}
345
346static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
347{
348 struct sh_timer_config *cfg = pdev->dev.platform_data;
349 struct resource *res;
350 int irq, ret;
351 ret = -ENXIO;
352
353 memset(p, 0, sizeof(*p));
354 p->pdev = pdev;
355
356 if (!cfg) {
357 dev_err(&p->pdev->dev, "missing platform data\n");
358 goto err0;
359 }
360
361 platform_set_drvdata(pdev, p);
362
363 res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
364 if (!res) {
365 dev_err(&p->pdev->dev, "failed to get I/O memory\n");
366 goto err0;
367 }
368
369 irq = platform_get_irq(p->pdev, 0);
370 if (irq < 0) {
371 dev_err(&p->pdev->dev, "failed to get irq\n");
372 goto err0;
373 }
374
375 /* map memory, let mapbase point to our channel */
376 p->mapbase = ioremap_nocache(res->start, resource_size(res));
377 if (p->mapbase == NULL) {
378 pr_err("sh_tmu: failed to remap I/O memory\n");
379 goto err0;
380 }
381
382 /* setup data for setup_irq() (too early for request_irq()) */
383 p->irqaction.name = cfg->name;
384 p->irqaction.handler = sh_tmu_interrupt;
385 p->irqaction.dev_id = p;
386 p->irqaction.irq = irq;
387 p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
388 p->irqaction.mask = CPU_MASK_NONE;
389
390 /* get hold of clock */
391 p->clk = clk_get(&p->pdev->dev, cfg->clk);
392 if (IS_ERR(p->clk)) {
393 pr_err("sh_tmu: cannot get clock \"%s\"\n", cfg->clk);
394 ret = PTR_ERR(p->clk);
395 goto err1;
396 }
397
398 return sh_tmu_register(p, cfg->name,
399 cfg->clockevent_rating,
400 cfg->clocksource_rating);
401 err1:
402 iounmap(p->mapbase);
403 err0:
404 return ret;
405}
406
407static int __devinit sh_tmu_probe(struct platform_device *pdev)
408{
409 struct sh_tmu_priv *p = platform_get_drvdata(pdev);
410 struct sh_timer_config *cfg = pdev->dev.platform_data;
411 int ret;
412
413 if (p) {
414 pr_info("sh_tmu: %s kept as earlytimer\n", cfg->name);
415 return 0;
416 }
417
418 p = kmalloc(sizeof(*p), GFP_KERNEL);
419 if (p == NULL) {
420 dev_err(&pdev->dev, "failed to allocate driver data\n");
421 return -ENOMEM;
422 }
423
424 ret = sh_tmu_setup(p, pdev);
425 if (ret) {
426 kfree(p);
427 platform_set_drvdata(pdev, NULL);
428 }
429 return ret;
430}
431
432static int __devexit sh_tmu_remove(struct platform_device *pdev)
433{
434 return -EBUSY; /* cannot unregister clockevent and clocksource */
435}
436
437static struct platform_driver sh_tmu_device_driver = {
438 .probe = sh_tmu_probe,
439 .remove = __devexit_p(sh_tmu_remove),
440 .driver = {
441 .name = "sh_tmu",
442 }
443};
444
445static int __init sh_tmu_init(void)
446{
447 return platform_driver_register(&sh_tmu_device_driver);
448}
449
450static void __exit sh_tmu_exit(void)
451{
452 platform_driver_unregister(&sh_tmu_device_driver);
453}
454
455early_platform_init("earlytimer", &sh_tmu_device_driver);
456module_init(sh_tmu_init);
457module_exit(sh_tmu_exit);
458
459MODULE_AUTHOR("Magnus Damm");
460MODULE_DESCRIPTION("SuperH TMU Timer Driver");
461MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index baa28b73ae42..b9680f50f541 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -396,7 +396,7 @@ static int __devinit calc_CCR(unsigned long scl_hz)
 	signed char cdf, cdfm;
 	int scgd, scgdm, scgds;
 
-	mclk = clk_get(NULL, "module_clk");
+	mclk = clk_get(NULL, "peripheral_clk");
 	if (IS_ERR(mclk)) {
 		return PTR_ERR(mclk);
 	} else {
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4e9851fc1746..277d35d232fa 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -692,7 +692,7 @@ config RTC_DRV_GENERIC
 	tristate "Generic RTC support"
 	# Please consider writing a new RTC driver instead of using the generic
 	# RTC abstraction
-	depends on PARISC || M68K || PPC
+	depends on PARISC || M68K || PPC || SUPERH32
 	help
 	  Say Y or M here to enable RTC support on systems using the generic
 	  RTC abstraction. If you do not know what you are doing, you should
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index dbf5357a77b3..a4cf1079b312 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -47,12 +47,17 @@
47#include <linux/clk.h> 47#include <linux/clk.h>
48#include <linux/ctype.h> 48#include <linux/ctype.h>
49#include <linux/err.h> 49#include <linux/err.h>
50#include <linux/list.h>
50 51
51#ifdef CONFIG_SUPERH 52#ifdef CONFIG_SUPERH
52#include <asm/clock.h> 53#include <asm/clock.h>
53#include <asm/sh_bios.h> 54#include <asm/sh_bios.h>
54#endif 55#endif
55 56
57#ifdef CONFIG_H8300
58#include <asm/gpio.h>
59#endif
60
56#include "sh-sci.h" 61#include "sh-sci.h"
57 62
58struct sci_port { 63struct sci_port {
@@ -75,14 +80,22 @@ struct sci_port {
75 int break_flag; 80 int break_flag;
76 81
77#ifdef CONFIG_HAVE_CLK 82#ifdef CONFIG_HAVE_CLK
78 /* Port clock */ 83 /* Interface clock */
79 struct clk *clk; 84 struct clk *iclk;
85 /* Data clock */
86 struct clk *dclk;
80#endif 87#endif
88 struct list_head node;
81}; 89};
82 90
83#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE 91struct sh_sci_priv {
84static struct sci_port *serial_console_port; 92 spinlock_t lock;
93 struct list_head ports;
94
95#ifdef CONFIG_HAVE_CLK
96 struct notifier_block clk_nb;
85#endif 97#endif
98};
86 99
87/* Function prototypes */ 100/* Function prototypes */
88static void sci_stop_tx(struct uart_port *port); 101static void sci_stop_tx(struct uart_port *port);
@@ -138,9 +151,8 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c)
138 status = sci_in(port, SCxSR); 151 status = sci_in(port, SCxSR);
139 } while (!(status & SCxSR_TDxE(port))); 152 } while (!(status & SCxSR_TDxE(port)));
140 153
141 sci_in(port, SCxSR); /* Dummy read */
142 sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
143 sci_out(port, SCxTDR, c); 154 sci_out(port, SCxTDR, c);
155 sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
144} 156}
145#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */ 157#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
146 158
@@ -159,12 +171,12 @@ static void h8300_sci_config(struct uart_port *port, unsigned int ctrl)
159 *mstpcrl &= ~mask; 171 *mstpcrl &= ~mask;
160} 172}
161 173
162static inline void h8300_sci_enable(struct uart_port *port) 174static void h8300_sci_enable(struct uart_port *port)
163{ 175{
164 h8300_sci_config(port, sci_enable); 176 h8300_sci_config(port, sci_enable);
165} 177}
166 178
167static inline void h8300_sci_disable(struct uart_port *port) 179static void h8300_sci_disable(struct uart_port *port)
168{ 180{
169 h8300_sci_config(port, sci_disable); 181 h8300_sci_config(port, sci_disable);
170} 182}
@@ -611,7 +623,7 @@ static inline int sci_handle_breaks(struct uart_port *port)
611 int copied = 0; 623 int copied = 0;
612 unsigned short status = sci_in(port, SCxSR); 624 unsigned short status = sci_in(port, SCxSR);
613 struct tty_struct *tty = port->info->port.tty; 625 struct tty_struct *tty = port->info->port.tty;
614 struct sci_port *s = &sci_ports[port->line]; 626 struct sci_port *s = to_sci_port(port);
615 627
616 if (uart_handle_break(port)) 628 if (uart_handle_break(port))
617 return 0; 629 return 0;
@@ -726,19 +738,43 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
726static int sci_notifier(struct notifier_block *self, 738static int sci_notifier(struct notifier_block *self,
727 unsigned long phase, void *p) 739 unsigned long phase, void *p)
728{ 740{
729 int i; 741 struct sh_sci_priv *priv = container_of(self,
742 struct sh_sci_priv, clk_nb);
743 struct sci_port *sci_port;
744 unsigned long flags;
730 745
731 if ((phase == CPUFREQ_POSTCHANGE) || 746 if ((phase == CPUFREQ_POSTCHANGE) ||
732 (phase == CPUFREQ_RESUMECHANGE)) 747 (phase == CPUFREQ_RESUMECHANGE)) {
733 for (i = 0; i < SCI_NPORTS; i++) { 748 spin_lock_irqsave(&priv->lock, flags);
734 struct sci_port *s = &sci_ports[i]; 749 list_for_each_entry(sci_port, &priv->ports, node)
735 s->port.uartclk = clk_get_rate(s->clk); 750 sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
736 } 751
752 spin_unlock_irqrestore(&priv->lock, flags);
753 }
737 754
738 return NOTIFY_OK; 755 return NOTIFY_OK;
739} 756}
740 757
741static struct notifier_block sci_nb = { &sci_notifier, NULL, 0 }; 758static void sci_clk_enable(struct uart_port *port)
759{
760 struct sci_port *sci_port = to_sci_port(port);
761
762 clk_enable(sci_port->dclk);
763 sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
764
765 if (sci_port->iclk)
766 clk_enable(sci_port->iclk);
767}
768
769static void sci_clk_disable(struct uart_port *port)
770{
771 struct sci_port *sci_port = to_sci_port(port);
772
773 if (sci_port->iclk)
774 clk_disable(sci_port->iclk);
775
776 clk_disable(sci_port->dclk);
777}
742#endif 778#endif
743 779
744static int sci_request_irq(struct sci_port *port) 780static int sci_request_irq(struct sci_port *port)
@@ -865,15 +901,11 @@ static void sci_break_ctl(struct uart_port *port, int break_state)
865 901
866static int sci_startup(struct uart_port *port) 902static int sci_startup(struct uart_port *port)
867{ 903{
868 struct sci_port *s = &sci_ports[port->line]; 904 struct sci_port *s = to_sci_port(port);
869 905
870 if (s->enable) 906 if (s->enable)
871 s->enable(port); 907 s->enable(port);
872 908
873#ifdef CONFIG_HAVE_CLK
874 s->clk = clk_get(NULL, "module_clk");
875#endif
876
877 sci_request_irq(s); 909 sci_request_irq(s);
878 sci_start_tx(port); 910 sci_start_tx(port);
879 sci_start_rx(port, 1); 911 sci_start_rx(port, 1);
@@ -883,7 +915,7 @@ static int sci_startup(struct uart_port *port)
883 915
884static void sci_shutdown(struct uart_port *port) 916static void sci_shutdown(struct uart_port *port)
885{ 917{
886 struct sci_port *s = &sci_ports[port->line]; 918 struct sci_port *s = to_sci_port(port);
887 919
888 sci_stop_rx(port); 920 sci_stop_rx(port);
889 sci_stop_tx(port); 921 sci_stop_tx(port);
@@ -891,11 +923,6 @@ static void sci_shutdown(struct uart_port *port)
891 923
892 if (s->disable) 924 if (s->disable)
893 s->disable(port); 925 s->disable(port);
894
895#ifdef CONFIG_HAVE_CLK
896 clk_put(s->clk);
897 s->clk = NULL;
898#endif
899} 926}
900 927
901static void sci_set_termios(struct uart_port *port, struct ktermios *termios, 928static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
@@ -980,25 +1007,31 @@ static int sci_request_port(struct uart_port *port)
980 1007
981static void sci_config_port(struct uart_port *port, int flags) 1008static void sci_config_port(struct uart_port *port, int flags)
982{ 1009{
983 struct sci_port *s = &sci_ports[port->line]; 1010 struct sci_port *s = to_sci_port(port);
984 1011
985 port->type = s->type; 1012 port->type = s->type;
986 1013
987 if (port->flags & UPF_IOREMAP && !port->membase) { 1014 if (port->membase)
988#if defined(CONFIG_SUPERH64) 1015 return;
989 port->mapbase = onchip_remap(SCIF_ADDR_SH5, 1024, "SCIF"); 1016
990 port->membase = (void __iomem *)port->mapbase; 1017 if (port->flags & UPF_IOREMAP) {
991#else
992 port->membase = ioremap_nocache(port->mapbase, 0x40); 1018 port->membase = ioremap_nocache(port->mapbase, 0x40);
993#endif
994 1019
995 dev_err(port->dev, "can't remap port#%d\n", port->line); 1020 if (IS_ERR(port->membase))
1021 dev_err(port->dev, "can't remap port#%d\n", port->line);
1022 } else {
1023 /*
1024 * For the simple (and majority of) cases where we don't
1025 * need to do any remapping, just cast the cookie
1026 * directly.
1027 */
1028 port->membase = (void __iomem *)port->mapbase;
996 } 1029 }
997} 1030}
998 1031
999static int sci_verify_port(struct uart_port *port, struct serial_struct *ser) 1032static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
1000{ 1033{
1001 struct sci_port *s = &sci_ports[port->line]; 1034 struct sci_port *s = to_sci_port(port);
1002 1035
1003 if (ser->irq != s->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs) 1036 if (ser->irq != s->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs)
1004 return -EINVAL; 1037 return -EINVAL;
@@ -1032,63 +1065,60 @@ static struct uart_ops sci_uart_ops = {
1032#endif 1065#endif
1033}; 1066};
1034 1067
1035static void __init sci_init_ports(void) 1068static void __devinit sci_init_single(struct platform_device *dev,
1069 struct sci_port *sci_port,
1070 unsigned int index,
1071 struct plat_sci_port *p)
1036{ 1072{
1037 static int first = 1; 1073 sci_port->port.ops = &sci_uart_ops;
1038 int i; 1074 sci_port->port.iotype = UPIO_MEM;
1039 1075 sci_port->port.line = index;
1040 if (!first) 1076 sci_port->port.fifosize = 1;
1041 return;
1042
1043 first = 0;
1044
1045 for (i = 0; i < SCI_NPORTS; i++) {
1046 sci_ports[i].port.ops = &sci_uart_ops;
1047 sci_ports[i].port.iotype = UPIO_MEM;
1048 sci_ports[i].port.line = i;
1049 sci_ports[i].port.fifosize = 1;
1050 1077
1051#if defined(__H8300H__) || defined(__H8300S__) 1078#if defined(__H8300H__) || defined(__H8300S__)
1052#ifdef __H8300S__ 1079#ifdef __H8300S__
1053 sci_ports[i].enable = h8300_sci_enable; 1080 sci_port->enable = h8300_sci_enable;
1054 sci_ports[i].disable = h8300_sci_disable; 1081 sci_port->disable = h8300_sci_disable;
1055#endif 1082#endif
1056 sci_ports[i].port.uartclk = CONFIG_CPU_CLOCK; 1083 sci_port->port.uartclk = CONFIG_CPU_CLOCK;
1057#elif defined(CONFIG_HAVE_CLK) 1084#elif defined(CONFIG_HAVE_CLK)
1058 /* 1085 sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL;
1059 * XXX: We should use a proper SCI/SCIF clock 1086 sci_port->dclk = clk_get(&dev->dev, "peripheral_clk");
1060 */ 1087 sci_port->enable = sci_clk_enable;
1061 { 1088 sci_port->disable = sci_clk_disable;
1062 struct clk *clk = clk_get(NULL, "module_clk");
1063 sci_ports[i].port.uartclk = clk_get_rate(clk);
1064 clk_put(clk);
1065 }
1066#else 1089#else
1067#error "Need a valid uartclk" 1090#error "Need a valid uartclk"
1068#endif 1091#endif
1069 1092
1070 sci_ports[i].break_timer.data = (unsigned long)&sci_ports[i]; 1093 sci_port->break_timer.data = (unsigned long)sci_port;
1071 sci_ports[i].break_timer.function = sci_break_timer; 1094 sci_port->break_timer.function = sci_break_timer;
1095 init_timer(&sci_port->break_timer);
1072 1096
1073 init_timer(&sci_ports[i].break_timer); 1097 sci_port->port.mapbase = p->mapbase;
1074 } 1098 sci_port->port.membase = p->membase;
1075}
1076
1077int __init early_sci_setup(struct uart_port *port)
1078{
1079 if (unlikely(port->line > SCI_NPORTS))
1080 return -ENODEV;
1081 1099
1082 sci_init_ports(); 1100 sci_port->port.irq = p->irqs[SCIx_TXI_IRQ];
1101 sci_port->port.flags = p->flags;
1102 sci_port->port.dev = &dev->dev;
1103 sci_port->type = sci_port->port.type = p->type;
1083 1104
1084 sci_ports[port->line].port.membase = port->membase; 1105 memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
1085 sci_ports[port->line].port.mapbase = port->mapbase;
1086 sci_ports[port->line].port.type = port->type;
1087 1106
1088 return 0;
1089} 1107}
1090 1108
1091#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE 1109#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
1110static struct tty_driver *serial_console_device(struct console *co, int *index)
1111{
1112 struct uart_driver *p = &sci_uart_driver;
1113 *index = co->index;
1114 return p->tty_driver;
1115}
1116
1117static void serial_console_putchar(struct uart_port *port, int ch)
1118{
1119 sci_poll_put_char(port, ch);
1120}
1121
1092/* 1122/*
1093 * Print a string to the serial port trying not to disturb 1123 * Print a string to the serial port trying not to disturb
1094 * any possible real use of the port... 1124 * any possible real use of the port...
@@ -1096,25 +1126,27 @@ int __init early_sci_setup(struct uart_port *port)
1096static void serial_console_write(struct console *co, const char *s, 1126static void serial_console_write(struct console *co, const char *s,
1097 unsigned count) 1127 unsigned count)
1098{ 1128{
1099 struct uart_port *port = &serial_console_port->port; 1129 struct uart_port *port = co->data;
1130 struct sci_port *sci_port = to_sci_port(port);
1100 unsigned short bits; 1131 unsigned short bits;
1101 int i;
1102 1132
1103 for (i = 0; i < count; i++) { 1133 if (sci_port->enable)
1104 if (*s == 10) 1134 sci_port->enable(port);
1105 sci_poll_put_char(port, '\r');
1106 1135
1107 sci_poll_put_char(port, *s++); 1136 uart_console_write(port, s, count, serial_console_putchar);
1108 }
1109 1137
1110 /* wait until fifo is empty and last bit has been transmitted */ 1138 /* wait until fifo is empty and last bit has been transmitted */
1111 bits = SCxSR_TDxE(port) | SCxSR_TEND(port); 1139 bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
1112 while ((sci_in(port, SCxSR) & bits) != bits) 1140 while ((sci_in(port, SCxSR) & bits) != bits)
1113 cpu_relax(); 1141 cpu_relax();
1142
1143 if (sci_port->disable);
1144 sci_port->disable(port);
1114} 1145}
1115 1146
1116static int __init serial_console_setup(struct console *co, char *options) 1147static int __init serial_console_setup(struct console *co, char *options)
1117{ 1148{
1149 struct sci_port *sci_port;
1118 struct uart_port *port; 1150 struct uart_port *port;
1119 int baud = 115200; 1151 int baud = 115200;
1120 int bits = 8; 1152 int bits = 8;
@@ -1130,8 +1162,9 @@ static int __init serial_console_setup(struct console *co, char *options)
1130 if (co->index >= SCI_NPORTS) 1162 if (co->index >= SCI_NPORTS)
1131 co->index = 0; 1163 co->index = 0;
1132 1164
1133 serial_console_port = &sci_ports[co->index]; 1165 sci_port = &sci_ports[co->index];
1134 port = &serial_console_port->port; 1166 port = &sci_port->port;
1167 co->data = port;
1135 1168
1136 /* 1169 /*
1137 * Also need to check port->type, we don't actually have any 1170 * Also need to check port->type, we don't actually have any
@@ -1141,21 +1174,11 @@ static int __init serial_console_setup(struct console *co, char *options)
1141 */ 1174 */
1142 if (!port->type) 1175 if (!port->type)
1143 return -ENODEV; 1176 return -ENODEV;
1144 if (!port->membase || !port->mapbase)
1145 return -ENODEV;
1146
1147 port->type = serial_console_port->type;
1148
1149#ifdef CONFIG_HAVE_CLK
1150 if (!serial_console_port->clk)
1151 serial_console_port->clk = clk_get(NULL, "module_clk");
1152#endif
1153 1177
1154 if (port->flags & UPF_IOREMAP) 1178 sci_config_port(port, 0);
1155 sci_config_port(port, 0);
1156 1179
1157 if (serial_console_port->enable) 1180 if (sci_port->enable)
1158 serial_console_port->enable(port); 1181 sci_port->enable(port);
1159 1182
1160 if (options) 1183 if (options)
1161 uart_parse_options(options, &baud, &parity, &bits, &flow); 1184 uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -1166,22 +1189,21 @@ static int __init serial_console_setup(struct console *co, char *options)
1166 if (ret == 0) 1189 if (ret == 0)
1167 sci_stop_rx(port); 1190 sci_stop_rx(port);
1168#endif 1191#endif
1192 /* TODO: disable clock */
1169 return ret; 1193 return ret;
1170} 1194}
1171 1195
1172static struct console serial_console = { 1196static struct console serial_console = {
1173 .name = "ttySC", 1197 .name = "ttySC",
1174 .device = uart_console_device, 1198 .device = serial_console_device,
1175 .write = serial_console_write, 1199 .write = serial_console_write,
1176 .setup = serial_console_setup, 1200 .setup = serial_console_setup,
1177 .flags = CON_PRINTBUFFER, 1201 .flags = CON_PRINTBUFFER,
1178 .index = -1, 1202 .index = -1,
1179 .data = &sci_uart_driver,
1180}; 1203};
1181 1204
1182static int __init sci_console_init(void) 1205static int __init sci_console_init(void)
1183{ 1206{
1184 sci_init_ports();
1185 register_console(&serial_console); 1207 register_console(&serial_console);
1186 return 0; 1208 return 0;
1187} 1209}
@@ -1207,6 +1229,61 @@ static struct uart_driver sci_uart_driver = {
1207 .cons = SCI_CONSOLE, 1229 .cons = SCI_CONSOLE,
1208}; 1230};
1209 1231
1232
1233static int sci_remove(struct platform_device *dev)
1234{
1235 struct sh_sci_priv *priv = platform_get_drvdata(dev);
1236 struct sci_port *p;
1237 unsigned long flags;
1238
1239#ifdef CONFIG_HAVE_CLK
1240 cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
1241#endif
1242
1243 spin_lock_irqsave(&priv->lock, flags);
1244 list_for_each_entry(p, &priv->ports, node)
1245 uart_remove_one_port(&sci_uart_driver, &p->port);
1246
1247 spin_unlock_irqrestore(&priv->lock, flags);
1248
1249 kfree(priv);
1250 return 0;
1251}
1252
1253static int __devinit sci_probe_single(struct platform_device *dev,
1254 unsigned int index,
1255 struct plat_sci_port *p,
1256 struct sci_port *sciport)
1257{
1258 struct sh_sci_priv *priv = platform_get_drvdata(dev);
1259 unsigned long flags;
1260 int ret;
1261
1262 /* Sanity check */
1263 if (unlikely(index >= SCI_NPORTS)) {
1264 dev_notice(&dev->dev, "Attempting to register port "
1265 "%d when only %d are available.\n",
1266 index+1, SCI_NPORTS);
1267 dev_notice(&dev->dev, "Consider bumping "
1268 "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
1269 return 0;
1270 }
1271
1272 sci_init_single(dev, sciport, index, p);
1273
1274 ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
1275 if (ret)
1276 return ret;
1277
1278 INIT_LIST_HEAD(&sciport->node);
1279
1280 spin_lock_irqsave(&priv->lock, flags);
1281 list_add(&sciport->node, &priv->ports);
1282 spin_unlock_irqrestore(&priv->lock, flags);
1283
1284 return 0;
1285}
1286
1210/* 1287/*
1211 * Register a set of serial devices attached to a platform device. The 1288 * Register a set of serial devices attached to a platform device. The
1212 * list is terminated with a zero flags entry, which means we expect 1289 * list is terminated with a zero flags entry, which means we expect
@@ -1216,57 +1293,34 @@ static struct uart_driver sci_uart_driver = {
1216static int __devinit sci_probe(struct platform_device *dev) 1293static int __devinit sci_probe(struct platform_device *dev)
1217{ 1294{
1218 struct plat_sci_port *p = dev->dev.platform_data; 1295 struct plat_sci_port *p = dev->dev.platform_data;
1296 struct sh_sci_priv *priv;
1219 int i, ret = -EINVAL; 1297 int i, ret = -EINVAL;
1220 1298
1221 for (i = 0; p && p->flags != 0; p++, i++) { 1299 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1222 struct sci_port *sciport = &sci_ports[i]; 1300 if (!priv)
1301 return -ENOMEM;
1223 1302
1224 /* Sanity check */ 1303 INIT_LIST_HEAD(&priv->ports);
1225 if (unlikely(i == SCI_NPORTS)) { 1304 spin_lock_init(&priv->lock);
1226 dev_notice(&dev->dev, "Attempting to register port " 1305 platform_set_drvdata(dev, priv);
1227 "%d when only %d are available.\n",
1228 i+1, SCI_NPORTS);
1229 dev_notice(&dev->dev, "Consider bumping "
1230 "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
1231 break;
1232 }
1233 1306
1234 sciport->port.mapbase = p->mapbase; 1307#ifdef CONFIG_HAVE_CLK
1308 priv->clk_nb.notifier_call = sci_notifier;
1309 cpufreq_register_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
1310#endif
1235 1311
1236 if (p->mapbase && !p->membase) { 1312 if (dev->id != -1) {
1237 if (p->flags & UPF_IOREMAP) { 1313 ret = sci_probe_single(dev, dev->id, p, &sci_ports[dev->id]);
1238 p->membase = ioremap_nocache(p->mapbase, 0x40); 1314 if (ret)
1239 if (IS_ERR(p->membase)) { 1315 goto err_unreg;
1240 ret = PTR_ERR(p->membase); 1316 } else {
1241 goto err_unreg; 1317 for (i = 0; p && p->flags != 0; p++, i++) {
1242 } 1318 ret = sci_probe_single(dev, i, p, &sci_ports[i]);
1243 } else { 1319 if (ret)
1244 /* 1320 goto err_unreg;
1245 * For the simple (and majority of) cases
1246 * where we don't need to do any remapping,
1247 * just cast the cookie directly.
1248 */
1249 p->membase = (void __iomem *)p->mapbase;
1250 }
1251 } 1321 }
1252
1253 sciport->port.membase = p->membase;
1254
1255 sciport->port.irq = p->irqs[SCIx_TXI_IRQ];
1256 sciport->port.flags = p->flags;
1257 sciport->port.dev = &dev->dev;
1258
1259 sciport->type = sciport->port.type = p->type;
1260
1261 memcpy(&sciport->irqs, &p->irqs, sizeof(p->irqs));
1262
1263 uart_add_one_port(&sci_uart_driver, &sciport->port);
1264 } 1322 }
1265 1323
1266#ifdef CONFIG_HAVE_CLK
1267 cpufreq_register_notifier(&sci_nb, CPUFREQ_TRANSITION_NOTIFIER);
1268#endif
1269
1270#ifdef CONFIG_SH_STANDARD_BIOS 1324#ifdef CONFIG_SH_STANDARD_BIOS
1271 sh_bios_gdb_detach(); 1325 sh_bios_gdb_detach();
1272#endif 1326#endif
@@ -1274,50 +1328,36 @@ static int __devinit sci_probe(struct platform_device *dev)
1274 return 0; 1328 return 0;
1275 1329
1276err_unreg: 1330err_unreg:
1277 for (i = i - 1; i >= 0; i--) 1331 sci_remove(dev);
1278 uart_remove_one_port(&sci_uart_driver, &sci_ports[i].port);
1279
1280 return ret; 1332 return ret;
1281} 1333}
1282 1334
1283static int __devexit sci_remove(struct platform_device *dev)
1284{
1285 int i;
1286
1287#ifdef CONFIG_HAVE_CLK
1288 cpufreq_unregister_notifier(&sci_nb, CPUFREQ_TRANSITION_NOTIFIER);
1289#endif
1290
1291 for (i = 0; i < SCI_NPORTS; i++)
1292 uart_remove_one_port(&sci_uart_driver, &sci_ports[i].port);
1293
1294 return 0;
1295}
1296
1297static int sci_suspend(struct platform_device *dev, pm_message_t state) 1335static int sci_suspend(struct platform_device *dev, pm_message_t state)
1298{ 1336{
1299 int i; 1337 struct sh_sci_priv *priv = platform_get_drvdata(dev);
1338 struct sci_port *p;
1339 unsigned long flags;
1300 1340
1301 for (i = 0; i < SCI_NPORTS; i++) { 1341 spin_lock_irqsave(&priv->lock, flags);
1302 struct sci_port *p = &sci_ports[i]; 1342 list_for_each_entry(p, &priv->ports, node)
1343 uart_suspend_port(&sci_uart_driver, &p->port);
1303 1344
1304 if (p->type != PORT_UNKNOWN && p->port.dev == &dev->dev) 1345 spin_unlock_irqrestore(&priv->lock, flags);
1305 uart_suspend_port(&sci_uart_driver, &p->port);
1306 }
1307 1346
1308 return 0; 1347 return 0;
1309} 1348}
1310 1349
1311static int sci_resume(struct platform_device *dev) 1350static int sci_resume(struct platform_device *dev)
1312{ 1351{
1313 int i; 1352 struct sh_sci_priv *priv = platform_get_drvdata(dev);
1353 struct sci_port *p;
1354 unsigned long flags;
1314 1355
1315 for (i = 0; i < SCI_NPORTS; i++) { 1356 spin_lock_irqsave(&priv->lock, flags);
1316 struct sci_port *p = &sci_ports[i]; 1357 list_for_each_entry(p, &priv->ports, node)
1358 uart_resume_port(&sci_uart_driver, &p->port);
1317 1359
1318 if (p->type != PORT_UNKNOWN && p->port.dev == &dev->dev) 1360 spin_unlock_irqrestore(&priv->lock, flags);
1319 uart_resume_port(&sci_uart_driver, &p->port);
1320 }
1321 1361
1322 return 0; 1362 return 0;
1323} 1363}
@@ -1339,8 +1379,6 @@ static int __init sci_init(void)
1339 1379
1340 printk(banner); 1380 printk(banner);
1341 1381
1342 sci_init_ports();
1343
1344 ret = uart_register_driver(&sci_uart_driver); 1382 ret = uart_register_driver(&sci_uart_driver);
1345 if (likely(ret == 0)) { 1383 if (likely(ret == 0)) {
1346 ret = platform_driver_register(&sci_driver); 1384 ret = platform_driver_register(&sci_driver);
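Note on the reworked probe/suspend/resume paths above: all per-device state now
hangs off the platform device's driver data. A minimal sketch of that private
structure and of how a probed port would be linked into it is given below; only
the members actually visible in the hunks (lock, ports, clk_nb) come from the
patch, while the helper and its body are assumptions for illustration.

struct sh_sci_priv {
	spinlock_t lock;		/* protects the ports list */
	struct list_head ports;		/* struct sci_port entries, linked via their node member */
#ifdef CONFIG_HAVE_CLK
	struct notifier_block clk_nb;	/* cpufreq transition notifier */
#endif
};

/* Hypothetical helper: link a freshly probed port into the per-device list so
 * that sci_suspend()/sci_resume() can walk it later. */
static void sci_port_add(struct platform_device *dev, struct sci_port *sciport)
{
	struct sh_sci_priv *priv = platform_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_add(&sciport->node, &priv->ports);
	spin_unlock_irqrestore(&priv->lock, flags);
}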
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index d0aa82d7fce0..38072c15b845 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -91,6 +91,9 @@
91# define SCSPTR5 0xa4050128 91# define SCSPTR5 0xa4050128
92# define SCIF_ORER 0x0001 /* overrun error bit */ 92# define SCIF_ORER 0x0001 /* overrun error bit */
93# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ 93# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
94#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
95# define SCIF_ORER 0x0001 /* overrun error bit */
96# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
94#elif defined(CONFIG_CPU_SUBTYPE_SH4_202) 97#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
95# define SCSPTR2 0xffe80020 /* 16 bit SCIF */ 98# define SCSPTR2 0xffe80020 /* 16 bit SCIF */
96# define SCIF_ORER 0x0001 /* overrun error bit */ 99# define SCIF_ORER 0x0001 /* overrun error bit */
@@ -314,7 +317,18 @@
314 } \ 317 } \
315 } 318 }
316 319
317#define CPU_SCIF_FNS(name, scif_offset, scif_size) \ 320#ifdef CONFIG_H8300
321/* h8300 doesn't have a SCIF */
322#define CPU_SCIF_FNS(name) \
323 static inline unsigned int sci_##name##_in(struct uart_port *port) \
324 { \
325 return 0; \
326 } \
327 static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \
328 { \
329 }
330#else
331#define CPU_SCIF_FNS(name, scif_offset, scif_size) \
318 static inline unsigned int sci_##name##_in(struct uart_port *port) \ 332 static inline unsigned int sci_##name##_in(struct uart_port *port) \
319 { \ 333 { \
320 SCI_IN(scif_size, scif_offset); \ 334 SCI_IN(scif_size, scif_offset); \
@@ -323,6 +337,7 @@
323 { \ 337 { \
324 SCI_OUT(scif_size, scif_offset, value); \ 338 SCI_OUT(scif_size, scif_offset, value); \
325 } 339 }
340#endif
326 341
327#define CPU_SCI_FNS(name, sci_offset, sci_size) \ 342#define CPU_SCI_FNS(name, sci_offset, sci_size) \
328 static inline unsigned int sci_##name##_in(struct uart_port* port) \ 343 static inline unsigned int sci_##name##_in(struct uart_port* port) \
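For clarity: the new CONFIG_H8300 branch of CPU_SCIF_FNS() above only generates
stub accessors, since the h8300 parts have no SCIF block. Expanding, say,
CPU_SCIF_FNS(SCLSR) (register name chosen purely as an example) gives roughly:

static inline unsigned int sci_SCLSR_in(struct uart_port *port)
{
	return 0;	/* no SCIF register to read; pretend it reads back as zero */
}

static inline void sci_SCLSR_out(struct uart_port *port, unsigned int value)
{
	/* write silently discarded */
}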
@@ -360,8 +375,10 @@
360 sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \ 375 sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
361 h8_sci_offset, h8_sci_size) \ 376 h8_sci_offset, h8_sci_size) \
362 CPU_SCI_FNS(name, h8_sci_offset, h8_sci_size) 377 CPU_SCI_FNS(name, h8_sci_offset, h8_sci_size)
363#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) 378#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
364#elif defined(CONFIG_CPU_SUBTYPE_SH7723) 379 CPU_SCIF_FNS(name)
380#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
381 defined(CONFIG_CPU_SUBTYPE_SH7724)
365 #define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size) \ 382 #define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size) \
366 CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size) 383 CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size)
367 #define SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) \ 384 #define SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) \
@@ -390,7 +407,8 @@ SCIF_FNS(SCFDR, 0x1c, 16)
390SCIF_FNS(SCxTDR, 0x20, 8) 407SCIF_FNS(SCxTDR, 0x20, 8)
391SCIF_FNS(SCxRDR, 0x24, 8) 408SCIF_FNS(SCxRDR, 0x24, 8)
392SCIF_FNS(SCLSR, 0x24, 16) 409SCIF_FNS(SCLSR, 0x24, 16)
393#elif defined(CONFIG_CPU_SUBTYPE_SH7723) 410#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
411 defined(CONFIG_CPU_SUBTYPE_SH7724)
394SCIx_FNS(SCSMR, 0x00, 16, 0x00, 16) 412SCIx_FNS(SCSMR, 0x00, 16, 0x00, 16)
395SCIx_FNS(SCBRR, 0x04, 8, 0x04, 8) 413SCIx_FNS(SCBRR, 0x04, 8, 0x04, 8)
396SCIx_FNS(SCSCR, 0x08, 16, 0x08, 16) 414SCIx_FNS(SCSCR, 0x08, 16, 0x08, 16)
@@ -604,10 +622,21 @@ static inline int sci_rxd_in(struct uart_port *port)
604 return ctrl_inb(SCSPTR5) & 0x0008 ? 1 : 0; /* SCIF5 */ 622 return ctrl_inb(SCSPTR5) & 0x0008 ? 1 : 0; /* SCIF5 */
605 return 1; 623 return 1;
606} 624}
625#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
626# define SCFSR 0x0010
627# define SCASSR 0x0014
628static inline int sci_rxd_in(struct uart_port *port)
629{
630 if (port->type == PORT_SCIF)
631 return ctrl_inw((port->mapbase + SCFSR)) & SCIF_BRK ? 1 : 0;
632 if (port->type == PORT_SCIFA)
633 return ctrl_inw((port->mapbase + SCASSR)) & SCIF_BRK ? 1 : 0;
634 return 1;
635}
607#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103) 636#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
608static inline int sci_rxd_in(struct uart_port *port) 637static inline int sci_rxd_in(struct uart_port *port)
609{ 638{
610 return sci_in(port, SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */ 639 return sci_in(port, SCSPTR)&0x0001 ? 1 : 0; /* SCIF */
611} 640}
612#elif defined(__H8300H__) || defined(__H8300S__) 641#elif defined(__H8300H__) || defined(__H8300S__)
613static inline int sci_rxd_in(struct uart_port *port) 642static inline int sci_rxd_in(struct uart_port *port)
@@ -757,7 +786,8 @@ static inline int sci_rxd_in(struct uart_port *port)
757 defined(CONFIG_CPU_SUBTYPE_SH7720) || \ 786 defined(CONFIG_CPU_SUBTYPE_SH7720) || \
758 defined(CONFIG_CPU_SUBTYPE_SH7721) 787 defined(CONFIG_CPU_SUBTYPE_SH7721)
759#define SCBRR_VALUE(bps, clk) (((clk*2)+16*bps)/(32*bps)-1) 788#define SCBRR_VALUE(bps, clk) (((clk*2)+16*bps)/(32*bps)-1)
760#elif defined(CONFIG_CPU_SUBTYPE_SH7723) 789#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
790 defined(CONFIG_CPU_SUBTYPE_SH7724)
761static inline int scbrr_calc(struct uart_port *port, int bps, int clk) 791static inline int scbrr_calc(struct uart_port *port, int bps, int clk)
762{ 792{
763 if (port->type == PORT_SCIF) 793 if (port->type == PORT_SCIF)
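As a worked example of the bit-rate formula kept above (clock figure chosen here
purely for illustration): with a 33,333,333 Hz peripheral clock and 115200 bps,
SCBRR_VALUE(bps, clk) = ((33333333*2) + 16*115200) / (32*115200) - 1
                      = 68509866 / 3686400 - 1 = 18 - 1 = 17
after integer division, and 17 is the value programmed into SCBRR. The
SH7723/SH7724 branch uses scbrr_calc() instead so that the calculation can
depend on the port type at runtime.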
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index 12d13d99b6f0..d687a9b93d03 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -24,6 +24,7 @@
24#include <linux/sh_intc.h> 24#include <linux/sh_intc.h>
25#include <linux/sysdev.h> 25#include <linux/sysdev.h>
26#include <linux/list.h> 26#include <linux/list.h>
27#include <linux/topology.h>
27 28
28#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \ 29#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
29 ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \ 30 ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
@@ -770,11 +771,19 @@ void __init register_intc_controller(struct intc_desc *desc)
770 /* register the vectors one by one */ 771 /* register the vectors one by one */
771 for (i = 0; i < desc->nr_vectors; i++) { 772 for (i = 0; i < desc->nr_vectors; i++) {
772 struct intc_vect *vect = desc->vectors + i; 773 struct intc_vect *vect = desc->vectors + i;
774 unsigned int irq = evt2irq(vect->vect);
775 struct irq_desc *irq_desc;
773 776
774 if (!vect->enum_id) 777 if (!vect->enum_id)
775 continue; 778 continue;
776 779
777 intc_register_irq(desc, d, vect->enum_id, evt2irq(vect->vect)); 780 irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
781 if (unlikely(!irq_desc)) {
 782 printk(KERN_INFO "cannot get irq_desc for %d\n", irq);
783 continue;
784 }
785
786 intc_register_irq(desc, d, vect->enum_id, irq);
778 } 787 }
779} 788}
780 789
diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c
index e6467cf9f19f..020db7fc9153 100644
--- a/drivers/video/hitfb.c
+++ b/drivers/video/hitfb.c
@@ -335,9 +335,9 @@ static int __init hitfb_probe(struct platform_device *dev)
335 if (fb_get_options("hitfb", NULL)) 335 if (fb_get_options("hitfb", NULL))
336 return -ENODEV; 336 return -ENODEV;
337 337
338 hitfb_fix.mmio_start = CONFIG_HD64461_IOBASE+0x1000; 338 hitfb_fix.mmio_start = HD64461_IO_OFFSET(0x1000);
339 hitfb_fix.mmio_len = 0x1000; 339 hitfb_fix.mmio_len = 0x1000;
340 hitfb_fix.smem_start = CONFIG_HD64461_IOBASE + 0x02000000; 340 hitfb_fix.smem_start = HD64461_IO_OFFSET(0x02000000);
341 hitfb_fix.smem_len = 512 * 1024; 341 hitfb_fix.smem_len = 512 * 1024;
342 342
343 lcdclor = fb_readw(HD64461_LCDCLOR); 343 lcdclor = fb_readw(HD64461_LCDCLOR);
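Finally, the hitfb change stops deriving the controller addresses from the
Kconfig symbol CONFIG_HD64461_IOBASE and goes through the HD64461_IO_OFFSET()
helper instead, so the base address is owned by the HD64461 header rather than
the board configuration. A plausible shape for that helper (assumed here; the
real definition lives in the SH hd64461 header and may differ):

#define HD64461_IO_OFFSET(x)	(HD64461_IOBASE + (x))	/* assumed: base constant comes from the header */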