Diffstat (limited to 'arch/sh/kernel')
84 files changed, 2372 insertions, 1297 deletions
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 0d587da1ef12..02fd3ae8b0ee 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -13,8 +13,9 @@ CFLAGS_REMOVE_return_address.o = -pg
 
 obj-y := debugtraps.o dma-nommu.o dumpstack.o \
 	   idle.o io.o io_generic.o irq.o \
-	   irq_$(BITS).o machvec.o nmi_debug.o process_$(BITS).o \
-	   ptrace_$(BITS).o return_address.o \
+	   irq_$(BITS).o machvec.o nmi_debug.o process.o \
+	   process_$(BITS).o ptrace_$(BITS).o \
+	   reboot.o return_address.o \
 	   setup.o signal_$(BITS).o sys_sh.o sys_sh$(BITS).o \
 	   syscalls_$(BITS).o time.o topology.o traps.o \
 	   traps_$(BITS).o unwinder.o
@@ -22,7 +23,7 @@ obj-y := debugtraps.o dma-nommu.o dumpstack.o \
 obj-y				+= cpu/
 obj-$(CONFIG_VSYSCALL)		+= vsyscall/
 obj-$(CONFIG_SMP)		+= smp.o
-obj-$(CONFIG_SH_STANDARD_BIOS)	+= sh_bios.o early_printk.o
+obj-$(CONFIG_SH_STANDARD_BIOS)	+= sh_bios.o
 obj-$(CONFIG_KGDB)		+= kgdb.o
 obj-$(CONFIG_SH_CPU_FREQ)	+= cpufreq.o
 obj-$(CONFIG_MODULES)		+= sh_ksyms_$(BITS).o module.o
@@ -39,6 +40,7 @@ obj-$(CONFIG_HIBERNATION) += swsusp.o
 obj-$(CONFIG_DWARF_UNWINDER)	+= dwarf.o
 obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o perf_callchain.o
 
+obj-$(CONFIG_HAVE_HW_BREAKPOINT)		+= hw_breakpoint.o
 obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)	+= localtimer.o
 
 EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index d97c803719ec..0e48bc61c272 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -17,5 +17,7 @@ obj-$(CONFIG_ARCH_SHMOBILE) += shmobile/
 
 obj-$(CONFIG_SH_ADC)		+= adc.o
 obj-$(CONFIG_SH_CLK_CPG)	+= clock-cpg.o
+obj-$(CONFIG_SH_FPU)		+= fpu.o
+obj-$(CONFIG_SH_FPU_EMU)	+= fpu.o
 
 obj-y	+= irq/ init.o clock.o hwblk.o
diff --git a/arch/sh/kernel/cpu/adc.c b/arch/sh/kernel/cpu/adc.c
index da3d6877f93d..d307571d54b6 100644
--- a/arch/sh/kernel/cpu/adc.c
+++ b/arch/sh/kernel/cpu/adc.c
@@ -18,19 +18,19 @@ int adc_single(unsigned int channel)
 
 	off = (channel & 0x03) << 2;
 
-	csr = ctrl_inb(ADCSR);
+	csr = __raw_readb(ADCSR);
 	csr = channel | ADCSR_ADST | ADCSR_CKS;
-	ctrl_outb(csr, ADCSR);
+	__raw_writeb(csr, ADCSR);
 
 	do {
-		csr = ctrl_inb(ADCSR);
+		csr = __raw_readb(ADCSR);
 	} while ((csr & ADCSR_ADF) == 0);
 
 	csr &= ~(ADCSR_ADF | ADCSR_ADST);
-	ctrl_outb(csr, ADCSR);
+	__raw_writeb(csr, ADCSR);
 
-	return (((ctrl_inb(ADDRAH + off) << 8) |
-		ctrl_inb(ADDRAL + off)) >> 6);
+	return (((__raw_readb(ADDRAH + off) << 8) |
+		__raw_readb(ADDRAL + off)) >> 6);
 }
 
 EXPORT_SYMBOL(adc_single);
diff --git a/arch/sh/kernel/cpu/clock-cpg.c b/arch/sh/kernel/cpu/clock-cpg.c
index 6dfe2cced3fc..eed5eaff96ba 100644
--- a/arch/sh/kernel/cpu/clock-cpg.c
+++ b/arch/sh/kernel/cpu/clock-cpg.c
@@ -149,7 +149,8 @@ int __init sh_clk_div6_register(struct clk *clks, int nr)
 
 static unsigned long sh_clk_div4_recalc(struct clk *clk)
 {
-	struct clk_div_mult_table *table = clk->priv;
+	struct clk_div4_table *d4t = clk->priv;
+	struct clk_div_mult_table *table = d4t->div_mult_table;
 	unsigned int idx;
 
 	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
@@ -160,17 +161,90 @@ static unsigned long sh_clk_div4_recalc(struct clk *clk)
 	return clk->freq_table[idx].frequency;
 }
 
+static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
+{
+	struct clk_div4_table *d4t = clk->priv;
+	struct clk_div_mult_table *table = d4t->div_mult_table;
+	u32 value;
+	int ret;
+
+	if (!strcmp("pll_clk", parent->name))
+		value = __raw_readl(clk->enable_reg) & ~(1 << 7);
+	else
+		value = __raw_readl(clk->enable_reg) | (1 << 7);
+
+	ret = clk_reparent(clk, parent);
+	if (ret < 0)
+		return ret;
+
+	__raw_writel(value, clk->enable_reg);
+
+	/* Rebiuld the frequency table */
+	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
+			     table, &clk->arch_flags);
+
+	return 0;
+}
+
+static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate, int algo_id)
+{
+	struct clk_div4_table *d4t = clk->priv;
+	unsigned long value;
+	int idx = clk_rate_table_find(clk, clk->freq_table, rate);
+	if (idx < 0)
+		return idx;
+
+	value = __raw_readl(clk->enable_reg);
+	value &= ~(0xf << clk->enable_bit);
+	value |= (idx << clk->enable_bit);
+	__raw_writel(value, clk->enable_reg);
+
+	if (d4t->kick)
+		d4t->kick(clk);
+
+	return 0;
+}
+
+static int sh_clk_div4_enable(struct clk *clk)
+{
+	__raw_writel(__raw_readl(clk->enable_reg) & ~(1 << 8), clk->enable_reg);
+	return 0;
+}
+
+static void sh_clk_div4_disable(struct clk *clk)
+{
+	__raw_writel(__raw_readl(clk->enable_reg) | (1 << 8), clk->enable_reg);
+}
+
 static struct clk_ops sh_clk_div4_clk_ops = {
 	.recalc = sh_clk_div4_recalc,
+	.set_rate = sh_clk_div4_set_rate,
 	.round_rate = sh_clk_div_round_rate,
 };
 
-int __init sh_clk_div4_register(struct clk *clks, int nr,
-			struct clk_div_mult_table *table)
+static struct clk_ops sh_clk_div4_enable_clk_ops = {
+	.recalc = sh_clk_div4_recalc,
+	.set_rate = sh_clk_div4_set_rate,
+	.round_rate = sh_clk_div_round_rate,
+	.enable = sh_clk_div4_enable,
+	.disable = sh_clk_div4_disable,
+};
+
+static struct clk_ops sh_clk_div4_reparent_clk_ops = {
+	.recalc = sh_clk_div4_recalc,
+	.set_rate = sh_clk_div4_set_rate,
+	.round_rate = sh_clk_div_round_rate,
+	.enable = sh_clk_div4_enable,
+	.disable = sh_clk_div4_disable,
+	.set_parent = sh_clk_div4_set_parent,
+};
+
+static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
+			struct clk_div4_table *table, struct clk_ops *ops)
 {
 	struct clk *clkp;
 	void *freq_table;
-	int nr_divs = table->nr_divisors;
+	int nr_divs = table->div_mult_table->nr_divisors;
 	int freq_table_size = sizeof(struct cpufreq_frequency_table);
 	int ret = 0;
 	int k;
@@ -185,7 +259,7 @@ int __init sh_clk_div4_register(struct clk *clks, int nr,
 	for (k = 0; !ret && (k < nr); k++) {
 		clkp = clks + k;
 
-		clkp->ops = &sh_clk_div4_clk_ops;
+		clkp->ops = ops;
 		clkp->id = -1;
 		clkp->priv = table;
 
@@ -198,6 +272,26 @@ int __init sh_clk_div4_register(struct clk *clks, int nr,
 	return ret;
 }
 
+int __init sh_clk_div4_register(struct clk *clks, int nr,
+			struct clk_div4_table *table)
+{
+	return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
+}
+
+int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
+			struct clk_div4_table *table)
+{
+	return sh_clk_div4_register_ops(clks, nr, table,
+					&sh_clk_div4_enable_clk_ops);
+}
+
+int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
+			struct clk_div4_table *table)
+{
+	return sh_clk_div4_register_ops(clks, nr, table,
+					&sh_clk_div4_reparent_clk_ops);
+}
+
 #ifdef CONFIG_SH_CLK_CPG_LEGACY
 static struct clk master_clk = {
 	.name = "master_clk",
diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c
new file mode 100644
index 000000000000..f059ed62cf57
--- /dev/null
+++ b/arch/sh/kernel/cpu/fpu.c
@@ -0,0 +1,84 @@
+#include <linux/sched.h>
+#include <asm/processor.h>
+#include <asm/fpu.h>
+
+int init_fpu(struct task_struct *tsk)
+{
+	if (tsk_used_math(tsk)) {
+		if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
+			unlazy_fpu(tsk, task_pt_regs(tsk));
+		return 0;
+	}
+
+	/*
+	 * Memory allocation at the first usage of the FPU and other state.
+	 */
+	if (!tsk->thread.xstate) {
+		tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
+						      GFP_KERNEL);
+		if (!tsk->thread.xstate)
+			return -ENOMEM;
+	}
+
+	if (boot_cpu_data.flags & CPU_HAS_FPU) {
+		struct sh_fpu_hard_struct *fp = &tsk->thread.xstate->hardfpu;
+		memset(fp, 0, xstate_size);
+		fp->fpscr = FPSCR_INIT;
+	} else {
+		struct sh_fpu_soft_struct *fp = &tsk->thread.xstate->softfpu;
+		memset(fp, 0, xstate_size);
+		fp->fpscr = FPSCR_INIT;
+	}
+
+	set_stopped_child_used_math(tsk);
+	return 0;
+}
+
+#ifdef CONFIG_SH_FPU
+void __fpu_state_restore(void)
+{
+	struct task_struct *tsk = current;
+
+	restore_fpu(tsk);
+
+	task_thread_info(tsk)->status |= TS_USEDFPU;
+	tsk->fpu_counter++;
+}
+
+void fpu_state_restore(struct pt_regs *regs)
+{
+	struct task_struct *tsk = current;
+
+	if (unlikely(!user_mode(regs))) {
+		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
+		BUG();
+		return;
+	}
+
+	if (!tsk_used_math(tsk)) {
+		local_irq_enable();
+		/*
+		 * does a slab alloc which can sleep
+		 */
+		if (init_fpu(tsk)) {
+			/*
+			 * ran out of memory!
+			 */
+			do_group_exit(SIGKILL);
+			return;
+		}
+		local_irq_disable();
+	}
+
+	grab_fpu(regs);
+
+	__fpu_state_restore();
+}
+
+BUILD_TRAP_HANDLER(fpu_state_restore)
+{
+	TRAP_HANDLER_DECL;
+
+	fpu_state_restore(regs);
+}
+#endif /* CONFIG_SH_FPU */
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 89b4b76c0d76..c736422344eb 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -24,22 +24,32 @@
 #include <asm/elf.h>
 #include <asm/io.h>
 #include <asm/smp.h>
-#ifdef CONFIG_SUPERH32
-#include <asm/ubc.h>
+#include <asm/sh_bios.h>
+
+#ifdef CONFIG_SH_FPU
+#define cpu_has_fpu 1
+#else
+#define cpu_has_fpu 0
+#endif
+
+#ifdef CONFIG_SH_DSP
+#define cpu_has_dsp 1
+#else
+#define cpu_has_dsp 0
 #endif
 
 /*
  * Generic wrapper for command line arguments to disable on-chip
  * peripherals (nofpu, nodsp, and so forth).
  */
 #define onchip_setup(x) \
-static int x##_disabled __initdata = 0; \
+static int x##_disabled __initdata = !cpu_has_##x; \
 \
 static int __init x##_setup(char *opts) \
 { \
 	x##_disabled = 1; \
 	return 1; \
 } \
 __setup("no" __stringify(x), x##_setup);
 
 onchip_setup(fpu);
@@ -52,10 +62,10 @@ onchip_setup(dsp);
 static void __init speculative_execution_init(void)
 {
 	/* Clear RABD */
-	ctrl_outl(ctrl_inl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);
+	__raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);
 
 	/* Flush the update */
-	(void)ctrl_inl(CPUOPM);
+	(void)__raw_readl(CPUOPM);
 	ctrl_barrier();
 }
 #else
@@ -89,7 +99,7 @@ static void __init expmask_init(void)
 #endif
 
 /* 2nd-level cache init */
-void __uses_jump_to_uncached __attribute__ ((weak)) l2_cache_init(void)
+void __attribute__ ((weak)) l2_cache_init(void)
 {
 }
 
@@ -97,12 +107,12 @@ void __uses_jump_to_uncached __attribute__ ((weak)) l2_cache_init(void)
  * Generic first-level cache init
  */
 #ifdef CONFIG_SUPERH32
-static void __uses_jump_to_uncached cache_init(void)
+static void cache_init(void)
 {
 	unsigned long ccr, flags;
 
 	jump_to_uncached();
-	ccr = ctrl_inl(CCR);
+	ccr = __raw_readl(CCR);
 
 	/*
 	 * At this point we don't know whether the cache is enabled or not - a
@@ -146,7 +156,7 @@ static void __uses_jump_to_uncached cache_init(void)
 		for (addr = addrstart;
 		     addr < addrstart + waysize;
 		     addr += current_cpu_data.dcache.linesz)
-			ctrl_outl(0, addr);
+			__raw_writel(0, addr);
 
 		addrstart += current_cpu_data.dcache.way_incr;
 	} while (--ways);
@@ -179,7 +189,7 @@ static void __uses_jump_to_uncached cache_init(void)
 
 	l2_cache_init();
 
-	ctrl_outl(flags, CCR);
+	__raw_writel(flags, CCR);
 	back_to_cached();
 }
 #else
@@ -207,6 +217,18 @@ static void detect_cache_shape(void)
 		l2_cache_shape = -1; /* No S-cache */
 }
 
+static void __init fpu_init(void)
+{
+	/* Disable the FPU */
+	if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
+		printk("FPU Disabled\n");
+		current_cpu_data.flags &= ~CPU_HAS_FPU;
+	}
+
+	disable_fpu();
+	clear_used_math();
+}
+
 #ifdef CONFIG_SH_DSP
 static void __init release_dsp(void)
 {
@@ -244,28 +266,35 @@ static void __init dsp_init(void)
 	if (sr & SR_DSP)
 		current_cpu_data.flags |= CPU_HAS_DSP;
 
+	/* Disable the DSP */
+	if (dsp_disabled && (current_cpu_data.flags & CPU_HAS_DSP)) {
+		printk("DSP Disabled\n");
+		current_cpu_data.flags &= ~CPU_HAS_DSP;
+	}
+
 	/* Now that we've determined the DSP status, clear the DSP bit. */
 	release_dsp();
 }
+#else
+static inline void __init dsp_init(void) { }
 #endif /* CONFIG_SH_DSP */
 
 /**
  * sh_cpu_init
  *
- * This is our initial entry point for each CPU, and is invoked on the boot
- * CPU prior to calling start_kernel(). For SMP, a combination of this and
- * start_secondary() will bring up each processor to a ready state prior
- * to hand forking the idle loop.
+ * This is our initial entry point for each CPU, and is invoked on the
+ * boot CPU prior to calling start_kernel(). For SMP, a combination of
+ * this and start_secondary() will bring up each processor to a ready
+ * state prior to hand forking the idle loop.
  *
- * We do all of the basic processor init here, including setting up the
- * caches, FPU, DSP, kicking the UBC, etc. By the time start_kernel() is
- * hit (and subsequently platform_setup()) things like determining the
- * CPU subtype and initial configuration will all be done.
+ * We do all of the basic processor init here, including setting up
+ * the caches, FPU, DSP, etc. By the time start_kernel() is hit (and
+ * subsequently platform_setup()) things like determining the CPU
+ * subtype and initial configuration will all be done.
  *
  * Each processor family is still responsible for doing its own probing
  * and cache configuration in detect_cpu_and_cache_system().
  */
-
 asmlinkage void __init sh_cpu_init(void)
 {
 	current_thread_info()->cpu = hard_smp_processor_id();
@@ -302,18 +331,8 @@ asmlinkage void __init sh_cpu_init(void)
 		detect_cache_shape();
 	}
 
-	/* Disable the FPU */
-	if (fpu_disabled) {
-		printk("FPU Disabled\n");
-		current_cpu_data.flags &= ~CPU_HAS_FPU;
-	}
-
-	/* FPU initialization */
-	disable_fpu();
-	if ((current_cpu_data.flags & CPU_HAS_FPU)) {
-		current_thread_info()->status &= ~TS_USEDFPU;
-		clear_used_math();
-	}
+	fpu_init();
+	dsp_init();
 
 	/*
 	 * Initialize the per-CPU ASID cache very early, since the
@@ -321,18 +340,24 @@ asmlinkage void __init sh_cpu_init(void)
 	 */
 	current_cpu_data.asid_cache = NO_CONTEXT;
 
-#ifdef CONFIG_SH_DSP
-	/* Probe for DSP */
-	dsp_init();
-
-	/* Disable the DSP */
-	if (dsp_disabled) {
-		printk("DSP Disabled\n");
-		current_cpu_data.flags &= ~CPU_HAS_DSP;
-		release_dsp();
-	}
-#endif
-
 	speculative_execution_init();
 	expmask_init();
+
+	/* Do the rest of the boot processor setup */
+	if (raw_smp_processor_id() == 0) {
+		/* Save off the BIOS VBR, if there is one */
+		sh_bios_vbr_init();
+
+		/*
+		 * Setup VBR for boot CPU. Secondary CPUs do this through
+		 * start_secondary().
+		 */
+		per_cpu_trap_init();
+
+		/*
+		 * Boot processor to setup the FP and extended state
+		 * context info.
+		 */
+		init_thread_xstate();
+	}
 }
diff --git a/arch/sh/kernel/cpu/irq/intc-sh5.c b/arch/sh/kernel/cpu/irq/intc-sh5.c
index 06e7e2959b54..96a239583948 100644
--- a/arch/sh/kernel/cpu/irq/intc-sh5.c
+++ b/arch/sh/kernel/cpu/irq/intc-sh5.c
@@ -123,7 +123,7 @@ static void enable_intc_irq(unsigned int irq)
 		bitmask = 1 << (irq - 32);
 	}
 
-	ctrl_outl(bitmask, reg);
+	__raw_writel(bitmask, reg);
 }
 
 static void disable_intc_irq(unsigned int irq)
@@ -139,7 +139,7 @@ static void disable_intc_irq(unsigned int irq)
 		bitmask = 1 << (irq - 32);
 	}
 
-	ctrl_outl(bitmask, reg);
+	__raw_writel(bitmask, reg);
 }
 
 static void mask_and_ack_intc(unsigned int irq)
@@ -170,11 +170,11 @@ void __init plat_irq_setup(void)
 
 
 	/* Disable all interrupts and set all priorities to 0 to avoid trouble */
-	ctrl_outl(-1, INTC_INTDSB_0);
-	ctrl_outl(-1, INTC_INTDSB_1);
+	__raw_writel(-1, INTC_INTDSB_0);
+	__raw_writel(-1, INTC_INTDSB_1);
 
 	for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8)
-		ctrl_outl( NO_PRIORITY, reg);
+		__raw_writel( NO_PRIORITY, reg);
 
 
 #ifdef CONFIG_SH_CAYMAN
@@ -199,7 +199,7 @@ void __init plat_irq_setup(void)
 			reg = INTC_ICR_SET;
 			i = IRQ_IRL0;
 		}
-		ctrl_outl(INTC_ICR_IRLM, reg);
+		__raw_writel(INTC_ICR_IRLM, reg);
 
 		/* Set interrupt priorities according to platform description */
 		for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
@@ -207,7 +207,7 @@ void __init plat_irq_setup(void)
 				((i % INTC_INTPRI_PPREG) * 4);
 			if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
 				/* Upon the 7th, set Priority Register */
-				ctrl_outl(data, reg);
+				__raw_writel(data, reg);
 				data = 0;
 				reg += 8;
 			}
diff --git a/arch/sh/kernel/cpu/sh2/clock-sh7619.c b/arch/sh/kernel/cpu/sh2/clock-sh7619.c
index 4fe863170e31..0c9f24d7a02f 100644
--- a/arch/sh/kernel/cpu/sh2/clock-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/clock-sh7619.c
@@ -31,7 +31,7 @@ static const int pfc_divisors[] = {1,2,0,4};
 
 static void master_clk_init(struct clk *clk)
 {
-	clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 7];
+	clk->rate *= PLL2 * pll1rate[(__raw_readw(FREQCR) >> 8) & 7];
 }
 
 static struct clk_ops sh7619_master_clk_ops = {
@@ -40,7 +40,7 @@ static struct clk_ops sh7619_master_clk_ops = {
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FREQCR) & 0x0007);
+	int idx = (__raw_readw(FREQCR) & 0x0007);
 	return clk->parent->rate / pfc_divisors[idx];
 }
 
@@ -50,7 +50,7 @@ static struct clk_ops sh7619_module_clk_ops = {
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	return clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 7];
+	return clk->parent->rate / pll1rate[(__raw_readw(FREQCR) >> 8) & 7];
 }
 
 static struct clk_ops sh7619_bus_clk_ops = {
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
index 7814c76159a7..b26264dc2aef 100644
--- a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
@@ -34,7 +34,7 @@ static const int pfc_divisors[]={1,2,3,4,6,8,12};
 
 static void master_clk_init(struct clk *clk)
 {
-	return 10000000 * PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007];
+	return 10000000 * PLL2 * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
 }
 
 static struct clk_ops sh7201_master_clk_ops = {
@@ -43,7 +43,7 @@ static struct clk_ops sh7201_master_clk_ops = {
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FREQCR) & 0x0007);
+	int idx = (__raw_readw(FREQCR) & 0x0007);
 	return clk->parent->rate / pfc_divisors[idx];
 }
 
@@ -53,7 +53,7 @@ static struct clk_ops sh7201_module_clk_ops = {
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FREQCR) & 0x0007);
+	int idx = (__raw_readw(FREQCR) & 0x0007);
 	return clk->parent->rate / pfc_divisors[idx];
 }
 
@@ -63,7 +63,7 @@ static struct clk_ops sh7201_bus_clk_ops = {
 
 static unsigned long cpu_clk_recalc(struct clk *clk)
 {
-	int idx = ((ctrl_inw(FREQCR) >> 4) & 0x0007);
+	int idx = ((__raw_readw(FREQCR) >> 4) & 0x0007);
 	return clk->parent->rate / ifc_divisors[idx];
 }
 
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7203.c b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
index 940986965102..7e75d8f79502 100644
--- a/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
@@ -39,7 +39,7 @@ static const int pfc_divisors[]={1,2,3,4,6,8,12};
 
 static void master_clk_init(struct clk *clk)
 {
-	clk->rate *= pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0003] * PLL2 ;
+	clk->rate *= pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0003] * PLL2 ;
 }
 
 static struct clk_ops sh7203_master_clk_ops = {
@@ -48,7 +48,7 @@ static struct clk_ops sh7203_master_clk_ops = {
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FREQCR) & 0x0007);
+	int idx = (__raw_readw(FREQCR) & 0x0007);
 	return clk->parent->rate / pfc_divisors[idx];
 }
 
@@ -58,7 +58,7 @@ static struct clk_ops sh7203_module_clk_ops = {
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FREQCR) & 0x0007);
+	int idx = (__raw_readw(FREQCR) & 0x0007);
 	return clk->parent->rate / pfc_divisors[idx-2];
 }
 
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7206.c b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
index c2268bdeceeb..b27a5e2687ab 100644
--- a/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
@@ -34,7 +34,7 @@ static const int pfc_divisors[]={1,2,3,4,6,8,12};
 
 static void master_clk_init(struct clk *clk)
 {
-	clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007];
+	clk->rate *= PLL2 * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
 }
 
 static struct clk_ops sh7206_master_clk_ops = {
@@ -43,7 +43,7 @@ static struct clk_ops sh7206_master_clk_ops = {
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FREQCR) & 0x0007);
+	int idx = (__raw_readw(FREQCR) & 0x0007);
 	return clk->parent->rate / pfc_divisors[idx];
 }
 
@@ -53,7 +53,7 @@ static struct clk_ops sh7206_module_clk_ops = {
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	return clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007];
+	return clk->parent->rate / pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
 }
 
 static struct clk_ops sh7206_bus_clk_ops = {
@@ -62,7 +62,7 @@ static struct clk_ops sh7206_bus_clk_ops = {
 
 static unsigned long cpu_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FREQCR) & 0x0007);
+	int idx = (__raw_readw(FREQCR) & 0x0007);
 	return clk->parent->rate / ifc_divisors[idx];
 }
 
diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c
index d395ce5740e7..488d24e0cdf0 100644
--- a/arch/sh/kernel/cpu/sh2a/fpu.c
+++ b/arch/sh/kernel/cpu/sh2a/fpu.c
@@ -26,8 +26,7 @@
 /*
  * Save FPU registers onto task structure.
  */
-void
-save_fpu(struct task_struct *tsk)
+void save_fpu(struct task_struct *tsk)
 {
 	unsigned long dummy;
 
@@ -52,7 +51,7 @@ save_fpu(struct task_struct *tsk)
 		     "fmov.s fr0, @-%0\n\t"
 		     "lds %3, fpscr\n\t"
 		     : "=r" (dummy)
-		     : "0" ((char *)(&tsk->thread.fpu.hard.status)),
+		     : "0" ((char *)(&tsk->thread.xstate->hardfpu.status)),
 		       "r" (FPSCR_RCHG),
 		       "r" (FPSCR_INIT)
 		     : "memory");
@@ -60,8 +59,7 @@ save_fpu(struct task_struct *tsk)
 
 	disable_fpu();
 }
-static void
-restore_fpu(struct task_struct *tsk)
+void restore_fpu(struct task_struct *tsk)
 {
 	unsigned long dummy;
 
@@ -85,45 +83,12 @@ restore_fpu(struct task_struct *tsk)
 		     "lds.l @%0+, fpscr\n\t"
 		     "lds.l @%0+, fpul\n\t"
 		     : "=r" (dummy)
-		     : "0" (&tsk->thread.fpu), "r" (FPSCR_RCHG)
+		     : "0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
 		     : "memory");
 	disable_fpu();
 }
 
 /*
- * Load the FPU with signalling NANS. This bit pattern we're using
- * has the property that no matter wether considered as single or as
- * double precission represents signaling NANS.
- */
-
-static void
-fpu_init(void)
-{
-	enable_fpu();
-	asm volatile("lds %0, fpul\n\t"
-		     "fsts fpul, fr0\n\t"
-		     "fsts fpul, fr1\n\t"
-		     "fsts fpul, fr2\n\t"
-		     "fsts fpul, fr3\n\t"
-		     "fsts fpul, fr4\n\t"
-		     "fsts fpul, fr5\n\t"
-		     "fsts fpul, fr6\n\t"
-		     "fsts fpul, fr7\n\t"
-		     "fsts fpul, fr8\n\t"
-		     "fsts fpul, fr9\n\t"
-		     "fsts fpul, fr10\n\t"
-		     "fsts fpul, fr11\n\t"
-		     "fsts fpul, fr12\n\t"
-		     "fsts fpul, fr13\n\t"
-		     "fsts fpul, fr14\n\t"
-		     "fsts fpul, fr15\n\t"
-		     "lds %2, fpscr\n\t"
-		     : /* no output */
-		     : "r" (0), "r" (FPSCR_RCHG), "r" (FPSCR_INIT));
-	disable_fpu();
-}
-
-/*
  * Emulate arithmetic ops on denormalized number for some FPU insns.
  */
 
@@ -490,9 +455,9 @@ ieee_fpe_handler (struct pt_regs *regs)
 	if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
 		struct task_struct *tsk = current;
 
-		if ((tsk->thread.fpu.hard.fpscr & FPSCR_FPU_ERROR)) {
+		if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_FPU_ERROR)) {
 			/* FPU error */
-			denormal_to_double (&tsk->thread.fpu.hard,
+			denormal_to_double (&tsk->thread.xstate->hardfpu,
 					    (finsn >> 8) & 0xf);
 		} else
 			return 0;
@@ -507,9 +472,9 @@ ieee_fpe_handler (struct pt_regs *regs)
 
 		n = (finsn >> 8) & 0xf;
 		m = (finsn >> 4) & 0xf;
-		hx = tsk->thread.fpu.hard.fp_regs[n];
-		hy = tsk->thread.fpu.hard.fp_regs[m];
-		fpscr = tsk->thread.fpu.hard.fpscr;
+		hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+		hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+		fpscr = tsk->thread.xstate->hardfpu.fpscr;
 		prec = fpscr & (1 << 19);
 
 		if ((fpscr & FPSCR_FPU_ERROR)
@@ -519,15 +484,15 @@ ieee_fpe_handler (struct pt_regs *regs)
 
 			/* FPU error because of denormal */
 			llx = ((long long) hx << 32)
-			       | tsk->thread.fpu.hard.fp_regs[n+1];
+			       | tsk->thread.xstate->hardfpu.fp_regs[n+1];
 			lly = ((long long) hy << 32)
-			       | tsk->thread.fpu.hard.fp_regs[m+1];
+			       | tsk->thread.xstate->hardfpu.fp_regs[m+1];
 			if ((hx & 0x7fffffff) >= 0x00100000)
 				llx = denormal_muld(lly, llx);
 			else
 				llx = denormal_muld(llx, lly);
-			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
-			tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+			tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
 		} else if ((fpscr & FPSCR_FPU_ERROR)
 			 && (!prec && ((hx & 0x7fffffff) < 0x00800000
 				       || (hy & 0x7fffffff) < 0x00800000))) {
@@ -536,7 +501,7 @@ ieee_fpe_handler (struct pt_regs *regs)
 				hx = denormal_mulf(hy, hx);
 			else
 				hx = denormal_mulf(hx, hy);
-			tsk->thread.fpu.hard.fp_regs[n] = hx;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
 		} else
 			return 0;
 
@@ -550,9 +515,9 @@ ieee_fpe_handler (struct pt_regs *regs)
 
 		n = (finsn >> 8) & 0xf;
 		m = (finsn >> 4) & 0xf;
-		hx = tsk->thread.fpu.hard.fp_regs[n];
-		hy = tsk->thread.fpu.hard.fp_regs[m];
-		fpscr = tsk->thread.fpu.hard.fpscr;
+		hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+		hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+		fpscr = tsk->thread.xstate->hardfpu.fpscr;
 		prec = fpscr & (1 << 19);
 
 		if ((fpscr & FPSCR_FPU_ERROR)
@@ -562,15 +527,15 @@ ieee_fpe_handler (struct pt_regs *regs)
 
 			/* FPU error because of denormal */
 			llx = ((long long) hx << 32)
-			       | tsk->thread.fpu.hard.fp_regs[n+1];
+			       | tsk->thread.xstate->hardfpu.fp_regs[n+1];
 			lly = ((long long) hy << 32)
-			       | tsk->thread.fpu.hard.fp_regs[m+1];
+			       | tsk->thread.xstate->hardfpu.fp_regs[m+1];
 			if ((finsn & 0xf00f) == 0xf000)
 				llx = denormal_addd(llx, lly);
 			else
 				llx = denormal_addd(llx, lly ^ (1LL << 63));
-			tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
-			tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+			tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
 		} else if ((fpscr & FPSCR_FPU_ERROR)
 			 && (!prec && ((hx & 0x7fffffff) < 0x00800000
 				       || (hy & 0x7fffffff) < 0x00800000))) {
@@ -579,7 +544,7 @@ ieee_fpe_handler (struct pt_regs *regs)
 				hx = denormal_addf(hx, hy);
 			else
 				hx = denormal_addf(hx, hy ^ 0x80000000);
-			tsk->thread.fpu.hard.fp_regs[n] = hx;
+			tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
 		} else
 			return 0;
 
@@ -597,7 +562,7 @@ BUILD_TRAP_HANDLER(fpu_error)
 
 	__unlazy_fpu(tsk, regs);
 	if (ieee_fpe_handler(regs)) {
-		tsk->thread.fpu.hard.fpscr &=
+		tsk->thread.xstate->hardfpu.fpscr &=
 			~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
 		grab_fpu(regs);
 		restore_fpu(tsk);
@@ -607,33 +572,3 @@ BUILD_TRAP_HANDLER(fpu_error)
 
 	force_sig(SIGFPE, tsk);
 }
-
-void fpu_state_restore(struct pt_regs *regs)
-{
-	struct task_struct *tsk = current;
-
-	grab_fpu(regs);
-	if (unlikely(!user_mode(regs))) {
-		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
-		BUG();
-		return;
-	}
-
-	if (likely(used_math())) {
-		/* Using the FPU again. */
-		restore_fpu(tsk);
-	} else {
-		/* First time FPU user. */
-		fpu_init();
-		set_used_math();
-	}
-	task_thread_info(tsk)->status |= TS_USEDFPU;
-	tsk->fpu_counter++;
-}
-
-BUILD_TRAP_HANDLER(fpu_state_restore)
-{
-	TRAP_HANDLER_DECL;
-
-	fpu_state_restore(regs);
-}
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh3.c b/arch/sh/kernel/cpu/sh3/clock-sh3.c
index 27b8738f0b09..b78384afac09 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh3.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh3.c
@@ -28,7 +28,7 @@ static int pfc_divisors[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
 
 static void master_clk_init(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
 
 	clk->rate *= pfc_divisors[idx];
@@ -40,7 +40,7 @@ static struct clk_ops sh3_master_clk_ops = {
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
 
 	return clk->parent->rate / pfc_divisors[idx];
@@ -52,7 +52,7 @@ static struct clk_ops sh3_module_clk_ops = {
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4);
 
 	return clk->parent->rate / stc_multipliers[idx];
@@ -64,7 +64,7 @@ static struct clk_ops sh3_bus_clk_ops = {
 
 static unsigned long cpu_clk_recalc(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);
 
 	return clk->parent->rate / ifc_divisors[idx];
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7705.c b/arch/sh/kernel/cpu/sh3/clock-sh7705.c
index 0ca8f2c3646c..0ecea1451c6f 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7705.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7705.c
@@ -32,7 +32,7 @@ static int pfc_divisors[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
 
 static void master_clk_init(struct clk *clk)
 {
-	clk->rate *= pfc_divisors[ctrl_inw(FRQCR) & 0x0003];
+	clk->rate *= pfc_divisors[__raw_readw(FRQCR) & 0x0003];
 }
 
 static struct clk_ops sh7705_master_clk_ops = {
@@ -41,7 +41,7 @@ static struct clk_ops sh7705_master_clk_ops = {
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int idx = ctrl_inw(FRQCR) & 0x0003;
+	int idx = __raw_readw(FRQCR) & 0x0003;
 	return clk->parent->rate / pfc_divisors[idx];
 }
 
@@ -51,7 +51,7 @@ static struct clk_ops sh7705_module_clk_ops = {
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FRQCR) & 0x0300) >> 8;
+	int idx = (__raw_readw(FRQCR) & 0x0300) >> 8;
 	return clk->parent->rate / stc_multipliers[idx];
 }
 
@@ -61,7 +61,7 @@ static struct clk_ops sh7705_bus_clk_ops = {
 
 static unsigned long cpu_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FRQCR) & 0x0030) >> 4;
+	int idx = (__raw_readw(FRQCR) & 0x0030) >> 4;
 	return clk->parent->rate / ifc_divisors[idx];
 }
 
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7706.c b/arch/sh/kernel/cpu/sh3/clock-sh7706.c
index 4bf7887d310a..6f9ff8b57dd6 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7706.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7706.c
@@ -24,7 +24,7 @@ static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 };
 
 static void master_clk_init(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
 
 	clk->rate *= pfc_divisors[idx];
@@ -36,7 +36,7 @@ static struct clk_ops sh7706_master_clk_ops = {
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
 
 	return clk->parent->rate / pfc_divisors[idx];
@@ -48,7 +48,7 @@ static struct clk_ops sh7706_module_clk_ops = {
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4);
 
 	return clk->parent->rate / stc_multipliers[idx];
@@ -60,7 +60,7 @@ static struct clk_ops sh7706_bus_clk_ops = {
 
 static unsigned long cpu_clk_recalc(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);
 
 	return clk->parent->rate / ifc_divisors[idx];
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7709.c b/arch/sh/kernel/cpu/sh3/clock-sh7709.c
index e8749505bd2a..f302ba09e681 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7709.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7709.c
@@ -24,7 +24,7 @@ static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 };
 
 static void master_clk_init(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
 
 	clk->rate *= pfc_divisors[idx];
@@ -36,7 +36,7 @@ static struct clk_ops sh7709_master_clk_ops = {
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
 
 	return clk->parent->rate / pfc_divisors[idx];
@@ -48,7 +48,7 @@ static struct clk_ops sh7709_module_clk_ops = {
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = (frqcr & 0x0080) ?
 	        ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4) : 1;
 
@@ -61,7 +61,7 @@ static struct clk_ops sh7709_bus_clk_ops = {
 
 static unsigned long cpu_clk_recalc(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);
 
 	return clk->parent->rate / ifc_divisors[idx];
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7710.c b/arch/sh/kernel/cpu/sh3/clock-sh7710.c
index 030a58ba18a5..29a87d8946a4 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7710.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7710.c
@@ -26,7 +26,7 @@ static int md_table[] = { 1, 2, 3, 4, 6, 8, 12 };
 
 static void master_clk_init(struct clk *clk)
 {
-	clk->rate *= md_table[ctrl_inw(FRQCR) & 0x0007];
+	clk->rate *= md_table[__raw_readw(FRQCR) & 0x0007];
 }
 
 static struct clk_ops sh7710_master_clk_ops = {
@@ -35,7 +35,7 @@ static struct clk_ops sh7710_master_clk_ops = {
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FRQCR) & 0x0007);
+	int idx = (__raw_readw(FRQCR) & 0x0007);
 	return clk->parent->rate / md_table[idx];
 }
 
@@ -45,7 +45,7 @@ static struct clk_ops sh7710_module_clk_ops = {
 
 static unsigned long bus_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FRQCR) & 0x0700) >> 8;
+	int idx = (__raw_readw(FRQCR) & 0x0700) >> 8;
 	return clk->parent->rate / md_table[idx];
 }
 
@@ -55,7 +55,7 @@ static struct clk_ops sh7710_bus_clk_ops = {
 
 static unsigned long cpu_clk_recalc(struct clk *clk)
 {
-	int idx = (ctrl_inw(FRQCR) & 0x0070) >> 4;
+	int idx = (__raw_readw(FRQCR) & 0x0070) >> 4;
 	return clk->parent->rate / md_table[idx];
 }
 
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7712.c b/arch/sh/kernel/cpu/sh3/clock-sh7712.c
index 6428ee6c77ed..b0d0c5203996 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7712.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7712.c
@@ -23,7 +23,7 @@ static int divisors[] = { 1, 2, 3, 4, 6 };
 
 static void master_clk_init(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = (frqcr & 0x0300) >> 8;
 
 	clk->rate *= multipliers[idx];
@@ -35,7 +35,7 @@ static struct clk_ops sh7712_master_clk_ops = {
 
 static unsigned long module_clk_recalc(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = frqcr & 0x0007;
 
 	return clk->parent->rate / divisors[idx];
@@ -47,7 +47,7 @@ static struct clk_ops sh7712_module_clk_ops = {
 
 static unsigned long cpu_clk_recalc(struct clk *clk)
 {
-	int frqcr = ctrl_inw(FRQCR);
+	int frqcr = __raw_readw(FRQCR);
 	int idx = (frqcr & 0x0030) >> 4;
 
 	return clk->parent->rate / divisors[idx];
diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S
index 46610c35c232..99b4d020179a 100644
--- a/arch/sh/kernel/cpu/sh3/ex.S
+++ b/arch/sh/kernel/cpu/sh3/ex.S
@@ -49,7 +49,7 @@ ENTRY(exception_handling_table)
 	.long	exception_error	! reserved_instruction (filled by trap_init) /* 180 */
 	.long	exception_error	! illegal_slot_instruction (filled by trap_init) /*1A0*/
 	.long	nmi_trap_handler	/* 1C0 */	! Allow trap to debugger
-	.long	break_point_trap	/* 1E0 */
+	.long	breakpoint_trap_handler	/* 1E0 */
 
 	/*
 	 * Pad the remainder of the table out, exceptions residing in far
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c index f9c7df64eb01..295ec4c99e98 100644 --- a/arch/sh/kernel/cpu/sh3/probe.c +++ b/arch/sh/kernel/cpu/sh3/probe.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <asm/cache.h> | 16 | #include <asm/cache.h> |
17 | #include <asm/io.h> | 17 | #include <asm/io.h> |
18 | 18 | ||
19 | int __uses_jump_to_uncached detect_cpu_and_cache_system(void) | 19 | int detect_cpu_and_cache_system(void) |
20 | { | 20 | { |
21 | unsigned long addr0, addr1, data0, data1, data2, data3; | 21 | unsigned long addr0, addr1, data0, data1, data2, data3; |
22 | 22 | ||
@@ -30,23 +30,23 @@ int __uses_jump_to_uncached detect_cpu_and_cache_system(void) | |||
30 | addr1 = CACHE_OC_ADDRESS_ARRAY + (1 << 12); | 30 | addr1 = CACHE_OC_ADDRESS_ARRAY + (1 << 12); |
31 | 31 | ||
32 | /* First, write back & invalidate */ | 32 | /* First, write back & invalidate */ |
33 | data0 = ctrl_inl(addr0); | 33 | data0 = __raw_readl(addr0); |
34 | ctrl_outl(data0&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr0); | 34 | __raw_writel(data0&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr0); |
35 | data1 = ctrl_inl(addr1); | 35 | data1 = __raw_readl(addr1); |
36 | ctrl_outl(data1&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr1); | 36 | __raw_writel(data1&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr1); |
37 | 37 | ||
38 | /* Next, check if there's shadow or not */ | 38 | /* Next, check if there's shadow or not */ |
39 | data0 = ctrl_inl(addr0); | 39 | data0 = __raw_readl(addr0); |
40 | data0 ^= SH_CACHE_VALID; | 40 | data0 ^= SH_CACHE_VALID; |
41 | ctrl_outl(data0, addr0); | 41 | __raw_writel(data0, addr0); |
42 | data1 = ctrl_inl(addr1); | 42 | data1 = __raw_readl(addr1); |
43 | data2 = data1 ^ SH_CACHE_VALID; | 43 | data2 = data1 ^ SH_CACHE_VALID; |
44 | ctrl_outl(data2, addr1); | 44 | __raw_writel(data2, addr1); |
45 | data3 = ctrl_inl(addr0); | 45 | data3 = __raw_readl(addr0); |
46 | 46 | ||
47 | /* Lastly, invalidate them. */ | 47 | /* Lastly, invalidate them. */ |
48 | ctrl_outl(data0&~SH_CACHE_VALID, addr0); | 48 | __raw_writel(data0&~SH_CACHE_VALID, addr0); |
49 | ctrl_outl(data2&~SH_CACHE_VALID, addr1); | 49 | __raw_writel(data2&~SH_CACHE_VALID, addr1); |
50 | 50 | ||
51 | back_to_cached(); | 51 | back_to_cached(); |
52 | 52 | ||
@@ -94,9 +94,9 @@ int __uses_jump_to_uncached detect_cpu_and_cache_system(void) | |||
94 | boot_cpu_data.dcache.way_incr = (1 << 13); | 94 | boot_cpu_data.dcache.way_incr = (1 << 13); |
95 | boot_cpu_data.dcache.entry_mask = 0x1ff0; | 95 | boot_cpu_data.dcache.entry_mask = 0x1ff0; |
96 | boot_cpu_data.dcache.sets = 512; | 96 | boot_cpu_data.dcache.sets = 512; |
97 | ctrl_outl(CCR_CACHE_32KB, CCR3_REG); | 97 | __raw_writel(CCR_CACHE_32KB, CCR3_REG); |
98 | #else | 98 | #else |
99 | ctrl_outl(CCR_CACHE_16KB, CCR3_REG); | 99 | __raw_writel(CCR_CACHE_16KB, CCR3_REG); |
100 | #endif | 100 | #endif |
101 | #endif | 101 | #endif |
102 | } | 102 | } |
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh3.c b/arch/sh/kernel/cpu/sh3/setup-sh3.c index c98846857855..53be70b98116 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh3.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh3.c | |||
@@ -58,7 +58,7 @@ static DECLARE_INTC_DESC_ACK(intc_desc_irq45, "sh3-irq45", | |||
58 | void __init plat_irq_setup_pins(int mode) | 58 | void __init plat_irq_setup_pins(int mode) |
59 | { | 59 | { |
60 | if (mode == IRQ_MODE_IRQ) { | 60 | if (mode == IRQ_MODE_IRQ) { |
61 | ctrl_outw(ctrl_inw(INTC_ICR1) & ~INTC_ICR1_IRQLVL, INTC_ICR1); | 61 | __raw_writew(__raw_readw(INTC_ICR1) & ~INTC_ICR1_IRQLVL, INTC_ICR1); |
62 | register_intc_controller(&intc_desc_irq0123); | 62 | register_intc_controller(&intc_desc_irq0123); |
63 | return; | 63 | return; |
64 | } | 64 | } |
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c index 21421e34e7d5..6b80850294da 100644 --- a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c +++ b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c | |||
@@ -23,7 +23,7 @@ static int frqcr3_values[] = { 0, 1, 2, 3, 4, 5, 6 }; | |||
23 | 23 | ||
24 | static unsigned long emi_clk_recalc(struct clk *clk) | 24 | static unsigned long emi_clk_recalc(struct clk *clk) |
25 | { | 25 | { |
26 | int idx = ctrl_inl(CPG2_FRQCR3) & 0x0007; | 26 | int idx = __raw_readl(CPG2_FRQCR3) & 0x0007; |
27 | return clk->parent->rate / frqcr3_divisors[idx]; | 27 | return clk->parent->rate / frqcr3_divisors[idx]; |
28 | } | 28 | } |
29 | 29 | ||
@@ -52,7 +52,7 @@ static struct clk sh4202_emi_clk = { | |||
52 | 52 | ||
53 | static unsigned long femi_clk_recalc(struct clk *clk) | 53 | static unsigned long femi_clk_recalc(struct clk *clk) |
54 | { | 54 | { |
55 | int idx = (ctrl_inl(CPG2_FRQCR3) >> 3) & 0x0007; | 55 | int idx = (__raw_readl(CPG2_FRQCR3) >> 3) & 0x0007; |
56 | return clk->parent->rate / frqcr3_divisors[idx]; | 56 | return clk->parent->rate / frqcr3_divisors[idx]; |
57 | } | 57 | } |
58 | 58 | ||
@@ -92,7 +92,7 @@ static void shoc_clk_init(struct clk *clk) | |||
92 | 92 | ||
93 | static unsigned long shoc_clk_recalc(struct clk *clk) | 93 | static unsigned long shoc_clk_recalc(struct clk *clk) |
94 | { | 94 | { |
95 | int idx = (ctrl_inl(CPG2_FRQCR3) >> 6) & 0x0007; | 95 | int idx = (__raw_readl(CPG2_FRQCR3) >> 6) & 0x0007; |
96 | return clk->parent->rate / frqcr3_divisors[idx]; | 96 | return clk->parent->rate / frqcr3_divisors[idx]; |
97 | } | 97 | } |
98 | 98 | ||
@@ -122,10 +122,10 @@ static int shoc_clk_set_rate(struct clk *clk, unsigned long rate, int algo_id) | |||
122 | 122 | ||
123 | tmp = frqcr3_lookup(clk, rate); | 123 | tmp = frqcr3_lookup(clk, rate); |
124 | 124 | ||
125 | frqcr3 = ctrl_inl(CPG2_FRQCR3); | 125 | frqcr3 = __raw_readl(CPG2_FRQCR3); |
126 | frqcr3 &= ~(0x0007 << 6); | 126 | frqcr3 &= ~(0x0007 << 6); |
127 | frqcr3 |= tmp << 6; | 127 | frqcr3 |= tmp << 6; |
128 | ctrl_outl(frqcr3, CPG2_FRQCR3); | 128 | __raw_writel(frqcr3, CPG2_FRQCR3); |
129 | 129 | ||
130 | clk->rate = clk->parent->rate / frqcr3_divisors[tmp]; | 130 | clk->rate = clk->parent->rate / frqcr3_divisors[tmp]; |
131 | 131 | ||
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4.c b/arch/sh/kernel/cpu/sh4/clock-sh4.c index 73294d9cd049..5add75c1f539 100644 --- a/arch/sh/kernel/cpu/sh4/clock-sh4.c +++ b/arch/sh/kernel/cpu/sh4/clock-sh4.c | |||
@@ -28,7 +28,7 @@ static int pfc_divisors[] = { 2, 3, 4, 6, 8, 2, 2, 2 }; | |||
28 | 28 | ||
29 | static void master_clk_init(struct clk *clk) | 29 | static void master_clk_init(struct clk *clk) |
30 | { | 30 | { |
31 | clk->rate *= pfc_divisors[ctrl_inw(FRQCR) & 0x0007]; | 31 | clk->rate *= pfc_divisors[__raw_readw(FRQCR) & 0x0007]; |
32 | } | 32 | } |
33 | 33 | ||
34 | static struct clk_ops sh4_master_clk_ops = { | 34 | static struct clk_ops sh4_master_clk_ops = { |
@@ -37,7 +37,7 @@ static struct clk_ops sh4_master_clk_ops = { | |||
37 | 37 | ||
38 | static unsigned long module_clk_recalc(struct clk *clk) | 38 | static unsigned long module_clk_recalc(struct clk *clk) |
39 | { | 39 | { |
40 | int idx = (ctrl_inw(FRQCR) & 0x0007); | 40 | int idx = (__raw_readw(FRQCR) & 0x0007); |
41 | return clk->parent->rate / pfc_divisors[idx]; | 41 | return clk->parent->rate / pfc_divisors[idx]; |
42 | } | 42 | } |
43 | 43 | ||
@@ -47,7 +47,7 @@ static struct clk_ops sh4_module_clk_ops = { | |||
47 | 47 | ||
48 | static unsigned long bus_clk_recalc(struct clk *clk) | 48 | static unsigned long bus_clk_recalc(struct clk *clk) |
49 | { | 49 | { |
50 | int idx = (ctrl_inw(FRQCR) >> 3) & 0x0007; | 50 | int idx = (__raw_readw(FRQCR) >> 3) & 0x0007; |
51 | return clk->parent->rate / bfc_divisors[idx]; | 51 | return clk->parent->rate / bfc_divisors[idx]; |
52 | } | 52 | } |
53 | 53 | ||
@@ -57,7 +57,7 @@ static struct clk_ops sh4_bus_clk_ops = { | |||
57 | 57 | ||
58 | static unsigned long cpu_clk_recalc(struct clk *clk) | 58 | static unsigned long cpu_clk_recalc(struct clk *clk) |
59 | { | 59 | { |
60 | int idx = (ctrl_inw(FRQCR) >> 6) & 0x0007; | 60 | int idx = (__raw_readw(FRQCR) >> 6) & 0x0007; |
61 | return clk->parent->rate / ifc_divisors[idx]; | 61 | return clk->parent->rate / ifc_divisors[idx]; |
62 | } | 62 | } |
63 | 63 | ||
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c index e97857aec8a0..447482d7f65e 100644 --- a/arch/sh/kernel/cpu/sh4/fpu.c +++ b/arch/sh/kernel/cpu/sh4/fpu.c | |||
@@ -85,14 +85,14 @@ void save_fpu(struct task_struct *tsk) | |||
85 | "fmov.s fr1, @-%0\n\t" | 85 | "fmov.s fr1, @-%0\n\t" |
86 | "fmov.s fr0, @-%0\n\t" | 86 | "fmov.s fr0, @-%0\n\t" |
87 | "lds %3, fpscr\n\t":"=r" (dummy) | 87 | "lds %3, fpscr\n\t":"=r" (dummy) |
88 | :"0"((char *)(&tsk->thread.fpu.hard.status)), | 88 | :"0"((char *)(&tsk->thread.xstate->hardfpu.status)), |
89 | "r"(FPSCR_RCHG), "r"(FPSCR_INIT) | 89 | "r"(FPSCR_RCHG), "r"(FPSCR_INIT) |
90 | :"memory"); | 90 | :"memory"); |
91 | 91 | ||
92 | disable_fpu(); | 92 | disable_fpu(); |
93 | } | 93 | } |
94 | 94 | ||
95 | static void restore_fpu(struct task_struct *tsk) | 95 | void restore_fpu(struct task_struct *tsk) |
96 | { | 96 | { |
97 | unsigned long dummy; | 97 | unsigned long dummy; |
98 | 98 | ||
@@ -135,62 +135,11 @@ static void restore_fpu(struct task_struct *tsk) | |||
135 | "lds.l @%0+, fpscr\n\t" | 135 | "lds.l @%0+, fpscr\n\t" |
136 | "lds.l @%0+, fpul\n\t" | 136 | "lds.l @%0+, fpul\n\t" |
137 | :"=r" (dummy) | 137 | :"=r" (dummy) |
138 | :"0"(&tsk->thread.fpu), "r"(FPSCR_RCHG) | 138 | :"0" (tsk->thread.xstate), "r" (FPSCR_RCHG) |
139 | :"memory"); | 139 | :"memory"); |
140 | disable_fpu(); | 140 | disable_fpu(); |
141 | } | 141 | } |
142 | 142 | ||
143 | /* | ||
144 | * Load the FPU with signalling NANS. This bit pattern we're using | ||
145 | * has the property that no matter wether considered as single or as | ||
146 | * double precision represents signaling NANS. | ||
147 | */ | ||
148 | |||
149 | static void fpu_init(void) | ||
150 | { | ||
151 | enable_fpu(); | ||
152 | asm volatile ( "lds %0, fpul\n\t" | ||
153 | "lds %1, fpscr\n\t" | ||
154 | "fsts fpul, fr0\n\t" | ||
155 | "fsts fpul, fr1\n\t" | ||
156 | "fsts fpul, fr2\n\t" | ||
157 | "fsts fpul, fr3\n\t" | ||
158 | "fsts fpul, fr4\n\t" | ||
159 | "fsts fpul, fr5\n\t" | ||
160 | "fsts fpul, fr6\n\t" | ||
161 | "fsts fpul, fr7\n\t" | ||
162 | "fsts fpul, fr8\n\t" | ||
163 | "fsts fpul, fr9\n\t" | ||
164 | "fsts fpul, fr10\n\t" | ||
165 | "fsts fpul, fr11\n\t" | ||
166 | "fsts fpul, fr12\n\t" | ||
167 | "fsts fpul, fr13\n\t" | ||
168 | "fsts fpul, fr14\n\t" | ||
169 | "fsts fpul, fr15\n\t" | ||
170 | "frchg\n\t" | ||
171 | "fsts fpul, fr0\n\t" | ||
172 | "fsts fpul, fr1\n\t" | ||
173 | "fsts fpul, fr2\n\t" | ||
174 | "fsts fpul, fr3\n\t" | ||
175 | "fsts fpul, fr4\n\t" | ||
176 | "fsts fpul, fr5\n\t" | ||
177 | "fsts fpul, fr6\n\t" | ||
178 | "fsts fpul, fr7\n\t" | ||
179 | "fsts fpul, fr8\n\t" | ||
180 | "fsts fpul, fr9\n\t" | ||
181 | "fsts fpul, fr10\n\t" | ||
182 | "fsts fpul, fr11\n\t" | ||
183 | "fsts fpul, fr12\n\t" | ||
184 | "fsts fpul, fr13\n\t" | ||
185 | "fsts fpul, fr14\n\t" | ||
186 | "fsts fpul, fr15\n\t" | ||
187 | "frchg\n\t" | ||
188 | "lds %2, fpscr\n\t" | ||
189 | : /* no output */ | ||
190 | :"r" (0), "r"(FPSCR_RCHG), "r"(FPSCR_INIT)); | ||
191 | disable_fpu(); | ||
192 | } | ||
193 | |||
194 | /** | 143 | /** |
195 | * denormal_to_double - Given denormalized float number, | 144 | * denormal_to_double - Given denormalized float number, |
196 | * store double float | 145 | * store double float |
@@ -282,9 +231,9 @@ static int ieee_fpe_handler(struct pt_regs *regs) | |||
282 | /* fcnvsd */ | 231 | /* fcnvsd */ |
283 | struct task_struct *tsk = current; | 232 | struct task_struct *tsk = current; |
284 | 233 | ||
285 | if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR)) | 234 | if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR)) |
286 | /* FPU error */ | 235 | /* FPU error */ |
287 | denormal_to_double(&tsk->thread.fpu.hard, | 236 | denormal_to_double(&tsk->thread.xstate->hardfpu, |
288 | (finsn >> 8) & 0xf); | 237 | (finsn >> 8) & 0xf); |
289 | else | 238 | else |
290 | return 0; | 239 | return 0; |
@@ -300,9 +249,9 @@ static int ieee_fpe_handler(struct pt_regs *regs) | |||
300 | 249 | ||
301 | n = (finsn >> 8) & 0xf; | 250 | n = (finsn >> 8) & 0xf; |
302 | m = (finsn >> 4) & 0xf; | 251 | m = (finsn >> 4) & 0xf; |
303 | hx = tsk->thread.fpu.hard.fp_regs[n]; | 252 | hx = tsk->thread.xstate->hardfpu.fp_regs[n]; |
304 | hy = tsk->thread.fpu.hard.fp_regs[m]; | 253 | hy = tsk->thread.xstate->hardfpu.fp_regs[m]; |
305 | fpscr = tsk->thread.fpu.hard.fpscr; | 254 | fpscr = tsk->thread.xstate->hardfpu.fpscr; |
306 | prec = fpscr & FPSCR_DBL_PRECISION; | 255 | prec = fpscr & FPSCR_DBL_PRECISION; |
307 | 256 | ||
308 | if ((fpscr & FPSCR_CAUSE_ERROR) | 257 | if ((fpscr & FPSCR_CAUSE_ERROR) |
@@ -312,18 +261,18 @@ static int ieee_fpe_handler(struct pt_regs *regs) | |||
312 | 261 | ||
313 | /* FPU error because of denormal (doubles) */ | 262 | /* FPU error because of denormal (doubles) */ |
314 | llx = ((long long)hx << 32) | 263 | llx = ((long long)hx << 32) |
315 | | tsk->thread.fpu.hard.fp_regs[n + 1]; | 264 | | tsk->thread.xstate->hardfpu.fp_regs[n + 1]; |
316 | lly = ((long long)hy << 32) | 265 | lly = ((long long)hy << 32) |
317 | | tsk->thread.fpu.hard.fp_regs[m + 1]; | 266 | | tsk->thread.xstate->hardfpu.fp_regs[m + 1]; |
318 | llx = float64_mul(llx, lly); | 267 | llx = float64_mul(llx, lly); |
319 | tsk->thread.fpu.hard.fp_regs[n] = llx >> 32; | 268 | tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32; |
320 | tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff; | 269 | tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff; |
321 | } else if ((fpscr & FPSCR_CAUSE_ERROR) | 270 | } else if ((fpscr & FPSCR_CAUSE_ERROR) |
322 | && (!prec && ((hx & 0x7fffffff) < 0x00800000 | 271 | && (!prec && ((hx & 0x7fffffff) < 0x00800000 |
323 | || (hy & 0x7fffffff) < 0x00800000))) { | 272 | || (hy & 0x7fffffff) < 0x00800000))) { |
324 | /* FPU error because of denormal (floats) */ | 273 | /* FPU error because of denormal (floats) */ |
325 | hx = float32_mul(hx, hy); | 274 | hx = float32_mul(hx, hy); |
326 | tsk->thread.fpu.hard.fp_regs[n] = hx; | 275 | tsk->thread.xstate->hardfpu.fp_regs[n] = hx; |
327 | } else | 276 | } else |
328 | return 0; | 277 | return 0; |
329 | 278 | ||
@@ -338,9 +287,9 @@ static int ieee_fpe_handler(struct pt_regs *regs) | |||
338 | 287 | ||
339 | n = (finsn >> 8) & 0xf; | 288 | n = (finsn >> 8) & 0xf; |
340 | m = (finsn >> 4) & 0xf; | 289 | m = (finsn >> 4) & 0xf; |
341 | hx = tsk->thread.fpu.hard.fp_regs[n]; | 290 | hx = tsk->thread.xstate->hardfpu.fp_regs[n]; |
342 | hy = tsk->thread.fpu.hard.fp_regs[m]; | 291 | hy = tsk->thread.xstate->hardfpu.fp_regs[m]; |
343 | fpscr = tsk->thread.fpu.hard.fpscr; | 292 | fpscr = tsk->thread.xstate->hardfpu.fpscr; |
344 | prec = fpscr & FPSCR_DBL_PRECISION; | 293 | prec = fpscr & FPSCR_DBL_PRECISION; |
345 | 294 | ||
346 | if ((fpscr & FPSCR_CAUSE_ERROR) | 295 | if ((fpscr & FPSCR_CAUSE_ERROR) |
@@ -350,15 +299,15 @@ static int ieee_fpe_handler(struct pt_regs *regs) | |||
350 | 299 | ||
351 | /* FPU error because of denormal (doubles) */ | 300 | /* FPU error because of denormal (doubles) */ |
352 | llx = ((long long)hx << 32) | 301 | llx = ((long long)hx << 32) |
353 | | tsk->thread.fpu.hard.fp_regs[n + 1]; | 302 | | tsk->thread.xstate->hardfpu.fp_regs[n + 1]; |
354 | lly = ((long long)hy << 32) | 303 | lly = ((long long)hy << 32) |
355 | | tsk->thread.fpu.hard.fp_regs[m + 1]; | 304 | | tsk->thread.xstate->hardfpu.fp_regs[m + 1]; |
356 | if ((finsn & 0xf00f) == 0xf000) | 305 | if ((finsn & 0xf00f) == 0xf000) |
357 | llx = float64_add(llx, lly); | 306 | llx = float64_add(llx, lly); |
358 | else | 307 | else |
359 | llx = float64_sub(llx, lly); | 308 | llx = float64_sub(llx, lly); |
360 | tsk->thread.fpu.hard.fp_regs[n] = llx >> 32; | 309 | tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32; |
361 | tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff; | 310 | tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff; |
362 | } else if ((fpscr & FPSCR_CAUSE_ERROR) | 311 | } else if ((fpscr & FPSCR_CAUSE_ERROR) |
363 | && (!prec && ((hx & 0x7fffffff) < 0x00800000 | 312 | && (!prec && ((hx & 0x7fffffff) < 0x00800000 |
364 | || (hy & 0x7fffffff) < 0x00800000))) { | 313 | || (hy & 0x7fffffff) < 0x00800000))) { |
@@ -367,7 +316,7 @@ static int ieee_fpe_handler(struct pt_regs *regs) | |||
367 | hx = float32_add(hx, hy); | 316 | hx = float32_add(hx, hy); |
368 | else | 317 | else |
369 | hx = float32_sub(hx, hy); | 318 | hx = float32_sub(hx, hy); |
370 | tsk->thread.fpu.hard.fp_regs[n] = hx; | 319 | tsk->thread.xstate->hardfpu.fp_regs[n] = hx; |
371 | } else | 320 | } else |
372 | return 0; | 321 | return 0; |
373 | 322 | ||
@@ -382,9 +331,9 @@ static int ieee_fpe_handler(struct pt_regs *regs) | |||
382 | 331 | ||
383 | n = (finsn >> 8) & 0xf; | 332 | n = (finsn >> 8) & 0xf; |
384 | m = (finsn >> 4) & 0xf; | 333 | m = (finsn >> 4) & 0xf; |
385 | hx = tsk->thread.fpu.hard.fp_regs[n]; | 334 | hx = tsk->thread.xstate->hardfpu.fp_regs[n]; |
386 | hy = tsk->thread.fpu.hard.fp_regs[m]; | 335 | hy = tsk->thread.xstate->hardfpu.fp_regs[m]; |
387 | fpscr = tsk->thread.fpu.hard.fpscr; | 336 | fpscr = tsk->thread.xstate->hardfpu.fpscr; |
388 | prec = fpscr & FPSCR_DBL_PRECISION; | 337 | prec = fpscr & FPSCR_DBL_PRECISION; |
389 | 338 | ||
390 | if ((fpscr & FPSCR_CAUSE_ERROR) | 339 | if ((fpscr & FPSCR_CAUSE_ERROR) |
@@ -394,20 +343,20 @@ static int ieee_fpe_handler(struct pt_regs *regs) | |||
394 | 343 | ||
395 | /* FPU error because of denormal (doubles) */ | 344 | /* FPU error because of denormal (doubles) */ |
396 | llx = ((long long)hx << 32) | 345 | llx = ((long long)hx << 32) |
397 | | tsk->thread.fpu.hard.fp_regs[n + 1]; | 346 | | tsk->thread.xstate->hardfpu.fp_regs[n + 1]; |
398 | lly = ((long long)hy << 32) | 347 | lly = ((long long)hy << 32) |
399 | | tsk->thread.fpu.hard.fp_regs[m + 1]; | 348 | | tsk->thread.xstate->hardfpu.fp_regs[m + 1]; |
400 | 349 | ||
401 | llx = float64_div(llx, lly); | 350 | llx = float64_div(llx, lly); |
402 | 351 | ||
403 | tsk->thread.fpu.hard.fp_regs[n] = llx >> 32; | 352 | tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32; |
404 | tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff; | 353 | tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff; |
405 | } else if ((fpscr & FPSCR_CAUSE_ERROR) | 354 | } else if ((fpscr & FPSCR_CAUSE_ERROR) |
406 | && (!prec && ((hx & 0x7fffffff) < 0x00800000 | 355 | && (!prec && ((hx & 0x7fffffff) < 0x00800000 |
407 | || (hy & 0x7fffffff) < 0x00800000))) { | 356 | || (hy & 0x7fffffff) < 0x00800000))) { |
408 | /* FPU error because of denormal (floats) */ | 357 | /* FPU error because of denormal (floats) */ |
409 | hx = float32_div(hx, hy); | 358 | hx = float32_div(hx, hy); |
410 | tsk->thread.fpu.hard.fp_regs[n] = hx; | 359 | tsk->thread.xstate->hardfpu.fp_regs[n] = hx; |
411 | } else | 360 | } else |
412 | return 0; | 361 | return 0; |
413 | 362 | ||
@@ -420,17 +369,17 @@ static int ieee_fpe_handler(struct pt_regs *regs) | |||
420 | unsigned int hx; | 369 | unsigned int hx; |
421 | 370 | ||
422 | m = (finsn >> 8) & 0x7; | 371 | m = (finsn >> 8) & 0x7; |
423 | hx = tsk->thread.fpu.hard.fp_regs[m]; | 372 | hx = tsk->thread.xstate->hardfpu.fp_regs[m]; |
424 | 373 | ||
425 | if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR) | 374 | if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR) |
426 | && ((hx & 0x7fffffff) < 0x00100000)) { | 375 | && ((hx & 0x7fffffff) < 0x00100000)) { |
427 | /* subnormal double to float conversion */ | 376 | /* subnormal double to float conversion */ |
428 | long long llx; | 377 | long long llx; |
429 | 378 | ||
430 | llx = ((long long)tsk->thread.fpu.hard.fp_regs[m] << 32) | 379 | llx = ((long long)tsk->thread.xstate->hardfpu.fp_regs[m] << 32) |
431 | | tsk->thread.fpu.hard.fp_regs[m + 1]; | 380 | | tsk->thread.xstate->hardfpu.fp_regs[m + 1]; |
432 | 381 | ||
433 | tsk->thread.fpu.hard.fpul = float64_to_float32(llx); | 382 | tsk->thread.xstate->hardfpu.fpul = float64_to_float32(llx); |
434 | } else | 383 | } else |
435 | return 0; | 384 | return 0; |
436 | 385 | ||
@@ -449,7 +398,7 @@ void float_raise(unsigned int flags) | |||
449 | int float_rounding_mode(void) | 398 | int float_rounding_mode(void) |
450 | { | 399 | { |
451 | struct task_struct *tsk = current; | 400 | struct task_struct *tsk = current; |
452 | int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.fpu.hard.fpscr); | 401 | int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.xstate->hardfpu.fpscr); |
453 | return roundingMode; | 402 | return roundingMode; |
454 | } | 403 | } |
455 | 404 | ||
@@ -461,16 +410,16 @@ BUILD_TRAP_HANDLER(fpu_error) | |||
461 | __unlazy_fpu(tsk, regs); | 410 | __unlazy_fpu(tsk, regs); |
462 | fpu_exception_flags = 0; | 411 | fpu_exception_flags = 0; |
463 | if (ieee_fpe_handler(regs)) { | 412 | if (ieee_fpe_handler(regs)) { |
464 | tsk->thread.fpu.hard.fpscr &= | 413 | tsk->thread.xstate->hardfpu.fpscr &= |
465 | ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK); | 414 | ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK); |
466 | tsk->thread.fpu.hard.fpscr |= fpu_exception_flags; | 415 | tsk->thread.xstate->hardfpu.fpscr |= fpu_exception_flags; |
467 | /* Set the FPSCR flag as well as cause bits - simply | 416 | /* Set the FPSCR flag as well as cause bits - simply |
468 | * replicate the cause */ | 417 | * replicate the cause */ |
469 | tsk->thread.fpu.hard.fpscr |= (fpu_exception_flags >> 10); | 418 | tsk->thread.xstate->hardfpu.fpscr |= (fpu_exception_flags >> 10); |
470 | grab_fpu(regs); | 419 | grab_fpu(regs); |
471 | restore_fpu(tsk); | 420 | restore_fpu(tsk); |
472 | task_thread_info(tsk)->status |= TS_USEDFPU; | 421 | task_thread_info(tsk)->status |= TS_USEDFPU; |
473 | if ((((tsk->thread.fpu.hard.fpscr & FPSCR_ENABLE_MASK) >> 7) & | 422 | if ((((tsk->thread.xstate->hardfpu.fpscr & FPSCR_ENABLE_MASK) >> 7) & |
474 | (fpu_exception_flags >> 2)) == 0) { | 423 | (fpu_exception_flags >> 2)) == 0) { |
475 | return; | 424 | return; |
476 | } | 425 | } |
@@ -478,33 +427,3 @@ BUILD_TRAP_HANDLER(fpu_error) | |||
478 | 427 | ||
479 | force_sig(SIGFPE, tsk); | 428 | force_sig(SIGFPE, tsk); |
480 | } | 429 | } |
481 | |||
482 | void fpu_state_restore(struct pt_regs *regs) | ||
483 | { | ||
484 | struct task_struct *tsk = current; | ||
485 | |||
486 | grab_fpu(regs); | ||
487 | if (unlikely(!user_mode(regs))) { | ||
488 | printk(KERN_ERR "BUG: FPU is used in kernel mode.\n"); | ||
489 | BUG(); | ||
490 | return; | ||
491 | } | ||
492 | |||
493 | if (likely(used_math())) { | ||
494 | /* Using the FPU again. */ | ||
495 | restore_fpu(tsk); | ||
496 | } else { | ||
497 | /* First time FPU user. */ | ||
498 | fpu_init(); | ||
499 | set_used_math(); | ||
500 | } | ||
501 | task_thread_info(tsk)->status |= TS_USEDFPU; | ||
502 | tsk->fpu_counter++; | ||
503 | } | ||
504 | |||
505 | BUILD_TRAP_HANDLER(fpu_state_restore) | ||
506 | { | ||
507 | TRAP_HANDLER_DECL; | ||
508 | |||
509 | fpu_state_restore(regs); | ||
510 | } | ||
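[The fpu.c churn is largely mechanical: the per-task FPU register file moves from the embedded thread.fpu.hard union to a separately allocated area reached through thread.xstate, so every access gains one pointer dereference. A hedged sketch of the access pattern only, using the field names from the hunks above; the surrounding code is illustrative:]

	struct task_struct *tsk = current;
	unsigned int fpscr;

	/* old layout (removed): fpscr = tsk->thread.fpu.hard.fpscr; */

	/* new layout: FPU state lives behind the xstate pointer */
	fpscr = tsk->thread.xstate->hardfpu.fpscr;
	tsk->thread.xstate->hardfpu.fpscr =
		fpscr & ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);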
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c index d36f0c45f55f..822977a06d84 100644 --- a/arch/sh/kernel/cpu/sh4/probe.c +++ b/arch/sh/kernel/cpu/sh4/probe.c | |||
@@ -28,9 +28,9 @@ int __init detect_cpu_and_cache_system(void) | |||
28 | [9] = (1 << 16) | 28 | [9] = (1 << 16) |
29 | }; | 29 | }; |
30 | 30 | ||
31 | pvr = (ctrl_inl(CCN_PVR) >> 8) & 0xffffff; | 31 | pvr = (__raw_readl(CCN_PVR) >> 8) & 0xffffff; |
32 | prr = (ctrl_inl(CCN_PRR) >> 4) & 0xff; | 32 | prr = (__raw_readl(CCN_PRR) >> 4) & 0xff; |
33 | cvr = (ctrl_inl(CCN_CVR)); | 33 | cvr = (__raw_readl(CCN_CVR)); |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * Setup some sane SH-4 defaults for the icache | 36 | * Setup some sane SH-4 defaults for the icache |
@@ -71,11 +71,11 @@ int __init detect_cpu_and_cache_system(void) | |||
71 | boot_cpu_data.dcache.ways = 4; | 71 | boot_cpu_data.dcache.ways = 4; |
72 | } else { | 72 | } else { |
73 | /* And some SH-4 defaults.. */ | 73 | /* And some SH-4 defaults.. */ |
74 | boot_cpu_data.flags |= CPU_HAS_PTEA; | 74 | boot_cpu_data.flags |= CPU_HAS_PTEA | CPU_HAS_FPU; |
75 | boot_cpu_data.family = CPU_FAMILY_SH4; | 75 | boot_cpu_data.family = CPU_FAMILY_SH4; |
76 | } | 76 | } |
77 | 77 | ||
78 | /* FPU detection works for everyone */ | 78 | /* FPU detection works for almost everyone */ |
79 | if ((cvr & 0x20000000)) | 79 | if ((cvr & 0x20000000)) |
80 | boot_cpu_data.flags |= CPU_HAS_FPU; | 80 | boot_cpu_data.flags |= CPU_HAS_FPU; |
81 | 81 | ||
@@ -124,6 +124,7 @@ int __init detect_cpu_and_cache_system(void) | |||
124 | boot_cpu_data.type = CPU_SH7785; | 124 | boot_cpu_data.type = CPU_SH7785; |
125 | break; | 125 | break; |
126 | case 0x4004: | 126 | case 0x4004: |
127 | case 0x4005: | ||
127 | boot_cpu_data.type = CPU_SH7786; | 128 | boot_cpu_data.type = CPU_SH7786; |
128 | boot_cpu_data.flags |= CPU_HAS_PTEAEX | CPU_HAS_L2_CACHE; | 129 | boot_cpu_data.flags |= CPU_HAS_PTEAEX | CPU_HAS_L2_CACHE; |
129 | break; | 130 | break; |
@@ -160,6 +161,7 @@ int __init detect_cpu_and_cache_system(void) | |||
160 | break; | 161 | break; |
161 | case 0x700: | 162 | case 0x700: |
162 | boot_cpu_data.type = CPU_SH4_501; | 163 | boot_cpu_data.type = CPU_SH4_501; |
164 | boot_cpu_data.flags &= ~CPU_HAS_FPU; | ||
163 | boot_cpu_data.icache.ways = 2; | 165 | boot_cpu_data.icache.ways = 2; |
164 | boot_cpu_data.dcache.ways = 2; | 166 | boot_cpu_data.dcache.ways = 2; |
165 | break; | 167 | break; |
@@ -227,7 +229,7 @@ int __init detect_cpu_and_cache_system(void) | |||
227 | * Size calculation is much more sensible | 229 | * Size calculation is much more sensible |
228 | * than it is for the L1. | 230 | * than it is for the L1. |
229 | * | 231 | * |
230 | * Sizes are 128KB, 258KB, 512KB, and 1MB. | 232 | * Sizes are 128KB, 256KB, 512KB, and 1MB. |
231 | */ | 233 | */ |
232 | size = (cvr & 0xf) << 17; | 234 | size = (cvr & 0xf) << 17; |
233 | 235 | ||
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c index 4b733715cdb5..b9b7e10ad68f 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c | |||
@@ -198,7 +198,7 @@ void __init plat_irq_setup_pins(int mode) | |||
198 | { | 198 | { |
199 | switch (mode) { | 199 | switch (mode) { |
200 | case IRQ_MODE_IRQ: /* individual interrupt mode for IRL3-0 */ | 200 | case IRQ_MODE_IRQ: /* individual interrupt mode for IRL3-0 */ |
201 | ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR); | 201 | __raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR); |
202 | register_intc_controller(&intc_desc_irlm); | 202 | register_intc_controller(&intc_desc_irlm); |
203 | break; | 203 | break; |
204 | default: | 204 | default: |
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c index b2a9df1af64c..ffd79e57254f 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c | |||
@@ -442,7 +442,7 @@ void __init plat_irq_setup_pins(int mode) | |||
442 | 442 | ||
443 | switch (mode) { | 443 | switch (mode) { |
444 | case IRQ_MODE_IRQ: /* individual interrupt mode for IRL3-0 */ | 444 | case IRQ_MODE_IRQ: /* individual interrupt mode for IRL3-0 */ |
445 | ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR); | 445 | __raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR); |
446 | register_intc_controller(&intc_desc_irlm); | 446 | register_intc_controller(&intc_desc_irlm); |
447 | break; | 447 | break; |
448 | default: | 448 | default: |
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c index 5b74cc0b43da..a16eb3656f4b 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c | |||
@@ -319,7 +319,7 @@ void __init plat_irq_setup_pins(int mode) | |||
319 | { | 319 | { |
320 | switch (mode) { | 320 | switch (mode) { |
321 | case IRQ_MODE_IRQ: | 321 | case IRQ_MODE_IRQ: |
322 | ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR); | 322 | __raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR); |
323 | register_intc_controller(&intc_desc_irq); | 323 | register_intc_controller(&intc_desc_irq); |
324 | break; | 324 | break; |
325 | default: | 325 | default: |
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c index 8a8a993f55ea..fc065f9da6e5 100644 --- a/arch/sh/kernel/cpu/sh4/sq.c +++ b/arch/sh/kernel/cpu/sh4/sq.c | |||
@@ -43,9 +43,9 @@ static unsigned long *sq_bitmap; | |||
43 | 43 | ||
44 | #define store_queue_barrier() \ | 44 | #define store_queue_barrier() \ |
45 | do { \ | 45 | do { \ |
46 | (void)ctrl_inl(P4SEG_STORE_QUE); \ | 46 | (void)__raw_readl(P4SEG_STORE_QUE); \ |
47 | ctrl_outl(0, P4SEG_STORE_QUE + 0); \ | 47 | __raw_writel(0, P4SEG_STORE_QUE + 0); \ |
48 | ctrl_outl(0, P4SEG_STORE_QUE + 8); \ | 48 | __raw_writel(0, P4SEG_STORE_QUE + 8); \ |
49 | } while (0); | 49 | } while (0); |
50 | 50 | ||
51 | /** | 51 | /** |
@@ -100,7 +100,7 @@ static inline void sq_mapping_list_del(struct sq_mapping *map) | |||
100 | spin_unlock_irq(&sq_mapping_lock); | 100 | spin_unlock_irq(&sq_mapping_lock); |
101 | } | 101 | } |
102 | 102 | ||
103 | static int __sq_remap(struct sq_mapping *map, unsigned long flags) | 103 | static int __sq_remap(struct sq_mapping *map, pgprot_t prot) |
104 | { | 104 | { |
105 | #if defined(CONFIG_MMU) | 105 | #if defined(CONFIG_MMU) |
106 | struct vm_struct *vma; | 106 | struct vm_struct *vma; |
@@ -113,7 +113,7 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags) | |||
113 | 113 | ||
114 | if (ioremap_page_range((unsigned long)vma->addr, | 114 | if (ioremap_page_range((unsigned long)vma->addr, |
115 | (unsigned long)vma->addr + map->size, | 115 | (unsigned long)vma->addr + map->size, |
116 | vma->phys_addr, __pgprot(flags))) { | 116 | vma->phys_addr, prot)) { |
117 | vunmap(vma->addr); | 117 | vunmap(vma->addr); |
118 | return -EAGAIN; | 118 | return -EAGAIN; |
119 | } | 119 | } |
@@ -123,8 +123,8 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags) | |||
123 | * straightforward, as we can just load up each queue's QACR with | 123 | * straightforward, as we can just load up each queue's QACR with |
124 | * the physical address appropriately masked. | 124 | * the physical address appropriately masked. |
125 | */ | 125 | */ |
126 | ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0); | 126 | __raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0); |
127 | ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1); | 127 | __raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1); |
128 | #endif | 128 | #endif |
129 | 129 | ||
130 | return 0; | 130 | return 0; |
@@ -135,14 +135,14 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags) | |||
135 | * @phys: Physical address of mapping. | 135 | * @phys: Physical address of mapping. |
136 | * @size: Length of mapping. | 136 | * @size: Length of mapping. |
137 | * @name: User invoking mapping. | 137 | * @name: User invoking mapping. |
138 | * @flags: Protection flags. | 138 | * @prot: Protection bits. |
139 | * | 139 | * |
140 | * Remaps the physical address @phys through the next available store queue | 140 | * Remaps the physical address @phys through the next available store queue |
141 | * address of @size length. @name is logged at boot time as well as through | 141 | * address of @size length. @name is logged at boot time as well as through |
142 | * the sysfs interface. | 142 | * the sysfs interface. |
143 | */ | 143 | */ |
144 | unsigned long sq_remap(unsigned long phys, unsigned int size, | 144 | unsigned long sq_remap(unsigned long phys, unsigned int size, |
145 | const char *name, unsigned long flags) | 145 | const char *name, pgprot_t prot) |
146 | { | 146 | { |
147 | struct sq_mapping *map; | 147 | struct sq_mapping *map; |
148 | unsigned long end; | 148 | unsigned long end; |
@@ -177,7 +177,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size, | |||
177 | 177 | ||
178 | map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT); | 178 | map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT); |
179 | 179 | ||
180 | ret = __sq_remap(map, pgprot_val(PAGE_KERNEL_NOCACHE) | flags); | 180 | ret = __sq_remap(map, prot); |
181 | if (unlikely(ret != 0)) | 181 | if (unlikely(ret != 0)) |
182 | goto out; | 182 | goto out; |
183 | 183 | ||
@@ -309,8 +309,7 @@ static ssize_t mapping_store(const char *buf, size_t count) | |||
309 | return -EIO; | 309 | return -EIO; |
310 | 310 | ||
311 | if (likely(len)) { | 311 | if (likely(len)) { |
312 | int ret = sq_remap(base, len, "Userspace", | 312 | int ret = sq_remap(base, len, "Userspace", PAGE_SHARED); |
313 | pgprot_val(PAGE_SHARED)); | ||
314 | if (ret < 0) | 313 | if (ret < 0) |
315 | return ret; | 314 | return ret; |
316 | } else | 315 | } else |
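[Caller-visible change in sq.c: sq_remap() now takes a pgprot_t directly instead of a raw protection value that was OR'd with PAGE_KERNEL_NOCACHE internally. A caller-side sketch mirroring the in-tree mapping_store() usage above; base and len are whatever the caller already parsed:]

	/* old calling convention (removed):
	 *   ret = sq_remap(base, len, "Userspace", pgprot_val(PAGE_SHARED));
	 */

	/* new calling convention: pass the protection bits as a pgprot_t */
	int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
	if (ret < 0)
		return ret;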
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile index 33bab477d2e2..b144e8af89dc 100644 --- a/arch/sh/kernel/cpu/sh4a/Makefile +++ b/arch/sh/kernel/cpu/sh4a/Makefile | |||
@@ -41,7 +41,8 @@ pinmux-$(CONFIG_CPU_SUBTYPE_SH7757) := pinmux-sh7757.o | |||
41 | pinmux-$(CONFIG_CPU_SUBTYPE_SH7785) := pinmux-sh7785.o | 41 | pinmux-$(CONFIG_CPU_SUBTYPE_SH7785) := pinmux-sh7785.o |
42 | pinmux-$(CONFIG_CPU_SUBTYPE_SH7786) := pinmux-sh7786.o | 42 | pinmux-$(CONFIG_CPU_SUBTYPE_SH7786) := pinmux-sh7786.o |
43 | 43 | ||
44 | obj-y += $(clock-y) | 44 | obj-y += $(clock-y) |
45 | obj-$(CONFIG_SMP) += $(smp-y) | 45 | obj-$(CONFIG_SMP) += $(smp-y) |
46 | obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y) | 46 | obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y) |
47 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o | 47 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o |
48 | obj-$(CONFIG_HAVE_HW_BREAKPOINT) += ubc.o | ||
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c index 0ee3ee861252..2c16df37eda6 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c | |||
@@ -107,13 +107,17 @@ struct clk *main_clks[] = { | |||
107 | static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; | 107 | static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; |
108 | static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 }; | 108 | static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 }; |
109 | 109 | ||
110 | static struct clk_div_mult_table div4_table = { | 110 | static struct clk_div_mult_table div4_div_mult_table = { |
111 | .divisors = divisors, | 111 | .divisors = divisors, |
112 | .nr_divisors = ARRAY_SIZE(divisors), | 112 | .nr_divisors = ARRAY_SIZE(divisors), |
113 | .multipliers = multipliers, | 113 | .multipliers = multipliers, |
114 | .nr_multipliers = ARRAY_SIZE(multipliers), | 114 | .nr_multipliers = ARRAY_SIZE(multipliers), |
115 | }; | 115 | }; |
116 | 116 | ||
117 | static struct clk_div4_table div4_table = { | ||
118 | .div_mult_table = &div4_div_mult_table, | ||
119 | }; | ||
120 | |||
117 | enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, | 121 | enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, |
118 | DIV4_SIUA, DIV4_SIUB, DIV4_NR }; | 122 | DIV4_SIUA, DIV4_SIUB, DIV4_NR }; |
119 | 123 | ||
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7366.c b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c index a95ebaba095c..91588d280cd8 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7366.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c | |||
@@ -110,13 +110,17 @@ struct clk *main_clks[] = { | |||
110 | static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; | 110 | static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; |
111 | static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 }; | 111 | static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 }; |
112 | 112 | ||
113 | static struct clk_div_mult_table div4_table = { | 113 | static struct clk_div_mult_table div4_div_mult_table = { |
114 | .divisors = divisors, | 114 | .divisors = divisors, |
115 | .nr_divisors = ARRAY_SIZE(divisors), | 115 | .nr_divisors = ARRAY_SIZE(divisors), |
116 | .multipliers = multipliers, | 116 | .multipliers = multipliers, |
117 | .nr_multipliers = ARRAY_SIZE(multipliers), | 117 | .nr_multipliers = ARRAY_SIZE(multipliers), |
118 | }; | 118 | }; |
119 | 119 | ||
120 | static struct clk_div4_table div4_table = { | ||
121 | .div_mult_table = &div4_div_mult_table, | ||
122 | }; | ||
123 | |||
120 | enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, | 124 | enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, |
121 | DIV4_SIUA, DIV4_SIUB, DIV4_NR }; | 125 | DIV4_SIUA, DIV4_SIUB, DIV4_NR }; |
122 | 126 | ||
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c index ea38b554dc05..15db6d521c5c 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c | |||
@@ -110,19 +110,22 @@ struct clk *main_clks[] = { | |||
110 | static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; | 110 | static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; |
111 | static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 }; | 111 | static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 }; |
112 | 112 | ||
113 | static struct clk_div_mult_table div4_table = { | 113 | static struct clk_div_mult_table div4_div_mult_table = { |
114 | .divisors = divisors, | 114 | .divisors = divisors, |
115 | .nr_divisors = ARRAY_SIZE(divisors), | 115 | .nr_divisors = ARRAY_SIZE(divisors), |
116 | .multipliers = multipliers, | 116 | .multipliers = multipliers, |
117 | .nr_multipliers = ARRAY_SIZE(multipliers), | 117 | .nr_multipliers = ARRAY_SIZE(multipliers), |
118 | }; | 118 | }; |
119 | 119 | ||
120 | enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, | 120 | static struct clk_div4_table div4_table = { |
121 | DIV4_SIUA, DIV4_SIUB, DIV4_IRDA, DIV4_NR }; | 121 | .div_mult_table = &div4_div_mult_table, |
122 | }; | ||
122 | 123 | ||
123 | #define DIV4(_str, _reg, _bit, _mask, _flags) \ | 124 | #define DIV4(_str, _reg, _bit, _mask, _flags) \ |
124 | SH_CLK_DIV4(_str, &pll_clk, _reg, _bit, _mask, _flags) | 125 | SH_CLK_DIV4(_str, &pll_clk, _reg, _bit, _mask, _flags) |
125 | 126 | ||
127 | enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, DIV4_NR }; | ||
128 | |||
126 | struct clk div4_clks[DIV4_NR] = { | 129 | struct clk div4_clks[DIV4_NR] = { |
127 | [DIV4_I] = DIV4("cpu_clk", FRQCR, 20, 0x1fef, CLK_ENABLE_ON_INIT), | 130 | [DIV4_I] = DIV4("cpu_clk", FRQCR, 20, 0x1fef, CLK_ENABLE_ON_INIT), |
128 | [DIV4_U] = DIV4("umem_clk", FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT), | 131 | [DIV4_U] = DIV4("umem_clk", FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT), |
@@ -130,9 +133,19 @@ struct clk div4_clks[DIV4_NR] = { | |||
130 | [DIV4_B] = DIV4("bus_clk", FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT), | 133 | [DIV4_B] = DIV4("bus_clk", FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT), |
131 | [DIV4_B3] = DIV4("b3_clk", FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT), | 134 | [DIV4_B3] = DIV4("b3_clk", FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT), |
132 | [DIV4_P] = DIV4("peripheral_clk", FRQCR, 0, 0x1fff, 0), | 135 | [DIV4_P] = DIV4("peripheral_clk", FRQCR, 0, 0x1fff, 0), |
136 | }; | ||
137 | |||
138 | enum { DIV4_IRDA, DIV4_ENABLE_NR }; | ||
139 | |||
140 | struct clk div4_enable_clks[DIV4_ENABLE_NR] = { | ||
141 | [DIV4_IRDA] = DIV4("irda_clk", IRDACLKCR, 0, 0x1fff, 0), | ||
142 | }; | ||
143 | |||
144 | enum { DIV4_SIUA, DIV4_SIUB, DIV4_REPARENT_NR }; | ||
145 | |||
146 | struct clk div4_reparent_clks[DIV4_REPARENT_NR] = { | ||
133 | [DIV4_SIUA] = DIV4("siua_clk", SCLKACR, 0, 0x1fff, 0), | 147 | [DIV4_SIUA] = DIV4("siua_clk", SCLKACR, 0, 0x1fff, 0), |
134 | [DIV4_SIUB] = DIV4("siub_clk", SCLKBCR, 0, 0x1fff, 0), | 148 | [DIV4_SIUB] = DIV4("siub_clk", SCLKBCR, 0, 0x1fff, 0), |
135 | [DIV4_IRDA] = DIV4("irda_clk", IRDACLKCR, 0, 0x1fff, 0), | ||
136 | }; | 149 | }; |
137 | 150 | ||
138 | struct clk div6_clks[] = { | 151 | struct clk div6_clks[] = { |
@@ -189,6 +202,14 @@ int __init arch_clk_init(void) | |||
189 | ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); | 202 | ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); |
190 | 203 | ||
191 | if (!ret) | 204 | if (!ret) |
205 | ret = sh_clk_div4_enable_register(div4_enable_clks, | ||
206 | DIV4_ENABLE_NR, &div4_table); | ||
207 | |||
208 | if (!ret) | ||
209 | ret = sh_clk_div4_reparent_register(div4_reparent_clks, | ||
210 | DIV4_REPARENT_NR, &div4_table); | ||
211 | |||
212 | if (!ret) | ||
192 | ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks)); | 213 | ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks)); |
193 | 214 | ||
194 | if (!ret) | 215 | if (!ret) |
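[The div4 handling in these SH-Mobile CPG files gains one level of indirection: the raw clk_div_mult_table is wrapped in a clk_div4_table, and clocks that need to be stopped outright (irda_clk) or reparented (the SIU clocks) are split out and registered through dedicated helpers. A condensed sketch of the registration flow, assembled from the hunks above and assumed to sit inside arch_clk_init():]

	static struct clk_div4_table div4_table = {
		.div_mult_table	= &div4_div_mult_table,
	};

	ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
	if (!ret)
		ret = sh_clk_div4_enable_register(div4_enable_clks,
						  DIV4_ENABLE_NR, &div4_table);
	if (!ret)
		ret = sh_clk_div4_reparent_register(div4_reparent_clks,
						    DIV4_REPARENT_NR, &div4_table);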
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c index 20a31c2255a8..50babe01fe44 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c | |||
@@ -110,15 +110,18 @@ struct clk *main_clks[] = { | |||
110 | static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; | 110 | static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; |
111 | static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 }; | 111 | static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 }; |
112 | 112 | ||
113 | static struct clk_div_mult_table div4_table = { | 113 | static struct clk_div_mult_table div4_div_mult_table = { |
114 | .divisors = divisors, | 114 | .divisors = divisors, |
115 | .nr_divisors = ARRAY_SIZE(divisors), | 115 | .nr_divisors = ARRAY_SIZE(divisors), |
116 | .multipliers = multipliers, | 116 | .multipliers = multipliers, |
117 | .nr_multipliers = ARRAY_SIZE(multipliers), | 117 | .nr_multipliers = ARRAY_SIZE(multipliers), |
118 | }; | 118 | }; |
119 | 119 | ||
120 | enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, | 120 | static struct clk_div4_table div4_table = { |
121 | DIV4_SIUA, DIV4_SIUB, DIV4_IRDA, DIV4_NR }; | 121 | .div_mult_table = &div4_div_mult_table, |
122 | }; | ||
123 | |||
124 | enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, DIV4_NR }; | ||
122 | 125 | ||
123 | #define DIV4(_str, _reg, _bit, _mask, _flags) \ | 126 | #define DIV4(_str, _reg, _bit, _mask, _flags) \ |
124 | SH_CLK_DIV4(_str, &pll_clk, _reg, _bit, _mask, _flags) | 127 | SH_CLK_DIV4(_str, &pll_clk, _reg, _bit, _mask, _flags) |
@@ -130,11 +133,20 @@ struct clk div4_clks[DIV4_NR] = { | |||
130 | [DIV4_B] = DIV4("bus_clk", FRQCR, 8, 0x0dbf, CLK_ENABLE_ON_INIT), | 133 | [DIV4_B] = DIV4("bus_clk", FRQCR, 8, 0x0dbf, CLK_ENABLE_ON_INIT), |
131 | [DIV4_B3] = DIV4("b3_clk", FRQCR, 4, 0x0db4, CLK_ENABLE_ON_INIT), | 134 | [DIV4_B3] = DIV4("b3_clk", FRQCR, 4, 0x0db4, CLK_ENABLE_ON_INIT), |
132 | [DIV4_P] = DIV4("peripheral_clk", FRQCR, 0, 0x0dbf, 0), | 135 | [DIV4_P] = DIV4("peripheral_clk", FRQCR, 0, 0x0dbf, 0), |
133 | [DIV4_SIUA] = DIV4("siua_clk", SCLKACR, 0, 0x0dbf, 0), | 136 | }; |
134 | [DIV4_SIUB] = DIV4("siub_clk", SCLKBCR, 0, 0x0dbf, 0), | 137 | |
138 | enum { DIV4_IRDA, DIV4_ENABLE_NR }; | ||
139 | |||
140 | struct clk div4_enable_clks[DIV4_ENABLE_NR] = { | ||
135 | [DIV4_IRDA] = DIV4("irda_clk", IRDACLKCR, 0, 0x0dbf, 0), | 141 | [DIV4_IRDA] = DIV4("irda_clk", IRDACLKCR, 0, 0x0dbf, 0), |
136 | }; | 142 | }; |
137 | 143 | ||
144 | enum { DIV4_SIUA, DIV4_SIUB, DIV4_REPARENT_NR }; | ||
145 | |||
146 | struct clk div4_reparent_clks[DIV4_REPARENT_NR] = { | ||
147 | [DIV4_SIUA] = DIV4("siua_clk", SCLKACR, 0, 0x0dbf, 0), | ||
148 | [DIV4_SIUB] = DIV4("siub_clk", SCLKBCR, 0, 0x0dbf, 0), | ||
149 | }; | ||
138 | struct clk div6_clks[] = { | 150 | struct clk div6_clks[] = { |
139 | SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0), | 151 | SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0), |
140 | }; | 152 | }; |
@@ -216,6 +228,14 @@ int __init arch_clk_init(void) | |||
216 | ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); | 228 | ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); |
217 | 229 | ||
218 | if (!ret) | 230 | if (!ret) |
231 | ret = sh_clk_div4_enable_register(div4_enable_clks, | ||
232 | DIV4_ENABLE_NR, &div4_table); | ||
233 | |||
234 | if (!ret) | ||
235 | ret = sh_clk_div4_reparent_register(div4_reparent_clks, | ||
236 | DIV4_REPARENT_NR, &div4_table); | ||
237 | |||
238 | if (!ret) | ||
219 | ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks)); | 239 | ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks)); |
220 | 240 | ||
221 | if (!ret) | 241 | if (!ret) |
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c index 9db743802f06..6707061fbf54 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c | |||
@@ -127,13 +127,28 @@ struct clk *main_clks[] = { | |||
127 | &div3_clk, | 127 | &div3_clk, |
128 | }; | 128 | }; |
129 | 129 | ||
130 | static void div4_kick(struct clk *clk) | ||
131 | { | ||
132 | unsigned long value; | ||
133 | |||
134 | /* set KICK bit in FRQCRA to update hardware setting */ | ||
135 | value = __raw_readl(FRQCRA); | ||
136 | value |= (1 << 31); | ||
137 | __raw_writel(value, FRQCRA); | ||
138 | } | ||
139 | |||
130 | static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 0, 24, 32, 36, 48, 0, 72 }; | 140 | static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 0, 24, 32, 36, 48, 0, 72 }; |
131 | 141 | ||
132 | static struct clk_div_mult_table div4_table = { | 142 | static struct clk_div_mult_table div4_div_mult_table = { |
133 | .divisors = divisors, | 143 | .divisors = divisors, |
134 | .nr_divisors = ARRAY_SIZE(divisors), | 144 | .nr_divisors = ARRAY_SIZE(divisors), |
135 | }; | 145 | }; |
136 | 146 | ||
147 | static struct clk_div4_table div4_table = { | ||
148 | .div_mult_table = &div4_div_mult_table, | ||
149 | .kick = div4_kick, | ||
150 | }; | ||
151 | |||
137 | enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_P, DIV4_M1, DIV4_NR }; | 152 | enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_P, DIV4_M1, DIV4_NR }; |
138 | 153 | ||
139 | #define DIV4(_str, _reg, _bit, _mask, _flags) \ | 154 | #define DIV4(_str, _reg, _bit, _mask, _flags) \ |
@@ -144,7 +159,7 @@ struct clk div4_clks[DIV4_NR] = { | |||
144 | [DIV4_SH] = DIV4("shyway_clk", FRQCRA, 12, 0x2f7c, CLK_ENABLE_ON_INIT), | 159 | [DIV4_SH] = DIV4("shyway_clk", FRQCRA, 12, 0x2f7c, CLK_ENABLE_ON_INIT), |
145 | [DIV4_B] = DIV4("bus_clk", FRQCRA, 8, 0x2f7c, CLK_ENABLE_ON_INIT), | 160 | [DIV4_B] = DIV4("bus_clk", FRQCRA, 8, 0x2f7c, CLK_ENABLE_ON_INIT), |
146 | [DIV4_P] = DIV4("peripheral_clk", FRQCRA, 0, 0x2f7c, 0), | 161 | [DIV4_P] = DIV4("peripheral_clk", FRQCRA, 0, 0x2f7c, 0), |
147 | [DIV4_M1] = DIV4("vpu_clk", FRQCRB, 4, 0x2f7c, 0), | 162 | [DIV4_M1] = DIV4("vpu_clk", FRQCRB, 4, 0x2f7c, CLK_ENABLE_ON_INIT), |
148 | }; | 163 | }; |
149 | 164 | ||
150 | struct clk div6_clks[] = { | 165 | struct clk div6_clks[] = { |
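[sh7724 additionally wires a .kick callback into its div4 table: divider values written to FRQCRA only take effect once the KICK bit is set, so the framework can nudge the hardware after a rate change. The hook, as added above, reduced to its essentials:]

	static void div4_kick(struct clk *clk)
	{
		unsigned long value;

		/* set the KICK bit in FRQCRA so the new dividers take effect */
		value = __raw_readl(FRQCRA);
		value |= (1 << 31);
		__raw_writel(value, FRQCRA);
	}

	static struct clk_div4_table div4_table = {
		.div_mult_table	= &div4_div_mult_table,
		.kick		= div4_kick,
	};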
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c index ddc235ca9664..86aae60677dc 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c | |||
@@ -35,7 +35,7 @@ static struct clk_ops sh7757_master_clk_ops = { | |||
35 | 35 | ||
36 | static void module_clk_recalc(struct clk *clk) | 36 | static void module_clk_recalc(struct clk *clk) |
37 | { | 37 | { |
38 | int idx = ctrl_inl(FRQCR) & 0x0000000f; | 38 | int idx = __raw_readl(FRQCR) & 0x0000000f; |
39 | clk->rate = clk->parent->rate / p1fc_divisors[idx]; | 39 | clk->rate = clk->parent->rate / p1fc_divisors[idx]; |
40 | } | 40 | } |
41 | 41 | ||
@@ -45,7 +45,7 @@ static struct clk_ops sh7757_module_clk_ops = { | |||
45 | 45 | ||
46 | static void bus_clk_recalc(struct clk *clk) | 46 | static void bus_clk_recalc(struct clk *clk) |
47 | { | 47 | { |
48 | int idx = (ctrl_inl(FRQCR) >> 8) & 0x0000000f; | 48 | int idx = (__raw_readl(FRQCR) >> 8) & 0x0000000f; |
49 | clk->rate = clk->parent->rate / bfc_divisors[idx]; | 49 | clk->rate = clk->parent->rate / bfc_divisors[idx]; |
50 | } | 50 | } |
51 | 51 | ||
@@ -55,7 +55,7 @@ static struct clk_ops sh7757_bus_clk_ops = { | |||
55 | 55 | ||
56 | static void cpu_clk_recalc(struct clk *clk) | 56 | static void cpu_clk_recalc(struct clk *clk) |
57 | { | 57 | { |
58 | int idx = (ctrl_inl(FRQCR) >> 20) & 0x0000000f; | 58 | int idx = (__raw_readl(FRQCR) >> 20) & 0x0000000f; |
59 | clk->rate = clk->parent->rate / ifc_divisors[idx]; | 59 | clk->rate = clk->parent->rate / ifc_divisors[idx]; |
60 | } | 60 | } |
61 | 61 | ||
@@ -78,7 +78,7 @@ void __init arch_init_clk_ops(struct clk_ops **ops, int idx) | |||
78 | 78 | ||
79 | static void shyway_clk_recalc(struct clk *clk) | 79 | static void shyway_clk_recalc(struct clk *clk) |
80 | { | 80 | { |
81 | int idx = (ctrl_inl(FRQCR) >> 12) & 0x0000000f; | 81 | int idx = (__raw_readl(FRQCR) >> 12) & 0x0000000f; |
82 | clk->rate = clk->parent->rate / sfc_divisors[idx]; | 82 | clk->rate = clk->parent->rate / sfc_divisors[idx]; |
83 | } | 83 | } |
84 | 84 | ||
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7763.c b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c index 370cd47642ef..9f401163e71e 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7763.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c | |||
@@ -22,7 +22,7 @@ static int cfc_divisors[] = { 1, 1, 4, 1, 1, 1, 1, 1 }; | |||
22 | 22 | ||
23 | static void master_clk_init(struct clk *clk) | 23 | static void master_clk_init(struct clk *clk) |
24 | { | 24 | { |
25 | clk->rate *= p0fc_divisors[(ctrl_inl(FRQCR) >> 4) & 0x07]; | 25 | clk->rate *= p0fc_divisors[(__raw_readl(FRQCR) >> 4) & 0x07]; |
26 | } | 26 | } |
27 | 27 | ||
28 | static struct clk_ops sh7763_master_clk_ops = { | 28 | static struct clk_ops sh7763_master_clk_ops = { |
@@ -31,7 +31,7 @@ static struct clk_ops sh7763_master_clk_ops = { | |||
31 | 31 | ||
32 | static unsigned long module_clk_recalc(struct clk *clk) | 32 | static unsigned long module_clk_recalc(struct clk *clk) |
33 | { | 33 | { |
34 | int idx = ((ctrl_inl(FRQCR) >> 4) & 0x07); | 34 | int idx = ((__raw_readl(FRQCR) >> 4) & 0x07); |
35 | return clk->parent->rate / p0fc_divisors[idx]; | 35 | return clk->parent->rate / p0fc_divisors[idx]; |
36 | } | 36 | } |
37 | 37 | ||
@@ -41,7 +41,7 @@ static struct clk_ops sh7763_module_clk_ops = { | |||
41 | 41 | ||
42 | static unsigned long bus_clk_recalc(struct clk *clk) | 42 | static unsigned long bus_clk_recalc(struct clk *clk) |
43 | { | 43 | { |
44 | int idx = ((ctrl_inl(FRQCR) >> 16) & 0x07); | 44 | int idx = ((__raw_readl(FRQCR) >> 16) & 0x07); |
45 | return clk->parent->rate / bfc_divisors[idx]; | 45 | return clk->parent->rate / bfc_divisors[idx]; |
46 | } | 46 | } |
47 | 47 | ||
@@ -68,7 +68,7 @@ void __init arch_init_clk_ops(struct clk_ops **ops, int idx) | |||
68 | 68 | ||
69 | static unsigned long shyway_clk_recalc(struct clk *clk) | 69 | static unsigned long shyway_clk_recalc(struct clk *clk) |
70 | { | 70 | { |
71 | int idx = ((ctrl_inl(FRQCR) >> 20) & 0x07); | 71 | int idx = ((__raw_readl(FRQCR) >> 20) & 0x07); |
72 | return clk->parent->rate / cfc_divisors[idx]; | 72 | return clk->parent->rate / cfc_divisors[idx]; |
73 | } | 73 | } |
74 | 74 | ||
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7770.c b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c index e0b896769205..9e3354365d40 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7770.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c | |||
@@ -21,7 +21,7 @@ static int pfc_divisors[] = { 1, 8, 1,10,12,16, 1, 1 }; | |||
21 | 21 | ||
22 | static void master_clk_init(struct clk *clk) | 22 | static void master_clk_init(struct clk *clk) |
23 | { | 23 | { |
24 | clk->rate *= pfc_divisors[(ctrl_inl(FRQCR) >> 28) & 0x000f]; | 24 | clk->rate *= pfc_divisors[(__raw_readl(FRQCR) >> 28) & 0x000f]; |
25 | } | 25 | } |
26 | 26 | ||
27 | static struct clk_ops sh7770_master_clk_ops = { | 27 | static struct clk_ops sh7770_master_clk_ops = { |
@@ -30,7 +30,7 @@ static struct clk_ops sh7770_master_clk_ops = { | |||
30 | 30 | ||
31 | static unsigned long module_clk_recalc(struct clk *clk) | 31 | static unsigned long module_clk_recalc(struct clk *clk) |
32 | { | 32 | { |
33 | int idx = ((ctrl_inl(FRQCR) >> 28) & 0x000f); | 33 | int idx = ((__raw_readl(FRQCR) >> 28) & 0x000f); |
34 | return clk->parent->rate / pfc_divisors[idx]; | 34 | return clk->parent->rate / pfc_divisors[idx]; |
35 | } | 35 | } |
36 | 36 | ||
@@ -40,7 +40,7 @@ static struct clk_ops sh7770_module_clk_ops = { | |||
40 | 40 | ||
41 | static unsigned long bus_clk_recalc(struct clk *clk) | 41 | static unsigned long bus_clk_recalc(struct clk *clk) |
42 | { | 42 | { |
43 | int idx = (ctrl_inl(FRQCR) & 0x000f); | 43 | int idx = (__raw_readl(FRQCR) & 0x000f); |
44 | return clk->parent->rate / bfc_divisors[idx]; | 44 | return clk->parent->rate / bfc_divisors[idx]; |
45 | } | 45 | } |
46 | 46 | ||
@@ -50,7 +50,7 @@ static struct clk_ops sh7770_bus_clk_ops = { | |||
50 | 50 | ||
51 | static unsigned long cpu_clk_recalc(struct clk *clk) | 51 | static unsigned long cpu_clk_recalc(struct clk *clk) |
52 | { | 52 | { |
53 | int idx = ((ctrl_inl(FRQCR) >> 24) & 0x000f); | 53 | int idx = ((__raw_readl(FRQCR) >> 24) & 0x000f); |
54 | return clk->parent->rate / ifc_divisors[idx]; | 54 | return clk->parent->rate / ifc_divisors[idx]; |
55 | } | 55 | } |
56 | 56 | ||
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7780.c b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c index a249d823578e..150963a6001e 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7780.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c | |||
@@ -22,7 +22,7 @@ static int cfc_divisors[] = { 1, 1, 4, 1, 6, 1, 1, 1 }; | |||
22 | 22 | ||
23 | static void master_clk_init(struct clk *clk) | 23 | static void master_clk_init(struct clk *clk) |
24 | { | 24 | { |
25 | clk->rate *= pfc_divisors[ctrl_inl(FRQCR) & 0x0003]; | 25 | clk->rate *= pfc_divisors[__raw_readl(FRQCR) & 0x0003]; |
26 | } | 26 | } |
27 | 27 | ||
28 | static struct clk_ops sh7780_master_clk_ops = { | 28 | static struct clk_ops sh7780_master_clk_ops = { |
@@ -31,7 +31,7 @@ static struct clk_ops sh7780_master_clk_ops = { | |||
31 | 31 | ||
32 | static unsigned long module_clk_recalc(struct clk *clk) | 32 | static unsigned long module_clk_recalc(struct clk *clk) |
33 | { | 33 | { |
34 | int idx = (ctrl_inl(FRQCR) & 0x0003); | 34 | int idx = (__raw_readl(FRQCR) & 0x0003); |
35 | return clk->parent->rate / pfc_divisors[idx]; | 35 | return clk->parent->rate / pfc_divisors[idx]; |
36 | } | 36 | } |
37 | 37 | ||
@@ -41,7 +41,7 @@ static struct clk_ops sh7780_module_clk_ops = { | |||
41 | 41 | ||
42 | static unsigned long bus_clk_recalc(struct clk *clk) | 42 | static unsigned long bus_clk_recalc(struct clk *clk) |
43 | { | 43 | { |
44 | int idx = ((ctrl_inl(FRQCR) >> 16) & 0x0007); | 44 | int idx = ((__raw_readl(FRQCR) >> 16) & 0x0007); |
45 | return clk->parent->rate / bfc_divisors[idx]; | 45 | return clk->parent->rate / bfc_divisors[idx]; |
46 | } | 46 | } |
47 | 47 | ||
@@ -51,7 +51,7 @@ static struct clk_ops sh7780_bus_clk_ops = { | |||
51 | 51 | ||
52 | static unsigned long cpu_clk_recalc(struct clk *clk) | 52 | static unsigned long cpu_clk_recalc(struct clk *clk) |
53 | { | 53 | { |
54 | int idx = ((ctrl_inl(FRQCR) >> 24) & 0x0001); | 54 | int idx = ((__raw_readl(FRQCR) >> 24) & 0x0001); |
55 | return clk->parent->rate / ifc_divisors[idx]; | 55 | return clk->parent->rate / ifc_divisors[idx]; |
56 | } | 56 | } |
57 | 57 | ||
@@ -74,7 +74,7 @@ void __init arch_init_clk_ops(struct clk_ops **ops, int idx) | |||
74 | 74 | ||
75 | static unsigned long shyway_clk_recalc(struct clk *clk) | 75 | static unsigned long shyway_clk_recalc(struct clk *clk) |
76 | { | 76 | { |
77 | int idx = ((ctrl_inl(FRQCR) >> 20) & 0x0007); | 77 | int idx = ((__raw_readl(FRQCR) >> 20) & 0x0007); |
78 | return clk->parent->rate / cfc_divisors[idx]; | 78 | return clk->parent->rate / cfc_divisors[idx]; |
79 | } | 79 | } |
80 | 80 | ||
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c index 73abfbf2f16d..d997f0a25b10 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c | |||
@@ -57,11 +57,15 @@ static struct clk *clks[] = { | |||
57 | static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18, | 57 | static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18, |
58 | 24, 32, 36, 48 }; | 58 | 24, 32, 36, 48 }; |
59 | 59 | ||
60 | static struct clk_div_mult_table div4_table = { | 60 | static struct clk_div_mult_table div4_div_mult_table = { |
61 | .divisors = div2, | 61 | .divisors = div2, |
62 | .nr_divisors = ARRAY_SIZE(div2), | 62 | .nr_divisors = ARRAY_SIZE(div2), |
63 | }; | 63 | }; |
64 | 64 | ||
65 | static struct clk_div4_table div4_table = { | ||
66 | .div_mult_table = &div4_div_mult_table, | ||
67 | }; | ||
68 | |||
65 | enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_GA, | 69 | enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_GA, |
66 | DIV4_DU, DIV4_P, DIV4_NR }; | 70 | DIV4_DU, DIV4_P, DIV4_NR }; |
67 | 71 | ||
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c index a0e8869071ac..af69fd468703 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c | |||
@@ -3,11 +3,7 @@ | |||
3 | * | 3 | * |
4 | * SH7786 support for the clock framework | 4 | * SH7786 support for the clock framework |
5 | * | 5 | * |
6 | * Copyright (C) 2008, 2009 Renesas Solutions Corp. | 6 | * Copyright (C) 2010 Paul Mundt |
7 | * Kuninori Morimoto <morimoto.kuninori@renesas.com> | ||
8 | * | ||
9 | * Based on SH7785 | ||
10 | * Copyright (C) 2007 Paul Mundt | ||
11 | * | 7 | * |
12 | * This file is subject to the terms and conditions of the GNU General Public | 8 | * This file is subject to the terms and conditions of the GNU General Public |
13 | * License. See the file "COPYING" in the main directory of this archive | 9 | * License. See the file "COPYING" in the main directory of this archive |
@@ -15,127 +11,127 @@ | |||
15 | */ | 11 | */ |
16 | #include <linux/init.h> | 12 | #include <linux/init.h> |
17 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/clk.h> | ||
15 | #include <linux/io.h> | ||
18 | #include <asm/clock.h> | 16 | #include <asm/clock.h> |
19 | #include <asm/freq.h> | 17 | #include <asm/freq.h> |
20 | #include <asm/io.h> | ||
21 | |||
22 | static int ifc_divisors[] = { 1, 2, 4, 1 }; | ||
23 | static int sfc_divisors[] = { 1, 1, 4, 1 }; | ||
24 | static int bfc_divisors[] = { 1, 1, 1, 1, 1, 12, 16, 1, | ||
25 | 24, 32, 1, 1, 1, 1, 1, 1 }; | ||
26 | static int mfc_divisors[] = { 1, 1, 4, 1 }; | ||
27 | static int pfc_divisors[] = { 1, 1, 1, 1, 1, 1, 16, 1, | ||
28 | 24, 32, 1, 48, 1, 1, 1, 1 }; | ||
29 | 18 | ||
30 | static void master_clk_init(struct clk *clk) | 19 | /* |
31 | { | 20 | * Default rate for the root input clock, reset this with clk_set_rate() |
32 | clk->rate *= pfc_divisors[ctrl_inl(FRQMR1) & 0x000f]; | 21 | * from the platform code. |
33 | } | 22 | */ |
34 | 23 | static struct clk extal_clk = { | |
35 | static struct clk_ops sh7786_master_clk_ops = { | 24 | .name = "extal", |
36 | .init = master_clk_init, | 25 | .id = -1, |
26 | .rate = 33333333, | ||
37 | }; | 27 | }; |
38 | 28 | ||
39 | static unsigned long module_clk_recalc(struct clk *clk) | 29 | static unsigned long pll_recalc(struct clk *clk) |
40 | { | 30 | { |
41 | int idx = (ctrl_inl(FRQMR1) & 0x000f); | 31 | int multiplier; |
42 | return clk->parent->rate / pfc_divisors[idx]; | ||
43 | } | ||
44 | 32 | ||
45 | static struct clk_ops sh7786_module_clk_ops = { | 33 | /* |
46 | .recalc = module_clk_recalc, | 34 | * Clock modes 0, 1, and 2 use an x64 multiplier against PLL1, |
47 | }; | 35 | * while modes 3, 4, and 5 use an x32. |
36 | */ | ||
37 | multiplier = (sh_mv.mv_mode_pins() & 0xf) < 3 ? 64 : 32; | ||
48 | 38 | ||
49 | static unsigned long bus_clk_recalc(struct clk *clk) | 39 | return clk->parent->rate * multiplier; |
50 | { | ||
51 | int idx = ((ctrl_inl(FRQMR1) >> 16) & 0x000f); | ||
52 | return clk->parent->rate / bfc_divisors[idx]; | ||
53 | } | 40 | } |
54 | 41 | ||
55 | static struct clk_ops sh7786_bus_clk_ops = { | 42 | static struct clk_ops pll_clk_ops = { |
56 | .recalc = bus_clk_recalc, | 43 | .recalc = pll_recalc, |
57 | }; | 44 | }; |
58 | 45 | ||
59 | static unsigned long cpu_clk_recalc(struct clk *clk) | 46 | static struct clk pll_clk = { |
60 | { | 47 | .name = "pll_clk", |
61 | int idx = ((ctrl_inl(FRQMR1) >> 28) & 0x0003); | 48 | .id = -1, |
62 | return clk->parent->rate / ifc_divisors[idx]; | 49 | .ops = &pll_clk_ops, |
63 | } | 50 | .parent = &extal_clk, |
64 | 51 | .flags = CLK_ENABLE_ON_INIT, | |
65 | static struct clk_ops sh7786_cpu_clk_ops = { | ||
66 | .recalc = cpu_clk_recalc, | ||
67 | }; | 52 | }; |
68 | 53 | ||
69 | static struct clk_ops *sh7786_clk_ops[] = { | 54 | static struct clk *clks[] = { |
70 | &sh7786_master_clk_ops, | 55 | &extal_clk, |
71 | &sh7786_module_clk_ops, | 56 | &pll_clk, |
72 | &sh7786_bus_clk_ops, | ||
73 | &sh7786_cpu_clk_ops, | ||
74 | }; | 57 | }; |
75 | 58 | ||
76 | void __init arch_init_clk_ops(struct clk_ops **ops, int idx) | 59 | static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18, |
77 | { | 60 | 24, 32, 36, 48 }; |
78 | if (idx < ARRAY_SIZE(sh7786_clk_ops)) | ||
79 | *ops = sh7786_clk_ops[idx]; | ||
80 | } | ||
81 | 61 | ||
82 | static unsigned long shyway_clk_recalc(struct clk *clk) | 62 | static struct clk_div_mult_table div4_div_mult_table = { |
83 | { | 63 | .divisors = div2, |
84 | int idx = ((ctrl_inl(FRQMR1) >> 20) & 0x0003); | 64 | .nr_divisors = ARRAY_SIZE(div2), |
85 | return clk->parent->rate / sfc_divisors[idx]; | ||
86 | } | ||
87 | |||
88 | static struct clk_ops sh7786_shyway_clk_ops = { | ||
89 | .recalc = shyway_clk_recalc, | ||
90 | }; | 65 | }; |
91 | 66 | ||
92 | static struct clk sh7786_shyway_clk = { | 67 | static struct clk_div4_table div4_table = { |
93 | .name = "shyway_clk", | 68 | .div_mult_table = &div4_div_mult_table, |
94 | .flags = CLK_ENABLE_ON_INIT, | ||
95 | .ops = &sh7786_shyway_clk_ops, | ||
96 | }; | 69 | }; |
97 | 70 | ||
98 | static unsigned long ddr_clk_recalc(struct clk *clk) | 71 | enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_DU, DIV4_P, DIV4_NR }; |
99 | { | ||
100 | int idx = ((ctrl_inl(FRQMR1) >> 12) & 0x0003); | ||
101 | return clk->parent->rate / mfc_divisors[idx]; | ||
102 | } | ||
103 | 72 | ||
104 | static struct clk_ops sh7786_ddr_clk_ops = { | 73 | #define DIV4(_str, _bit, _mask, _flags) \ |
105 | .recalc = ddr_clk_recalc, | 74 | SH_CLK_DIV4(_str, &pll_clk, FRQMR1, _bit, _mask, _flags) |
106 | }; | ||
107 | 75 | ||
108 | static struct clk sh7786_ddr_clk = { | 76 | struct clk div4_clks[DIV4_NR] = { |
109 | .name = "ddr_clk", | 77 | [DIV4_P] = DIV4("peripheral_clk", 0, 0x0b40, 0), |
110 | .flags = CLK_ENABLE_ON_INIT, | 78 | [DIV4_DU] = DIV4("du_clk", 4, 0x0010, 0), |
111 | .ops = &sh7786_ddr_clk_ops, | 79 | [DIV4_DDR] = DIV4("ddr_clk", 12, 0x0002, CLK_ENABLE_ON_INIT), |
80 | [DIV4_B] = DIV4("bus_clk", 16, 0x0360, CLK_ENABLE_ON_INIT), | ||
81 | [DIV4_SH] = DIV4("shyway_clk", 20, 0x0002, CLK_ENABLE_ON_INIT), | ||
82 | [DIV4_I] = DIV4("cpu_clk", 28, 0x0006, CLK_ENABLE_ON_INIT), | ||
112 | }; | 83 | }; |
113 | 84 | ||
114 | /* | 85 | #define MSTPCR0 0xffc40030 |
115 | * Additional SH7786-specific on-chip clocks that aren't already part of the | 86 | #define MSTPCR1 0xffc40034 |
116 | * clock framework | 87 | |
117 | */ | 88 | static struct clk mstp_clks[] = { |
118 | static struct clk *sh7786_onchip_clocks[] = { | 89 | /* MSTPCR0 */ |
119 | &sh7786_shyway_clk, | 90 | SH_CLK_MSTP32("scif_fck", 5, &div4_clks[DIV4_P], MSTPCR0, 29, 0), |
120 | &sh7786_ddr_clk, | 91 | SH_CLK_MSTP32("scif_fck", 4, &div4_clks[DIV4_P], MSTPCR0, 28, 0), |
92 | SH_CLK_MSTP32("scif_fck", 3, &div4_clks[DIV4_P], MSTPCR0, 27, 0), | ||
93 | SH_CLK_MSTP32("scif_fck", 2, &div4_clks[DIV4_P], MSTPCR0, 26, 0), | ||
94 | SH_CLK_MSTP32("scif_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 25, 0), | ||
95 | SH_CLK_MSTP32("scif_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 24, 0), | ||
96 | SH_CLK_MSTP32("ssi_fck", 3, &div4_clks[DIV4_P], MSTPCR0, 23, 0), | ||
97 | SH_CLK_MSTP32("ssi_fck", 2, &div4_clks[DIV4_P], MSTPCR0, 22, 0), | ||
98 | SH_CLK_MSTP32("ssi_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 21, 0), | ||
99 | SH_CLK_MSTP32("ssi_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 20, 0), | ||
100 | SH_CLK_MSTP32("hac_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 17, 0), | ||
101 | SH_CLK_MSTP32("hac_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 16, 0), | ||
102 | SH_CLK_MSTP32("i2c_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 15, 0), | ||
103 | SH_CLK_MSTP32("i2c_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 14, 0), | ||
104 | SH_CLK_MSTP32("tmu9_11_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 11, 0), | ||
105 | SH_CLK_MSTP32("tmu678_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 10, 0), | ||
106 | SH_CLK_MSTP32("tmu345_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 9, 0), | ||
107 | SH_CLK_MSTP32("tmu012_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 8, 0), | ||
108 | SH_CLK_MSTP32("sdif_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 5, 0), | ||
109 | SH_CLK_MSTP32("sdif_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 4, 0), | ||
110 | SH_CLK_MSTP32("hspi_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 2, 0), | ||
111 | |||
112 | /* MSTPCR1 */ | ||
113 | SH_CLK_MSTP32("usb_fck", -1, NULL, MSTPCR1, 12, 0), | ||
114 | SH_CLK_MSTP32("pcie_fck", 2, NULL, MSTPCR1, 10, 0), | ||
115 | SH_CLK_MSTP32("pcie_fck", 1, NULL, MSTPCR1, 9, 0), | ||
116 | SH_CLK_MSTP32("pcie_fck", 0, NULL, MSTPCR1, 8, 0), | ||
117 | SH_CLK_MSTP32("dmac_11_6_fck", -1, NULL, MSTPCR1, 5, 0), | ||
118 | SH_CLK_MSTP32("dmac_5_0_fck", -1, NULL, MSTPCR1, 4, 0), | ||
119 | SH_CLK_MSTP32("du_fck", -1, NULL, MSTPCR1, 3, 0), | ||
120 | SH_CLK_MSTP32("ether_fck", -1, NULL, MSTPCR1, 2, 0), | ||
121 | }; | 121 | }; |
122 | 122 | ||
123 | int __init arch_clk_init(void) | 123 | int __init arch_clk_init(void) |
124 | { | 124 | { |
125 | struct clk *clk; | ||
126 | int i, ret = 0; | 125 | int i, ret = 0; |
127 | 126 | ||
128 | cpg_clk_init(); | 127 | for (i = 0; i < ARRAY_SIZE(clks); i++) |
129 | 128 | ret |= clk_register(clks[i]); | |
130 | clk = clk_get(NULL, "master_clk"); | ||
131 | for (i = 0; i < ARRAY_SIZE(sh7786_onchip_clocks); i++) { | ||
132 | struct clk *clkp = sh7786_onchip_clocks[i]; | ||
133 | |||
134 | clkp->parent = clk; | ||
135 | ret |= clk_register(clkp); | ||
136 | } | ||
137 | 129 | ||
138 | clk_put(clk); | 130 | if (!ret) |
131 | ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks), | ||
132 | &div4_table); | ||
133 | if (!ret) | ||
134 | ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks)); | ||
139 | 135 | ||
140 | return ret; | 136 | return ret; |
141 | } | 137 | } |
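The sh7786 clock code above is rewritten from the legacy arch_init_clk_ops() divisor tables to the hierarchical model already used by sh7785: a 33.33 MHz extal root (overridable from board code via clk_set_rate(), per the comment), a PLL whose x32/x64 multiplier is picked from the mode pins, SH_CLK_DIV4 clocks derived from FRQMR1, and SH_CLK_MSTP32 gate clocks for the on-chip blocks. For consumers the practical difference is that peripherals now have named, gateable clocks. A rough usage sketch, assuming the arch/sh clk API of this era (clk_get()/clk_enable()/clk_get_rate()); the HSPI clock is picked only because its id of -1 makes the by-name lookup unambiguous:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/kernel.h>

static int __init example_hspi_clk(void)
{
	struct clk *clk;
	int ret;

	/* Look up the gate clock registered in arch_clk_init() above. */
	clk = clk_get(NULL, "hspi_fck");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Ungate the block by clearing its MSTPCR0 bit (bit 2). */
	ret = clk_enable(clk);
	if (ret) {
		clk_put(clk);
		return ret;
	}

	/* Rate is inherited from peripheral_clk (DIV4_P), i.e. PLL / divisor. */
	pr_info("hspi_fck running at %lu Hz\n", clk_get_rate(clk));

	clk_disable(clk);
	clk_put(clk);
	return 0;
}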
diff --git a/arch/sh/kernel/cpu/sh4a/clock-shx3.c b/arch/sh/kernel/cpu/sh4a/clock-shx3.c index 23c27d32d982..e75c57bdfa5e 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-shx3.c +++ b/arch/sh/kernel/cpu/sh4a/clock-shx3.c | |||
@@ -33,7 +33,7 @@ static int cfc_divisors[] = { 1, 1, 4, 6 }; | |||
33 | 33 | ||
34 | static void master_clk_init(struct clk *clk) | 34 | static void master_clk_init(struct clk *clk) |
35 | { | 35 | { |
36 | clk->rate *= pfc_divisors[(ctrl_inl(FRQCR) >> PFC_POS) & PFC_MSK]; | 36 | clk->rate *= pfc_divisors[(__raw_readl(FRQCR) >> PFC_POS) & PFC_MSK]; |
37 | } | 37 | } |
38 | 38 | ||
39 | static struct clk_ops shx3_master_clk_ops = { | 39 | static struct clk_ops shx3_master_clk_ops = { |
@@ -42,7 +42,7 @@ static struct clk_ops shx3_master_clk_ops = { | |||
42 | 42 | ||
43 | static unsigned long module_clk_recalc(struct clk *clk) | 43 | static unsigned long module_clk_recalc(struct clk *clk) |
44 | { | 44 | { |
45 | int idx = ((ctrl_inl(FRQCR) >> PFC_POS) & PFC_MSK); | 45 | int idx = ((__raw_readl(FRQCR) >> PFC_POS) & PFC_MSK); |
46 | return clk->parent->rate / pfc_divisors[idx]; | 46 | return clk->parent->rate / pfc_divisors[idx]; |
47 | } | 47 | } |
48 | 48 | ||
@@ -52,7 +52,7 @@ static struct clk_ops shx3_module_clk_ops = { | |||
52 | 52 | ||
53 | static unsigned long bus_clk_recalc(struct clk *clk) | 53 | static unsigned long bus_clk_recalc(struct clk *clk) |
54 | { | 54 | { |
55 | int idx = ((ctrl_inl(FRQCR) >> BFC_POS) & BFC_MSK); | 55 | int idx = ((__raw_readl(FRQCR) >> BFC_POS) & BFC_MSK); |
56 | return clk->parent->rate / bfc_divisors[idx]; | 56 | return clk->parent->rate / bfc_divisors[idx]; |
57 | } | 57 | } |
58 | 58 | ||
@@ -62,7 +62,7 @@ static struct clk_ops shx3_bus_clk_ops = { | |||
62 | 62 | ||
63 | static unsigned long cpu_clk_recalc(struct clk *clk) | 63 | static unsigned long cpu_clk_recalc(struct clk *clk) |
64 | { | 64 | { |
65 | int idx = ((ctrl_inl(FRQCR) >> IFC_POS) & IFC_MSK); | 65 | int idx = ((__raw_readl(FRQCR) >> IFC_POS) & IFC_MSK); |
66 | return clk->parent->rate / ifc_divisors[idx]; | 66 | return clk->parent->rate / ifc_divisors[idx]; |
67 | } | 67 | } |
68 | 68 | ||
@@ -85,7 +85,7 @@ void __init arch_init_clk_ops(struct clk_ops **ops, int idx) | |||
85 | 85 | ||
86 | static unsigned long shyway_clk_recalc(struct clk *clk) | 86 | static unsigned long shyway_clk_recalc(struct clk *clk) |
87 | { | 87 | { |
88 | int idx = ((ctrl_inl(FRQCR) >> CFC_POS) & CFC_MSK); | 88 | int idx = ((__raw_readl(FRQCR) >> CFC_POS) & CFC_MSK); |
89 | return clk->parent->rate / cfc_divisors[idx]; | 89 | return clk->parent->rate / cfc_divisors[idx]; |
90 | } | 90 | } |
91 | 91 | ||
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c index cb9d07bd59f8..0688a7502f86 100644 --- a/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c | |||
@@ -278,6 +278,7 @@ enum { | |||
278 | HIZA8_LCDC, HIZA8_HIZ, | 278 | HIZA8_LCDC, HIZA8_HIZ, |
279 | HIZA7_LCDC, HIZA7_HIZ, | 279 | HIZA7_LCDC, HIZA7_HIZ, |
280 | HIZA6_LCDC, HIZA6_HIZ, | 280 | HIZA6_LCDC, HIZA6_HIZ, |
281 | HIZB4_SIUA, HIZB4_HIZ, | ||
281 | HIZB1_VIO, HIZB1_HIZ, | 282 | HIZB1_VIO, HIZB1_HIZ, |
282 | HIZB0_VIO, HIZB0_HIZ, | 283 | HIZB0_VIO, HIZB0_HIZ, |
283 | HIZC15_IRQ7, HIZC15_HIZ, | 284 | HIZC15_IRQ7, HIZC15_HIZ, |
@@ -546,7 +547,7 @@ static pinmux_enum_t pinmux_data[] = { | |||
546 | PINMUX_DATA(VIO_VD2_MARK, PSE3_VIO, MSELB9_VIO2, | 547 | PINMUX_DATA(VIO_VD2_MARK, PSE3_VIO, MSELB9_VIO2, |
547 | HIZB0_VIO, FOE_VIO_VD2), | 548 | HIZB0_VIO, FOE_VIO_VD2), |
548 | PINMUX_DATA(VIO_HD2_MARK, PSE3_VIO, MSELB9_VIO2, | 549 | PINMUX_DATA(VIO_HD2_MARK, PSE3_VIO, MSELB9_VIO2, |
549 | HIZB1_VIO, HIZB1_VIO, FCE_VIO_HD2), | 550 | HIZB1_VIO, FCE_VIO_HD2), |
550 | PINMUX_DATA(VIO_CLK2_MARK, PSE3_VIO, MSELB9_VIO2, | 551 | PINMUX_DATA(VIO_CLK2_MARK, PSE3_VIO, MSELB9_VIO2, |
551 | HIZB1_VIO, FRB_VIO_CLK2), | 552 | HIZB1_VIO, FRB_VIO_CLK2), |
552 | 553 | ||
@@ -658,14 +659,14 @@ static pinmux_enum_t pinmux_data[] = { | |||
658 | PINMUX_DATA(SDHICLK_MARK, SDHICLK), | 659 | PINMUX_DATA(SDHICLK_MARK, SDHICLK), |
659 | 660 | ||
660 | /* SIU - Port A */ | 661 | /* SIU - Port A */ |
661 | PINMUX_DATA(SIUAOLR_MARK, PSC13_SIUAOLR, SIUAOLR_SIOF1_SYNC), | 662 | PINMUX_DATA(SIUAOLR_MARK, PSC13_SIUAOLR, HIZB4_SIUA, SIUAOLR_SIOF1_SYNC), |
662 | PINMUX_DATA(SIUAOBT_MARK, PSC14_SIUAOBT, SIUAOBT_SIOF1_SCK), | 663 | PINMUX_DATA(SIUAOBT_MARK, PSC14_SIUAOBT, HIZB4_SIUA, SIUAOBT_SIOF1_SCK), |
663 | PINMUX_DATA(SIUAISLD_MARK, PSC15_SIUAISLD, SIUAISLD_SIOF1_RXD), | 664 | PINMUX_DATA(SIUAISLD_MARK, PSC15_SIUAISLD, HIZB4_SIUA, SIUAISLD_SIOF1_RXD), |
664 | PINMUX_DATA(SIUAILR_MARK, PSC11_SIUAILR, SIUAILR_SIOF1_SS2), | 665 | PINMUX_DATA(SIUAILR_MARK, PSC11_SIUAILR, HIZB4_SIUA, SIUAILR_SIOF1_SS2), |
665 | PINMUX_DATA(SIUAIBT_MARK, PSC12_SIUAIBT, SIUAIBT_SIOF1_SS1), | 666 | PINMUX_DATA(SIUAIBT_MARK, PSC12_SIUAIBT, HIZB4_SIUA, SIUAIBT_SIOF1_SS1), |
666 | PINMUX_DATA(SIUAOSLD_MARK, PSB0_SIUAOSLD, SIUAOSLD_SIOF1_TXD), | 667 | PINMUX_DATA(SIUAOSLD_MARK, PSB0_SIUAOSLD, HIZB4_SIUA, SIUAOSLD_SIOF1_TXD), |
667 | PINMUX_DATA(SIUMCKA_MARK, PSE11_SIUMCKA_SIOF1_MCK, PSB1_SIUMCKA, PTK0), | 668 | PINMUX_DATA(SIUMCKA_MARK, PSE11_SIUMCKA_SIOF1_MCK, HIZB4_SIUA, PSB1_SIUMCKA, PTK0), |
668 | PINMUX_DATA(SIUFCKA_MARK, PSE11_SIUFCKA, PTK0), | 669 | PINMUX_DATA(SIUFCKA_MARK, PSE11_SIUFCKA, HIZB4_SIUA, PTK0), |
669 | 670 | ||
670 | /* SIU - Port B */ | 671 | /* SIU - Port B */ |
671 | PINMUX_DATA(SIUBOLR_MARK, PSB11_SIUBOLR, SIOSTRB1_SIUBOLR), | 672 | PINMUX_DATA(SIUBOLR_MARK, PSB11_SIUBOLR, SIOSTRB1_SIUBOLR), |
@@ -1612,7 +1613,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = { | |||
1612 | 0, 0, | 1613 | 0, 0, |
1613 | 0, 0, | 1614 | 0, 0, |
1614 | 0, 0, | 1615 | 0, 0, |
1615 | 0, 0, | 1616 | HIZB4_SIUA, HIZB4_HIZ, |
1616 | 0, 0, | 1617 | 0, 0, |
1617 | 0, 0, | 1618 | 0, 0, |
1618 | HIZB1_VIO, HIZB1_HIZ, | 1619 | HIZB1_VIO, HIZB1_HIZ, |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c index b5335b5e309c..ef3f97827808 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c | |||
@@ -446,6 +446,8 @@ void __init plat_early_device_setup(void) | |||
446 | 446 | ||
447 | enum { | 447 | enum { |
448 | UNUSED=0, | 448 | UNUSED=0, |
449 | ENABLED, | ||
450 | DISABLED, | ||
449 | 451 | ||
450 | /* interrupt sources */ | 452 | /* interrupt sources */ |
451 | IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, | 453 | IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, |
@@ -461,7 +463,6 @@ enum { | |||
461 | SCIF0, SCIF1, SCIF2, SIOF0, SIOF1, SIO, | 463 | SCIF0, SCIF1, SCIF2, SIOF0, SIOF1, SIO, |
462 | FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, | 464 | FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, |
463 | I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI, | 465 | I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI, |
464 | SDHI0, SDHI1, SDHI2, SDHI3, | ||
465 | CMT, TSIF, SIU, TWODG, | 466 | CMT, TSIF, SIU, TWODG, |
466 | TMU0, TMU1, TMU2, | 467 | TMU0, TMU1, TMU2, |
467 | IRDA, JPU, LCDC, | 468 | IRDA, JPU, LCDC, |
@@ -494,8 +495,8 @@ static struct intc_vect vectors[] __initdata = { | |||
494 | INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0), | 495 | INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0), |
495 | INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20), | 496 | INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20), |
496 | INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60), | 497 | INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60), |
497 | INTC_VECT(SDHI0, 0xe80), INTC_VECT(SDHI1, 0xea0), | 498 | INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0), |
498 | INTC_VECT(SDHI2, 0xec0), INTC_VECT(SDHI3, 0xee0), | 499 | INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0), |
499 | INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20), | 500 | INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20), |
500 | INTC_VECT(SIU, 0xf80), INTC_VECT(TWODG, 0xfa0), | 501 | INTC_VECT(SIU, 0xf80), INTC_VECT(TWODG, 0xfa0), |
501 | INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), | 502 | INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), |
@@ -513,7 +514,6 @@ static struct intc_group groups[] __initdata = { | |||
513 | INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI, | 514 | INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI, |
514 | FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), | 515 | FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), |
515 | INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI), | 516 | INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI), |
516 | INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3), | ||
517 | }; | 517 | }; |
518 | 518 | ||
519 | static struct intc_mask_reg mask_registers[] __initdata = { | 519 | static struct intc_mask_reg mask_registers[] __initdata = { |
@@ -535,7 +535,7 @@ static struct intc_mask_reg mask_registers[] __initdata = { | |||
535 | { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI, | 535 | { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI, |
536 | FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } }, | 536 | FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } }, |
537 | { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ | 537 | { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ |
538 | { SDHI3, SDHI2, SDHI1, SDHI0, 0, 0, TWODG, SIU } }, | 538 | { DISABLED, DISABLED, ENABLED, ENABLED, 0, 0, TWODG, SIU } }, |
539 | { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ | 539 | { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ |
540 | { 0, 0, 0, CMT, 0, USB_USBI1, USB_USBI0, } }, | 540 | { 0, 0, 0, CMT, 0, USB_USBI1, USB_USBI0, } }, |
541 | { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */ | 541 | { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */ |
@@ -573,9 +573,13 @@ static struct intc_mask_reg ack_registers[] __initdata = { | |||
573 | { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, | 573 | { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, |
574 | }; | 574 | }; |
575 | 575 | ||
576 | static DECLARE_INTC_DESC_ACK(intc_desc, "sh7722", vectors, groups, | 576 | static struct intc_desc intc_desc __initdata = { |
577 | mask_registers, prio_registers, sense_registers, | 577 | .name = "sh7722", |
578 | ack_registers); | 578 | .force_enable = ENABLED, |
579 | .force_disable = DISABLED, | ||
580 | .hw = INTC_HW_DESC(vectors, groups, mask_registers, | ||
581 | prio_registers, sense_registers, ack_registers), | ||
582 | }; | ||
579 | 583 | ||
580 | void __init plat_irq_setup(void) | 584 | void __init plat_irq_setup(void) |
581 | { | 585 | { |
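The open-coded intc_desc above replaces the DECLARE_INTC_DESC_ACK() helper so that the new force_enable/force_disable ids can be filled in: the four per-source SDHI vectors are folded into a single SDHI entry, and the IMR8 slots that used to carry SDHI0..SDHI3 now hold ENABLED/DISABLED placeholders, which the intc core presumably resolves once at registration time instead of masking and unmasking them per vector. A toy model of that pass, assuming an MSB-first bit ordering for the mask table (a sketch of the idea, not the real register_intc_controller() implementation):

#include <stdio.h>

enum { UNUSED = 0, ENABLED, DISABLED, TWODG, SIU };

/* Apply the placeholders to one 8-bit mask (IMR) register value. */
static unsigned char apply_forced_bits(unsigned char imr, const int ids[8],
				       int force_enable, int force_disable)
{
	int bit;

	for (bit = 0; bit < 8; bit++) {
		if (ids[bit] == force_enable)
			imr &= ~(1u << (7 - bit));	/* permanently unmasked */
		else if (ids[bit] == force_disable)
			imr |= 1u << (7 - bit);		/* kept masked */
	}

	return imr;
}

int main(void)
{
	/* IMR8 slots from the sh7722 table above: the first two entries
	 * (formerly SDHI3/SDHI2) become DISABLED, the next two (formerly
	 * SDHI1/SDHI0) become ENABLED. */
	const int imr8[8] = { DISABLED, DISABLED, ENABLED, ENABLED,
			      UNUSED, UNUSED, TWODG, SIU };

	printf("IMR8 after forcing: 0x%02x\n",
	       apply_forced_bits(0xff, imr8, ENABLED, DISABLED));
	return 0;
}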
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c index 772b9265d0e4..85c61f624702 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c | |||
@@ -592,14 +592,17 @@ void __init plat_early_device_setup(void) | |||
592 | #define RAMCR_CACHE_L2FC 0x0002 | 592 | #define RAMCR_CACHE_L2FC 0x0002 |
593 | #define RAMCR_CACHE_L2E 0x0001 | 593 | #define RAMCR_CACHE_L2E 0x0001 |
594 | #define L2_CACHE_ENABLE (RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC) | 594 | #define L2_CACHE_ENABLE (RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC) |
595 | void __uses_jump_to_uncached l2_cache_init(void) | 595 | |
596 | void l2_cache_init(void) | ||
596 | { | 597 | { |
597 | /* Enable L2 cache */ | 598 | /* Enable L2 cache */ |
598 | ctrl_outl(L2_CACHE_ENABLE, RAMCR); | 599 | __raw_writel(L2_CACHE_ENABLE, RAMCR); |
599 | } | 600 | } |
600 | 601 | ||
601 | enum { | 602 | enum { |
602 | UNUSED=0, | 603 | UNUSED=0, |
604 | ENABLED, | ||
605 | DISABLED, | ||
603 | 606 | ||
604 | /* interrupt sources */ | 607 | /* interrupt sources */ |
605 | IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, | 608 | IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, |
@@ -622,7 +625,6 @@ enum { | |||
622 | SCIFA_SCIFA1, | 625 | SCIFA_SCIFA1, |
623 | FLCTL_FLSTEI,FLCTL_FLTENDI,FLCTL_FLTREQ0I,FLCTL_FLTREQ1I, | 626 | FLCTL_FLSTEI,FLCTL_FLTENDI,FLCTL_FLTREQ0I,FLCTL_FLTREQ1I, |
624 | I2C_ALI,I2C_TACKI,I2C_WAITI,I2C_DTEI, | 627 | I2C_ALI,I2C_TACKI,I2C_WAITI,I2C_DTEI, |
625 | SDHI0_SDHII0,SDHI0_SDHII1,SDHI0_SDHII2, | ||
626 | CMT_CMTI, | 628 | CMT_CMTI, |
627 | TSIF_TSIFI, | 629 | TSIF_TSIFI, |
628 | SIU_SIUI, | 630 | SIU_SIUI, |
@@ -630,7 +632,6 @@ enum { | |||
630 | TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2, | 632 | TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2, |
631 | IRDA_IRDAI, | 633 | IRDA_IRDAI, |
632 | ATAPI_ATAPII, | 634 | ATAPI_ATAPII, |
633 | SDHI1_SDHII0,SDHI1_SDHII1,SDHI1_SDHII2, | ||
634 | VEU2H1_VEU2HI, | 635 | VEU2H1_VEU2HI, |
635 | LCDC_LCDCI, | 636 | LCDC_LCDCI, |
636 | TMU1_TUNI0,TMU1_TUNI1,TMU1_TUNI2, | 637 | TMU1_TUNI0,TMU1_TUNI1,TMU1_TUNI2, |
@@ -701,9 +702,9 @@ static struct intc_vect vectors[] __initdata = { | |||
701 | INTC_VECT(I2C_WAITI,0xE40), | 702 | INTC_VECT(I2C_WAITI,0xE40), |
702 | INTC_VECT(I2C_DTEI,0xE60), | 703 | INTC_VECT(I2C_DTEI,0xE60), |
703 | 704 | ||
704 | INTC_VECT(SDHI0_SDHII0,0xE80), | 705 | INTC_VECT(SDHI0, 0xE80), |
705 | INTC_VECT(SDHI0_SDHII1,0xEA0), | 706 | INTC_VECT(SDHI0, 0xEA0), |
706 | INTC_VECT(SDHI0_SDHII2,0xEC0), | 707 | INTC_VECT(SDHI0, 0xEC0), |
707 | 708 | ||
708 | INTC_VECT(CMT_CMTI,0xF00), | 709 | INTC_VECT(CMT_CMTI,0xF00), |
709 | INTC_VECT(TSIF_TSIFI,0xF20), | 710 | INTC_VECT(TSIF_TSIFI,0xF20), |
@@ -717,9 +718,9 @@ static struct intc_vect vectors[] __initdata = { | |||
717 | INTC_VECT(IRDA_IRDAI,0x480), | 718 | INTC_VECT(IRDA_IRDAI,0x480), |
718 | INTC_VECT(ATAPI_ATAPII,0x4A0), | 719 | INTC_VECT(ATAPI_ATAPII,0x4A0), |
719 | 720 | ||
720 | INTC_VECT(SDHI1_SDHII0,0x4E0), | 721 | INTC_VECT(SDHI1, 0x4E0), |
721 | INTC_VECT(SDHI1_SDHII1,0x500), | 722 | INTC_VECT(SDHI1, 0x500), |
722 | INTC_VECT(SDHI1_SDHII2,0x520), | 723 | INTC_VECT(SDHI1, 0x520), |
723 | 724 | ||
724 | INTC_VECT(VEU2H1_VEU2HI,0x560), | 725 | INTC_VECT(VEU2H1_VEU2HI,0x560), |
725 | INTC_VECT(LCDC_LCDCI,0x580), | 726 | INTC_VECT(LCDC_LCDCI,0x580), |
@@ -738,15 +739,14 @@ static struct intc_group groups[] __initdata = { | |||
738 | INTC_GROUP(FLCTL,FLCTL_FLSTEI,FLCTL_FLTENDI,FLCTL_FLTREQ0I,FLCTL_FLTREQ1I), | 739 | INTC_GROUP(FLCTL,FLCTL_FLSTEI,FLCTL_FLTENDI,FLCTL_FLTREQ0I,FLCTL_FLTREQ1I), |
739 | INTC_GROUP(I2C,I2C_ALI,I2C_TACKI,I2C_WAITI,I2C_DTEI), | 740 | INTC_GROUP(I2C,I2C_ALI,I2C_TACKI,I2C_WAITI,I2C_DTEI), |
740 | INTC_GROUP(_2DG, _2DG_TRI,_2DG_INI,_2DG_CEI), | 741 | INTC_GROUP(_2DG, _2DG_TRI,_2DG_INI,_2DG_CEI), |
741 | INTC_GROUP(SDHI1, SDHI1_SDHII0,SDHI1_SDHII1,SDHI1_SDHII2), | ||
742 | INTC_GROUP(RTC, RTC_ATI,RTC_PRI,RTC_CUI), | 742 | INTC_GROUP(RTC, RTC_ATI,RTC_PRI,RTC_CUI), |
743 | INTC_GROUP(DMAC1B, DMAC1B_DEI4,DMAC1B_DEI5,DMAC1B_DADERR), | 743 | INTC_GROUP(DMAC1B, DMAC1B_DEI4,DMAC1B_DEI5,DMAC1B_DADERR), |
744 | INTC_GROUP(SDHI0,SDHI0_SDHII0,SDHI0_SDHII1,SDHI0_SDHII2), | ||
745 | }; | 744 | }; |
746 | 745 | ||
747 | static struct intc_mask_reg mask_registers[] __initdata = { | 746 | static struct intc_mask_reg mask_registers[] __initdata = { |
748 | { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */ | 747 | { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */ |
749 | { 0, TMU1_TUNI2,TMU1_TUNI1,TMU1_TUNI0,0,SDHI1_SDHII2,SDHI1_SDHII1,SDHI1_SDHII0} }, | 748 | { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0, |
749 | 0, DISABLED, ENABLED, ENABLED } }, | ||
750 | { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */ | 750 | { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */ |
751 | { VIO_VOUI, VIO_VEU2HI,VIO_BEUI,VIO_CEUI,DMAC0A_DEI3,DMAC0A_DEI2,DMAC0A_DEI1,DMAC0A_DEI0 } }, | 751 | { VIO_VOUI, VIO_VEU2HI,VIO_BEUI,VIO_CEUI,DMAC0A_DEI3,DMAC0A_DEI2,DMAC0A_DEI1,DMAC0A_DEI0 } }, |
752 | { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */ | 752 | { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */ |
@@ -763,7 +763,8 @@ static struct intc_mask_reg mask_registers[] __initdata = { | |||
763 | { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI, | 763 | { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI, |
764 | FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } }, | 764 | FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } }, |
765 | { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ | 765 | { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ |
766 | { 0,SDHI0_SDHII2,SDHI0_SDHII1,SDHI0_SDHII0,0,0,SCIFA_SCIFA2,SIU_SIUI } }, | 766 | { 0, DISABLED, ENABLED, ENABLED, |
767 | 0, 0, SCIFA_SCIFA2, SIU_SIUI } }, | ||
767 | { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ | 768 | { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ |
768 | { 0, 0, 0, CMT_CMTI, 0, 0, USB_USI0,0 } }, | 769 | { 0, 0, 0, CMT_CMTI, 0, 0, USB_USI0,0 } }, |
769 | { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */ | 770 | { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */ |
@@ -803,9 +804,13 @@ static struct intc_mask_reg ack_registers[] __initdata = { | |||
803 | { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, | 804 | { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, |
804 | }; | 805 | }; |
805 | 806 | ||
806 | static DECLARE_INTC_DESC_ACK(intc_desc, "sh7723", vectors, groups, | 807 | static struct intc_desc intc_desc __initdata = { |
807 | mask_registers, prio_registers, sense_registers, | 808 | .name = "sh7723", |
808 | ack_registers); | 809 | .force_enable = ENABLED, |
810 | .force_disable = DISABLED, | ||
811 | .hw = INTC_HW_DESC(vectors, groups, mask_registers, | ||
812 | prio_registers, sense_registers, ack_registers), | ||
813 | }; | ||
809 | 814 | ||
810 | void __init plat_irq_setup(void) | 815 | void __init plat_irq_setup(void) |
811 | { | 816 | { |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c index d32f96c1cc15..31e3451f7e3d 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c | |||
@@ -714,14 +714,17 @@ void __init plat_early_device_setup(void) | |||
714 | #define RAMCR_CACHE_L2FC 0x0002 | 714 | #define RAMCR_CACHE_L2FC 0x0002 |
715 | #define RAMCR_CACHE_L2E 0x0001 | 715 | #define RAMCR_CACHE_L2E 0x0001 |
716 | #define L2_CACHE_ENABLE (RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC) | 716 | #define L2_CACHE_ENABLE (RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC) |
717 | void __uses_jump_to_uncached l2_cache_init(void) | 717 | |
718 | void l2_cache_init(void) | ||
718 | { | 719 | { |
719 | /* Enable L2 cache */ | 720 | /* Enable L2 cache */ |
720 | ctrl_outl(L2_CACHE_ENABLE, RAMCR); | 721 | __raw_writel(L2_CACHE_ENABLE, RAMCR); |
721 | } | 722 | } |
722 | 723 | ||
723 | enum { | 724 | enum { |
724 | UNUSED = 0, | 725 | UNUSED = 0, |
726 | ENABLED, | ||
727 | DISABLED, | ||
725 | 728 | ||
726 | /* interrupt sources */ | 729 | /* interrupt sources */ |
727 | IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, | 730 | IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, |
@@ -750,14 +753,12 @@ enum { | |||
750 | ETHI, | 753 | ETHI, |
751 | I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI, | 754 | I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI, |
752 | I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI, | 755 | I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI, |
753 | SDHI0_SDHII0, SDHI0_SDHII1, SDHI0_SDHII2, SDHI0_SDHII3, | ||
754 | CMT, | 756 | CMT, |
755 | TSIF, | 757 | TSIF, |
756 | FSI, | 758 | FSI, |
757 | SCIFA5, | 759 | SCIFA5, |
758 | TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2, | 760 | TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2, |
759 | IRDA, | 761 | IRDA, |
760 | SDHI1_SDHII0, SDHI1_SDHII1, SDHI1_SDHII2, | ||
761 | JPU, | 762 | JPU, |
762 | _2DDMAC, | 763 | _2DDMAC, |
763 | MMC_MMC2I, MMC_MMC3I, | 764 | MMC_MMC2I, MMC_MMC3I, |
@@ -839,10 +840,10 @@ static struct intc_vect vectors[] __initdata = { | |||
839 | INTC_VECT(I2C0_WAITI, 0xE40), | 840 | INTC_VECT(I2C0_WAITI, 0xE40), |
840 | INTC_VECT(I2C0_DTEI, 0xE60), | 841 | INTC_VECT(I2C0_DTEI, 0xE60), |
841 | 842 | ||
842 | INTC_VECT(SDHI0_SDHII0, 0xE80), | 843 | INTC_VECT(SDHI0, 0xE80), |
843 | INTC_VECT(SDHI0_SDHII1, 0xEA0), | 844 | INTC_VECT(SDHI0, 0xEA0), |
844 | INTC_VECT(SDHI0_SDHII2, 0xEC0), | 845 | INTC_VECT(SDHI0, 0xEC0), |
845 | INTC_VECT(SDHI0_SDHII3, 0xEE0), | 846 | INTC_VECT(SDHI0, 0xEE0), |
846 | 847 | ||
847 | INTC_VECT(CMT, 0xF00), | 848 | INTC_VECT(CMT, 0xF00), |
848 | INTC_VECT(TSIF, 0xF20), | 849 | INTC_VECT(TSIF, 0xF20), |
@@ -855,9 +856,9 @@ static struct intc_vect vectors[] __initdata = { | |||
855 | 856 | ||
856 | INTC_VECT(IRDA, 0x480), | 857 | INTC_VECT(IRDA, 0x480), |
857 | 858 | ||
858 | INTC_VECT(SDHI1_SDHII0, 0x4E0), | 859 | INTC_VECT(SDHI1, 0x4E0), |
859 | INTC_VECT(SDHI1_SDHII1, 0x500), | 860 | INTC_VECT(SDHI1, 0x500), |
860 | INTC_VECT(SDHI1_SDHII2, 0x520), | 861 | INTC_VECT(SDHI1, 0x520), |
861 | 862 | ||
862 | INTC_VECT(JPU, 0x560), | 863 | INTC_VECT(JPU, 0x560), |
863 | INTC_VECT(_2DDMAC, 0x4A0), | 864 | INTC_VECT(_2DDMAC, 0x4A0), |
@@ -883,8 +884,6 @@ static struct intc_group groups[] __initdata = { | |||
883 | INTC_GROUP(DMAC0B, DMAC0B_DEI4, DMAC0B_DEI5, DMAC0B_DADERR), | 884 | INTC_GROUP(DMAC0B, DMAC0B_DEI4, DMAC0B_DEI5, DMAC0B_DADERR), |
884 | INTC_GROUP(I2C0, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI), | 885 | INTC_GROUP(I2C0, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI), |
885 | INTC_GROUP(I2C1, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI), | 886 | INTC_GROUP(I2C1, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI), |
886 | INTC_GROUP(SDHI0, SDHI0_SDHII0, SDHI0_SDHII1, SDHI0_SDHII2, SDHI0_SDHII3), | ||
887 | INTC_GROUP(SDHI1, SDHI1_SDHII0, SDHI1_SDHII1, SDHI1_SDHII2), | ||
888 | INTC_GROUP(SPU, SPU_SPUI0, SPU_SPUI1), | 887 | INTC_GROUP(SPU, SPU_SPUI0, SPU_SPUI1), |
889 | INTC_GROUP(MMCIF, MMC_MMC2I, MMC_MMC3I), | 888 | INTC_GROUP(MMCIF, MMC_MMC2I, MMC_MMC3I), |
890 | }; | 889 | }; |
@@ -892,7 +891,7 @@ static struct intc_group groups[] __initdata = { | |||
892 | static struct intc_mask_reg mask_registers[] __initdata = { | 891 | static struct intc_mask_reg mask_registers[] __initdata = { |
893 | { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */ | 892 | { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */ |
894 | { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0, | 893 | { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0, |
895 | 0, SDHI1_SDHII2, SDHI1_SDHII1, SDHI1_SDHII0 } }, | 894 | 0, DISABLED, ENABLED, ENABLED } }, |
896 | { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */ | 895 | { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */ |
897 | { VIO_VOU, VIO_VEU1, VIO_BEU0, VIO_CEU0, | 896 | { VIO_VOU, VIO_VEU1, VIO_BEU0, VIO_CEU0, |
898 | DMAC0A_DEI3, DMAC0A_DEI2, DMAC0A_DEI1, DMAC0A_DEI0 } }, | 897 | DMAC0A_DEI3, DMAC0A_DEI2, DMAC0A_DEI1, DMAC0A_DEI0 } }, |
@@ -914,7 +913,7 @@ static struct intc_mask_reg mask_registers[] __initdata = { | |||
914 | { I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI, | 913 | { I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI, |
915 | I2C1_DTEI, I2C1_WAITI, I2C1_TACKI, I2C1_ALI } }, | 914 | I2C1_DTEI, I2C1_WAITI, I2C1_TACKI, I2C1_ALI } }, |
916 | { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ | 915 | { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ |
917 | { SDHI0_SDHII3, SDHI0_SDHII2, SDHI0_SDHII1, SDHI0_SDHII0, | 916 | { DISABLED, DISABLED, ENABLED, ENABLED, |
918 | 0, 0, SCIFA5, FSI } }, | 917 | 0, 0, SCIFA5, FSI } }, |
919 | { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ | 918 | { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ |
920 | { 0, 0, 0, CMT, 0, USB1, USB0, 0 } }, | 919 | { 0, 0, 0, CMT, 0, USB1, USB0, 0 } }, |
@@ -961,9 +960,13 @@ static struct intc_mask_reg ack_registers[] __initdata = { | |||
961 | { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, | 960 | { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, |
962 | }; | 961 | }; |
963 | 962 | ||
964 | static DECLARE_INTC_DESC_ACK(intc_desc, "sh7724", vectors, groups, | 963 | static struct intc_desc intc_desc __initdata = { |
965 | mask_registers, prio_registers, sense_registers, | 964 | .name = "sh7724", |
966 | ack_registers); | 965 | .force_enable = ENABLED, |
966 | .force_disable = DISABLED, | ||
967 | .hw = INTC_HW_DESC(vectors, groups, mask_registers, | ||
968 | prio_registers, sense_registers, ack_registers), | ||
969 | }; | ||
967 | 970 | ||
968 | void __init plat_irq_setup(void) | 971 | void __init plat_irq_setup(void) |
969 | { | 972 | { |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c index 37e32efbbaa7..e75edf58796a 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c | |||
@@ -487,17 +487,17 @@ static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7757-irl4567", vectors_irl4567, | |||
487 | void __init plat_irq_setup(void) | 487 | void __init plat_irq_setup(void) |
488 | { | 488 | { |
489 | /* disable IRQ3-0 + IRQ7-4 */ | 489 | /* disable IRQ3-0 + IRQ7-4 */ |
490 | ctrl_outl(0xff000000, INTC_INTMSK0); | 490 | __raw_writel(0xff000000, INTC_INTMSK0); |
491 | 491 | ||
492 | /* disable IRL3-0 + IRL7-4 */ | 492 | /* disable IRL3-0 + IRL7-4 */ |
493 | ctrl_outl(0xc0000000, INTC_INTMSK1); | 493 | __raw_writel(0xc0000000, INTC_INTMSK1); |
494 | ctrl_outl(0xfffefffe, INTC_INTMSK2); | 494 | __raw_writel(0xfffefffe, INTC_INTMSK2); |
495 | 495 | ||
496 | /* select IRL mode for IRL3-0 + IRL7-4 */ | 496 | /* select IRL mode for IRL3-0 + IRL7-4 */ |
497 | ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0); | 497 | __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0); |
498 | 498 | ||
499 | /* disable holding function, ie enable "SH-4 Mode" */ | 499 | /* disable holding function, ie enable "SH-4 Mode" */ |
500 | ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00200000, INTC_ICR0); | 500 | __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0); |
501 | 501 | ||
502 | register_intc_controller(&intc_desc); | 502 | register_intc_controller(&intc_desc); |
503 | } | 503 | } |
@@ -507,32 +507,32 @@ void __init plat_irq_setup_pins(int mode) | |||
507 | switch (mode) { | 507 | switch (mode) { |
508 | case IRQ_MODE_IRQ7654: | 508 | case IRQ_MODE_IRQ7654: |
509 | /* select IRQ mode for IRL7-4 */ | 509 | /* select IRQ mode for IRL7-4 */ |
510 | ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00400000, INTC_ICR0); | 510 | __raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0); |
511 | register_intc_controller(&intc_desc_irq4567); | 511 | register_intc_controller(&intc_desc_irq4567); |
512 | break; | 512 | break; |
513 | case IRQ_MODE_IRQ3210: | 513 | case IRQ_MODE_IRQ3210: |
514 | /* select IRQ mode for IRL3-0 */ | 514 | /* select IRQ mode for IRL3-0 */ |
515 | ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00800000, INTC_ICR0); | 515 | __raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0); |
516 | register_intc_controller(&intc_desc_irq0123); | 516 | register_intc_controller(&intc_desc_irq0123); |
517 | break; | 517 | break; |
518 | case IRQ_MODE_IRL7654: | 518 | case IRQ_MODE_IRL7654: |
519 | /* enable IRL7-4 but don't provide any masking */ | 519 | /* enable IRL7-4 but don't provide any masking */ |
520 | ctrl_outl(0x40000000, INTC_INTMSKCLR1); | 520 | __raw_writel(0x40000000, INTC_INTMSKCLR1); |
521 | ctrl_outl(0x0000fffe, INTC_INTMSKCLR2); | 521 | __raw_writel(0x0000fffe, INTC_INTMSKCLR2); |
522 | break; | 522 | break; |
523 | case IRQ_MODE_IRL3210: | 523 | case IRQ_MODE_IRL3210: |
524 | /* enable IRL0-3 but don't provide any masking */ | 524 | /* enable IRL0-3 but don't provide any masking */ |
525 | ctrl_outl(0x80000000, INTC_INTMSKCLR1); | 525 | __raw_writel(0x80000000, INTC_INTMSKCLR1); |
526 | ctrl_outl(0xfffe0000, INTC_INTMSKCLR2); | 526 | __raw_writel(0xfffe0000, INTC_INTMSKCLR2); |
527 | break; | 527 | break; |
528 | case IRQ_MODE_IRL7654_MASK: | 528 | case IRQ_MODE_IRL7654_MASK: |
529 | /* enable IRL7-4 and mask using cpu intc controller */ | 529 | /* enable IRL7-4 and mask using cpu intc controller */ |
530 | ctrl_outl(0x40000000, INTC_INTMSKCLR1); | 530 | __raw_writel(0x40000000, INTC_INTMSKCLR1); |
531 | register_intc_controller(&intc_desc_irl4567); | 531 | register_intc_controller(&intc_desc_irl4567); |
532 | break; | 532 | break; |
533 | case IRQ_MODE_IRL3210_MASK: | 533 | case IRQ_MODE_IRL3210_MASK: |
534 | /* enable IRL0-3 and mask using cpu intc controller */ | 534 | /* enable IRL0-3 and mask using cpu intc controller */ |
535 | ctrl_outl(0x80000000, INTC_INTMSKCLR1); | 535 | __raw_writel(0x80000000, INTC_INTMSKCLR1); |
536 | register_intc_controller(&intc_desc_irl0123); | 536 | register_intc_controller(&intc_desc_irl0123); |
537 | break; | 537 | break; |
538 | default: | 538 | default: |
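This file and the sh7763/sh7770/sh7780/sh7785/sh7786 setup files below get a purely mechanical accessor conversion: the SH-specific ctrl_inl()/ctrl_outl() helpers are swapped one-for-one for the generic __raw_readl()/__raw_writel() MMIO accessors of the same width and the same raw (no barrier, no byteswap) semantics, so read-modify-write sequences such as the INTC_ICR0 updates behave exactly as before. The idiom, written out once with a placeholder register address (on sh these accessors take the plain register constant, as in the hunks above; the real INTC_ICR0 value lives in the per-CPU headers):

#include <linux/io.h>

#define EXAMPLE_ICR0	0xffd00000	/* placeholder address for the sketch */

static void select_irl_sh4_mode(void)
{
	/* clear the IRQ-mode bits: IRL mode for IRL3-0 + IRL7-4 */
	__raw_writel(__raw_readl(EXAMPLE_ICR0) & ~0x00c00000, EXAMPLE_ICR0);

	/* set bit 21: disable the holding function, i.e. "SH-4 Mode" */
	__raw_writel(__raw_readl(EXAMPLE_ICR0) | 0x00200000, EXAMPLE_ICR0);
}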
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c index 6aba26fec416..7f6b0a5f7f82 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c | |||
@@ -538,11 +538,11 @@ static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7763-irl3210", irl_vectors, | |||
538 | void __init plat_irq_setup(void) | 538 | void __init plat_irq_setup(void) |
539 | { | 539 | { |
540 | /* disable IRQ7-0 */ | 540 | /* disable IRQ7-0 */ |
541 | ctrl_outl(0xff000000, INTC_INTMSK0); | 541 | __raw_writel(0xff000000, INTC_INTMSK0); |
542 | 542 | ||
543 | /* disable IRL3-0 + IRL7-4 */ | 543 | /* disable IRL3-0 + IRL7-4 */ |
544 | ctrl_outl(0xc0000000, INTC_INTMSK1); | 544 | __raw_writel(0xc0000000, INTC_INTMSK1); |
545 | ctrl_outl(0xfffefffe, INTC_INTMSK2); | 545 | __raw_writel(0xfffefffe, INTC_INTMSK2); |
546 | 546 | ||
547 | register_intc_controller(&intc_desc); | 547 | register_intc_controller(&intc_desc); |
548 | } | 548 | } |
@@ -552,27 +552,27 @@ void __init plat_irq_setup_pins(int mode) | |||
552 | switch (mode) { | 552 | switch (mode) { |
553 | case IRQ_MODE_IRQ: | 553 | case IRQ_MODE_IRQ: |
554 | /* select IRQ mode for IRL3-0 + IRL7-4 */ | 554 | /* select IRQ mode for IRL3-0 + IRL7-4 */ |
555 | ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00c00000, INTC_ICR0); | 555 | __raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0); |
556 | register_intc_controller(&intc_irq_desc); | 556 | register_intc_controller(&intc_irq_desc); |
557 | break; | 557 | break; |
558 | case IRQ_MODE_IRL7654: | 558 | case IRQ_MODE_IRL7654: |
559 | /* enable IRL7-4 but don't provide any masking */ | 559 | /* enable IRL7-4 but don't provide any masking */ |
560 | ctrl_outl(0x40000000, INTC_INTMSKCLR1); | 560 | __raw_writel(0x40000000, INTC_INTMSKCLR1); |
561 | ctrl_outl(0x0000fffe, INTC_INTMSKCLR2); | 561 | __raw_writel(0x0000fffe, INTC_INTMSKCLR2); |
562 | break; | 562 | break; |
563 | case IRQ_MODE_IRL3210: | 563 | case IRQ_MODE_IRL3210: |
564 | /* enable IRL0-3 but don't provide any masking */ | 564 | /* enable IRL0-3 but don't provide any masking */ |
565 | ctrl_outl(0x80000000, INTC_INTMSKCLR1); | 565 | __raw_writel(0x80000000, INTC_INTMSKCLR1); |
566 | ctrl_outl(0xfffe0000, INTC_INTMSKCLR2); | 566 | __raw_writel(0xfffe0000, INTC_INTMSKCLR2); |
567 | break; | 567 | break; |
568 | case IRQ_MODE_IRL7654_MASK: | 568 | case IRQ_MODE_IRL7654_MASK: |
569 | /* enable IRL7-4 and mask using cpu intc controller */ | 569 | /* enable IRL7-4 and mask using cpu intc controller */ |
570 | ctrl_outl(0x40000000, INTC_INTMSKCLR1); | 570 | __raw_writel(0x40000000, INTC_INTMSKCLR1); |
571 | register_intc_controller(&intc_irl7654_desc); | 571 | register_intc_controller(&intc_irl7654_desc); |
572 | break; | 572 | break; |
573 | case IRQ_MODE_IRL3210_MASK: | 573 | case IRQ_MODE_IRL3210_MASK: |
574 | /* enable IRL0-3 and mask using cpu intc controller */ | 574 | /* enable IRL0-3 and mask using cpu intc controller */ |
575 | ctrl_outl(0x80000000, INTC_INTMSKCLR1); | 575 | __raw_writel(0x80000000, INTC_INTMSKCLR1); |
576 | register_intc_controller(&intc_irl3210_desc); | 576 | register_intc_controller(&intc_irl3210_desc); |
577 | break; | 577 | break; |
578 | default: | 578 | default: |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c index c1643bc9590d..86d681ecf90e 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c | |||
@@ -694,17 +694,17 @@ static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7780-irl3210", irl_vectors, | |||
694 | void __init plat_irq_setup(void) | 694 | void __init plat_irq_setup(void) |
695 | { | 695 | { |
696 | /* disable IRQ7-0 */ | 696 | /* disable IRQ7-0 */ |
697 | ctrl_outl(0xff000000, INTC_INTMSK0); | 697 | __raw_writel(0xff000000, INTC_INTMSK0); |
698 | 698 | ||
699 | /* disable IRL3-0 + IRL7-4 */ | 699 | /* disable IRL3-0 + IRL7-4 */ |
700 | ctrl_outl(0xc0000000, INTC_INTMSK1); | 700 | __raw_writel(0xc0000000, INTC_INTMSK1); |
701 | ctrl_outl(0xfffefffe, INTC_INTMSK2); | 701 | __raw_writel(0xfffefffe, INTC_INTMSK2); |
702 | 702 | ||
703 | /* select IRL mode for IRL3-0 + IRL7-4 */ | 703 | /* select IRL mode for IRL3-0 + IRL7-4 */ |
704 | ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0); | 704 | __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0); |
705 | 705 | ||
706 | /* disable holding function, ie enable "SH-4 Mode" */ | 706 | /* disable holding function, ie enable "SH-4 Mode" */ |
707 | ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00200000, INTC_ICR0); | 707 | __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0); |
708 | 708 | ||
709 | register_intc_controller(&intc_desc); | 709 | register_intc_controller(&intc_desc); |
710 | } | 710 | } |
@@ -714,27 +714,27 @@ void __init plat_irq_setup_pins(int mode) | |||
714 | switch (mode) { | 714 | switch (mode) { |
715 | case IRQ_MODE_IRQ: | 715 | case IRQ_MODE_IRQ: |
716 | /* select IRQ mode for IRL3-0 + IRL7-4 */ | 716 | /* select IRQ mode for IRL3-0 + IRL7-4 */ |
717 | ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00c00000, INTC_ICR0); | 717 | __raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0); |
718 | register_intc_controller(&intc_irq_desc); | 718 | register_intc_controller(&intc_irq_desc); |
719 | break; | 719 | break; |
720 | case IRQ_MODE_IRL7654: | 720 | case IRQ_MODE_IRL7654: |
721 | /* enable IRL7-4 but don't provide any masking */ | 721 | /* enable IRL7-4 but don't provide any masking */ |
722 | ctrl_outl(0x40000000, INTC_INTMSKCLR1); | 722 | __raw_writel(0x40000000, INTC_INTMSKCLR1); |
723 | ctrl_outl(0x0000fffe, INTC_INTMSKCLR2); | 723 | __raw_writel(0x0000fffe, INTC_INTMSKCLR2); |
724 | break; | 724 | break; |
725 | case IRQ_MODE_IRL3210: | 725 | case IRQ_MODE_IRL3210: |
726 | /* enable IRL0-3 but don't provide any masking */ | 726 | /* enable IRL0-3 but don't provide any masking */ |
727 | ctrl_outl(0x80000000, INTC_INTMSKCLR1); | 727 | __raw_writel(0x80000000, INTC_INTMSKCLR1); |
728 | ctrl_outl(0xfffe0000, INTC_INTMSKCLR2); | 728 | __raw_writel(0xfffe0000, INTC_INTMSKCLR2); |
729 | break; | 729 | break; |
730 | case IRQ_MODE_IRL7654_MASK: | 730 | case IRQ_MODE_IRL7654_MASK: |
731 | /* enable IRL7-4 and mask using cpu intc controller */ | 731 | /* enable IRL7-4 and mask using cpu intc controller */ |
732 | ctrl_outl(0x40000000, INTC_INTMSKCLR1); | 732 | __raw_writel(0x40000000, INTC_INTMSKCLR1); |
733 | register_intc_controller(&intc_irl7654_desc); | 733 | register_intc_controller(&intc_irl7654_desc); |
734 | break; | 734 | break; |
735 | case IRQ_MODE_IRL3210_MASK: | 735 | case IRQ_MODE_IRL3210_MASK: |
736 | /* enable IRL0-3 and mask using cpu intc controller */ | 736 | /* enable IRL0-3 and mask using cpu intc controller */ |
737 | ctrl_outl(0x80000000, INTC_INTMSKCLR1); | 737 | __raw_writel(0x80000000, INTC_INTMSKCLR1); |
738 | register_intc_controller(&intc_irl3210_desc); | 738 | register_intc_controller(&intc_irl3210_desc); |
739 | break; | 739 | break; |
740 | default: | 740 | default: |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c index c310558490d5..f8f21618d785 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c | |||
@@ -461,17 +461,17 @@ static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7780-irl3210", irl_vectors, | |||
461 | void __init plat_irq_setup(void) | 461 | void __init plat_irq_setup(void) |
462 | { | 462 | { |
463 | /* disable IRQ7-0 */ | 463 | /* disable IRQ7-0 */ |
464 | ctrl_outl(0xff000000, INTC_INTMSK0); | 464 | __raw_writel(0xff000000, INTC_INTMSK0); |
465 | 465 | ||
466 | /* disable IRL3-0 + IRL7-4 */ | 466 | /* disable IRL3-0 + IRL7-4 */ |
467 | ctrl_outl(0xc0000000, INTC_INTMSK1); | 467 | __raw_writel(0xc0000000, INTC_INTMSK1); |
468 | ctrl_outl(0xfffefffe, INTC_INTMSK2); | 468 | __raw_writel(0xfffefffe, INTC_INTMSK2); |
469 | 469 | ||
470 | /* select IRL mode for IRL3-0 + IRL7-4 */ | 470 | /* select IRL mode for IRL3-0 + IRL7-4 */ |
471 | ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0); | 471 | __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0); |
472 | 472 | ||
473 | /* disable holding function, ie enable "SH-4 Mode" */ | 473 | /* disable holding function, ie enable "SH-4 Mode" */ |
474 | ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00200000, INTC_ICR0); | 474 | __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0); |
475 | 475 | ||
476 | register_intc_controller(&intc_desc); | 476 | register_intc_controller(&intc_desc); |
477 | } | 477 | } |
@@ -481,27 +481,27 @@ void __init plat_irq_setup_pins(int mode) | |||
481 | switch (mode) { | 481 | switch (mode) { |
482 | case IRQ_MODE_IRQ: | 482 | case IRQ_MODE_IRQ: |
483 | /* select IRQ mode for IRL3-0 + IRL7-4 */ | 483 | /* select IRQ mode for IRL3-0 + IRL7-4 */ |
484 | ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00c00000, INTC_ICR0); | 484 | __raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0); |
485 | register_intc_controller(&intc_irq_desc); | 485 | register_intc_controller(&intc_irq_desc); |
486 | break; | 486 | break; |
487 | case IRQ_MODE_IRL7654: | 487 | case IRQ_MODE_IRL7654: |
488 | /* enable IRL7-4 but don't provide any masking */ | 488 | /* enable IRL7-4 but don't provide any masking */ |
489 | ctrl_outl(0x40000000, INTC_INTMSKCLR1); | 489 | __raw_writel(0x40000000, INTC_INTMSKCLR1); |
490 | ctrl_outl(0x0000fffe, INTC_INTMSKCLR2); | 490 | __raw_writel(0x0000fffe, INTC_INTMSKCLR2); |
491 | break; | 491 | break; |
492 | case IRQ_MODE_IRL3210: | 492 | case IRQ_MODE_IRL3210: |
493 | /* enable IRL0-3 but don't provide any masking */ | 493 | /* enable IRL0-3 but don't provide any masking */ |
494 | ctrl_outl(0x80000000, INTC_INTMSKCLR1); | 494 | __raw_writel(0x80000000, INTC_INTMSKCLR1); |
495 | ctrl_outl(0xfffe0000, INTC_INTMSKCLR2); | 495 | __raw_writel(0xfffe0000, INTC_INTMSKCLR2); |
496 | break; | 496 | break; |
497 | case IRQ_MODE_IRL7654_MASK: | 497 | case IRQ_MODE_IRL7654_MASK: |
498 | /* enable IRL7-4 and mask using cpu intc controller */ | 498 | /* enable IRL7-4 and mask using cpu intc controller */ |
499 | ctrl_outl(0x40000000, INTC_INTMSKCLR1); | 499 | __raw_writel(0x40000000, INTC_INTMSKCLR1); |
500 | register_intc_controller(&intc_irl7654_desc); | 500 | register_intc_controller(&intc_irl7654_desc); |
501 | break; | 501 | break; |
502 | case IRQ_MODE_IRL3210_MASK: | 502 | case IRQ_MODE_IRL3210_MASK: |
503 | /* enable IRL0-3 and mask using cpu intc controller */ | 503 | /* enable IRL0-3 and mask using cpu intc controller */ |
504 | ctrl_outl(0x80000000, INTC_INTMSKCLR1); | 504 | __raw_writel(0x80000000, INTC_INTMSKCLR1); |
505 | register_intc_controller(&intc_irl3210_desc); | 505 | register_intc_controller(&intc_irl3210_desc); |
506 | break; | 506 | break; |
507 | default: | 507 | default: |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c index f685b9b21999..23448d8c6711 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c | |||
@@ -541,17 +541,17 @@ static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7785-irl4567", vectors_irl4567, | |||
541 | void __init plat_irq_setup(void) | 541 | void __init plat_irq_setup(void) |
542 | { | 542 | { |
543 | /* disable IRQ3-0 + IRQ7-4 */ | 543 | /* disable IRQ3-0 + IRQ7-4 */ |
544 | ctrl_outl(0xff000000, INTC_INTMSK0); | 544 | __raw_writel(0xff000000, INTC_INTMSK0); |
545 | 545 | ||
546 | /* disable IRL3-0 + IRL7-4 */ | 546 | /* disable IRL3-0 + IRL7-4 */ |
547 | ctrl_outl(0xc0000000, INTC_INTMSK1); | 547 | __raw_writel(0xc0000000, INTC_INTMSK1); |
548 | ctrl_outl(0xfffefffe, INTC_INTMSK2); | 548 | __raw_writel(0xfffefffe, INTC_INTMSK2); |
549 | 549 | ||
550 | /* select IRL mode for IRL3-0 + IRL7-4 */ | 550 | /* select IRL mode for IRL3-0 + IRL7-4 */ |
551 | ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0); | 551 | __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0); |
552 | 552 | ||
553 | /* disable holding function, ie enable "SH-4 Mode" */ | 553 | /* disable holding function, ie enable "SH-4 Mode" */ |
554 | ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00200000, INTC_ICR0); | 554 | __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0); |
555 | 555 | ||
556 | register_intc_controller(&intc_desc); | 556 | register_intc_controller(&intc_desc); |
557 | } | 557 | } |
@@ -561,32 +561,32 @@ void __init plat_irq_setup_pins(int mode) | |||
561 | switch (mode) { | 561 | switch (mode) { |
562 | case IRQ_MODE_IRQ7654: | 562 | case IRQ_MODE_IRQ7654: |
563 | /* select IRQ mode for IRL7-4 */ | 563 | /* select IRQ mode for IRL7-4 */ |
564 | ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00400000, INTC_ICR0); | 564 | __raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0); |
565 | register_intc_controller(&intc_desc_irq4567); | 565 | register_intc_controller(&intc_desc_irq4567); |
566 | break; | 566 | break; |
567 | case IRQ_MODE_IRQ3210: | 567 | case IRQ_MODE_IRQ3210: |
568 | /* select IRQ mode for IRL3-0 */ | 568 | /* select IRQ mode for IRL3-0 */ |
569 | ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00800000, INTC_ICR0); | 569 | __raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0); |
570 | register_intc_controller(&intc_desc_irq0123); | 570 | register_intc_controller(&intc_desc_irq0123); |
571 | break; | 571 | break; |
572 | case IRQ_MODE_IRL7654: | 572 | case IRQ_MODE_IRL7654: |
573 | /* enable IRL7-4 but don't provide any masking */ | 573 | /* enable IRL7-4 but don't provide any masking */ |
574 | ctrl_outl(0x40000000, INTC_INTMSKCLR1); | 574 | __raw_writel(0x40000000, INTC_INTMSKCLR1); |
575 | ctrl_outl(0x0000fffe, INTC_INTMSKCLR2); | 575 | __raw_writel(0x0000fffe, INTC_INTMSKCLR2); |
576 | break; | 576 | break; |
577 | case IRQ_MODE_IRL3210: | 577 | case IRQ_MODE_IRL3210: |
578 | /* enable IRL0-3 but don't provide any masking */ | 578 | /* enable IRL0-3 but don't provide any masking */ |
579 | ctrl_outl(0x80000000, INTC_INTMSKCLR1); | 579 | __raw_writel(0x80000000, INTC_INTMSKCLR1); |
580 | ctrl_outl(0xfffe0000, INTC_INTMSKCLR2); | 580 | __raw_writel(0xfffe0000, INTC_INTMSKCLR2); |
581 | break; | 581 | break; |
582 | case IRQ_MODE_IRL7654_MASK: | 582 | case IRQ_MODE_IRL7654_MASK: |
583 | /* enable IRL7-4 and mask using cpu intc controller */ | 583 | /* enable IRL7-4 and mask using cpu intc controller */ |
584 | ctrl_outl(0x40000000, INTC_INTMSKCLR1); | 584 | __raw_writel(0x40000000, INTC_INTMSKCLR1); |
585 | register_intc_controller(&intc_desc_irl4567); | 585 | register_intc_controller(&intc_desc_irl4567); |
586 | break; | 586 | break; |
587 | case IRQ_MODE_IRL3210_MASK: | 587 | case IRQ_MODE_IRL3210_MASK: |
588 | /* enable IRL0-3 and mask using cpu intc controller */ | 588 | /* enable IRL0-3 and mask using cpu intc controller */ |
589 | ctrl_outl(0x80000000, INTC_INTMSKCLR1); | 589 | __raw_writel(0x80000000, INTC_INTMSKCLR1); |
590 | register_intc_controller(&intc_desc_irl0123); | 590 | register_intc_controller(&intc_desc_irl0123); |
591 | break; | 591 | break; |
592 | default: | 592 | default: |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c index 71673487ace0..7e585320710a 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c | |||
@@ -867,14 +867,14 @@ static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7786-irl4567", vectors_irl4567, | |||
867 | void __init plat_irq_setup(void) | 867 | void __init plat_irq_setup(void) |
868 | { | 868 | { |
869 | /* disable IRQ3-0 + IRQ7-4 */ | 869 | /* disable IRQ3-0 + IRQ7-4 */ |
870 | ctrl_outl(0xff000000, INTC_INTMSK0); | 870 | __raw_writel(0xff000000, INTC_INTMSK0); |
871 | 871 | ||
872 | /* disable IRL3-0 + IRL7-4 */ | 872 | /* disable IRL3-0 + IRL7-4 */ |
873 | ctrl_outl(0xc0000000, INTC_INTMSK1); | 873 | __raw_writel(0xc0000000, INTC_INTMSK1); |
874 | ctrl_outl(0xfffefffe, INTC_INTMSK2); | 874 | __raw_writel(0xfffefffe, INTC_INTMSK2); |
875 | 875 | ||
876 | /* select IRL mode for IRL3-0 + IRL7-4 */ | 876 | /* select IRL mode for IRL3-0 + IRL7-4 */ |
877 | ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0); | 877 | __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0); |
878 | 878 | ||
879 | register_intc_controller(&intc_desc); | 879 | register_intc_controller(&intc_desc); |
880 | } | 880 | } |
@@ -884,32 +884,32 @@ void __init plat_irq_setup_pins(int mode) | |||
884 | switch (mode) { | 884 | switch (mode) { |
885 | case IRQ_MODE_IRQ7654: | 885 | case IRQ_MODE_IRQ7654: |
886 | /* select IRQ mode for IRL7-4 */ | 886 | /* select IRQ mode for IRL7-4 */ |
887 | ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00400000, INTC_ICR0); | 887 | __raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0); |
888 | register_intc_controller(&intc_desc_irq4567); | 888 | register_intc_controller(&intc_desc_irq4567); |
889 | break; | 889 | break; |
890 | case IRQ_MODE_IRQ3210: | 890 | case IRQ_MODE_IRQ3210: |
891 | /* select IRQ mode for IRL3-0 */ | 891 | /* select IRQ mode for IRL3-0 */ |
892 | ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00800000, INTC_ICR0); | 892 | __raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0); |
893 | register_intc_controller(&intc_desc_irq0123); | 893 | register_intc_controller(&intc_desc_irq0123); |
894 | break; | 894 | break; |
895 | case IRQ_MODE_IRL7654: | 895 | case IRQ_MODE_IRL7654: |
896 | /* enable IRL7-4 but don't provide any masking */ | 896 | /* enable IRL7-4 but don't provide any masking */ |
897 | ctrl_outl(0x40000000, INTC_INTMSKCLR1); | 897 | __raw_writel(0x40000000, INTC_INTMSKCLR1); |
898 | ctrl_outl(0x0000fffe, INTC_INTMSKCLR2); | 898 | __raw_writel(0x0000fffe, INTC_INTMSKCLR2); |
899 | break; | 899 | break; |
900 | case IRQ_MODE_IRL3210: | 900 | case IRQ_MODE_IRL3210: |
901 | /* enable IRL0-3 but don't provide any masking */ | 901 | /* enable IRL0-3 but don't provide any masking */ |
902 | ctrl_outl(0x80000000, INTC_INTMSKCLR1); | 902 | __raw_writel(0x80000000, INTC_INTMSKCLR1); |
903 | ctrl_outl(0xfffe0000, INTC_INTMSKCLR2); | 903 | __raw_writel(0xfffe0000, INTC_INTMSKCLR2); |
904 | break; | 904 | break; |
905 | case IRQ_MODE_IRL7654_MASK: | 905 | case IRQ_MODE_IRL7654_MASK: |
906 | /* enable IRL7-4 and mask using cpu intc controller */ | 906 | /* enable IRL7-4 and mask using cpu intc controller */ |
907 | ctrl_outl(0x40000000, INTC_INTMSKCLR1); | 907 | __raw_writel(0x40000000, INTC_INTMSKCLR1); |
908 | register_intc_controller(&intc_desc_irl4567); | 908 | register_intc_controller(&intc_desc_irl4567); |
909 | break; | 909 | break; |
910 | case IRQ_MODE_IRL3210_MASK: | 910 | case IRQ_MODE_IRL3210_MASK: |
911 | /* enable IRL0-3 and mask using cpu intc controller */ | 911 | /* enable IRL0-3 and mask using cpu intc controller */ |
912 | ctrl_outl(0x80000000, INTC_INTMSKCLR1); | 912 | __raw_writel(0x80000000, INTC_INTMSKCLR1); |
913 | register_intc_controller(&intc_desc_irl0123); | 913 | register_intc_controller(&intc_desc_irl0123); |
914 | break; | 914 | break; |
915 | default: | 915 | default: |
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c index 5863e0c4d02f..11bf4c1e25c0 100644 --- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c +++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c | |||
@@ -78,7 +78,10 @@ void __init plat_prepare_cpus(unsigned int max_cpus) | |||
78 | 78 | ||
79 | void plat_start_cpu(unsigned int cpu, unsigned long entry_point) | 79 | void plat_start_cpu(unsigned int cpu, unsigned long entry_point) |
80 | { | 80 | { |
81 | __raw_writel(entry_point, RESET_REG(cpu)); | 81 | if (__in_29bit_mode()) |
82 | __raw_writel(entry_point, RESET_REG(cpu)); | ||
83 | else | ||
84 | __raw_writel(virt_to_phys(entry_point), RESET_REG(cpu)); | ||
82 | 85 | ||
83 | if (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP)) | 86 | if (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP)) |
84 | __raw_writel(STBCR_MSTP, STBCR_REG(cpu)); | 87 | __raw_writel(STBCR_MSTP, STBCR_REG(cpu)); |
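plat_start_cpu() now distinguishes 29-bit from 32-bit (PMB) mode: with the legacy 29-bit layout the P1/P2 windows are identity-mapped onto physical memory, so the virtual entry point can be programmed directly, whereas under PMB the reset vector needs a real physical address. A hedged restatement of that decision in isolation, with an invented register argument standing in for RESET_REG(cpu):

```c
#include <linux/io.h>		/* __raw_writel(), virt_to_phys() via asm/io.h */
#include <asm/mmu.h>		/* __in_29bit_mode(); header location assumed */

/* Sketch only: program a secondary CPU's boot vector. */
static void example_program_boot_vector(void __iomem *reset_reg,
					unsigned long entry_point)
{
	if (__in_29bit_mode())
		/* P1/P2 are identity-mapped; the virtual address works as-is. */
		__raw_writel(entry_point, reset_reg);
	else
		/* 32-bit (PMB) mode: the hardware wants the physical address. */
		__raw_writel(virt_to_phys((void *)entry_point), reset_reg);
}
```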
diff --git a/arch/sh/kernel/cpu/sh4a/ubc.c b/arch/sh/kernel/cpu/sh4a/ubc.c new file mode 100644 index 000000000000..efb2745bcb36 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/ubc.c | |||
@@ -0,0 +1,133 @@ | |||
1 | /* | ||
2 | * arch/sh/kernel/cpu/sh4a/ubc.c | ||
3 | * | ||
4 | * On-chip UBC support for SH-4A CPUs. | ||
5 | * | ||
6 | * Copyright (C) 2009 - 2010 Paul Mundt | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/err.h> | ||
14 | #include <linux/clk.h> | ||
15 | #include <linux/io.h> | ||
16 | #include <asm/hw_breakpoint.h> | ||
17 | |||
18 | #define UBC_CBR(idx) (0xff200000 + (0x20 * idx)) | ||
19 | #define UBC_CRR(idx) (0xff200004 + (0x20 * idx)) | ||
20 | #define UBC_CAR(idx) (0xff200008 + (0x20 * idx)) | ||
21 | #define UBC_CAMR(idx) (0xff20000c + (0x20 * idx)) | ||
22 | |||
23 | #define UBC_CCMFR 0xff200600 | ||
24 | #define UBC_CBCR 0xff200620 | ||
25 | |||
26 | /* CRR */ | ||
27 | #define UBC_CRR_PCB (1 << 1) | ||
28 | #define UBC_CRR_BIE (1 << 0) | ||
29 | |||
30 | /* CBR */ | ||
31 | #define UBC_CBR_CE (1 << 0) | ||
32 | |||
33 | static struct sh_ubc sh4a_ubc; | ||
34 | |||
35 | static void sh4a_ubc_enable(struct arch_hw_breakpoint *info, int idx) | ||
36 | { | ||
37 | __raw_writel(UBC_CBR_CE | info->len | info->type, UBC_CBR(idx)); | ||
38 | __raw_writel(info->address, UBC_CAR(idx)); | ||
39 | } | ||
40 | |||
41 | static void sh4a_ubc_disable(struct arch_hw_breakpoint *info, int idx) | ||
42 | { | ||
43 | __raw_writel(0, UBC_CBR(idx)); | ||
44 | __raw_writel(0, UBC_CAR(idx)); | ||
45 | } | ||
46 | |||
47 | static void sh4a_ubc_enable_all(unsigned long mask) | ||
48 | { | ||
49 | int i; | ||
50 | |||
51 | for (i = 0; i < sh4a_ubc.num_events; i++) | ||
52 | if (mask & (1 << i)) | ||
53 | __raw_writel(__raw_readl(UBC_CBR(i)) | UBC_CBR_CE, | ||
54 | UBC_CBR(i)); | ||
55 | } | ||
56 | |||
57 | static void sh4a_ubc_disable_all(void) | ||
58 | { | ||
59 | int i; | ||
60 | |||
61 | for (i = 0; i < sh4a_ubc.num_events; i++) | ||
62 | __raw_writel(__raw_readl(UBC_CBR(i)) & ~UBC_CBR_CE, | ||
63 | UBC_CBR(i)); | ||
64 | } | ||
65 | |||
66 | static unsigned long sh4a_ubc_active_mask(void) | ||
67 | { | ||
68 | unsigned long active = 0; | ||
69 | int i; | ||
70 | |||
71 | for (i = 0; i < sh4a_ubc.num_events; i++) | ||
72 | if (__raw_readl(UBC_CBR(i)) & UBC_CBR_CE) | ||
73 | active |= (1 << i); | ||
74 | |||
75 | return active; | ||
76 | } | ||
77 | |||
78 | static unsigned long sh4a_ubc_triggered_mask(void) | ||
79 | { | ||
80 | return __raw_readl(UBC_CCMFR); | ||
81 | } | ||
82 | |||
83 | static void sh4a_ubc_clear_triggered_mask(unsigned long mask) | ||
84 | { | ||
85 | __raw_writel(__raw_readl(UBC_CCMFR) & ~mask, UBC_CCMFR); | ||
86 | } | ||
87 | |||
88 | static struct sh_ubc sh4a_ubc = { | ||
89 | .name = "SH-4A", | ||
90 | .num_events = 2, | ||
91 | .trap_nr = 0x1e0, | ||
92 | .enable = sh4a_ubc_enable, | ||
93 | .disable = sh4a_ubc_disable, | ||
94 | .enable_all = sh4a_ubc_enable_all, | ||
95 | .disable_all = sh4a_ubc_disable_all, | ||
96 | .active_mask = sh4a_ubc_active_mask, | ||
97 | .triggered_mask = sh4a_ubc_triggered_mask, | ||
98 | .clear_triggered_mask = sh4a_ubc_clear_triggered_mask, | ||
99 | }; | ||
100 | |||
101 | static int __init sh4a_ubc_init(void) | ||
102 | { | ||
103 | struct clk *ubc_iclk = clk_get(NULL, "ubc0"); | ||
104 | int i; | ||
105 | |||
106 | /* | ||
107 | * The UBC MSTP bit is optional, as not all platforms will have | ||
108 | * it. Just ignore it if we can't find it. | ||
109 | */ | ||
110 | if (IS_ERR(ubc_iclk)) | ||
111 | ubc_iclk = NULL; | ||
112 | |||
113 | clk_enable(ubc_iclk); | ||
114 | |||
115 | __raw_writel(0, UBC_CBCR); | ||
116 | |||
117 | for (i = 0; i < sh4a_ubc.num_events; i++) { | ||
118 | __raw_writel(0, UBC_CAMR(i)); | ||
119 | __raw_writel(0, UBC_CBR(i)); | ||
120 | |||
121 | __raw_writel(UBC_CRR_BIE | UBC_CRR_PCB, UBC_CRR(i)); | ||
122 | |||
123 | /* dummy read for write posting */ | ||
124 | (void)__raw_readl(UBC_CRR(i)); | ||
125 | } | ||
126 | |||
127 | clk_disable(ubc_iclk); | ||
128 | |||
129 | sh4a_ubc.clk = ubc_iclk; | ||
130 | |||
131 | return register_sh_ubc(&sh4a_ubc); | ||
132 | } | ||
133 | arch_initcall(sh4a_ubc_init); | ||
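The new driver never touches the generic breakpoint code directly; everything goes through the struct sh_ubc operations handed to register_sh_ubc(), which is how the machinery in arch/sh/kernel/hw_breakpoint.c (added later in this series, see below) drives it. A rough sketch of how a caller on that side uses such an ops table; the function names here are illustrative, not the actual generic-layer code:

```c
#include <linux/clk.h>
#include <asm/hw_breakpoint.h>	/* struct sh_ubc, struct arch_hw_breakpoint */

/* Sketch: arm one UBC channel through an sh_ubc-style ops table. */
static void example_arm_channel(struct sh_ubc *ubc,
				struct arch_hw_breakpoint *info, int idx)
{
	clk_enable(ubc->clk);		/* gate the UBC interface clock on */
	ubc->enable(info, idx);		/* program CBR/CAR for this channel */
}

static void example_disarm_channel(struct sh_ubc *ubc,
				   struct arch_hw_breakpoint *info, int idx)
{
	ubc->disable(info, idx);	/* clear CBR/CAR */
	clk_disable(ubc->clk);		/* let the clock framework gate it off */
}
```

The clk_enable()/clk_disable() pairing mirrors what sh4a_ubc_init() does around its own register setup, keeping the UBC clock off whenever no breakpoint is armed.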
diff --git a/arch/sh/kernel/cpu/sh5/clock-sh5.c b/arch/sh/kernel/cpu/sh5/clock-sh5.c index 7f864ebc51d3..9cfc19b8dbe4 100644 --- a/arch/sh/kernel/cpu/sh5/clock-sh5.c +++ b/arch/sh/kernel/cpu/sh5/clock-sh5.c | |||
@@ -24,7 +24,7 @@ static unsigned long cprc_base; | |||
24 | 24 | ||
25 | static void master_clk_init(struct clk *clk) | 25 | static void master_clk_init(struct clk *clk) |
26 | { | 26 | { |
27 | int idx = (ctrl_inl(cprc_base + 0x00) >> 6) & 0x0007; | 27 | int idx = (__raw_readl(cprc_base + 0x00) >> 6) & 0x0007; |
28 | clk->rate *= ifc_table[idx]; | 28 | clk->rate *= ifc_table[idx]; |
29 | } | 29 | } |
30 | 30 | ||
@@ -34,7 +34,7 @@ static struct clk_ops sh5_master_clk_ops = { | |||
34 | 34 | ||
35 | static unsigned long module_clk_recalc(struct clk *clk) | 35 | static unsigned long module_clk_recalc(struct clk *clk) |
36 | { | 36 | { |
37 | int idx = (ctrl_inw(cprc_base) >> 12) & 0x0007; | 37 | int idx = (__raw_readw(cprc_base) >> 12) & 0x0007; |
38 | return clk->parent->rate / ifc_table[idx]; | 38 | return clk->parent->rate / ifc_table[idx]; |
39 | } | 39 | } |
40 | 40 | ||
@@ -44,7 +44,7 @@ static struct clk_ops sh5_module_clk_ops = { | |||
44 | 44 | ||
45 | static unsigned long bus_clk_recalc(struct clk *clk) | 45 | static unsigned long bus_clk_recalc(struct clk *clk) |
46 | { | 46 | { |
47 | int idx = (ctrl_inw(cprc_base) >> 3) & 0x0007; | 47 | int idx = (__raw_readw(cprc_base) >> 3) & 0x0007; |
48 | return clk->parent->rate / ifc_table[idx]; | 48 | return clk->parent->rate / ifc_table[idx]; |
49 | } | 49 | } |
50 | 50 | ||
@@ -54,7 +54,7 @@ static struct clk_ops sh5_bus_clk_ops = { | |||
54 | 54 | ||
55 | static unsigned long cpu_clk_recalc(struct clk *clk) | 55 | static unsigned long cpu_clk_recalc(struct clk *clk) |
56 | { | 56 | { |
57 | int idx = (ctrl_inw(cprc_base) & 0x0007); | 57 | int idx = (__raw_readw(cprc_base) & 0x0007); |
58 | return clk->parent->rate / ifc_table[idx]; | 58 | return clk->parent->rate / ifc_table[idx]; |
59 | } | 59 | } |
60 | 60 | ||
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S index 8f13f73cb2cb..6b80295dd7a4 100644 --- a/arch/sh/kernel/cpu/sh5/entry.S +++ b/arch/sh/kernel/cpu/sh5/entry.S | |||
@@ -187,7 +187,7 @@ trap_jtable: | |||
187 | .rept 6 | 187 | .rept 6 |
188 | .long do_exception_error /* 0x880 - 0x920 */ | 188 | .long do_exception_error /* 0x880 - 0x920 */ |
189 | .endr | 189 | .endr |
190 | .long do_software_break_point /* 0x940 */ | 190 | .long breakpoint_trap_handler /* 0x940 */ |
191 | .long do_exception_error /* 0x960 */ | 191 | .long do_exception_error /* 0x960 */ |
192 | .long do_single_step /* 0x980 */ | 192 | .long do_single_step /* 0x980 */ |
193 | 193 | ||
@@ -1124,7 +1124,7 @@ fpu_error_or_IRQA: | |||
1124 | pta its_IRQ, tr0 | 1124 | pta its_IRQ, tr0 |
1125 | beqi/l r4, EVENT_INTERRUPT, tr0 | 1125 | beqi/l r4, EVENT_INTERRUPT, tr0 |
1126 | #ifdef CONFIG_SH_FPU | 1126 | #ifdef CONFIG_SH_FPU |
1127 | movi do_fpu_state_restore, r6 | 1127 | movi fpu_state_restore_trap_handler, r6 |
1128 | #else | 1128 | #else |
1129 | movi do_exception_error, r6 | 1129 | movi do_exception_error, r6 |
1130 | #endif | 1130 | #endif |
@@ -1135,7 +1135,7 @@ fpu_error_or_IRQB: | |||
1135 | pta its_IRQ, tr0 | 1135 | pta its_IRQ, tr0 |
1136 | beqi/l r4, EVENT_INTERRUPT, tr0 | 1136 | beqi/l r4, EVENT_INTERRUPT, tr0 |
1137 | #ifdef CONFIG_SH_FPU | 1137 | #ifdef CONFIG_SH_FPU |
1138 | movi do_fpu_state_restore, r6 | 1138 | movi fpu_state_restore_trap_handler, r6 |
1139 | #else | 1139 | #else |
1140 | movi do_exception_error, r6 | 1140 | movi do_exception_error, r6 |
1141 | #endif | 1141 | #endif |
diff --git a/arch/sh/kernel/cpu/sh5/fpu.c b/arch/sh/kernel/cpu/sh5/fpu.c index 4648ccee6c4d..4b3bb35e99f3 100644 --- a/arch/sh/kernel/cpu/sh5/fpu.c +++ b/arch/sh/kernel/cpu/sh5/fpu.c | |||
@@ -15,24 +15,6 @@ | |||
15 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
16 | #include <linux/signal.h> | 16 | #include <linux/signal.h> |
17 | #include <asm/processor.h> | 17 | #include <asm/processor.h> |
18 | #include <asm/user.h> | ||
19 | #include <asm/io.h> | ||
20 | #include <asm/fpu.h> | ||
21 | |||
22 | /* | ||
23 | * Initially load the FPU with signalling NANS. This bit pattern | ||
24 | * has the property that no matter whether considered as single or as | ||
25 | * double precision, it still represents a signalling NAN. | ||
26 | */ | ||
27 | #define sNAN64 0xFFFFFFFFFFFFFFFFULL | ||
28 | #define sNAN32 0xFFFFFFFFUL | ||
29 | |||
30 | static union sh_fpu_union init_fpuregs = { | ||
31 | .hard = { | ||
32 | .fp_regs = { [0 ... 63] = sNAN32 }, | ||
33 | .fpscr = FPSCR_INIT | ||
34 | } | ||
35 | }; | ||
36 | 18 | ||
37 | void save_fpu(struct task_struct *tsk) | 19 | void save_fpu(struct task_struct *tsk) |
38 | { | 20 | { |
@@ -72,12 +54,11 @@ void save_fpu(struct task_struct *tsk) | |||
72 | "fgetscr fr63\n\t" | 54 | "fgetscr fr63\n\t" |
73 | "fst.s %0, (32*8), fr63\n\t" | 55 | "fst.s %0, (32*8), fr63\n\t" |
74 | : /* no output */ | 56 | : /* no output */ |
75 | : "r" (&tsk->thread.fpu.hard) | 57 | : "r" (&tsk->thread.xstate->hardfpu) |
76 | : "memory"); | 58 | : "memory"); |
77 | } | 59 | } |
78 | 60 | ||
79 | static inline void | 61 | void restore_fpu(struct task_struct *tsk) |
80 | fpload(struct sh_fpu_hard_struct *fpregs) | ||
81 | { | 62 | { |
82 | asm volatile("fld.p %0, (0*8), fp0\n\t" | 63 | asm volatile("fld.p %0, (0*8), fp0\n\t" |
83 | "fld.p %0, (1*8), fp2\n\t" | 64 | "fld.p %0, (1*8), fp2\n\t" |
@@ -116,16 +97,11 @@ fpload(struct sh_fpu_hard_struct *fpregs) | |||
116 | 97 | ||
117 | "fld.p %0, (31*8), fp62\n\t" | 98 | "fld.p %0, (31*8), fp62\n\t" |
118 | : /* no output */ | 99 | : /* no output */ |
119 | : "r" (fpregs) ); | 100 | : "r" (&tsk->thread.xstate->hardfpu) |
120 | } | 101 | : "memory"); |
121 | |||
122 | void fpinit(struct sh_fpu_hard_struct *fpregs) | ||
123 | { | ||
124 | *fpregs = init_fpuregs.hard; | ||
125 | } | 102 | } |
126 | 103 | ||
127 | asmlinkage void | 104 | asmlinkage void do_fpu_error(unsigned long ex, struct pt_regs *regs) |
128 | do_fpu_error(unsigned long ex, struct pt_regs *regs) | ||
129 | { | 105 | { |
130 | struct task_struct *tsk = current; | 106 | struct task_struct *tsk = current; |
131 | 107 | ||
@@ -133,35 +109,6 @@ do_fpu_error(unsigned long ex, struct pt_regs *regs) | |||
133 | 109 | ||
134 | tsk->thread.trap_no = 11; | 110 | tsk->thread.trap_no = 11; |
135 | tsk->thread.error_code = 0; | 111 | tsk->thread.error_code = 0; |
136 | force_sig(SIGFPE, tsk); | ||
137 | } | ||
138 | |||
139 | |||
140 | asmlinkage void | ||
141 | do_fpu_state_restore(unsigned long ex, struct pt_regs *regs) | ||
142 | { | ||
143 | void die(const char *str, struct pt_regs *regs, long err); | ||
144 | |||
145 | if (! user_mode(regs)) | ||
146 | die("FPU used in kernel", regs, ex); | ||
147 | 112 | ||
148 | regs->sr &= ~SR_FD; | 113 | force_sig(SIGFPE, tsk); |
149 | |||
150 | if (last_task_used_math == current) | ||
151 | return; | ||
152 | |||
153 | enable_fpu(); | ||
154 | if (last_task_used_math != NULL) | ||
155 | /* Other processes fpu state, save away */ | ||
156 | save_fpu(last_task_used_math); | ||
157 | |||
158 | last_task_used_math = current; | ||
159 | if (used_math()) { | ||
160 | fpload(¤t->thread.fpu.hard); | ||
161 | } else { | ||
162 | /* First time FPU user. */ | ||
163 | fpload(&init_fpuregs.hard); | ||
164 | set_used_math(); | ||
165 | } | ||
166 | disable_fpu(); | ||
167 | } | 114 | } |
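With the FPU state now living in the dynamically allocated thread.xstate->hardfpu and the trap vector pointing at the common fpu_state_restore_trap_handler, the SH-5 private lazy-restore path becomes redundant. For reference, a simplified sketch of the lazy-FPU flow the removed do_fpu_state_restore() implemented, which the shared handler is expected to provide in equivalent form (this models the old code, it is not the new handler):

```c
#include <linux/sched.h>	/* used_math(), set_used_math() */
#include <linux/ptrace.h>	/* user_mode() */
#include <linux/bug.h>
#include <asm/processor.h>	/* last_task_used_math, SR_FD */
#include <asm/fpu.h>		/* enable_fpu(), save_fpu(), restore_fpu() (assumed) */

/* Sketch of a lazy FPU restore on an "FPU disabled" trap. */
static void example_fpu_state_restore(struct pt_regs *regs)
{
	struct task_struct *tsk = current;

	if (!user_mode(regs))
		BUG();		/* the removed code called die("FPU used in kernel", ...) */

	regs->sr &= ~SR_FD;	/* let user space touch the FPU again */

	if (last_task_used_math == tsk)
		return;		/* the registers still hold our state */

	enable_fpu();
	if (last_task_used_math)
		save_fpu(last_task_used_math);	/* park the previous owner's state */

	last_task_used_math = tsk;
	if (used_math())
		restore_fpu(tsk);	/* reload from tsk->thread.xstate->hardfpu */
	else
		set_used_math();	/* first use: default register image set up elsewhere */
	disable_fpu();
}
```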
diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c index ca029a44743c..e55968712706 100644 --- a/arch/sh/kernel/cpu/shmobile/pm.c +++ b/arch/sh/kernel/cpu/shmobile/pm.c | |||
@@ -33,7 +33,8 @@ ATOMIC_NOTIFIER_HEAD(sh_mobile_post_sleep_notifier_list); | |||
33 | #define SUSP_MODE_SLEEP (SUSP_SH_SLEEP) | 33 | #define SUSP_MODE_SLEEP (SUSP_SH_SLEEP) |
34 | #define SUSP_MODE_SLEEP_SF (SUSP_SH_SLEEP | SUSP_SH_SF) | 34 | #define SUSP_MODE_SLEEP_SF (SUSP_SH_SLEEP | SUSP_SH_SF) |
35 | #define SUSP_MODE_STANDBY_SF (SUSP_SH_STANDBY | SUSP_SH_SF) | 35 | #define SUSP_MODE_STANDBY_SF (SUSP_SH_STANDBY | SUSP_SH_SF) |
36 | #define SUSP_MODE_RSTANDBY (SUSP_SH_RSTANDBY | SUSP_SH_MMU | SUSP_SH_SF) | 36 | #define SUSP_MODE_RSTANDBY_SF \ |
37 | (SUSP_SH_RSTANDBY | SUSP_SH_MMU | SUSP_SH_REGS | SUSP_SH_SF) | ||
37 | /* | 38 | /* |
38 | * U-standby mode is unsupported since it needs bootloader hacks | 39 | * U-standby mode is unsupported since it needs bootloader hacks |
39 | */ | 40 | */ |
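The R-standby mode now also carries SUSP_SH_REGS, which is the bit the updated sleep/resume assembly below tests before spilling and reloading the general purpose registers (r0-r7 in both banks plus r8-r14 and pr). A tiny C-level sketch of how that mode word is inspected; the helper is purely illustrative:

```c
#include <linux/kernel.h>
#include <asm/suspend.h>	/* SUSP_SH_* flags (assumed header) */

/* Sketch: inspect the mode word handed to the low-level sleep code. */
static void example_describe_sleep_mode(unsigned long mode)
{
	if (mode & SUSP_SH_REGS)
		pr_debug("will save r0-r7 in both banks plus r8-r14/pr\n");
	if (mode & SUSP_SH_MMU)
		pr_debug("will save and restore MMU state\n");
	if (mode & SUSP_SH_SF)
		pr_debug("memory stays in self-refresh\n");
}
```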
diff --git a/arch/sh/kernel/cpu/shmobile/sleep.S b/arch/sh/kernel/cpu/shmobile/sleep.S index e9dd7fa0abd2..e6aac65f5750 100644 --- a/arch/sh/kernel/cpu/shmobile/sleep.S +++ b/arch/sh/kernel/cpu/shmobile/sleep.S | |||
@@ -48,8 +48,48 @@ ENTRY(sh_mobile_sleep_enter_start) | |||
48 | stc sr, r0 | 48 | stc sr, r0 |
49 | mov.l r0, @(SH_SLEEP_SR, r5) | 49 | mov.l r0, @(SH_SLEEP_SR, r5) |
50 | 50 | ||
51 | /* save sp */ | 51 | /* save general purpose registers to stack if needed */ |
52 | mov.l @(SH_SLEEP_MODE, r5), r0 | ||
53 | tst #SUSP_SH_REGS, r0 | ||
54 | bt skip_regs_save | ||
55 | |||
56 | sts.l pr, @-r15 | ||
57 | mov.l r14, @-r15 | ||
58 | mov.l r13, @-r15 | ||
59 | mov.l r12, @-r15 | ||
60 | mov.l r11, @-r15 | ||
61 | mov.l r10, @-r15 | ||
62 | mov.l r9, @-r15 | ||
63 | mov.l r8, @-r15 | ||
64 | |||
65 | /* make sure bank0 is selected, save low registers */ | ||
66 | mov.l rb_bit, r9 | ||
67 | not r9, r9 | ||
68 | bsr set_sr | ||
69 | mov #0, r10 | ||
70 | |||
71 | bsr save_low_regs | ||
72 | nop | ||
73 | |||
74 | /* switch to bank 1, save low registers */ | ||
75 | mov.l rb_bit, r10 | ||
76 | bsr set_sr | ||
77 | mov #-1, r9 | ||
78 | |||
79 | bsr save_low_regs | ||
80 | nop | ||
81 | |||
82 | /* switch back to bank 0 */ | ||
83 | mov.l rb_bit, r9 | ||
84 | not r9, r9 | ||
85 | bsr set_sr | ||
86 | mov #0, r10 | ||
87 | |||
88 | skip_regs_save: | ||
89 | |||
90 | /* save sp, also set to internal ram */ | ||
52 | mov.l r15, @(SH_SLEEP_SP, r5) | 91 | mov.l r15, @(SH_SLEEP_SP, r5) |
92 | mov r5, r15 | ||
53 | 93 | ||
54 | /* save stbcr */ | 94 | /* save stbcr */ |
55 | bsr save_register | 95 | bsr save_register |
@@ -60,7 +100,7 @@ ENTRY(sh_mobile_sleep_enter_start) | |||
60 | tst #SUSP_SH_MMU, r0 | 100 | tst #SUSP_SH_MMU, r0 |
61 | bt skip_mmu_save_disable | 101 | bt skip_mmu_save_disable |
62 | 102 | ||
63 | /* save mmu state */ | 103 | /* save mmu state */ |
64 | bsr save_register | 104 | bsr save_register |
65 | mov #SH_SLEEP_REG_PTEH, r0 | 105 | mov #SH_SLEEP_REG_PTEH, r0 |
66 | 106 | ||
@@ -177,6 +217,29 @@ get_register: | |||
177 | mov.l @(r0, r5), r0 | 217 | mov.l @(r0, r5), r0 |
178 | rts | 218 | rts |
179 | nop | 219 | nop |
220 | |||
221 | set_sr: | ||
222 | stc sr, r8 | ||
223 | and r9, r8 | ||
224 | or r10, r8 | ||
225 | ldc r8, sr | ||
226 | rts | ||
227 | nop | ||
228 | |||
229 | save_low_regs: | ||
230 | mov.l r7, @-r15 | ||
231 | mov.l r6, @-r15 | ||
232 | mov.l r5, @-r15 | ||
233 | mov.l r4, @-r15 | ||
234 | mov.l r3, @-r15 | ||
235 | mov.l r2, @-r15 | ||
236 | mov.l r1, @-r15 | ||
237 | rts | ||
238 | mov.l r0, @-r15 | ||
239 | |||
240 | .balign 4 | ||
241 | rb_bit: .long 0x20000000 ! RB=1 | ||
242 | |||
180 | ENTRY(sh_mobile_sleep_enter_end) | 243 | ENTRY(sh_mobile_sleep_enter_end) |
181 | 244 | ||
182 | .balign 4 | 245 | .balign 4 |
@@ -270,6 +333,40 @@ skip_restore_sf: | |||
270 | icbi @r0 | 333 | icbi @r0 |
271 | 334 | ||
272 | skip_restore_mmu: | 335 | skip_restore_mmu: |
336 | |||
337 | /* restore general purpose registers if needed */ | ||
338 | mov.l @(SH_SLEEP_MODE, r5), r0 | ||
339 | tst #SUSP_SH_REGS, r0 | ||
340 | bt skip_restore_regs | ||
341 | |||
342 | /* switch to bank 1, restore low registers */ | ||
343 | mov.l _rb_bit, r10 | ||
344 | bsr _set_sr | ||
345 | mov #-1, r9 | ||
346 | |||
347 | bsr restore_low_regs | ||
348 | nop | ||
349 | |||
350 | /* switch to bank0, restore low registers */ | ||
351 | mov.l _rb_bit, r9 | ||
352 | not r9, r9 | ||
353 | bsr _set_sr | ||
354 | mov #0, r10 | ||
355 | |||
356 | bsr restore_low_regs | ||
357 | nop | ||
358 | |||
359 | /* restore the rest of the registers */ | ||
360 | mov.l @r15+, r8 | ||
361 | mov.l @r15+, r9 | ||
362 | mov.l @r15+, r10 | ||
363 | mov.l @r15+, r11 | ||
364 | mov.l @r15+, r12 | ||
365 | mov.l @r15+, r13 | ||
366 | mov.l @r15+, r14 | ||
367 | lds.l @r15+, pr | ||
368 | |||
369 | skip_restore_regs: | ||
273 | rte | 370 | rte |
274 | nop | 371 | nop |
275 | 372 | ||
@@ -283,6 +380,26 @@ restore_register: | |||
283 | rts | 380 | rts |
284 | nop | 381 | nop |
285 | 382 | ||
383 | _set_sr: | ||
384 | stc sr, r8 | ||
385 | and r9, r8 | ||
386 | or r10, r8 | ||
387 | ldc r8, sr | ||
388 | rts | ||
389 | nop | ||
390 | |||
391 | restore_low_regs: | ||
392 | mov.l @r15+, r0 | ||
393 | mov.l @r15+, r1 | ||
394 | mov.l @r15+, r2 | ||
395 | mov.l @r15+, r3 | ||
396 | mov.l @r15+, r4 | ||
397 | mov.l @r15+, r5 | ||
398 | mov.l @r15+, r6 | ||
399 | rts | ||
400 | mov.l @r15+, r7 | ||
401 | |||
286 | .balign 4 | 402 | .balign 4 |
403 | _rb_bit: .long 0x20000000 ! RB=1 | ||
287 | 1: .long ~0x7ff | 404 | 1: .long ~0x7ff |
288 | ENTRY(sh_mobile_sleep_resume_end) | 405 | ENTRY(sh_mobile_sleep_resume_end) |
diff --git a/arch/sh/kernel/debugtraps.S b/arch/sh/kernel/debugtraps.S index 591741383ee6..7a1b46fec0f4 100644 --- a/arch/sh/kernel/debugtraps.S +++ b/arch/sh/kernel/debugtraps.S | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/linkage.h> | 13 | #include <linux/linkage.h> |
14 | 14 | ||
15 | #if !defined(CONFIG_KGDB) | 15 | #if !defined(CONFIG_KGDB) |
16 | #define breakpoint_trap_handler debug_trap_handler | ||
17 | #define singlestep_trap_handler debug_trap_handler | 16 | #define singlestep_trap_handler debug_trap_handler |
18 | #endif | 17 | #endif |
19 | 18 | ||
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index e51168064e56..bd1c497280a6 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c | |||
@@ -39,10 +39,10 @@ static mempool_t *dwarf_frame_pool; | |||
39 | static struct kmem_cache *dwarf_reg_cachep; | 39 | static struct kmem_cache *dwarf_reg_cachep; |
40 | static mempool_t *dwarf_reg_pool; | 40 | static mempool_t *dwarf_reg_pool; |
41 | 41 | ||
42 | static LIST_HEAD(dwarf_cie_list); | 42 | static struct rb_root cie_root; |
43 | static DEFINE_SPINLOCK(dwarf_cie_lock); | 43 | static DEFINE_SPINLOCK(dwarf_cie_lock); |
44 | 44 | ||
45 | static LIST_HEAD(dwarf_fde_list); | 45 | static struct rb_root fde_root; |
46 | static DEFINE_SPINLOCK(dwarf_fde_lock); | 46 | static DEFINE_SPINLOCK(dwarf_fde_lock); |
47 | 47 | ||
48 | static struct dwarf_cie *cached_cie; | 48 | static struct dwarf_cie *cached_cie; |
@@ -301,7 +301,8 @@ static inline int dwarf_entry_len(char *addr, unsigned long *len) | |||
301 | */ | 301 | */ |
302 | static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr) | 302 | static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr) |
303 | { | 303 | { |
304 | struct dwarf_cie *cie; | 304 | struct rb_node **rb_node = &cie_root.rb_node; |
305 | struct dwarf_cie *cie = NULL; | ||
305 | unsigned long flags; | 306 | unsigned long flags; |
306 | 307 | ||
307 | spin_lock_irqsave(&dwarf_cie_lock, flags); | 308 | spin_lock_irqsave(&dwarf_cie_lock, flags); |
@@ -315,16 +316,24 @@ static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr) | |||
315 | goto out; | 316 | goto out; |
316 | } | 317 | } |
317 | 318 | ||
318 | list_for_each_entry(cie, &dwarf_cie_list, link) { | 319 | while (*rb_node) { |
319 | if (cie->cie_pointer == cie_ptr) { | 320 | struct dwarf_cie *cie_tmp; |
320 | cached_cie = cie; | 321 | |
321 | break; | 322 | cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node); |
323 | BUG_ON(!cie_tmp); | ||
324 | |||
325 | if (cie_ptr == cie_tmp->cie_pointer) { | ||
326 | cie = cie_tmp; | ||
327 | cached_cie = cie_tmp; | ||
328 | goto out; | ||
329 | } else { | ||
330 | if (cie_ptr < cie_tmp->cie_pointer) | ||
331 | rb_node = &(*rb_node)->rb_left; | ||
332 | else | ||
333 | rb_node = &(*rb_node)->rb_right; | ||
322 | } | 334 | } |
323 | } | 335 | } |
324 | 336 | ||
325 | /* Couldn't find the entry in the list. */ | ||
326 | if (&cie->link == &dwarf_cie_list) | ||
327 | cie = NULL; | ||
328 | out: | 337 | out: |
329 | spin_unlock_irqrestore(&dwarf_cie_lock, flags); | 338 | spin_unlock_irqrestore(&dwarf_cie_lock, flags); |
330 | return cie; | 339 | return cie; |
@@ -336,25 +345,34 @@ out: | |||
336 | */ | 345 | */ |
337 | struct dwarf_fde *dwarf_lookup_fde(unsigned long pc) | 346 | struct dwarf_fde *dwarf_lookup_fde(unsigned long pc) |
338 | { | 347 | { |
339 | struct dwarf_fde *fde; | 348 | struct rb_node **rb_node = &fde_root.rb_node; |
349 | struct dwarf_fde *fde = NULL; | ||
340 | unsigned long flags; | 350 | unsigned long flags; |
341 | 351 | ||
342 | spin_lock_irqsave(&dwarf_fde_lock, flags); | 352 | spin_lock_irqsave(&dwarf_fde_lock, flags); |
343 | 353 | ||
344 | list_for_each_entry(fde, &dwarf_fde_list, link) { | 354 | while (*rb_node) { |
345 | unsigned long start, end; | 355 | struct dwarf_fde *fde_tmp; |
356 | unsigned long tmp_start, tmp_end; | ||
346 | 357 | ||
347 | start = fde->initial_location; | 358 | fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node); |
348 | end = fde->initial_location + fde->address_range; | 359 | BUG_ON(!fde_tmp); |
349 | 360 | ||
350 | if (pc >= start && pc < end) | 361 | tmp_start = fde_tmp->initial_location; |
351 | break; | 362 | tmp_end = fde_tmp->initial_location + fde_tmp->address_range; |
352 | } | ||
353 | 363 | ||
354 | /* Couldn't find the entry in the list. */ | 364 | if (pc < tmp_start) { |
355 | if (&fde->link == &dwarf_fde_list) | 365 | rb_node = &(*rb_node)->rb_left; |
356 | fde = NULL; | 366 | } else { |
367 | if (pc < tmp_end) { | ||
368 | fde = fde_tmp; | ||
369 | goto out; | ||
370 | } else | ||
371 | rb_node = &(*rb_node)->rb_right; | ||
372 | } | ||
373 | } | ||
357 | 374 | ||
375 | out: | ||
358 | spin_unlock_irqrestore(&dwarf_fde_lock, flags); | 376 | spin_unlock_irqrestore(&dwarf_fde_lock, flags); |
359 | 377 | ||
360 | return fde; | 378 | return fde; |
@@ -552,8 +570,8 @@ extern void ret_from_irq(void); | |||
552 | * on the callstack. Each of the lower (older) stack frames are | 570 | * on the callstack. Each of the lower (older) stack frames are |
553 | * linked via the "prev" member. | 571 | * linked via the "prev" member. |
554 | */ | 572 | */ |
555 | struct dwarf_frame * dwarf_unwind_stack(unsigned long pc, | 573 | struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, |
556 | struct dwarf_frame *prev) | 574 | struct dwarf_frame *prev) |
557 | { | 575 | { |
558 | struct dwarf_frame *frame; | 576 | struct dwarf_frame *frame; |
559 | struct dwarf_cie *cie; | 577 | struct dwarf_cie *cie; |
@@ -708,6 +726,8 @@ bail: | |||
708 | static int dwarf_parse_cie(void *entry, void *p, unsigned long len, | 726 | static int dwarf_parse_cie(void *entry, void *p, unsigned long len, |
709 | unsigned char *end, struct module *mod) | 727 | unsigned char *end, struct module *mod) |
710 | { | 728 | { |
729 | struct rb_node **rb_node = &cie_root.rb_node; | ||
730 | struct rb_node *parent; | ||
711 | struct dwarf_cie *cie; | 731 | struct dwarf_cie *cie; |
712 | unsigned long flags; | 732 | unsigned long flags; |
713 | int count; | 733 | int count; |
@@ -802,11 +822,30 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len, | |||
802 | cie->initial_instructions = p; | 822 | cie->initial_instructions = p; |
803 | cie->instructions_end = end; | 823 | cie->instructions_end = end; |
804 | 824 | ||
805 | cie->mod = mod; | ||
806 | |||
807 | /* Add to list */ | 825 | /* Add to list */ |
808 | spin_lock_irqsave(&dwarf_cie_lock, flags); | 826 | spin_lock_irqsave(&dwarf_cie_lock, flags); |
809 | list_add_tail(&cie->link, &dwarf_cie_list); | 827 | |
828 | while (*rb_node) { | ||
829 | struct dwarf_cie *cie_tmp; | ||
830 | |||
831 | cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node); | ||
832 | |||
833 | parent = *rb_node; | ||
834 | |||
835 | if (cie->cie_pointer < cie_tmp->cie_pointer) | ||
836 | rb_node = &parent->rb_left; | ||
837 | else if (cie->cie_pointer >= cie_tmp->cie_pointer) | ||
838 | rb_node = &parent->rb_right; | ||
839 | else | ||
840 | WARN_ON(1); | ||
841 | } | ||
842 | |||
843 | rb_link_node(&cie->node, parent, rb_node); | ||
844 | rb_insert_color(&cie->node, &cie_root); | ||
845 | |||
846 | if (mod != NULL) | ||
847 | list_add_tail(&cie->link, &mod->arch.cie_list); | ||
848 | |||
810 | spin_unlock_irqrestore(&dwarf_cie_lock, flags); | 849 | spin_unlock_irqrestore(&dwarf_cie_lock, flags); |
811 | 850 | ||
812 | return 0; | 851 | return 0; |
@@ -816,6 +855,8 @@ static int dwarf_parse_fde(void *entry, u32 entry_type, | |||
816 | void *start, unsigned long len, | 855 | void *start, unsigned long len, |
817 | unsigned char *end, struct module *mod) | 856 | unsigned char *end, struct module *mod) |
818 | { | 857 | { |
858 | struct rb_node **rb_node = &fde_root.rb_node; | ||
859 | struct rb_node *parent; | ||
819 | struct dwarf_fde *fde; | 860 | struct dwarf_fde *fde; |
820 | struct dwarf_cie *cie; | 861 | struct dwarf_cie *cie; |
821 | unsigned long flags; | 862 | unsigned long flags; |
@@ -863,11 +904,38 @@ static int dwarf_parse_fde(void *entry, u32 entry_type, | |||
863 | fde->instructions = p; | 904 | fde->instructions = p; |
864 | fde->end = end; | 905 | fde->end = end; |
865 | 906 | ||
866 | fde->mod = mod; | ||
867 | |||
868 | /* Add to list. */ | 907 | /* Add to list. */ |
869 | spin_lock_irqsave(&dwarf_fde_lock, flags); | 908 | spin_lock_irqsave(&dwarf_fde_lock, flags); |
870 | list_add_tail(&fde->link, &dwarf_fde_list); | 909 | |
910 | while (*rb_node) { | ||
911 | struct dwarf_fde *fde_tmp; | ||
912 | unsigned long tmp_start, tmp_end; | ||
913 | unsigned long start, end; | ||
914 | |||
915 | fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node); | ||
916 | |||
917 | start = fde->initial_location; | ||
918 | end = fde->initial_location + fde->address_range; | ||
919 | |||
920 | tmp_start = fde_tmp->initial_location; | ||
921 | tmp_end = fde_tmp->initial_location + fde_tmp->address_range; | ||
922 | |||
923 | parent = *rb_node; | ||
924 | |||
925 | if (start < tmp_start) | ||
926 | rb_node = &parent->rb_left; | ||
927 | else if (start >= tmp_end) | ||
928 | rb_node = &parent->rb_right; | ||
929 | else | ||
930 | WARN_ON(1); | ||
931 | } | ||
932 | |||
933 | rb_link_node(&fde->node, parent, rb_node); | ||
934 | rb_insert_color(&fde->node, &fde_root); | ||
935 | |||
936 | if (mod != NULL) | ||
937 | list_add_tail(&fde->link, &mod->arch.fde_list); | ||
938 | |||
871 | spin_unlock_irqrestore(&dwarf_fde_lock, flags); | 939 | spin_unlock_irqrestore(&dwarf_fde_lock, flags); |
872 | 940 | ||
873 | return 0; | 941 | return 0; |
@@ -912,19 +980,29 @@ static struct unwinder dwarf_unwinder = { | |||
912 | 980 | ||
913 | static void dwarf_unwinder_cleanup(void) | 981 | static void dwarf_unwinder_cleanup(void) |
914 | { | 982 | { |
915 | struct dwarf_cie *cie, *cie_tmp; | 983 | struct rb_node **fde_rb_node = &fde_root.rb_node; |
916 | struct dwarf_fde *fde, *fde_tmp; | 984 | struct rb_node **cie_rb_node = &cie_root.rb_node; |
917 | 985 | ||
918 | /* | 986 | /* |
919 | * Deallocate all the memory allocated for the DWARF unwinder. | 987 | * Deallocate all the memory allocated for the DWARF unwinder. |
920 | * Traverse all the FDE/CIE lists and remove and free all the | 988 | * Traverse all the FDE/CIE lists and remove and free all the |
921 | * memory associated with those data structures. | 989 | * memory associated with those data structures. |
922 | */ | 990 | */ |
923 | list_for_each_entry_safe(cie, cie_tmp, &dwarf_cie_list, link) | 991 | while (*fde_rb_node) { |
924 | kfree(cie); | 992 | struct dwarf_fde *fde; |
925 | 993 | ||
926 | list_for_each_entry_safe(fde, fde_tmp, &dwarf_fde_list, link) | 994 | fde = rb_entry(*fde_rb_node, struct dwarf_fde, node); |
995 | rb_erase(*fde_rb_node, &fde_root); | ||
927 | kfree(fde); | 996 | kfree(fde); |
997 | } | ||
998 | |||
999 | while (*cie_rb_node) { | ||
1000 | struct dwarf_cie *cie; | ||
1001 | |||
1002 | cie = rb_entry(*cie_rb_node, struct dwarf_cie, node); | ||
1003 | rb_erase(*cie_rb_node, &cie_root); | ||
1004 | kfree(cie); | ||
1005 | } | ||
928 | 1006 | ||
929 | kmem_cache_destroy(dwarf_reg_cachep); | 1007 | kmem_cache_destroy(dwarf_reg_cachep); |
930 | kmem_cache_destroy(dwarf_frame_cachep); | 1008 | kmem_cache_destroy(dwarf_frame_cachep); |
@@ -1024,6 +1102,8 @@ int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, | |||
1024 | 1102 | ||
1025 | /* Did we find the .eh_frame section? */ | 1103 | /* Did we find the .eh_frame section? */ |
1026 | if (i != hdr->e_shnum) { | 1104 | if (i != hdr->e_shnum) { |
1105 | INIT_LIST_HEAD(&me->arch.cie_list); | ||
1106 | INIT_LIST_HEAD(&me->arch.fde_list); | ||
1027 | err = dwarf_parse_section((char *)start, (char *)end, me); | 1107 | err = dwarf_parse_section((char *)start, (char *)end, me); |
1028 | if (err) { | 1108 | if (err) { |
1029 | printk(KERN_WARNING "%s: failed to parse DWARF info\n", | 1109 | printk(KERN_WARNING "%s: failed to parse DWARF info\n", |
@@ -1044,38 +1124,26 @@ int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, | |||
1044 | */ | 1124 | */ |
1045 | void module_dwarf_cleanup(struct module *mod) | 1125 | void module_dwarf_cleanup(struct module *mod) |
1046 | { | 1126 | { |
1047 | struct dwarf_fde *fde; | 1127 | struct dwarf_fde *fde, *ftmp; |
1048 | struct dwarf_cie *cie; | 1128 | struct dwarf_cie *cie, *ctmp; |
1049 | unsigned long flags; | 1129 | unsigned long flags; |
1050 | 1130 | ||
1051 | spin_lock_irqsave(&dwarf_cie_lock, flags); | 1131 | spin_lock_irqsave(&dwarf_cie_lock, flags); |
1052 | 1132 | ||
1053 | again_cie: | 1133 | list_for_each_entry_safe(cie, ctmp, &mod->arch.cie_list, link) { |
1054 | list_for_each_entry(cie, &dwarf_cie_list, link) { | ||
1055 | if (cie->mod == mod) | ||
1056 | break; | ||
1057 | } | ||
1058 | |||
1059 | if (&cie->link != &dwarf_cie_list) { | ||
1060 | list_del(&cie->link); | 1134 | list_del(&cie->link); |
1135 | rb_erase(&cie->node, &cie_root); | ||
1061 | kfree(cie); | 1136 | kfree(cie); |
1062 | goto again_cie; | ||
1063 | } | 1137 | } |
1064 | 1138 | ||
1065 | spin_unlock_irqrestore(&dwarf_cie_lock, flags); | 1139 | spin_unlock_irqrestore(&dwarf_cie_lock, flags); |
1066 | 1140 | ||
1067 | spin_lock_irqsave(&dwarf_fde_lock, flags); | 1141 | spin_lock_irqsave(&dwarf_fde_lock, flags); |
1068 | 1142 | ||
1069 | again_fde: | 1143 | list_for_each_entry_safe(fde, ftmp, &mod->arch.fde_list, link) { |
1070 | list_for_each_entry(fde, &dwarf_fde_list, link) { | ||
1071 | if (fde->mod == mod) | ||
1072 | break; | ||
1073 | } | ||
1074 | |||
1075 | if (&fde->link != &dwarf_fde_list) { | ||
1076 | list_del(&fde->link); | 1144 | list_del(&fde->link); |
1145 | rb_erase(&fde->node, &fde_root); | ||
1077 | kfree(fde); | 1146 | kfree(fde); |
1078 | goto again_fde; | ||
1079 | } | 1147 | } |
1080 | 1148 | ||
1081 | spin_unlock_irqrestore(&dwarf_fde_lock, flags); | 1149 | spin_unlock_irqrestore(&dwarf_fde_lock, flags); |
@@ -1094,8 +1162,6 @@ again_fde: | |||
1094 | static int __init dwarf_unwinder_init(void) | 1162 | static int __init dwarf_unwinder_init(void) |
1095 | { | 1163 | { |
1096 | int err; | 1164 | int err; |
1097 | INIT_LIST_HEAD(&dwarf_cie_list); | ||
1098 | INIT_LIST_HEAD(&dwarf_fde_list); | ||
1099 | 1165 | ||
1100 | dwarf_frame_cachep = kmem_cache_create("dwarf_frames", | 1166 | dwarf_frame_cachep = kmem_cache_create("dwarf_frames", |
1101 | sizeof(struct dwarf_frame), 0, | 1167 | sizeof(struct dwarf_frame), 0, |
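Replacing the linear CIE/FDE lists with rb-trees turns dwarf_lookup_fde() into an O(log n) descent keyed on the half-open range [initial_location, initial_location + address_range), while the per-module lists that remain are only used for teardown in module_dwarf_cleanup(). A stripped-down sketch of that style of range lookup against the kernel rbtree API; struct example_range is an invented stand-in for struct dwarf_fde:

```c
#include <linux/rbtree.h>

/* Invented stand-in for struct dwarf_fde. */
struct example_range {
	unsigned long start;	/* initial_location */
	unsigned long len;	/* address_range */
	struct rb_node node;
};

static struct rb_root example_root = RB_ROOT;

/* Return the range containing addr, or NULL; mirrors dwarf_lookup_fde(). */
static struct example_range *example_lookup(unsigned long addr)
{
	struct rb_node *n = example_root.rb_node;

	while (n) {
		struct example_range *r = rb_entry(n, struct example_range, node);

		if (addr < r->start)
			n = n->rb_left;
		else if (addr >= r->start + r->len)
			n = n->rb_right;
		else
			return r;	/* start <= addr < start + len */
	}

	return NULL;
}
```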
diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c deleted file mode 100644 index f8bb50c6e050..000000000000 --- a/arch/sh/kernel/early_printk.c +++ /dev/null | |||
@@ -1,85 +0,0 @@ | |||
1 | /* | ||
2 | * arch/sh/kernel/early_printk.c | ||
3 | * | ||
4 | * Copyright (C) 1999, 2000 Niibe Yutaka | ||
5 | * Copyright (C) 2002 M. R. Brown | ||
6 | * Copyright (C) 2004 - 2007 Paul Mundt | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | #include <linux/console.h> | ||
13 | #include <linux/tty.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/io.h> | ||
16 | #include <linux/delay.h> | ||
17 | |||
18 | #include <asm/sh_bios.h> | ||
19 | |||
20 | /* | ||
21 | * Print a string through the BIOS | ||
22 | */ | ||
23 | static void sh_console_write(struct console *co, const char *s, | ||
24 | unsigned count) | ||
25 | { | ||
26 | sh_bios_console_write(s, count); | ||
27 | } | ||
28 | |||
29 | /* | ||
30 | * Setup initial baud/bits/parity. We do two things here: | ||
31 | * - construct a cflag setting for the first rs_open() | ||
32 | * - initialize the serial port | ||
33 | * Return non-zero if we didn't find a serial port. | ||
34 | */ | ||
35 | static int __init sh_console_setup(struct console *co, char *options) | ||
36 | { | ||
37 | int cflag = CREAD | HUPCL | CLOCAL; | ||
38 | |||
39 | /* | ||
40 | * Now construct a cflag setting. | ||
41 | * TODO: this is a totally bogus cflag, as we have | ||
42 | * no idea what serial settings the BIOS is using, or | ||
43 | * even if its using the serial port at all. | ||
44 | */ | ||
45 | cflag |= B115200 | CS8 | /*no parity*/0; | ||
46 | |||
47 | co->cflag = cflag; | ||
48 | |||
49 | return 0; | ||
50 | } | ||
51 | |||
52 | static struct console bios_console = { | ||
53 | .name = "bios", | ||
54 | .write = sh_console_write, | ||
55 | .setup = sh_console_setup, | ||
56 | .flags = CON_PRINTBUFFER, | ||
57 | .index = -1, | ||
58 | }; | ||
59 | |||
60 | static struct console *early_console; | ||
61 | |||
62 | static int __init setup_early_printk(char *buf) | ||
63 | { | ||
64 | int keep_early = 0; | ||
65 | |||
66 | if (!buf) | ||
67 | return 0; | ||
68 | |||
69 | if (strstr(buf, "keep")) | ||
70 | keep_early = 1; | ||
71 | |||
72 | if (!strncmp(buf, "bios", 4)) | ||
73 | early_console = &bios_console; | ||
74 | |||
75 | if (likely(early_console)) { | ||
76 | if (keep_early) | ||
77 | early_console->flags &= ~CON_BOOT; | ||
78 | else | ||
79 | early_console->flags |= CON_BOOT; | ||
80 | register_console(early_console); | ||
81 | } | ||
82 | |||
83 | return 0; | ||
84 | } | ||
85 | early_param("earlyprintk", setup_early_printk); | ||
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S index 1151ecdffa71..fe0b743881b0 100644 --- a/arch/sh/kernel/head_32.S +++ b/arch/sh/kernel/head_32.S | |||
@@ -3,6 +3,7 @@ | |||
3 | * arch/sh/kernel/head.S | 3 | * arch/sh/kernel/head.S |
4 | * | 4 | * |
5 | * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima | 5 | * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima |
6 | * Copyright (C) 2010 Matt Fleming | ||
6 | * | 7 | * |
7 | * This file is subject to the terms and conditions of the GNU General Public | 8 | * This file is subject to the terms and conditions of the GNU General Public |
8 | * License. See the file "COPYING" in the main directory of this archive | 9 | * License. See the file "COPYING" in the main directory of this archive |
@@ -13,6 +14,8 @@ | |||
13 | #include <linux/init.h> | 14 | #include <linux/init.h> |
14 | #include <linux/linkage.h> | 15 | #include <linux/linkage.h> |
15 | #include <asm/thread_info.h> | 16 | #include <asm/thread_info.h> |
17 | #include <asm/mmu.h> | ||
18 | #include <cpu/mmu_context.h> | ||
16 | 19 | ||
17 | #ifdef CONFIG_CPU_SH4A | 20 | #ifdef CONFIG_CPU_SH4A |
18 | #define SYNCO() synco | 21 | #define SYNCO() synco |
@@ -33,7 +36,7 @@ ENTRY(empty_zero_page) | |||
33 | .long 1 /* LOADER_TYPE */ | 36 | .long 1 /* LOADER_TYPE */ |
34 | .long 0x00000000 /* INITRD_START */ | 37 | .long 0x00000000 /* INITRD_START */ |
35 | .long 0x00000000 /* INITRD_SIZE */ | 38 | .long 0x00000000 /* INITRD_SIZE */ |
36 | #if defined(CONFIG_32BIT) && defined(CONFIG_PMB_FIXED) | 39 | #ifdef CONFIG_32BIT |
37 | .long 0x53453f00 + 32 /* "SE?" = 32 bit */ | 40 | .long 0x53453f00 + 32 /* "SE?" = 32 bit */ |
38 | #else | 41 | #else |
39 | .long 0x53453f00 + 29 /* "SE?" = 29 bit */ | 42 | .long 0x53453f00 + 29 /* "SE?" = 29 bit */ |
@@ -82,6 +85,209 @@ ENTRY(_stext) | |||
82 | ldc r0, r7_bank ! ... and initial thread_info | 85 | ldc r0, r7_bank ! ... and initial thread_info |
83 | #endif | 86 | #endif |
84 | 87 | ||
88 | #ifdef CONFIG_PMB | ||
89 | /* | ||
90 | * Reconfigure the initial PMB mappings setup by the hardware. | ||
91 | * | ||
92 | * When we boot in 32-bit MMU mode there are 2 PMB entries already | ||
 93 | * set up for us. | ||
94 | * | ||
95 | * Entry VPN PPN V SZ C UB WT | ||
96 | * --------------------------------------------------------------- | ||
97 | * 0 0x80000000 0x00000000 1 512MB 1 0 1 | ||
98 | * 1 0xA0000000 0x00000000 1 512MB 0 0 0 | ||
99 | * | ||
100 | * But we reprogram them here because we want complete control over | ||
101 | * our address space and the initial mappings may not map PAGE_OFFSET | ||
102 | * to __MEMORY_START (or even map all of our RAM). | ||
103 | * | ||
 104 | * Once we've set up cached and uncached mappings we clear the rest of the | ||
105 | * PMB entries. This clearing also deals with the fact that PMB entries | ||
106 | * can persist across reboots. The PMB could have been left in any state | ||
107 | * when the reboot occurred, so to be safe we clear all entries and start | ||
 108 | * with a clean slate. | ||
109 | * | ||
110 | * The uncached mapping is constructed using the smallest possible | ||
111 | * mapping with a single unbufferable page. Only the kernel text needs to | ||
112 | * be covered via the uncached mapping so that certain functions can be | ||
113 | * run uncached. | ||
114 | * | ||
115 | * Drivers and the like that have previously abused the 1:1 identity | ||
116 | * mapping are unsupported in 32-bit mode and must specify their caching | ||
117 | * preference when page tables are constructed. | ||
118 | * | ||
119 | * This frees up the P2 space for more nefarious purposes. | ||
120 | * | ||
121 | * Register utilization is as follows: | ||
122 | * | ||
123 | * r0 = PMB_DATA data field | ||
124 | * r1 = PMB_DATA address field | ||
125 | * r2 = PMB_ADDR data field | ||
126 | * r3 = PMB_ADDR address field | ||
127 | * r4 = PMB_E_SHIFT | ||
128 | * r5 = remaining amount of RAM to map | ||
129 | * r6 = PMB mapping size we're trying to use | ||
130 | * r7 = cached_to_uncached | ||
131 | * r8 = scratch register | ||
132 | * r9 = scratch register | ||
133 | * r10 = number of PMB entries we've setup | ||
134 | */ | ||
135 | |||
136 | mov.l .LMMUCR, r1 /* Flush the TLB */ | ||
137 | mov.l @r1, r0 | ||
138 | or #MMUCR_TI, r0 | ||
139 | mov.l r0, @r1 | ||
140 | |||
141 | mov.l .LMEMORY_SIZE, r5 | ||
142 | |||
143 | mov #PMB_E_SHIFT, r0 | ||
144 | mov #0x1, r4 | ||
145 | shld r0, r4 | ||
146 | |||
147 | mov.l .LFIRST_DATA_ENTRY, r0 | ||
148 | mov.l .LPMB_DATA, r1 | ||
149 | mov.l .LFIRST_ADDR_ENTRY, r2 | ||
150 | mov.l .LPMB_ADDR, r3 | ||
151 | |||
152 | /* | ||
153 | * First we need to walk the PMB and figure out if there are any | ||
154 | * existing mappings that match the initial mappings VPN/PPN. | ||
155 | * If these have already been established by the bootloader, we | ||
156 | * don't bother setting up new entries here, and let the late PMB | ||
157 | * initialization take care of things instead. | ||
158 | * | ||
159 | * Note that we may need to coalesce and merge entries in order | ||
160 | * to reclaim more available PMB slots, which is much more than | ||
161 | * we want to do at this early stage. | ||
162 | */ | ||
163 | mov #0, r10 | ||
164 | mov #NR_PMB_ENTRIES, r9 | ||
165 | |||
166 | mov r1, r7 /* temporary PMB_DATA iter */ | ||
167 | |||
168 | .Lvalidate_existing_mappings: | ||
169 | |||
170 | mov.l @r7, r8 | ||
171 | and r0, r8 | ||
172 | cmp/eq r0, r8 /* Check for valid __MEMORY_START mappings */ | ||
173 | bt .Lpmb_done | ||
174 | |||
175 | add #1, r10 /* Increment the loop counter */ | ||
176 | cmp/eq r9, r10 | ||
177 | bf/s .Lvalidate_existing_mappings | ||
178 | add r4, r7 /* Increment to the next PMB_DATA entry */ | ||
179 | |||
180 | /* | ||
181 | * If we've fallen through, continue with setting up the initial | ||
182 | * mappings. | ||
183 | */ | ||
184 | |||
185 | mov r5, r7 /* cached_to_uncached */ | ||
186 | mov #0, r10 | ||
187 | |||
188 | #ifdef CONFIG_UNCACHED_MAPPING | ||
189 | /* | ||
190 | * Uncached mapping | ||
191 | */ | ||
192 | mov #(PMB_SZ_16M >> 2), r9 | ||
193 | shll2 r9 | ||
194 | |||
195 | mov #(PMB_UB >> 8), r8 | ||
196 | shll8 r8 | ||
197 | |||
198 | or r0, r8 | ||
199 | or r9, r8 | ||
200 | mov.l r8, @r1 | ||
201 | mov r2, r8 | ||
202 | add r7, r8 | ||
203 | mov.l r8, @r3 | ||
204 | |||
205 | add r4, r1 | ||
206 | add r4, r3 | ||
207 | add #1, r10 | ||
208 | #endif | ||
209 | |||
210 | /* | ||
211 | * Iterate over all of the available sizes from largest to | ||
212 | * smallest for constructing the cached mapping. | ||
213 | */ | ||
214 | #define __PMB_ITER_BY_SIZE(size) \ | ||
215 | .L##size: \ | ||
216 | mov #(size >> 4), r6; \ | ||
217 | shll16 r6; \ | ||
218 | shll8 r6; \ | ||
219 | \ | ||
220 | cmp/hi r5, r6; \ | ||
221 | bt 9999f; \ | ||
222 | \ | ||
223 | mov #(PMB_SZ_##size##M >> 2), r9; \ | ||
224 | shll2 r9; \ | ||
225 | \ | ||
226 | /* \ | ||
227 | * Cached mapping \ | ||
228 | */ \ | ||
229 | mov #PMB_C, r8; \ | ||
230 | or r0, r8; \ | ||
231 | or r9, r8; \ | ||
232 | mov.l r8, @r1; \ | ||
233 | mov.l r2, @r3; \ | ||
234 | \ | ||
235 | /* Increment to the next PMB_DATA entry */ \ | ||
236 | add r4, r1; \ | ||
237 | /* Increment to the next PMB_ADDR entry */ \ | ||
238 | add r4, r3; \ | ||
239 | /* Increment number of PMB entries */ \ | ||
240 | add #1, r10; \ | ||
241 | \ | ||
242 | sub r6, r5; \ | ||
243 | add r6, r0; \ | ||
244 | add r6, r2; \ | ||
245 | \ | ||
246 | bra .L##size; \ | ||
247 | 9999: | ||
248 | |||
249 | __PMB_ITER_BY_SIZE(512) | ||
250 | __PMB_ITER_BY_SIZE(128) | ||
251 | __PMB_ITER_BY_SIZE(64) | ||
252 | __PMB_ITER_BY_SIZE(16) | ||
253 | |||
254 | #ifdef CONFIG_UNCACHED_MAPPING | ||
255 | /* | ||
256 | * Now that we can access it, update cached_to_uncached and | ||
257 | * uncached_size. | ||
258 | */ | ||
259 | mov.l .Lcached_to_uncached, r0 | ||
260 | mov.l r7, @r0 | ||
261 | |||
262 | mov.l .Luncached_size, r0 | ||
263 | mov #1, r7 | ||
264 | shll16 r7 | ||
265 | shll8 r7 | ||
266 | mov.l r7, @r0 | ||
267 | #endif | ||
268 | |||
269 | /* | ||
270 | * Clear the remaining PMB entries. | ||
271 | * | ||
272 | * r3 = entry to begin clearing from | ||
273 | * r10 = number of entries we've setup so far | ||
274 | */ | ||
275 | mov #0, r1 | ||
276 | mov #NR_PMB_ENTRIES, r0 | ||
277 | |||
278 | .Lagain: | ||
279 | mov.l r1, @r3 /* Clear PMB_ADDR entry */ | ||
280 | add #1, r10 /* Increment the loop counter */ | ||
281 | cmp/eq r0, r10 | ||
282 | bf/s .Lagain | ||
283 | add r4, r3 /* Increment to the next PMB_ADDR entry */ | ||
284 | |||
285 | mov.l 6f, r0 | ||
286 | icbi @r0 | ||
287 | |||
288 | .Lpmb_done: | ||
289 | #endif /* CONFIG_PMB */ | ||
290 | |||
85 | #ifndef CONFIG_SH_NO_BSS_INIT | 291 | #ifndef CONFIG_SH_NO_BSS_INIT |
86 | /* | 292 | /* |
87 | * Don't clear BSS if running on slow platforms such as an RTL simulation, | 293 | * Don't clear BSS if running on slow platforms such as an RTL simulation, |
@@ -131,3 +337,16 @@ ENTRY(stack_start) | |||
131 | 5: .long start_kernel | 337 | 5: .long start_kernel |
132 | 6: .long sh_cpu_init | 338 | 6: .long sh_cpu_init |
133 | 7: .long init_thread_union | 339 | 7: .long init_thread_union |
340 | |||
341 | #ifdef CONFIG_PMB | ||
342 | .LPMB_ADDR: .long PMB_ADDR | ||
343 | .LPMB_DATA: .long PMB_DATA | ||
344 | .LFIRST_ADDR_ENTRY: .long PAGE_OFFSET | PMB_V | ||
345 | .LFIRST_DATA_ENTRY: .long __MEMORY_START | PMB_V | ||
346 | .LMMUCR: .long MMUCR | ||
347 | .LMEMORY_SIZE: .long __MEMORY_SIZE | ||
348 | #ifdef CONFIG_UNCACHED_MAPPING | ||
349 | .Lcached_to_uncached: .long cached_to_uncached | ||
350 | .Luncached_size: .long uncached_size | ||
351 | #endif | ||
352 | #endif | ||
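The __PMB_ITER_BY_SIZE() macro above is a greedy fit: starting at 512MB and stepping down through 128MB, 64MB and 16MB, it keeps emitting cached PMB entries of the current size for as long as that size still fits in the unmapped remainder of RAM (the cmp/hi r5, r6 test). The same arithmetic is easier to follow in C; this models only the entry counting, not the actual PMB_ADDR/PMB_DATA programming done by the assembly:

```c
/* Model of the greedy PMB sizing in head_32.S; sizes are in megabytes. */
static const unsigned int example_pmb_sizes_mb[] = { 512, 128, 64, 16 };

static unsigned int example_count_cached_entries(unsigned int ram_mb)
{
	unsigned int i, entries = 0;

	for (i = 0; i < sizeof(example_pmb_sizes_mb) / sizeof(example_pmb_sizes_mb[0]); i++) {
		/* Same test as "cmp/hi r5, r6; bt 9999f": map while size <= remaining. */
		while (ram_mb >= example_pmb_sizes_mb[i]) {
			ram_mb -= example_pmb_sizes_mb[i];
			entries++;	/* one PMB_ADDR/PMB_DATA pair consumed */
		}
	}

	return entries;
}
```

A 448MB board, for instance, ends up with three 128MB entries plus one 64MB entry for the cached mapping, with one further 16MB unbuffered entry when CONFIG_UNCACHED_MAPPING is enabled.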
diff --git a/arch/sh/kernel/head_64.S b/arch/sh/kernel/head_64.S index 3ea765844c74..defd851abefa 100644 --- a/arch/sh/kernel/head_64.S +++ b/arch/sh/kernel/head_64.S | |||
@@ -220,7 +220,6 @@ clear_DTLB: | |||
220 | add.l r22, r63, r22 /* Sign extend */ | 220 | add.l r22, r63, r22 /* Sign extend */ |
221 | putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */ | 221 | putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */ |
222 | 222 | ||
223 | #ifdef CONFIG_EARLY_PRINTK | ||
224 | /* | 223 | /* |
225 | * Setup a DTLB translation for SCIF phys. | 224 | * Setup a DTLB translation for SCIF phys. |
226 | */ | 225 | */ |
@@ -231,7 +230,6 @@ clear_DTLB: | |||
231 | movi 0xfa03, r22 /* 0xfa030000, fixed SCIF virt */ | 230 | movi 0xfa03, r22 /* 0xfa030000, fixed SCIF virt */ |
232 | shori 0x0003, r22 | 231 | shori 0x0003, r22 |
233 | putcfg r21, 0, r22 /* PTEH last */ | 232 | putcfg r21, 0, r22 /* PTEH last */ |
234 | #endif | ||
235 | 233 | ||
236 | /* | 234 | /* |
237 | * Set cache behaviours. | 235 | * Set cache behaviours. |
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c new file mode 100644 index 000000000000..e2f1753d275c --- /dev/null +++ b/arch/sh/kernel/hw_breakpoint.c | |||
@@ -0,0 +1,463 @@ | |||
1 | /* | ||
2 | * arch/sh/kernel/hw_breakpoint.c | ||
3 | * | ||
4 | * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC. | ||
5 | * | ||
6 | * Copyright (C) 2009 - 2010 Paul Mundt | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/perf_event.h> | ||
14 | #include <linux/hw_breakpoint.h> | ||
15 | #include <linux/percpu.h> | ||
16 | #include <linux/kallsyms.h> | ||
17 | #include <linux/notifier.h> | ||
18 | #include <linux/kprobes.h> | ||
19 | #include <linux/kdebug.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <linux/clk.h> | ||
22 | #include <asm/hw_breakpoint.h> | ||
23 | #include <asm/mmu_context.h> | ||
24 | #include <asm/ptrace.h> | ||
25 | |||
26 | /* | ||
27 | * Stores the breakpoints currently in use on each breakpoint address | ||
 28 | * register, on each CPU | ||
29 | */ | ||
30 | static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]); | ||
31 | |||
32 | /* | ||
33 | * A dummy placeholder for early accesses until the CPUs get a chance to | ||
34 | * register their UBCs later in the boot process. | ||
35 | */ | ||
36 | static struct sh_ubc ubc_dummy = { .num_events = 0 }; | ||
37 | |||
38 | static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy; | ||
39 | |||
40 | /* | ||
41 | * Install a perf counter breakpoint. | ||
42 | * | ||
43 | * We seek a free UBC channel and use it for this breakpoint. | ||
44 | * | ||
45 | * Atomic: we hold the counter->ctx->lock and we only handle variables | ||
46 | * and registers local to this cpu. | ||
47 | */ | ||
48 | int arch_install_hw_breakpoint(struct perf_event *bp) | ||
49 | { | ||
50 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | ||
51 | int i; | ||
52 | |||
53 | for (i = 0; i < sh_ubc->num_events; i++) { | ||
54 | struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); | ||
55 | |||
56 | if (!*slot) { | ||
57 | *slot = bp; | ||
58 | break; | ||
59 | } | ||
60 | } | ||
61 | |||
62 | if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot")) | ||
63 | return -EBUSY; | ||
64 | |||
65 | clk_enable(sh_ubc->clk); | ||
66 | sh_ubc->enable(info, i); | ||
67 | |||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | /* | ||
72 | * Uninstall the breakpoint contained in the given counter. | ||
73 | * | ||
74 | * First we search the debug address register it uses and then we disable | ||
75 | * it. | ||
76 | * | ||
77 | * Atomic: we hold the counter->ctx->lock and we only handle variables | ||
78 | * and registers local to this cpu. | ||
79 | */ | ||
80 | void arch_uninstall_hw_breakpoint(struct perf_event *bp) | ||
81 | { | ||
82 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | ||
83 | int i; | ||
84 | |||
85 | for (i = 0; i < sh_ubc->num_events; i++) { | ||
86 | struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); | ||
87 | |||
88 | if (*slot == bp) { | ||
89 | *slot = NULL; | ||
90 | break; | ||
91 | } | ||
92 | } | ||
93 | |||
94 | if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot")) | ||
95 | return; | ||
96 | |||
97 | sh_ubc->disable(info, i); | ||
98 | clk_disable(sh_ubc->clk); | ||
99 | } | ||
100 | |||
101 | static int get_hbp_len(u16 hbp_len) | ||
102 | { | ||
103 | unsigned int len_in_bytes = 0; | ||
104 | |||
105 | switch (hbp_len) { | ||
106 | case SH_BREAKPOINT_LEN_1: | ||
107 | len_in_bytes = 1; | ||
108 | break; | ||
109 | case SH_BREAKPOINT_LEN_2: | ||
110 | len_in_bytes = 2; | ||
111 | break; | ||
112 | case SH_BREAKPOINT_LEN_4: | ||
113 | len_in_bytes = 4; | ||
114 | break; | ||
115 | case SH_BREAKPOINT_LEN_8: | ||
116 | len_in_bytes = 8; | ||
117 | break; | ||
118 | } | ||
119 | return len_in_bytes; | ||
120 | } | ||
121 | |||
122 | /* | ||
123 | * Check for virtual address in user space. | ||
124 | */ | ||
125 | int arch_check_va_in_userspace(unsigned long va, u16 hbp_len) | ||
126 | { | ||
127 | unsigned int len; | ||
128 | |||
129 | len = get_hbp_len(hbp_len); | ||
130 | |||
131 | return (va <= TASK_SIZE - len); | ||
132 | } | ||
133 | |||
134 | /* | ||
135 | * Check for virtual address in kernel space. | ||
136 | */ | ||
137 | static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len) | ||
138 | { | ||
139 | unsigned int len; | ||
140 | |||
141 | len = get_hbp_len(hbp_len); | ||
142 | |||
143 | return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); | ||
144 | } | ||
145 | |||
146 | /* | ||
147 | * Store a breakpoint's encoded address, length, and type. | ||
148 | */ | ||
149 | static int arch_store_info(struct perf_event *bp) | ||
150 | { | ||
151 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | ||
152 | |||
153 | /* | ||
 154 | * User-space requests will always have the address field populated. | ||
155 | * For kernel-addresses, either the address or symbol name can be | ||
156 | * specified. | ||
157 | */ | ||
158 | if (info->name) | ||
159 | info->address = (unsigned long)kallsyms_lookup_name(info->name); | ||
160 | if (info->address) | ||
161 | return 0; | ||
162 | |||
163 | return -EINVAL; | ||
164 | } | ||
165 | |||
166 | int arch_bp_generic_fields(int sh_len, int sh_type, | ||
167 | int *gen_len, int *gen_type) | ||
168 | { | ||
169 | /* Len */ | ||
170 | switch (sh_len) { | ||
171 | case SH_BREAKPOINT_LEN_1: | ||
172 | *gen_len = HW_BREAKPOINT_LEN_1; | ||
173 | break; | ||
174 | case SH_BREAKPOINT_LEN_2: | ||
175 | *gen_len = HW_BREAKPOINT_LEN_2; | ||
176 | break; | ||
177 | case SH_BREAKPOINT_LEN_4: | ||
178 | *gen_len = HW_BREAKPOINT_LEN_4; | ||
179 | break; | ||
180 | case SH_BREAKPOINT_LEN_8: | ||
181 | *gen_len = HW_BREAKPOINT_LEN_8; | ||
182 | break; | ||
183 | default: | ||
184 | return -EINVAL; | ||
185 | } | ||
186 | |||
187 | /* Type */ | ||
188 | switch (sh_type) { | ||
189 | case SH_BREAKPOINT_READ: | ||
190 | *gen_type = HW_BREAKPOINT_R; | ||
191 | case SH_BREAKPOINT_WRITE: | ||
192 | *gen_type = HW_BREAKPOINT_W; | ||
193 | break; | ||
194 | case SH_BREAKPOINT_RW: | ||
195 | *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R; | ||
196 | break; | ||
197 | default: | ||
198 | return -EINVAL; | ||
199 | } | ||
200 | |||
201 | return 0; | ||
202 | } | ||
203 | |||
204 | static int arch_build_bp_info(struct perf_event *bp) | ||
205 | { | ||
206 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | ||
207 | |||
208 | info->address = bp->attr.bp_addr; | ||
209 | |||
210 | /* Len */ | ||
211 | switch (bp->attr.bp_len) { | ||
212 | case HW_BREAKPOINT_LEN_1: | ||
213 | info->len = SH_BREAKPOINT_LEN_1; | ||
214 | break; | ||
215 | case HW_BREAKPOINT_LEN_2: | ||
216 | info->len = SH_BREAKPOINT_LEN_2; | ||
217 | break; | ||
218 | case HW_BREAKPOINT_LEN_4: | ||
219 | info->len = SH_BREAKPOINT_LEN_4; | ||
220 | break; | ||
221 | case HW_BREAKPOINT_LEN_8: | ||
222 | info->len = SH_BREAKPOINT_LEN_8; | ||
223 | break; | ||
224 | default: | ||
225 | return -EINVAL; | ||
226 | } | ||
227 | |||
228 | /* Type */ | ||
229 | switch (bp->attr.bp_type) { | ||
230 | case HW_BREAKPOINT_R: | ||
231 | info->type = SH_BREAKPOINT_READ; | ||
232 | break; | ||
233 | case HW_BREAKPOINT_W: | ||
234 | info->type = SH_BREAKPOINT_WRITE; | ||
235 | break; | ||
236 | case HW_BREAKPOINT_W | HW_BREAKPOINT_R: | ||
237 | info->type = SH_BREAKPOINT_RW; | ||
238 | break; | ||
239 | default: | ||
240 | return -EINVAL; | ||
241 | } | ||
242 | |||
243 | return 0; | ||
244 | } | ||
245 | |||
246 | /* | ||
247 | * Validate the arch-specific HW Breakpoint register settings | ||
248 | */ | ||
249 | int arch_validate_hwbkpt_settings(struct perf_event *bp, | ||
250 | struct task_struct *tsk) | ||
251 | { | ||
252 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | ||
253 | unsigned int align; | ||
254 | int ret; | ||
255 | |||
256 | ret = arch_build_bp_info(bp); | ||
257 | if (ret) | ||
258 | return ret; | ||
259 | |||
260 | ret = -EINVAL; | ||
261 | |||
262 | switch (info->len) { | ||
263 | case SH_BREAKPOINT_LEN_1: | ||
264 | align = 0; | ||
265 | break; | ||
266 | case SH_BREAKPOINT_LEN_2: | ||
267 | align = 1; | ||
268 | break; | ||
269 | case SH_BREAKPOINT_LEN_4: | ||
270 | align = 3; | ||
271 | break; | ||
272 | case SH_BREAKPOINT_LEN_8: | ||
273 | align = 7; | ||
274 | break; | ||
275 | default: | ||
276 | return ret; | ||
277 | } | ||
278 | |||
279 | ret = arch_store_info(bp); | ||
280 | |||
281 | if (ret < 0) | ||
282 | return ret; | ||
283 | |||
284 | /* | ||
285 | * Check that the low-order bits of the address are appropriate | ||
286 | * for the alignment implied by len. | ||
287 | */ | ||
288 | if (info->address & align) | ||
289 | return -EINVAL; | ||
290 | |||
291 | /* Check that the virtual address is in the proper range */ | ||
292 | if (tsk) { | ||
293 | if (!arch_check_va_in_userspace(info->address, info->len)) | ||
294 | return -EFAULT; | ||
295 | } else { | ||
296 | if (!arch_check_va_in_kernelspace(info->address, info->len)) | ||
297 | return -EFAULT; | ||
298 | } | ||
299 | |||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | /* | ||
304 | * Release the user breakpoints used by ptrace | ||
305 | */ | ||
306 | void flush_ptrace_hw_breakpoint(struct task_struct *tsk) | ||
307 | { | ||
308 | int i; | ||
309 | struct thread_struct *t = &tsk->thread; | ||
310 | |||
311 | for (i = 0; i < sh_ubc->num_events; i++) { | ||
312 | unregister_hw_breakpoint(t->ptrace_bps[i]); | ||
313 | t->ptrace_bps[i] = NULL; | ||
314 | } | ||
315 | } | ||
316 | |||
317 | static int __kprobes hw_breakpoint_handler(struct die_args *args) | ||
318 | { | ||
319 | int cpu, i, rc = NOTIFY_STOP; | ||
320 | struct perf_event *bp; | ||
321 | unsigned int cmf, resume_mask; | ||
322 | |||
323 | /* | ||
324 | * Do an early return if none of the channels triggered. | ||
325 | */ | ||
326 | cmf = sh_ubc->triggered_mask(); | ||
327 | if (unlikely(!cmf)) | ||
328 | return NOTIFY_DONE; | ||
329 | |||
330 | /* | ||
331 | * By default, resume all of the active channels. | ||
332 | */ | ||
333 | resume_mask = sh_ubc->active_mask(); | ||
334 | |||
335 | /* | ||
336 | * Disable breakpoints during exception handling. | ||
337 | */ | ||
338 | sh_ubc->disable_all(); | ||
339 | |||
340 | cpu = get_cpu(); | ||
341 | for (i = 0; i < sh_ubc->num_events; i++) { | ||
342 | unsigned long event_mask = (1 << i); | ||
343 | |||
344 | if (likely(!(cmf & event_mask))) | ||
345 | continue; | ||
346 | |||
347 | /* | ||
348 | * The counter may be concurrently released but that can only | ||
349 | * occur from a call_rcu() path. We can then safely fetch | ||
350 | * the breakpoint, use its callback, touch its counter | ||
351 | * while we are in an rcu_read_lock() path. | ||
352 | */ | ||
353 | rcu_read_lock(); | ||
354 | |||
355 | bp = per_cpu(bp_per_reg[i], cpu); | ||
356 | if (bp) | ||
357 | rc = NOTIFY_DONE; | ||
358 | |||
359 | /* | ||
360 | * Reset the condition match flag to denote completion of | ||
361 | * exception handling. | ||
362 | */ | ||
363 | sh_ubc->clear_triggered_mask(event_mask); | ||
364 | |||
365 | /* | ||
366 | * bp can be NULL due to concurrent perf counter | ||
367 | * removing. | ||
368 | */ | ||
369 | if (!bp) { | ||
370 | rcu_read_unlock(); | ||
371 | break; | ||
372 | } | ||
373 | |||
374 | /* | ||
375 | * Don't restore the channel if the breakpoint is from | ||
376 | * ptrace, as it always operates in one-shot mode. | ||
377 | */ | ||
378 | if (bp->overflow_handler == ptrace_triggered) | ||
379 | resume_mask &= ~(1 << i); | ||
380 | |||
381 | perf_bp_event(bp, args->regs); | ||
382 | |||
383 | /* Deliver the signal to userspace */ | ||
384 | if (arch_check_va_in_userspace(bp->attr.bp_addr, | ||
385 | bp->attr.bp_len)) { | ||
386 | siginfo_t info; | ||
387 | |||
388 | info.si_signo = args->signr; | ||
389 | info.si_errno = notifier_to_errno(rc); | ||
390 | info.si_code = TRAP_HWBKPT; | ||
391 | |||
392 | force_sig_info(args->signr, &info, current); | ||
393 | } | ||
394 | |||
395 | rcu_read_unlock(); | ||
396 | } | ||
397 | |||
398 | if (cmf == 0) | ||
399 | rc = NOTIFY_DONE; | ||
400 | |||
401 | sh_ubc->enable_all(resume_mask); | ||
402 | |||
403 | put_cpu(); | ||
404 | |||
405 | return rc; | ||
406 | } | ||
407 | |||
408 | BUILD_TRAP_HANDLER(breakpoint) | ||
409 | { | ||
410 | unsigned long ex = lookup_exception_vector(); | ||
411 | TRAP_HANDLER_DECL; | ||
412 | |||
413 | notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP); | ||
414 | } | ||
415 | |||
416 | /* | ||
417 | * Handle debug exception notifications. | ||
418 | */ | ||
419 | int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused, | ||
420 | unsigned long val, void *data) | ||
421 | { | ||
422 | struct die_args *args = data; | ||
423 | |||
424 | if (val != DIE_BREAKPOINT) | ||
425 | return NOTIFY_DONE; | ||
426 | |||
427 | /* | ||
428 | * If the breakpoint hasn't been triggered by the UBC, it's | ||
429 | * probably from a debugger, so don't do anything more here. | ||
430 | * | ||
431 | * This also permits the UBC interface clock to remain off for | ||
432 | * non-UBC breakpoints, as we don't need to check the triggered | ||
433 | * or active channel masks. | ||
434 | */ | ||
435 | if (args->trapnr != sh_ubc->trap_nr) | ||
436 | return NOTIFY_DONE; | ||
437 | |||
438 | return hw_breakpoint_handler(data); | ||
439 | } | ||
440 | |||
441 | void hw_breakpoint_pmu_read(struct perf_event *bp) | ||
442 | { | ||
443 | /* TODO */ | ||
444 | } | ||
445 | |||
446 | void hw_breakpoint_pmu_unthrottle(struct perf_event *bp) | ||
447 | { | ||
448 | /* TODO */ | ||
449 | } | ||
450 | |||
451 | int register_sh_ubc(struct sh_ubc *ubc) | ||
452 | { | ||
453 | /* Bail if it's already assigned */ | ||
454 | if (sh_ubc != &ubc_dummy) | ||
455 | return -EBUSY; | ||
456 | sh_ubc = ubc; | ||
457 | |||
458 | pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name); | ||
459 | |||
460 | WARN_ON(ubc->num_events > HBP_NUM); | ||
461 | |||
462 | return 0; | ||
463 | } | ||
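
The new hw_breakpoint code above rejects breakpoints whose address is not aligned to the length implied by SH_BREAKPOINT_LEN_*. Not part of the patch: a minimal standalone C sketch of that length-to-mask check; bp_len_to_align_mask() and check_bp_alignment() are hypothetical names used for illustration only.

#include <stdio.h>

static int bp_len_to_align_mask(unsigned int len, unsigned int *mask)
{
	switch (len) {
	case 1: *mask = 0; return 0;
	case 2: *mask = 1; return 0;
	case 4: *mask = 3; return 0;
	case 8: *mask = 7; return 0;
	default: return -1;	/* unsupported length */
	}
}

static int check_bp_alignment(unsigned long addr, unsigned int len)
{
	unsigned int mask;

	if (bp_len_to_align_mask(len, &mask))
		return -1;
	/* Low-order bits must be clear for the alignment implied by len. */
	return (addr & mask) ? -1 : 0;
}

int main(void)
{
	printf("%d\n", check_bp_alignment(0x1000, 4));	/* 0: aligned   */
	printf("%d\n", check_bp_alignment(0x1002, 4));	/* -1: rejected */
	return 0;
}
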
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c index 6b3d706deac1..0fd7b41f0a22 100644 --- a/arch/sh/kernel/idle.c +++ b/arch/sh/kernel/idle.c | |||
@@ -20,10 +20,9 @@ | |||
20 | #include <asm/system.h> | 20 | #include <asm/system.h> |
21 | #include <asm/atomic.h> | 21 | #include <asm/atomic.h> |
22 | 22 | ||
23 | static int hlt_counter; | ||
24 | void (*pm_idle)(void) = NULL; | 23 | void (*pm_idle)(void) = NULL; |
25 | void (*pm_power_off)(void); | 24 | |
26 | EXPORT_SYMBOL(pm_power_off); | 25 | static int hlt_counter; |
27 | 26 | ||
28 | static int __init nohlt_setup(char *__unused) | 27 | static int __init nohlt_setup(char *__unused) |
29 | { | 28 | { |
@@ -131,6 +130,15 @@ static void do_nothing(void *unused) | |||
131 | { | 130 | { |
132 | } | 131 | } |
133 | 132 | ||
133 | void stop_this_cpu(void *unused) | ||
134 | { | ||
135 | local_irq_disable(); | ||
136 | cpu_clear(smp_processor_id(), cpu_online_map); | ||
137 | |||
138 | for (;;) | ||
139 | cpu_sleep(); | ||
140 | } | ||
141 | |||
134 | /* | 142 | /* |
135 | * cpu_idle_wait - Used to ensure that all the CPUs discard old value of | 143 | * cpu_idle_wait - Used to ensure that all the CPUs discard old value of |
136 | * pm_idle and update to new pm_idle value. Required while changing pm_idle | 144 | * pm_idle and update to new pm_idle value. Required while changing pm_idle |
diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c index 69be603aa2d7..4a8bb4eeb8ad 100644 --- a/arch/sh/kernel/io_trapped.c +++ b/arch/sh/kernel/io_trapped.c | |||
@@ -184,31 +184,31 @@ static unsigned long long copy_word(unsigned long src_addr, int src_len, | |||
184 | 184 | ||
185 | switch (src_len) { | 185 | switch (src_len) { |
186 | case 1: | 186 | case 1: |
187 | tmp = ctrl_inb(src_addr); | 187 | tmp = __raw_readb(src_addr); |
188 | break; | 188 | break; |
189 | case 2: | 189 | case 2: |
190 | tmp = ctrl_inw(src_addr); | 190 | tmp = __raw_readw(src_addr); |
191 | break; | 191 | break; |
192 | case 4: | 192 | case 4: |
193 | tmp = ctrl_inl(src_addr); | 193 | tmp = __raw_readl(src_addr); |
194 | break; | 194 | break; |
195 | case 8: | 195 | case 8: |
196 | tmp = ctrl_inq(src_addr); | 196 | tmp = __raw_readq(src_addr); |
197 | break; | 197 | break; |
198 | } | 198 | } |
199 | 199 | ||
200 | switch (dst_len) { | 200 | switch (dst_len) { |
201 | case 1: | 201 | case 1: |
202 | ctrl_outb(tmp, dst_addr); | 202 | __raw_writeb(tmp, dst_addr); |
203 | break; | 203 | break; |
204 | case 2: | 204 | case 2: |
205 | ctrl_outw(tmp, dst_addr); | 205 | __raw_writew(tmp, dst_addr); |
206 | break; | 206 | break; |
207 | case 4: | 207 | case 4: |
208 | ctrl_outl(tmp, dst_addr); | 208 | __raw_writel(tmp, dst_addr); |
209 | break; | 209 | break; |
210 | case 8: | 210 | case 8: |
211 | ctrl_outq(tmp, dst_addr); | 211 | __raw_writeq(tmp, dst_addr); |
212 | break; | 212 | break; |
213 | } | 213 | } |
214 | 214 | ||
@@ -271,6 +271,8 @@ int handle_trapped_io(struct pt_regs *regs, unsigned long address) | |||
271 | insn_size_t instruction; | 271 | insn_size_t instruction; |
272 | int tmp; | 272 | int tmp; |
273 | 273 | ||
274 | if (trapped_io_disable) | ||
275 | return 0; | ||
274 | if (!lookup_tiop(address)) | 276 | if (!lookup_tiop(address)) |
275 | return 0; | 277 | return 0; |
276 | 278 | ||
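
copy_word() above dispatches on the source and destination access widths and now goes through the generic __raw_read*/__raw_write* accessors. Not part of the patch: a standalone sketch of the same width-dispatched copy modelled with ordinary memory instead of MMIO; copy_word_model() is a hypothetical name.

#include <stdint.h>
#include <stdio.h>

/* Read src_len bytes, zero-extend, then store the low dst_len bytes. */
static unsigned long long copy_word_model(const void *src, int src_len,
					  void *dst, int dst_len)
{
	unsigned long long tmp = 0;

	switch (src_len) {
	case 1: tmp = *(const uint8_t *)src;  break;
	case 2: tmp = *(const uint16_t *)src; break;
	case 4: tmp = *(const uint32_t *)src; break;
	case 8: tmp = *(const uint64_t *)src; break;
	}

	switch (dst_len) {
	case 1: *(uint8_t *)dst  = tmp; break;
	case 2: *(uint16_t *)dst = tmp; break;
	case 4: *(uint32_t *)dst = tmp; break;
	case 8: *(uint64_t *)dst = tmp; break;
	}

	return tmp;
}

int main(void)
{
	uint32_t src = 0x12345678;
	uint16_t dst = 0;

	/* A 4-byte read folded into a 2-byte write keeps the low half. */
	copy_word_model(&src, 4, &dst, 2);
	printf("0x%04x\n", dst);	/* 0x5678 on little-endian hosts */
	return 0;
}
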
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c index 3e532d0d4a5c..70c69659b846 100644 --- a/arch/sh/kernel/kgdb.c +++ b/arch/sh/kernel/kgdb.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * SuperH KGDB support | 2 | * SuperH KGDB support |
3 | * | 3 | * |
4 | * Copyright (C) 2008 Paul Mundt | 4 | * Copyright (C) 2008 - 2009 Paul Mundt |
5 | * | 5 | * |
6 | * Single stepping taken from the old stub by Henry Bell and Jeremy Siegel. | 6 | * Single stepping taken from the old stub by Henry Bell and Jeremy Siegel. |
7 | * | 7 | * |
@@ -251,24 +251,60 @@ BUILD_TRAP_HANDLER(singlestep) | |||
251 | local_irq_restore(flags); | 251 | local_irq_restore(flags); |
252 | } | 252 | } |
253 | 253 | ||
254 | static int __kgdb_notify(struct die_args *args, unsigned long cmd) | ||
255 | { | ||
256 | int ret; | ||
257 | |||
258 | switch (cmd) { | ||
259 | case DIE_BREAKPOINT: | ||
260 | /* | ||
261 | * This means a user thread is single stepping | ||
262 | * a system call, which should be ignored | ||
263 | */ | ||
264 | if (test_thread_flag(TIF_SINGLESTEP)) | ||
265 | return NOTIFY_DONE; | ||
266 | |||
267 | ret = kgdb_handle_exception(args->trapnr & 0xff, args->signr, | ||
268 | args->err, args->regs); | ||
269 | if (ret) | ||
270 | return NOTIFY_DONE; | ||
271 | |||
272 | break; | ||
273 | } | ||
254 | 274 | ||
255 | BUILD_TRAP_HANDLER(breakpoint) | 275 | return NOTIFY_STOP; |
276 | } | ||
277 | |||
278 | static int | ||
279 | kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr) | ||
256 | { | 280 | { |
257 | unsigned long flags; | 281 | unsigned long flags; |
258 | TRAP_HANDLER_DECL; | 282 | int ret; |
259 | 283 | ||
260 | local_irq_save(flags); | 284 | local_irq_save(flags); |
261 | kgdb_handle_exception(vec >> 2, SIGTRAP, 0, regs); | 285 | ret = __kgdb_notify(ptr, cmd); |
262 | local_irq_restore(flags); | 286 | local_irq_restore(flags); |
287 | |||
288 | return ret; | ||
263 | } | 289 | } |
264 | 290 | ||
291 | static struct notifier_block kgdb_notifier = { | ||
292 | .notifier_call = kgdb_notify, | ||
293 | |||
294 | /* | ||
295 | * Lowest-prio notifier priority, we want to be notified last: | ||
296 | */ | ||
297 | .priority = -INT_MAX, | ||
298 | }; | ||
299 | |||
265 | int kgdb_arch_init(void) | 300 | int kgdb_arch_init(void) |
266 | { | 301 | { |
267 | return 0; | 302 | return register_die_notifier(&kgdb_notifier); |
268 | } | 303 | } |
269 | 304 | ||
270 | void kgdb_arch_exit(void) | 305 | void kgdb_arch_exit(void) |
271 | { | 306 | { |
307 | unregister_die_notifier(&kgdb_notifier); | ||
272 | } | 308 | } |
273 | 309 | ||
274 | struct kgdb_arch arch_kgdb_ops = { | 310 | struct kgdb_arch arch_kgdb_ops = { |
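
The kgdb change switches from a hard-wired breakpoint trap handler to a die notifier registered at the lowest priority (-INT_MAX), so other DIE_BREAKPOINT users such as the new hw_breakpoint code get first refusal. Not part of the patch: a small standalone model of priority-ordered notifier dispatch where NOTIFY_STOP ends the walk; the names and the two-entry chain are made up for illustration.

#include <stdio.h>
#include <limits.h>

#define NOTIFY_DONE 0
#define NOTIFY_STOP 1

struct notifier {
	int (*call)(unsigned long cmd, void *data);
	int priority;		/* higher priority runs first */
};

static int hw_bp_notify(unsigned long cmd, void *data)
{
	(void)data;
	/* Pretend the UBC claims this breakpoint. */
	return cmd == 1 /* DIE_BREAKPOINT */ ? NOTIFY_STOP : NOTIFY_DONE;
}

static int kgdb_notify_model(unsigned long cmd, void *data)
{
	(void)cmd; (void)data;
	printf("kgdb would handle the exception\n");
	return NOTIFY_STOP;
}

/* Chain kept sorted by descending priority; the KGDB entry sits at -INT_MAX. */
static struct notifier chain[] = {
	{ hw_bp_notify,      0        },
	{ kgdb_notify_model, -INT_MAX },
};

static int notify_die_model(unsigned long cmd, void *data)
{
	unsigned int i;

	for (i = 0; i < sizeof(chain) / sizeof(chain[0]); i++)
		if (chain[i].call(cmd, data) == NOTIFY_STOP)
			return NOTIFY_STOP;
	return NOTIFY_DONE;
}

int main(void)
{
	/* The UBC handler stops the chain, so the KGDB model never runs. */
	notify_die_model(1, NULL);
	return 0;
}
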
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c index 76f280223ebd..7672141c841b 100644 --- a/arch/sh/kernel/machine_kexec.c +++ b/arch/sh/kernel/machine_kexec.c | |||
@@ -21,6 +21,8 @@ | |||
21 | #include <asm/mmu_context.h> | 21 | #include <asm/mmu_context.h> |
22 | #include <asm/io.h> | 22 | #include <asm/io.h> |
23 | #include <asm/cacheflush.h> | 23 | #include <asm/cacheflush.h> |
24 | #include <asm/sh_bios.h> | ||
25 | #include <asm/reboot.h> | ||
24 | 26 | ||
25 | typedef void (*relocate_new_kernel_t)(unsigned long indirection_page, | 27 | typedef void (*relocate_new_kernel_t)(unsigned long indirection_page, |
26 | unsigned long reboot_code_buffer, | 28 | unsigned long reboot_code_buffer, |
@@ -28,15 +30,11 @@ typedef void (*relocate_new_kernel_t)(unsigned long indirection_page, | |||
28 | 30 | ||
29 | extern const unsigned char relocate_new_kernel[]; | 31 | extern const unsigned char relocate_new_kernel[]; |
30 | extern const unsigned int relocate_new_kernel_size; | 32 | extern const unsigned int relocate_new_kernel_size; |
31 | extern void *gdb_vbr_vector; | ||
32 | extern void *vbr_base; | 33 | extern void *vbr_base; |
33 | 34 | ||
34 | void machine_shutdown(void) | 35 | void native_machine_crash_shutdown(struct pt_regs *regs) |
35 | { | ||
36 | } | ||
37 | |||
38 | void machine_crash_shutdown(struct pt_regs *regs) | ||
39 | { | 36 | { |
37 | /* Nothing to do for UP, but definitely broken for SMP.. */ | ||
40 | } | 38 | } |
41 | 39 | ||
42 | /* | 40 | /* |
@@ -117,11 +115,7 @@ void machine_kexec(struct kimage *image) | |||
117 | kexec_info(image); | 115 | kexec_info(image); |
118 | flush_cache_all(); | 116 | flush_cache_all(); |
119 | 117 | ||
120 | #if defined(CONFIG_SH_STANDARD_BIOS) | 118 | sh_bios_vbr_reload(); |
121 | asm volatile("ldc %0, vbr" : | ||
122 | : "r" (((unsigned long) gdb_vbr_vector) - 0x100) | ||
123 | : "memory"); | ||
124 | #endif | ||
125 | 119 | ||
126 | /* now call it */ | 120 | /* now call it */ |
127 | rnk = (relocate_new_kernel_t) reboot_code_buffer; | 121 | rnk = (relocate_new_kernel_t) reboot_code_buffer; |
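
machine_kexec() now calls sh_bios_vbr_reload() instead of open-coding the ldc to VBR. That helper (added to sh_bios.c further down) undoes the +0x100 offset that sh_bios_vbr_init() applies when recording gdb_vbr_vector. Not part of the patch: a trivial standalone sketch of that save/reload arithmetic, with plain variables standing in for the VBR register.

#include <stdio.h>

static unsigned long cpu_vbr;		/* stand-in for the real VBR register */
static unsigned long gdb_vbr_vector;	/* saved as old VBR + 0x100 */

static void vbr_init_model(unsigned long vbr)
{
	if (vbr)
		gdb_vbr_vector = vbr + 0x100;
}

static void vbr_reload_model(void)
{
	if (gdb_vbr_vector)
		cpu_vbr = gdb_vbr_vector - 0x100;	/* back to the BIOS VBR */
}

int main(void)
{
	vbr_init_model(0xa0000000);
	vbr_reload_model();
	printf("reloaded VBR = %#lx\n", cpu_vbr);	/* 0xa0000000 */
	return 0;
}
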
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c new file mode 100644 index 000000000000..81add9b9ea6e --- /dev/null +++ b/arch/sh/kernel/process.c | |||
@@ -0,0 +1,100 @@ | |||
1 | #include <linux/mm.h> | ||
2 | #include <linux/kernel.h> | ||
3 | #include <linux/sched.h> | ||
4 | |||
5 | struct kmem_cache *task_xstate_cachep = NULL; | ||
6 | unsigned int xstate_size; | ||
7 | |||
8 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) | ||
9 | { | ||
10 | *dst = *src; | ||
11 | |||
12 | if (src->thread.xstate) { | ||
13 | dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep, | ||
14 | GFP_KERNEL); | ||
15 | if (!dst->thread.xstate) | ||
16 | return -ENOMEM; | ||
17 | memcpy(dst->thread.xstate, src->thread.xstate, xstate_size); | ||
18 | } | ||
19 | |||
20 | return 0; | ||
21 | } | ||
22 | |||
23 | void free_thread_xstate(struct task_struct *tsk) | ||
24 | { | ||
25 | if (tsk->thread.xstate) { | ||
26 | kmem_cache_free(task_xstate_cachep, tsk->thread.xstate); | ||
27 | tsk->thread.xstate = NULL; | ||
28 | } | ||
29 | } | ||
30 | |||
31 | #if THREAD_SHIFT < PAGE_SHIFT | ||
32 | static struct kmem_cache *thread_info_cache; | ||
33 | |||
34 | struct thread_info *alloc_thread_info(struct task_struct *tsk) | ||
35 | { | ||
36 | struct thread_info *ti; | ||
37 | |||
38 | ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL); | ||
39 | if (unlikely(ti == NULL)) | ||
40 | return NULL; | ||
41 | #ifdef CONFIG_DEBUG_STACK_USAGE | ||
42 | memset(ti, 0, THREAD_SIZE); | ||
43 | #endif | ||
44 | return ti; | ||
45 | } | ||
46 | |||
47 | void free_thread_info(struct thread_info *ti) | ||
48 | { | ||
49 | free_thread_xstate(ti->task); | ||
50 | kmem_cache_free(thread_info_cache, ti); | ||
51 | } | ||
52 | |||
53 | void thread_info_cache_init(void) | ||
54 | { | ||
55 | thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE, | ||
56 | THREAD_SIZE, SLAB_PANIC, NULL); | ||
57 | } | ||
58 | #else | ||
59 | struct thread_info *alloc_thread_info(struct task_struct *tsk) | ||
60 | { | ||
61 | #ifdef CONFIG_DEBUG_STACK_USAGE | ||
62 | gfp_t mask = GFP_KERNEL | __GFP_ZERO; | ||
63 | #else | ||
64 | gfp_t mask = GFP_KERNEL; | ||
65 | #endif | ||
66 | return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER); | ||
67 | } | ||
68 | |||
69 | void free_thread_info(struct thread_info *ti) | ||
70 | { | ||
71 | free_thread_xstate(ti->task); | ||
72 | free_pages((unsigned long)ti, THREAD_SIZE_ORDER); | ||
73 | } | ||
74 | #endif /* THREAD_SHIFT < PAGE_SHIFT */ | ||
75 | |||
76 | void arch_task_cache_init(void) | ||
77 | { | ||
78 | if (!xstate_size) | ||
79 | return; | ||
80 | |||
81 | task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size, | ||
82 | __alignof__(union thread_xstate), | ||
83 | SLAB_PANIC | SLAB_NOTRACK, NULL); | ||
84 | } | ||
85 | |||
86 | #ifdef CONFIG_SH_FPU_EMU | ||
87 | # define HAVE_SOFTFP 1 | ||
88 | #else | ||
89 | # define HAVE_SOFTFP 0 | ||
90 | #endif | ||
91 | |||
92 | void init_thread_xstate(void) | ||
93 | { | ||
94 | if (boot_cpu_data.flags & CPU_HAS_FPU) | ||
95 | xstate_size = sizeof(struct sh_fpu_hard_struct); | ||
96 | else if (HAVE_SOFTFP) | ||
97 | xstate_size = sizeof(struct sh_fpu_soft_struct); | ||
98 | else | ||
99 | xstate_size = 0; | ||
100 | } | ||
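
The new process.c sizes the shared task_xstate slab from whichever FPU representation the CPU actually needs and duplicates that state on fork. Not part of the patch: a standalone model of the same policy using malloc in place of the kmem cache; the struct layouts, sizes, and function names here are illustrative only.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct hard_fpu { unsigned int fp_regs[32], fpscr, fpul; };
struct soft_fpu { unsigned int fp_regs[16], status; };

static size_t xstate_size;

/* Pick the state size once, at boot: hard FPU, soft FPU, or none. */
static void init_thread_xstate_model(int has_fpu, int has_softfp)
{
	if (has_fpu)
		xstate_size = sizeof(struct hard_fpu);
	else if (has_softfp)
		xstate_size = sizeof(struct soft_fpu);
	else
		xstate_size = 0;
}

struct task_model { void *xstate; };

/* Fork-time copy: only tasks that own FPU state get a new allocation. */
static int dup_task_model(struct task_model *dst, const struct task_model *src)
{
	*dst = *src;
	if (src->xstate) {
		dst->xstate = malloc(xstate_size);
		if (!dst->xstate)
			return -1;
		memcpy(dst->xstate, src->xstate, xstate_size);
	}
	return 0;
}

int main(void)
{
	struct task_model parent = { 0 }, child;

	init_thread_xstate_model(1, 0);
	parent.xstate = calloc(1, xstate_size);
	printf("xstate_size = %zu, dup = %d\n", xstate_size,
	       dup_task_model(&child, &parent));
	free(parent.xstate);
	free(child.xstate);
	return 0;
}
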
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index d8af889366a4..3cb88f114d7a 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c | |||
@@ -16,65 +16,15 @@ | |||
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
18 | #include <linux/elfcore.h> | 18 | #include <linux/elfcore.h> |
19 | #include <linux/pm.h> | ||
20 | #include <linux/kallsyms.h> | 19 | #include <linux/kallsyms.h> |
21 | #include <linux/kexec.h> | ||
22 | #include <linux/kdebug.h> | ||
23 | #include <linux/tick.h> | ||
24 | #include <linux/reboot.h> | ||
25 | #include <linux/fs.h> | 20 | #include <linux/fs.h> |
26 | #include <linux/ftrace.h> | 21 | #include <linux/ftrace.h> |
27 | #include <linux/preempt.h> | 22 | #include <linux/hw_breakpoint.h> |
28 | #include <asm/uaccess.h> | 23 | #include <asm/uaccess.h> |
29 | #include <asm/mmu_context.h> | 24 | #include <asm/mmu_context.h> |
30 | #include <asm/pgalloc.h> | ||
31 | #include <asm/system.h> | 25 | #include <asm/system.h> |
32 | #include <asm/ubc.h> | ||
33 | #include <asm/fpu.h> | 26 | #include <asm/fpu.h> |
34 | #include <asm/syscalls.h> | 27 | #include <asm/syscalls.h> |
35 | #include <asm/watchdog.h> | ||
36 | |||
37 | int ubc_usercnt = 0; | ||
38 | |||
39 | #ifdef CONFIG_32BIT | ||
40 | static void watchdog_trigger_immediate(void) | ||
41 | { | ||
42 | sh_wdt_write_cnt(0xFF); | ||
43 | sh_wdt_write_csr(0xC2); | ||
44 | } | ||
45 | |||
46 | void machine_restart(char * __unused) | ||
47 | { | ||
48 | local_irq_disable(); | ||
49 | |||
50 | /* Use watchdog timer to trigger reset */ | ||
51 | watchdog_trigger_immediate(); | ||
52 | |||
53 | while (1) | ||
54 | cpu_sleep(); | ||
55 | } | ||
56 | #else | ||
57 | void machine_restart(char * __unused) | ||
58 | { | ||
59 | /* SR.BL=1 and invoke address error to let CPU reset (manual reset) */ | ||
60 | asm volatile("ldc %0, sr\n\t" | ||
61 | "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001)); | ||
62 | } | ||
63 | #endif | ||
64 | |||
65 | void machine_halt(void) | ||
66 | { | ||
67 | local_irq_disable(); | ||
68 | |||
69 | while (1) | ||
70 | cpu_sleep(); | ||
71 | } | ||
72 | |||
73 | void machine_power_off(void) | ||
74 | { | ||
75 | if (pm_power_off) | ||
76 | pm_power_off(); | ||
77 | } | ||
78 | 28 | ||
79 | void show_regs(struct pt_regs * regs) | 29 | void show_regs(struct pt_regs * regs) |
80 | { | 30 | { |
@@ -91,7 +41,7 @@ void show_regs(struct pt_regs * regs) | |||
91 | printk("PC : %08lx SP : %08lx SR : %08lx ", | 41 | printk("PC : %08lx SP : %08lx SR : %08lx ", |
92 | regs->pc, regs->regs[15], regs->sr); | 42 | regs->pc, regs->regs[15], regs->sr); |
93 | #ifdef CONFIG_MMU | 43 | #ifdef CONFIG_MMU |
94 | printk("TEA : %08x\n", ctrl_inl(MMU_TEA)); | 44 | printk("TEA : %08x\n", __raw_readl(MMU_TEA)); |
95 | #else | 45 | #else |
96 | printk("\n"); | 46 | printk("\n"); |
97 | #endif | 47 | #endif |
@@ -147,21 +97,34 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | |||
147 | } | 97 | } |
148 | EXPORT_SYMBOL(kernel_thread); | 98 | EXPORT_SYMBOL(kernel_thread); |
149 | 99 | ||
100 | void start_thread(struct pt_regs *regs, unsigned long new_pc, | ||
101 | unsigned long new_sp) | ||
102 | { | ||
103 | set_fs(USER_DS); | ||
104 | |||
105 | regs->pr = 0; | ||
106 | regs->sr = SR_FD; | ||
107 | regs->pc = new_pc; | ||
108 | regs->regs[15] = new_sp; | ||
109 | |||
110 | free_thread_xstate(current); | ||
111 | } | ||
112 | EXPORT_SYMBOL(start_thread); | ||
113 | |||
150 | /* | 114 | /* |
151 | * Free current thread data structures etc.. | 115 | * Free current thread data structures etc.. |
152 | */ | 116 | */ |
153 | void exit_thread(void) | 117 | void exit_thread(void) |
154 | { | 118 | { |
155 | if (current->thread.ubc_pc) { | ||
156 | current->thread.ubc_pc = 0; | ||
157 | ubc_usercnt -= 1; | ||
158 | } | ||
159 | } | 119 | } |
160 | 120 | ||
161 | void flush_thread(void) | 121 | void flush_thread(void) |
162 | { | 122 | { |
163 | #if defined(CONFIG_SH_FPU) | ||
164 | struct task_struct *tsk = current; | 123 | struct task_struct *tsk = current; |
124 | |||
125 | flush_ptrace_hw_breakpoint(tsk); | ||
126 | |||
127 | #if defined(CONFIG_SH_FPU) | ||
165 | /* Forget lazy FPU state */ | 128 | /* Forget lazy FPU state */ |
166 | clear_fpu(tsk, task_pt_regs(tsk)); | 129 | clear_fpu(tsk, task_pt_regs(tsk)); |
167 | clear_used_math(); | 130 | clear_used_math(); |
@@ -209,11 +172,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
209 | { | 172 | { |
210 | struct thread_info *ti = task_thread_info(p); | 173 | struct thread_info *ti = task_thread_info(p); |
211 | struct pt_regs *childregs; | 174 | struct pt_regs *childregs; |
175 | |||
212 | #if defined(CONFIG_SH_DSP) | 176 | #if defined(CONFIG_SH_DSP) |
213 | struct task_struct *tsk = current; | 177 | struct task_struct *tsk = current; |
214 | #endif | ||
215 | 178 | ||
216 | #if defined(CONFIG_SH_DSP) | ||
217 | if (is_dsp_enabled(tsk)) { | 179 | if (is_dsp_enabled(tsk)) { |
218 | /* We can use the __save_dsp or just copy the struct: | 180 | /* We can use the __save_dsp or just copy the struct: |
219 | * __save_dsp(p); | 181 | * __save_dsp(p); |
@@ -244,53 +206,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
244 | p->thread.sp = (unsigned long) childregs; | 206 | p->thread.sp = (unsigned long) childregs; |
245 | p->thread.pc = (unsigned long) ret_from_fork; | 207 | p->thread.pc = (unsigned long) ret_from_fork; |
246 | 208 | ||
247 | p->thread.ubc_pc = 0; | 209 | memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); |
248 | 210 | ||
249 | return 0; | 211 | return 0; |
250 | } | 212 | } |
251 | 213 | ||
252 | /* Tracing by user break controller. */ | ||
253 | static void ubc_set_tracing(int asid, unsigned long pc) | ||
254 | { | ||
255 | #if defined(CONFIG_CPU_SH4A) | ||
256 | unsigned long val; | ||
257 | |||
258 | val = (UBC_CBR_ID_INST | UBC_CBR_RW_READ | UBC_CBR_CE); | ||
259 | val |= (UBC_CBR_AIE | UBC_CBR_AIV_SET(asid)); | ||
260 | |||
261 | ctrl_outl(val, UBC_CBR0); | ||
262 | ctrl_outl(pc, UBC_CAR0); | ||
263 | ctrl_outl(0x0, UBC_CAMR0); | ||
264 | ctrl_outl(0x0, UBC_CBCR); | ||
265 | |||
266 | val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE); | ||
267 | ctrl_outl(val, UBC_CRR0); | ||
268 | |||
269 | /* Read UBC register that we wrote last, for checking update */ | ||
270 | val = ctrl_inl(UBC_CRR0); | ||
271 | |||
272 | #else /* CONFIG_CPU_SH4A */ | ||
273 | ctrl_outl(pc, UBC_BARA); | ||
274 | |||
275 | #ifdef CONFIG_MMU | ||
276 | ctrl_outb(asid, UBC_BASRA); | ||
277 | #endif | ||
278 | |||
279 | ctrl_outl(0, UBC_BAMRA); | ||
280 | |||
281 | if (current_cpu_data.type == CPU_SH7729 || | ||
282 | current_cpu_data.type == CPU_SH7710 || | ||
283 | current_cpu_data.type == CPU_SH7712 || | ||
284 | current_cpu_data.type == CPU_SH7203){ | ||
285 | ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA); | ||
286 | ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR); | ||
287 | } else { | ||
288 | ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA); | ||
289 | ctrl_outw(BRCR_PCBA, UBC_BRCR); | ||
290 | } | ||
291 | #endif /* CONFIG_CPU_SH4A */ | ||
292 | } | ||
293 | |||
294 | /* | 214 | /* |
295 | * switch_to(x,y) should switch tasks from x to y. | 215 | * switch_to(x,y) should switch tasks from x to y. |
296 | * | 216 | * |
@@ -304,7 +224,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next) | |||
304 | 224 | ||
305 | /* we're going to use this soon, after a few expensive things */ | 225 | /* we're going to use this soon, after a few expensive things */ |
306 | if (next->fpu_counter > 5) | 226 | if (next->fpu_counter > 5) |
307 | prefetch(&next_t->fpu.hard); | 227 | prefetch(next_t->xstate); |
308 | 228 | ||
309 | #ifdef CONFIG_MMU | 229 | #ifdef CONFIG_MMU |
310 | /* | 230 | /* |
@@ -316,32 +236,13 @@ __switch_to(struct task_struct *prev, struct task_struct *next) | |||
316 | : "r" (task_thread_info(next))); | 236 | : "r" (task_thread_info(next))); |
317 | #endif | 237 | #endif |
318 | 238 | ||
319 | /* If no tasks are using the UBC, we're done */ | ||
320 | if (ubc_usercnt == 0) | ||
321 | /* If no tasks are using the UBC, we're done */; | ||
322 | else if (next->thread.ubc_pc && next->mm) { | ||
323 | int asid = 0; | ||
324 | #ifdef CONFIG_MMU | ||
325 | asid |= cpu_asid(smp_processor_id(), next->mm); | ||
326 | #endif | ||
327 | ubc_set_tracing(asid, next->thread.ubc_pc); | ||
328 | } else { | ||
329 | #if defined(CONFIG_CPU_SH4A) | ||
330 | ctrl_outl(UBC_CBR_INIT, UBC_CBR0); | ||
331 | ctrl_outl(UBC_CRR_INIT, UBC_CRR0); | ||
332 | #else | ||
333 | ctrl_outw(0, UBC_BBRA); | ||
334 | ctrl_outw(0, UBC_BBRB); | ||
335 | #endif | ||
336 | } | ||
337 | |||
338 | /* | 239 | /* |
339 | * If the task has used fpu the last 5 timeslices, just do a full | 240 | * If the task has used fpu the last 5 timeslices, just do a full |
340 | * restore of the math state immediately to avoid the trap; the | 241 | * restore of the math state immediately to avoid the trap; the |
341 | * chances of needing FPU soon are obviously high now | 242 | * chances of needing FPU soon are obviously high now |
342 | */ | 243 | */ |
343 | if (next->fpu_counter > 5) | 244 | if (next->fpu_counter > 5) |
344 | fpu_state_restore(task_pt_regs(next)); | 245 | __fpu_state_restore(); |
345 | 246 | ||
346 | return prev; | 247 | return prev; |
347 | } | 248 | } |
@@ -434,20 +335,3 @@ unsigned long get_wchan(struct task_struct *p) | |||
434 | 335 | ||
435 | return pc; | 336 | return pc; |
436 | } | 337 | } |
437 | |||
438 | asmlinkage void break_point_trap(void) | ||
439 | { | ||
440 | /* Clear tracing. */ | ||
441 | #if defined(CONFIG_CPU_SH4A) | ||
442 | ctrl_outl(UBC_CBR_INIT, UBC_CBR0); | ||
443 | ctrl_outl(UBC_CRR_INIT, UBC_CRR0); | ||
444 | #else | ||
445 | ctrl_outw(0, UBC_BBRA); | ||
446 | ctrl_outw(0, UBC_BBRB); | ||
447 | ctrl_outl(0, UBC_BRCR); | ||
448 | #endif | ||
449 | current->thread.ubc_pc = 0; | ||
450 | ubc_usercnt -= 1; | ||
451 | |||
452 | force_sig(SIGTRAP, current); | ||
453 | } | ||
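
__switch_to() keeps the existing heuristic of eagerly prefetching and restoring FPU state when the incoming task has used the FPU in each of its last five timeslices, now operating on the external xstate buffer. Not part of the patch: a tiny standalone model of that counter-driven lazy/eager decision; the threshold of 5 comes from the code above, everything else is illustrative.

#include <stdio.h>

struct task_model {
	int fpu_counter;	/* consecutive timeslices that used the FPU */
	int fpu_loaded;		/* is the FPU state live in registers? */
};

/* Called on every context switch to the incoming task. */
static void switch_in(struct task_model *next)
{
	if (next->fpu_counter > 5) {
		/* Likely to need the FPU again: restore now, skip the trap. */
		next->fpu_loaded = 1;
	} else {
		/* Stay lazy: the first FPU use will fault the state in. */
		next->fpu_loaded = 0;
	}
}

int main(void)
{
	struct task_model hot  = { .fpu_counter = 9 };
	struct task_model cold = { .fpu_counter = 1 };

	switch_in(&hot);
	switch_in(&cold);
	printf("hot=%d cold=%d\n", hot.fpu_loaded, cold.fpu_loaded);
	return 0;
}
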
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c index ec79faf6f021..c90957a459ac 100644 --- a/arch/sh/kernel/process_64.c +++ b/arch/sh/kernel/process_64.c | |||
@@ -32,30 +32,7 @@ | |||
32 | 32 | ||
33 | struct task_struct *last_task_used_math = NULL; | 33 | struct task_struct *last_task_used_math = NULL; |
34 | 34 | ||
35 | void machine_restart(char * __unused) | 35 | void show_regs(struct pt_regs *regs) |
36 | { | ||
37 | extern void phys_stext(void); | ||
38 | |||
39 | phys_stext(); | ||
40 | } | ||
41 | |||
42 | void machine_halt(void) | ||
43 | { | ||
44 | for (;;); | ||
45 | } | ||
46 | |||
47 | void machine_power_off(void) | ||
48 | { | ||
49 | __asm__ __volatile__ ( | ||
50 | "sleep\n\t" | ||
51 | "synci\n\t" | ||
52 | "nop;nop;nop;nop\n\t" | ||
53 | ); | ||
54 | |||
55 | panic("Unexpected wakeup!\n"); | ||
56 | } | ||
57 | |||
58 | void show_regs(struct pt_regs * regs) | ||
59 | { | 36 | { |
60 | unsigned long long ah, al, bh, bl, ch, cl; | 37 | unsigned long long ah, al, bh, bl, ch, cl; |
61 | 38 | ||
@@ -410,7 +387,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) | |||
410 | regs->sr |= SR_FD; | 387 | regs->sr |= SR_FD; |
411 | } | 388 | } |
412 | 389 | ||
413 | memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu)); | 390 | memcpy(fpu, &tsk->thread.xstate->hardfpu, sizeof(*fpu)); |
414 | } | 391 | } |
415 | 392 | ||
416 | return fpvalid; | 393 | return fpvalid; |
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index 9be35f348093..c625cdab76dd 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * SuperH process tracing | 2 | * SuperH process tracing |
3 | * | 3 | * |
4 | * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka | 4 | * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka |
5 | * Copyright (C) 2002 - 2008 Paul Mundt | 5 | * Copyright (C) 2002 - 2009 Paul Mundt |
6 | * | 6 | * |
7 | * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp> | 7 | * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp> |
8 | * | 8 | * |
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/tracehook.h> | 26 | #include <linux/tracehook.h> |
27 | #include <linux/elf.h> | 27 | #include <linux/elf.h> |
28 | #include <linux/regset.h> | 28 | #include <linux/regset.h> |
29 | #include <linux/hw_breakpoint.h> | ||
29 | #include <asm/uaccess.h> | 30 | #include <asm/uaccess.h> |
30 | #include <asm/pgtable.h> | 31 | #include <asm/pgtable.h> |
31 | #include <asm/system.h> | 32 | #include <asm/system.h> |
@@ -63,33 +64,64 @@ static inline int put_stack_long(struct task_struct *task, int offset, | |||
63 | return 0; | 64 | return 0; |
64 | } | 65 | } |
65 | 66 | ||
66 | void user_enable_single_step(struct task_struct *child) | 67 | void ptrace_triggered(struct perf_event *bp, int nmi, |
68 | struct perf_sample_data *data, struct pt_regs *regs) | ||
67 | { | 69 | { |
68 | /* Next scheduling will set up UBC */ | 70 | struct perf_event_attr attr; |
69 | if (child->thread.ubc_pc == 0) | 71 | |
70 | ubc_usercnt += 1; | 72 | /* |
73 | * Disable the breakpoint request here since ptrace has defined a | ||
74 | * one-shot behaviour for breakpoint exceptions. | ||
75 | */ | ||
76 | attr = bp->attr; | ||
77 | attr.disabled = true; | ||
78 | modify_user_hw_breakpoint(bp, &attr); | ||
79 | } | ||
80 | |||
81 | static int set_single_step(struct task_struct *tsk, unsigned long addr) | ||
82 | { | ||
83 | struct thread_struct *thread = &tsk->thread; | ||
84 | struct perf_event *bp; | ||
85 | struct perf_event_attr attr; | ||
86 | |||
87 | bp = thread->ptrace_bps[0]; | ||
88 | if (!bp) { | ||
89 | hw_breakpoint_init(&attr); | ||
90 | |||
91 | attr.bp_addr = addr; | ||
92 | attr.bp_len = HW_BREAKPOINT_LEN_2; | ||
93 | attr.bp_type = HW_BREAKPOINT_R; | ||
94 | |||
95 | bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk); | ||
96 | if (IS_ERR(bp)) | ||
97 | return PTR_ERR(bp); | ||
98 | |||
99 | thread->ptrace_bps[0] = bp; | ||
100 | } else { | ||
101 | int err; | ||
102 | |||
103 | attr = bp->attr; | ||
104 | attr.bp_addr = addr; | ||
105 | err = modify_user_hw_breakpoint(bp, &attr); | ||
106 | if (unlikely(err)) | ||
107 | return err; | ||
108 | } | ||
109 | |||
110 | return 0; | ||
111 | } | ||
71 | 112 | ||
72 | child->thread.ubc_pc = get_stack_long(child, | 113 | void user_enable_single_step(struct task_struct *child) |
73 | offsetof(struct pt_regs, pc)); | 114 | { |
115 | unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc)); | ||
74 | 116 | ||
75 | set_tsk_thread_flag(child, TIF_SINGLESTEP); | 117 | set_tsk_thread_flag(child, TIF_SINGLESTEP); |
118 | |||
119 | set_single_step(child, pc); | ||
76 | } | 120 | } |
77 | 121 | ||
78 | void user_disable_single_step(struct task_struct *child) | 122 | void user_disable_single_step(struct task_struct *child) |
79 | { | 123 | { |
80 | clear_tsk_thread_flag(child, TIF_SINGLESTEP); | 124 | clear_tsk_thread_flag(child, TIF_SINGLESTEP); |
81 | |||
82 | /* | ||
83 | * Ensure the UBC is not programmed at the next context switch. | ||
84 | * | ||
85 | * Normally this is not needed but there are sequences such as | ||
86 | * singlestep, signal delivery, and continue that leave the | ||
87 | * ubc_pc non-zero leading to spurious SIGTRAPs. | ||
88 | */ | ||
89 | if (child->thread.ubc_pc != 0) { | ||
90 | ubc_usercnt -= 1; | ||
91 | child->thread.ubc_pc = 0; | ||
92 | } | ||
93 | } | 125 | } |
94 | 126 | ||
95 | /* | 127 | /* |
@@ -163,10 +195,10 @@ int fpregs_get(struct task_struct *target, | |||
163 | 195 | ||
164 | if ((boot_cpu_data.flags & CPU_HAS_FPU)) | 196 | if ((boot_cpu_data.flags & CPU_HAS_FPU)) |
165 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, | 197 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, |
166 | &target->thread.fpu.hard, 0, -1); | 198 | &target->thread.xstate->hardfpu, 0, -1); |
167 | 199 | ||
168 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, | 200 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, |
169 | &target->thread.fpu.soft, 0, -1); | 201 | &target->thread.xstate->softfpu, 0, -1); |
170 | } | 202 | } |
171 | 203 | ||
172 | static int fpregs_set(struct task_struct *target, | 204 | static int fpregs_set(struct task_struct *target, |
@@ -184,10 +216,10 @@ static int fpregs_set(struct task_struct *target, | |||
184 | 216 | ||
185 | if ((boot_cpu_data.flags & CPU_HAS_FPU)) | 217 | if ((boot_cpu_data.flags & CPU_HAS_FPU)) |
186 | return user_regset_copyin(&pos, &count, &kbuf, &ubuf, | 218 | return user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
187 | &target->thread.fpu.hard, 0, -1); | 219 | &target->thread.xstate->hardfpu, 0, -1); |
188 | 220 | ||
189 | return user_regset_copyin(&pos, &count, &kbuf, &ubuf, | 221 | return user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
190 | &target->thread.fpu.soft, 0, -1); | 222 | &target->thread.xstate->softfpu, 0, -1); |
191 | } | 223 | } |
192 | 224 | ||
193 | static int fpregs_active(struct task_struct *target, | 225 | static int fpregs_active(struct task_struct *target, |
@@ -333,7 +365,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
333 | else | 365 | else |
334 | tmp = 0; | 366 | tmp = 0; |
335 | } else | 367 | } else |
336 | tmp = ((long *)&child->thread.fpu) | 368 | tmp = ((long *)child->thread.xstate) |
337 | [(addr - (long)&dummy->fpu) >> 2]; | 369 | [(addr - (long)&dummy->fpu) >> 2]; |
338 | } else if (addr == (long) &dummy->u_fpvalid) | 370 | } else if (addr == (long) &dummy->u_fpvalid) |
339 | tmp = !!tsk_used_math(child); | 371 | tmp = !!tsk_used_math(child); |
@@ -362,7 +394,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
362 | else if (addr >= (long) &dummy->fpu && | 394 | else if (addr >= (long) &dummy->fpu && |
363 | addr < (long) &dummy->u_fpvalid) { | 395 | addr < (long) &dummy->u_fpvalid) { |
364 | set_stopped_child_used_math(child); | 396 | set_stopped_child_used_math(child); |
365 | ((long *)&child->thread.fpu) | 397 | ((long *)child->thread.xstate) |
366 | [(addr - (long)&dummy->fpu) >> 2] = data; | 398 | [(addr - (long)&dummy->fpu) >> 2] = data; |
367 | ret = 0; | 399 | ret = 0; |
368 | } else if (addr == (long) &dummy->u_fpvalid) { | 400 | } else if (addr == (long) &dummy->u_fpvalid) { |
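
user_enable_single_step() now plants a one-shot hardware breakpoint on the next PC instead of bumping the old ubc_usercnt bookkeeping, and ptrace_triggered() disables that breakpoint again as soon as it fires. Not part of the patch: a standalone model of the one-shot behaviour; the structure and function names are invented for illustration.

#include <stdio.h>

struct bp_model {
	unsigned long addr;
	int enabled;
};

/* Reuse the slot if it exists, otherwise "register" a new breakpoint. */
static void set_single_step_model(struct bp_model *slot, unsigned long pc)
{
	slot->addr = pc;
	slot->enabled = 1;
}

/* ptrace breakpoints are one-shot: the trigger path disarms them. */
static void triggered_model(struct bp_model *slot)
{
	slot->enabled = 0;
}

int main(void)
{
	struct bp_model bp = { 0 };

	set_single_step_model(&bp, 0x400100);	/* arm at the next PC */
	triggered_model(&bp);			/* fires once, then disarms */
	printf("addr=%#lx enabled=%d\n", bp.addr, bp.enabled);
	return 0;
}
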
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c index b063eb8b18e3..5fd644da7f02 100644 --- a/arch/sh/kernel/ptrace_64.c +++ b/arch/sh/kernel/ptrace_64.c | |||
@@ -88,7 +88,7 @@ get_fpu_long(struct task_struct *task, unsigned long addr) | |||
88 | regs->sr |= SR_FD; | 88 | regs->sr |= SR_FD; |
89 | } | 89 | } |
90 | 90 | ||
91 | tmp = ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)]; | 91 | tmp = ((long *)task->thread.xstate)[addr / sizeof(unsigned long)]; |
92 | return tmp; | 92 | return tmp; |
93 | } | 93 | } |
94 | 94 | ||
@@ -114,8 +114,7 @@ put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data) | |||
114 | regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1; | 114 | regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1; |
115 | 115 | ||
116 | if (!tsk_used_math(task)) { | 116 | if (!tsk_used_math(task)) { |
117 | fpinit(&task->thread.fpu.hard); | 117 | init_fpu(task); |
118 | set_stopped_child_used_math(task); | ||
119 | } else if (last_task_used_math == task) { | 118 | } else if (last_task_used_math == task) { |
120 | enable_fpu(); | 119 | enable_fpu(); |
121 | save_fpu(task); | 120 | save_fpu(task); |
@@ -124,7 +123,7 @@ put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data) | |||
124 | regs->sr |= SR_FD; | 123 | regs->sr |= SR_FD; |
125 | } | 124 | } |
126 | 125 | ||
127 | ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)] = data; | 126 | ((long *)task->thread.xstate)[addr / sizeof(unsigned long)] = data; |
128 | return 0; | 127 | return 0; |
129 | } | 128 | } |
130 | 129 | ||
@@ -226,7 +225,7 @@ int fpregs_get(struct task_struct *target, | |||
226 | return ret; | 225 | return ret; |
227 | 226 | ||
228 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, | 227 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, |
229 | &target->thread.fpu.hard, 0, -1); | 228 | &target->thread.xstate->hardfpu, 0, -1); |
230 | } | 229 | } |
231 | 230 | ||
232 | static int fpregs_set(struct task_struct *target, | 231 | static int fpregs_set(struct task_struct *target, |
@@ -243,7 +242,7 @@ static int fpregs_set(struct task_struct *target, | |||
243 | set_stopped_child_used_math(target); | 242 | set_stopped_child_used_math(target); |
244 | 243 | ||
245 | return user_regset_copyin(&pos, &count, &kbuf, &ubuf, | 244 | return user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
246 | &target->thread.fpu.hard, 0, -1); | 245 | &target->thread.xstate->hardfpu, 0, -1); |
247 | } | 246 | } |
248 | 247 | ||
249 | static int fpregs_active(struct task_struct *target, | 248 | static int fpregs_active(struct task_struct *target, |
@@ -486,9 +485,10 @@ asmlinkage void do_single_step(unsigned long long vec, struct pt_regs *regs) | |||
486 | } | 485 | } |
487 | 486 | ||
488 | /* Called with interrupts disabled */ | 487 | /* Called with interrupts disabled */ |
489 | asmlinkage void do_software_break_point(unsigned long long vec, | 488 | BUILD_TRAP_HANDLER(breakpoint) |
490 | struct pt_regs *regs) | ||
491 | { | 489 | { |
490 | TRAP_HANDLER_DECL; | ||
491 | |||
492 | /* We need to forward step the PC, to counteract the backstep done | 492 | /* We need to forward step the PC, to counteract the backstep done |
493 | in signal.c. */ | 493 | in signal.c. */ |
494 | local_irq_enable(); | 494 | local_irq_enable(); |
diff --git a/arch/sh/kernel/reboot.c b/arch/sh/kernel/reboot.c new file mode 100644 index 000000000000..b1fca66bb92e --- /dev/null +++ b/arch/sh/kernel/reboot.c | |||
@@ -0,0 +1,98 @@ | |||
1 | #include <linux/pm.h> | ||
2 | #include <linux/kexec.h> | ||
3 | #include <linux/kernel.h> | ||
4 | #include <linux/reboot.h> | ||
5 | #include <linux/module.h> | ||
6 | #ifdef CONFIG_SUPERH32 | ||
7 | #include <asm/watchdog.h> | ||
8 | #endif | ||
9 | #include <asm/addrspace.h> | ||
10 | #include <asm/reboot.h> | ||
11 | #include <asm/system.h> | ||
12 | |||
13 | void (*pm_power_off)(void); | ||
14 | EXPORT_SYMBOL(pm_power_off); | ||
15 | |||
16 | #ifdef CONFIG_SUPERH32 | ||
17 | static void watchdog_trigger_immediate(void) | ||
18 | { | ||
19 | sh_wdt_write_cnt(0xFF); | ||
20 | sh_wdt_write_csr(0xC2); | ||
21 | } | ||
22 | #endif | ||
23 | |||
24 | static void native_machine_restart(char * __unused) | ||
25 | { | ||
26 | local_irq_disable(); | ||
27 | |||
28 | /* Address error with SR.BL=1 first. */ | ||
29 | trigger_address_error(); | ||
30 | |||
31 | #ifdef CONFIG_SUPERH32 | ||
32 | /* If that fails or is unsupported, go for the watchdog next. */ | ||
33 | watchdog_trigger_immediate(); | ||
34 | #endif | ||
35 | |||
36 | /* | ||
37 | * Give up and sleep. | ||
38 | */ | ||
39 | while (1) | ||
40 | cpu_sleep(); | ||
41 | } | ||
42 | |||
43 | static void native_machine_shutdown(void) | ||
44 | { | ||
45 | smp_send_stop(); | ||
46 | } | ||
47 | |||
48 | static void native_machine_power_off(void) | ||
49 | { | ||
50 | if (pm_power_off) | ||
51 | pm_power_off(); | ||
52 | } | ||
53 | |||
54 | static void native_machine_halt(void) | ||
55 | { | ||
56 | /* stop other cpus */ | ||
57 | machine_shutdown(); | ||
58 | |||
59 | /* stop this cpu */ | ||
60 | stop_this_cpu(NULL); | ||
61 | } | ||
62 | |||
63 | struct machine_ops machine_ops = { | ||
64 | .power_off = native_machine_power_off, | ||
65 | .shutdown = native_machine_shutdown, | ||
66 | .restart = native_machine_restart, | ||
67 | .halt = native_machine_halt, | ||
68 | #ifdef CONFIG_KEXEC | ||
69 | .crash_shutdown = native_machine_crash_shutdown, | ||
70 | #endif | ||
71 | }; | ||
72 | |||
73 | void machine_power_off(void) | ||
74 | { | ||
75 | machine_ops.power_off(); | ||
76 | } | ||
77 | |||
78 | void machine_shutdown(void) | ||
79 | { | ||
80 | machine_ops.shutdown(); | ||
81 | } | ||
82 | |||
83 | void machine_restart(char *cmd) | ||
84 | { | ||
85 | machine_ops.restart(cmd); | ||
86 | } | ||
87 | |||
88 | void machine_halt(void) | ||
89 | { | ||
90 | machine_ops.halt(); | ||
91 | } | ||
92 | |||
93 | #ifdef CONFIG_KEXEC | ||
94 | void machine_crash_shutdown(struct pt_regs *regs) | ||
95 | { | ||
96 | machine_ops.crash_shutdown(regs); | ||
97 | } | ||
98 | #endif | ||
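
reboot.c centralises the old per-build machine_restart/halt/power_off implementations behind a struct machine_ops of function pointers, with thin machine_*() wrappers so a platform can override individual operations. Not part of the patch: a standalone sketch of the same dispatch pattern; the default handlers here just print.

#include <stdio.h>

struct machine_ops_model {
	void (*restart)(const char *cmd);
	void (*halt)(void);
	void (*power_off)(void);
};

static void default_restart(const char *cmd)
{
	printf("restart(%s)\n", cmd ? cmd : "");
}

static void default_halt(void)      { printf("halt\n"); }
static void default_power_off(void) { printf("power off\n"); }

/* A board file could overwrite any member before these wrappers run. */
static struct machine_ops_model ops = {
	.restart   = default_restart,
	.halt      = default_halt,
	.power_off = default_power_off,
};

static void machine_restart_model(const char *cmd) { ops.restart(cmd); }
static void machine_halt_model(void)               { ops.halt(); }
static void machine_power_off_model(void)          { ops.power_off(); }

int main(void)
{
	machine_restart_model("now");
	machine_halt_model();
	machine_power_off_model();
	return 0;
}
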
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 8b0e69792cf4..3459e70eed72 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c | |||
@@ -421,6 +421,8 @@ void __init setup_arch(char **cmdline_p) | |||
421 | 421 | ||
422 | parse_early_param(); | 422 | parse_early_param(); |
423 | 423 | ||
424 | uncached_init(); | ||
425 | |||
424 | plat_early_device_setup(); | 426 | plat_early_device_setup(); |
425 | 427 | ||
426 | /* Let earlyprintk output early console messages */ | 428 | /* Let earlyprintk output early console messages */ |
@@ -449,17 +451,15 @@ void __init setup_arch(char **cmdline_p) | |||
449 | #ifdef CONFIG_DUMMY_CONSOLE | 451 | #ifdef CONFIG_DUMMY_CONSOLE |
450 | conswitchp = &dummy_con; | 452 | conswitchp = &dummy_con; |
451 | #endif | 453 | #endif |
454 | paging_init(); | ||
455 | pmb_init(); | ||
456 | |||
457 | ioremap_fixed_init(); | ||
452 | 458 | ||
453 | /* Perform the machine specific initialisation */ | 459 | /* Perform the machine specific initialisation */ |
454 | if (likely(sh_mv.mv_setup)) | 460 | if (likely(sh_mv.mv_setup)) |
455 | sh_mv.mv_setup(cmdline_p); | 461 | sh_mv.mv_setup(cmdline_p); |
456 | 462 | ||
457 | paging_init(); | ||
458 | |||
459 | #ifdef CONFIG_PMB_ENABLE | ||
460 | pmb_init(); | ||
461 | #endif | ||
462 | |||
463 | #ifdef CONFIG_SMP | 463 | #ifdef CONFIG_SMP |
464 | plat_smp_setup(); | 464 | plat_smp_setup(); |
465 | #endif | 465 | #endif |
diff --git a/arch/sh/kernel/sh_bios.c b/arch/sh/kernel/sh_bios.c index c852f7805728..47475cca068a 100644 --- a/arch/sh/kernel/sh_bios.c +++ b/arch/sh/kernel/sh_bios.c | |||
@@ -1,19 +1,30 @@ | |||
1 | /* | 1 | /* |
2 | * linux/arch/sh/kernel/sh_bios.c | ||
3 | * C interface for trapping into the standard LinuxSH BIOS. | 2 | * C interface for trapping into the standard LinuxSH BIOS. |
4 | * | 3 | * |
5 | * Copyright (C) 2000 Greg Banks, Mitch Davis | 4 | * Copyright (C) 2000 Greg Banks, Mitch Davis |
5 | * Copyright (C) 1999, 2000 Niibe Yutaka | ||
6 | * Copyright (C) 2002 M. R. Brown | ||
7 | * Copyright (C) 2004 - 2010 Paul Mundt | ||
6 | * | 8 | * |
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
7 | */ | 12 | */ |
8 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/console.h> | ||
15 | #include <linux/tty.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/io.h> | ||
18 | #include <linux/delay.h> | ||
9 | #include <asm/sh_bios.h> | 19 | #include <asm/sh_bios.h> |
10 | 20 | ||
11 | #define BIOS_CALL_CONSOLE_WRITE 0 | 21 | #define BIOS_CALL_CONSOLE_WRITE 0 |
12 | #define BIOS_CALL_ETH_NODE_ADDR 10 | 22 | #define BIOS_CALL_ETH_NODE_ADDR 10 |
13 | #define BIOS_CALL_SHUTDOWN 11 | 23 | #define BIOS_CALL_SHUTDOWN 11 |
14 | #define BIOS_CALL_CHAR_OUT 0x1f /* TODO: hack */ | ||
15 | #define BIOS_CALL_GDB_DETACH 0xff | 24 | #define BIOS_CALL_GDB_DETACH 0xff |
16 | 25 | ||
26 | void *gdb_vbr_vector = NULL; | ||
27 | |||
17 | static inline long sh_bios_call(long func, long arg0, long arg1, long arg2, | 28 | static inline long sh_bios_call(long func, long arg0, long arg1, long arg2, |
18 | long arg3) | 29 | long arg3) |
19 | { | 30 | { |
@@ -23,6 +34,9 @@ static inline long sh_bios_call(long func, long arg0, long arg1, long arg2, | |||
23 | register long r6 __asm__("r6") = arg2; | 34 | register long r6 __asm__("r6") = arg2; |
24 | register long r7 __asm__("r7") = arg3; | 35 | register long r7 __asm__("r7") = arg3; |
25 | 36 | ||
37 | if (!gdb_vbr_vector) | ||
38 | return -ENOSYS; | ||
39 | |||
26 | __asm__ __volatile__("trapa #0x3f":"=z"(r0) | 40 | __asm__ __volatile__("trapa #0x3f":"=z"(r0) |
27 | :"0"(r0), "r"(r4), "r"(r5), "r"(r6), "r"(r7) | 41 | :"0"(r0), "r"(r4), "r"(r5), "r"(r6), "r"(r7) |
28 | :"memory"); | 42 | :"memory"); |
@@ -34,11 +48,6 @@ void sh_bios_console_write(const char *buf, unsigned int len) | |||
34 | sh_bios_call(BIOS_CALL_CONSOLE_WRITE, (long)buf, (long)len, 0, 0); | 48 | sh_bios_call(BIOS_CALL_CONSOLE_WRITE, (long)buf, (long)len, 0, 0); |
35 | } | 49 | } |
36 | 50 | ||
37 | void sh_bios_char_out(char ch) | ||
38 | { | ||
39 | sh_bios_call(BIOS_CALL_CHAR_OUT, ch, 0, 0, 0); | ||
40 | } | ||
41 | |||
42 | void sh_bios_gdb_detach(void) | 51 | void sh_bios_gdb_detach(void) |
43 | { | 52 | { |
44 | sh_bios_call(BIOS_CALL_GDB_DETACH, 0, 0, 0, 0); | 53 | sh_bios_call(BIOS_CALL_GDB_DETACH, 0, 0, 0, 0); |
@@ -55,3 +64,109 @@ void sh_bios_shutdown(unsigned int how) | |||
55 | { | 64 | { |
56 | sh_bios_call(BIOS_CALL_SHUTDOWN, how, 0, 0, 0); | 65 | sh_bios_call(BIOS_CALL_SHUTDOWN, how, 0, 0, 0); |
57 | } | 66 | } |
67 | |||
68 | /* | ||
69 | * Read the old value of the VBR register to initialise the vector | ||
70 | * through which debug and BIOS traps are delegated by the Linux trap | ||
71 | * handler. | ||
72 | */ | ||
73 | void sh_bios_vbr_init(void) | ||
74 | { | ||
75 | unsigned long vbr; | ||
76 | |||
77 | if (unlikely(gdb_vbr_vector)) | ||
78 | return; | ||
79 | |||
80 | __asm__ __volatile__ ("stc vbr, %0" : "=r" (vbr)); | ||
81 | |||
82 | if (vbr) { | ||
83 | gdb_vbr_vector = (void *)(vbr + 0x100); | ||
84 | printk(KERN_NOTICE "Setting GDB trap vector to %p\n", | ||
85 | gdb_vbr_vector); | ||
86 | } else | ||
87 | printk(KERN_NOTICE "SH-BIOS not detected\n"); | ||
88 | } | ||
89 | |||
90 | /** | ||
91 | * sh_bios_vbr_reload - Re-load the system VBR from the BIOS vector. | ||
92 | * | ||
93 | * This can be used by save/restore code to reinitialize the system VBR | ||
94 | * from the fixed BIOS VBR. A no-op if no BIOS VBR is known. | ||
95 | */ | ||
96 | void sh_bios_vbr_reload(void) | ||
97 | { | ||
98 | if (gdb_vbr_vector) | ||
99 | __asm__ __volatile__ ( | ||
100 | "ldc %0, vbr" | ||
101 | : | ||
102 | : "r" (((unsigned long) gdb_vbr_vector) - 0x100) | ||
103 | : "memory" | ||
104 | ); | ||
105 | } | ||
106 | |||
107 | /* | ||
108 | * Print a string through the BIOS | ||
109 | */ | ||
110 | static void sh_console_write(struct console *co, const char *s, | ||
111 | unsigned count) | ||
112 | { | ||
113 | sh_bios_console_write(s, count); | ||
114 | } | ||
115 | |||
116 | /* | ||
117 | * Setup initial baud/bits/parity. We do two things here: | ||
118 | * - construct a cflag setting for the first rs_open() | ||
119 | * - initialize the serial port | ||
120 | * Return non-zero if we didn't find a serial port. | ||
121 | */ | ||
122 | static int __init sh_console_setup(struct console *co, char *options) | ||
123 | { | ||
124 | int cflag = CREAD | HUPCL | CLOCAL; | ||
125 | |||
126 | /* | ||
127 | * Now construct a cflag setting. | ||
128 | * TODO: this is a totally bogus cflag, as we have | ||
129 | * no idea what serial settings the BIOS is using, or | ||
130 | * even if it's using the serial port at all. | ||
131 | */ | ||
132 | cflag |= B115200 | CS8 | /*no parity*/0; | ||
133 | |||
134 | co->cflag = cflag; | ||
135 | |||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | static struct console bios_console = { | ||
140 | .name = "bios", | ||
141 | .write = sh_console_write, | ||
142 | .setup = sh_console_setup, | ||
143 | .flags = CON_PRINTBUFFER, | ||
144 | .index = -1, | ||
145 | }; | ||
146 | |||
147 | static struct console *early_console; | ||
148 | |||
149 | static int __init setup_early_printk(char *buf) | ||
150 | { | ||
151 | int keep_early = 0; | ||
152 | |||
153 | if (!buf) | ||
154 | return 0; | ||
155 | |||
156 | if (strstr(buf, "keep")) | ||
157 | keep_early = 1; | ||
158 | |||
159 | if (!strncmp(buf, "bios", 4)) | ||
160 | early_console = &bios_console; | ||
161 | |||
162 | if (likely(early_console)) { | ||
163 | if (keep_early) | ||
164 | early_console->flags &= ~CON_BOOT; | ||
165 | else | ||
166 | early_console->flags |= CON_BOOT; | ||
167 | register_console(early_console); | ||
168 | } | ||
169 | |||
170 | return 0; | ||
171 | } | ||
172 | early_param("earlyprintk", setup_early_printk); | ||
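
The early printk console moves into sh_bios.c: earlyprintk=bios selects the BIOS console, and a trailing "keep" clears CON_BOOT so the console survives past boot instead of being unregistered. Not part of the patch: a standalone sketch of that option parsing; the flag value and names are illustrative.

#include <stdio.h>
#include <string.h>

#define CON_BOOT_MODEL 1	/* console is dropped once the real one is up */

struct console_model {
	const char *name;
	int flags;
};

static struct console_model bios_console_model = { "bios", 0 };

/* Returns the selected console, or NULL if the option names none we know. */
static struct console_model *parse_earlyprintk(const char *buf)
{
	struct console_model *con = NULL;

	if (!buf)
		return NULL;

	if (!strncmp(buf, "bios", 4))
		con = &bios_console_model;

	if (con) {
		if (strstr(buf, "keep"))
			con->flags &= ~CON_BOOT_MODEL;	/* keep it registered */
		else
			con->flags |= CON_BOOT_MODEL;	/* boot console only */
	}

	return con;
}

int main(void)
{
	struct console_model *con = parse_earlyprintk("bios,keep");

	printf("%s flags=%d\n", con ? con->name : "none", con ? con->flags : -1);
	return 0;
}
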
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index 12815ce01ecd..579cd2ca358d 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c | |||
@@ -150,7 +150,7 @@ static inline int restore_sigcontext_fpu(struct sigcontext __user *sc) | |||
150 | return 0; | 150 | return 0; |
151 | 151 | ||
152 | set_used_math(); | 152 | set_used_math(); |
153 | return __copy_from_user(&tsk->thread.fpu.hard, &sc->sc_fpregs[0], | 153 | return __copy_from_user(&tsk->thread.xstate->hardfpu, &sc->sc_fpregs[0], |
154 | sizeof(long)*(16*2+2)); | 154 | sizeof(long)*(16*2+2)); |
155 | } | 155 | } |
156 | 156 | ||
@@ -175,7 +175,7 @@ static inline int save_sigcontext_fpu(struct sigcontext __user *sc, | |||
175 | clear_used_math(); | 175 | clear_used_math(); |
176 | 176 | ||
177 | unlazy_fpu(tsk, regs); | 177 | unlazy_fpu(tsk, regs); |
178 | return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.fpu.hard, | 178 | return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.xstate->hardfpu, |
179 | sizeof(long)*(16*2+2)); | 179 | sizeof(long)*(16*2+2)); |
180 | } | 180 | } |
181 | #endif /* CONFIG_SH_FPU */ | 181 | #endif /* CONFIG_SH_FPU */ |
@@ -528,7 +528,7 @@ handle_syscall_restart(unsigned long save_r0, struct pt_regs *regs, | |||
528 | /* fallthrough */ | 528 | /* fallthrough */ |
529 | case -ERESTARTNOINTR: | 529 | case -ERESTARTNOINTR: |
530 | regs->regs[0] = save_r0; | 530 | regs->regs[0] = save_r0; |
531 | regs->pc -= instruction_size(ctrl_inw(regs->pc - 4)); | 531 | regs->pc -= instruction_size(__raw_readw(regs->pc - 4)); |
532 | break; | 532 | break; |
533 | } | 533 | } |
534 | } | 534 | } |
@@ -626,9 +626,9 @@ no_signal: | |||
626 | regs->regs[0] == -ERESTARTSYS || | 626 | regs->regs[0] == -ERESTARTSYS || |
627 | regs->regs[0] == -ERESTARTNOINTR) { | 627 | regs->regs[0] == -ERESTARTNOINTR) { |
628 | regs->regs[0] = save_r0; | 628 | regs->regs[0] = save_r0; |
629 | regs->pc -= instruction_size(ctrl_inw(regs->pc - 4)); | 629 | regs->pc -= instruction_size(__raw_readw(regs->pc - 4)); |
630 | } else if (regs->regs[0] == -ERESTART_RESTARTBLOCK) { | 630 | } else if (regs->regs[0] == -ERESTART_RESTARTBLOCK) { |
631 | regs->pc -= instruction_size(ctrl_inw(regs->pc - 4)); | 631 | regs->pc -= instruction_size(__raw_readw(regs->pc - 4)); |
632 | regs->regs[3] = __NR_restart_syscall; | 632 | regs->regs[3] = __NR_restart_syscall; |
633 | } | 633 | } |
634 | } | 634 | } |
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c index 580e97d46ca5..5a9f1f10ebf4 100644 --- a/arch/sh/kernel/signal_64.c +++ b/arch/sh/kernel/signal_64.c | |||
@@ -297,7 +297,7 @@ restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc) | |||
297 | regs->sr |= SR_FD; | 297 | regs->sr |= SR_FD; |
298 | } | 298 | } |
299 | 299 | ||
300 | err |= __copy_from_user(¤t->thread.fpu.hard, &sc->sc_fpregs[0], | 300 | err |= __copy_from_user(¤t->thread.xstate->hardfpu, &sc->sc_fpregs[0], |
301 | (sizeof(long long) * 32) + (sizeof(int) * 1)); | 301 | (sizeof(long long) * 32) + (sizeof(int) * 1)); |
302 | 302 | ||
303 | return err; | 303 | return err; |
@@ -322,7 +322,7 @@ setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc) | |||
322 | regs->sr |= SR_FD; | 322 | regs->sr |= SR_FD; |
323 | } | 323 | } |
324 | 324 | ||
325 | err |= __copy_to_user(&sc->sc_fpregs[0], ¤t->thread.fpu.hard, | 325 | err |= __copy_to_user(&sc->sc_fpregs[0], ¤t->thread.xstate->hardfpu, |
326 | (sizeof(long long) * 32) + (sizeof(int) * 1)); | 326 | (sizeof(long long) * 32) + (sizeof(int) * 1)); |
327 | clear_used_math(); | 327 | clear_used_math(); |
328 | 328 | ||
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index 983e0792d5f3..e124cf7008df 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c | |||
@@ -161,15 +161,6 @@ void smp_send_reschedule(int cpu) | |||
161 | plat_send_ipi(cpu, SMP_MSG_RESCHEDULE); | 161 | plat_send_ipi(cpu, SMP_MSG_RESCHEDULE); |
162 | } | 162 | } |
163 | 163 | ||
164 | static void stop_this_cpu(void *unused) | ||
165 | { | ||
166 | cpu_clear(smp_processor_id(), cpu_online_map); | ||
167 | local_irq_disable(); | ||
168 | |||
169 | for (;;) | ||
170 | cpu_relax(); | ||
171 | } | ||
172 | |||
173 | void smp_send_stop(void) | 164 | void smp_send_stop(void) |
174 | { | 165 | { |
175 | smp_call_function(stop_this_cpu, 0, 0); | 166 | smp_call_function(stop_this_cpu, 0, 0); |
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c index 7b036339dc92..0830c2a9f712 100644 --- a/arch/sh/kernel/traps.c +++ b/arch/sh/kernel/traps.c | |||
@@ -58,7 +58,7 @@ BUILD_TRAP_HANDLER(debug) | |||
58 | TRAP_HANDLER_DECL; | 58 | TRAP_HANDLER_DECL; |
59 | 59 | ||
60 | /* Rewind */ | 60 | /* Rewind */ |
61 | regs->pc -= instruction_size(ctrl_inw(regs->pc - 4)); | 61 | regs->pc -= instruction_size(__raw_readw(regs->pc - 4)); |
62 | 62 | ||
63 | if (notify_die(DIE_TRAP, "debug trap", regs, 0, vec & 0xff, | 63 | if (notify_die(DIE_TRAP, "debug trap", regs, 0, vec & 0xff, |
64 | SIGTRAP) == NOTIFY_STOP) | 64 | SIGTRAP) == NOTIFY_STOP) |
@@ -75,7 +75,7 @@ BUILD_TRAP_HANDLER(bug) | |||
75 | TRAP_HANDLER_DECL; | 75 | TRAP_HANDLER_DECL; |
76 | 76 | ||
77 | /* Rewind */ | 77 | /* Rewind */ |
78 | regs->pc -= instruction_size(ctrl_inw(regs->pc - 4)); | 78 | regs->pc -= instruction_size(__raw_readw(regs->pc - 4)); |
79 | 79 | ||
80 | if (notify_die(DIE_TRAP, "bug trap", regs, 0, TRAPA_BUG_OPCODE & 0xff, | 80 | if (notify_die(DIE_TRAP, "bug trap", regs, 0, TRAPA_BUG_OPCODE & 0xff, |
81 | SIGTRAP) == NOTIFY_STOP) | 81 | SIGTRAP) == NOTIFY_STOP) |
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 86639beac3a2..c3d86fa71ddf 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c | |||
@@ -24,11 +24,10 @@ | |||
24 | #include <linux/kdebug.h> | 24 | #include <linux/kdebug.h> |
25 | #include <linux/kexec.h> | 25 | #include <linux/kexec.h> |
26 | #include <linux/limits.h> | 26 | #include <linux/limits.h> |
27 | #include <linux/proc_fs.h> | ||
28 | #include <linux/seq_file.h> | ||
29 | #include <linux/sysfs.h> | 27 | #include <linux/sysfs.h> |
28 | #include <linux/uaccess.h> | ||
30 | #include <asm/system.h> | 29 | #include <asm/system.h> |
31 | #include <asm/uaccess.h> | 30 | #include <asm/alignment.h> |
32 | #include <asm/fpu.h> | 31 | #include <asm/fpu.h> |
33 | #include <asm/kprobes.h> | 32 | #include <asm/kprobes.h> |
34 | 33 | ||
@@ -47,73 +46,6 @@ | |||
47 | #define TRAP_ILLEGAL_SLOT_INST 13 | 46 | #define TRAP_ILLEGAL_SLOT_INST 13 |
48 | #endif | 47 | #endif |
49 | 48 | ||
50 | static unsigned long se_user; | ||
51 | static unsigned long se_sys; | ||
52 | static unsigned long se_half; | ||
53 | static unsigned long se_word; | ||
54 | static unsigned long se_dword; | ||
55 | static unsigned long se_multi; | ||
56 | /* bitfield: 1: warn 2: fixup 4: signal -> combinations 2|4 && 1|2|4 are not | ||
57 | valid! */ | ||
58 | static int se_usermode = 3; | ||
59 | /* 0: no warning 1: print a warning message, disabled by default */ | ||
60 | static int se_kernmode_warn; | ||
61 | |||
62 | #ifdef CONFIG_PROC_FS | ||
63 | static const char *se_usermode_action[] = { | ||
64 | "ignored", | ||
65 | "warn", | ||
66 | "fixup", | ||
67 | "fixup+warn", | ||
68 | "signal", | ||
69 | "signal+warn" | ||
70 | }; | ||
71 | |||
72 | static int alignment_proc_show(struct seq_file *m, void *v) | ||
73 | { | ||
74 | seq_printf(m, "User:\t\t%lu\n", se_user); | ||
75 | seq_printf(m, "System:\t\t%lu\n", se_sys); | ||
76 | seq_printf(m, "Half:\t\t%lu\n", se_half); | ||
77 | seq_printf(m, "Word:\t\t%lu\n", se_word); | ||
78 | seq_printf(m, "DWord:\t\t%lu\n", se_dword); | ||
79 | seq_printf(m, "Multi:\t\t%lu\n", se_multi); | ||
80 | seq_printf(m, "User faults:\t%i (%s)\n", se_usermode, | ||
81 | se_usermode_action[se_usermode]); | ||
82 | seq_printf(m, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn, | ||
83 | se_kernmode_warn ? "+warn" : ""); | ||
84 | return 0; | ||
85 | } | ||
86 | |||
87 | static int alignment_proc_open(struct inode *inode, struct file *file) | ||
88 | { | ||
89 | return single_open(file, alignment_proc_show, NULL); | ||
90 | } | ||
91 | |||
92 | static ssize_t alignment_proc_write(struct file *file, | ||
93 | const char __user *buffer, size_t count, loff_t *pos) | ||
94 | { | ||
95 | int *data = PDE(file->f_path.dentry->d_inode)->data; | ||
96 | char mode; | ||
97 | |||
98 | if (count > 0) { | ||
99 | if (get_user(mode, buffer)) | ||
100 | return -EFAULT; | ||
101 | if (mode >= '0' && mode <= '5') | ||
102 | *data = mode - '0'; | ||
103 | } | ||
104 | return count; | ||
105 | } | ||
106 | |||
107 | static const struct file_operations alignment_proc_fops = { | ||
108 | .owner = THIS_MODULE, | ||
109 | .open = alignment_proc_open, | ||
110 | .read = seq_read, | ||
111 | .llseek = seq_lseek, | ||
112 | .release = single_release, | ||
113 | .write = alignment_proc_write, | ||
114 | }; | ||
115 | #endif | ||
116 | |||
117 | static void dump_mem(const char *str, unsigned long bottom, unsigned long top) | 49 | static void dump_mem(const char *str, unsigned long bottom, unsigned long top) |
118 | { | 50 | { |
119 | unsigned long p; | 51 | unsigned long p; |
@@ -265,10 +197,10 @@ static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs, | |||
265 | count = 1<<(instruction&3); | 197 | count = 1<<(instruction&3); |
266 | 198 | ||
267 | switch (count) { | 199 | switch (count) { |
268 | case 1: se_half += 1; break; | 200 | case 1: inc_unaligned_byte_access(); break; |
269 | case 2: se_word += 1; break; | 201 | case 2: inc_unaligned_word_access(); break; |
270 | case 4: se_dword += 1; break; | 202 | case 4: inc_unaligned_dword_access(); break; |
271 | case 8: se_multi += 1; break; /* ??? */ | 203 | case 8: inc_unaligned_multi_access(); break; |
272 | } | 204 | } |
273 | 205 | ||
274 | ret = -EFAULT; | 206 | ret = -EFAULT; |
@@ -452,18 +384,8 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs, | |||
452 | rm = regs->regs[index]; | 384 | rm = regs->regs[index]; |
453 | 385 | ||
454 | /* shout about fixups */ | 386 | /* shout about fixups */ |
455 | if (!expected) { | 387 | if (!expected) |
456 | if (user_mode(regs) && (se_usermode & 1) && printk_ratelimit()) | 388 | unaligned_fixups_notify(current, instruction, regs); |
457 | pr_notice("Fixing up unaligned userspace access " | ||
458 | "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", | ||
459 | current->comm, task_pid_nr(current), | ||
460 | (void *)regs->pc, instruction); | ||
461 | else if (se_kernmode_warn && printk_ratelimit()) | ||
462 | pr_notice("Fixing up unaligned kernel access " | ||
463 | "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", | ||
464 | current->comm, task_pid_nr(current), | ||
465 | (void *)regs->pc, instruction); | ||
466 | } | ||
467 | 389 | ||
468 | ret = -EFAULT; | 390 | ret = -EFAULT; |
469 | switch (instruction&0xF000) { | 391 | switch (instruction&0xF000) { |
@@ -616,10 +538,10 @@ asmlinkage void do_address_error(struct pt_regs *regs, | |||
616 | 538 | ||
617 | if (user_mode(regs)) { | 539 | if (user_mode(regs)) { |
618 | int si_code = BUS_ADRERR; | 540 | int si_code = BUS_ADRERR; |
541 | unsigned int user_action; | ||
619 | 542 | ||
620 | local_irq_enable(); | 543 | local_irq_enable(); |
621 | 544 | inc_unaligned_user_access(); | |
622 | se_user += 1; | ||
623 | 545 | ||
624 | set_fs(USER_DS); | 546 | set_fs(USER_DS); |
625 | if (copy_from_user(&instruction, (insn_size_t *)(regs->pc & ~1), | 547 | if (copy_from_user(&instruction, (insn_size_t *)(regs->pc & ~1), |
@@ -630,16 +552,12 @@ asmlinkage void do_address_error(struct pt_regs *regs, | |||
630 | set_fs(oldfs); | 552 | set_fs(oldfs); |
631 | 553 | ||
632 | /* shout about userspace fixups */ | 554 | /* shout about userspace fixups */ |
633 | if (se_usermode & 1) | 555 | unaligned_fixups_notify(current, instruction, regs); |
634 | printk(KERN_NOTICE "Unaligned userspace access " | ||
635 | "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", | ||
636 | current->comm, current->pid, (void *)regs->pc, | ||
637 | instruction); | ||
638 | 556 | ||
639 | if (se_usermode & 2) | 557 | user_action = unaligned_user_action(); |
558 | if (user_action & UM_FIXUP) | ||
640 | goto fixup; | 559 | goto fixup; |
641 | 560 | if (user_action & UM_SIGNAL) | |
642 | if (se_usermode & 4) | ||
643 | goto uspace_segv; | 561 | goto uspace_segv; |
644 | else { | 562 | else { |
645 | /* ignore */ | 563 | /* ignore */ |
@@ -659,7 +577,7 @@ fixup: | |||
659 | &user_mem_access, 0); | 577 | &user_mem_access, 0); |
660 | set_fs(oldfs); | 578 | set_fs(oldfs); |
661 | 579 | ||
662 | if (tmp==0) | 580 | if (tmp == 0) |
663 | return; /* sorted */ | 581 | return; /* sorted */ |
664 | uspace_segv: | 582 | uspace_segv: |
665 | printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned " | 583 | printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned " |
@@ -672,7 +590,7 @@ uspace_segv: | |||
672 | info.si_addr = (void __user *)address; | 590 | info.si_addr = (void __user *)address; |
673 | force_sig_info(SIGBUS, &info, current); | 591 | force_sig_info(SIGBUS, &info, current); |
674 | } else { | 592 | } else { |
675 | se_sys += 1; | 593 | inc_unaligned_kernel_access(); |
676 | 594 | ||
677 | if (regs->pc & 1) | 595 | if (regs->pc & 1) |
678 | die("unaligned program counter", regs, error_code); | 596 | die("unaligned program counter", regs, error_code); |
@@ -687,11 +605,7 @@ uspace_segv: | |||
687 | die("insn faulting in do_address_error", regs, 0); | 605 | die("insn faulting in do_address_error", regs, 0); |
688 | } | 606 | } |
689 | 607 | ||
690 | if (se_kernmode_warn) | 608 | unaligned_fixups_notify(current, instruction, regs); |
691 | printk(KERN_NOTICE "Unaligned kernel access " | ||
692 | "on behalf of \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", | ||
693 | current->comm, current->pid, (void *)regs->pc, | ||
694 | instruction); | ||
695 | 609 | ||
696 | handle_unaligned_access(instruction, regs, | 610 | handle_unaligned_access(instruction, regs, |
697 | &user_mem_access, 0); | 611 | &user_mem_access, 0); |
@@ -876,35 +790,10 @@ asmlinkage void do_exception_error(unsigned long r4, unsigned long r5, | |||
876 | die_if_kernel("exception", regs, ex); | 790 | die_if_kernel("exception", regs, ex); |
877 | } | 791 | } |
878 | 792 | ||
879 | #if defined(CONFIG_SH_STANDARD_BIOS) | ||
880 | void *gdb_vbr_vector; | ||
881 | |||
882 | static inline void __init gdb_vbr_init(void) | ||
883 | { | ||
884 | register unsigned long vbr; | ||
885 | |||
886 | /* | ||
887 | * Read the old value of the VBR register to initialise | ||
888 | * the vector through which debug and BIOS traps are | ||
889 | * delegated by the Linux trap handler. | ||
890 | */ | ||
891 | asm volatile("stc vbr, %0" : "=r" (vbr)); | ||
892 | |||
893 | gdb_vbr_vector = (void *)(vbr + 0x100); | ||
894 | printk("Setting GDB trap vector to 0x%08lx\n", | ||
895 | (unsigned long)gdb_vbr_vector); | ||
896 | } | ||
897 | #endif | ||
898 | |||
899 | void __cpuinit per_cpu_trap_init(void) | 793 | void __cpuinit per_cpu_trap_init(void) |
900 | { | 794 | { |
901 | extern void *vbr_base; | 795 | extern void *vbr_base; |
902 | 796 | ||
903 | #ifdef CONFIG_SH_STANDARD_BIOS | ||
904 | if (raw_smp_processor_id() == 0) | ||
905 | gdb_vbr_init(); | ||
906 | #endif | ||
907 | |||
908 | /* NOTE: The VBR value should be at P1 | 797 | /* NOTE: The VBR value should be at P1 |
909 | (or P2, virtual "fixed" address space). | 798 | (or P2, virtual "fixed" address space). |
910 | It should definitely not be a physical address. */ | 799 | It should definitely not be a physical address. */ |
@@ -956,11 +845,8 @@ void __init trap_init(void) | |||
956 | #endif | 845 | #endif |
957 | 846 | ||
958 | #ifdef TRAP_UBC | 847 | #ifdef TRAP_UBC |
959 | set_exception_table_vec(TRAP_UBC, break_point_trap); | 848 | set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler); |
960 | #endif | 849 | #endif |
961 | |||
962 | /* Setup VBR for boot cpu */ | ||
963 | per_cpu_trap_init(); | ||
964 | } | 850 | } |
965 | 851 | ||
966 | void show_stack(struct task_struct *tsk, unsigned long *sp) | 852 | void show_stack(struct task_struct *tsk, unsigned long *sp) |
@@ -985,34 +871,3 @@ void dump_stack(void) | |||
985 | show_stack(NULL, NULL); | 871 | show_stack(NULL, NULL); |
986 | } | 872 | } |
987 | EXPORT_SYMBOL(dump_stack); | 873 | EXPORT_SYMBOL(dump_stack); |
988 | |||
989 | #ifdef CONFIG_PROC_FS | ||
990 | /* | ||
991 | * This needs to be done after sysctl_init, otherwise sys/ will be | ||
992 | * overwritten. Actually, this shouldn't be in sys/ at all since | ||
993 | * it isn't a sysctl, and it doesn't contain sysctl information. | ||
994 | * We now locate it in /proc/cpu/alignment instead. | ||
995 | */ | ||
996 | static int __init alignment_init(void) | ||
997 | { | ||
998 | struct proc_dir_entry *dir, *res; | ||
999 | |||
1000 | dir = proc_mkdir("cpu", NULL); | ||
1001 | if (!dir) | ||
1002 | return -ENOMEM; | ||
1003 | |||
1004 | res = proc_create_data("alignment", S_IWUSR | S_IRUGO, dir, | ||
1005 | &alignment_proc_fops, &se_usermode); | ||
1006 | if (!res) | ||
1007 | return -ENOMEM; | ||
1008 | |||
1009 | res = proc_create_data("kernel_alignment", S_IWUSR | S_IRUGO, dir, | ||
1010 | &alignment_proc_fops, &se_kernmode_warn); | ||
1011 | if (!res) | ||
1012 | return -ENOMEM; | ||
1013 | |||
1014 | return 0; | ||
1015 | } | ||
1016 | |||
1017 | fs_initcall(alignment_init); | ||
1018 | #endif | ||
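
The traps_32.c hunks above drop the file-local se_* counters, the /proc/cpu/alignment interface and the open-coded "Fixing up unaligned ..." printks, and call into a shared alignment-accounting API instead (inc_unaligned_*_access(), unaligned_user_action(), unaligned_fixups_notify()). Those helpers are not part of the hunks shown here, so the sketch below is only a rough reconstruction of what they imply, reusing the counter names and the 1=warn / 2=fixup / 4=signal policy bits from the removed code; the UM_WARN name and the exact signatures are assumptions for illustration, not the patch's actual implementation.

/* Hypothetical consolidation of the removed accounting code (sketch only). */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/ptrace.h>

#define UM_WARN		(1 << 0)	/* 1: print a rate-limited notice (assumed name) */
#define UM_FIXUP	(1 << 1)	/* 2: emulate the access and continue            */
#define UM_SIGNAL	(1 << 2)	/* 4: send SIGBUS instead of fixing up           */

static unsigned long se_user, se_sys;		/* user/kernel fault counters     */
static int se_usermode = UM_WARN | UM_FIXUP;	/* matches the old default of 3   */

void inc_unaligned_user_access(void)   { se_user++; }
void inc_unaligned_kernel_access(void) { se_sys++; }

unsigned int unaligned_user_action(void)
{
	return se_usermode;
}

void unaligned_fixups_notify(struct task_struct *tsk, insn_size_t insn,
			     struct pt_regs *regs)
{
	/* Simplified: the real notifier also distinguishes kernel-mode warnings. */
	if ((se_usermode & UM_WARN) && printk_ratelimit())
		pr_notice("Fixing up unaligned access in \"%s\" pid=%d "
			  "pc=0x%p ins=0x%04hx\n",
			  tsk->comm, task_pid_nr(tsk),
			  (void *)regs->pc, insn);
}
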
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c index d86f5315a0c1..e3f92eb05ffd 100644 --- a/arch/sh/kernel/traps_64.c +++ b/arch/sh/kernel/traps_64.c | |||
@@ -611,19 +611,19 @@ static int misaligned_fpu_load(struct pt_regs *regs, | |||
611 | 611 | ||
612 | switch (width_shift) { | 612 | switch (width_shift) { |
613 | case 2: | 613 | case 2: |
614 | current->thread.fpu.hard.fp_regs[destreg] = buflo; | 614 | current->thread.xstate->hardfpu.fp_regs[destreg] = buflo; |
615 | break; | 615 | break; |
616 | case 3: | 616 | case 3: |
617 | if (do_paired_load) { | 617 | if (do_paired_load) { |
618 | current->thread.fpu.hard.fp_regs[destreg] = buflo; | 618 | current->thread.xstate->hardfpu.fp_regs[destreg] = buflo; |
619 | current->thread.fpu.hard.fp_regs[destreg+1] = bufhi; | 619 | current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi; |
620 | } else { | 620 | } else { |
621 | #if defined(CONFIG_CPU_LITTLE_ENDIAN) | 621 | #if defined(CONFIG_CPU_LITTLE_ENDIAN) |
622 | current->thread.fpu.hard.fp_regs[destreg] = bufhi; | 622 | current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi; |
623 | current->thread.fpu.hard.fp_regs[destreg+1] = buflo; | 623 | current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo; |
624 | #else | 624 | #else |
625 | current->thread.fpu.hard.fp_regs[destreg] = buflo; | 625 | current->thread.xstate->hardfpu.fp_regs[destreg] = buflo; |
626 | current->thread.fpu.hard.fp_regs[destreg+1] = bufhi; | 626 | current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi; |
627 | #endif | 627 | #endif |
628 | } | 628 | } |
629 | break; | 629 | break; |
@@ -681,19 +681,19 @@ static int misaligned_fpu_store(struct pt_regs *regs, | |||
681 | 681 | ||
682 | switch (width_shift) { | 682 | switch (width_shift) { |
683 | case 2: | 683 | case 2: |
684 | buflo = current->thread.fpu.hard.fp_regs[srcreg]; | 684 | buflo = current->thread.xstate->hardfpu.fp_regs[srcreg]; |
685 | break; | 685 | break; |
686 | case 3: | 686 | case 3: |
687 | if (do_paired_load) { | 687 | if (do_paired_load) { |
688 | buflo = current->thread.fpu.hard.fp_regs[srcreg]; | 688 | buflo = current->thread.xstate->hardfpu.fp_regs[srcreg]; |
689 | bufhi = current->thread.fpu.hard.fp_regs[srcreg+1]; | 689 | bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1]; |
690 | } else { | 690 | } else { |
691 | #if defined(CONFIG_CPU_LITTLE_ENDIAN) | 691 | #if defined(CONFIG_CPU_LITTLE_ENDIAN) |
692 | bufhi = current->thread.fpu.hard.fp_regs[srcreg]; | 692 | bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg]; |
693 | buflo = current->thread.fpu.hard.fp_regs[srcreg+1]; | 693 | buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1]; |
694 | #else | 694 | #else |
695 | buflo = current->thread.fpu.hard.fp_regs[srcreg]; | 695 | buflo = current->thread.xstate->hardfpu.fp_regs[srcreg]; |
696 | bufhi = current->thread.fpu.hard.fp_regs[srcreg+1]; | 696 | bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1]; |
697 | #endif | 697 | #endif |
698 | } | 698 | } |
699 | break; | 699 | break; |
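
The traps_64.c hunk is purely mechanical: the misaligned FPU load/store fixups stop reaching the FPU registers through the on-task thread.fpu.hard union and instead go through the dynamically allocated thread.xstate->hardfpu area. A minimal sketch of the data-structure shape this rename implies follows; the field names come from the hunk, while the register-bank size and the surrounding members are assumptions.

/* Sketch of the assumed layout behind the new accessor path (illustrative only). */
struct sh_fpu_hard_struct {
	unsigned long fp_regs[64];	/* bank indexed by destreg/srcreg above */
	unsigned int fpscr;
};

union thread_xstate {
	struct sh_fpu_hard_struct hardfpu;	/* member name used by the new code */
};

struct thread_struct {
	union thread_xstate *xstate;	/* allocated per task instead of embedded */
	/* ... */
};

/* old path: current->thread.fpu.hard.fp_regs[destreg]        */
/* new path: current->thread.xstate->hardfpu.fp_regs[destreg] */
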
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S index a1e4ec24f1f5..7f8a709c3ada 100644 --- a/arch/sh/kernel/vmlinux.lds.S +++ b/arch/sh/kernel/vmlinux.lds.S | |||
@@ -3,7 +3,7 @@ | |||
3 | * Written by Niibe Yutaka and Paul Mundt | 3 | * Written by Niibe Yutaka and Paul Mundt |
4 | */ | 4 | */ |
5 | #ifdef CONFIG_SUPERH64 | 5 | #ifdef CONFIG_SUPERH64 |
6 | #define LOAD_OFFSET CONFIG_PAGE_OFFSET | 6 | #define LOAD_OFFSET PAGE_OFFSET |
7 | OUTPUT_ARCH(sh:sh5) | 7 | OUTPUT_ARCH(sh:sh5) |
8 | #else | 8 | #else |
9 | #define LOAD_OFFSET 0 | 9 | #define LOAD_OFFSET 0 |
@@ -14,17 +14,16 @@ OUTPUT_ARCH(sh) | |||
14 | #include <asm/cache.h> | 14 | #include <asm/cache.h> |
15 | #include <asm/vmlinux.lds.h> | 15 | #include <asm/vmlinux.lds.h> |
16 | 16 | ||
17 | #ifdef CONFIG_PMB | ||
18 | #define MEMORY_OFFSET 0 | ||
19 | #else | ||
20 | #define MEMORY_OFFSET __MEMORY_START | ||
21 | #endif | ||
22 | |||
17 | ENTRY(_start) | 23 | ENTRY(_start) |
18 | SECTIONS | 24 | SECTIONS |
19 | { | 25 | { |
20 | #ifdef CONFIG_PMB_FIXED | 26 | . = PAGE_OFFSET + MEMORY_OFFSET + CONFIG_ZERO_PAGE_OFFSET; |
21 | . = CONFIG_PAGE_OFFSET + (CONFIG_MEMORY_START & 0x1fffffff) + | ||
22 | CONFIG_ZERO_PAGE_OFFSET; | ||
23 | #elif defined(CONFIG_32BIT) | ||
24 | . = CONFIG_PAGE_OFFSET + CONFIG_ZERO_PAGE_OFFSET; | ||
25 | #else | ||
26 | . = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET; | ||
27 | #endif | ||
28 | 27 | ||
29 | _text = .; /* Text and read-only data */ | 28 | _text = .; /* Text and read-only data */ |
30 | 29 | ||
@@ -35,12 +34,7 @@ SECTIONS | |||
35 | .text : AT(ADDR(.text) - LOAD_OFFSET) { | 34 | .text : AT(ADDR(.text) - LOAD_OFFSET) { |
36 | HEAD_TEXT | 35 | HEAD_TEXT |
37 | TEXT_TEXT | 36 | TEXT_TEXT |
38 | 37 | EXTRA_TEXT | |
39 | #ifdef CONFIG_SUPERH64 | ||
40 | *(.text64) | ||
41 | *(.text..SHmedia32) | ||
42 | #endif | ||
43 | |||
44 | SCHED_TEXT | 38 | SCHED_TEXT |
45 | LOCK_TEXT | 39 | LOCK_TEXT |
46 | KPROBES_TEXT | 40 | KPROBES_TEXT |
@@ -51,24 +45,12 @@ SECTIONS | |||
51 | } = 0x0009 | 45 | } = 0x0009 |
52 | 46 | ||
53 | EXCEPTION_TABLE(16) | 47 | EXCEPTION_TABLE(16) |
54 | |||
55 | NOTES | 48 | NOTES |
56 | RO_DATA(PAGE_SIZE) | ||
57 | |||
58 | /* | ||
59 | * Code which must be executed uncached and the associated data | ||
60 | */ | ||
61 | . = ALIGN(PAGE_SIZE); | ||
62 | .uncached : AT(ADDR(.uncached) - LOAD_OFFSET) { | ||
63 | __uncached_start = .; | ||
64 | *(.uncached.text) | ||
65 | *(.uncached.data) | ||
66 | __uncached_end = .; | ||
67 | } | ||
68 | 49 | ||
50 | _sdata = .; | ||
51 | RO_DATA(PAGE_SIZE) | ||
69 | RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) | 52 | RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) |
70 | 53 | _edata = .; | |
71 | _edata = .; /* End of data section */ | ||
72 | 54 | ||
73 | DWARF_EH_FRAME | 55 | DWARF_EH_FRAME |
74 | 56 | ||