about summary refs log tree commit diff stats
path: root/arch/sh/kernel/cpu/init.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/sh/kernel/cpu/init.c')
-rw-r--r-- arch/sh/kernel/cpu/init.c | 147
1 files changed, 78 insertions, 69 deletions
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index e932ebef4738..c736422344eb 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -24,22 +24,32 @@
24#include <asm/elf.h> 24#include <asm/elf.h>
25#include <asm/io.h> 25#include <asm/io.h>
26#include <asm/smp.h> 26#include <asm/smp.h>
27#ifdef CONFIG_SUPERH32 27#include <asm/sh_bios.h>
28#include <asm/ubc.h> 28
29#ifdef CONFIG_SH_FPU
30#define cpu_has_fpu 1
31#else
32#define cpu_has_fpu 0
33#endif
34
35#ifdef CONFIG_SH_DSP
36#define cpu_has_dsp 1
37#else
38#define cpu_has_dsp 0
29#endif 39#endif
30 40
31/* 41/*
32 * Generic wrapper for command line arguments to disable on-chip 42 * Generic wrapper for command line arguments to disable on-chip
33 * peripherals (nofpu, nodsp, and so forth). 43 * peripherals (nofpu, nodsp, and so forth).
34 */ 44 */
35#define onchip_setup(x) \ 45#define onchip_setup(x) \
36static int x##_disabled __initdata = 0; \ 46static int x##_disabled __initdata = !cpu_has_##x; \
37 \ 47 \
38static int __init x##_setup(char *opts) \ 48static int __init x##_setup(char *opts) \
39{ \ 49{ \
40 x##_disabled = 1; \ 50 x##_disabled = 1; \
41 return 1; \ 51 return 1; \
42} \ 52} \
43__setup("no" __stringify(x), x##_setup); 53__setup("no" __stringify(x), x##_setup);
44 54
45onchip_setup(fpu); 55onchip_setup(fpu);
@@ -52,10 +62,10 @@ onchip_setup(dsp);
52static void __init speculative_execution_init(void) 62static void __init speculative_execution_init(void)
53{ 63{
54 /* Clear RABD */ 64 /* Clear RABD */
55 ctrl_outl(ctrl_inl(CPUOPM) & ~CPUOPM_RABD, CPUOPM); 65 __raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);
56 66
57 /* Flush the update */ 67 /* Flush the update */
58 (void)ctrl_inl(CPUOPM); 68 (void)__raw_readl(CPUOPM);
59 ctrl_barrier(); 69 ctrl_barrier();
60} 70}
61#else 71#else
@@ -75,16 +85,11 @@ static void __init expmask_init(void)
75 /* 85 /*
76 * Future proofing. 86 * Future proofing.
77 * 87 *
78 * Disable support for slottable sleep instruction 88 * Disable support for slottable sleep instruction, non-nop
79 * and non-nop instructions in the rte delay slot. 89 * instructions in the rte delay slot, and associative writes to
90 * the memory-mapped cache array.
80 */ 91 */
81 expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP); 92 expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP | EXPMASK_MMCAW);
82
83 /*
84 * Enable associative writes to the memory-mapped cache array
85 * until the cache flush ops have been rewritten.
86 */
87 expmask |= EXPMASK_MMCAW;
88 93
89 __raw_writel(expmask, EXPMASK); 94 __raw_writel(expmask, EXPMASK);
90 ctrl_barrier(); 95 ctrl_barrier();
@@ -94,7 +99,7 @@ static void __init expmask_init(void)
94#endif 99#endif
95 100
96/* 2nd-level cache init */ 101/* 2nd-level cache init */
97void __uses_jump_to_uncached __attribute__ ((weak)) l2_cache_init(void) 102void __attribute__ ((weak)) l2_cache_init(void)
98{ 103{
99} 104}
100 105
@@ -102,12 +107,12 @@ void __uses_jump_to_uncached __attribute__ ((weak)) l2_cache_init(void)
102 * Generic first-level cache init 107 * Generic first-level cache init
103 */ 108 */
104#ifdef CONFIG_SUPERH32 109#ifdef CONFIG_SUPERH32
105static void __uses_jump_to_uncached cache_init(void) 110static void cache_init(void)
106{ 111{
107 unsigned long ccr, flags; 112 unsigned long ccr, flags;
108 113
109 jump_to_uncached(); 114 jump_to_uncached();
110 ccr = ctrl_inl(CCR); 115 ccr = __raw_readl(CCR);
111 116
112 /* 117 /*
113 * At this point we don't know whether the cache is enabled or not - a 118 * At this point we don't know whether the cache is enabled or not - a
@@ -151,7 +156,7 @@ static void __uses_jump_to_uncached cache_init(void)
151 for (addr = addrstart; 156 for (addr = addrstart;
152 addr < addrstart + waysize; 157 addr < addrstart + waysize;
153 addr += current_cpu_data.dcache.linesz) 158 addr += current_cpu_data.dcache.linesz)
154 ctrl_outl(0, addr); 159 __raw_writel(0, addr);
155 160
156 addrstart += current_cpu_data.dcache.way_incr; 161 addrstart += current_cpu_data.dcache.way_incr;
157 } while (--ways); 162 } while (--ways);
@@ -184,7 +189,7 @@ static void __uses_jump_to_uncached cache_init(void)
184 189
185 l2_cache_init(); 190 l2_cache_init();
186 191
187 ctrl_outl(flags, CCR); 192 __raw_writel(flags, CCR);
188 back_to_cached(); 193 back_to_cached();
189} 194}
190#else 195#else
@@ -212,6 +217,18 @@ static void detect_cache_shape(void)
212 l2_cache_shape = -1; /* No S-cache */ 217 l2_cache_shape = -1; /* No S-cache */
213} 218}
214 219
220static void __init fpu_init(void)
221{
222 /* Disable the FPU */
223 if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
224 printk("FPU Disabled\n");
225 current_cpu_data.flags &= ~CPU_HAS_FPU;
226 }
227
228 disable_fpu();
229 clear_used_math();
230}
231
215#ifdef CONFIG_SH_DSP 232#ifdef CONFIG_SH_DSP
216static void __init release_dsp(void) 233static void __init release_dsp(void)
217{ 234{
@@ -249,28 +266,35 @@ static void __init dsp_init(void)
249 if (sr & SR_DSP) 266 if (sr & SR_DSP)
250 current_cpu_data.flags |= CPU_HAS_DSP; 267 current_cpu_data.flags |= CPU_HAS_DSP;
251 268
269 /* Disable the DSP */
270 if (dsp_disabled && (current_cpu_data.flags & CPU_HAS_DSP)) {
271 printk("DSP Disabled\n");
272 current_cpu_data.flags &= ~CPU_HAS_DSP;
273 }
274
252 /* Now that we've determined the DSP status, clear the DSP bit. */ 275 /* Now that we've determined the DSP status, clear the DSP bit. */
253 release_dsp(); 276 release_dsp();
254} 277}
278#else
279static inline void __init dsp_init(void) { }
255#endif /* CONFIG_SH_DSP */ 280#endif /* CONFIG_SH_DSP */
256 281
257/** 282/**
258 * sh_cpu_init 283 * sh_cpu_init
259 * 284 *
260 * This is our initial entry point for each CPU, and is invoked on the boot 285 * This is our initial entry point for each CPU, and is invoked on the
261 * CPU prior to calling start_kernel(). For SMP, a combination of this and 286 * boot CPU prior to calling start_kernel(). For SMP, a combination of
262 * start_secondary() will bring up each processor to a ready state prior 287 * this and start_secondary() will bring up each processor to a ready
263 * to hand forking the idle loop. 288 * state prior to hand forking the idle loop.
264 * 289 *
265 * We do all of the basic processor init here, including setting up the 290 * We do all of the basic processor init here, including setting up
266 * caches, FPU, DSP, kicking the UBC, etc. By the time start_kernel() is 291 * the caches, FPU, DSP, etc. By the time start_kernel() is hit (and
267 * hit (and subsequently platform_setup()) things like determining the 292 * subsequently platform_setup()) things like determining the CPU
268 * CPU subtype and initial configuration will all be done. 293 * subtype and initial configuration will all be done.
269 * 294 *
270 * Each processor family is still responsible for doing its own probing 295 * Each processor family is still responsible for doing its own probing
271 * and cache configuration in detect_cpu_and_cache_system(). 296 * and cache configuration in detect_cpu_and_cache_system().
272 */ 297 */
273
274asmlinkage void __init sh_cpu_init(void) 298asmlinkage void __init sh_cpu_init(void)
275{ 299{
276 current_thread_info()->cpu = hard_smp_processor_id(); 300 current_thread_info()->cpu = hard_smp_processor_id();
@@ -307,18 +331,8 @@ asmlinkage void __init sh_cpu_init(void)
307 detect_cache_shape(); 331 detect_cache_shape();
308 } 332 }
309 333
310 /* Disable the FPU */ 334 fpu_init();
311 if (fpu_disabled) { 335 dsp_init();
312 printk("FPU Disabled\n");
313 current_cpu_data.flags &= ~CPU_HAS_FPU;
314 disable_fpu();
315 }
316
317 /* FPU initialization */
318 if ((current_cpu_data.flags & CPU_HAS_FPU)) {
319 clear_thread_flag(TIF_USEDFPU);
320 clear_used_math();
321 }
322 336
323 /* 337 /*
324 * Initialize the per-CPU ASID cache very early, since the 338 * Initialize the per-CPU ASID cache very early, since the
@@ -326,29 +340,24 @@ asmlinkage void __init sh_cpu_init(void)
326 */ 340 */
327 current_cpu_data.asid_cache = NO_CONTEXT; 341 current_cpu_data.asid_cache = NO_CONTEXT;
328 342
329#ifdef CONFIG_SH_DSP 343 speculative_execution_init();
330 /* Probe for DSP */ 344 expmask_init();
331 dsp_init();
332 345
333 /* Disable the DSP */ 346 /* Do the rest of the boot processor setup */
334 if (dsp_disabled) { 347 if (raw_smp_processor_id() == 0) {
335 printk("DSP Disabled\n"); 348 /* Save off the BIOS VBR, if there is one */
336 current_cpu_data.flags &= ~CPU_HAS_DSP; 349 sh_bios_vbr_init();
337 release_dsp();
338 }
339#endif
340 350
341 /* 351 /*
342 * Some brain-damaged loaders decided it would be a good idea to put 352 * Setup VBR for boot CPU. Secondary CPUs do this through
343 * the UBC to sleep. This causes some issues when it comes to things 353 * start_secondary().
344 * like PTRACE_SINGLESTEP or doing hardware watchpoints in GDB. So .. 354 */
345 * we wake it up and hope that all is well. 355 per_cpu_trap_init();
346 */
347#ifdef CONFIG_SUPERH32
348 if (raw_smp_processor_id() == 0)
349 ubc_wakeup();
350#endif
351 356
352 speculative_execution_init(); 357 /*
353 expmask_init(); 358 * Boot processor to setup the FP and extended state
359 * context info.
360 */
361 init_thread_xstate();
362 }
354} 363}