author     Paul Mundt <lethal@linux-sh.org>   2010-02-18 04:35:20 -0500
committer  Paul Mundt <lethal@linux-sh.org>   2010-02-18 04:35:20 -0500
commit     77f36fcc035a5af19e95f50a2e648cda2a6ef2b9
tree       a183a3289807a83da9c11e0d2d722cec60fce5d9 /arch/sh/kernel
parent     838a4a9dcee0cbaeb0943531da00ac44d578f315
parent     d01447b3197c2c470a14666be2c640407bbbfec7

Merge branch 'sh/pmb-dynamic'
Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--  arch/sh/kernel/cpu/init.c     | 21
-rw-r--r--  arch/sh/kernel/cpu/sh4/sq.c   | 13
-rw-r--r--  arch/sh/kernel/head_32.S      | 52
-rw-r--r--  arch/sh/kernel/setup.c        |  2
-rw-r--r--  arch/sh/kernel/traps_32.c     |  7
-rw-r--r--  arch/sh/kernel/vmlinux.lds.S  |  7
6 files changed, 74 insertions, 28 deletions
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 6311b0b1789d..c736422344eb 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -24,6 +24,7 @@
 #include <asm/elf.h>
 #include <asm/io.h>
 #include <asm/smp.h>
+#include <asm/sh_bios.h>

 #ifdef CONFIG_SH_FPU
 #define cpu_has_fpu	1
@@ -342,9 +343,21 @@ asmlinkage void __init sh_cpu_init(void)
 	speculative_execution_init();
 	expmask_init();

-	/*
-	 * Boot processor to setup the FP and extended state context info.
-	 */
-	if (raw_smp_processor_id() == 0)
+	/* Do the rest of the boot processor setup */
+	if (raw_smp_processor_id() == 0) {
+		/* Save off the BIOS VBR, if there is one */
+		sh_bios_vbr_init();
+
+		/*
+		 * Setup VBR for boot CPU. Secondary CPUs do this through
+		 * start_secondary().
+		 */
+		per_cpu_trap_init();
+
+		/*
+		 * Boot processor to setup the FP and extended state
+		 * context info.
+		 */
 		init_thread_xstate();
+	}
 }
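This hunk folds the BIOS VBR save and the boot CPU's VBR setup, previously done in trap_init() (see the traps_32.c hunk further down), into the boot-processor branch of sh_cpu_init(). Read in one piece, the resulting branch looks roughly like the sketch below, assembled from the added lines above rather than copied from the post-merge file:

	/* Do the rest of the boot processor setup */
	if (raw_smp_processor_id() == 0) {
		/* Save off the BIOS VBR, if there is one */
		sh_bios_vbr_init();

		/*
		 * Set up VBR for the boot CPU; secondary CPUs do this
		 * from start_secondary().
		 */
		per_cpu_trap_init();

		/* Set up the FP and extended state context info. */
		init_thread_xstate();
	}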
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index 97aea9d69b00..fc065f9da6e5 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -100,7 +100,7 @@ static inline void sq_mapping_list_del(struct sq_mapping *map)
 	spin_unlock_irq(&sq_mapping_lock);
 }

-static int __sq_remap(struct sq_mapping *map, unsigned long flags)
+static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
 {
 #if defined(CONFIG_MMU)
 	struct vm_struct *vma;
@@ -113,7 +113,7 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)

 	if (ioremap_page_range((unsigned long)vma->addr,
 			       (unsigned long)vma->addr + map->size,
-			       vma->phys_addr, __pgprot(flags))) {
+			       vma->phys_addr, prot)) {
 		vunmap(vma->addr);
 		return -EAGAIN;
 	}
@@ -135,14 +135,14 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)
  * @phys: Physical address of mapping.
  * @size: Length of mapping.
  * @name: User invoking mapping.
- * @flags: Protection flags.
+ * @prot: Protection bits.
  *
  * Remaps the physical address @phys through the next available store queue
  * address of @size length. @name is logged at boot time as well as through
  * the sysfs interface.
  */
 unsigned long sq_remap(unsigned long phys, unsigned int size,
-		       const char *name, unsigned long flags)
+		       const char *name, pgprot_t prot)
 {
 	struct sq_mapping *map;
 	unsigned long end;
@@ -177,7 +177,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size,

 	map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);

-	ret = __sq_remap(map, pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
+	ret = __sq_remap(map, prot);
 	if (unlikely(ret != 0))
 		goto out;

@@ -309,8 +309,7 @@ static ssize_t mapping_store(const char *buf, size_t count)
 		return -EIO;

 	if (likely(len)) {
-		int ret = sq_remap(base, len, "Userspace",
-				   pgprot_val(PAGE_SHARED));
+		int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
 		if (ret < 0)
 			return ret;
 	} else
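With sq_remap() now taking a pgprot_t, callers hand the protection through as-is instead of having raw bits OR'd over PAGE_KERNEL_NOCACHE inside sq.c. A minimal caller sketch against the new signature; the function name and its arguments are illustrative only, and <cpu/sq.h> is assumed to be where the prototype lives:

	#include <linux/mm.h>	/* pgprot_t, PAGE_KERNEL_NOCACHE */
	#include <linux/err.h>	/* IS_ERR_VALUE() */
	#include <cpu/sq.h>	/* sq_remap() prototype (assumed header) */

	static unsigned long example_map_region(unsigned long phys, unsigned int len)
	{
		unsigned long sq_addr;

		/*
		 * Before this patch a caller passed
		 * pgprot_val(PAGE_KERNEL_NOCACHE) as raw flags; now the
		 * pgprot_t itself is passed straight through to
		 * ioremap_page_range().
		 */
		sq_addr = sq_remap(phys, len, "example", PAGE_KERNEL_NOCACHE);
		if (IS_ERR_VALUE(sq_addr))
			return 0;	/* remap failed, negative errno returned */

		return sq_addr;		/* P4SEG store queue address */
	}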
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index 83f2b84b58da..fe0b743881b0 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -85,7 +85,7 @@ ENTRY(_stext)
 	ldc	r0, r7_bank	! ... and initial thread_info
 #endif

-#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
+#ifdef CONFIG_PMB
 	/*
 	 * Reconfigure the initial PMB mappings setup by the hardware.
 	 *
@@ -139,7 +139,6 @@ ENTRY(_stext)
 	mov.l	r0, @r1

 	mov.l	.LMEMORY_SIZE, r5
-	mov	r5, r7

 	mov	#PMB_E_SHIFT, r0
 	mov	#0x1, r4
@@ -150,8 +149,43 @@ ENTRY(_stext)
 	mov.l	.LFIRST_ADDR_ENTRY, r2
 	mov.l	.LPMB_ADDR, r3

+	/*
+	 * First we need to walk the PMB and figure out if there are any
+	 * existing mappings that match the initial mappings VPN/PPN.
+	 * If these have already been established by the bootloader, we
+	 * don't bother setting up new entries here, and let the late PMB
+	 * initialization take care of things instead.
+	 *
+	 * Note that we may need to coalesce and merge entries in order
+	 * to reclaim more available PMB slots, which is much more than
+	 * we want to do at this early stage.
+	 */
+	mov	#0, r10
+	mov	#NR_PMB_ENTRIES, r9
+
+	mov	r1, r7		/* temporary PMB_DATA iter */
+
+.Lvalidate_existing_mappings:
+
+	mov.l	@r7, r8
+	and	r0, r8
+	cmp/eq	r0, r8		/* Check for valid __MEMORY_START mappings */
+	bt	.Lpmb_done
+
+	add	#1, r10		/* Increment the loop counter */
+	cmp/eq	r9, r10
+	bf/s	.Lvalidate_existing_mappings
+	add	r4, r7		/* Increment to the next PMB_DATA entry */
+
+	/*
+	 * If we've fallen through, continue with setting up the initial
+	 * mappings.
+	 */
+
+	mov	r5, r7		/* cached_to_uncached */
 	mov	#0, r10

+#ifdef CONFIG_UNCACHED_MAPPING
 	/*
 	 * Uncached mapping
 	 */
@@ -171,6 +205,7 @@ ENTRY(_stext)
 	add	r4, r1
 	add	r4, r3
 	add	#1, r10
+#endif

 	/*
 	 * Iterate over all of the available sizes from largest to
@@ -216,6 +251,7 @@ ENTRY(_stext)
 	__PMB_ITER_BY_SIZE(64)
 	__PMB_ITER_BY_SIZE(16)

+#ifdef CONFIG_UNCACHED_MAPPING
 	/*
 	 * Now that we can access it, update cached_to_uncached and
 	 * uncached_size.
@@ -228,6 +264,7 @@ ENTRY(_stext)
 	shll16	r7
 	shll8	r7
 	mov.l	r7, @r0
+#endif

 	/*
 	 * Clear the remaining PMB entries.
@@ -236,7 +273,7 @@ ENTRY(_stext)
 	 * r10 = number of entries we've setup so far
 	 */
 	mov	#0, r1
-	mov	#PMB_ENTRY_MAX, r0
+	mov	#NR_PMB_ENTRIES, r0

 .Lagain:
 	mov.l	r1, @r3		/* Clear PMB_ADDR entry */
@@ -248,7 +285,8 @@ ENTRY(_stext)
 	mov.l	6f, r0
 	icbi	@r0

-#endif /* !CONFIG_PMB_LEGACY */
+.Lpmb_done:
+#endif /* CONFIG_PMB */

 #ifndef CONFIG_SH_NO_BSS_INIT
 	/*
@@ -300,13 +338,15 @@ ENTRY(stack_start)
 6:	.long	sh_cpu_init
 7:	.long	init_thread_union

-#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
+#ifdef CONFIG_PMB
 .LPMB_ADDR:		.long	PMB_ADDR
 .LPMB_DATA:		.long	PMB_DATA
 .LFIRST_ADDR_ENTRY:	.long	PAGE_OFFSET | PMB_V
 .LFIRST_DATA_ENTRY:	.long	__MEMORY_START | PMB_V
 .LMMUCR:		.long	MMUCR
+.LMEMORY_SIZE:		.long	__MEMORY_SIZE
+#ifdef CONFIG_UNCACHED_MAPPING
 .Lcached_to_uncached:	.long	cached_to_uncached
 .Luncached_size:	.long	uncached_size
-.LMEMORY_SIZE:		.long	__MEMORY_SIZE
+#endif
 #endif
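The new .Lvalidate_existing_mappings loop is dense SH assembly. As a reading aid only, it behaves roughly like the C sketch below; the register roles (r0 holding the .LFIRST_DATA_ENTRY value __MEMORY_START | PMB_V, r1 the PMB_DATA base, r4 a 1 << PMB_E_SHIFT per-entry stride) are inferred from the surrounding code rather than stated by the patch, and the function name is made up for illustration:

	/* Illustrative C rendering of the early-boot PMB walk above. */
	static int __init pmb_bootloader_mapping_present(void)
	{
		unsigned long data_reg = PMB_DATA;		/* r7, starts at r1 */
		unsigned long match = __MEMORY_START | PMB_V;	/* r0, inferred */
		int i;						/* r10 counted up to r9 */

		for (i = 0; i < NR_PMB_ENTRIES; i++) {
			unsigned long data = *(volatile unsigned long *)data_reg;

			/* Entry already valid and covering __MEMORY_START? */
			if ((data & match) == match)
				return 1;	/* bt .Lpmb_done: defer to late PMB init */

			data_reg += 1 << PMB_E_SHIFT;	/* add r4, r7 */
		}

		return 0;	/* fell through: set up the initial mappings here */
	}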
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index e187750dd319..3459e70eed72 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -421,6 +421,8 @@ void __init setup_arch(char **cmdline_p)

 	parse_early_param();

+	uncached_init();
+
 	plat_early_device_setup();

 	/* Let earlyprintk output early console messages */
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 9c090cb68878..c3d86fa71ddf 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -30,7 +30,6 @@
 #include <asm/alignment.h>
 #include <asm/fpu.h>
 #include <asm/kprobes.h>
-#include <asm/sh_bios.h>

 #ifdef CONFIG_CPU_SH2
 # define TRAP_RESERVED_INST	4
@@ -848,12 +847,6 @@ void __init trap_init(void)
 #ifdef TRAP_UBC
 	set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler);
 #endif
-
-	/* Save off the BIOS VBR, if there is one */
-	sh_bios_vbr_init();
-
-	/* Setup VBR for boot cpu */
-	per_cpu_trap_init();
 }

 void show_stack(struct task_struct *tsk, unsigned long *sp)
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 0e66c7b30e0f..7f8a709c3ada 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -14,11 +14,10 @@ OUTPUT_ARCH(sh)
 #include <asm/cache.h>
 #include <asm/vmlinux.lds.h>

-#if defined(CONFIG_29BIT) || defined(CONFIG_SUPERH64) || \
-    defined(CONFIG_PMB_LEGACY)
-#define MEMORY_OFFSET	__MEMORY_START
+#ifdef CONFIG_PMB
+#define MEMORY_OFFSET	0
 #else
-#define MEMORY_OFFSET	0
+#define MEMORY_OFFSET	__MEMORY_START
 #endif

 ENTRY(_start)