author	Russell King <rmk@dyn-67.arm.linux.org.uk>	2005-05-31 17:22:32 -0400
committer	Russell King <rmk@dyn-67.arm.linux.org.uk>	2005-05-31 17:22:32 -0400
commit	ccea7a19e54349d4f40778304e1bb88da83d39e7 (patch)
tree	dd42dbff31b2dff8b226f1b61eff4b958fca5496 /arch
parent	49f680ea7bac5c679fb6374a326a164a3fba07cc (diff)
[PATCH] ARM SMP: Fix vector entry
The current vector entry system does not allow for SMP.  In order to work
around this, we need to eliminate our reliance on the fixed save areas,
which breaks the way we enable alignment traps.  This patch changes the
way we handle the save areas such that we can have one per CPU.

Signed-off-by: Russell King <rmk@arm.linux.org.uk>
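For readers skimming the diff below: the old code parked lr_<exception>, spsr_<exception> and old_r0 in fixed .data words (__temp_irq, __temp_und, __temp_abt), which a second CPU would trample. The patch replaces them with a small per-CPU, per-mode save area whose address the vector stub hands to the handler in r0. A minimal C sketch of that layout (illustrative only: NR_CPUS is hypothetical here, and a plain aligned attribute stands in for ____cacheline_aligned):

    /* Sketch of the per-CPU save areas added by this patch -- not a drop-in copy. */
    typedef unsigned int u32;

    #define NR_CPUS 4                    /* hypothetical; the kernel config sets this */

    struct stack {
            u32 irq[3];                  /* saved r0, lr_irq, spsr_irq */
            u32 abt[3];                  /* saved r0, lr_abt, spsr_abt */
            u32 und[3];                  /* saved r0, lr_und, spsr_und */
    } __attribute__((aligned(32)));      /* stands in for ____cacheline_aligned */

    static struct stack stacks[NR_CPUS]; /* one set of save areas per CPU */

Each area is only three words because the stub never saves more than r0, the parent PC and the parent CPSR before switching to SVC mode, where the normal kernel stack takes over.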
Diffstat (limited to 'arch')
-rw-r--r--   arch/arm/kernel/entry-armv.S   172
-rw-r--r--   arch/arm/kernel/setup.c         52
2 files changed, 139 insertions(+), 85 deletions(-)
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index cfb5cf5e48fc..78cf84cdc2ae 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -53,46 +53,62 @@
 /*
  * Invalid mode handlers
  */
-	.macro	inv_entry, sym, reason
-	sub	sp, sp, #S_FRAME_SIZE		@ Allocate frame size in one go
-	stmia	sp, {r0 - lr}			@ Save XXX r0 - lr
-	ldr	r4, .LC\sym
+	.macro	inv_entry, reason
+	sub	sp, sp, #S_FRAME_SIZE
+	stmib	sp, {r1 - lr}
 	mov	r1, #\reason
 	.endm
 
 __pabt_invalid:
-	inv_entry abt, BAD_PREFETCH
-	b	1f
+	inv_entry BAD_PREFETCH
+	b	common_invalid
 
 __dabt_invalid:
-	inv_entry abt, BAD_DATA
-	b	1f
+	inv_entry BAD_DATA
+	b	common_invalid
 
 __irq_invalid:
-	inv_entry irq, BAD_IRQ
-	b	1f
+	inv_entry BAD_IRQ
+	b	common_invalid
 
 __und_invalid:
-	inv_entry und, BAD_UNDEFINSTR
+	inv_entry BAD_UNDEFINSTR
+
+	@
+	@ XXX fall through to common_invalid
+	@
+
+@
+@ common_invalid - generic code for failed exception (re-entrant version of handlers)
+@
+common_invalid:
+	zero_fp
+
+	ldmia	r0, {r4 - r6}
+	add	r0, sp, #S_PC		@ here for interlock avoidance
+	mov	r7, #-1			@  ""   ""    ""        ""
+	str	r4, [sp]		@ save preserved r0
+	stmia	r0, {r5 - r7}		@ lr_<exception>,
+					@ cpsr_<exception>, "old_r0"
 
-1:	zero_fp
-	ldmia	r4, {r5 - r7}			@ Get XXX pc, cpsr, old_r0
-	add	r4, sp, #S_PC
-	stmia	r4, {r5 - r7}			@ Save XXX pc, cpsr, old_r0
 	mov	r0, sp
-	and	r2, r6, #31			@ int mode
+	and	r2, r6, #0x1f
 	b	bad_mode
 
 /*
  * SVC mode handlers
  */
-	.macro	svc_entry, sym
+	.macro	svc_entry
 	sub	sp, sp, #S_FRAME_SIZE
-	stmia	sp, {r0 - r12}			@ save r0 - r12
-	ldr	r2, .LC\sym
-	add	r0, sp, #S_FRAME_SIZE
-	ldmia	r2, {r2 - r4}			@ get pc, cpsr
-	add	r5, sp, #S_SP
+	stmib	sp, {r1 - r12}
+
+	ldmia	r0, {r1 - r3}
+	add	r5, sp, #S_SP		@ here for interlock avoidance
+	mov	r4, #-1			@  ""  ""      ""       ""
+	add	r0, sp, #S_FRAME_SIZE	@  ""  ""      ""       ""
+	str	r1, [sp]		@ save the "real" r0 copied
+					@ from the exception stack
+
 	mov	r1, lr
 
 	@
@@ -109,7 +125,7 @@ __und_invalid:
 
 	.align	5
 __dabt_svc:
-	svc_entry abt
+	svc_entry
 
 	@
 	@ get ready to re-enable interrupts if appropriate
@@ -156,13 +172,15 @@ __dabt_svc:
 
 	.align	5
 __irq_svc:
-	svc_entry irq
+	svc_entry
+
 #ifdef CONFIG_PREEMPT
 	get_thread_info tsk
 	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
 	add	r7, r8, #1			@ increment it
 	str	r7, [tsk, #TI_PREEMPT]
 #endif
+
 	irq_handler
 #ifdef CONFIG_PREEMPT
 	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
@@ -200,7 +218,7 @@ svc_preempt:
 
 	.align	5
 __und_svc:
-	svc_entry und
+	svc_entry
 
 	@
 	@ call emulation code, which returns using r9 if it has emulated
@@ -230,7 +248,7 @@ __und_svc:
 
 	.align	5
 __pabt_svc:
-	svc_entry abt
+	svc_entry
 
 	@
 	@ re-enable interrupts if appropriate
@@ -263,12 +281,6 @@ __pabt_svc:
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
 
 	.align	5
-.LCirq:
-	.word	__temp_irq
-.LCund:
-	.word	__temp_und
-.LCabt:
-	.word	__temp_abt
 .LCcralign:
 	.word	cr_alignment
 #ifdef MULTI_ABORT
@@ -285,12 +297,16 @@ __pabt_svc:
 /*
  * User mode handlers
  */
-	.macro	usr_entry, sym
-	sub	sp, sp, #S_FRAME_SIZE		@ Allocate frame size in one go
-	stmia	sp, {r0 - r12}			@ save r0 - r12
-	ldr	r7, .LC\sym
-	add	r5, sp, #S_PC
-	ldmia	r7, {r2 - r4}			@ Get USR pc, cpsr
+	.macro	usr_entry
+	sub	sp, sp, #S_FRAME_SIZE
+	stmib	sp, {r1 - r12}
+
+	ldmia	r0, {r1 - r3}
+	add	r0, sp, #S_PC		@ here for interlock avoidance
+	mov	r4, #-1			@  ""  ""     ""        ""
+
+	str	r1, [sp]		@ save the "real" r0 copied
+					@ from the exception stack
 
 #if __LINUX_ARM_ARCH__ < 6
 	@ make sure our user space atomic helper is aborted
@@ -307,8 +323,8 @@ __pabt_svc:
 	@
 	@ Also, separately save sp_usr and lr_usr
 	@
-	stmia	r5, {r2 - r4}
-	stmdb	r5, {sp, lr}^
+	stmia	r0, {r2 - r4}
+	stmdb	r0, {sp, lr}^
 
 	@
 	@ Enable the alignment trap while in kernel mode
@@ -323,7 +339,7 @@ __pabt_svc:
 
 	.align	5
 __dabt_usr:
-	usr_entry abt
+	usr_entry
 
 	@
 	@ Call the processor-specific abort handler:
@@ -352,7 +368,7 @@ __dabt_usr:
 
 	.align	5
 __irq_usr:
-	usr_entry irq
+	usr_entry
 
 	get_thread_info tsk
 #ifdef CONFIG_PREEMPT
@@ -360,6 +376,7 @@ __irq_usr:
 	add	r7, r8, #1			@ increment it
 	str	r7, [tsk, #TI_PREEMPT]
 #endif
+
 	irq_handler
 #ifdef CONFIG_PREEMPT
 	ldr	r0, [tsk, #TI_PREEMPT]
@@ -367,6 +384,7 @@ __irq_usr:
 	teq	r0, r7
 	strne	r0, [r0, -r0]
 #endif
+
 	mov	why, #0
 	b	ret_to_user
 
@@ -374,7 +392,7 @@ __irq_usr:
 
 	.align	5
 __und_usr:
-	usr_entry und
+	usr_entry
 
 	tst	r3, #PSR_T_BIT			@ Thumb mode?
 	bne	fpundefinstr			@ ignore FP
@@ -490,7 +508,7 @@ fpundefinstr:
 
 	.align	5
 __pabt_usr:
-	usr_entry abt
+	usr_entry
 
 	enable_irq				@ Enable interrupts
 	mov	r0, r2				@ address (pc)
@@ -749,29 +767,41 @@ __kuser_helper_end:
  *
  * Common stub entry macro:
  *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
+ *
+ * SP points to a minimal amount of processor-private memory, the address
+ * of which is copied into r0 for the mode specific abort handler.
  */
-	.macro	vector_stub, name, sym, correction=0
+	.macro	vector_stub, name, correction=0
 	.align	5
 
 vector_\name:
-	ldr	r13, .LCs\sym
 	.if \correction
 	sub	lr, lr, #\correction
 	.endif
-	str	lr, [r13]			@ save lr_IRQ
+
+	@
+	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
+	@ (parent CPSR)
+	@
+	stmia	sp, {r0, lr}		@ save r0, lr
 	mrs	lr, spsr
-	str	lr, [r13, #4]			@ save spsr_IRQ
+	str	lr, [sp, #8]		@ save spsr
+
 	@
-	@ now branch to the relevant MODE handling routine
+	@ Prepare for SVC32 mode.  IRQs remain disabled.
 	@
-	mrs	r13, cpsr
-	bic	r13, r13, #MODE_MASK
-	orr	r13, r13, #SVC_MODE
-	msr	spsr_cxsf, r13			@ switch to SVC_32 mode
+	mrs	r0, cpsr
+	bic	r0, r0, #MODE_MASK
+	orr	r0, r0, #SVC_MODE
+	msr	spsr_cxsf, r0
 
-	and	lr, lr, #15
+	@
+	@ the branch table must immediately follow this code
+	@
+	mov	r0, sp
+	and	lr, lr, #0x0f
 	ldr	lr, [pc, lr, lsl #2]
-	movs	pc, lr				@ Changes mode and branches
+	movs	pc, lr			@ branch to handler in SVC mode
 	.endm
 
 	.globl	__stubs_start
@@ -779,7 +809,7 @@ __stubs_start:
 /*
  * Interrupt dispatcher
  */
-	vector_stub	irq, irq, 4
+	vector_stub	irq, 4
 
 	.long	__irq_usr			@  0  (USR_26 / USR_32)
 	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -802,7 +832,7 @@ __stubs_start:
  * Data abort dispatcher
  * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
  */
-	vector_stub	dabt, abt, 8
+	vector_stub	dabt, 8
 
 	.long	__dabt_usr			@  0  (USR_26 / USR_32)
 	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -825,7 +855,7 @@ __stubs_start:
  * Prefetch abort dispatcher
  * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
  */
-	vector_stub	pabt, abt, 4
+	vector_stub	pabt, 4
 
 	.long	__pabt_usr			@  0 (USR_26 / USR_32)
 	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
@@ -848,7 +878,7 @@ __stubs_start:
  * Undef instr entry dispatcher
  * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
  */
-	vector_stub	und, und
+	vector_stub	und
 
 	.long	__und_usr			@  0 (USR_26 / USR_32)
 	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
@@ -902,13 +932,6 @@ vector_addrexcptn:
 .LCvswi:
 	.word	vector_swi
 
-.LCsirq:
-	.word	__temp_irq
-.LCsund:
-	.word	__temp_und
-.LCsabt:
-	.word	__temp_abt
-
 	.globl	__stubs_end
 __stubs_end:
 
@@ -930,23 +953,6 @@ __vectors_end:
 
 	.data
 
-/*
- * Do not reorder these, and do not insert extra data between...
- */
-
-__temp_irq:
-	.word	0				@ saved lr_irq
-	.word	0				@ saved spsr_irq
-	.word	-1				@ old_r0
-__temp_und:
-	.word	0				@ Saved lr_und
-	.word	0				@ Saved spsr_und
-	.word	-1				@ old_r0
-__temp_abt:
-	.word	0				@ Saved lr_abt
-	.word	0				@ Saved spsr_abt
-	.word	-1				@ old_r0
-
 	.globl	cr_alignment
 	.globl	cr_no_alignment
 cr_alignment:
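
Worth pausing on the new vector_stub dispatch before moving on to setup.c: the low four bits of the parent spsr index a word table that must sit immediately after the macro body (pc reads as "this instruction + 8" on ARM, which is exactly the table base), and movs pc, lr both branches and copies spsr, already rewritten to SVC_MODE above, into cpsr. A hand-annotated sketch with illustrative labels, not from the patch:

        @ Sketch of the tail of vector_stub.  At this point lr still holds
        @ spsr_<exception> and sp points at the 3-word per-CPU save area.
        mov     r0, sp                  @ handler receives the save area address in r0
        and     lr, lr, #0x0f           @ parent mode bits: 0=USR, 1=FIQ, 2=IRQ, 3=SVC
        ldr     lr, [pc, lr, lsl #2]    @ pc here reads as the table base (insn + 8)
        movs    pc, lr                  @ branch; 's' copies spsr (now SVC_MODE) to cpsr
branch_table:                           @ must immediately follow the code above
        .long   handler_usr             @ 0 (USR_26 / USR_32)
        .long   handler_fiq             @ 1 (FIQ_26 / FIQ_32)
        @ ...one word per mode; impossible modes point at the __xxx_invalid handlers

This is also why r13 is freed up: the old code borrowed the exception mode's r13 to address the fixed save area, while the new code simply uses the banked sp that cpu_init() (below) points at this CPU's save areas.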
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index c2a7da3ac0f1..7ecdda3f1253 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -92,6 +92,14 @@ struct cpu_user_fns cpu_user;
 struct cpu_cache_fns cpu_cache;
 #endif
 
+struct stack {
+	u32 irq[3];
+	u32 abt[3];
+	u32 und[3];
+} ____cacheline_aligned;
+
+static struct stack stacks[NR_CPUS];
+
 char elf_platform[ELF_PLATFORM_SIZE];
 EXPORT_SYMBOL(elf_platform);
 
@@ -307,8 +315,6 @@ static void __init setup_processor(void)
 	       cpu_name, processor_id, (int)processor_id & 15,
 	       proc_arch[cpu_architecture()]);
 
-	dump_cpu_info(smp_processor_id());
-
 	sprintf(system_utsname.machine, "%s%c", list->arch_name, ENDIANNESS);
 	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
 	elf_hwcap = list->elf_hwcap;
@@ -316,6 +322,46 @@ static void __init setup_processor(void)
 	cpu_proc_init();
 }
 
+/*
+ * cpu_init - initialise one CPU.
+ *
+ * cpu_init dumps the cache information, initialises SMP specific
+ * information, and sets up the per-CPU stacks.
+ */
+void __init cpu_init(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct stack *stk = &stacks[cpu];
+
+	if (cpu >= NR_CPUS) {
+		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
+		BUG();
+	}
+
+	dump_cpu_info(cpu);
+
+	/*
+	 * setup stacks for re-entrant exception handlers
+	 */
+	__asm__ (
+	"msr	cpsr_c, %1\n\t"
+	"add	sp, %0, %2\n\t"
+	"msr	cpsr_c, %3\n\t"
+	"add	sp, %0, %4\n\t"
+	"msr	cpsr_c, %5\n\t"
+	"add	sp, %0, %6\n\t"
+	"msr	cpsr_c, %7"
+	    :
+	    : "r" (stk),
+	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+	      "I" (offsetof(struct stack, irq[0])),
+	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+	      "I" (offsetof(struct stack, abt[0])),
+	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+	      "I" (offsetof(struct stack, und[0])),
+	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE));
+}
+
 static struct machine_desc * __init setup_machine(unsigned int nr)
 {
 	struct machine_desc *list;
@@ -715,6 +761,8 @@ void __init setup_arch(char **cmdline_p)
 	paging_init(&meminfo, mdesc);
 	request_standard_resources(&meminfo, mdesc);
 
+	cpu_init();
+
 	/*
 	 * Set up various architecture-specific pointers
 	 */
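
A closing note on the cpu_init() inline assembly, which reads more easily hand-expanded: writing sp while in a banked mode sets that mode's private stack pointer, so the sequence points sp_irq, sp_abt and sp_und at the matching members of this CPU's struct stack and drops back to SVC mode, with FIQs and IRQs masked throughout. An annotated expansion (a sketch: the 0/12/24 offsets follow from the three u32[3] members; %0 holds stk):

        msr     cpsr_c, #PSR_F_BIT|PSR_I_BIT|IRQ_MODE   @ switch to IRQ mode, FIQ+IRQ masked
        add     sp, %0, #0                              @ sp_irq = &stk->irq[0]
        msr     cpsr_c, #PSR_F_BIT|PSR_I_BIT|ABT_MODE   @ switch to ABT mode
        add     sp, %0, #12                             @ sp_abt = &stk->abt[0]
        msr     cpsr_c, #PSR_F_BIT|PSR_I_BIT|UND_MODE   @ switch to UND mode
        add     sp, %0, #24                             @ sp_und = &stk->und[0]
        msr     cpsr_c, #PSR_F_BIT|PSR_I_BIT|SVC_MODE   @ return to SVC mode

Note that the vector stubs use stmia without writeback, so sp stays parked at the base of the area: each per-mode "stack" is really a fixed three-word scratch buffer, which is why twelve bytes per mode suffice.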