Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/kernel/entry-armv.S       | 254
-rw-r--r--  arch/arm/kernel/entry-header.S     |   7
-rw-r--r--  arch/arm/kernel/head.S             |  44
-rw-r--r--  arch/arm/kernel/setup.c            |  52
-rw-r--r--  arch/arm/kernel/smp.c              | 107
-rw-r--r--  arch/arm/mach-integrator/Makefile  |   1
-rw-r--r--  arch/arm/mach-integrator/core.c    |  20
-rw-r--r--  arch/arm/mach-integrator/headsmp.S |  37
-rw-r--r--  arch/arm/mach-integrator/leds.c    |   4
-rw-r--r--  arch/arm/mach-integrator/platsmp.c | 192
-rw-r--r--  arch/arm/mach-pxa/pm.c             |   2
-rw-r--r--  arch/arm/mach-sa1100/pm.c          |   2
12 files changed, 597 insertions(+), 125 deletions(-)
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index e14278d59882..39a6c1b0b9a3 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -24,48 +24,91 @@
 #include "entry-header.S"
 
 /*
+ * Interrupt handling.  Preserves r7, r8, r9
+ */
+	.macro	irq_handler
+1:	get_irqnr_and_base r0, r6, r5, lr
+	movne	r1, sp
+	@
+	@ routine called with r0 = irq number, r1 = struct pt_regs *
+	@
+	adrne	lr, 1b
+	bne	asm_do_IRQ
+
+#ifdef CONFIG_SMP
+	/*
+	 * XXX
+	 *
+	 * this macro assumes that irqstat (r6) and base (r5) are
+	 * preserved from get_irqnr_and_base above
+	 */
+	test_for_ipi r0, r6, r5, lr
+	movne	r0, sp
+	adrne	lr, 1b
+	bne	do_IPI
+#endif
+
+	.endm
+
+/*
  * Invalid mode handlers
  */
-	.macro	inv_entry, sym, reason
-	sub	sp, sp, #S_FRAME_SIZE		@ Allocate frame size in one go
-	stmia	sp, {r0 - lr}			@ Save XXX r0 - lr
-	ldr	r4, .LC\sym
+	.macro	inv_entry, reason
+	sub	sp, sp, #S_FRAME_SIZE
+	stmib	sp, {r1 - lr}
 	mov	r1, #\reason
 	.endm
 
 __pabt_invalid:
-	inv_entry abt, BAD_PREFETCH
-	b	1f
+	inv_entry BAD_PREFETCH
+	b	common_invalid
 
 __dabt_invalid:
-	inv_entry abt, BAD_DATA
-	b	1f
+	inv_entry BAD_DATA
+	b	common_invalid
 
 __irq_invalid:
-	inv_entry irq, BAD_IRQ
-	b	1f
+	inv_entry BAD_IRQ
+	b	common_invalid
 
 __und_invalid:
-	inv_entry und, BAD_UNDEFINSTR
+	inv_entry BAD_UNDEFINSTR
+
+	@
+	@ XXX fall through to common_invalid
+	@
+
+@
+@ common_invalid - generic code for failed exception (re-entrant version of handlers)
+@
+common_invalid:
+	zero_fp
+
+	ldmia	r0, {r4 - r6}
+	add	r0, sp, #S_PC		@ here for interlock avoidance
+	mov	r7, #-1			@  ""   ""    ""        ""
+	str	r4, [sp]		@ save preserved r0
+	stmia	r0, {r5 - r7}		@ lr_<exception>,
+					@ cpsr_<exception>, "old_r0"
 
-1:	zero_fp
-	ldmia	r4, {r5 - r7}			@ Get XXX pc, cpsr, old_r0
-	add	r4, sp, #S_PC
-	stmia	r4, {r5 - r7}			@ Save XXX pc, cpsr, old_r0
 	mov	r0, sp
-	and	r2, r6, #31			@ int mode
+	and	r2, r6, #0x1f
 	b	bad_mode
 
 /*
  * SVC mode handlers
  */
-	.macro	svc_entry, sym
+	.macro	svc_entry
 	sub	sp, sp, #S_FRAME_SIZE
-	stmia	sp, {r0 - r12}			@ save r0 - r12
-	ldr	r2, .LC\sym
-	add	r0, sp, #S_FRAME_SIZE
-	ldmia	r2, {r2 - r4}			@ get pc, cpsr
-	add	r5, sp, #S_SP
+	stmib	sp, {r1 - r12}
+
+	ldmia	r0, {r1 - r3}
+	add	r5, sp, #S_SP		@ here for interlock avoidance
+	mov	r4, #-1			@  ""  ""      ""       ""
+	add	r0, sp, #S_FRAME_SIZE	@  ""  ""      ""       ""
+	str	r1, [sp]		@ save the "real" r0 copied
+					@ from the exception stack
+
 	mov	r1, lr
 
 	@
@@ -82,7 +125,7 @@ __und_invalid:
 
 	.align	5
 __dabt_svc:
-	svc_entry abt
+	svc_entry
 
 	@
 	@ get ready to re-enable interrupts if appropriate
@@ -129,28 +172,24 @@ __dabt_svc:
 
 	.align	5
 __irq_svc:
-	svc_entry irq
+	svc_entry
+
 #ifdef CONFIG_PREEMPT
-	get_thread_info r8
-	ldr	r9, [r8, #TI_PREEMPT]		@ get preempt count
-	add	r7, r9, #1			@ increment it
-	str	r7, [r8, #TI_PREEMPT]
+	get_thread_info tsk
+	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
+	add	r7, r8, #1			@ increment it
+	str	r7, [tsk, #TI_PREEMPT]
 #endif
-1:	get_irqnr_and_base r0, r6, r5, lr
-	movne	r1, sp
-	@
-	@ routine called with r0 = irq number, r1 = struct pt_regs *
-	@
-	adrne	lr, 1b
-	bne	asm_do_IRQ
+
+	irq_handler
 #ifdef CONFIG_PREEMPT
-	ldr	r0, [r8, #TI_FLAGS]		@ get flags
+	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
 	tst	r0, #_TIF_NEED_RESCHED
 	blne	svc_preempt
 preempt_return:
-	ldr	r0, [r8, #TI_PREEMPT]		@ read preempt value
+	ldr	r0, [tsk, #TI_PREEMPT]		@ read preempt value
+	str	r8, [tsk, #TI_PREEMPT]		@ restore preempt count
 	teq	r0, r7
-	str	r9, [r8, #TI_PREEMPT]		@ restore preempt count
 	strne	r0, [r0, -r0]			@ bug()
 #endif
 	ldr	r0, [sp, #S_PSR]		@ irqs are already disabled
@@ -161,7 +200,7 @@ preempt_return:
 
 #ifdef CONFIG_PREEMPT
 svc_preempt:
-	teq	r9, #0				@ was preempt count = 0
+	teq	r8, #0				@ was preempt count = 0
 	ldreq	r6, .LCirq_stat
 	movne	pc, lr				@ no
 	ldr	r0, [r6, #4]			@ local_irq_count
@@ -169,9 +208,9 @@ svc_preempt:
 	adds	r0, r0, r1
 	movne	pc, lr
 	mov	r7, #0				@ preempt_schedule_irq
-	str	r7, [r8, #TI_PREEMPT]		@ expects preempt_count == 0
+	str	r7, [tsk, #TI_PREEMPT]		@ expects preempt_count == 0
 1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
-	ldr	r0, [r8, #TI_FLAGS]		@ get new tasks TI_FLAGS
+	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
 	tst	r0, #_TIF_NEED_RESCHED
 	beq	preempt_return			@ go again
 	b	1b
@@ -179,7 +218,7 @@ svc_preempt:
 
 	.align	5
 __und_svc:
-	svc_entry und
+	svc_entry
 
 	@
 	@ call emulation code, which returns using r9 if it has emulated
@@ -209,7 +248,7 @@ __und_svc:
 
 	.align	5
 __pabt_svc:
-	svc_entry abt
+	svc_entry
 
 	@
	@ re-enable interrupts if appropriate
@@ -242,12 +281,8 @@ __pabt_svc:
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
 
 	.align	5
-.LCirq:
-	.word	__temp_irq
-.LCund:
-	.word	__temp_und
-.LCabt:
-	.word	__temp_abt
+.LCcralign:
+	.word	cr_alignment
 #ifdef MULTI_ABORT
 .LCprocfns:
 	.word	processor
@@ -262,12 +297,16 @@ __pabt_svc:
 /*
  * User mode handlers
  */
-	.macro	usr_entry, sym
-	sub	sp, sp, #S_FRAME_SIZE		@ Allocate frame size in one go
-	stmia	sp, {r0 - r12}			@ save r0 - r12
-	ldr	r7, .LC\sym
-	add	r5, sp, #S_PC
-	ldmia	r7, {r2 - r4}			@ Get USR pc, cpsr
+	.macro	usr_entry
+	sub	sp, sp, #S_FRAME_SIZE
+	stmib	sp, {r1 - r12}
+
+	ldmia	r0, {r1 - r3}
+	add	r0, sp, #S_PC		@ here for interlock avoidance
+	mov	r4, #-1			@  ""  ""     ""        ""
+
+	str	r1, [sp]		@ save the "real" r0 copied
+					@ from the exception stack
 
 #if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 	@ make sure our user space atomic helper is aborted
@@ -284,13 +323,13 @@ __pabt_svc:
 	@
 	@ Also, separately save sp_usr and lr_usr
 	@
-	stmia	r5, {r2 - r4}
-	stmdb	r5, {sp, lr}^
+	stmia	r0, {r2 - r4}
+	stmdb	r0, {sp, lr}^
 
 	@
 	@ Enable the alignment trap while in kernel mode
 	@
-	alignment_trap r7, r0, __temp_\sym
+	alignment_trap r0
 
 	@
 	@ Clear FP to mark the first stack frame
@@ -300,7 +339,7 @@ __pabt_svc:
 
 	.align	5
 __dabt_usr:
-	usr_entry abt
+	usr_entry
 
 	@
 	@ Call the processor-specific abort handler:
@@ -329,30 +368,23 @@ __dabt_usr:
 
 	.align	5
 __irq_usr:
-	usr_entry irq
+	usr_entry
 
+	get_thread_info tsk
 #ifdef CONFIG_PREEMPT
-	get_thread_info r8
-	ldr	r9, [r8, #TI_PREEMPT]		@ get preempt count
-	add	r7, r9, #1			@ increment it
-	str	r7, [r8, #TI_PREEMPT]
+	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
+	add	r7, r8, #1			@ increment it
+	str	r7, [tsk, #TI_PREEMPT]
 #endif
-1:	get_irqnr_and_base r0, r6, r5, lr
-	movne	r1, sp
-	adrne	lr, 1b
-	@
-	@ routine called with r0 = irq number, r1 = struct pt_regs *
-	@
-	bne	asm_do_IRQ
+
+	irq_handler
 #ifdef CONFIG_PREEMPT
-	ldr	r0, [r8, #TI_PREEMPT]
+	ldr	r0, [tsk, #TI_PREEMPT]
+	str	r8, [tsk, #TI_PREEMPT]
 	teq	r0, r7
-	str	r9, [r8, #TI_PREEMPT]
 	strne	r0, [r0, -r0]
-	mov	tsk, r8
-#else
-	get_thread_info tsk
 #endif
+
 	mov	why, #0
 	b	ret_to_user
 
@@ -360,7 +392,7 @@ __irq_usr:
 
 	.align	5
 __und_usr:
-	usr_entry und
+	usr_entry
 
 	tst	r3, #PSR_T_BIT			@ Thumb mode?
 	bne	fpundefinstr			@ ignore FP
@@ -476,7 +508,7 @@ fpundefinstr:
 
 	.align	5
 __pabt_usr:
-	usr_entry abt
+	usr_entry
 
 	enable_irq				@ Enable interrupts
 	mov	r0, r2				@ address (pc)
@@ -741,29 +773,41 @@ __kuser_helper_end:
  *
  * Common stub entry macro:
  *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
+ *
+ * SP points to a minimal amount of processor-private memory, the address
+ * of which is copied into r0 for the mode specific abort handler.
  */
-	.macro	vector_stub, name, sym, correction=0
+	.macro	vector_stub, name, correction=0
 	.align	5
 
 vector_\name:
-	ldr	r13, .LCs\sym
 	.if \correction
 	sub	lr, lr, #\correction
 	.endif
-	str	lr, [r13]			@ save lr_IRQ
+
+	@
+	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
+	@ (parent CPSR)
+	@
+	stmia	sp, {r0, lr}		@ save r0, lr
 	mrs	lr, spsr
-	str	lr, [r13, #4]			@ save spsr_IRQ
+	str	lr, [sp, #8]		@ save spsr
+
 	@
-	@ now branch to the relevant MODE handling routine
+	@ Prepare for SVC32 mode.  IRQs remain disabled.
 	@
-	mrs	r13, cpsr
-	bic	r13, r13, #MODE_MASK
-	orr	r13, r13, #SVC_MODE
-	msr	spsr_cxsf, r13			@ switch to SVC_32 mode
+	mrs	r0, cpsr
+	bic	r0, r0, #MODE_MASK
+	orr	r0, r0, #SVC_MODE
+	msr	spsr_cxsf, r0
 
-	and	lr, lr, #15
+	@
+	@ the branch table must immediately follow this code
+	@
+	mov	r0, sp
+	and	lr, lr, #0x0f
 	ldr	lr, [pc, lr, lsl #2]
-	movs	pc, lr				@ Changes mode and branches
+	movs	pc, lr			@ branch to handler in SVC mode
 	.endm
 
 	.globl	__stubs_start
@@ -771,7 +815,7 @@ __stubs_start:
 /*
  * Interrupt dispatcher
  */
-	vector_stub	irq, irq, 4
+	vector_stub	irq, 4
 
 	.long	__irq_usr			@  0  (USR_26 / USR_32)
 	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -794,7 +838,7 @@ __stubs_start:
  * Data abort dispatcher
  * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
  */
-	vector_stub	dabt, abt, 8
+	vector_stub	dabt, 8
 
 	.long	__dabt_usr			@  0  (USR_26 / USR_32)
 	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -817,7 +861,7 @@ __stubs_start:
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
  */
-	vector_stub	pabt, abt, 4
+	vector_stub	pabt, 4
 
 	.long	__pabt_usr			@  0  (USR_26 / USR_32)
 	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -840,7 +884,7 @@ __stubs_start:
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
  */
-	vector_stub	und, und
+	vector_stub	und
 
 	.long	__und_usr			@  0  (USR_26 / USR_32)
 	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -894,13 +938,6 @@ vector_addrexcptn:
 .LCvswi:
 	.word	vector_swi
 
-.LCsirq:
-	.word	__temp_irq
-.LCsund:
-	.word	__temp_und
-.LCsabt:
-	.word	__temp_abt
-
 	.globl	__stubs_end
 __stubs_end:
 
@@ -922,23 +959,6 @@ __vectors_end:
 
 	.data
 
-/*
- * Do not reorder these, and do not insert extra data between...
- */
-
-__temp_irq:
-	.word	0				@ saved lr_irq
-	.word	0				@ saved spsr_irq
-	.word	-1				@ old_r0
-__temp_und:
-	.word	0				@ Saved lr_und
-	.word	0				@ Saved spsr_und
-	.word	-1				@ old_r0
-__temp_abt:
-	.word	0				@ Saved lr_abt
-	.word	0				@ Saved spsr_abt
-	.word	-1				@ old_r0
-
 	.globl	cr_alignment
 	.globl	cr_no_alignment
 cr_alignment:
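
Note: the reworked vector stubs above save exactly three words -- r0, the
parent PC (lr_<exception>) and the parent CPSR (spsr_<exception>) -- into a
small block of processor-private memory that SP points at on exception
entry, then hand that block's address to svc_entry/usr_entry in r0. A
minimal C mirror of that layout, with hypothetical field names (the real
backing store is the struct stack added to arch/arm/kernel/setup.c later
in this patch):

	/* Illustrative only: what "stmia sp, {r0, lr}" plus
	 * "str lr, [sp, #8]" leave behind, and what svc_entry/usr_entry
	 * read back with "ldmia r0, {r1 - r3}". */
	struct exception_entry_save {
		unsigned long r0;	/* caller's r0, the "real" r0 */
		unsigned long pc;	/* lr_<exception>: parent PC */
		unsigned long cpsr;	/* spsr_<exception>: parent CPSR */
	};
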
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index a3d40a0e2b04..afef21273963 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -59,11 +59,10 @@
 	mov	\rd, \rd, lsl #13
 	.endm
 
-	.macro	alignment_trap, rbase, rtemp, sym
+	.macro	alignment_trap, rtemp
 #ifdef CONFIG_ALIGNMENT_TRAP
-#define OFF_CR_ALIGNMENT(x)	cr_alignment - x
-
-	ldr	\rtemp, [\rbase, #OFF_CR_ALIGNMENT(\sym)]
+	ldr	\rtemp, .LCcralign
+	ldr	\rtemp, [\rtemp]
 	mcr	p15, 0, \rtemp, c1, c0
 #endif
 	.endm
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 4733877296d4..bd4823c74645 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -2,6 +2,8 @@
  * linux/arch/arm/kernel/head.S
  *
  * Copyright (C) 1994-2002 Russell King
+ * Copyright (c) 2003 ARM Limited
+ * All Rights Reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -165,6 +167,48 @@ __mmap_switched:
 	stmia	r6, {r0, r4}			@ Save control register values
 	b	start_kernel
 
+#if defined(CONFIG_SMP)
+	.type	secondary_startup, #function
+ENTRY(secondary_startup)
+	/*
+	 * Common entry point for secondary CPUs.
+	 *
+	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
+	 * the processor type - there is no need to check the machine type
+	 * as it has already been validated by the primary processor.
+	 */
+	msr	cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC
+	bl	__lookup_processor_type
+	movs	r10, r5				@ invalid processor?
+	moveq	r0, #'p'			@ yes, error 'p'
+	beq	__error
+
+	/*
+	 * Use the page tables supplied from __cpu_up.
+	 */
+	adr	r4, __secondary_data
+	ldmia	r4, {r5, r6, r13}		@ address to jump to after
+	sub	r4, r4, r5			@ mmu has been enabled
+	ldr	r4, [r6, r4]			@ get secondary_data.pgdir
+	adr	lr, __enable_mmu		@ return address
+	add	pc, r10, #12			@ initialise processor
+						@ (return control reg)
+
+	/*
+	 * r6  = &secondary_data
+	 */
+ENTRY(__secondary_switched)
+	ldr	sp, [r6, #4]			@ get secondary_data.stack
+	mov	fp, #0
+	b	secondary_start_kernel
+
+	.type	__secondary_data, %object
+__secondary_data:
+	.long	.
+	.long	secondary_data
+	.long	__secondary_switched
+#endif /* defined(CONFIG_SMP) */
+
 
 
 /*
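
Note: secondary_startup executes from physical addresses before the MMU is
enabled, so it cannot dereference link-time (virtual) addresses directly.
The adr/".long ." pair in __secondary_data is the standard fixup: adr
yields the run-time address of the anchor, the literal holds its link-time
address, and their difference relocates any other virtual pointer. A
sketch of the arithmetic, with hypothetical variable names:

	/* Illustrative only: what the instructions after
	 * "adr r4, __secondary_data" compute. */
	unsigned long virt_anchor;	/* r5: the ".long ." literal */
	unsigned long phys_anchor;	/* r4: adr of __secondary_data */
	unsigned long offset = phys_anchor - virt_anchor;
	/* &secondary_data is virtual (r6); adding the offset makes it
	 * readable with the MMU still off ("ldr r4, [r6, r4]"): */
	unsigned long pgdir =
		*(unsigned long *)((char *)&secondary_data + offset);
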
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index c2a7da3ac0f1..9fed5fa194d9 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -92,6 +92,14 @@ struct cpu_user_fns cpu_user;
 struct cpu_cache_fns cpu_cache;
 #endif
 
+struct stack {
+	u32 irq[3];
+	u32 abt[3];
+	u32 und[3];
+} ____cacheline_aligned;
+
+static struct stack stacks[NR_CPUS];
+
 char elf_platform[ELF_PLATFORM_SIZE];
 EXPORT_SYMBOL(elf_platform);
 
@@ -307,8 +315,6 @@ static void __init setup_processor(void)
 	       cpu_name, processor_id, (int)processor_id & 15,
 	       proc_arch[cpu_architecture()]);
 
-	dump_cpu_info(smp_processor_id());
-
 	sprintf(system_utsname.machine, "%s%c", list->arch_name, ENDIANNESS);
 	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
 	elf_hwcap = list->elf_hwcap;
@@ -316,6 +322,46 @@ static void __init setup_processor(void)
 	cpu_proc_init();
 }
 
+/*
+ * cpu_init - initialise one CPU.
+ *
+ * cpu_init dumps the cache information, initialises SMP specific
+ * information, and sets up the per-CPU stacks.
+ */
+void cpu_init(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct stack *stk = &stacks[cpu];
+
+	if (cpu >= NR_CPUS) {
+		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
+		BUG();
+	}
+
+	dump_cpu_info(cpu);
+
+	/*
+	 * setup stacks for re-entrant exception handlers
+	 */
+	__asm__ (
+	"msr	cpsr_c, %1\n\t"
+	"add	sp, %0, %2\n\t"
+	"msr	cpsr_c, %3\n\t"
+	"add	sp, %0, %4\n\t"
+	"msr	cpsr_c, %5\n\t"
+	"add	sp, %0, %6\n\t"
+	"msr	cpsr_c, %7"
+	    :
+	    : "r" (stk),
+	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+	      "I" (offsetof(struct stack, irq[0])),
+	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+	      "I" (offsetof(struct stack, abt[0])),
+	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+	      "I" (offsetof(struct stack, und[0])),
+	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE));
+}
+
 static struct machine_desc * __init setup_machine(unsigned int nr)
 {
 	struct machine_desc *list;
@@ -715,6 +761,8 @@ void __init setup_arch(char **cmdline_p)
 	paging_init(&meminfo, mdesc);
 	request_standard_resources(&meminfo, mdesc);
 
+	cpu_init();
+
 	/*
 	 * Set up various architecture-specific pointers
 	 */
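
Note: the inline assembly in cpu_init() cannot be written as plain C
because it must switch the processor through IRQ, abort and undefined mode
to load each mode's banked stack pointer. The "I" constraints require
compile-time immediates, which the PSR mode constants and offsetof() on a
fixed struct both satisfy. Rendered as pseudo-C with hypothetical helpers:

	/* Illustrative only: write_cpsr()/set_banked_sp() do not exist
	 * as C helpers, which is exactly why the real code is asm. */
	write_cpsr(PSR_F_BIT | PSR_I_BIT | IRQ_MODE);
	set_banked_sp(stk->irq);			/* sp_irq */
	write_cpsr(PSR_F_BIT | PSR_I_BIT | ABT_MODE);
	set_banked_sp(stk->abt);			/* sp_abt */
	write_cpsr(PSR_F_BIT | PSR_I_BIT | UND_MODE);
	set_banked_sp(stk->und);			/* sp_und */
	write_cpsr(PSR_F_BIT | PSR_I_BIT | SVC_MODE);	/* back to SVC */
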
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index ecc8c3332408..45ed036336e0 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -24,6 +24,9 @@
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
@@ -37,6 +40,13 @@ cpumask_t cpu_present_mask;
 cpumask_t cpu_online_map;
 
 /*
+ * as from 2.5, kernels no longer have an init_tasks structure
+ * so we need some other way of telling a new secondary core
+ * where to place its SVC stack
+ */
+struct secondary_data secondary_data;
+
+/*
  * structures for inter-processor calls
  * - A collection of single bit ipi messages.
 */
@@ -71,6 +81,8 @@ static DEFINE_SPINLOCK(smp_call_function_lock);
 int __init __cpu_up(unsigned int cpu)
 {
 	struct task_struct *idle;
+	pgd_t *pgd;
+	pmd_t *pmd;
 	int ret;
 
 	/*
@@ -84,9 +96,54 @@ int __init __cpu_up(unsigned int cpu)
 	}
 
 	/*
+	 * Allocate initial page tables to allow the new CPU to
+	 * enable the MMU safely.  This essentially means a set
+	 * of our "standard" page tables, with the addition of
+	 * a 1:1 mapping for the physical address of the kernel.
+	 */
+	pgd = pgd_alloc(&init_mm);
+	pmd = pmd_offset(pgd, PHYS_OFFSET);
+	*pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
+		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
+
+	/*
+	 * We need to tell the secondary core where to find
+	 * its stack and the page tables.
+	 */
+	secondary_data.stack = (void *)idle->thread_info + THREAD_SIZE - 8;
+	secondary_data.pgdir = virt_to_phys(pgd);
+	wmb();
+
+	/*
 	 * Now bring the CPU into our world.
 	 */
 	ret = boot_secondary(cpu, idle);
+	if (ret == 0) {
+		unsigned long timeout;
+
+		/*
+		 * CPU was successfully started, wait for it
+		 * to come online or time out.
+		 */
+		timeout = jiffies + HZ;
+		while (time_before(jiffies, timeout)) {
+			if (cpu_online(cpu))
+				break;
+
+			udelay(10);
+			barrier();
+		}
+
+		if (!cpu_online(cpu))
+			ret = -EIO;
+	}
+
+	secondary_data.stack = 0;
+	secondary_data.pgdir = 0;
+
+	*pmd_offset(pgd, PHYS_OFFSET) = __pmd(0);
+	pgd_free(pgd);
+
 	if (ret) {
 		printk(KERN_CRIT "cpu_up: processor %d failed to boot\n", cpu);
 		/*
@@ -98,6 +155,56 @@ int __init __cpu_up(unsigned int cpu)
 }
 
 /*
+ * This is the secondary CPU boot entry.  We're using this CPUs
+ * idle thread stack, but a set of temporary page tables.
+ */
+asmlinkage void __init secondary_start_kernel(void)
+{
+	struct mm_struct *mm = &init_mm;
+	unsigned int cpu = smp_processor_id();
+
+	printk("CPU%u: Booted secondary processor\n", cpu);
+
+	/*
+	 * All kernel threads share the same mm context; grab a
+	 * reference and switch to it.
+	 */
+	atomic_inc(&mm->mm_users);
+	atomic_inc(&mm->mm_count);
+	current->active_mm = mm;
+	cpu_set(cpu, mm->cpu_vm_mask);
+	cpu_switch_mm(mm->pgd, mm);
+	enter_lazy_tlb(mm, current);
+
+	cpu_init();
+
+	/*
+	 * Give the platform a chance to do its own initialisation.
+	 */
+	platform_secondary_init(cpu);
+
+	/*
+	 * Enable local interrupts.
+	 */
+	local_irq_enable();
+	local_fiq_enable();
+
+	calibrate_delay();
+
+	smp_store_cpu_info(cpu);
+
+	/*
+	 * OK, now it's safe to let the boot CPU continue
+	 */
+	cpu_set(cpu, cpu_online_map);
+
+	/*
+	 * OK, it's off to the idle thread for us
+	 */
+	cpu_idle();
+}
+
+/*
  * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
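
Note: the temporary page table built in __cpu_up() adds a single 1MB
first-level "section" entry mapping the kernel's physical load address
onto itself, so the secondary CPU still sees valid instructions at the
instant it enables the MMU; the mapping is torn down once the CPU is
online. A minimal sketch of the descriptor construction, using the same
macros as the hunk above:

	/* Illustrative only: build an identity section descriptor for
	 * the 1MB region containing 'phys'. */
	static inline pmd_t identity_section(unsigned long phys)
	{
		return __pmd((phys & PGDIR_MASK) |
			     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
	}
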
diff --git a/arch/arm/mach-integrator/Makefile b/arch/arm/mach-integrator/Makefile
index 158daaf9e3b0..ebb255bdce8a 100644
--- a/arch/arm/mach-integrator/Makefile
+++ b/arch/arm/mach-integrator/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_LEDS) += leds.o
 obj-$(CONFIG_PCI) += pci_v3.o pci.o
 obj-$(CONFIG_CPU_FREQ_INTEGRATOR) += cpu.o
 obj-$(CONFIG_INTEGRATOR_IMPD1) += impd1.o
+obj-$(CONFIG_SMP) += platsmp.o headsmp.o
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index bd17b5154311..d302f0405fd2 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -14,6 +14,7 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/sched.h>
+#include <linux/smp.h>
 
 #include <asm/hardware.h>
 #include <asm/irq.h>
@@ -221,7 +222,24 @@ integrator_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	 */
 	timer1->TimerClear = 1;
 
-	timer_tick(regs);
+	/*
+	 * the clock tick routines are only processed on the
+	 * primary CPU
+	 */
+	if (hard_smp_processor_id() == 0) {
+		nmi_tick();
+		timer_tick(regs);
+#ifdef CONFIG_SMP
+		smp_send_timer();
+#endif
+	}
+
+#ifdef CONFIG_SMP
+	/*
+	 * this is the ARM equivalent of the APIC timer interrupt
+	 */
+	update_process_times(user_mode(regs));
+#endif /* CONFIG_SMP */
 
 	write_sequnlock(&xtime_lock);
 
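
Note: the timer change splits tick work the usual SMP way: global
timekeeping (jiffies, xtime) must run exactly once per tick, on the
primary CPU, while per-task accounting runs on every CPU that takes the
interrupt. On UP builds timer_tick() already does the per-task work, hence
the #ifdef in the hunk above. A condensed sketch of that shape, assuming
the 2.6.12-era handler signature used in this file:

	/* Illustrative only, not the actual Integrator handler. */
	static irqreturn_t tick_sketch(int irq, void *dev_id,
				       struct pt_regs *regs)
	{
		if (hard_smp_processor_id() == 0)
			timer_tick(regs);	/* global: jiffies, xtime */
		update_process_times(user_mode(regs));	/* per-CPU */
		return IRQ_HANDLED;
	}
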
diff --git a/arch/arm/mach-integrator/headsmp.S b/arch/arm/mach-integrator/headsmp.S
new file mode 100644
index 000000000000..ceaa88e30d70
--- /dev/null
+++ b/arch/arm/mach-integrator/headsmp.S
@@ -0,0 +1,37 @@
+/*
+ * linux/arch/arm/mach-integrator/headsmp.S
+ *
+ * Copyright (c) 2003 ARM Limited
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+	__INIT
+
+/*
+ * Integrator specific entry point for secondary CPUs.  This provides
+ * a "holding pen" into which all secondary cores are held until we're
+ * ready for them to initialise.
+ */
+ENTRY(integrator_secondary_startup)
+	adr	r4, 1f
+	ldmia	r4, {r5, r6}
+	sub	r4, r4, r5
+	ldr	r6, [r6, r4]
+pen:	ldr	r7, [r6]
+	cmp	r7, r0
+	bne	pen
+
+	/*
+	 * we've been released from the holding pen: secondary_stack
+	 * should now contain the SVC stack for this core
+	 */
+	b	secondary_startup
+
+1:	.long	.
+	.long	phys_pen_release
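
Note: the holding pen is a simple spin on pen_release, read through its
physical alias (phys_pen_release) because the MMU is still off at this
point. In C, the protocol each secondary follows would look roughly like
this, with a hypothetical name for the physical-alias lookup:

	/* Illustrative only: C equivalent of the pen loop above. */
	volatile int *pen = phys_alias_of(&pen_release);  /* hypothetical */
	while (*pen != hw_cpu_id)	/* r0 holds the hardware CPU ID */
		;			/* spin until the boot CPU releases us */
	secondary_startup();		/* common startup path in head.S */
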
diff --git a/arch/arm/mach-integrator/leds.c b/arch/arm/mach-integrator/leds.c
index d2c0ab21150c..f1436e683b49 100644
--- a/arch/arm/mach-integrator/leds.c
+++ b/arch/arm/mach-integrator/leds.c
@@ -22,6 +22,8 @@
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
 
 #include <asm/hardware.h>
 #include <asm/io.h>
@@ -85,4 +87,4 @@ static int __init leds_init(void)
 	return 0;
 }
 
-__initcall(leds_init);
+core_initcall(leds_init);
diff --git a/arch/arm/mach-integrator/platsmp.c b/arch/arm/mach-integrator/platsmp.c
new file mode 100644
index 000000000000..ead15dfcb53d
--- /dev/null
+++ b/arch/arm/mach-integrator/platsmp.c
@@ -0,0 +1,192 @@
+/*
+ * linux/arch/arm/mach-cintegrator/platsmp.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+
+#include <asm/atomic.h>
+#include <asm/delay.h>
+#include <asm/mmu_context.h>
+#include <asm/procinfo.h>
+#include <asm/ptrace.h>
+#include <asm/smp.h>
+
+extern void integrator_secondary_startup(void);
+
+/*
+ * control for which core is the next to come out of the secondary
+ * boot "holding pen"
+ */
+volatile int __initdata pen_release = -1;
+unsigned long __initdata phys_pen_release = 0;
+
+static DEFINE_SPINLOCK(boot_lock);
+
+void __init platform_secondary_init(unsigned int cpu)
+{
+	/*
+	 * the primary core may have used a "cross call" soft interrupt
+	 * to get this processor out of WFI in the BootMonitor - make
+	 * sure that we are no longer being sent this soft interrupt
+	 */
+	smp_cross_call_done(cpumask_of_cpu(cpu));
+
+	/*
+	 * if any interrupts are already enabled for the primary
+	 * core (e.g. timer irq), then they will not have been enabled
+	 * for us: do so
+	 */
+	secondary_scan_irqs();
+
+	/*
+	 * let the primary processor know we're out of the
+	 * pen, then head off into the C entry point
+	 */
+	pen_release = -1;
+
+	/*
+	 * Synchronise with the boot thread.
+	 */
+	spin_lock(&boot_lock);
+	spin_unlock(&boot_lock);
+}
+
+int __init boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+	unsigned long timeout;
+
+	/*
+	 * set synchronisation state between this boot processor
+	 * and the secondary one
+	 */
+	spin_lock(&boot_lock);
+
+	/*
+	 * The secondary processor is waiting to be released from
+	 * the holding pen - release it, then wait for it to flag
+	 * that it has been released by resetting pen_release.
+	 *
+	 * Note that "pen_release" is the hardware CPU ID, whereas
+	 * "cpu" is Linux's internal ID.
+	 */
+	pen_release = cpu;
+
+	/*
+	 * XXX
+	 *
+	 * This is a later addition to the booting protocol: the
+	 * bootMonitor now puts secondary cores into WFI, so
+	 * poke_milo() no longer gets the cores moving; we need
+	 * to send a soft interrupt to wake the secondary core.
+	 * Use smp_cross_call() for this, since there's little
+	 * point duplicating the code here
+	 */
+	smp_cross_call(cpumask_of_cpu(cpu));
+
+	timeout = jiffies + (1 * HZ);
+	while (time_before(jiffies, timeout)) {
+		if (pen_release == -1)
+			break;
+
+		udelay(10);
+	}
+
+	/*
+	 * now the secondary core is starting up let it run its
+	 * calibrations, then wait for it to finish
+	 */
+	spin_unlock(&boot_lock);
+
+	return pen_release != -1 ? -ENOSYS : 0;
+}
+
+static void __init poke_milo(void)
+{
+	extern void secondary_startup(void);
+
+	/* nobody is to be released from the pen yet */
+	pen_release = -1;
+
+	phys_pen_release = virt_to_phys(&pen_release);
+
+	/*
+	 * write the address of secondary startup into the system-wide
	 * flags register, then clear the bottom two bits, which is what
+	 * BootMonitor is waiting for
+	 */
+#if 1
+#define CINTEGRATOR_HDR_FLAGSS_OFFSET 0x30
+	__raw_writel(virt_to_phys(integrator_secondary_startup),
+		     (IO_ADDRESS(INTEGRATOR_HDR_BASE) +
+		      CINTEGRATOR_HDR_FLAGSS_OFFSET));
+#define CINTEGRATOR_HDR_FLAGSC_OFFSET 0x34
+	__raw_writel(3,
+		     (IO_ADDRESS(INTEGRATOR_HDR_BASE) +
+		      CINTEGRATOR_HDR_FLAGSC_OFFSET));
+#endif
+
+	mb();
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	unsigned int ncores = get_core_count();
+	unsigned int cpu = smp_processor_id();
+	int i;
+
+	/* sanity check */
+	if (ncores == 0) {
+		printk(KERN_ERR
+		       "Integrator/CP: strange CM count of 0? Default to 1\n");
+
+		ncores = 1;
+	}
+
+	if (ncores > NR_CPUS) {
+		printk(KERN_WARNING
+		       "Integrator/CP: no. of cores (%d) greater than configured "
+		       "maximum of %d - clipping\n",
+		       ncores, NR_CPUS);
+		ncores = NR_CPUS;
+	}
+
+	/*
+	 * start with some more config for the Boot CPU, now that
+	 * the world is a bit more alive (which was not the case
+	 * when smp_prepare_boot_cpu() was called)
+	 */
+	smp_store_cpu_info(cpu);
+
+	/*
+	 * are we trying to boot more cores than exist?
+	 */
+	if (max_cpus > ncores)
+		max_cpus = ncores;
+
+	/*
+	 * Initialise the present mask - this tells us which CPUs should
+	 * be present.
+	 */
+	for (i = 0; i < max_cpus; i++) {
+		cpu_set(i, cpu_present_mask);
+	}
+
+	/*
+	 * Do we need any more CPUs? If so, then let them know where
+	 * to start. Note that, on modern versions of MILO, the "poke"
+	 * doesn't actually do anything until each individual core is
+	 * sent a soft interrupt to get it out of WFI
+	 */
+	if (max_cpus > 1)
+		poke_milo();
+}
diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
index 9799fe80df23..ac4dd4336160 100644
--- a/arch/arm/mach-pxa/pm.c
+++ b/arch/arm/mach-pxa/pm.c
@@ -133,6 +133,8 @@ static int pxa_pm_enter(suspend_state_t state)
 	/* *** go zzz *** */
 	pxa_cpu_pm_enter(state);
 
+	cpu_init();
+
 	/* after sleeping, validate the checksum */
 	checksum = 0;
 	for (i = 0; i < SLEEP_SAVE_SIZE - 1; i++)
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
index 379ea5e3950f..59c7964cfe11 100644
--- a/arch/arm/mach-sa1100/pm.c
+++ b/arch/arm/mach-sa1100/pm.c
@@ -88,6 +88,8 @@ static int sa11x0_pm_enter(suspend_state_t state)
 	/* go zzz */
 	sa1100_cpu_suspend();
 
+	cpu_init();
+
 	/*
 	 * Ensure not to come back here if it wasn't intended
 	 */