author		Linus Torvalds <torvalds@linux-foundation.org>	2013-05-03 12:13:19 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-05-03 12:13:19 -0400
commit		8546dc1d4b671480961c3eaf4c0c102ae6848340 (patch)
tree		c646079fb48811b22b742deb6bd2e907f9e6c3d4 /arch/arm/kernel
parent		9992ba72327fa0d8bdc9fb624e80f5cce338a711 (diff)
parent		33b9f582c5c1db515412cc7efff28f7d1779321f (diff)
Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM updates from Russell King:
 "The major items included in here are:

   - MCPM, multi-cluster power management, part of the infrastructure
     required for ARM's big.LITTLE support.

   - A rework of the ARM KVM code to allow re-use by ARM64.

   - Error handling cleanups of the IS_ERR_OR_NULL() madness and fixes
     of that stuff for arch/arm

   - Preparatory patches for Cortex-M3 support from Uwe Kleine-König.

  There is also a set of three patches in here from Hugh/Catalin to
  address freeing of inappropriate page tables on LPAE.  You already
  have these from akpm, but they were already part of my tree at the
  time he sent them, so unfortunately they'll end up with duplicate
  commits"

* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm: (77 commits)
  ARM: EXYNOS: remove unnecessary use of IS_ERR_VALUE()
  ARM: IMX: remove unnecessary use of IS_ERR_VALUE()
  ARM: OMAP: use consistent error checking
  ARM: cleanup: OMAP hwmod error checking
  ARM: 7709/1: mcpm: Add explicit AFLAGS to support v6/v7 multiplatform kernels
  ARM: 7700/2: Make cpu_init() notrace
  ARM: 7702/1: Set the page table freeing ceiling to TASK_SIZE
  ARM: 7701/1: mm: Allow arch code to control the user page table ceiling
  ARM: 7703/1: Disable preemption in broadcast_tlb*_a15_erratum()
  ARM: mcpm: provide an interface to set the SMP ops at run time
  ARM: mcpm: generic SMP secondary bringup and hotplug support
  ARM: mcpm_head.S: vlock-based first man election
  ARM: mcpm: Add baremetal voting mutexes
  ARM: mcpm: introduce helpers for platform coherency exit/setup
  ARM: mcpm: introduce the CPU/cluster power API
  ARM: multi-cluster PM: secondary kernel entry code
  ARM: cacheflush: add synchronization helpers for mixed cache state accesses
  ARM: cpu hotplug: remove majority of cache flushing from platforms
  ARM: smp: flush L1 cache in cpu_die()
  ARM: tegra: remove tegra specific cpu_disable()
  ...
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/asm-offsets.c    | 12
-rw-r--r--  arch/arm/kernel/bios32.c         |  6
-rw-r--r--  arch/arm/kernel/entry-armv.S     | 59
-rw-r--r--  arch/arm/kernel/entry-common.S   |  8
-rw-r--r--  arch/arm/kernel/entry-header.S   | 66
-rw-r--r--  arch/arm/kernel/head-common.S    |  9
-rw-r--r--  arch/arm/kernel/head-nommu.S     |  8
-rw-r--r--  arch/arm/kernel/process.c        | 13
-rw-r--r--  arch/arm/kernel/return_address.c |  5
-rw-r--r--  arch/arm/kernel/setup.c          |  4
-rw-r--r--  arch/arm/kernel/smp.c            | 42
-rw-r--r--  arch/arm/kernel/smp_scu.c        |  2
-rw-r--r--  arch/arm/kernel/smp_tlb.c        |  9
13 files changed, 160 insertions(+), 83 deletions(-)
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 923eec7105cf..a53efa993690 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -149,6 +149,10 @@ int main(void)
 	DEFINE(DMA_BIDIRECTIONAL,	DMA_BIDIRECTIONAL);
 	DEFINE(DMA_TO_DEVICE,		DMA_TO_DEVICE);
 	DEFINE(DMA_FROM_DEVICE,		DMA_FROM_DEVICE);
+	BLANK();
+	DEFINE(CACHE_WRITEBACK_ORDER, __CACHE_WRITEBACK_ORDER);
+	DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE);
+	BLANK();
 #ifdef CONFIG_KVM_ARM_HOST
 	DEFINE(VCPU_KVM,		offsetof(struct kvm_vcpu, kvm));
 	DEFINE(VCPU_MIDR,		offsetof(struct kvm_vcpu, arch.midr));
@@ -165,10 +169,10 @@ int main(void)
 	DEFINE(VCPU_PC,			offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
 	DEFINE(VCPU_CPSR,		offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
 	DEFINE(VCPU_IRQ_LINES,		offsetof(struct kvm_vcpu, arch.irq_lines));
-	DEFINE(VCPU_HSR,		offsetof(struct kvm_vcpu, arch.hsr));
-	DEFINE(VCPU_HxFAR,		offsetof(struct kvm_vcpu, arch.hxfar));
-	DEFINE(VCPU_HPFAR,		offsetof(struct kvm_vcpu, arch.hpfar));
-	DEFINE(VCPU_HYP_PC,		offsetof(struct kvm_vcpu, arch.hyp_pc));
+	DEFINE(VCPU_HSR,		offsetof(struct kvm_vcpu, arch.fault.hsr));
+	DEFINE(VCPU_HxFAR,		offsetof(struct kvm_vcpu, arch.fault.hxfar));
+	DEFINE(VCPU_HPFAR,		offsetof(struct kvm_vcpu, arch.fault.hpfar));
+	DEFINE(VCPU_HYP_PC,		offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
 #ifdef CONFIG_KVM_ARM_VGIC
 	DEFINE(VCPU_VGIC_CPU,		offsetof(struct kvm_vcpu, arch.vgic_cpu));
 	DEFINE(VGIC_CPU_HCR,		offsetof(struct vgic_cpu, vgic_hcr));
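
The VCPU_* offsets above exist because the KVM world-switch code is written in assembly and cannot use offsetof() directly; the fault registers moving under arch.fault is part of the ARM64 re-use rework. asm-offsets.c is compiled to assembly, and its DEFINE()/BLANK() markers are scraped into the generated asm-offsets.h header at build time. For orientation, the macros come from include/linux/kbuild.h and expand roughly like this (paraphrased; not part of this diff):

	/* include/linux/kbuild.h (paraphrased): the "->" marker survives into
	 * the generated .s file, and a build-time script rewrites each marker
	 * line into a #define in include/generated/asm-offsets.h. */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	#define BLANK() asm volatile("\n->" : : )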
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index a1f73b502ef0..b2ed73c45489 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -462,6 +462,7 @@ static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
 		sys->busnr   = busnr;
 		sys->swizzle = hw->swizzle;
 		sys->map_irq = hw->map_irq;
+		sys->align_resource = hw->align_resource;
 		INIT_LIST_HEAD(&sys->resources);
 
 		if (hw->private_data)
@@ -574,6 +575,8 @@ char * __init pcibios_setup(char *str)
 resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 				resource_size_t size, resource_size_t align)
 {
+	struct pci_dev *dev = data;
+	struct pci_sys_data *sys = dev->sysdata;
 	resource_size_t start = res->start;
 
 	if (res->flags & IORESOURCE_IO && start & 0x300)
@@ -581,6 +584,9 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 
 	start = (start + align - 1) & ~(align - 1);
 
+	if (sys->align_resource)
+		return sys->align_resource(dev, res, start, size, align);
+
 	return start;
 }
 
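The new align_resource hook gives the PCI host controller the last word on resource placement, applied after the generic ISA-alias quirk (the 0x300 check) and the alignment rounding above. A minimal sketch of how a platform might wire one up through struct hw_pci — the driver name and the stricter-alignment policy are hypothetical; the callback signature follows the call site above:

	/* Hypothetical host driver: force I/O allocations onto 1 KiB
	 * boundaries (an assumed hardware constraint, for illustration). */
	static resource_size_t myplat_align_resource(struct pci_dev *dev,
						     const struct resource *res,
						     resource_size_t start,
						     resource_size_t size,
						     resource_size_t align)
	{
		if (res->flags & IORESOURCE_IO)
			start = ALIGN(start, SZ_1K);
		return start;
	}

	static struct hw_pci myplat_pci __initdata = {
		.nr_controllers	= 1,
		.map_irq	= myplat_map_irq,	/* hypothetical */
		.align_resource	= myplat_align_resource,
	};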
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 0f82098c9bfe..582b405befc5 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -192,18 +192,6 @@ __dabt_svc:
 	svc_entry
 	mov	r2, sp
 	dabt_helper
-
-	@
-	@ IRQs off again before pulling preserved data off the stack
-	@
-	disable_irq_notrace
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-	tst	r5, #PSR_I_BIT
-	bleq	trace_hardirqs_on
-	tst	r5, #PSR_I_BIT
-	blne	trace_hardirqs_off
-#endif
 	svc_exit r5				@ return from exception
  UNWIND(.fnend		)
 ENDPROC(__dabt_svc)
@@ -223,12 +211,7 @@ __irq_svc:
 	blne	svc_preempt
 #endif
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-	@ The parent context IRQs must have been enabled to get here in
-	@ the first place, so there's no point checking the PSR I bit.
-	bl	trace_hardirqs_on
-#endif
-	svc_exit r5				@ return from exception
+	svc_exit r5, irq = 1			@ return from exception
  UNWIND(.fnend		)
 ENDPROC(__irq_svc)
 
@@ -295,22 +278,8 @@ __und_svc_fault:
 	mov	r0, sp				@ struct pt_regs *regs
 	bl	__und_fault
 
-	@
-	@ IRQs off again before pulling preserved data off the stack
-	@
 __und_svc_finish:
-	disable_irq_notrace
-
-	@
-	@ restore SPSR and restart the instruction
-	@
 	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
-#ifdef CONFIG_TRACE_IRQFLAGS
-	tst	r5, #PSR_I_BIT
-	bleq	trace_hardirqs_on
-	tst	r5, #PSR_I_BIT
-	blne	trace_hardirqs_off
-#endif
 	svc_exit r5				@ return from exception
  UNWIND(.fnend		)
 ENDPROC(__und_svc)
@@ -320,18 +289,6 @@ __pabt_svc:
 	svc_entry
 	mov	r2, sp				@ regs
 	pabt_helper
-
-	@
-	@ IRQs off again before pulling preserved data off the stack
-	@
-	disable_irq_notrace
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-	tst	r5, #PSR_I_BIT
-	bleq	trace_hardirqs_on
-	tst	r5, #PSR_I_BIT
-	blne	trace_hardirqs_off
-#endif
 	svc_exit r5				@ return from exception
  UNWIND(.fnend		)
 ENDPROC(__pabt_svc)
@@ -396,6 +353,7 @@ ENDPROC(__pabt_svc)
 #ifdef CONFIG_IRQSOFF_TRACER
 	bl	trace_hardirqs_off
 #endif
+	ct_user_exit save = 0
 	.endm
 
 	.macro	kuser_cmpxchg_check
@@ -562,21 +520,21 @@ ENDPROC(__und_usr)
 	@ Fall-through from Thumb-2 __und_usr
 	@
 #ifdef CONFIG_NEON
+	get_thread_info r10			@ get current thread
 	adr	r6, .LCneon_thumb_opcodes
 	b	2f
 #endif
 call_fpe:
+	get_thread_info r10			@ get current thread
 #ifdef CONFIG_NEON
 	adr	r6, .LCneon_arm_opcodes
-2:
-	ldr	r7, [r6], #4			@ mask value
-	cmp	r7, #0				@ end mask?
-	beq	1f
-	and	r8, r0, r7
+2:	ldr	r5, [r6], #4			@ mask value
 	ldr	r7, [r6], #4			@ opcode bits matching in mask
+	cmp	r5, #0				@ end mask?
+	beq	1f
+	and	r8, r0, r5
 	cmp	r8, r7				@ NEON instruction?
 	bne	2b
-	get_thread_info r10
 	mov	r7, #1
 	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
 	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
@@ -586,7 +544,6 @@ call_fpe:
 	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
 	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
 	moveq	pc, lr
-	get_thread_info r10			@ get current thread
 	and	r8, r0, #0x00000f00		@ mask out CP number
  THUMB(	lsr	r8, r8, #8		)
 	mov	r7, #1
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index fefd7f971437..bc5bc0a97131 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -35,12 +35,11 @@ ret_fast_syscall:
 	ldr	r1, [tsk, #TI_FLAGS]
 	tst	r1, #_TIF_WORK_MASK
 	bne	fast_work_pending
-#if defined(CONFIG_IRQSOFF_TRACER)
 	asm_trace_hardirqs_on
-#endif
 
 	/* perform architecture specific actions before user return */
 	arch_ret_to_user r1, lr
+	ct_user_enter
 
 	restore_user_regs fast = 1, offset = S_OFF
  UNWIND(.fnend		)
@@ -71,11 +70,11 @@ ENTRY(ret_to_user_from_irq)
 	tst	r1, #_TIF_WORK_MASK
 	bne	work_pending
 no_work_pending:
-#if defined(CONFIG_IRQSOFF_TRACER)
 	asm_trace_hardirqs_on
-#endif
+
 	/* perform architecture specific actions before user return */
 	arch_ret_to_user r1, lr
+	ct_user_enter save = 0
 
 	restore_user_regs fast = 0, offset = 0
 ENDPROC(ret_to_user_from_irq)
@@ -406,6 +405,7 @@ ENTRY(vector_swi)
 	mcr	p15, 0, ip, c1, c0		@ update control register
 #endif
 	enable_irq
+	ct_user_exit
 
 	get_thread_info tsk
 	adr	tbl, sys_call_table		@ load syscall table pointer
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 9a8531eadd3d..160f3376ba6d 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -74,7 +74,24 @@
 	.endm
 
 #ifndef CONFIG_THUMB2_KERNEL
-	.macro	svc_exit, rpsr
+	.macro	svc_exit, rpsr, irq = 0
+	.if	\irq != 0
+	@ IRQs already off
+#ifdef CONFIG_TRACE_IRQFLAGS
+	@ The parent context IRQs must have been enabled to get here in
+	@ the first place, so there's no point checking the PSR I bit.
+	bl	trace_hardirqs_on
+#endif
+	.else
+	@ IRQs off again before pulling preserved data off the stack
+	disable_irq_notrace
+#ifdef CONFIG_TRACE_IRQFLAGS
+	tst	\rpsr, #PSR_I_BIT
+	bleq	trace_hardirqs_on
+	tst	\rpsr, #PSR_I_BIT
+	blne	trace_hardirqs_off
+#endif
+	.endif
 	msr	spsr_cxsf, \rpsr
 #if defined(CONFIG_CPU_V6)
 	ldr	r0, [sp]
@@ -120,7 +137,24 @@
 	mov	pc, \reg
 	.endm
 #else	/* CONFIG_THUMB2_KERNEL */
-	.macro	svc_exit, rpsr
+	.macro	svc_exit, rpsr, irq = 0
+	.if	\irq != 0
+	@ IRQs already off
+#ifdef CONFIG_TRACE_IRQFLAGS
+	@ The parent context IRQs must have been enabled to get here in
+	@ the first place, so there's no point checking the PSR I bit.
+	bl	trace_hardirqs_on
+#endif
+	.else
+	@ IRQs off again before pulling preserved data off the stack
+	disable_irq_notrace
+#ifdef CONFIG_TRACE_IRQFLAGS
+	tst	\rpsr, #PSR_I_BIT
+	bleq	trace_hardirqs_on
+	tst	\rpsr, #PSR_I_BIT
+	blne	trace_hardirqs_off
+#endif
+	.endif
 	ldr	lr, [sp, #S_SP]			@ top of the stack
 	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc
 	clrex					@ clear the exclusive monitor
@@ -164,6 +198,34 @@
 #endif	/* !CONFIG_THUMB2_KERNEL */
 
 /*
+ * Context tracking subsystem.  Used to instrument transitions
+ * between user and kernel mode.
+ */
+	.macro ct_user_exit, save = 1
+#ifdef CONFIG_CONTEXT_TRACKING
+	.if	\save
+	stmdb   sp!, {r0-r3, ip, lr}
+	bl	user_exit
+	ldmia	sp!, {r0-r3, ip, lr}
+	.else
+	bl	user_exit
+	.endif
+#endif
+	.endm
+
+	.macro ct_user_enter, save = 1
+#ifdef CONFIG_CONTEXT_TRACKING
+	.if	\save
+	stmdb   sp!, {r0-r3, ip, lr}
+	bl	user_enter
+	ldmia	sp!, {r0-r3, ip, lr}
+	.else
+	bl	user_enter
+	.endif
+#endif
+	.endm
+
+/*
  * These are the registers used in the syscall handler, and allow us to
  * have in theory up to 7 arguments to a function - r0 to r6.
  *
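
ct_user_exit and ct_user_enter are thin wrappers around the generic context-tracking hooks user_exit() and user_enter() from include/linux/context_tracking.h; the save parameter only decides whether the caller-clobbered registers must be preserved around the call. Conceptually, every user/kernel transition is now bracketed like this (a C sketch of the pairing, not actual kernel code — handle_the_work() is a stand-in):

	void kernel_entry_from_user(void)
	{
		user_exit();		/* ct_user_exit: RCU/vtime note we left user mode */
		handle_the_work();	/* syscall or exception handling */
		user_enter();		/* ct_user_enter: about to resume user mode */
	}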
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 854bd22380d3..5b391a689b47 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -98,8 +98,9 @@ __mmap_switched:
 	str	r9, [r4]			@ Save processor ID
 	str	r1, [r5]			@ Save machine type
 	str	r2, [r6]			@ Save atags pointer
-	bic	r4, r0, #CR_A			@ Clear 'A' bit
-	stmia	r7, {r0, r4}			@ Save control register values
+	cmp	r7, #0
+	bicne	r4, r0, #CR_A			@ Clear 'A' bit
+	stmneia	r7, {r0, r4}			@ Save control register values
 	b	start_kernel
 ENDPROC(__mmap_switched)
 
@@ -113,7 +114,11 @@ __mmap_switched_data:
 	.long	processor_id			@ r4
 	.long	__machine_arch_type		@ r5
 	.long	__atags_pointer			@ r6
+#ifdef CONFIG_CPU_CP15
 	.long	cr_alignment			@ r7
+#else
+	.long	0				@ r7
+#endif
 	.long	init_thread_union + THREAD_START_SP @ sp
 	.size	__mmap_switched_data, . - __mmap_switched_data
 
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 2c228a07e58c..6a2e09c952c7 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -32,15 +32,21 @@
  * numbers for r1.
  *
  */
-	.arm
 
 	__HEAD
+
+#ifdef CONFIG_CPU_THUMBONLY
+	.thumb
+ENTRY(stext)
+#else
+	.arm
 ENTRY(stext)
 
  THUMB(	adr	r9, BSYM(1f)	)	@ Kernel is always entered in ARM.
  THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
  THUMB(	.thumb			)	@ switch to Thumb now.
  THUMB(1:			)
+#endif
 
 	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
 						@ and irqs disabled
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index ae58d3b37d9d..f21970316836 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -407,15 +407,16 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
  * atomic helpers and the signal restart code. Insert it into the
  * gate_vma so that it is visible through ptrace and /proc/<pid>/mem.
  */
-static struct vm_area_struct gate_vma;
+static struct vm_area_struct gate_vma = {
+	.vm_start	= 0xffff0000,
+	.vm_end		= 0xffff0000 + PAGE_SIZE,
+	.vm_flags	= VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
+	.vm_mm		= &init_mm,
+};
 
 static int __init gate_vma_init(void)
 {
-	gate_vma.vm_start	= 0xffff0000;
-	gate_vma.vm_end		= 0xffff0000 + PAGE_SIZE;
-	gate_vma.vm_page_prot	= PAGE_READONLY_EXEC;
-	gate_vma.vm_flags	= VM_READ | VM_EXEC |
-				  VM_MAYREAD | VM_MAYEXEC;
+	gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
 	return 0;
 }
 arch_initcall(gate_vma_init);
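
Everything in gate_vma that is a build-time constant now moves to a static initializer; vm_page_prot stays in the initcall because ARM's pgprot values are chosen at boot for the detected CPU, so PAGE_READONLY_EXEC is not a compile-time constant. The structure is handed to the rest of the kernel via the gate-area hooks, roughly like this (a sketch of what process.c already provides elsewhere, unchanged by this diff):

	struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
	{
		return &gate_vma;	/* vector page visible to ptrace and /proc/<pid>/mem */
	}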
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index 8085417555dd..fafedd86885d 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -26,7 +26,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
 	struct return_address_data *data = d;
 
 	if (!data->level) {
-		data->addr = (void *)frame->lr;
+		data->addr = (void *)frame->pc;
 
 		return 1;
 	} else {
@@ -41,7 +41,8 @@ void *return_address(unsigned int level)
 	struct stackframe frame;
 	register unsigned long current_sp asm ("sp");
 
-	data.level = level + 1;
+	data.level = level + 2;
+	data.addr = NULL;
 
 	frame.fp = (unsigned long)__builtin_frame_address(0);
 	frame.sp = current_sp;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 234e339196c0..728007c4a2b7 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -290,10 +290,10 @@ static int cpu_has_aliasing_icache(unsigned int arch)
 
 static void __init cacheid_init(void)
 {
-	unsigned int cachetype = read_cpuid_cachetype();
 	unsigned int arch = cpu_architecture();
 
 	if (arch >= CPU_ARCH_ARMv6) {
+		unsigned int cachetype = read_cpuid_cachetype();
 		if ((cachetype & (7 << 29)) == 4 << 29) {
 			/* ARMv7 register format */
 			arch = CPU_ARCH_ARMv7;
@@ -389,7 +389,7 @@ static void __init feat_v6_fixup(void)
  *
  * cpu_init sets up the per-CPU stacks.
  */
-void cpu_init(void)
+void notrace cpu_init(void)
 {
 	unsigned int cpu = smp_processor_id();
 	struct stack *stk = &stacks[cpu];
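
This is "ARM: 7700/2: Make cpu_init() notrace" from the shortlog: cpu_init() also runs on early secondary-bringup and resume paths where the function tracer's entry hook is not yet safe to take, so the function must not be instrumented. For reference, notrace is the standard attribute wrapper (paraphrased from include/linux/compiler.h):

	#define notrace __attribute__((no_instrument_function))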
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 4619177bcfe6..47ab90563bf4 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -211,6 +211,13 @@ void __cpuinit __cpu_die(unsigned int cpu)
 	}
 	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
 
+	/*
+	 * platform_cpu_kill() is generally expected to do the powering off
+	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
+	 * be done by the CPU which is dying in preference to supporting
+	 * this call, but that means there is _no_ synchronisation between
+	 * the requesting CPU and the dying CPU actually losing power.
+	 */
 	if (!platform_cpu_kill(cpu))
 		printk("CPU%u: unable to kill\n", cpu);
 }
@@ -230,14 +237,41 @@ void __ref cpu_die(void)
 	idle_task_exit();
 
 	local_irq_disable();
-	mb();
 
-	/* Tell __cpu_die() that this CPU is now safe to dispose of */
+	/*
+	 * Flush the data out of the L1 cache for this CPU.  This must be
+	 * before the completion to ensure that data is safely written out
+	 * before platform_cpu_kill() gets called - which may disable
+	 * *this* CPU and power down its cache.
+	 */
+	flush_cache_louis();
+
+	/*
+	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
+	 * this returns, power and/or clocks can be removed at any point
+	 * from this CPU and its cache by platform_cpu_kill().
+	 */
 	RCU_NONIDLE(complete(&cpu_died));
 
 	/*
-	 * actual CPU shutdown procedure is at least platform (if not
-	 * CPU) specific.
+	 * Ensure that the cache lines associated with that completion are
+	 * written out.  This covers the case where _this_ CPU is doing the
+	 * powering down, to ensure that the completion is visible to the
+	 * CPU waiting for this one.
+	 */
+	flush_cache_louis();
+
+	/*
+	 * The actual CPU shutdown procedure is at least platform (if not
+	 * CPU) specific.  This may remove power, or it may simply spin.
+	 *
+	 * Platforms are generally expected *NOT* to return from this call,
+	 * although there are some which do because they have no way to
+	 * power down the CPU.  These platforms are the _only_ reason we
+	 * have a return path which uses the fragment of assembly below.
+	 *
+	 * The return path should not be used for platforms which can
+	 * power off the CPU.
 	 */
 	if (smp_ops.cpu_die)
 		smp_ops.cpu_die(cpu);
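
The rewritten cpu_die() makes the hand-off contract explicit: flush L1, signal __cpu_die(), flush again, then call the platform hook, which normally never returns. A platform with no way to cut power would typically park the dying CPU instead; a sketch of such a hook under struct smp_operations (all myplat_* names are hypothetical):

	/* Hypothetical platform that cannot power a core off: park it in WFI.
	 * cpu_die() has already flushed this CPU's cache before we get here. */
	static void myplat_cpu_die(unsigned int cpu)
	{
		for (;;)
			cpu_do_idle();	/* WFI */
	}

	static struct smp_operations myplat_smp_ops __initdata = {
		.smp_boot_secondary	= myplat_boot_secondary,	/* hypothetical */
		.cpu_die		= myplat_cpu_die,
	};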
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 45eac87ed66a..5bc1a63284e3 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -41,7 +41,7 @@ void scu_enable(void __iomem *scu_base)
 
 #ifdef CONFIG_ARM_ERRATA_764369
 	/* Cortex-A9 only */
-	if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) {
+	if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090) {
 		scu_ctrl = __raw_readl(scu_base + 0x30);
 		if (!(scu_ctrl & 1))
 			__raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
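
A small consistency cleanup: read_cpuid_id() is the canonical accessor for the main ID register and is simply a wrapper around the raw read — roughly, per arch/arm/include/asm/cputype.h:

	static inline unsigned int read_cpuid_id(void)
	{
		return read_cpuid(CPUID_ID);	/* MIDR via CP15 c0 */
	}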
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index e82e1d248772..9a52a07aa40e 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -98,21 +98,21 @@ static void broadcast_tlb_a15_erratum(void)
 		return;
 
 	dummy_flush_tlb_a15_erratum();
-	smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
-			       NULL, 1);
+	smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1);
 }
 
 static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
 {
-	int cpu;
+	int cpu, this_cpu;
 	cpumask_t mask = { CPU_BITS_NONE };
 
 	if (!erratum_a15_798181())
 		return;
 
 	dummy_flush_tlb_a15_erratum();
+	this_cpu = get_cpu();
 	for_each_online_cpu(cpu) {
-		if (cpu == smp_processor_id())
+		if (cpu == this_cpu)
 			continue;
 		/*
 		 * We only need to send an IPI if the other CPUs are running
@@ -127,6 +127,7 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
 		cpumask_set_cpu(cpu, &mask);
 	}
 	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
+	put_cpu();
 }
 
 void flush_tlb_all(void)
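
This pairing is the "Disable preemption in broadcast_tlb*_a15_erratum()" fix from the shortlog: without get_cpu(), the task could migrate between reading its CPU id and sending the IPIs, so the mask could wrongly include (or exclude) the CPU it ends up running on. The general pattern, as a standalone sketch:

	#include <linux/smp.h>

	static void do_cross_cpu_work(void)
	{
		int this_cpu = get_cpu();	/* disables preemption, returns a stable CPU id */

		/* Work that compares other CPUs against this_cpu is safe here:
		 * the task cannot migrate until put_cpu() re-enables preemption. */

		put_cpu();
	}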