Diffstat (limited to 'arch/arm/kernel')

 arch/arm/kernel/Makefile       |  18
 arch/arm/kernel/asm-offsets.c  |   6
 arch/arm/kernel/entry-common.S |   4
 arch/arm/kernel/entry-header.S | 124
 arch/arm/kernel/entry-v7m.S    | 143
 arch/arm/kernel/head-nommu.S   | 170
 arch/arm/kernel/head.S         |  10
 arch/arm/kernel/hyp-stub.S     |   7
 arch/arm/kernel/psci.c         |   7
 arch/arm/kernel/psci_smp.c     |  84
 arch/arm/kernel/setup.c        | 101
 arch/arm/kernel/signal.c       |   9
 arch/arm/kernel/sleep.S        |  97
 arch/arm/kernel/smp.c          |  21
 arch/arm/kernel/suspend.c      |  76
 arch/arm/kernel/traps.c        |   8
 16 files changed, 813 insertions(+), 72 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5f3338eacad2..fccfbdb03df1 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -15,7 +15,7 @@ CFLAGS_REMOVE_return_address.o = -pg
 
 # Object file lists.
 
-obj-y		:= elf.o entry-armv.o entry-common.o irq.o opcodes.o \
+obj-y		:= elf.o entry-common.o irq.o opcodes.o \
 		   process.o ptrace.o return_address.o sched_clock.o \
 		   setup.o signal.o stacktrace.o sys_arm.o time.o traps.o
 
@@ -23,6 +23,12 @@ obj-$(CONFIG_ATAGS)		+= atags_parse.o
 obj-$(CONFIG_ATAGS_PROC)	+= atags_proc.o
 obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o
 
+ifeq ($(CONFIG_CPU_V7M),y)
+obj-y		+= entry-v7m.o
+else
+obj-y		+= entry-armv.o
+endif
+
 obj-$(CONFIG_OC_ETM)		+= etm.o
 obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
 obj-$(CONFIG_ISA_DMA_API)	+= dma.o
@@ -32,7 +38,10 @@ obj-$(CONFIG_ARTHUR)		+= arthur.o
 obj-$(CONFIG_ISA_DMA)		+= dma-isa.o
 obj-$(CONFIG_PCI)		+= bios32.o isa.o
 obj-$(CONFIG_ARM_CPU_SUSPEND)	+= sleep.o suspend.o
-obj-$(CONFIG_SMP)		+= smp.o smp_tlb.o
+obj-$(CONFIG_SMP)		+= smp.o
+ifdef CONFIG_MMU
+obj-$(CONFIG_SMP)		+= smp_tlb.o
+endif
 obj-$(CONFIG_HAVE_ARM_SCU)	+= smp_scu.o
 obj-$(CONFIG_HAVE_ARM_TWD)	+= smp_twd.o
 obj-$(CONFIG_ARM_ARCH_TIMER)	+= arch_timer.o
@@ -82,6 +91,9 @@ obj-$(CONFIG_DEBUG_LL)	+= debug.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 
 obj-$(CONFIG_ARM_VIRT_EXT)	+= hyp-stub.o
-obj-$(CONFIG_ARM_PSCI)		+= psci.o
+ifeq ($(CONFIG_ARM_PSCI),y)
+obj-y				+= psci.o
+obj-$(CONFIG_SMP)		+= psci_smp.o
+endif
 
 extra-y := $(head-y) vmlinux.lds
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index ee68cce6b48e..ded041711beb 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -23,6 +23,7 @@
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/procinfo.h>
+#include <asm/suspend.h>
 #include <asm/hardware/cache-l2x0.h>
 #include <linux/kbuild.h>
 
@@ -145,6 +146,11 @@ int main(void)
 #ifdef MULTI_CACHE
   DEFINE(CACHE_FLUSH_KERN_ALL,	offsetof(struct cpu_cache_fns, flush_kern_all));
 #endif
+#ifdef CONFIG_ARM_CPU_SUSPEND
+  DEFINE(SLEEP_SAVE_SP_SZ,	sizeof(struct sleep_save_sp));
+  DEFINE(SLEEP_SAVE_SP_PHYS,	offsetof(struct sleep_save_sp, save_ptr_stash_phys));
+  DEFINE(SLEEP_SAVE_SP_VIRT,	offsetof(struct sleep_save_sp, save_ptr_stash));
+#endif
   BLANK();
   DEFINE(DMA_BIDIRECTIONAL,	DMA_BIDIRECTIONAL);
   DEFINE(DMA_TO_DEVICE,		DMA_TO_DEVICE);
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 4bc816a74a2e..94104bf69719 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -350,6 +350,9 @@ ENDPROC(ftrace_stub)
 
 	.align	5
 ENTRY(vector_swi)
+#ifdef CONFIG_CPU_V7M
+	v7m_exception_entry
+#else
 	sub	sp, sp, #S_FRAME_SIZE
 	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
@@ -360,6 +363,7 @@ ENTRY(vector_swi)
 	str	lr, [sp, #S_PC]			@ Save calling PC
 	str	r8, [sp, #S_PSR]		@ Save CPSR
 	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
+#endif
 	zero_fp
 
 #ifdef CONFIG_ALIGNMENT_TRAP
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 160f3376ba6d..de23a9beed13 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -5,6 +5,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/errno.h>
 #include <asm/thread_info.h>
+#include <asm/v7m.h>
 
 @ Bad Abort numbers
 @ -----------------
@@ -44,6 +45,116 @@
 #endif
 	.endm
 
+#ifdef CONFIG_CPU_V7M
+/*
+ * ARMv7-M exception entry/exit macros.
+ *
+ * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
+ * automatically saved on the current stack (32 bytes) before
+ * switching to the exception stack (SP_main).
+ *
+ * If the exception is taken while in user mode, SP_main is
+ * empty. Otherwise, SP_main is aligned to 64 bit automatically
+ * (CCR.STKALIGN set).
+ *
+ * Linux assumes that interrupts are disabled when entering an
+ * exception handler and it may BUG if this is not the case. Interrupts
+ * are disabled during entry and reenabled in the exit macro.
+ *
+ * v7m_exception_slow_exit is used when returning from SVC or PendSV.
+ * When returning to kernel mode, we don't return from exception.
+ */
+	.macro	v7m_exception_entry
+	@ determine the location of the registers saved by the core during
+	@ exception entry. Depending on the mode the cpu was in when the
+	@ exception happened that is either on the main or the process stack.
+	@ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
+	@ was used.
+	tst	lr, #EXC_RET_STACK_MASK
+	mrsne	r12, psp
+	moveq	r12, sp
+
+	@ we cannot rely on r0-r3 and r12 matching the value saved in the
+	@ exception frame because of tail-chaining. So these have to be
+	@ reloaded.
+	ldmia	r12!, {r0-r3}
+
+	@ Linux expects to have irqs off. Do it here before taking stack space
+	cpsid	i
+
+	sub	sp, #S_FRAME_SIZE-S_IP
+	stmdb	sp!, {r0-r11}
+
+	@ load saved r12, lr, return address and xPSR.
+	@ r0-r7 are used for signals and never touched from now on. Clobbering
+	@ r8-r12 is OK.
+	mov	r9, r12
+	ldmia	r9!, {r8, r10-r12}
+
+	@ calculate the original stack pointer value.
+	@ r9 currently points to the memory location just above the auto saved
+	@ xPSR.
+	@ The cpu might automatically 8-byte align the stack. Bit 9
+	@ of the saved xPSR specifies if stack aligning took place. In this case
+	@ another 32-bit value is included in the stack.
+
+	tst	r12, V7M_xPSR_FRAMEPTRALIGN
+	addne	r9, r9, #4
+
+	@ store saved r12 using str to have a register to hold the base for stm
+	str	r8, [sp, #S_IP]
+	add	r8, sp, #S_SP
+	@ store r13-r15, xPSR
+	stmia	r8!, {r9-r12}
+	@ store old_r0
+	str	r0, [r8]
+	.endm
+
+/*
+ * PENDSV and SVCALL are configured to have the same exception
+ * priorities. As a kernel thread runs at SVCALL execution priority it
+ * can never be preempted and so we will never have to return to a
+ * kernel thread here.
+ */
+	.macro	v7m_exception_slow_exit ret_r0
+	cpsid	i
+	ldr	lr, =EXC_RET_THREADMODE_PROCESSSTACK
+
+	@ read original r12, sp, lr, pc and xPSR
+	add	r12, sp, #S_IP
+	ldmia	r12, {r1-r5}
+
+	@ an exception frame is always 8-byte aligned. To tell the hardware if
+	@ the sp to be restored is aligned or not set bit 9 of the saved xPSR
+	@ accordingly.
+	tst	r2, #4
+	subne	r2, r2, #4
+	orrne	r5, V7M_xPSR_FRAMEPTRALIGN
+	biceq	r5, V7M_xPSR_FRAMEPTRALIGN
+
+	@ write basic exception frame
+	stmdb	r2!, {r1, r3-r5}
+	ldmia	sp, {r1, r3-r5}
+	.if	\ret_r0
+	stmdb	r2!, {r0, r3-r5}
+	.else
+	stmdb	r2!, {r1, r3-r5}
+	.endif
+
+	@ restore process sp
+	msr	psp, r2
+
+	@ restore original r4-r11
+	ldmia	sp!, {r0-r11}
+
+	@ restore main sp
+	add	sp, sp, #S_FRAME_SIZE-S_IP
+
+	cpsie	i
+	bx	lr
+	.endm
+#endif	/* CONFIG_CPU_V7M */
+
 @
 @ Store/load the USER SP and LR registers by switching to the SYS
 @ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
@@ -165,6 +276,18 @@
 	rfeia	sp!
 	.endm
 
+#ifdef CONFIG_CPU_V7M
+	/*
+	 * Note we don't need to do clrex here as clearing the local monitor is
+	 * part of each exception entry and exit sequence.
+	 */
+	.macro	restore_user_regs, fast = 0, offset = 0
+	.if	\offset
+	add	sp, #\offset
+	.endif
+	v7m_exception_slow_exit ret_r0 = \fast
+	.endm
+#else	/* ifdef CONFIG_CPU_V7M */
 	.macro	restore_user_regs, fast = 0, offset = 0
 	clrex					@ clear the exclusive monitor
 	mov	r2, sp
@@ -181,6 +304,7 @@
 	add	sp, sp, #S_FRAME_SIZE - S_SP
 	movs	pc, lr				@ return & move spsr_svc into cpsr
 	.endm
+#endif	/* ifdef CONFIG_CPU_V7M / else */
 
 	.macro	get_thread_info, rd
 	mov	\rd, sp
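For reference before the new v7m entry code below: a minimal C sketch (not kernel code; the struct and field names are illustrative) of the eight-word frame an ARMv7-M core stacks automatically on exception entry, i.e. the layout v7m_exception_entry reloads r0-r3 and r12 from:

    #include <stdint.h>

    /* Hardware-stacked ARMv7-M exception frame, lowest address first.
     * 8 words = 32 bytes; when CCR.STKALIGN forces 8-byte alignment an
     * extra padding word may follow, flagged by bit 9 of the stacked
     * xPSR (V7M_xPSR_FRAMEPTRALIGN in the macros above). */
    struct v7m_hw_frame {
        uint32_t r0, r1, r2, r3;
        uint32_t r12;
        uint32_t lr;        /* R14 at the point of the exception */
        uint32_t ret_addr;  /* ReturnAddress() */
        uint32_t xpsr;      /* bit 9 = padding-word flag */
    };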
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
new file mode 100644
index 000000000000..e00621f1403f
--- /dev/null
+++ b/arch/arm/kernel/entry-v7m.S
@@ -0,0 +1,143 @@
+/*
+ * linux/arch/arm/kernel/entry-v7m.S
+ *
+ * Copyright (C) 2008 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Low-level vector interface routines for the ARMv7-M architecture
+ */
+#include <asm/memory.h>
+#include <asm/glue.h>
+#include <asm/thread_notify.h>
+#include <asm/v7m.h>
+
+#include <mach/entry-macro.S>
+
+#include "entry-header.S"
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+#error "CONFIG_TRACE_IRQFLAGS not supported on the current ARMv7M implementation"
+#endif
+
+__invalid_entry:
+	v7m_exception_entry
+	adr	r0, strerr
+	mrs	r1, ipsr
+	mov	r2, lr
+	bl	printk
+	mov	r0, sp
+	bl	show_regs
+1:	b	1b
+ENDPROC(__invalid_entry)
+
+strerr:	.asciz	"\nUnhandled exception: IPSR = %08lx LR = %08lx\n"
+
+	.align	2
+__irq_entry:
+	v7m_exception_entry
+
+	@
+	@ Invoke the IRQ handler
+	@
+	mrs	r0, ipsr
+	ldr	r1, =V7M_xPSR_EXCEPTIONNO
+	and	r0, r1
+	sub	r0, #16
+	mov	r1, sp
+	stmdb	sp!, {lr}
+	@ routine called with r0 = irq number, r1 = struct pt_regs *
+	bl	nvic_do_IRQ
+
+	pop	{lr}
+	@
+	@ Check for any pending work if returning to user
+	@
+	ldr	r1, =BASEADDR_V7M_SCB
+	ldr	r0, [r1, V7M_SCB_ICSR]
+	tst	r0, V7M_SCB_ICSR_RETTOBASE
+	beq	2f
+
+	get_thread_info tsk
+	ldr	r2, [tsk, #TI_FLAGS]
+	tst	r2, #_TIF_WORK_MASK
+	beq	2f			@ no work pending
+	mov	r0, #V7M_SCB_ICSR_PENDSVSET
+	str	r0, [r1, V7M_SCB_ICSR]	@ raise PendSV
+
+2:
+	@ registers r0-r3 and r12 are automatically restored on exception
+	@ return. r4-r7 were not clobbered in v7m_exception_entry so for
+	@ correctness they don't need to be restored. So only r8-r11 must be
+	@ restored here. The easiest way to do so is to restore r0-r7, too.
+	ldmia	sp!, {r0-r11}
+	add	sp, #S_FRAME_SIZE-S_IP
+	cpsie	i
+	bx	lr
+ENDPROC(__irq_entry)
+
+__pendsv_entry:
+	v7m_exception_entry
+
+	ldr	r1, =BASEADDR_V7M_SCB
+	mov	r0, #V7M_SCB_ICSR_PENDSVCLR
+	str	r0, [r1, V7M_SCB_ICSR]	@ clear PendSV
+
+	@ execute the pending work, including reschedule
+	get_thread_info tsk
+	mov	why, #0
+	b	ret_to_user
+ENDPROC(__pendsv_entry)
+
+/*
+ * Register switch for ARMv7-M processors.
+ * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
+ * previous and next are guaranteed not to be the same.
+ */
+ENTRY(__switch_to)
+	.fnstart
+	.cantunwind
+	add	ip, r1, #TI_CPU_SAVE
+	stmia	ip!, {r4 - r11}		@ Store most regs on stack
+	str	sp, [ip], #4
+	str	lr, [ip], #4
+	mov	r5, r0
+	add	r4, r2, #TI_CPU_SAVE
+	ldr	r0, =thread_notify_head
+	mov	r1, #THREAD_NOTIFY_SWITCH
+	bl	atomic_notifier_call_chain
+	mov	ip, r4
+	mov	r0, r5
+	ldmia	ip!, {r4 - r11}		@ Load all regs saved previously
+	ldr	sp, [ip]
+	ldr	pc, [ip, #4]!
+	.fnend
+ENDPROC(__switch_to)
+
+	.data
+	.align	8
+/*
+ * Vector table (64 words => 256 bytes natural alignment)
+ */
+ENTRY(vector_table)
+	.long	0			@ 0 - Reset stack pointer
+	.long	__invalid_entry		@ 1 - Reset
+	.long	__invalid_entry		@ 2 - NMI
+	.long	__invalid_entry		@ 3 - HardFault
+	.long	__invalid_entry		@ 4 - MemManage
+	.long	__invalid_entry		@ 5 - BusFault
+	.long	__invalid_entry		@ 6 - UsageFault
+	.long	__invalid_entry		@ 7 - Reserved
+	.long	__invalid_entry		@ 8 - Reserved
+	.long	__invalid_entry		@ 9 - Reserved
+	.long	__invalid_entry		@ 10 - Reserved
+	.long	vector_swi		@ 11 - SVCall
+	.long	__invalid_entry		@ 12 - Debug Monitor
+	.long	__invalid_entry		@ 13 - Reserved
+	.long	__pendsv_entry		@ 14 - PendSV
+	.long	__invalid_entry		@ 15 - SysTick
+	.rept	64 - 16
+	.long	__irq_entry		@ 16..63 - External Interrupts
+	.endr
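A hedged C sketch of the IRQ-number derivation in __irq_entry above (assuming V7M_xPSR_EXCEPTIONNO from asm/v7m.h is the 9-bit IPSR exception-number mask): IPSR holds the active exception number, and external interrupts start at exception 16, so vector entry 16 becomes Linux IRQ 0 for nvic_do_IRQ.

    #include <stdint.h>

    /* Mirror of "mrs r0, ipsr; and r0, r1; sub r0, #16" in __irq_entry. */
    static inline int v7m_irq_number(uint32_t ipsr)
    {
        return (int)(ipsr & 0x1ff) - 16;  /* 0x1ff assumed = V7M_xPSR_EXCEPTIONNO */
    }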
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 6a2e09c952c7..75f14cc3e073 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -17,8 +17,12 @@
 #include <asm/assembler.h>
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
+#include <asm/memory.h>
 #include <asm/cp15.h>
 #include <asm/thread_info.h>
+#include <asm/v7m.h>
+#include <asm/mpu.h>
+#include <asm/page.h>
 
 /*
  * Kernel startup entry point.
@@ -50,21 +54,86 @@ ENTRY(stext)
 
 	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
 						@ and irqs disabled
-#ifndef CONFIG_CPU_CP15
-	ldr	r9, =CONFIG_PROCESSOR_ID
-#else
+#if defined(CONFIG_CPU_CP15)
 	mrc	p15, 0, r9, c0, c0		@ get processor id
+#elif defined(CONFIG_CPU_V7M)
+	ldr	r9, =BASEADDR_V7M_SCB
+	ldr	r9, [r9, V7M_SCB_CPUID]
+#else
+	ldr	r9, =CONFIG_PROCESSOR_ID
 #endif
 	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
 	movs	r10, r5				@ invalid processor (r5=0)?
 	beq	__error_p			@ yes, error 'p'
 
-	adr	lr, BSYM(__after_proc_init)	@ return (PIC) address
+#ifdef CONFIG_ARM_MPU
+	/* Calculate the size of a region covering just the kernel */
+	ldr	r5, =PHYS_OFFSET		@ Region start: PHYS_OFFSET
+	ldr	r6, =(_end)			@ Cover whole kernel
+	sub	r6, r6, r5			@ Minimum size of region to map
+	clz	r6, r6				@ Region size must be 2^N...
+	rsb	r6, r6, #31			@ ...so round up region size
+	lsl	r6, r6, #MPU_RSR_SZ		@ Put size in right field
+	orr	r6, r6, #(1 << MPU_RSR_EN)	@ Set region enabled bit
+	bl	__setup_mpu
+#endif
+	ldr	r13, =__mmap_switched		@ address to jump to after
+						@ initialising sctlr
+	adr	lr, BSYM(1f)			@ return (PIC) address
 ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
 THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
 THUMB(	mov	pc, r12				)
+1:	b	__after_proc_init
 ENDPROC(stext)
 
+#ifdef CONFIG_SMP
+	__CPUINIT
+ENTRY(secondary_startup)
+	/*
+	 * Common entry point for secondary CPUs.
+	 *
+	 * Ensure that we're in SVC mode, and IRQs are disabled. Lookup
+	 * the processor type - there is no need to check the machine type
+	 * as it has already been validated by the primary processor.
+	 */
+	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+#ifndef CONFIG_CPU_CP15
+	ldr	r9, =CONFIG_PROCESSOR_ID
+#else
+	mrc	p15, 0, r9, c0, c0		@ get processor id
+#endif
+	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
+	movs	r10, r5				@ invalid processor?
+	beq	__error_p			@ yes, error 'p'
+
+	adr	r4, __secondary_data
+	ldmia	r4, {r7, r12}
+
+#ifdef CONFIG_ARM_MPU
+	/* Use MPU region info supplied by __cpu_up */
+	ldr	r6, [r7]			@ get secondary_data.mpu_szr
+	bl	__setup_mpu			@ Initialize the MPU
+#endif
+
+	adr	lr, BSYM(__after_proc_init)	@ return address
+	mov	r13, r12			@ __secondary_switched address
+ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
+THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
+THUMB(	mov	pc, r12				)
+ENDPROC(secondary_startup)
+
+ENTRY(__secondary_switched)
+	ldr	sp, [r7, #8]			@ set up the stack pointer
+	mov	fp, #0
+	b	secondary_start_kernel
+ENDPROC(__secondary_switched)
+
+	.type	__secondary_data, %object
+__secondary_data:
+	.long	secondary_data
+	.long	__secondary_switched
+#endif /* CONFIG_SMP */
+
 /*
  * Set the Control Register and Read the process ID.
  */
@@ -95,10 +164,97 @@ __after_proc_init:
 #endif
 	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
 #endif /* CONFIG_CPU_CP15 */
-
-	b	__mmap_switched			@ clear the BSS and jump
-						@ to start_kernel
+	mov	pc, r13
 ENDPROC(__after_proc_init)
 	.ltorg
 
+#ifdef CONFIG_ARM_MPU
+
+
+/* Set which MPU region should be programmed */
+.macro set_region_nr tmp, rgnr
+	mov	\tmp, \rgnr			@ Use static region numbers
+	mcr	p15, 0, \tmp, c6, c2, 0		@ Write RGNR
+.endm
+
+/* Setup a single MPU region, either D or I side (D-side for unified) */
+.macro setup_region bar, acr, sr, side = MPU_DATA_SIDE
+	mcr	p15, 0, \bar, c6, c1, (0 + \side)	@ I/DRBAR
+	mcr	p15, 0, \acr, c6, c1, (4 + \side)	@ I/DRACR
+	mcr	p15, 0, \sr, c6, c1, (2 + \side)	@ I/DRSR
+.endm
+
+/*
+ * Setup the MPU and initial MPU Regions. We create the following regions:
+ * Region 0: Use this for probing the MPU details, so leave disabled.
+ * Region 1: Background region - covers the whole of RAM as strongly ordered
+ * Region 2: Normal, Shared, cacheable for RAM. From PHYS_OFFSET, size from r6
+ * Region 3: Normal, shared, inaccessible from PL0 to protect the vectors page
+ *
+ * r6: Value to be written to DRSR (and IRSR if required) for MPU_RAM_REGION
+ */
+
+ENTRY(__setup_mpu)
+
+	/* Probe for v7 PMSA compliance */
+	mrc	p15, 0, r0, c0, c1, 4		@ Read ID_MMFR0
+	and	r0, r0, #(MMFR0_PMSA)		@ PMSA field
+	teq	r0, #(MMFR0_PMSAv7)		@ PMSA v7
+	bne	__error_p			@ Fail: ARM_MPU on NOT v7 PMSA
+
+	/* Determine whether the D/I-side memory map is unified. We set the
+	 * flags here and continue to use them for the rest of this function */
+	mrc	p15, 0, r0, c0, c0, 4		@ MPUIR
+	ands	r5, r0, #MPUIR_DREGION_SZMASK	@ 0 size d region => No MPU
+	beq	__error_p			@ Fail: ARM_MPU and no MPU
+	tst	r0, #MPUIR_nU			@ MPUIR_nU = 0 for unified
+
+	/* Setup second region first to free up r6 */
+	set_region_nr r0, #MPU_RAM_REGION
+	isb
+	/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
+	ldr	r0, =PHYS_OFFSET		@ RAM starts at PHYS_OFFSET
+	ldr	r5, =(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL)
+
+	setup_region r0, r5, r6, MPU_DATA_SIDE	@ PHYS_OFFSET, shared, enabled
+	beq	1f				@ Memory-map not unified
+	setup_region r0, r5, r6, MPU_INSTR_SIDE	@ PHYS_OFFSET, shared, enabled
+1:	isb
+
+	/* First/background region */
+	set_region_nr r0, #MPU_BG_REGION
+	isb
+	/* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */
+	mov	r0, #0				@ BG region starts at 0x0
+	ldr	r5, =(MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA)
+	mov	r6, #MPU_RSR_ALL_MEM		@ 4GB region, enabled
+
+	setup_region r0, r5, r6, MPU_DATA_SIDE	@ 0x0, BG region, enabled
+	beq	2f				@ Memory-map not unified
+	setup_region r0, r5, r6, MPU_INSTR_SIDE	@ 0x0, BG region, enabled
+2:	isb
+
+	/* Vectors region */
+	set_region_nr r0, #MPU_VECTORS_REGION
+	isb
+	/* Shared, inaccessible to PL0, rw PL1 */
+	mov	r0, #CONFIG_VECTORS_BASE	@ Cover from VECTORS_BASE
+	ldr	r5, =(MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL)
+	/* Writing N to bits 5:1 (RSR_SZ) --> region size 2^(N+1) */
+	mov	r6, #(((PAGE_SHIFT - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN)
+
+	setup_region r0, r5, r6, MPU_DATA_SIDE	@ VECTORS_BASE, PL0 NA, enabled
+	beq	3f				@ Memory-map not unified
+	setup_region r0, r5, r6, MPU_INSTR_SIDE	@ VECTORS_BASE, PL0 NA, enabled
3:	isb
+
+	/* Enable the MPU */
+	mrc	p15, 0, r0, c1, c0, 0		@ Read SCTLR
+	bic	r0, r0, #CR_BR			@ Disable the 'default mem-map'
+	orr	r0, r0, #CR_M			@ Set SCTRL.M (MPU on)
+	mcr	p15, 0, r0, c1, c0, 0		@ Enable MPU
+	isb
+	mov	pc, lr
+ENDPROC(__setup_mpu)
+#endif
 #include "head-common.S"
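The clz/rsb pair in stext that sizes the kernel's MPU region compresses a round-up into two instructions; a C sketch of the same computation, assuming the PMSAv7 encoding where a DRSR.SZ field value of N enables a 2^(N+1)-byte region:

    #include <stdint.h>

    /* Mirror of "clz r6, r6; rsb r6, r6, #31": for a kernel image of
     * `bytes` bytes, pick the SZ value whose 2^(SZ+1)-byte region is at
     * least as large as the image. */
    static inline uint32_t mpu_drsr_sz(uint32_t bytes)
    {
        uint32_t sz = 31u - (uint32_t)__builtin_clz(bytes);
        return sz;  /* region size 2^(sz + 1) >= bytes */
    }

For example, a 5MiB kernel (clz = 9) yields sz = 22, i.e. an 8MiB region.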
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 8bac553fe213..45e8935cae4e 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -156,7 +156,7 @@ ENDPROC(stext)
  *
  * Returns:
  *  r0, r3, r5-r7 corrupted
- *  r4 = physical page table address
+ *  r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h)
  */
 __create_page_tables:
 	pgtbl	r4, r8				@ page table address
@@ -331,6 +331,7 @@ __create_page_tables:
 #endif
 #ifdef CONFIG_ARM_LPAE
 	sub	r4, r4, #0x1000		@ point to the PGD table
+	mov	r4, r4, lsr #ARCH_PGD_SHIFT
 #endif
 	mov	pc, lr
 ENDPROC(__create_page_tables)
@@ -408,7 +409,7 @@ __secondary_data:
  *  r0  = cp#15 control register
  *  r1  = machine ID
  *  r2  = atags or dtb pointer
- *  r4  = page table pointer
+ *  r4  = page table (see ARCH_PGD_SHIFT in asm/memory.h)
  *  r9  = processor ID
  *  r13 = *virtual* address to jump to upon completion
  */
@@ -427,10 +428,7 @@ __enable_mmu:
 #ifdef CONFIG_CPU_ICACHE_DISABLE
 	bic	r0, r0, #CR_I
 #endif
-#ifdef CONFIG_ARM_LPAE
-	mov	r5, #0
-	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
-#else
+#ifndef CONFIG_ARM_LPAE
 	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
 		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
 		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 1315c4ccfa56..4910232c4833 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -153,6 +153,13 @@ THUMB(	orr	r7, #(1 << 30)	)	@ HSCTLR.TE
 	mrc	p15, 4, r7, c14, c1, 0	@ CNTHCTL
 	orr	r7, r7, #3		@ PL1PCEN | PL1PCTEN
 	mcr	p15, 4, r7, c14, c1, 0	@ CNTHCTL
+	mov	r7, #0
+	mcrr	p15, 4, r7, r7, c14	@ CNTVOFF
+
+	@ Disable virtual timer in case it was counting
+	mrc	p15, 0, r7, c14, c3, 1	@ CNTV_CTL
+	bic	r7, #1			@ Clear ENABLE
+	mcr	p15, 0, r7, c14, c3, 1	@ CNTV_CTL
 1:
 #endif
 
diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
index 36531643cc2c..46931880093d 100644
--- a/arch/arm/kernel/psci.c
+++ b/arch/arm/kernel/psci.c
@@ -158,7 +158,7 @@ static const struct of_device_id psci_of_match[] __initconst = {
 	{},
 };
 
-static int __init psci_init(void)
+void __init psci_init(void)
 {
 	struct device_node *np;
 	const char *method;
@@ -166,7 +166,7 @@ static int __init psci_init(void)
 
 	np = of_find_matching_node(NULL, psci_of_match);
 	if (!np)
-		return 0;
+		return;
 
 	pr_info("probing function IDs from device-tree\n");
 
@@ -206,6 +206,5 @@ static int __init psci_init(void)
 
 out_put_node:
 	of_node_put(np);
-	return 0;
+	return;
 }
-early_initcall(psci_init);
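Dropping the early_initcall is not a behavioural loss: psci_init() is now called explicitly from setup_arch(), so the PSCI function IDs are probed before the SMP operations are chosen. Condensed from the setup.c hunk later in this diff:

    	psci_init();
    #ifdef CONFIG_SMP
    	if (is_smp()) {
    		if (!mdesc->smp_init || !mdesc->smp_init()) {
    			if (psci_smp_available())
    				smp_set_ops(&psci_smp_ops);
    			else if (mdesc->smp)
    				smp_set_ops(mdesc->smp);
    		}
    		smp_init_cpus();
    		smp_build_mpidr_hash();
    	}
    #endif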
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
new file mode 100644
index 000000000000..219f1d73572a
--- /dev/null
+++ b/arch/arm/kernel/psci_smp.c
@@ -0,0 +1,84 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/smp.h>
+#include <linux/of.h>
+
+#include <asm/psci.h>
+#include <asm/smp_plat.h>
+
+/*
+ * psci_smp assumes that the following is true about PSCI:
+ *
+ * cpu_suspend   Suspend the execution on a CPU
+ * @state        we don't currently describe affinity levels, so just pass 0.
+ * @entry_point  the first instruction to be executed on return
+ * returns 0     success, < 0 on failure
+ *
+ * cpu_off       Power down a CPU
+ * @state        we don't currently describe affinity levels, so just pass 0.
+ * no return on successful call
+ *
+ * cpu_on        Power up a CPU
+ * @cpuid        cpuid of target CPU, as from MPIDR
+ * @entry_point  the first instruction to be executed on return
+ * returns 0     success, < 0 on failure
+ *
+ * migrate       Migrate the context to a different CPU
+ * @cpuid        cpuid of target CPU, as from MPIDR
+ * returns 0     success, < 0 on failure
+ *
+ */
+
+extern void secondary_startup(void);
+
+static int __cpuinit psci_boot_secondary(unsigned int cpu,
+					 struct task_struct *idle)
+{
+	if (psci_ops.cpu_on)
+		return psci_ops.cpu_on(cpu_logical_map(cpu),
+				       __pa(secondary_startup));
+	return -ENODEV;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void __ref psci_cpu_die(unsigned int cpu)
+{
+	const struct psci_power_state ps = {
+		.type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
+	};
+
+	if (psci_ops.cpu_off)
+		psci_ops.cpu_off(ps);
+
+	/* We should never return */
+	panic("psci: cpu %d failed to shutdown\n", cpu);
+}
+#endif
+
+bool __init psci_smp_available(void)
+{
+	/* is cpu_on available at least? */
+	return (psci_ops.cpu_on != NULL);
+}
+
+struct smp_operations __initdata psci_smp_ops = {
+	.smp_boot_secondary	= psci_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+	.cpu_die		= psci_cpu_die,
+#endif
+};
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 0cde326f5542..9b653278c9e8 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -37,6 +37,7 @@
 #include <asm/cputype.h>
 #include <asm/elf.h>
 #include <asm/procinfo.h>
+#include <asm/psci.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp_plat.h>
@@ -128,7 +129,9 @@ struct stack {
 	u32 und[3];
 } ____cacheline_aligned;
 
+#ifndef CONFIG_CPU_V7M
 static struct stack stacks[NR_CPUS];
+#endif
 
 char elf_platform[ELF_PLATFORM_SIZE];
 EXPORT_SYMBOL(elf_platform);
@@ -207,7 +210,7 @@ static const char *proc_arch[] = {
 	"5TEJ",
 	"6TEJ",
 	"7",
-	"?(11)",
+	"7M",
 	"?(12)",
 	"?(13)",
 	"?(14)",
@@ -216,6 +219,12 @@ static const char *proc_arch[] = {
 	"?(17)",
 };
 
+#ifdef CONFIG_CPU_V7M
+static int __get_cpu_architecture(void)
+{
+	return CPU_ARCH_ARMv7M;
+}
+#else
 static int __get_cpu_architecture(void)
 {
 	int cpu_arch;
@@ -248,6 +257,7 @@ static int __get_cpu_architecture(void)
 
 	return cpu_arch;
 }
+#endif
 
 int __pure cpu_architecture(void)
 {
@@ -293,7 +303,9 @@ static void __init cacheid_init(void)
 {
 	unsigned int arch = cpu_architecture();
 
-	if (arch >= CPU_ARCH_ARMv6) {
+	if (arch == CPU_ARCH_ARMv7M) {
+		cacheid = 0;
+	} else if (arch >= CPU_ARCH_ARMv6) {
 		unsigned int cachetype = read_cpuid_cachetype();
 		if ((cachetype & (7 << 29)) == 4 << 29) {
 			/* ARMv7 register format */
@@ -355,7 +367,7 @@ void __init early_print(const char *str, ...)
 
 static void __init cpuid_init_hwcaps(void)
 {
-	unsigned int divide_instrs;
+	unsigned int divide_instrs, vmsa;
 
 	if (cpu_architecture() < CPU_ARCH_ARMv7)
 		return;
@@ -368,6 +380,11 @@ static void __init cpuid_init_hwcaps(void)
 	case 1:
 		elf_hwcap |= HWCAP_IDIVT;
 	}
+
+	/* LPAE implies atomic ldrd/strd instructions */
+	vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
+	if (vmsa >= 5)
+		elf_hwcap |= HWCAP_LPAE;
 }
 
 static void __init feat_v6_fixup(void)
@@ -392,6 +409,7 @@ static void __init feat_v6_fixup(void)
  */
 void notrace cpu_init(void)
 {
+#ifndef CONFIG_CPU_V7M
 	unsigned int cpu = smp_processor_id();
 	struct stack *stk = &stacks[cpu];
 
@@ -442,6 +460,7 @@ void notrace cpu_init(void)
 	      "I" (offsetof(struct stack, und[0])),
 	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
 	    : "r14");
+#endif
 }
 
 u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
@@ -466,6 +485,72 @@ void __init smp_setup_processor_id(void)
 	printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
 }
 
+struct mpidr_hash mpidr_hash;
+#ifdef CONFIG_SMP
+/**
+ * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
+ *			  level in order to build a linear index from an
+ *			  MPIDR value. Resulting algorithm is a collision
+ *			  free hash carried out through shifting and ORing
+ */
+static void __init smp_build_mpidr_hash(void)
+{
+	u32 i, affinity;
+	u32 fs[3], bits[3], ls, mask = 0;
+	/*
+	 * Pre-scan the list of MPIDRS and filter out bits that do
+	 * not contribute to affinity levels, ie they never toggle.
+	 */
+	for_each_possible_cpu(i)
+		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
+	pr_debug("mask of set bits 0x%x\n", mask);
+	/*
+	 * Find and stash the last and first bit set at all affinity levels to
+	 * check how many bits are required to represent them.
+	 */
+	for (i = 0; i < 3; i++) {
+		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
+		/*
+		 * Find the MSB bit and LSB bits position
+		 * to determine how many bits are required
+		 * to express the affinity level.
+		 */
+		ls = fls(affinity);
+		fs[i] = affinity ? ffs(affinity) - 1 : 0;
+		bits[i] = ls - fs[i];
+	}
+	/*
+	 * An index can be created from the MPIDR by isolating the
+	 * significant bits at each affinity level and by shifting
+	 * them in order to compress the 24 bits values space to a
+	 * compressed set of values. This is equivalent to hashing
+	 * the MPIDR through shifting and ORing. It is a collision free
+	 * hash though not minimal since some levels might contain a number
+	 * of CPUs that is not an exact power of 2 and their bit
+	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
+	 */
+	mpidr_hash.shift_aff[0] = fs[0];
+	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
+	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
+						(bits[1] + bits[0]);
+	mpidr_hash.mask = mask;
+	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
+	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
+				mpidr_hash.shift_aff[0],
+				mpidr_hash.shift_aff[1],
+				mpidr_hash.shift_aff[2],
+				mpidr_hash.mask,
+				mpidr_hash.bits);
+	/*
+	 * 4x is an arbitrary value used to warn on a hash table much bigger
+	 * than expected on most systems.
+	 */
+	if (mpidr_hash_size() > 4 * num_possible_cpus())
+		pr_warn("Large number of MPIDR hash buckets detected\n");
+	sync_cache_w(&mpidr_hash);
+}
+#endif
+
 static void __init setup_processor(void)
 {
 	struct proc_info_list *list;
@@ -803,10 +888,17 @@ void __init setup_arch(char **cmdline_p)
 	unflatten_device_tree();
 
 	arm_dt_init_cpu_maps();
+	psci_init();
 #ifdef CONFIG_SMP
 	if (is_smp()) {
-		smp_set_ops(mdesc->smp);
+		if (!mdesc->smp_init || !mdesc->smp_init()) {
+			if (psci_smp_available())
+				smp_set_ops(&psci_smp_ops);
+			else if (mdesc->smp)
+				smp_set_ops(mdesc->smp);
+		}
 		smp_init_cpus();
+		smp_build_mpidr_hash();
 	}
 #endif
 
@@ -879,6 +971,7 @@ static const char *hwcap_str[] = {
 	"vfpv4",
 	"idiva",
 	"idivt",
+	"lpae",
 	NULL
 };
 
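To make smp_build_mpidr_hash() concrete, a standalone worked example (the four-CPU topology is hypothetical, not from this patch): with MPIDR[23:0] values 0x000, 0x001, 0x100 and 0x101, the toggling-bit mask is 0x101, each populated affinity level needs one bit, and the shifts computed above (shift_aff[0] = 0, shift_aff[1] = 8 + 0 - 1 = 7) collapse the MPIDR into a dense 2-bit index.

    #include <stdio.h>

    /* Hash specialised for mask = 0x101: aff0 keeps bit 0 in place,
     * aff1's bit 8 is shifted down by 7 to land next to it. */
    static unsigned int hash_example(unsigned int mpidr)
    {
        unsigned int masked = mpidr & 0x101;
        return ((masked & 0xff) >> 0) | ((masked & 0xff00) >> 7);
    }

    int main(void)
    {
        unsigned int cpus[] = { 0x000, 0x001, 0x100, 0x101 };
        for (int i = 0; i < 4; i++)
            printf("MPIDR %#05x -> index %u\n", cpus[i], hash_example(cpus[i]));
        return 0;  /* prints indices 0, 1, 2, 3 */
    }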
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 296786bdbb73..1c16c35c271a 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -392,14 +392,19 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
 	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
 		idx += 3;
 
+	/*
+	 * Put the sigreturn code on the stack no matter which return
+	 * mechanism we use in order to remain ABI compliant
+	 */
 	if (__put_user(sigreturn_codes[idx], rc) ||
 	    __put_user(sigreturn_codes[idx+1], rc+1))
 		return 1;
 
-	if (cpsr & MODE32_BIT) {
+	if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) {
 		/*
 		 * 32-bit code can use the new high-page
-		 * signal return code support.
+		 * signal return code support except when the MPU has
+		 * protected the vectors page from PL0
 		 */
 		retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
 	} else {
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 987dcf33415c..db1536b8b30b 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -7,6 +7,49 @@
 	.text
 
 /*
+ * Implementation of MPIDR hash algorithm through shifting
+ * and OR'ing.
+ *
+ * @dst: register containing hash result
+ * @rs0: register containing affinity level 0 bit shift
+ * @rs1: register containing affinity level 1 bit shift
+ * @rs2: register containing affinity level 2 bit shift
+ * @mpidr: register containing MPIDR value
+ * @mask: register containing MPIDR mask
+ *
+ * Pseudo C-code:
+ *
+ *u32 dst;
+ *
+ *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 mpidr, u32 mask) {
+ *	u32 aff0, aff1, aff2;
+ *	u32 mpidr_masked = mpidr & mask;
+ *	aff0 = mpidr_masked & 0xff;
+ *	aff1 = mpidr_masked & 0xff00;
+ *	aff2 = mpidr_masked & 0xff0000;
+ *	dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2);
+ *}
+ * Input registers: rs0, rs1, rs2, mpidr, mask
+ * Output register: dst
+ * Note: input and output registers must be disjoint register sets
+ *       (eg: a macro instance with mpidr = r1 and dst = r1 is invalid)
+ */
+	.macro compute_mpidr_hash dst, rs0, rs1, rs2, mpidr, mask
+	and	\mpidr, \mpidr, \mask			@ mask out MPIDR bits
+	and	\dst, \mpidr, #0xff			@ mask=aff0
+ARM(	mov	\dst, \dst, lsr \rs0		)	@ dst=aff0>>rs0
+THUMB(	lsr	\dst, \dst, \rs0		)
+	and	\mask, \mpidr, #0xff00			@ mask = aff1
+ARM(	orr	\dst, \dst, \mask, lsr \rs1	)	@ dst|=(aff1>>rs1)
+THUMB(	lsr	\mask, \mask, \rs1		)
+THUMB(	orr	\dst, \dst, \mask		)
+	and	\mask, \mpidr, #0xff0000		@ mask = aff2
+ARM(	orr	\dst, \dst, \mask, lsr \rs2	)	@ dst|=(aff2>>rs2)
+THUMB(	lsr	\mask, \mask, \rs2		)
+THUMB(	orr	\dst, \dst, \mask		)
+	.endm
+
+/*
  * Save CPU state for a suspend.  This saves the CPU general purpose
  * registers, and allocates space on the kernel stack to save the CPU
  * specific registers and some other data for resume.
@@ -29,12 +72,18 @@ ENTRY(__cpu_suspend)
 	mov	r1, r4			@ size of save block
 	mov	r2, r5			@ virtual SP
 	ldr	r3, =sleep_save_sp
-#ifdef CONFIG_SMP
-	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
-	ALT_UP(mov lr, #0)
-	and	lr, lr, #15
+	ldr	r3, [r3, #SLEEP_SAVE_SP_VIRT]
+	ALT_SMP(mrc p15, 0, r9, c0, c0, 5)
+	ALT_UP_B(1f)
+	ldr	r8, =mpidr_hash
+	/*
+	 * This ldmia relies on the memory layout of the mpidr_hash
+	 * struct mpidr_hash.
+	 */
+	ldmia	r8, {r4-r7}	@ r4 = mpidr mask (r5,r6,r7) = l[0,1,2] shifts
+	compute_mpidr_hash	lr, r5, r6, r7, r9, r4
 	add	r3, r3, lr, lsl #2
-#endif
+1:
 	bl	__cpu_suspend_save
 	adr	lr, BSYM(cpu_suspend_abort)
 	ldmfd	sp!, {r0, pc}		@ call suspend fn
@@ -81,15 +130,23 @@ ENDPROC(cpu_resume_after_mmu)
 	.data
 	.align
 ENTRY(cpu_resume)
-#ifdef CONFIG_SMP
-	adr	r0, sleep_save_sp
-	ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
-	ALT_UP(mov r1, #0)
-	and	r1, r1, #15
-	ldr	r0, [r0, r1, lsl #2]	@ stack phys addr
-#else
-	ldr	r0, sleep_save_sp	@ stack phys addr
-#endif
+	mov	r1, #0
+	ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
+	ALT_UP_B(1f)
+	adr	r2, mpidr_hash_ptr
+	ldr	r3, [r2]
+	add	r2, r2, r3		@ r2 = struct mpidr_hash phys address
+	/*
+	 * This ldmia relies on the memory layout of the mpidr_hash
+	 * struct mpidr_hash.
+	 */
+	ldmia	r2, { r3-r6 }	@ r3 = mpidr mask (r4,r5,r6) = l[0,1,2] shifts
+	compute_mpidr_hash	r1, r4, r5, r6, r0, r3
+1:
+	adr	r0, _sleep_save_sp
+	ldr	r0, [r0, #SLEEP_SAVE_SP_PHYS]
+	ldr	r0, [r0, r1, lsl #2]
+
 	setmode	PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1  @ set SVC, irqs off
 	@ load phys pgd, stack, resume fn
 ARM(	ldmia	r0!, {r1, sp, pc}	)
@@ -98,7 +155,11 @@ THUMB(	mov	sp, r2			)
 THUMB(	bx	r3			)
 ENDPROC(cpu_resume)
 
-sleep_save_sp:
-	.rept	CONFIG_NR_CPUS
-	.long	0				@ preserve stack phys ptr here
-	.endr
+	.align 2
+mpidr_hash_ptr:
+	.long	mpidr_hash - .			@ mpidr_hash struct offset
+
+	.type	sleep_save_sp, #object
+ENTRY(sleep_save_sp)
+_sleep_save_sp:
+	.space	SLEEP_SAVE_SP_SZ		@ struct sleep_save_sp
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5919eb451bb9..c5fb5469054b 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -45,6 +45,7 @@
 #include <asm/smp_plat.h>
 #include <asm/virt.h>
 #include <asm/mach/arch.h>
+#include <asm/mpu.h>
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -78,6 +79,13 @@ void __init smp_set_ops(struct smp_operations *ops)
 		smp_ops = *ops;
 };
 
+static unsigned long get_arch_pgd(pgd_t *pgd)
+{
+	phys_addr_t pgdir = virt_to_phys(pgd);
+	BUG_ON(pgdir & ARCH_PGD_MASK);
+	return pgdir >> ARCH_PGD_SHIFT;
+}
+
 int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
 	int ret;
@@ -87,8 +95,14 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
 	 * its stack and the page tables.
 	 */
 	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
-	secondary_data.pgdir = virt_to_phys(idmap_pgd);
-	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
+#ifdef CONFIG_ARM_MPU
+	secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr;
+#endif
+
+#ifdef CONFIG_MMU
+	secondary_data.pgdir = get_arch_pgd(idmap_pgd);
+	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
+#endif
 	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
 	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
 
@@ -112,9 +126,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
 		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
 	}
 
-	secondary_data.stack = NULL;
-	secondary_data.pgdir = 0;
 
+	memset(&secondary_data, 0, sizeof(secondary_data));
 	return ret;
 }
 
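get_arch_pgd() exists because with LPAE the page tables can sit above 4GB, so a raw 32-bit virt_to_phys() value can no longer carry the pgdir to the secondary CPU. Shifting by ARCH_PGD_SHIFT (from asm/memory.h; the BUG_ON checks the alignment this relies on) keeps the handover width-independent, and the boot code widens the value again before programming TTBR0. An illustrative sketch, not kernel code:

    #include <stdint.h>

    static inline uint32_t pack_pgd(uint64_t phys, unsigned int shift)
    {
        /* get_arch_pgd(): the low bits must be zero, i.e. naturally aligned */
        return (uint32_t)(phys >> shift);
    }

    static inline uint64_t unpack_pgd(uint32_t packed, unsigned int shift)
    {
        /* reverse step done by the boot code before loading TTBR0 */
        return (uint64_t)packed << shift;
    }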
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index c59c97ea8268..41cf3cbf756d 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -1,15 +1,54 @@
 #include <linux/init.h>
+#include <linux/slab.h>
 
+#include <asm/cacheflush.h>
 #include <asm/idmap.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/memory.h>
+#include <asm/smp_plat.h>
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
 
 extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
 extern void cpu_resume_mmu(void);
 
+#ifdef CONFIG_MMU
+/*
+ * Hide the first two arguments to __cpu_suspend - these are an implementation
+ * detail which platform code shouldn't have to know about.
+ */
+int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+{
+	struct mm_struct *mm = current->active_mm;
+	int ret;
+
+	if (!idmap_pgd)
+		return -EINVAL;
+
+	/*
+	 * Provide a temporary page table with an identity mapping for
+	 * the MMU-enable code, required for resuming.  On successful
+	 * resume (indicated by a zero return code), we need to switch
+	 * back to the correct page tables.
+	 */
+	ret = __cpu_suspend(arg, fn);
+	if (ret == 0) {
+		cpu_switch_mm(mm->pgd, mm);
+		local_flush_bp_all();
+		local_flush_tlb_all();
+	}
+
+	return ret;
+}
+#else
+int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+{
+	return __cpu_suspend(arg, fn);
+}
+#define	idmap_pgd	NULL
+#endif
+
 /*
  * This is called by __cpu_suspend() to save the state, and do whatever
  * flushing is required to ensure that when the CPU goes to sleep we have
@@ -47,30 +86,19 @@ void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
 			virt_to_phys(save_ptr) + sizeof(*save_ptr));
 }
 
-/*
- * Hide the first two arguments to __cpu_suspend - these are an implementation
- * detail which platform code shouldn't have to know about.
- */
-int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
-{
-	struct mm_struct *mm = current->active_mm;
-	int ret;
-
-	if (!idmap_pgd)
-		return -EINVAL;
+extern struct sleep_save_sp sleep_save_sp;
 
-	/*
-	 * Provide a temporary page table with an identity mapping for
-	 * the MMU-enable code, required for resuming.  On successful
-	 * resume (indicated by a zero return code), we need to switch
-	 * back to the correct page tables.
-	 */
-	ret = __cpu_suspend(arg, fn);
-	if (ret == 0) {
-		cpu_switch_mm(mm->pgd, mm);
-		local_flush_bp_all();
-		local_flush_tlb_all();
-	}
+static int cpu_suspend_alloc_sp(void)
+{
+	void *ctx_ptr;
+	/* ctx_ptr is an array of physical addresses */
+	ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(u32), GFP_KERNEL);
 
-	return ret;
+	if (WARN_ON(!ctx_ptr))
+		return -ENOMEM;
+	sleep_save_sp.save_ptr_stash = ctx_ptr;
+	sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
+	sync_cache_w(&sleep_save_sp);
+	return 0;
 }
+early_initcall(cpu_suspend_alloc_sp);
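For context, the (unchanged) calling convention that cpu_suspend() exposes: platform code supplies a finisher that actually enters the low-power state. A hedged sketch with hypothetical platform names:

    /* The finisher runs on the temporary page tables; if it returns, the
     * suspend is treated as failed and its return value propagates, while
     * a zero return from cpu_suspend() means the CPU came back through
     * cpu_resume with state restored. */
    static int example_finisher(unsigned long arg)
    {
        /* program the power controller here, then stop the CPU */
        cpu_do_idle();
        return 1;            /* only reached if the CPU never powered off */
    }

    static void example_enter_sleep(void)
    {
        if (cpu_suspend(0, example_finisher))
            pr_warn("example: suspend aborted, continuing\n");
    }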
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 517bfd4da1c9..cab094c234ee 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -812,6 +812,7 @@ static void __init kuser_get_tls_init(unsigned long vectors)
 
 void __init early_trap_init(void *vectors_base)
 {
+#ifndef CONFIG_CPU_V7M
 	unsigned long vectors = (unsigned long)vectors_base;
 	extern char __stubs_start[], __stubs_end[];
 	extern char __vectors_start[], __vectors_end[];
@@ -843,4 +844,11 @@ void __init early_trap_init(void *vectors_base)
 
 	flush_icache_range(vectors, vectors + PAGE_SIZE);
 	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
+#else /* ifndef CONFIG_CPU_V7M */
+	/*
+	 * On V7-M there is no need to copy the vector table to a dedicated
+	 * memory area. The address is configurable and so a table in the
+	 * kernel image can be used.
+	 */
+#endif
 }