author	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-11 23:32:43 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-11 23:32:43 -0400
commit	5cea24c5899a81abf59706d69580dd5c734effa8 (patch)
tree	c080ec6b1c6cf27b50f00b2980068fb563b6f7ec /arch/arm/kernel
parent	2fc07efa2241afe08de136c061b3baa103fb286c (diff)
parent	a0f0dd57f4a85310d9936f1770a0424b49fef876 (diff)
Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm
Pull second set of ARM updates from Russell King:
 "This is the second set of ARM updates for this merge window.

  Contained within are changes to allow the kernel to boot in hypervisor
  mode on CPUs supporting virtualization, and cache flushing support to
  the point of inner shareable unification, which are used by the
  suspend/resume code to avoid having to do a full cache flush.

  Also included is one fix for VFP code identified by Michael Olbrich."

* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm:
  ARM: vfp: fix saving d16-d31 vfp registers on v6+ kernels
  ARM: 7549/1: HYP: fix boot on some ARM1136 cores
  ARM: 7542/1: mm: fix cache LoUIS API for xscale and feroceon
  ARM: mm: update __v7_setup() to the new LoUIS cache maintenance API
  ARM: kernel: update __cpu_disable to use cache LoUIS maintenance API
  ARM: kernel: update cpu_suspend code to use cache LoUIS operations
  ARM: mm: rename jump labels in v7_flush_dcache_all function
  ARM: mm: implement LoUIS API for cache maintenance ops
  ARM: virt: arch_timers: enable access to physical timers
  ARM: virt: Add CONFIG_ARM_VIRT_EXT option
  ARM: virt: Add boot-time diagnostics
  ARM: virt: Update documentation for hyp mode entry support
  ARM: zImage/virt: hyp mode entry support for the zImage loader
  ARM: virt: allow the kernel to be entered in HYP mode
  ARM: opcodes: add __ERET/__MSR_ELR_HYP instruction encoding
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--	arch/arm/kernel/Makefile	|   2
-rw-r--r--	arch/arm/kernel/head.S		|  14
-rw-r--r--	arch/arm/kernel/hyp-stub.S	| 223
-rw-r--r--	arch/arm/kernel/setup.c		|  20
-rw-r--r--	arch/arm/kernel/smp.c		|   8
-rw-r--r--	arch/arm/kernel/suspend.c	|  17
6 files changed, 279 insertions, 5 deletions
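
The cache-maintenance changes below all build on the new Level of Unification
Inner Shareable (LoUIS) API. A minimal C sketch of the intended split between
the two flush primitives, assuming only the interfaces this series adds or the
kernel already ships (flush_cache_louis() is the new entry point; both caller
functions are hypothetical illustrations):

#include <asm/cacheflush.h>

/* Hypothetical caller: a single CPU leaving the coherency domain. */
static void example_cpu_teardown(void)
{
	/*
	 * Clean and invalidate only up to the Level of Unification
	 * Inner Shareable: dirty lines reach the cache levels shared
	 * by all CPUs, which is all an individual CPU going offline
	 * (or suspending) needs. Much cheaper than walking every level.
	 */
	flush_cache_louis();
}

/* Hypothetical caller: the whole cache hierarchy is losing state. */
static void example_system_power_down(void)
{
	/* Full clean/invalidate of every cache level, as before. */
	flush_cache_all();
}

An individual CPU being unplugged or suspended only has to make its dirty
lines visible to the remaining CPUs; the full-hierarchy flush stays reserved
for cases where the data must actually reach main memory.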
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5dfef9d97ed9..5bbec7b8183e 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -81,4 +81,6 @@ head-y := head$(MMUEXT).o
 obj-$(CONFIG_DEBUG_LL)		+= debug.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 
+obj-$(CONFIG_ARM_VIRT_EXT)	+= hyp-stub.o
+
 extra-y := $(head-y) vmlinux.lds
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 9874d0741191..4eee351f4668 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -83,8 +83,12 @@ ENTRY(stext)
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
 
-	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
-						@ and irqs disabled
+#ifdef CONFIG_ARM_VIRT_EXT
+	bl	__hyp_stub_install
+#endif
+	@ ensure svc mode and all interrupts masked
+	safe_svcmode_maskall r9
+
 	mrc	p15, 0, r9, c0, c0		@ get processor id
 	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
 	movs	r10, r5				@ invalid processor (r5=0)?
@@ -326,7 +330,11 @@ ENTRY(secondary_startup)
 	 * the processor type - there is no need to check the machine type
 	 * as it has already been validated by the primary processor.
 	 */
-	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+#ifdef CONFIG_ARM_VIRT_EXT
+	bl	__hyp_stub_install
+#endif
+	safe_svcmode_maskall r9
+
 	mrc	p15, 0, r9, c0, c0		@ get processor id
 	bl	__lookup_processor_type
 	movs	r10, r5				@ invalid processor?
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
new file mode 100644
index 000000000000..65b2417aebce
--- /dev/null
+++ b/arch/arm/kernel/hyp-stub.S
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2012 Linaro Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/virt.h>
+
+#ifndef ZIMAGE
+/*
+ * For the kernel proper, we need to find out the CPU boot mode long after
+ * boot, so we need to store it in a writable variable.
+ *
+ * This is not in .bss, because we set it sufficiently early that the boot-time
+ * zeroing of .bss would clobber it.
+ */
+.data
+ENTRY(__boot_cpu_mode)
+	.long	0
+.text
+
+	/*
+	 * Save the primary CPU boot mode. Requires 3 scratch registers.
+	 */
+	.macro	store_primary_cpu_mode	reg1, reg2, reg3
+	mrs	\reg1, cpsr
+	and	\reg1, \reg1, #MODE_MASK
+	adr	\reg2, .L__boot_cpu_mode_offset
+	ldr	\reg3, [\reg2]
+	str	\reg1, [\reg2, \reg3]
+	.endm
+
+	/*
+	 * Compare the current mode with the one saved on the primary CPU.
+	 * If they don't match, record that fact. The Z bit indicates
+	 * if there's a match or not.
+	 * Requires 3 additional scratch registers.
+	 */
+	.macro	compare_cpu_mode_with_primary mode, reg1, reg2, reg3
+	adr	\reg2, .L__boot_cpu_mode_offset
+	ldr	\reg3, [\reg2]
+	ldr	\reg1, [\reg2, \reg3]
+	cmp	\mode, \reg1		@ matches primary CPU boot mode?
+	orrne	r7, r7, #BOOT_CPU_MODE_MISMATCH
+	strne	r7, [r5, r6]		@ record what happened and give up
+	.endm
+
+#else	/* ZIMAGE */
+
+	.macro	store_primary_cpu_mode	reg1:req, reg2:req, reg3:req
+	.endm
+
+/*
+ * The zImage loader only runs on one CPU, so we don't bother with multi-CPU
+ * consistency checking:
+ */
+	.macro	compare_cpu_mode_with_primary mode, reg1, reg2, reg3
+	cmp	\mode, \mode
+	.endm
+
+#endif	/* ZIMAGE */
+
+/*
+ * Hypervisor stub installation functions.
+ *
+ * These must be called with the MMU and D-cache off.
+ * They are not ABI compliant and are only intended to be called from the
+ * kernel entry points in head.S.
+ */
+@ Call this from the primary CPU
+ENTRY(__hyp_stub_install)
+	store_primary_cpu_mode	r4, r5, r6
+ENDPROC(__hyp_stub_install)
+
+	@ fall through...
+
+@ Secondary CPUs should call here
+ENTRY(__hyp_stub_install_secondary)
+	mrs	r4, cpsr
+	and	r4, r4, #MODE_MASK
+
+	/*
+	 * If the secondary has booted with a different mode, give up
+	 * immediately.
+	 */
+	compare_cpu_mode_with_primary	r4, r5, r6, r7
+	bxne	lr
+
+	/*
+	 * Once we have given up on one CPU, we do not try to install the
+	 * stub hypervisor on the remaining ones: because the saved boot mode
+	 * is modified, it can't compare equal to the CPSR mode field any
+	 * more.
+	 *
+	 * Otherwise...
+	 */
+
+	cmp	r4, #HYP_MODE
+	bxne	lr			@ give up if the CPU is not in HYP mode
+
+/*
+ * Configure HSCTLR to set correct exception endianness/instruction set
+ * state etc.
+ * Turn off all traps
+ * Eventually, CPU-specific code might be needed -- assume not for now
+ *
+ * This code relies on the "eret" instruction to synchronize the
+ * various coprocessor accesses.
+ */
+	@ Now install the hypervisor stub:
+	adr	r7, __hyp_stub_vectors
+	mcr	p15, 4, r7, c12, c0, 0	@ set hypervisor vector base (HVBAR)
+
+	@ Disable all traps, so we don't get any nasty surprise
+	mov	r7, #0
+	mcr	p15, 4, r7, c1, c1, 0	@ HCR
+	mcr	p15, 4, r7, c1, c1, 2	@ HCPTR
+	mcr	p15, 4, r7, c1, c1, 3	@ HSTR
+
+THUMB(	orr	r7, #(1 << 30)	)	@ HSCTLR.TE
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	orr	r7, #(1 << 9)		@ HSCTLR.EE
+#endif
+	mcr	p15, 4, r7, c1, c0, 0	@ HSCTLR
+
+	mrc	p15, 4, r7, c1, c1, 1	@ HDCR
+	and	r7, #0x1f		@ Preserve HPMN
+	mcr	p15, 4, r7, c1, c1, 1	@ HDCR
+
+#if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER)
+	@ make CNTP_* and CNTPCT accessible from PL1
+	mrc	p15, 0, r7, c0, c1, 1	@ ID_PFR1
+	lsr	r7, #16
+	and	r7, #0xf
+	cmp	r7, #1
+	bne	1f
+	mrc	p15, 4, r7, c14, c1, 0	@ CNTHCTL
+	orr	r7, r7, #3		@ PL1PCEN | PL1PCTEN
+	mcr	p15, 4, r7, c14, c1, 0	@ CNTHCTL
+1:
+#endif
+
+	bic	r7, r4, #MODE_MASK
+	orr	r7, r7, #SVC_MODE
+THUMB(	orr	r7, r7, #PSR_T_BIT	)
+	msr	spsr_cxsf, r7		@ This is SPSR_hyp.
+
+	__MSR_ELR_HYP(14)		@ msr elr_hyp, lr
+	__ERET				@ return, switching to SVC mode
+					@ The boot CPU mode is left in r4.
+ENDPROC(__hyp_stub_install_secondary)
+
+__hyp_stub_do_trap:
+	cmp	r0, #-1
+	mrceq	p15, 4, r0, c12, c0, 0	@ get HVBAR
+	mcrne	p15, 4, r0, c12, c0, 0	@ set HVBAR
+	__ERET
+ENDPROC(__hyp_stub_do_trap)
+
+/*
+ * __hyp_set_vectors: Call this after boot to set the initial hypervisor
+ * vectors as part of hypervisor installation.  On an SMP system, this
+ * should be called on each CPU.
+ *
+ * r0 must be the physical address of the new vector table (which must lie
+ * in the bottom 4GB of physical address space).
+ *
+ * r0 must be 32-byte aligned.
+ *
+ * Before calling this, you must check that the stub hypervisor is installed
+ * everywhere, by waiting for any secondary CPUs to be brought up and then
+ * checking that BOOT_CPU_MODE_HAVE_HYP(__boot_cpu_mode) is true.
+ *
+ * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or
+ * something else went wrong... in such cases, trying to install a new
+ * hypervisor is unlikely to work as desired.
+ *
+ * When you call into your shiny new hypervisor, sp_hyp will contain junk,
+ * so you will need to set that to something sensible at the new hypervisor's
+ * initialisation entry point.
+ */
+ENTRY(__hyp_get_vectors)
+	mov	r0, #-1
+ENDPROC(__hyp_get_vectors)
+	@ fall through
+ENTRY(__hyp_set_vectors)
+	__HVC(0)
+	bx	lr
+ENDPROC(__hyp_set_vectors)
+
+#ifndef ZIMAGE
+.align 2
+.L__boot_cpu_mode_offset:
+	.long	__boot_cpu_mode - .
+#endif
+
+.align 5
+__hyp_stub_vectors:
+__hyp_stub_reset:	W(b)	.
+__hyp_stub_und:		W(b)	.
+__hyp_stub_svc:		W(b)	.
+__hyp_stub_pabort:	W(b)	.
+__hyp_stub_dabort:	W(b)	.
+__hyp_stub_trap:	W(b)	__hyp_stub_do_trap
+__hyp_stub_irq:		W(b)	.
+__hyp_stub_fiq:		W(b)	.
+ENDPROC(__hyp_stub_vectors)
+
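For context on the stub's trap interface: a hypervisor brought up later (KVM
was the motivating user) would claim the CPU roughly as sketched below. This
is a hedged illustration, not code from this pull; example_install_hypervisor()
is hypothetical, and the __hyp_set_vectors()/is_hyp_mode_available() interfaces
are assumed to be the ones <asm/virt.h> declares in this series.

#include <linux/errno.h>
#include <asm/virt.h>	/* assumed to declare the hyp stub interface */

static int example_install_hypervisor(unsigned long vectors_phys)
{
	/*
	 * Only safe once all CPUs are up and every one of them entered
	 * the kernel in HYP mode, i.e. the stub is installed everywhere.
	 */
	if (!is_hyp_mode_available())
		return -EINVAL;

	/*
	 * HVC #0 with r0 != -1 traps to __hyp_stub_do_trap above, which
	 * writes r0 into HVBAR; the next HVC then lands in the new vector
	 * table. HVBAR is per-CPU, so on SMP each CPU must do this.
	 * sp_hyp is still junk at that point; the new hypervisor's init
	 * entry must set up its own stack.
	 */
	__hyp_set_vectors(vectors_phys);
	return 0;
}
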
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index febafa0f552d..da1d1aa20ad9 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -53,6 +53,7 @@
 #include <asm/traps.h>
 #include <asm/unwind.h>
 #include <asm/memblock.h>
+#include <asm/virt.h>
 
 #include "atags.h"
 #include "tcm.h"
@@ -703,6 +704,21 @@ static int __init meminfo_cmp(const void *_a, const void *_b)
 	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
 }
 
+void __init hyp_mode_check(void)
+{
+#ifdef CONFIG_ARM_VIRT_EXT
+	if (is_hyp_mode_available()) {
+		pr_info("CPU: All CPU(s) started in HYP mode.\n");
+		pr_info("CPU: Virtualization extensions available.\n");
+	} else if (is_hyp_mode_mismatched()) {
+		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
+			__boot_cpu_mode & MODE_MASK);
+		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
+	} else
+		pr_info("CPU: All CPU(s) started in SVC mode.\n");
+#endif
+}
+
 void __init setup_arch(char **cmdline_p)
 {
 	struct machine_desc *mdesc;
@@ -748,6 +764,10 @@ void __init setup_arch(char **cmdline_p)
 		smp_init_cpus();
 	}
 #endif
+
+	if (!is_smp())
+		hyp_mode_check();
+
 	reserve_crashkernel();
 
 	tcm_init();
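
hyp_mode_check() leans on two predicates built from the __boot_cpu_mode word
that hyp-stub.S maintains. A sketch of that logic, assuming the encoding this
series uses (the boot mode in the CPSR mode bits, with BOOT_CPU_MODE_MISMATCH
ORed in by any disagreeing secondary); the exact <asm/virt.h> definitions may
differ in detail:

extern int __boot_cpu_mode;	/* written by hyp-stub.S, see above */

static inline bool is_hyp_mode_available(void)
{
	/* Primary booted in HYP and no secondary disagreed. */
	return (__boot_cpu_mode & MODE_MASK) == HYP_MODE &&
	       !(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
}

static inline bool is_hyp_mode_mismatched(void)
{
	/* Any CPU that booted in a different mode set this flag. */
	return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
}

Once any CPU records a mismatch, the saved word can never again equal a plain
mode value, which is exactly what the comment in __hyp_stub_install_secondary
above relies on.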
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index d100eacdb798..8e20754dd31d 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -43,6 +43,7 @@
 #include <asm/ptrace.h>
 #include <asm/localtimer.h>
 #include <asm/smp_plat.h>
+#include <asm/virt.h>
 #include <asm/mach/arch.h>
 
 /*
@@ -202,8 +203,11 @@ int __cpuinit __cpu_disable(void)
 	/*
 	 * Flush user cache and TLB mappings, and then remove this CPU
 	 * from the vm mask set of all processes.
+	 *
+	 * Caches are flushed to the Level of Unification Inner Shareable
+	 * to write back dirty lines to unified caches shared by all CPUs.
 	 */
-	flush_cache_all();
+	flush_cache_louis();
 	local_flush_tlb_all();
 
 	clear_tasks_mm_cpumask(cpu);
@@ -355,6 +359,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
 		num_online_cpus(),
 		bogosum / (500000/HZ),
 		(bogosum / (5000/HZ)) % 100);
+
+	hyp_mode_check();
 }
 
 void __init smp_prepare_boot_cpu(void)
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 1794cc3b0f18..358bca3a995e 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -17,6 +17,8 @@ extern void cpu_resume_mmu(void);
  */
 void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
 {
+	u32 *ctx = ptr;
+
 	*save_ptr = virt_to_phys(ptr);
 
 	/* This must correspond to the LDM in cpu_resume() assembly */
@@ -26,7 +28,20 @@ void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
 
 	cpu_do_suspend(ptr);
 
-	flush_cache_all();
+	flush_cache_louis();
+
+	/*
+	 * flush_cache_louis does not guarantee that
+	 * save_ptr and ptr are cleaned to main memory,
+	 * just up to the Level of Unification Inner Shareable.
+	 * Since the context pointer and context itself
+	 * are to be retrieved with the MMU off, that
+	 * data must be cleaned from all cache levels
+	 * to main memory using "area" cache primitives.
+	 */
+	__cpuc_flush_dcache_area(ctx, ptrsz);
+	__cpuc_flush_dcache_area(save_ptr, sizeof(*save_ptr));
+
 	outer_clean_range(*save_ptr, *save_ptr + ptrsz);
 	outer_clean_range(virt_to_phys(save_ptr),
 			  virt_to_phys(save_ptr) + sizeof(*save_ptr));
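
The rule the new comment states generalizes: anything that will be read back
with the MMU and caches off must be cleaned through every cache level, not
just to LoUIS. A hedged sketch of that pattern using the existing primitives
(clean_for_mmu_off_read() is a hypothetical helper, not part of this series):

#include <asm/cacheflush.h>
#include <asm/memory.h>
#include <asm/outercache.h>

/* Hypothetical helper, for illustration only. */
static void clean_for_mmu_off_read(void *virt, size_t size)
{
	phys_addr_t phys = virt_to_phys(virt);

	/* Inner caches: clean the exact area through all levels. */
	__cpuc_flush_dcache_area(virt, size);

	/* Outer cache (e.g. PL310), if any, works on physical ranges. */
	outer_clean_range(phys, phys + size);
}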