author    Sage Weil <sage@inktank.com>  2013-08-15 14:11:45 -0400
committer Sage Weil <sage@inktank.com>  2013-08-15 14:11:45 -0400
commit    ee3e542fec6e69bc9fb668698889a37d93950ddf (patch)
tree      e74ee766a4764769ef1d3d45d266b4dea64101d3 /arch/arm64/kernel
parent    fe2a801b50c0bb8039d627e5ae1fec249d10ff39 (diff)
parent    f1d6e17f540af37bb1891480143669ba7636c4cf (diff)
Merge remote-tracking branch 'linus/master' into testing
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/asm-offsets.c    | 34
-rw-r--r--  arch/arm64/kernel/debug-monitors.c | 72
-rw-r--r--  arch/arm64/kernel/entry.S          |  2
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c  |  4
-rw-r--r--  arch/arm64/kernel/process.c        |  4
-rw-r--r--  arch/arm64/kernel/ptrace.c         | 59
-rw-r--r--  arch/arm64/kernel/smp.c            | 23
-rw-r--r--  arch/arm64/kernel/time.c           |  6
-rw-r--r--  arch/arm64/kernel/traps.c          |  5
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S    | 30
10 files changed, 141 insertions(+), 98 deletions(-)
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index a2a4d810bea3..666e231d410b 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -21,6 +21,7 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
+#include <linux/kvm_host.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/cputable.h>
@@ -104,5 +105,38 @@ int main(void)
   BLANK();
   DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
   DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
+  BLANK();
+#ifdef CONFIG_KVM_ARM_HOST
+  DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
+  DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
+  DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
+  DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
+  DEFINE(CPU_SP_EL1, offsetof(struct kvm_regs, sp_el1));
+  DEFINE(CPU_ELR_EL1, offsetof(struct kvm_regs, elr_el1));
+  DEFINE(CPU_SPSR, offsetof(struct kvm_regs, spsr));
+  DEFINE(CPU_SYSREGS, offsetof(struct kvm_cpu_context, sys_regs));
+  DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2));
+  DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2));
+  DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
+  DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
+  DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
+  DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
+  DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
+  DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
+  DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff));
+  DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
+  DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
+  DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
+  DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr));
+  DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr));
+  DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr));
+  DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr));
+  DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr));
+  DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr));
+  DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr));
+  DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
+  DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
+  DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
+#endif
   return 0;
 }
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index f4726dc054b3..cbfacf7fb438 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -24,6 +24,7 @@
 #include <linux/init.h>
 #include <linux/ptrace.h>
 #include <linux/stat.h>
+#include <linux/uaccess.h>
 
 #include <asm/debug-monitors.h>
 #include <asm/local.h>
@@ -140,7 +141,7 @@ static void clear_os_lock(void *unused)
 	isb();
 }
 
-static int __cpuinit os_lock_notify(struct notifier_block *self,
+static int os_lock_notify(struct notifier_block *self,
 				    unsigned long action, void *data)
 {
 	int cpu = (unsigned long)data;
@@ -149,11 +150,11 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata os_lock_nb = {
+static struct notifier_block os_lock_nb = {
 	.notifier_call = os_lock_notify,
 };
 
-static int __cpuinit debug_monitors_init(void)
+static int debug_monitors_init(void)
 {
 	/* Clear the OS lock. */
 	smp_call_function(clear_os_lock, NULL, 1);
@@ -226,13 +227,74 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
 	return 0;
 }
 
-static int __init single_step_init(void)
+static int brk_handler(unsigned long addr, unsigned int esr,
+		       struct pt_regs *regs)
+{
+	siginfo_t info;
+
+	if (!user_mode(regs))
+		return -EFAULT;
+
+	info = (siginfo_t) {
+		.si_signo = SIGTRAP,
+		.si_errno = 0,
+		.si_code = TRAP_BRKPT,
+		.si_addr = (void __user *)instruction_pointer(regs),
+	};
+
+	force_sig_info(SIGTRAP, &info, current);
+	return 0;
+}
+
+int aarch32_break_handler(struct pt_regs *regs)
+{
+	siginfo_t info;
+	unsigned int instr;
+	bool bp = false;
+	void __user *pc = (void __user *)instruction_pointer(regs);
+
+	if (!compat_user_mode(regs))
+		return -EFAULT;
+
+	if (compat_thumb_mode(regs)) {
+		/* get 16-bit Thumb instruction */
+		get_user(instr, (u16 __user *)pc);
+		if (instr == AARCH32_BREAK_THUMB2_LO) {
+			/* get second half of 32-bit Thumb-2 instruction */
+			get_user(instr, (u16 __user *)(pc + 2));
+			bp = instr == AARCH32_BREAK_THUMB2_HI;
+		} else {
+			bp = instr == AARCH32_BREAK_THUMB;
+		}
+	} else {
+		/* 32-bit ARM instruction */
+		get_user(instr, (u32 __user *)pc);
+		bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM;
+	}
+
+	if (!bp)
+		return -EFAULT;
+
+	info = (siginfo_t) {
+		.si_signo = SIGTRAP,
+		.si_errno = 0,
+		.si_code = TRAP_BRKPT,
+		.si_addr = pc,
+	};
+
+	force_sig_info(SIGTRAP, &info, current);
+	return 0;
+}
+
+static int __init debug_traps_init(void)
 {
 	hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP,
 			      TRAP_HWBKPT, "single-step handler");
+	hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP,
+			      TRAP_BRKPT, "ptrace BRK handler");
 	return 0;
 }
-arch_initcall(single_step_init);
+arch_initcall(debug_traps_init);
 
 /* Re-enable single step for syscall restarting. */
 void user_rewind_single_step(struct task_struct *task)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 1d1314280a03..6ad781b21c08 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -121,7 +121,7 @@
 
 	.macro	get_thread_info, rd
 	mov	\rd, sp
-	and	\rd, \rd, #~((1 << 13) - 1)	// top of 8K stack
+	and	\rd, \rd, #~(THREAD_SIZE - 1)	// top of stack
 	.endm
 
 /*
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 5ab825c59db9..329218ca9ffb 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -821,7 +821,7 @@ static void reset_ctrl_regs(void *unused)
 	}
 }
 
-static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
+static int hw_breakpoint_reset_notify(struct notifier_block *self,
 						unsigned long action,
 						void *hcpu)
 {
@@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
+static struct notifier_block hw_breakpoint_reset_nb = {
 	.notifier_call = hw_breakpoint_reset_notify,
 };
 
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 46f02c3b5015..57fb55c44c90 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -81,7 +81,7 @@ void soft_restart(unsigned long addr)
 void (*pm_power_off)(void);
 EXPORT_SYMBOL_GPL(pm_power_off);
 
-void (*arm_pm_restart)(char str, const char *cmd);
+void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
 EXPORT_SYMBOL_GPL(arm_pm_restart);
 
 void arch_cpu_idle_prepare(void)
@@ -132,7 +132,7 @@ void machine_restart(char *cmd)
 
 	/* Now call the architecture specific reboot code. */
 	if (arm_pm_restart)
-		arm_pm_restart('h', cmd);
+		arm_pm_restart(reboot_mode, cmd);
 
 	/*
 	 * Whoops - the architecture was unable to reboot.
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 6e1e77f1831c..fecdbf7de82e 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -53,28 +53,6 @@ void ptrace_disable(struct task_struct *child)
 {
 }
 
-/*
- * Handle hitting a breakpoint.
- */
-static int ptrace_break(struct pt_regs *regs)
-{
-	siginfo_t info = {
-		.si_signo = SIGTRAP,
-		.si_errno = 0,
-		.si_code = TRAP_BRKPT,
-		.si_addr = (void __user *)instruction_pointer(regs),
-	};
-
-	force_sig_info(SIGTRAP, &info, current);
-	return 0;
-}
-
-static int arm64_break_trap(unsigned long addr, unsigned int esr,
-			    struct pt_regs *regs)
-{
-	return ptrace_break(regs);
-}
-
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 /*
  * Handle hitting a HW-breakpoint.
@@ -817,33 +795,6 @@ static const struct user_regset_view user_aarch32_view = {
 	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
 };
 
-int aarch32_break_trap(struct pt_regs *regs)
-{
-	unsigned int instr;
-	bool bp = false;
-	void __user *pc = (void __user *)instruction_pointer(regs);
-
-	if (compat_thumb_mode(regs)) {
-		/* get 16-bit Thumb instruction */
-		get_user(instr, (u16 __user *)pc);
-		if (instr == AARCH32_BREAK_THUMB2_LO) {
-			/* get second half of 32-bit Thumb-2 instruction */
-			get_user(instr, (u16 __user *)(pc + 2));
-			bp = instr == AARCH32_BREAK_THUMB2_HI;
-		} else {
-			bp = instr == AARCH32_BREAK_THUMB;
-		}
-	} else {
-		/* 32-bit ARM instruction */
-		get_user(instr, (u32 __user *)pc);
-		bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM;
-	}
-
-	if (bp)
-		return ptrace_break(regs);
-	return 1;
-}
-
 static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
 				   compat_ulong_t __user *ret)
 {
@@ -1111,16 +1062,6 @@ long arch_ptrace(struct task_struct *child, long request,
 	return ptrace_request(child, request, addr, data);
 }
 
-
-static int __init ptrace_break_init(void)
-{
-	hook_debug_fault_code(DBG_ESR_EVT_BRK, arm64_break_trap, SIGTRAP,
-			      TRAP_BRKPT, "ptrace BRK handler");
-	return 0;
-}
-core_initcall(ptrace_break_init);
-
-
 asmlinkage int syscall_trace(int dir, struct pt_regs *regs)
 {
 	unsigned long saved_reg;
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 5d54e3717bf8..fee5cce83450 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -71,7 +71,7 @@ static DEFINE_RAW_SPINLOCK(boot_lock);
  * in coherency or not. This is necessary for the hotplug code to work
  * reliably.
  */
-static void __cpuinit write_pen_release(u64 val)
+static void write_pen_release(u64 val)
 {
 	void *start = (void *)&secondary_holding_pen_release;
 	unsigned long size = sizeof(secondary_holding_pen_release);
@@ -84,7 +84,7 @@ static void __cpuinit write_pen_release(u64 val)
  * Boot a secondary CPU, and assign it the specified idle task.
  * This also gives us the initial stack to use for this CPU.
  */
-static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	unsigned long timeout;
 
@@ -122,7 +122,7 @@ static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
 
 static DECLARE_COMPLETION(cpu_running);
 
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
 	int ret;
 
@@ -162,7 +162,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
  * This is the secondary CPU boot entry. We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
  */
-asmlinkage void __cpuinit secondary_start_kernel(void)
+asmlinkage void secondary_start_kernel(void)
 {
 	struct mm_struct *mm = &init_mm;
 	unsigned int cpu = smp_processor_id();
@@ -200,13 +200,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	raw_spin_unlock(&boot_lock);
 
 	/*
-	 * Enable local interrupts.
-	 */
-	notify_cpu_starting(cpu);
-	local_irq_enable();
-	local_fiq_enable();
-
-	/*
 	 * OK, now it's safe to let the boot CPU continue. Wait for
 	 * the CPU migration code to notice that the CPU is online
 	 * before we continue.
@@ -215,6 +208,14 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	complete(&cpu_running);
 
 	/*
+	 * Enable GIC and timers.
+	 */
+	notify_cpu_starting(cpu);
+
+	local_irq_enable();
+	local_fiq_enable();
+
+	/*
 	 * OK, it's off to the idle thread for us
 	 */
 	cpu_startup_entry(CPUHP_ONLINE);
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index a551f88ae2c1..03dc3718eb13 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -68,12 +68,6 @@ unsigned long long notrace sched_clock(void)
 	return arch_timer_read_counter() * sched_clock_mult;
 }
 
-int read_current_timer(unsigned long *timer_value)
-{
-	*timer_value = arch_timer_read_counter();
-	return 0;
-}
-
 void __init time_init(void)
 {
 	u32 arch_timer_rate;
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index f30852d28590..7ffadddb645d 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -32,6 +32,7 @@
 #include <linux/syscalls.h>
 
 #include <asm/atomic.h>
+#include <asm/debug-monitors.h>
 #include <asm/traps.h>
 #include <asm/stacktrace.h>
 #include <asm/exception.h>
@@ -261,11 +262,9 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 	siginfo_t info;
 	void __user *pc = (void __user *)instruction_pointer(regs);
 
-#ifdef CONFIG_COMPAT
 	/* check for AArch32 breakpoint instructions */
-	if (compat_user_mode(regs) && aarch32_break_trap(regs) == 0)
+	if (!aarch32_break_handler(regs))
 		return;
-#endif
 
 	if (show_unhandled_signals && unhandled_signal(current, SIGILL) &&
 	    printk_ratelimit()) {
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 3fae2be8b016..f5e55747242f 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -17,6 +17,19 @@ ENTRY(stext)
 
 jiffies = jiffies_64;
 
+#define HYPERVISOR_TEXT					\
+	/*						\
+	 * Force the alignment to be compatible with	\
+	 * the vectors requirements			\
+	 */						\
+	. = ALIGN(2048);				\
+	VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;	\
+	*(.hyp.idmap.text)				\
+	VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;	\
+	VMLINUX_SYMBOL(__hyp_text_start) = .;		\
+	*(.hyp.text)					\
+	VMLINUX_SYMBOL(__hyp_text_end) = .;
+
 SECTIONS
 {
 	/*
@@ -49,6 +62,7 @@ SECTIONS
 			TEXT_TEXT
 			SCHED_TEXT
 			LOCK_TEXT
+			HYPERVISOR_TEXT
 			*(.fixup)
 			*(.gnu.warning)
 		. = ALIGN(16);
@@ -56,7 +70,7 @@ SECTIONS
 	}
 
 	RO_DATA(PAGE_SIZE)
-
+	EXCEPTION_TABLE(8)
 	_etext = .;			/* End of text and rodata section */
 
 	. = ALIGN(PAGE_SIZE);
@@ -99,14 +113,6 @@ SECTIONS
 		READ_MOSTLY_DATA(64)
 
 		/*
-		 * The exception fixup table (might need resorting at runtime)
-		 */
-		. = ALIGN(32);
-		__start___ex_table = .;
-		*(__ex_table)
-		__stop___ex_table = .;
-
-		/*
 		 * and the usual data section
 		 */
 		DATA_DATA
@@ -124,3 +130,9 @@ SECTIONS
 	STABS_DEBUG
 	.comment 0 : { *(.comment) }
 }
+
+/*
+ * The HYP init code can't be more than a page long.
+ */
+ASSERT(((__hyp_idmap_text_start + PAGE_SIZE) > __hyp_idmap_text_end),
+       "HYP init code too big")