Diffstat (limited to 'arch/arm64')
-rw-r--r-- | arch/arm64/include/asm/mmu_context.h | 152
-rw-r--r-- | arch/arm64/include/asm/thread_info.h | 127
-rw-r--r-- | arch/arm64/kernel/process.c          | 408
-rw-r--r-- | arch/arm64/mm/context.c              | 159
4 files changed, 846 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
new file mode 100644
index 000000000000..f68465dee026
--- /dev/null
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -0,0 +1,152 @@
/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>

#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/pgtable.h>

#define MAX_ASID_BITS	16

extern unsigned int cpu_last_asid;

void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void __new_context(struct mm_struct *mm);

/*
 * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbr = page_to_phys(empty_zero_page);

	asm(
	"	msr	ttbr0_el1, %0		// set TTBR0\n"
	"	isb"
	:
	: "r" (ttbr));
}

static inline void switch_new_context(struct mm_struct *mm)
{
	unsigned long flags;

	__new_context(mm);

	local_irq_save(flags);
	cpu_switch_mm(mm->pgd, mm);
	local_irq_restore(flags);
}

static inline void check_and_switch_context(struct mm_struct *mm,
					    struct task_struct *tsk)
{
	/*
	 * Required during context switch to avoid speculative page table
	 * walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

	if (!((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS))
		/*
		 * The ASID is from the current generation, just switch to the
		 * new pgd. This condition is only true for calls from
		 * context_switch() and interrupts are already disabled.
		 */
		cpu_switch_mm(mm->pgd, mm);
	else if (irqs_disabled())
		/*
		 * Defer the new ASID allocation until after the context
		 * switch critical region since __new_context() cannot be
		 * called with interrupts disabled.
		 */
		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
	else
		/*
		 * This is a direct call to switch_mm() or activate_mm() with
		 * interrupts enabled and a new context.
		 */
		switch_new_context(mm);
}
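mm->context.id packs the hardware ASID in its low MAX_ASID_BITS bits and a generation ("version") number above them, so the XOR-and-shift test above discards the ASID and compares generations only. A minimal standalone sketch of that arithmetic, with made-up values:

	#include <assert.h>

	#define MAX_ASID_BITS	16

	int main(void)
	{
		unsigned int id   = (3u << MAX_ASID_BITS) | 0x2a; /* generation 3, ASID 0x2a */
		unsigned int last = (3u << MAX_ASID_BITS) | 0x91; /* same generation */

		/* same generation: the XOR leaves only ASID bits, the shift drops them */
		assert(((id ^ last) >> MAX_ASID_BITS) == 0);

		/* after a rollover the generations differ, so the test is non-zero */
		last = (4u << MAX_ASID_BITS) | 0x01;
		assert(((id ^ last) >> MAX_ASID_BITS) != 0);
		return 0;
	}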

#define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
#define destroy_context(mm)		do { } while(0)

#define finish_arch_post_lock_switch \
	finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
		struct mm_struct *mm = current->mm;
		unsigned long flags;

		__new_context(mm);

		local_irq_save(flags);
		cpu_switch_mm(mm->pgd, mm);
		local_irq_restore(flags);
	}
}

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 * cpu: cpu number which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_SMP
	/* check for possible thread migration */
	if (!cpumask_empty(mm_cpumask(next)) &&
	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
		__flush_icache_all();
#endif
	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
		check_and_switch_context(next, tsk);
}

#define deactivate_mm(tsk,mm)	do { } while (0)
#define activate_mm(prev,next)	switch_mm(prev, next, NULL)

#endif
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
new file mode 100644
index 000000000000..3659e460071d
--- /dev/null
+++ b/arch/arm64/include/asm/thread_info.h
@@ -0,0 +1,127 @@
/*
 * Based on arch/arm/include/asm/thread_info.h
 *
 * Copyright (C) 2002 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_THREAD_INFO_H
#define __ASM_THREAD_INFO_H

#ifdef __KERNEL__

#include <linux/compiler.h>

#ifndef CONFIG_ARM64_64K_PAGES
#define THREAD_SIZE_ORDER	1
#endif

#define THREAD_SIZE		8192
#define THREAD_START_SP		(THREAD_SIZE - 16)

#ifndef __ASSEMBLY__

struct task_struct;
struct exec_domain;

#include <asm/types.h>

typedef unsigned long mm_segment_t;

/*
 * low level task data that entry.S needs immediate access to.
 */
struct thread_info {
	unsigned long		flags;		/* low level flags */
	mm_segment_t		addr_limit;	/* address limit */
	struct task_struct	*task;		/* main task structure */
	struct exec_domain	*exec_domain;	/* execution domain */
	struct restart_block	restart_block;
	int			preempt_count;	/* 0 => preemptable, <0 => bug */
	int			cpu;		/* cpu */
};

#define INIT_THREAD_INFO(tsk)						\
{									\
	.task		= &tsk,						\
	.exec_domain	= &default_exec_domain,				\
	.flags		= 0,						\
	.preempt_count	= INIT_PREEMPT_COUNT,				\
	.addr_limit	= KERNEL_DS,					\
	.restart_block	= {						\
		.fn	= do_no_restart_syscall,			\
	},								\
}

#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)

/*
 * how to get the thread information struct from C
 */
static inline struct thread_info *current_thread_info(void) __attribute_const__;

static inline struct thread_info *current_thread_info(void)
{
	register unsigned long sp asm ("sp");
	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}
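Kernel stacks are THREAD_SIZE bytes and THREAD_SIZE-aligned, so rounding the stack pointer down recovers the stack base, where struct thread_info lives. A standalone sketch of the masking, with a made-up stack address:

	#include <assert.h>

	#define THREAD_SIZE	8192UL

	int main(void)
	{
		unsigned long stack_base = 0x12340000UL;	/* made-up, THREAD_SIZE-aligned */
		unsigned long sp = stack_base + 0x1f80;		/* somewhere within the stack */

		/* clearing the low 13 bits of sp yields the base of the stack */
		assert((sp & ~(THREAD_SIZE - 1)) == stack_base);
		return 0;
	}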

#define thread_saved_pc(tsk)	\
	((unsigned long)(tsk->thread.cpu_context.pc))
#define thread_saved_sp(tsk)	\
	((unsigned long)(tsk->thread.cpu_context.sp))
#define thread_saved_fp(tsk)	\
	((unsigned long)(tsk->thread.cpu_context.fp))

#endif

/*
 * We use bit 30 of the preempt_count to indicate that kernel
 * preemption is occurring.  See <asm/hardirq.h>.
 */
#define PREEMPT_ACTIVE	0x40000000

/*
 * thread information flags:
 *  TIF_SYSCALL_TRACE	- syscall trace active
 *  TIF_SIGPENDING	- signal pending
 *  TIF_NEED_RESCHED	- rescheduling necessary
 *  TIF_NOTIFY_RESUME	- callback before returning to user
 *  TIF_POLLING_NRFLAG	- true if poll_idle() is polling TIF_NEED_RESCHED
 */
#define TIF_SIGPENDING		0
#define TIF_NEED_RESCHED	1
#define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
#define TIF_SYSCALL_TRACE	8
#define TIF_POLLING_NRFLAG	16
#define TIF_MEMDIE		18	/* is terminating due to OOM killer */
#define TIF_FREEZE		19
#define TIF_RESTORE_SIGMASK	20
#define TIF_SINGLESTEP		21
#define TIF_32BIT		22	/* 32bit process */
#define TIF_SWITCH_MM		23	/* deferred switch_mm */

#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
#define _TIF_32BIT		(1 << TIF_32BIT)

#define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
				 _TIF_NOTIFY_RESUME)

#endif /* __KERNEL__ */
#endif /* __ASM_THREAD_INFO_H */
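For illustration, the return-to-user path tests the flags word against _TIF_WORK_MASK and only takes the slow path when some work bit is set. A standalone sketch (the constants are copied from the header above):

	#include <assert.h>

	#define TIF_SIGPENDING		0
	#define TIF_NEED_RESCHED	1
	#define TIF_NOTIFY_RESUME	2
	#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
	#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
	#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
	#define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
					 _TIF_NOTIFY_RESUME)

	int main(void)
	{
		unsigned long flags = 0;

		assert(!(flags & _TIF_WORK_MASK));	/* nothing to do: fast return */

		flags |= _TIF_SIGPENDING;		/* a signal arrived */
		assert(flags & _TIF_WORK_MASK);		/* slow path before returning */
		return 0;
	}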
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
new file mode 100644
index 000000000000..f22965ea1cfc
--- /dev/null
+++ b/arch/arm64/kernel/process.c
@@ -0,0 +1,408 @@
/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <stdarg.h>

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>

#include <asm/compat.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/stacktrace.h>
#include <asm/fpsimd.h>

static void setup_restart(void)
{
	/*
	 * Tell the mm system that we are going to reboot -
	 * we may need it to insert some 1:1 mappings so that
	 * soft boot works.
	 */
	setup_mm_for_reboot();

	/* Clean and invalidate caches */
	flush_cache_all();

	/* Turn D-cache off */
	cpu_cache_off();

	/* Push out any further dirty data, and ensure cache is empty */
	flush_cache_all();
}

void soft_restart(unsigned long addr)
{
	setup_restart();
	cpu_reset(addr);
}

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

void (*pm_restart)(const char *cmd);
EXPORT_SYMBOL_GPL(pm_restart);


/*
 * This is our default idle handler.
 */
static void default_idle(void)
{
	/*
	 * This should do all the clock switching and wait-for-interrupt
	 * tricks.
	 */
	cpu_do_idle();
	local_irq_enable();
}

void (*pm_idle)(void) = default_idle;
EXPORT_SYMBOL_GPL(pm_idle);
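Since pm_idle is an exported hook, platform code can install its own routine. A hypothetical sketch (myplat_idle and myplat_init are made-up names, not part of this patch):

	/* hypothetical platform override of the default idle routine */
	static void myplat_idle(void)
	{
		/* enter a platform-specific low-power state, then unmask IRQs */
		cpu_do_idle();
		local_irq_enable();
	}

	static int __init myplat_init(void)
	{
		pm_idle = myplat_idle;
		return 0;
	}
	arch_initcall(myplat_init);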

/*
 * The idle thread has rather strange semantics for calling pm_idle,
 * but this is what x86 does and we need to do the same, so that
 * things like cpuidle get called in the same way.
 */
void cpu_idle(void)
{
	local_fiq_enable();

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();
		while (!need_resched()) {
			/*
			 * We need to disable interrupts here to ensure
			 * we don't miss a wakeup call.
			 */
			local_irq_disable();
			if (!need_resched()) {
				stop_critical_timings();
				pm_idle();
				start_critical_timings();
				/*
				 * pm_idle functions should always return
				 * with IRQs enabled.
				 */
				WARN_ON(irqs_disabled());
			} else {
				local_irq_enable();
			}
		}
		rcu_idle_exit();
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}

void machine_shutdown(void)
{
#ifdef CONFIG_SMP
	smp_send_stop();
#endif
}

void machine_halt(void)
{
	machine_shutdown();
	while (1);
}

void machine_power_off(void)
{
	machine_shutdown();
	if (pm_power_off)
		pm_power_off();
}

void machine_restart(char *cmd)
{
	machine_shutdown();

	/* Disable interrupts first */
	local_irq_disable();
	local_fiq_disable();

	/* Now call the architecture specific reboot code. */
	if (pm_restart)
		pm_restart(cmd);

	/*
	 * Whoops - the architecture was unable to reboot.
	 */
	printk("Reboot failed -- System halted\n");
	while (1);
}

void __show_regs(struct pt_regs *regs)
{
	int i;

	printk("CPU: %d %s (%s %.*s)\n",
		raw_smp_processor_id(), print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	print_symbol("PC is at %s\n", instruction_pointer(regs));
	print_symbol("LR is at %s\n", regs->regs[30]);
	printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
	       regs->pc, regs->regs[30], regs->pstate);
	printk("sp : %016llx\n", regs->sp);
	for (i = 29; i >= 0; i--) {
		printk("x%-2d: %016llx ", i, regs->regs[i]);
		if (i % 2 == 0)
			printk("\n");
	}
	printk("\n");
}

void show_regs(struct pt_regs *regs)
{
	printk("\n");
	printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
	__show_regs(regs);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
}

void flush_thread(void)
{
	fpsimd_flush_thread();
	flush_ptrace_hw_breakpoint(current);
}

void release_thread(struct task_struct *dead_task)
{
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	fpsimd_save_state(&current->thread.fpsimd_state);
	*dst = *src;
	return 0;
}

asmlinkage void ret_from_fork(void) asm("ret_from_fork");

int copy_thread(unsigned long clone_flags, unsigned long stack_start,
		unsigned long stk_sz, struct task_struct *p,
		struct pt_regs *regs)
{
	struct pt_regs *childregs = task_pt_regs(p);
	unsigned long tls = p->thread.tp_value;

	*childregs = *regs;
	childregs->regs[0] = 0;

	if (is_compat_thread(task_thread_info(p)))
		childregs->compat_sp = stack_start;
	else {
		/*
		 * Read the current TLS pointer from tpidr_el0 as it may be
		 * out-of-sync with the saved value.
		 */
		asm("mrs %0, tpidr_el0" : "=r" (tls));
		childregs->sp = stack_start;
	}

	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
	p->thread.cpu_context.sp = (unsigned long)childregs;
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;

	/* If a TLS pointer was passed to clone, use that for the new thread. */
	if (clone_flags & CLONE_SETTLS)
		tls = regs->regs[3];
	p->thread.tp_value = tls;

	ptrace_hw_copy_thread(p);

	return 0;
}
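childregs lives at the top of the child's kernel stack: task_pt_regs() yields the pt_regs slot just below THREAD_START_SP. A sketch of that arithmetic, assuming the usual task_stack_page() helper (this is not the kernel's literal definition):

	/* sketch: locate a task's register frame at the top of its stack */
	static struct pt_regs *pt_regs_of(struct task_struct *p)
	{
		unsigned long stack_top =
			(unsigned long)task_stack_page(p) + THREAD_START_SP;

		return (struct pt_regs *)stack_top - 1;
	}

With cpu_context.sp pointing at this frame and cpu_context.pc at ret_from_fork, the first cpu_switch_to() into the child resumes in ret_from_fork, which restores childregs and returns to user space with x0 == 0.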

static void tls_thread_switch(struct task_struct *next)
{
	unsigned long tpidr, tpidrro;

	if (!is_compat_task()) {
		asm("mrs %0, tpidr_el0" : "=r" (tpidr));
		current->thread.tp_value = tpidr;
	}

	if (is_compat_thread(task_thread_info(next))) {
		tpidr = 0;
		tpidrro = next->thread.tp_value;
	} else {
		tpidr = next->thread.tp_value;
		tpidrro = 0;
	}

	asm(
	"	msr	tpidr_el0, %0\n"
	"	msr	tpidrro_el0, %1"
	: : "r" (tpidr), "r" (tpidrro));
}
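From EL0, the register switched here is directly readable; a minimal userspace AArch64 sketch:

	/* userspace (AArch64): read the TLS pointer the kernel installed */
	static inline unsigned long read_tls(void)
	{
		unsigned long tp;

		asm("mrs %0, tpidr_el0" : "=r" (tp));
		return tp;
	}

Compat (32-bit) tasks instead see their value in tpidrro_el0, which is read-only at EL0; that is why the switch above swaps which of the two registers carries tp_value.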

/*
 * Thread switching.
 */
struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next)
{
	struct task_struct *last;

	fpsimd_thread_switch(next);
	tls_thread_switch(next);
	hw_breakpoint_thread_switch(next);

	/* the actual thread switch */
	last = cpu_switch_to(prev, next);

	return last;
}

/*
 * Fill in the task's elfregs structure for a core dump.
 */
int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
{
	elf_core_copy_regs(elfregs, task_pt_regs(t));
	return 1;
}

/*
 * fill in the fpe structure for a core dump...
 */
int dump_fpu(struct pt_regs *regs, struct user_fp *fp)
{
	return 0;
}
EXPORT_SYMBOL(dump_fpu);

/*
 * Shuffle the argument into the correct register before calling the
 * thread function.  x1 is the thread argument, x2 is the pointer to
 * the thread function, and x3 points to the exit function.
 */
extern void kernel_thread_helper(void);
asm(	".section .text\n"
"	.align\n"
"	.type	kernel_thread_helper, #function\n"
"kernel_thread_helper:\n"
"	mov	x0, x1\n"
"	mov	x30, x3\n"
"	br	x2\n"
"	.size	kernel_thread_helper, . - kernel_thread_helper\n"
"	.previous");

#define kernel_thread_exit	do_exit

/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.regs[1] = (unsigned long)arg;
	regs.regs[2] = (unsigned long)fn;
	regs.regs[3] = (unsigned long)kernel_thread_exit;
	regs.pc = (unsigned long)kernel_thread_helper;
	regs.pstate = PSR_MODE_EL1h;

	return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
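A hypothetical caller, for illustration only (my_worker, my_driver_init and the clone flags are made-up, not part of this patch):

	/* hypothetical usage: start a kernel thread running my_worker(NULL) */
	static int my_worker(void *arg)
	{
		/* ... do work; returning jumps via x3 to do_exit() ... */
		return 0;
	}

	static int __init my_driver_init(void)
	{
		pid_t pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES);

		return pid < 0 ? pid : 0;
	}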

unsigned long get_wchan(struct task_struct *p)
{
	struct stackframe frame;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	frame.fp = thread_saved_fp(p);
	frame.sp = thread_saved_sp(p);
	frame.pc = thread_saved_pc(p);
	do {
		int ret = unwind_frame(&frame);
		if (ret < 0)
			return 0;
		if (!in_sched_functions(frame.pc))
			return frame.pc;
	} while (count++ < 16);
	return 0;
}

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static unsigned long randomize_base(unsigned long base)
{
	unsigned long range_end = base + (STACK_RND_MASK << PAGE_SHIFT) + 1;

	return randomize_range(base, range_end, 0) ? : base;
}
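A worked example of the range computation, assuming the generic STACK_RND_MASK of 0x7ff and 4K pages (both are assumptions here, not taken from the patch):

	#include <stdio.h>

	int main(void)
	{
		/* assumptions: STACK_RND_MASK == 0x7ff, PAGE_SHIFT == 12 */
		unsigned long base      = 0x10000000UL;	/* made-up brk base */
		unsigned long range_end = base + (0x7ffUL << 12) + 1;

		/* randomize_range() picks a page-aligned value in [base, range_end) */
		printf("brk may move by up to %#lx bytes\n", range_end - 1 - base);
		return 0;
	}

This prints 0x7ff000, i.e. under those assumptions the randomized base moves by at most roughly 8 MiB.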

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	return randomize_base(mm->brk);
}

unsigned long randomize_et_dyn(unsigned long base)
{
	return randomize_base(base);
}
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
new file mode 100644
index 000000000000..baa758d37021
--- /dev/null
+++ b/arch/arm64/mm/context.c
@@ -0,0 +1,159 @@
/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cachetype.h>

#define asid_bits()	\
	(((read_cpuid(ID_AA64MMFR0_EL1) & 0xf0) >> 2) + 8)

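ID_AA64MMFR0_EL1[7:4] (ASIDBits) reads 0 for 8-bit ASIDs and 2 for 16-bit ASIDs, and the macro maps the raw field straight to a width. A standalone check of that decoding:

	#include <assert.h>

	static unsigned int asid_bits_of(unsigned long mmfr0)
	{
		return ((mmfr0 & 0xf0) >> 2) + 8;
	}

	int main(void)
	{
		assert(asid_bits_of(0x00) == 8);	/* ASIDBits == 0: 8-bit ASIDs */
		assert(asid_bits_of(0x20) == 16);	/* ASIDBits == 2: 16-bit ASIDs */
		return 0;
	}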
#define ASID_FIRST_VERSION	(1 << MAX_ASID_BITS)

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
unsigned int cpu_last_asid = ASID_FIRST_VERSION;

/*
 * We fork()ed a process, and we need a new context for the child to run in.
 */
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.id = 0;
	raw_spin_lock_init(&mm->context.id_lock);
}

static void flush_context(void)
{
	/* set the reserved TTBR0 before flushing the TLB */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
	if (icache_is_aivivt())
		__flush_icache_all();
}

#ifdef CONFIG_SMP

static void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	unsigned long flags;

	/*
	 * Locking needed for multi-threaded applications where the same
	 * mm->context.id could be set from different CPUs during the
	 * broadcast. This function is also called via IPI so the
	 * mm->context.id_lock has to be IRQ-safe.
	 */
	raw_spin_lock_irqsave(&mm->context.id_lock, flags);
	if (likely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) {
		/*
		 * Old version of ASID found. Set the new one and reset
		 * mm_cpumask(mm).
		 */
		mm->context.id = asid;
		cpumask_clear(mm_cpumask(mm));
	}
	raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);

	/*
	 * Set the mm_cpumask(mm) bit for the current CPU.
	 */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}

/*
 * Reset the ASID on the current CPU. This function call is broadcast from the
 * CPU handling the ASID rollover and holding cpu_asid_lock.
 */
static void reset_context(void *info)
{
	unsigned int asid;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = current->active_mm;

	smp_rmb();
	asid = cpu_last_asid + cpu;

	flush_context();
	set_mm_context(mm, asid);

	/* set the new ASID */
	cpu_switch_mm(mm->pgd, mm);
}

#else

static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	mm->context.id = asid;
	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
}

#endif

void __new_context(struct mm_struct *mm)
{
	unsigned int asid;
	unsigned int bits = asid_bits();

	raw_spin_lock(&cpu_asid_lock);
#ifdef CONFIG_SMP
	/*
	 * Check the ASID again, in case the change was broadcast from another
	 * CPU before we acquired the lock.
	 */
	if (!unlikely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		raw_spin_unlock(&cpu_asid_lock);
		return;
	}
#endif
	/*
	 * At this point, it is guaranteed that the current mm (with an old
	 * ASID) isn't active on any other CPU since the ASIDs are changed
	 * simultaneously via IPI.
	 */
	asid = ++cpu_last_asid;

	/*
	 * If we've used up all our ASIDs, we need to start a new version and
	 * flush the TLB.
	 */
	if (unlikely((asid & ((1 << bits) - 1)) == 0)) {
		/* increment the ASID version */
		cpu_last_asid += (1 << MAX_ASID_BITS) - (1 << bits);
		if (cpu_last_asid == 0)
			cpu_last_asid = ASID_FIRST_VERSION;
		asid = cpu_last_asid + smp_processor_id();
		flush_context();
#ifdef CONFIG_SMP
		smp_wmb();
		smp_call_function(reset_context, NULL, 1);
#endif
		cpu_last_asid += NR_CPUS - 1;
	}

	set_mm_context(mm, asid);
	raw_spin_unlock(&cpu_asid_lock);
}
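A standalone trace of the rollover arithmetic above, assuming 16 hardware ASID bits and four CPUs (the starting value is made up):

	#include <assert.h>

	#define MAX_ASID_BITS	16
	#define NR_CPUS		4

	int main(void)
	{
		unsigned int bits = 16;			/* asid_bits() on 16-bit hardware */
		unsigned int cpu_last_asid = 0x1ffff;	/* last ASID of generation 1 */
		unsigned int asid = ++cpu_last_asid;	/* 0x20000: low 16 bits are zero */

		if ((asid & ((1u << bits) - 1)) == 0) {
			/* skip the unusable ASID range; a no-op when bits == 16 */
			cpu_last_asid += (1u << MAX_ASID_BITS) - (1u << bits);
			asid = cpu_last_asid + 0;	/* CPU 0 takes generation 2's first ASID */
			/* reset_context() hands CPU n the ASID cpu_last_asid + n, */
			cpu_last_asid += NR_CPUS - 1;	/* so reserve one per remaining CPU */
		}
		assert(asid == 0x20000);
		assert(cpu_last_asid == 0x20003);
		return 0;
	}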