diff options
author | Catalin Marinas <catalin.marinas@arm.com> | 2012-03-05 06:49:28 -0500 |
---|---|---|
committer | Catalin Marinas <catalin.marinas@arm.com> | 2012-09-17 08:41:58 -0400 |
commit | b3901d54dc4f73acdc6b7c6e5a7a496d3afeae61 (patch) | |
tree | d7948a9ba283b767beced9f03f4effc26c120304 /arch/arm64/include | |
parent | 1d18c47c735e8adfe531fc41fae31e98f86b68fe (diff) |
arm64: Process management
The patch adds support for thread creation and context switching. The
context switching CPU specific code is introduced with the CPU support
patch (part of the arch/arm64/mm/proc.S file). AArch64 supports
ASID-tagged TLBs and the ASID can be either 8 or 16-bit wide (detectable
via the ID_AA64MMFR0_EL1 register).
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Tony Lindgren <tony@atomide.com>
Acked-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/arm64/include')
-rw-r--r-- | arch/arm64/include/asm/mmu_context.h | 152 | ||||
-rw-r--r-- | arch/arm64/include/asm/thread_info.h | 127 |
2 files changed, 279 insertions, 0 deletions
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h new file mode 100644 index 000000000000..f68465dee026 --- /dev/null +++ b/arch/arm64/include/asm/mmu_context.h | |||
@@ -0,0 +1,152 @@ | |||
1 | /* | ||
2 | * Based on arch/arm/include/asm/mmu_context.h | ||
3 | * | ||
4 | * Copyright (C) 1996 Russell King. | ||
5 | * Copyright (C) 2012 ARM Ltd. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | #ifndef __ASM_MMU_CONTEXT_H | ||
20 | #define __ASM_MMU_CONTEXT_H | ||
21 | |||
22 | #include <linux/compiler.h> | ||
23 | #include <linux/sched.h> | ||
24 | |||
25 | #include <asm/cacheflush.h> | ||
26 | #include <asm/proc-fns.h> | ||
27 | #include <asm-generic/mm_hooks.h> | ||
28 | #include <asm/cputype.h> | ||
29 | #include <asm/pgtable.h> | ||
30 | |||
/* Maximum ASID width supported by the architecture (8 or 16 bits). */
#define MAX_ASID_BITS	16

/* Most recently allocated ASID; the generation count lives in the bits
 * above MAX_ASID_BITS (see the generation test in
 * check_and_switch_context()). */
extern unsigned int cpu_last_asid;

/* Initialise mm->context for a newly created address space. */
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
/* Allocate a fresh ASID for @mm; cannot be called with interrupts
 * disabled (see check_and_switch_context()). */
void __new_context(struct mm_struct *mm);
37 | |||
/*
 * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
	/* Physical address of the all-zero reserved page. */
	unsigned long ttbr = page_to_phys(empty_zero_page);

	asm(
	"	msr	ttbr0_el1, %0			// set TTBR0\n"
	"	isb"	/* synchronise the system register update */
	:
	: "r" (ttbr));
}
51 | |||
52 | static inline void switch_new_context(struct mm_struct *mm) | ||
53 | { | ||
54 | unsigned long flags; | ||
55 | |||
56 | __new_context(mm); | ||
57 | |||
58 | local_irq_save(flags); | ||
59 | cpu_switch_mm(mm->pgd, mm); | ||
60 | local_irq_restore(flags); | ||
61 | } | ||
62 | |||
/*
 * Install @mm's page tables on this CPU, allocating a new ASID first if
 * @mm's context id is from an older ASID generation than cpu_last_asid.
 * Reached both from context_switch() (interrupts disabled) and from
 * direct switch_mm()/activate_mm() calls (interrupts enabled).
 */
static inline void check_and_switch_context(struct mm_struct *mm,
					    struct task_struct *tsk)
{
	/*
	 * Required during context switch to avoid speculative page table
	 * walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

	/* Generation check: XOR cancels the shared generation bits above
	 * MAX_ASID_BITS, so the shift result is 0 iff same generation. */
	if (!((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS))
		/*
		 * The ASID is from the current generation, just switch to the
		 * new pgd. This condition is only true for calls from
		 * context_switch() and interrupts are already disabled.
		 */
		cpu_switch_mm(mm->pgd, mm);
	else if (irqs_disabled())
		/*
		 * Defer the new ASID allocation until after the context
		 * switch critical region since __new_context() cannot be
		 * called with interrupts disabled. The deferred work runs
		 * in finish_arch_post_lock_switch().
		 */
		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
	else
		/*
		 * That is a direct call to switch_mm() or activate_mm() with
		 * interrupts enabled and a new context.
		 */
		switch_new_context(mm);
}
93 | |||
/* New contexts are allocated on first use; nothing to destroy. */
#define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
#define destroy_context(mm)		do { } while(0)

#define finish_arch_post_lock_switch \
	finish_arch_post_lock_switch
/*
 * Runs after the scheduler has dropped its locks: perform the mm switch
 * that check_and_switch_context() deferred via TIF_SWITCH_MM because a
 * new ASID could not be allocated with interrupts disabled.
 */
static inline void finish_arch_post_lock_switch(void)
{
	if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
		struct mm_struct *mm = current->mm;
		unsigned long flags;

		/* Interrupts are enabled here, so the allocation is legal. */
		__new_context(mm);

		local_irq_save(flags);
		cpu_switch_mm(mm->pgd, mm);
		local_irq_restore(flags);
	}
}
112 | |||
/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb (tsk->mm will be NULL)
 *
 * Nothing to do here on arm64 — the hook is intentionally empty.
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
126 | |||
/*
 * This is the actual mm switch as far as the scheduler
 * is concerned. No registers are touched. We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_SMP
	/*
	 * Check for possible thread migration: @next has run somewhere but
	 * never on this CPU. NOTE(review): the I-cache flush here mirrors
	 * the arm32 port — confirm it is still required on arm64.
	 */
	if (!cpumask_empty(mm_cpumask(next)) &&
	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
		__flush_icache_all();
#endif
	/* Skip the switch only when @next is unchanged AND this CPU was
	 * already marked in its cpumask. */
	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
		check_and_switch_context(next, tsk);
}

#define deactivate_mm(tsk,mm)	do { } while (0)
#define activate_mm(prev,next)	switch_mm(prev, next, NULL)
151 | |||
152 | #endif | ||
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h new file mode 100644 index 000000000000..3659e460071d --- /dev/null +++ b/arch/arm64/include/asm/thread_info.h | |||
@@ -0,0 +1,127 @@ | |||
1 | /* | ||
2 | * Based on arch/arm/include/asm/thread_info.h | ||
3 | * | ||
4 | * Copyright (C) 2002 Russell King. | ||
5 | * Copyright (C) 2012 ARM Ltd. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | #ifndef __ASM_THREAD_INFO_H | ||
20 | #define __ASM_THREAD_INFO_H | ||
21 | |||
22 | #ifdef __KERNEL__ | ||
23 | |||
24 | #include <linux/compiler.h> | ||
25 | |||
/*
 * 8K kernel stacks: order-1 with 4K pages. With CONFIG_ARM64_64K_PAGES
 * no THREAD_SIZE_ORDER is defined here — presumably handled elsewhere;
 * TODO confirm.
 */
#ifndef CONFIG_ARM64_64K_PAGES
#define THREAD_SIZE_ORDER	1
#endif

#define THREAD_SIZE		8192
/* Initial stack pointer: 16 bytes below the top of the stack area. */
#define THREAD_START_SP		(THREAD_SIZE - 16)
32 | |||
33 | #ifndef __ASSEMBLY__ | ||
34 | |||
/* Forward declarations: pointers to these are stored in thread_info. */
struct task_struct;
struct exec_domain;

#include <asm/types.h>

/* Type of the per-thread address limit (thread_info::addr_limit). */
typedef unsigned long mm_segment_t;
41 | |||
/*
 * low level task data that entry.S needs immediate access to.
 *
 * NOTE(review): the arm32 original stated "__switch_to() assumes
 * cpu_context follows immediately after cpu_domain", but this struct
 * has no cpu_domain member — confirm whether any field-layout
 * assumption remains before reordering members.
 */
struct thread_info {
	unsigned long		flags;		/* low level flags */
	mm_segment_t		addr_limit;	/* address limit */
	struct task_struct	*task;		/* main task structure */
	struct exec_domain	*exec_domain;	/* execution domain */
	struct restart_block	restart_block;	/* syscall restart state */
	int			preempt_count;	/* 0 => preemptable, <0 => bug */
	int			cpu;		/* cpu */
};
55 | |||
/*
 * Static initialiser for the initial task's thread_info, which lives in
 * init_thread_union together with the initial kernel stack.
 */
#define INIT_THREAD_INFO(tsk)						\
{									\
	.task		= &tsk,						\
	.exec_domain	= &default_exec_domain,				\
	.flags		= 0,						\
	.preempt_count	= INIT_PREEMPT_COUNT,				\
	.addr_limit	= KERNEL_DS,					\
	.restart_block	= {						\
		.fn	= do_no_restart_syscall,			\
	},								\
}

#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)
70 | |||
71 | /* | ||
72 | * how to get the thread information struct from C | ||
73 | */ | ||
74 | static inline struct thread_info *current_thread_info(void) __attribute_const__; | ||
75 | |||
76 | static inline struct thread_info *current_thread_info(void) | ||
77 | { | ||
78 | register unsigned long sp asm ("sp"); | ||
79 | return (struct thread_info *)(sp & ~(THREAD_SIZE - 1)); | ||
80 | } | ||
81 | |||
/* Saved pc/sp/fp of @tsk as recorded in tsk->thread.cpu_context. */
#define thread_saved_pc(tsk)	\
	((unsigned long)(tsk->thread.cpu_context.pc))
#define thread_saved_sp(tsk)	\
	((unsigned long)(tsk->thread.cpu_context.sp))
#define thread_saved_fp(tsk)	\
	((unsigned long)(tsk->thread.cpu_context.fp))
88 | |||
89 | #endif | ||
90 | |||
/*
 * We use bit 30 of the preempt_count to indicate that kernel
 * preemption is occurring. See <asm/hardirq.h>.
 */
#define PREEMPT_ACTIVE	0x40000000

/*
 * thread information flags:
 *  TIF_SYSCALL_TRACE	- syscall trace active
 *  TIF_SIGPENDING	- signal pending
 *  TIF_NEED_RESCHED	- rescheduling necessary
 *  TIF_NOTIFY_RESUME	- callback before returning to user
 *  TIF_POLLING_NRFLAG	- true if poll_idle() is polling TIF_NEED_RESCHED
 *
 * NOTE(review): TIF_USEDFPU was listed in this comment but has no
 * definition below — dropped from the list pending confirmation.
 */
#define TIF_SIGPENDING		0
#define TIF_NEED_RESCHED	1
#define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
#define TIF_SYSCALL_TRACE	8
#define TIF_POLLING_NRFLAG	16
#define TIF_MEMDIE		18	/* is terminating due to OOM killer */
#define TIF_FREEZE		19
#define TIF_RESTORE_SIGMASK	20
#define TIF_SINGLESTEP		21
#define TIF_32BIT		22	/* 32bit process */
#define TIF_SWITCH_MM		23	/* deferred switch_mm */

/* Mask forms of the flags above (only these four are needed as masks). */
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
#define _TIF_32BIT		(1 << TIF_32BIT)

/* Flags grouped for a single work-pending test; NOTE(review): the
 * consumer (presumably the return-to-user path) is not visible here. */
#define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
				 _TIF_NOTIFY_RESUME)
125 | |||
126 | #endif /* __KERNEL__ */ | ||
127 | #endif /* __ASM_THREAD_INFO_H */ | ||