Diffstat (limited to 'kernel/context_tracking.c')
 -rw-r--r--  kernel/context_tracking.c | 125
 1 file changed, 71 insertions(+), 54 deletions(-)

diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 383f8231e436..247091bf0587 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -20,22 +20,33 @@
 #include <linux/hardirq.h>
 #include <linux/export.h>
 
-DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
-#ifdef CONFIG_CONTEXT_TRACKING_FORCE
-        .active = true,
-#endif
-};
+#define CREATE_TRACE_POINTS
+#include <trace/events/context_tracking.h>
+
+struct static_key context_tracking_enabled = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL_GPL(context_tracking_enabled);
+
+DEFINE_PER_CPU(struct context_tracking, context_tracking);
+EXPORT_SYMBOL_GPL(context_tracking);
+
+void context_tracking_cpu_set(int cpu)
+{
+        if (!per_cpu(context_tracking.active, cpu)) {
+                per_cpu(context_tracking.active, cpu) = true;
+                static_key_slow_inc(&context_tracking_enabled);
+        }
+}
 
 /**
- * user_enter - Inform the context tracking that the CPU is going to
+ * context_tracking_user_enter - Inform the context tracking that the CPU is going to
  *              enter userspace mode.
  *
  * This function must be called right before we switch from the kernel
  * to userspace, when it's guaranteed the remaining kernel instructions
  * to execute won't use any RCU read side critical section because this
  * function sets RCU in extended quiescent state.
  */
-void user_enter(void)
+void context_tracking_user_enter(void)
 {
         unsigned long flags;
 
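The hunk above introduces context_tracking_cpu_set(), which flips a CPU's context_tracking.active flag and bumps the context_tracking_enabled static key the first time it is called for that CPU. Nothing in this file shows the caller; as a rough illustration only (the function and mask names below are assumptions, not part of this diff), a full-dynticks setup would be expected to mark each nohz_full CPU at boot roughly like this:

/*
 * Illustrative sketch, not from this diff: enable tracking on every
 * full-dynticks CPU at boot. tick_nohz_full_mask and the function name
 * are assumptions; only context_tracking_cpu_set() comes from this patch.
 */
static void __init nohz_full_context_tracking_setup(void)
{
        int cpu;

        for_each_cpu(cpu, tick_nohz_full_mask)
                context_tracking_cpu_set(cpu);
}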
@@ -54,17 +65,32 @@ void user_enter(void)
         WARN_ON_ONCE(!current->mm);
 
         local_irq_save(flags);
-        if (__this_cpu_read(context_tracking.active) &&
-            __this_cpu_read(context_tracking.state) != IN_USER) {
+        if ( __this_cpu_read(context_tracking.state) != IN_USER) {
+                if (__this_cpu_read(context_tracking.active)) {
+                        trace_user_enter(0);
+                        /*
+                         * At this stage, only low level arch entry code remains and
+                         * then we'll run in userspace. We can assume there won't be
+                         * any RCU read-side critical section until the next call to
+                         * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
+                         * on the tick.
+                         */
+                        vtime_user_enter(current);
+                        rcu_user_enter();
+                }
                 /*
-                 * At this stage, only low level arch entry code remains and
-                 * then we'll run in userspace. We can assume there won't be
-                 * any RCU read-side critical section until the next call to
-                 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
-                 * on the tick.
+                 * Even if context tracking is disabled on this CPU, because it's outside
+                 * the full dynticks mask for example, we still have to keep track of the
+                 * context transitions and states to prevent inconsistency on those of
+                 * other CPUs.
+                 * If a task triggers an exception in userspace, sleep on the exception
+                 * handler and then migrate to another CPU, that new CPU must know where
+                 * the exception returns by the time we call exception_exit().
+                 * This information can only be provided by the previous CPU when it called
+                 * exception_enter().
+                 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
+                 * is false because we know that CPU is not tickless.
                  */
-                vtime_user_enter(current);
-                rcu_user_enter();
                 __this_cpu_write(context_tracking.state, IN_USER);
         }
         local_irq_restore(flags);
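The long comment added in this hunk is its behavioural core: the IN_USER/IN_KERNEL state is updated even on CPUs whose context_tracking.active flag is clear, so that the state saved by exception_enter() on one CPU is still meaningful when exception_exit() runs after the task has migrated. A sketch of those two helpers as the comment describes them (their real inline bodies live in include/linux/context_tracking.h and are not part of this diff, so treat the exact code as an assumption):

/* Sketch based on the comment above; the real helpers live in the header. */
static inline enum ctx_state exception_enter(void)
{
        enum ctx_state prev_ctx;

        /* Remember whether the exception interrupted userspace... */
        prev_ctx = this_cpu_read(context_tracking.state);
        /* ...and switch the tracked state to IN_KERNEL. */
        user_exit();

        return prev_ctx;
}

static inline void exception_exit(enum ctx_state prev_ctx)
{
        /* prev_ctx may have been recorded by a different CPU than this one. */
        if (prev_ctx == IN_USER)
                user_enter();
}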
@@ -87,10 +113,9 @@ void user_enter(void)
  */
 void __sched notrace preempt_schedule_context(void)
 {
-        struct thread_info *ti = current_thread_info();
         enum ctx_state prev_ctx;
 
-        if (likely(ti->preempt_count || irqs_disabled()))
+        if (likely(!preemptible()))
                 return;
 
         /*
@@ -112,8 +137,8 @@ EXPORT_SYMBOL_GPL(preempt_schedule_context);
 #endif /* CONFIG_PREEMPT */
 
 /**
- * user_exit - Inform the context tracking that the CPU is
+ * context_tracking_user_exit - Inform the context tracking that the CPU is
  *             exiting userspace mode and entering the kernel.
  *
  * This function must be called after we entered the kernel from userspace
  * before any use of RCU read side critical section. This potentially include
@@ -122,7 +147,7 @@ EXPORT_SYMBOL_GPL(preempt_schedule_context);
  * This call supports re-entrancy. This way it can be called from any exception
  * handler without needing to know if we came from userspace or not.
  */
-void user_exit(void)
+void context_tracking_user_exit(void)
 {
         unsigned long flags;
 
@@ -131,38 +156,22 @@ void user_exit(void)
 
         local_irq_save(flags);
         if (__this_cpu_read(context_tracking.state) == IN_USER) {
-                /*
-                 * We are going to run code that may use RCU. Inform
-                 * RCU core about that (ie: we may need the tick again).
-                 */
-                rcu_user_exit();
-                vtime_user_exit(current);
+                if (__this_cpu_read(context_tracking.active)) {
+                        /*
+                         * We are going to run code that may use RCU. Inform
+                         * RCU core about that (ie: we may need the tick again).
+                         */
+                        rcu_user_exit();
+                        vtime_user_exit(current);
+                        trace_user_exit(0);
+                }
                 __this_cpu_write(context_tracking.state, IN_KERNEL);
         }
         local_irq_restore(flags);
 }
 
-void guest_enter(void)
-{
-        if (vtime_accounting_enabled())
-                vtime_guest_enter(current);
-        else
-                __guest_enter();
-}
-EXPORT_SYMBOL_GPL(guest_enter);
-
-void guest_exit(void)
-{
-        if (vtime_accounting_enabled())
-                vtime_guest_exit(current);
-        else
-                __guest_exit();
-}
-EXPORT_SYMBOL_GPL(guest_exit);
-
-
 /**
- * context_tracking_task_switch - context switch the syscall callbacks
+ * __context_tracking_task_switch - context switch the syscall callbacks
  * @prev: the task that is being switched out
  * @next: the task that is being switched in
  *
@@ -174,11 +183,19 @@ EXPORT_SYMBOL_GPL(guest_exit);
  * migrate to some CPU that doesn't do the context tracking. As such the TIF
  * flag may not be desired there.
  */
-void context_tracking_task_switch(struct task_struct *prev,
-                                  struct task_struct *next)
+void __context_tracking_task_switch(struct task_struct *prev,
+                                    struct task_struct *next)
 {
-        if (__this_cpu_read(context_tracking.active)) {
-                clear_tsk_thread_flag(prev, TIF_NOHZ);
-                set_tsk_thread_flag(next, TIF_NOHZ);
-        }
+        clear_tsk_thread_flag(prev, TIF_NOHZ);
+        set_tsk_thread_flag(next, TIF_NOHZ);
 }
+
+#ifdef CONFIG_CONTEXT_TRACKING_FORCE
+void __init context_tracking_init(void)
+{
+        int cpu;
+
+        for_each_possible_cpu(cpu)
+                context_tracking_cpu_set(cpu);
+}
+#endif
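The companion header changes are outside this diffstat, but the reason for the context_tracking_enabled static key is worth spelling out: the user_enter()/user_exit() wrappers can now compile down to a patched-out branch on kernels where no CPU ever calls context_tracking_cpu_set(). A hedged sketch of what such wrappers look like in include/linux/context_tracking.h (assumed here, since the header is not shown by this diff):

/* Sketch of the header-side fast path; not part of this diff. */
static inline bool context_tracking_is_enabled(void)
{
        return static_key_false(&context_tracking_enabled);
}

static inline void user_enter(void)
{
        if (context_tracking_is_enabled())
                context_tracking_user_enter();
}

static inline void user_exit(void)
{
        if (context_tracking_is_enabled())
                context_tracking_user_exit();
}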