Diffstat (limited to 'kernel')
 kernel/context_tracking.c | 8 ++++----
 kernel/sched/core.c       | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 937ecdfdf258..8ad53c9d38b6 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -75,7 +75,7 @@ void context_tracking_user_enter(void)
 	WARN_ON_ONCE(!current->mm);
 
 	local_irq_save(flags);
-	if ( __this_cpu_read(context_tracking.state) != IN_USER) {
+	if ( __this_cpu_read(context_tracking.state) != CONTEXT_USER) {
 		if (__this_cpu_read(context_tracking.active)) {
 			trace_user_enter(0);
 			/*
@@ -101,7 +101,7 @@ void context_tracking_user_enter(void)
 		 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
 		 * is false because we know that CPU is not tickless.
 		 */
-		__this_cpu_write(context_tracking.state, IN_USER);
+		__this_cpu_write(context_tracking.state, CONTEXT_USER);
 	}
 	local_irq_restore(flags);
 }
@@ -129,7 +129,7 @@ void context_tracking_user_exit(void)
 		return;
 
 	local_irq_save(flags);
-	if (__this_cpu_read(context_tracking.state) == IN_USER) {
+	if (__this_cpu_read(context_tracking.state) == CONTEXT_USER) {
 		if (__this_cpu_read(context_tracking.active)) {
 			/*
 			 * We are going to run code that may use RCU. Inform
@@ -139,7 +139,7 @@ void context_tracking_user_exit(void)
 			vtime_user_exit(current);
 			trace_user_exit(0);
 		}
-		__this_cpu_write(context_tracking.state, IN_KERNEL);
+		__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
 	}
 	local_irq_restore(flags);
 }
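
The hunks above rename the per-CPU context-tracking state values without changing the logic: user entry flips the state to the user value unless it is already there, and user exit flips it back, firing the tracing/RCU/vtime hooks only on a real transition. Below is a minimal user-space sketch of that state machine, assuming a single-CPU stand-in for the per-CPU variable and puts() stubs in place of the real hooks; none of this is the kernel's API.

#include <stdio.h>

enum ctx_state { CONTEXT_KERNEL, CONTEXT_USER };

/* Single-CPU stand-in for the kernel's per-CPU context_tracking struct. */
static struct {
	int active;
	enum ctx_state state;
} context_tracking = { .active = 1, .state = CONTEXT_KERNEL };

static void user_enter(void)
{
	/* Transition (and fire the hooks) only if not already in user context. */
	if (context_tracking.state != CONTEXT_USER) {
		if (context_tracking.active)
			puts("trace_user_enter + rcu_user_enter");
		context_tracking.state = CONTEXT_USER;
	}
}

static void user_exit(void)
{
	if (context_tracking.state == CONTEXT_USER) {
		if (context_tracking.active)
			puts("rcu_user_exit + vtime_user_exit + trace_user_exit");
		context_tracking.state = CONTEXT_KERNEL;
	}
}

int main(void)
{
	user_enter();
	user_enter();	/* no-op: state is already CONTEXT_USER */
	user_exit();
	return 0;
}

Built with a plain cc, the second user_enter() prints nothing; that idempotence is exactly what the state check in the patched code buys.
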
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f0f831e8a345..06b9a00871e0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2818,7 +2818,7 @@ asmlinkage __visible void __sched schedule_user(void)
 	 * we find a better solution.
 	 *
 	 * NB: There are buggy callers of this function. Ideally we
-	 * should warn if prev_state != IN_USER, but that will trigger
+	 * should warn if prev_state != CONTEXT_USER, but that will trigger
 	 * too frequently to make sense yet.
 	 */
 	enum ctx_state prev_state = exception_enter();
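
The comment in schedule_user() relies on exception_enter() returning the context the CPU was in before the exception, so that exception_exit() can restore it afterwards. A hedged, self-contained sketch of that save/switch/restore pattern follows; the helper names mirror the kernel's, but the bodies are illustrative stand-ins, not the real implementation.

#include <stdio.h>

enum ctx_state { CONTEXT_KERNEL, CONTEXT_USER };

/* Single-CPU stand-in for the per-CPU tracking state. */
static enum ctx_state state = CONTEXT_USER;

static enum ctx_state exception_enter(void)
{
	enum ctx_state prev_state = state;

	state = CONTEXT_KERNEL;		/* exceptions run in kernel context */
	return prev_state;
}

static void exception_exit(enum ctx_state prev_state)
{
	state = prev_state;		/* restore whatever context we interrupted */
}

/* Shape of schedule_user(): save, switch, do the work, restore. */
static void schedule_user_sketch(void)
{
	enum ctx_state prev_state = exception_enter();

	/*
	 * Per the diff's comment, a warning on prev_state != CONTEXT_USER
	 * would be ideal here, but it fires too often with buggy callers.
	 */
	printf("entered from %s context\n",
	       prev_state == CONTEXT_USER ? "user" : "kernel");
	/* ... schedule() would run here ... */
	exception_exit(prev_state);
}

int main(void)
{
	schedule_user_sketch();
	return 0;
}
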