author	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-14 16:58:48 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-14 16:58:48 -0400
commit	e95e7f627062be5e6ce971ce873e6234c91ffc50 (patch)
tree	c00c00f2afb284037d4acc301f3be8a35a858acd /kernel
parent	078838d56574694d0a4815d9c1b7f28e8844638b (diff)
parent	1524b745406a85ba201cb25df72110c1ccac0f72 (diff)
Merge branch 'timers-nohz-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull NOHZ changes from Ingo Molnar:
 "This tree adds full dynticks support to KVM guests (support the
  disabling of the timer tick on the guest). The main missing piece
  was the recognition of guest execution as RCU extended quiescent
  state and related changes"

* 'timers-nohz-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  kvm,rcu,nohz: use RCU extended quiescent state when running KVM guest
  context_tracking: Export context_tracking_user_enter/exit
  context_tracking: Run vtime_user_enter/exit only when state == CONTEXT_USER
  context_tracking: Add stub context_tracking_is_enabled
  context_tracking: Generalize context tracking APIs to support user and guest
  context_tracking: Rename context symbols to prepare for transition state
  ppc: Remove unused cpp symbols in kvm headers
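The key enabler is that guest execution now counts as an RCU extended quiescent state, so the host no longer needs its timer tick to drive RCU grace periods while a vCPU runs. A minimal sketch of how a hypervisor run loop can use the entry points exported in this diff (not the actual KVM entry path: CONTEXT_GUEST comes from the series' generalized ctx_state enum and is not visible in this kernel/-only diff, and demo_vcpu/demo_world_switch() are hypothetical stand-ins):

#include <linux/context_tracking.h>

struct demo_vcpu;                               /* hypothetical vcpu handle */
void demo_world_switch(struct demo_vcpu *vcpu); /* hypothetical guest entry */

static void demo_run_vcpu(struct demo_vcpu *vcpu)
{
        /*
         * Guest execution is an RCU extended quiescent state, so the
         * host timer tick can stay disabled while the guest runs.
         */
        context_tracking_enter(CONTEXT_GUEST);

        demo_world_switch(vcpu);

        /*
         * Back in host kernel code: RCU read-side critical sections
         * are allowed again, and the tick can be restarted if needed.
         */
        context_tracking_exit(CONTEXT_GUEST);
}

The EXPORT_SYMBOL_GPL() lines added in the diff below are what make this callable from a modular hypervisor in the first place.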
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/context_tracking.c	59
-rw-r--r--	kernel/sched/core.c	2
2 files changed, 40 insertions(+), 21 deletions(-)
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 937ecdfdf258..72d59a1a6eb6 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -39,15 +39,15 @@ void context_tracking_cpu_set(int cpu)
 }
 
 /**
- * context_tracking_user_enter - Inform the context tracking that the CPU is going to
- * enter userspace mode.
+ * context_tracking_enter - Inform the context tracking that the CPU is going
+ * enter user or guest space mode.
  *
  * This function must be called right before we switch from the kernel
- * to userspace, when it's guaranteed the remaining kernel instructions
- * to execute won't use any RCU read side critical section because this
- * function sets RCU in extended quiescent state.
+ * to user or guest space, when it's guaranteed the remaining kernel
+ * instructions to execute won't use any RCU read side critical section
+ * because this function sets RCU in extended quiescent state.
  */
-void context_tracking_user_enter(void)
+void context_tracking_enter(enum ctx_state state)
 {
 	unsigned long flags;
 
@@ -75,9 +75,8 @@ void context_tracking_user_enter(void)
 	WARN_ON_ONCE(!current->mm);
 
 	local_irq_save(flags);
-	if ( __this_cpu_read(context_tracking.state) != IN_USER) {
+	if ( __this_cpu_read(context_tracking.state) != state) {
 		if (__this_cpu_read(context_tracking.active)) {
-			trace_user_enter(0);
 			/*
 			 * At this stage, only low level arch entry code remains and
 			 * then we'll run in userspace. We can assume there won't be
@@ -85,7 +84,10 @@ void context_tracking_user_enter(void)
 			 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
 			 * on the tick.
 			 */
-			vtime_user_enter(current);
+			if (state == CONTEXT_USER) {
+				trace_user_enter(0);
+				vtime_user_enter(current);
+			}
 			rcu_user_enter();
 		}
 		/*
@@ -101,24 +103,32 @@ void context_tracking_user_enter(void)
 	 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
 	 * is false because we know that CPU is not tickless.
 	 */
-		__this_cpu_write(context_tracking.state, IN_USER);
+		__this_cpu_write(context_tracking.state, state);
 	}
 	local_irq_restore(flags);
 }
+NOKPROBE_SYMBOL(context_tracking_enter);
+EXPORT_SYMBOL_GPL(context_tracking_enter);
+
+void context_tracking_user_enter(void)
+{
+	context_tracking_enter(CONTEXT_USER);
+}
 NOKPROBE_SYMBOL(context_tracking_user_enter);
 
 /**
- * context_tracking_user_exit - Inform the context tracking that the CPU is
- * exiting userspace mode and entering the kernel.
+ * context_tracking_exit - Inform the context tracking that the CPU is
+ * exiting user or guest mode and entering the kernel.
  *
- * This function must be called after we entered the kernel from userspace
- * before any use of RCU read side critical section. This potentially include
- * any high level kernel code like syscalls, exceptions, signal handling, etc...
+ * This function must be called after we entered the kernel from user or
+ * guest space before any use of RCU read side critical section. This
+ * potentially include any high level kernel code like syscalls, exceptions,
+ * signal handling, etc...
  *
  * This call supports re-entrancy. This way it can be called from any exception
  * handler without needing to know if we came from userspace or not.
  */
-void context_tracking_user_exit(void)
+void context_tracking_exit(enum ctx_state state)
 {
 	unsigned long flags;
 
@@ -129,20 +139,29 @@ void context_tracking_user_exit(void)
 		return;
 
 	local_irq_save(flags);
-	if (__this_cpu_read(context_tracking.state) == IN_USER) {
+	if (__this_cpu_read(context_tracking.state) == state) {
 		if (__this_cpu_read(context_tracking.active)) {
 			/*
 			 * We are going to run code that may use RCU. Inform
 			 * RCU core about that (ie: we may need the tick again).
 			 */
 			rcu_user_exit();
-			vtime_user_exit(current);
-			trace_user_exit(0);
+			if (state == CONTEXT_USER) {
+				vtime_user_exit(current);
+				trace_user_exit(0);
+			}
 		}
-		__this_cpu_write(context_tracking.state, IN_KERNEL);
+		__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
 	}
 	local_irq_restore(flags);
 }
+NOKPROBE_SYMBOL(context_tracking_exit);
+EXPORT_SYMBOL_GPL(context_tracking_exit);
+
+void context_tracking_user_exit(void)
+{
+	context_tracking_exit(CONTEXT_USER);
+}
 NOKPROBE_SYMBOL(context_tracking_user_exit);
 
 /**
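Note how the generalization keeps context_tracking_user_enter()/context_tracking_user_exit() alive as thin CONTEXT_USER wrappers, so existing arch entry code does not change. A rough sketch of how such wrappers are typically consumed; context_tracking_is_enabled() is named in the shortlog above, but this exact header-level wiring is an assumption:

/* Sketch, assuming the usual enablement check in the header. */
static inline void user_enter(void)
{
        if (context_tracking_is_enabled())
                context_tracking_user_enter(); /* == context_tracking_enter(CONTEXT_USER) */
}

static inline void user_exit(void)
{
        if (context_tracking_is_enabled())
                context_tracking_user_exit();  /* == context_tracking_exit(CONTEXT_USER) */
}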
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2f7937ee9e3a..f9123a82cbb6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2853,7 +2853,7 @@ asmlinkage __visible void __sched schedule_user(void)
 	 * we find a better solution.
 	 *
 	 * NB: There are buggy callers of this function. Ideally we
-	 * should warn if prev_state != IN_USER, but that will trigger
+	 * should warn if prev_state != CONTEXT_USER, but that will trigger
 	 * too frequently to make sense yet.
 	 */
 	enum ctx_state prev_state = exception_enter();
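For context, the comment above is part of the exception_enter()/exception_exit() save/restore pattern around kernel work. A hedged sketch of that pairing (demo_exception_path() is a hypothetical caller; the two helpers are the real API that this prev_state belongs to):

#include <linux/context_tracking.h>

static void demo_exception_path(void)
{
        enum ctx_state prev_state;

        /* Force CONTEXT_KERNEL and remember the state we interrupted. */
        prev_state = exception_enter();

        /* ... kernel work that may use RCU read-side critical sections ... */

        /* Restore the saved state, e.g. re-enter CONTEXT_USER. */
        exception_exit(prev_state);
}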