author    Paolo Bonzini <pbonzini@redhat.com>    2015-10-27 21:39:56 -0400
committer Paolo Bonzini <pbonzini@redhat.com>    2015-11-10 06:06:23 -0500
commit    d0e536d89395ecd8ab78fe999dc4d6f5d140ce46 (patch)
tree      bac5690350e345061db0206bbbe91dbeeb6ebdad
parent    f70cd6b07e629f367bb9b1ac9d0e3e669eb325c0 (diff)
context_tracking: avoid irq_save/irq_restore on guest entry and exit
guest_enter and guest_exit must be called with interrupts disabled,
since they take the vtime_seqlock with write_seq{lock,unlock}.
Therefore, it is not necessary to check for exceptions, nor to
save/restore the IRQ state, when context tracking functions are
called by guest_enter and guest_exit.

Split the body of context_tracking_enter and context_tracking_exit
out to __-prefixed functions, and use them from KVM.

Rik van Riel has measured this to speed up a tight vmentry/vmexit
loop by about 2%.

Cc: Andy Lutomirski <luto@kernel.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Tested-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
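[Editor's note: for readers outside the kernel tree, below is a minimal
userspace C sketch of the pattern this patch applies. The names
irqs_enabled, irq_save, irq_restore, and __tracking_enter are
illustrative stand-ins, not kernel APIs: a __-prefixed worker assumes
interrupts are already disabled, while the plain-named wrapper pays for
the flag save/restore on behalf of generic callers. A caller that
already runs with interrupts off, like a tight vmentry/vmexit loop,
calls the worker directly and skips that cost.]

#include <stdio.h>

static int irqs_enabled = 1;              /* stand-in for the CPU IRQ flag */

static unsigned long irq_save(void)       /* like local_irq_save(flags) */
{
	unsigned long flags = irqs_enabled;
	irqs_enabled = 0;
	return flags;
}

static void irq_restore(unsigned long flags)  /* like local_irq_restore() */
{
	irqs_enabled = flags;
}

/* Fast variant: caller guarantees IRQs are off, so no save/restore here. */
static void __tracking_enter(void)
{
	printf("enter, irqs_enabled=%d\n", irqs_enabled);
}

/* Slow variant for generic callers: wraps the worker in save/restore. */
static void tracking_enter(void)
{
	unsigned long flags = irq_save();
	__tracking_enter();
	irq_restore(flags);
}

int main(void)
{
	tracking_enter();                 /* generic path: wrapper handles flags */

	unsigned long flags = irq_save(); /* hot path: IRQs already off, */
	__tracking_enter();               /* so call the __ worker directly */
	irq_restore(flags);
	return 0;
}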
-rw-r--r--  include/linux/context_tracking.h |  8
-rw-r--r--  kernel/context_tracking.c        | 64
2 files changed, 44 insertions, 28 deletions
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 6ef136ff0897..68b575afe5f5 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -10,6 +10,10 @@
 #ifdef CONFIG_CONTEXT_TRACKING
 extern void context_tracking_cpu_set(int cpu);
 
+/* Called with interrupts disabled. */
+extern void __context_tracking_enter(enum ctx_state state);
+extern void __context_tracking_exit(enum ctx_state state);
+
 extern void context_tracking_enter(enum ctx_state state);
 extern void context_tracking_exit(enum ctx_state state);
 extern void context_tracking_user_enter(void);
@@ -88,13 +92,13 @@ static inline void guest_enter(void)
 		current->flags |= PF_VCPU;
 
 	if (context_tracking_is_enabled())
-		context_tracking_enter(CONTEXT_GUEST);
+		__context_tracking_enter(CONTEXT_GUEST);
 }
 
 static inline void guest_exit(void)
 {
 	if (context_tracking_is_enabled())
-		context_tracking_exit(CONTEXT_GUEST);
+		__context_tracking_exit(CONTEXT_GUEST);
 
 	if (vtime_accounting_enabled())
 		vtime_guest_exit(current);
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 6d4c6ce21275..d8560ee3bab7 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -58,27 +58,13 @@ static void context_tracking_recursion_exit(void)
  * instructions to execute won't use any RCU read side critical section
  * because this function sets RCU in extended quiescent state.
  */
-void context_tracking_enter(enum ctx_state state)
+void __context_tracking_enter(enum ctx_state state)
 {
-	unsigned long flags;
-
-	/*
-	 * Some contexts may involve an exception occuring in an irq,
-	 * leading to that nesting:
-	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
-	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
-	 * helpers are enough to protect RCU uses inside the exception. So
-	 * just return immediately if we detect we are in an IRQ.
-	 */
-	if (in_interrupt())
-		return;
-
 	/* Kernel threads aren't supposed to go to userspace */
 	WARN_ON_ONCE(!current->mm);
 
-	local_irq_save(flags);
 	if (!context_tracking_recursion_enter())
-		goto out_irq_restore;
+		return;
 
 	if ( __this_cpu_read(context_tracking.state) != state) {
 		if (__this_cpu_read(context_tracking.active)) {
@@ -111,7 +97,27 @@ void context_tracking_enter(enum ctx_state state)
 		__this_cpu_write(context_tracking.state, state);
 	}
 	context_tracking_recursion_exit();
-out_irq_restore:
+}
+NOKPROBE_SYMBOL(__context_tracking_enter);
+EXPORT_SYMBOL_GPL(__context_tracking_enter);
+
+void context_tracking_enter(enum ctx_state state)
+{
+	unsigned long flags;
+
+	/*
+	 * Some contexts may involve an exception occuring in an irq,
+	 * leading to that nesting:
+	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
+	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
+	 * helpers are enough to protect RCU uses inside the exception. So
+	 * just return immediately if we detect we are in an IRQ.
+	 */
+	if (in_interrupt())
+		return;
+
+	local_irq_save(flags);
+	__context_tracking_enter(state);
 	local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(context_tracking_enter);
@@ -135,16 +141,10 @@ NOKPROBE_SYMBOL(context_tracking_user_enter);
  * This call supports re-entrancy. This way it can be called from any exception
  * handler without needing to know if we came from userspace or not.
  */
-void context_tracking_exit(enum ctx_state state)
+void __context_tracking_exit(enum ctx_state state)
 {
-	unsigned long flags;
-
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
 	if (!context_tracking_recursion_enter())
-		goto out_irq_restore;
+		return;
 
 	if (__this_cpu_read(context_tracking.state) == state) {
 		if (__this_cpu_read(context_tracking.active)) {
@@ -161,7 +161,19 @@ void context_tracking_exit(enum ctx_state state)
 		__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
 	}
 	context_tracking_recursion_exit();
-out_irq_restore:
+}
+NOKPROBE_SYMBOL(__context_tracking_exit);
+EXPORT_SYMBOL_GPL(__context_tracking_exit);
+
+void context_tracking_exit(enum ctx_state state)
+{
+	unsigned long flags;
+
+	if (in_interrupt())
+		return;
+
+	local_irq_save(flags);
+	__context_tracking_exit(state);
 	local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(context_tracking_exit);