about summary refs log tree commit diff stats
path: root/arch/x86
diff options
context:
space:
mode:
authorJeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>2009-02-18 02:24:03 -0500
committerJeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>2009-03-30 02:35:59 -0400
commit7fd7d83d49914f03aefffba6aee09032fcd54cce (patch)
tree8c9f5b95f6b63b5d28887f47d92a6c79139eac5f /arch/x86
parentb8bcfe997e46150fedcc3f5b26b846400122fdd9 (diff)
x86/pvops: replace arch_enter_lazy_cpu_mode with arch_start_context_switch
Impact: simplification, prepare for later changes.

Make lazy cpu mode more specific to context switching, so that it makes sense to do more context-switch specific things in the callbacks.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/include/asm/paravirt.h8
-rw-r--r--arch/x86/kernel/paravirt.c13
-rw-r--r--arch/x86/kernel/process_32.c2
-rw-r--r--arch/x86/kernel/process_64.c2
-rw-r--r--arch/x86/xen/mmu.c5
5 files changed, 6 insertions, 24 deletions
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 0617d5cc971..7b28abac323 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -1420,19 +1420,17 @@ void paravirt_enter_lazy_mmu(void);
1420void paravirt_leave_lazy_mmu(void); 1420void paravirt_leave_lazy_mmu(void);
1421void paravirt_leave_lazy(enum paravirt_lazy_mode mode); 1421void paravirt_leave_lazy(enum paravirt_lazy_mode mode);
1422 1422
1423#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE 1423#define __HAVE_ARCH_START_CONTEXT_SWITCH
1424static inline void arch_enter_lazy_cpu_mode(void) 1424static inline void arch_start_context_switch(void)
1425{ 1425{
1426 PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter); 1426 PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
1427} 1427}
1428 1428
1429static inline void arch_leave_lazy_cpu_mode(void) 1429static inline void arch_end_context_switch(void)
1430{ 1430{
1431 PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave); 1431 PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
1432} 1432}
1433 1433
1434void arch_flush_lazy_cpu_mode(void);
1435
1436#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE 1434#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
1437static inline void arch_enter_lazy_mmu_mode(void) 1435static inline void arch_enter_lazy_mmu_mode(void)
1438{ 1436{
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 8ab250ac498..5eea9548216 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -301,19 +301,6 @@ void arch_flush_lazy_mmu_mode(void)
301 preempt_enable(); 301 preempt_enable();
302} 302}
303 303
304void arch_flush_lazy_cpu_mode(void)
305{
306 preempt_disable();
307
308 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
309 WARN_ON(preempt_count() == 1);
310 arch_leave_lazy_cpu_mode();
311 arch_enter_lazy_cpu_mode();
312 }
313
314 preempt_enable();
315}
316
317struct pv_info pv_info = { 304struct pv_info pv_info = {
318 .name = "bare hardware", 305 .name = "bare hardware",
319 .paravirt_enabled = 0, 306 .paravirt_enabled = 0,
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 14014d766ca..57e49a8278a 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -407,7 +407,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
407 * done before math_state_restore, so the TS bit is up 407 * done before math_state_restore, so the TS bit is up
408 * to date. 408 * to date.
409 */ 409 */
410 arch_leave_lazy_cpu_mode(); 410 arch_end_context_switch();
411 411
412 /* If the task has used fpu the last 5 timeslices, just do a full 412 /* If the task has used fpu the last 5 timeslices, just do a full
413 * restore of the math state immediately to avoid the trap; the 413 * restore of the math state immediately to avoid the trap; the
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index abb7e6a7f0c..7115e608532 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -428,7 +428,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
428 * done before math_state_restore, so the TS bit is up 428 * done before math_state_restore, so the TS bit is up
429 * to date. 429 * to date.
430 */ 430 */
431 arch_leave_lazy_cpu_mode(); 431 arch_end_context_switch();
432 432
433 /* 433 /*
434 * Switch FS and GS. 434 * Switch FS and GS.
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index cb6afa4ec95..6b98f87232a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1119,10 +1119,8 @@ static void drop_other_mm_ref(void *info)
1119 1119
1120 /* If this cpu still has a stale cr3 reference, then make sure 1120 /* If this cpu still has a stale cr3 reference, then make sure
1121 it has been flushed. */ 1121 it has been flushed. */
1122 if (percpu_read(xen_current_cr3) == __pa(mm->pgd)) { 1122 if (percpu_read(xen_current_cr3) == __pa(mm->pgd))
1123 load_cr3(swapper_pg_dir); 1123 load_cr3(swapper_pg_dir);
1124 arch_flush_lazy_cpu_mode();
1125 }
1126} 1124}
1127 1125
1128static void xen_drop_mm_ref(struct mm_struct *mm) 1126static void xen_drop_mm_ref(struct mm_struct *mm)
@@ -1135,7 +1133,6 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
1135 load_cr3(swapper_pg_dir); 1133 load_cr3(swapper_pg_dir);
1136 else 1134 else
1137 leave_mm(smp_processor_id()); 1135 leave_mm(smp_processor_id());
1138 arch_flush_lazy_cpu_mode();
1139 } 1136 }
1140 1137
1141 /* Get the "official" set of cpus referring to our pagetable. */ 1138 /* Get the "official" set of cpus referring to our pagetable. */