about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorJeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>2009-02-18 14:18:57 -0500
committerJeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>2009-03-30 02:36:01 -0400
commit224101ed69d3fbb486868e0f6e0f9fa37302efb4 (patch)
tree46830842a99659421eeabee65d299ab4c3b59f28 /arch
parentb407fc57b815b2016186220baabc76cc8264206e (diff)
x86/paravirt: finish change from lazy cpu to context switch start/end
Impact: fix lazy context switch API

Pass the previous and next tasks into the context switch start/end calls, so that the called functions can properly access the task state (especially in end_context_switch, in which the next task is not yet completely current).

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/include/asm/paravirt.h17
-rw-r--r--arch/x86/include/asm/pgtable.h2
-rw-r--r--arch/x86/kernel/paravirt.c14
-rw-r--r--arch/x86/kernel/process_32.c2
-rw-r--r--arch/x86/kernel/process_64.c2
-rw-r--r--arch/x86/kernel/vmi_32.c12
-rw-r--r--arch/x86/lguest/boot.c8
-rw-r--r--arch/x86/xen/enlighten.c10
8 files changed, 34 insertions, 33 deletions
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 58d2481b01a6..dfdee0ca57d3 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -56,6 +56,7 @@ struct desc_ptr;
56struct tss_struct; 56struct tss_struct;
57struct mm_struct; 57struct mm_struct;
58struct desc_struct; 58struct desc_struct;
59struct task_struct;
59 60
60/* 61/*
61 * Wrapper type for pointers to code which uses the non-standard 62 * Wrapper type for pointers to code which uses the non-standard
@@ -203,7 +204,8 @@ struct pv_cpu_ops {
203 204
204 void (*swapgs)(void); 205 void (*swapgs)(void);
205 206
206 struct pv_lazy_ops lazy_mode; 207 void (*start_context_switch)(struct task_struct *prev);
208 void (*end_context_switch)(struct task_struct *next);
207}; 209};
208 210
209struct pv_irq_ops { 211struct pv_irq_ops {
@@ -1414,20 +1416,21 @@ enum paravirt_lazy_mode {
1414}; 1416};
1415 1417
1416enum paravirt_lazy_mode paravirt_get_lazy_mode(void); 1418enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
1417void paravirt_enter_lazy_cpu(void); 1419void paravirt_start_context_switch(struct task_struct *prev);
1418void paravirt_leave_lazy_cpu(void); 1420void paravirt_end_context_switch(struct task_struct *next);
1421
1419void paravirt_enter_lazy_mmu(void); 1422void paravirt_enter_lazy_mmu(void);
1420void paravirt_leave_lazy_mmu(void); 1423void paravirt_leave_lazy_mmu(void);
1421 1424
1422#define __HAVE_ARCH_START_CONTEXT_SWITCH 1425#define __HAVE_ARCH_START_CONTEXT_SWITCH
1423static inline void arch_start_context_switch(void) 1426static inline void arch_start_context_switch(struct task_struct *prev)
1424{ 1427{
1425 PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter); 1428 PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
1426} 1429}
1427 1430
1428static inline void arch_end_context_switch(void) 1431static inline void arch_end_context_switch(struct task_struct *next)
1429{ 1432{
1430 PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave); 1433 PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
1431} 1434}
1432 1435
1433#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE 1436#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index d0812e155f1d..24e42836e921 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -83,6 +83,8 @@ static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
83#define pte_val(x) native_pte_val(x) 83#define pte_val(x) native_pte_val(x)
84#define __pte(x) native_make_pte(x) 84#define __pte(x) native_make_pte(x)
85 85
86#define arch_end_context_switch(prev) do {} while(0)
87
86#endif /* CONFIG_PARAVIRT */ 88#endif /* CONFIG_PARAVIRT */
87 89
88/* 90/*
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 430a0e30577b..cf1437503bab 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -270,20 +270,20 @@ void paravirt_leave_lazy_mmu(void)
270 leave_lazy(PARAVIRT_LAZY_MMU); 270 leave_lazy(PARAVIRT_LAZY_MMU);
271} 271}
272 272
273void paravirt_enter_lazy_cpu(void) 273void paravirt_start_context_switch(struct task_struct *prev)
274{ 274{
275 if (percpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) { 275 if (percpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
276 arch_leave_lazy_mmu_mode(); 276 arch_leave_lazy_mmu_mode();
277 set_thread_flag(TIF_LAZY_MMU_UPDATES); 277 set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
278 } 278 }
279 enter_lazy(PARAVIRT_LAZY_CPU); 279 enter_lazy(PARAVIRT_LAZY_CPU);
280} 280}
281 281
282void paravirt_leave_lazy_cpu(void) 282void paravirt_end_context_switch(struct task_struct *next)
283{ 283{
284 leave_lazy(PARAVIRT_LAZY_CPU); 284 leave_lazy(PARAVIRT_LAZY_CPU);
285 285
286 if (test_and_clear_thread_flag(TIF_LAZY_MMU_UPDATES)) 286 if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
287 arch_enter_lazy_mmu_mode(); 287 arch_enter_lazy_mmu_mode();
288} 288}
289 289
@@ -399,10 +399,8 @@ struct pv_cpu_ops pv_cpu_ops = {
399 .set_iopl_mask = native_set_iopl_mask, 399 .set_iopl_mask = native_set_iopl_mask,
400 .io_delay = native_io_delay, 400 .io_delay = native_io_delay,
401 401
402 .lazy_mode = { 402 .start_context_switch = paravirt_nop,
403 .enter = paravirt_nop, 403 .end_context_switch = paravirt_nop,
404 .leave = paravirt_nop,
405 },
406}; 404};
407 405
408struct pv_apic_ops pv_apic_ops = { 406struct pv_apic_ops pv_apic_ops = {
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 57e49a8278a9..d766c7616fd7 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -407,7 +407,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
407 * done before math_state_restore, so the TS bit is up 407 * done before math_state_restore, so the TS bit is up
408 * to date. 408 * to date.
409 */ 409 */
410 arch_end_context_switch(); 410 arch_end_context_switch(next_p);
411 411
412 /* If the task has used fpu the last 5 timeslices, just do a full 412 /* If the task has used fpu the last 5 timeslices, just do a full
413 * restore of the math state immediately to avoid the trap; the 413 * restore of the math state immediately to avoid the trap; the
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 7115e6085326..e8a9aaf9df88 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -428,7 +428,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
428 * done before math_state_restore, so the TS bit is up 428 * done before math_state_restore, so the TS bit is up
429 * to date. 429 * to date.
430 */ 430 */
431 arch_end_context_switch(); 431 arch_end_context_switch(next_p);
432 432
433 /* 433 /*
434 * Switch FS and GS. 434 * Switch FS and GS.
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 950929c607d3..55a5d6938e5e 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -467,16 +467,16 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
467} 467}
468#endif 468#endif
469 469
470static void vmi_enter_lazy_cpu(void) 470static void vmi_start_context_switch(struct task_struct *prev)
471{ 471{
472 paravirt_enter_lazy_cpu(); 472 paravirt_start_context_switch(prev);
473 vmi_ops.set_lazy_mode(2); 473 vmi_ops.set_lazy_mode(2);
474} 474}
475 475
476static void vmi_leave_lazy_cpu(void) 476static void vmi_end_context_switch(struct task_struct *next)
477{ 477{
478 vmi_ops.set_lazy_mode(0); 478 vmi_ops.set_lazy_mode(0);
479 paravirt_leave_lazy_cpu(); 479 paravirt_end_context_switch(next);
480} 480}
481 481
482static void vmi_enter_lazy_mmu(void) 482static void vmi_enter_lazy_mmu(void)
@@ -722,9 +722,9 @@ static inline int __init activate_vmi(void)
722 para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask); 722 para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask);
723 para_fill(pv_cpu_ops.io_delay, IODelay); 723 para_fill(pv_cpu_ops.io_delay, IODelay);
724 724
725 para_wrap(pv_cpu_ops.lazy_mode.enter, vmi_enter_lazy_cpu, 725 para_wrap(pv_cpu_ops.start_context_switch, vmi_start_context_switch,
726 set_lazy_mode, SetLazyMode); 726 set_lazy_mode, SetLazyMode);
727 para_wrap(pv_cpu_ops.lazy_mode.leave, vmi_leave_lazy_cpu, 727 para_wrap(pv_cpu_ops.end_context_switch, vmi_end_context_switch,
728 set_lazy_mode, SetLazyMode); 728 set_lazy_mode, SetLazyMode);
729 729
730 para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu, 730 para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu,
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 41a5562e710e..5287081b3567 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -153,10 +153,10 @@ static void lguest_leave_lazy_mmu_mode(void)
153 paravirt_leave_lazy_mmu(); 153 paravirt_leave_lazy_mmu();
154} 154}
155 155
156static void lguest_leave_lazy_cpu_mode(void) 156static void lguest_end_context_switch(struct task_struct *next)
157{ 157{
158 hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0); 158 hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
159 paravirt_leave_lazy_cpu(); 159 paravirt_end_context_switch(next);
160} 160}
161 161
162/*G:033 162/*G:033
@@ -1031,8 +1031,8 @@ __init void lguest_init(void)
1031 pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry; 1031 pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
1032 pv_cpu_ops.write_idt_entry = lguest_write_idt_entry; 1032 pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
1033 pv_cpu_ops.wbinvd = lguest_wbinvd; 1033 pv_cpu_ops.wbinvd = lguest_wbinvd;
1034 pv_cpu_ops.lazy_mode.enter = paravirt_enter_lazy_cpu; 1034 pv_cpu_ops.start_context_switch = paravirt_start_context_switch;
1035 pv_cpu_ops.lazy_mode.leave = lguest_leave_lazy_cpu_mode; 1035 pv_cpu_ops.end_context_switch = lguest_end_context_switch;
1036 1036
1037 /* pagetable management */ 1037 /* pagetable management */
1038 pv_mmu_ops.write_cr3 = lguest_write_cr3; 1038 pv_mmu_ops.write_cr3 = lguest_write_cr3;
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index f586e63b9a63..70b355d3a86c 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -203,10 +203,10 @@ static unsigned long xen_get_debugreg(int reg)
203 return HYPERVISOR_get_debugreg(reg); 203 return HYPERVISOR_get_debugreg(reg);
204} 204}
205 205
206static void xen_leave_lazy_cpu(void) 206static void xen_end_context_switch(struct task_struct *next)
207{ 207{
208 xen_mc_flush(); 208 xen_mc_flush();
209 paravirt_leave_lazy_cpu(); 209 paravirt_end_context_switch(next);
210} 210}
211 211
212static unsigned long xen_store_tr(void) 212static unsigned long xen_store_tr(void)
@@ -817,10 +817,8 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
817 /* Xen takes care of %gs when switching to usermode for us */ 817 /* Xen takes care of %gs when switching to usermode for us */
818 .swapgs = paravirt_nop, 818 .swapgs = paravirt_nop,
819 819
820 .lazy_mode = { 820 .start_context_switch = paravirt_start_context_switch,
821 .enter = paravirt_enter_lazy_cpu, 821 .end_context_switch = xen_end_context_switch,
822 .leave = xen_leave_lazy_cpu,
823 },
824}; 822};
825 823
826static const struct pv_apic_ops xen_apic_ops __initdata = { 824static const struct pv_apic_ops xen_apic_ops __initdata = {