author		Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2009-02-18 02:24:03 -0500
committer	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2009-03-30 02:35:59 -0400
commit		7fd7d83d49914f03aefffba6aee09032fcd54cce (patch)
tree		8c9f5b95f6b63b5d28887f47d92a6c79139eac5f
parent		b8bcfe997e46150fedcc3f5b26b846400122fdd9 (diff)
x86/pvops: replace arch_enter_lazy_cpu_mode with arch_start_context_switch
Impact: simplification, prepare for later changes
Make lazy cpu mode more specific to context switching, so that
it makes sense to do more context-switch specific things in
the callbacks.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
 arch/x86/include/asm/paravirt.h |  8
 arch/x86/kernel/paravirt.c      | 13
 arch/x86/kernel/process_32.c    |  2
 arch/x86/kernel/process_64.c    |  2
 arch/x86/xen/mmu.c              |  5
 include/asm-frv/pgtable.h       |  4
 include/asm-generic/pgtable.h   | 21
 kernel/sched.c                  |  2
 8 files changed, 20 insertions(+), 37 deletions(-)
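
The renamed hooks make the pairing explicit: the scheduler's generic code opens the batching window and the architecture's __switch_to() closes it. A condensed sketch of the resulting call flow (not the literal kernel code; details elided):

	/* Condensed sketch of how the renamed hooks bracket a context switch. */

	struct rq;
	struct task_struct;

	void arch_start_context_switch(void);	/* was arch_enter_lazy_cpu_mode() */
	void arch_end_context_switch(void);	/* was arch_leave_lazy_cpu_mode() */

	/* kernel/sched.c (generic side) opens the batching window ... */
	static void context_switch_sketch(struct rq *rq, struct task_struct *prev,
					  struct task_struct *next)
	{
		arch_start_context_switch();
		/* ... switch_mm()/active_mm handling, then switch_to() ... */
	}

	/* ... and the x86 __switch_to() closes it before restoring FPU state. */
	static void switch_to_sketch(struct task_struct *prev_p,
				     struct task_struct *next_p)
	{
		/* ... TLS, stack, I/O bitmap updates (queueable under pvops) ... */
		arch_end_context_switch();
		/* ... FPU and segment state now see real hardware values ... */
	}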
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 0617d5cc9712..7b28abac323f 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -1420,19 +1420,17 @@ void paravirt_enter_lazy_mmu(void);
 void paravirt_leave_lazy_mmu(void);
 void paravirt_leave_lazy(enum paravirt_lazy_mode mode);
 
-#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
-static inline void arch_enter_lazy_cpu_mode(void)
+#define __HAVE_ARCH_START_CONTEXT_SWITCH
+static inline void arch_start_context_switch(void)
 {
 	PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
 }
 
-static inline void arch_leave_lazy_cpu_mode(void)
+static inline void arch_end_context_switch(void)
 {
 	PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
 }
 
-void arch_flush_lazy_cpu_mode(void);
-
 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 static inline void arch_enter_lazy_mmu_mode(void)
 {
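
For readers outside arch/x86: PVOP_VCALL0() compiles to a patchable call through the named pv_ops slot, so on bare hardware these wrappers cost almost nothing, while a hypervisor backend can hook them. A simplified model of the dispatch, ignoring the runtime-patching machinery; the my_hv_* names are hypothetical stand-ins for a backend such as Xen's:

	/* Simplified model of what the PVOP_VCALL0() wrappers amount to.
	 * The my_hv_* functions are hypothetical; real backends live in
	 * arch/x86/xen/ and fill these slots at boot. */
	struct pv_lazy_ops {
		void (*enter)(void);		/* open a batching window */
		void (*leave)(void);		/* flush it and close it  */
	};

	static void my_hv_start_batch(void) { /* begin queuing hypercalls */ }
	static void my_hv_end_batch(void)   { /* submit everything queued */ }

	static struct pv_lazy_ops lazy_mode = {
		.enter = my_hv_start_batch,	/* a no-op on bare hardware */
		.leave = my_hv_end_batch,
	};

	static inline void arch_start_context_switch(void)
	{
		lazy_mode.enter();	/* roughly what PVOP_VCALL0() does */
	}

	static inline void arch_end_context_switch(void)
	{
		lazy_mode.leave();
	}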
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 8ab250ac498b..5eea9548216b 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -301,19 +301,6 @@ void arch_flush_lazy_mmu_mode(void)
 	preempt_enable();
 }
 
-void arch_flush_lazy_cpu_mode(void)
-{
-	preempt_disable();
-
-	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
-		WARN_ON(preempt_count() == 1);
-		arch_leave_lazy_cpu_mode();
-		arch_enter_lazy_cpu_mode();
-	}
-
-	preempt_enable();
-}
-
 struct pv_info pv_info = {
 	.name = "bare hardware",
 	.paravirt_enabled = 0,
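
The deleted function implemented the generic "flush" idiom: with preemption disabled, leave and immediately re-enter the active lazy mode so anything already queued is forced out to the hypervisor. The MMU variant just above the hunk keeps that shape; roughly (shape only, see the real function above):

	/* The flush idiom the removed CPU variant shared with the surviving
	 * arch_flush_lazy_mmu_mode(). */
	void flush_lazy_mmu_sketch(void)
	{
		preempt_disable();

		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			/* lazy mode active while preemptible is a bug, hence
			 * the WARN_ON(preempt_count() == 1) in the original */
			arch_leave_lazy_mmu_mode();	/* drain queued updates */
			arch_enter_lazy_mmu_mode();	/* resume batching */
		}

		preempt_enable();
	}

The CPU flush can simply go: once lazy CPU mode exists only between arch_start_context_switch() and arch_end_context_switch(), a region that runs with preemption disabled, no other code can observe it mid-flight.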
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 14014d766cad..57e49a8278a9 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -407,7 +407,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * done before math_state_restore, so the TS bit is up
 	 * to date.
 	 */
-	arch_leave_lazy_cpu_mode();
+	arch_end_context_switch();
 
 	/* If the task has used fpu the last 5 timeslices, just do a full
 	 * restore of the math state immediately to avoid the trap; the
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index abb7e6a7f0c6..7115e6085326 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -428,7 +428,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * done before math_state_restore, so the TS bit is up
 	 * to date.
 	 */
-	arch_leave_lazy_cpu_mode();
+	arch_end_context_switch();
 
 	/*
 	 * Switch FS and GS.
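
Both call sites preserve the ordering constraint spelled out in the surrounding comment: the batched updates must be written back before math_state_restore() consults CR0.TS. In outline (a hypothetical condensation of the surrounding __switch_to() code):

	/* Ordering sketch: close the batching window before FPU handling,
	 * so a queued CR0.TS update cannot be seen stale. */
	static void fpu_after_end_sketch(struct task_struct *next_p)
	{
		arch_end_context_switch();	/* flushes any deferred TS update */

		/* heuristic from the surrounding code: restore eagerly if the
		 * task used the FPU recently, avoiding a #NM trap later */
		if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
			math_state_restore();	/* tests the now-correct TS bit */
	}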
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index cb6afa4ec95c..6b98f87232ac 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1119,10 +1119,8 @@ static void drop_other_mm_ref(void *info)
 
 	/* If this cpu still has a stale cr3 reference, then make sure
 	   it has been flushed. */
-	if (percpu_read(xen_current_cr3) == __pa(mm->pgd)) {
+	if (percpu_read(xen_current_cr3) == __pa(mm->pgd))
 		load_cr3(swapper_pg_dir);
-		arch_flush_lazy_cpu_mode();
-	}
 }
 
 static void xen_drop_mm_ref(struct mm_struct *mm)
@@ -1135,7 +1133,6 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
 		load_cr3(swapper_pg_dir);
 	else
 		leave_mm(smp_processor_id());
-	arch_flush_lazy_cpu_mode();
 }
 
 /* Get the "official" set of cpus referring to our pagetable. */
diff --git a/include/asm-frv/pgtable.h b/include/asm-frv/pgtable.h
index e16fdb1f4f4f..235e34a7a340 100644
--- a/include/asm-frv/pgtable.h
+++ b/include/asm-frv/pgtable.h
@@ -73,8 +73,8 @@ static inline int pte_file(pte_t pte) { return 0; }
 #define pgtable_cache_init()		do {} while (0)
 #define arch_enter_lazy_mmu_mode()	do {} while (0)
 #define arch_leave_lazy_mmu_mode()	do {} while (0)
-#define arch_enter_lazy_cpu_mode()	do {} while (0)
-#define arch_leave_lazy_cpu_mode()	do {} while (0)
+
+#define arch_start_context_switch()	do {} while (0)
 
 #else /* !CONFIG_MMU */
 /*****************************************************************************/
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 8e6d0ca70aba..922f03671dd8 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -280,17 +280,18 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
 #endif
 
 /*
- * A facility to provide batching of the reload of page tables with the
- * actual context switch code for paravirtualized guests. By convention,
- * only one of the lazy modes (CPU, MMU) should be active at any given
- * time, entry should never be nested, and entry and exits should always
- * be paired. This is for sanity of maintaining and reasoning about the
- * kernel code.
+ * A facility to provide batching of the reload of page tables and
+ * other process state with the actual context switch code for
+ * paravirtualized guests. By convention, only one of the batched
+ * update (lazy) modes (CPU, MMU) should be active at any given time,
+ * entry should never be nested, and entry and exits should always be
+ * paired. This is for sanity of maintaining and reasoning about the
+ * kernel code. In this case, the exit (end of the context switch) is
+ * in architecture-specific code, and so doesn't need a generic
+ * definition.
  */
-#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
-#define arch_enter_lazy_cpu_mode()	do {} while (0)
-#define arch_leave_lazy_cpu_mode()	do {} while (0)
-#define arch_flush_lazy_cpu_mode()	do {} while (0)
+#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
+#define arch_start_context_switch()	do {} while (0)
 #endif
 
 #ifndef __HAVE_PFNMAP_TRACKING
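
The #ifndef guard is the usual asm-generic override pattern: an architecture that supplies its own hook defines the __HAVE_ARCH_* marker before this header is seen (as the paravirt.h hunk above does), and every other architecture silently gets the no-op:

	/* Arch header (here, asm/paravirt.h), seen first: */
	#define __HAVE_ARCH_START_CONTEXT_SWITCH
	static inline void arch_start_context_switch(void)
	{
		PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
	}

	/* asm-generic/pgtable.h fallback, compiled away when the arch
	 * provided its own definition: */
	#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
	#define arch_start_context_switch()	do {} while (0)
	#endif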
diff --git a/kernel/sched.c b/kernel/sched.c
index 5757e03cfac0..7530fdd7c982 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2746,7 +2746,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	 * combine the page table reload and the switch backend into
 	 * one hypercall.
 	 */
-	arch_enter_lazy_cpu_mode();
+	arch_start_context_switch();
 
 	if (unlikely(!mm)) {
 		next->active_mm = oldmm;
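
This call is what the comment above it is about: between arch_start_context_switch() here and arch_end_context_switch() in __switch_to(), a paravirt guest can queue the page-table reload and the other privileged updates and submit them together. A hypothetical backend, to make the win concrete; the queue and submission call are invented for this sketch, while Xen's real machinery is its multicall infrastructure under arch/x86/xen/:

	/* Hypothetical backend illustrating the batching win. Everything
	 * named my_hv_* is invented. */
	#define MY_HV_MAX_BATCH 16

	static unsigned long my_hv_queue[MY_HV_MAX_BATCH];
	static unsigned int my_hv_count;

	static void my_hv_start_context_switch(void)
	{
		my_hv_count = 0;	/* queue instead of trapping per update */
	}

	static void my_hv_queue_op(unsigned long op)	/* e.g. a cr3 load */
	{
		my_hv_queue[my_hv_count++] = op;
	}

	static void my_hv_end_context_switch(void)
	{
		/* one exit to the hypervisor instead of one per operation:
		 * my_hv_submit(my_hv_queue, my_hv_count);  -- invented call */
		my_hv_count = 0;
	}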