author		Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2009-02-18 02:53:19 -0500
committer	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2009-03-30 02:36:02 -0400
commit		2829b449276aed45f3d649efb21e3418e39dd5d1 (patch)
tree		eb16821bc9ad330f8c2f289510603ca528feaeae /arch/x86
parent		224101ed69d3fbb486868e0f6e0f9fa37302efb4 (diff)
x86/paravirt: allow preemption with lazy mmu mode
Impact: remove obsolete checks, simplification

Lift restrictions on preemption with lazy mmu mode, as it is now allowed.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
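For context, the usage pattern this relaxation affects looks roughly like the sketch below. This is illustrative only and not part of the patch; the function name and parameters are hypothetical. A caller batches page-table updates between arch_enter_lazy_mmu_mode() and arch_leave_lazy_mmu_mode(), and after this change it may be preempted inside that window, because paravirt_start_context_switch() flushes the pending batch and TIF_LAZY_MMU_UPDATES causes lazy mode to be resumed when the task is switched back in.

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Illustrative sketch only -- not part of this patch. */
static void example_update_range(struct mm_struct *mm, unsigned long addr,
                                 pte_t *ptep, unsigned long pfn,
                                 unsigned long nr, pgprot_t prot)
{
        unsigned long i;

        arch_enter_lazy_mmu_mode();     /* start batching PTE updates */

        for (i = 0; i < nr; i++) {
                set_pte_at(mm, addr + i * PAGE_SIZE, ptep + i,
                           pfn_pte(pfn + i, prot));
                /*
                 * With this commit the task may be preempted here:
                 * paravirt_start_context_switch() flushes the pending
                 * batch and sets TIF_LAZY_MMU_UPDATES so lazy mode is
                 * re-entered when the task runs again.
                 */
        }

        arch_leave_lazy_mmu_mode();     /* issue any remaining updates */
}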
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/paravirt.c	7
-rw-r--r--	arch/x86/xen/mmu.c	8
2 files changed, 5 insertions, 10 deletions
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index cf1437503ba..bf2e86eee80 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -247,7 +247,6 @@ static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LA
 static inline void enter_lazy(enum paravirt_lazy_mode mode)
 {
 	BUG_ON(__get_cpu_var(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
-	BUG_ON(preemptible());
 
 	__get_cpu_var(paravirt_lazy_mode) = mode;
 }
@@ -255,7 +254,6 @@ static inline void enter_lazy(enum paravirt_lazy_mode mode)
 static void leave_lazy(enum paravirt_lazy_mode mode)
 {
 	BUG_ON(__get_cpu_var(paravirt_lazy_mode) != mode);
-	BUG_ON(preemptible());
 
 	__get_cpu_var(paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
 }
@@ -272,6 +270,8 @@ void paravirt_leave_lazy_mmu(void)
 
 void paravirt_start_context_switch(struct task_struct *prev)
 {
+	BUG_ON(preemptible());
+
 	if (percpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
 		arch_leave_lazy_mmu_mode();
 		set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
@@ -281,6 +281,8 @@ void paravirt_start_context_switch(struct task_struct *prev)
 
 void paravirt_end_context_switch(struct task_struct *next)
 {
+	BUG_ON(preemptible());
+
 	leave_lazy(PARAVIRT_LAZY_CPU);
 
 	if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
@@ -300,7 +302,6 @@ void arch_flush_lazy_mmu_mode(void)
 	preempt_disable();
 
 	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
-		WARN_ON(preempt_count() == 1);
 		arch_leave_lazy_mmu_mode();
 		arch_enter_lazy_mmu_mode();
 	}
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index f5f8faa4f76..3f2d0fe5e6a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -419,10 +419,6 @@ void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
 void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
 		    pte_t *ptep, pte_t pteval)
 {
-	/* updates to init_mm may be done without lock */
-	if (mm == &init_mm)
-		preempt_disable();
-
 	ADD_STATS(set_pte_at, 1);
 //	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
 	ADD_STATS(set_pte_at_current, mm == current->mm);
@@ -443,9 +439,7 @@ void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
 	}
 	xen_set_pte(ptep, pteval);
 
-out:
-	if (mm == &init_mm)
-		preempt_enable();
+out:	return;
 }
 
 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,