author:    Zachary Amsden <zach@vmware.com>	2007-04-08 19:04:01 -0400
committer: Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-04-08 22:47:55 -0400
commit:    49f19710512c825aaea73b9207b3a848027cda1d (patch)
tree:      06da31bd9a84273e12aa43f536f90eb8146ff92e /arch/i386/mm
parent:    a5bfffac645a7b2d8119f8bbae34df5c94832799 (diff)
[PATCH] Proper fix for highmem kmap_atomic functions for VMI for 2.6.21
Since lazy MMU batching mode still allows interrupts to enter, it is possible for interrupt handlers to try to use kmap_atomic, which fails when lazy mode is active, since the PTE update to highmem will be delayed. The best workaround is to issue an explicit flush in the kmap_atomic functions, since this is the only way nested PTE updates can happen in an interrupt handler.

Thanks to Jeremy Fitzhardinge for noting the bug and suggestions on a fix.

This patch gets reverted again when we start 2.6.22 and the bug gets fixed differently.

Signed-off-by: Zachary Amsden <zach@vmware.com>
Cc: Andi Kleen <ak@muc.de>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
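For context, here is a minimal sketch (not the kernel's actual implementation) of the lazy MMU batching pattern the message describes: while lazy mode is active, set_pte() updates are queued and only applied when the mode is left or explicitly flushed, so an interrupt handler that calls kmap_atomic() in between would touch a mapping whose PTE write is still pending. The queue, QUEUE_MAX, apply_update() and the simplified set_pte() signature below are illustrative stand-ins; only the arch_*_lazy_mmu_mode() hook names mirror the real interface.

#include <stdbool.h>
#include <stddef.h>

#define QUEUE_MAX 32

struct pte_update { unsigned long va; unsigned long pte; };

static struct pte_update queue[QUEUE_MAX];
static size_t queued;
static bool lazy_mode;

/* Stand-in for the real PTE write (a hypercall or hardware update). */
static void apply_update(unsigned long va, unsigned long pte)
{
	(void)va;
	(void)pte;
}

void arch_enter_lazy_mmu_mode(void)
{
	lazy_mode = true;
}

void arch_flush_lazy_mmu_mode(void)
{
	/* Apply every queued PTE update immediately. */
	for (size_t i = 0; i < queued; i++)
		apply_update(queue[i].va, queue[i].pte);
	queued = 0;
}

void arch_leave_lazy_mmu_mode(void)
{
	arch_flush_lazy_mmu_mode();
	lazy_mode = false;
}

/* While lazy mode is on, set_pte() batches instead of applying at once. */
void set_pte(unsigned long va, unsigned long pte)
{
	if (lazy_mode && queued < QUEUE_MAX) {
		queue[queued++] = (struct pte_update){ va, pte };
		return;
	}
	apply_update(va, pte);
}

/*
 * Hazard: an interrupt taken while lazy_mode is true may call kmap_atomic(),
 * whose set_pte() only queues the highmem mapping, so dereferencing the
 * returned address would fault.  The patch below works around this by
 * calling arch_flush_lazy_mmu_mode() right after set_pte() in the
 * kmap_atomic functions, forcing the queued update out before use.
 */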
Diffstat (limited to 'arch/i386/mm')
-rw-r--r--	arch/i386/mm/highmem.c	2
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
index bb2de1089add..ac70d09df7ee 100644
--- a/arch/i386/mm/highmem.c
+++ b/arch/i386/mm/highmem.c
@@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page, enum km_type type)
 
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
+	arch_flush_lazy_mmu_mode();
 
 	return (void*) vaddr;
 }
@@ -82,6 +83,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
+	arch_flush_lazy_mmu_mode();
 
 	return (void*) vaddr;
 }