author     Jeremy Fitzhardinge <jeremy@goop.org>     2006-12-06 23:32:22 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>  2006-12-07 11:39:21 -0500
commit     3b17979bda74493633364c2c263b452b7788e350 (patch)
tree       3b15ec560ae56bb7730eedb3e7e8594ced2f7029 /arch
parent     ad76fb6b5a5183255279e0ab5260715481770678 (diff)
[PATCH] Fix kunmap_atomic's use of kpte_clear_flush()
kunmap_atomic() will call kpte_clear_flush() with vaddr/ptep arguments which
don't correspond if the vaddr is just a normal lowmem address (i.e., not in
the KMAP area).  This patch makes sure that the pte is only cleared if the
kmap area was actually used for the mapping.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Zachary Amsden <zach@vmware.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
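For context, the mismatch arises because kmap_atomic() takes a fast path for
lowmem pages and simply returns their direct-mapped address, so the pointer
later handed to kunmap_atomic() never went through the KMAP fixmap area.  A
minimal sketch of that caller path, simplified from the arch/i386/mm/highmem.c
of the same era (debug checks trimmed; not part of this patch):

void *kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);	/* lowmem: plain direct-mapped address */

	/* highmem: map the page through a per-CPU KMAP fixmap slot */
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));

	return (void *) vaddr;
}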
Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/mm/highmem.c | 18 ++++++++----------
1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
index 178bbfe6cbac..e0fa6cb655a8 100644
--- a/arch/i386/mm/highmem.c
+++ b/arch/i386/mm/highmem.c
@@ -50,22 +50,20 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-#ifdef CONFIG_DEBUG_HIGHMEM
-	if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) {
-		pagefault_enable();
-		return;
-	}
-
-	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
-		BUG();
-#endif
 	/*
 	 * Force other mappings to Oops if they'll try to access this pte
 	 * without first remap it.  Keeping stale mappings around is a bad idea
 	 * also, in case the page changes cacheability attributes or becomes
 	 * a protected page in a hypervisor.
 	 */
-	kpte_clear_flush(kmap_pte-idx, vaddr);
+	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+		kpte_clear_flush(kmap_pte-idx, vaddr);
+	else {
+#ifdef CONFIG_DEBUG_HIGHMEM
+		BUG_ON(vaddr < PAGE_OFFSET);
+		BUG_ON(vaddr >= (unsigned long)high_memory);
+#endif
+	}
 
 	pagefault_enable();
 }
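As a usage illustration (not part of this commit; copy_onto_page() is a
hypothetical helper), a caller pairs the two functions like this.  With a
highmem page the vaddr comes from the KMAP fixmap area and the new equality
test matches; with a lowmem page it is a direct-mapped address, which before
this patch was still passed to kpte_clear_flush() against an unrelated
fixmap pte.

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical caller: copy a buffer (len <= PAGE_SIZE) into a page
 * via an atomic kmap. */
static void copy_onto_page(struct page *page, const void *buf, size_t len)
{
	char *vaddr = kmap_atomic(page, KM_USER0);

	memcpy(vaddr, buf, len);
	kunmap_atomic(vaddr, KM_USER0);	/* a lowmem vaddr now takes the else branch */
}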