diff options
author | Zachary Amsden <zach@vmware.com> | 2006-10-01 02:29:35 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-10-01 03:39:34 -0400 |
commit | 23002d88be309a7c78db69363c9d933a29a3b0bb (patch) | |
tree | ecd99ef70c0f38ff81bf2e3c6d7caeed2b88b41e /arch/i386 | |
parent | 25e4df5bae333a06cd2c9b88baf14432652dc9f7 (diff) |
[PATCH] paravirt: kpte flush
Create a new PTE function which combines clearing a kernel PTE with the
subsequent flush. This allows the two to be easily combined into a single
hypercall or paravirt-op. More subtly, reverse the order of the flush for
kmap_atomic. Instead of flushing on establishing a mapping, flush on clearing
a mapping. This eliminates the possibility of leaving stale kmap entries
which may still have valid TLB mappings. This is required for direct mode
hypervisors, which need to reprotect all mappings of a given page when
changing the page type from a normal page to a protected page (such as a page
table or descriptor table page). But it also provides some nicer semantics
for real hardware, by providing extra debug-proofing against using stale
mappings, as well as ensuring that no stale mappings exist when changing the
cacheability attributes of a page, which could lead to cache conflicts when
two different types of mappings exist for the same page.
Signed-off-by: Zachary Amsden <zach@vmware.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/i386')
-rw-r--r-- | arch/i386/mm/highmem.c | 18 |
1 file changed, 7 insertions, 11 deletions
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c index ba44000b9069..f9f647cdbc7b 100644 --- a/arch/i386/mm/highmem.c +++ b/arch/i386/mm/highmem.c | |||
@@ -38,22 +38,19 @@ void *kmap_atomic(struct page *page, enum km_type type) | |||
38 | 38 | ||
39 | idx = type + KM_TYPE_NR*smp_processor_id(); | 39 | idx = type + KM_TYPE_NR*smp_processor_id(); |
40 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | 40 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); |
41 | #ifdef CONFIG_DEBUG_HIGHMEM | ||
42 | if (!pte_none(*(kmap_pte-idx))) | 41 | if (!pte_none(*(kmap_pte-idx))) |
43 | BUG(); | 42 | BUG(); |
44 | #endif | ||
45 | set_pte(kmap_pte-idx, mk_pte(page, kmap_prot)); | 43 | set_pte(kmap_pte-idx, mk_pte(page, kmap_prot)); |
46 | __flush_tlb_one(vaddr); | ||
47 | 44 | ||
48 | return (void*) vaddr; | 45 | return (void*) vaddr; |
49 | } | 46 | } |
50 | 47 | ||
51 | void kunmap_atomic(void *kvaddr, enum km_type type) | 48 | void kunmap_atomic(void *kvaddr, enum km_type type) |
52 | { | 49 | { |
53 | #ifdef CONFIG_DEBUG_HIGHMEM | ||
54 | unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; | 50 | unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; |
55 | enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); | 51 | enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); |
56 | 52 | ||
53 | #ifdef CONFIG_DEBUG_HIGHMEM | ||
57 | if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) { | 54 | if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) { |
58 | dec_preempt_count(); | 55 | dec_preempt_count(); |
59 | preempt_check_resched(); | 56 | preempt_check_resched(); |
@@ -62,14 +59,14 @@ void kunmap_atomic(void *kvaddr, enum km_type type) | |||
62 | 59 | ||
63 | if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx)) | 60 | if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx)) |
64 | BUG(); | 61 | BUG(); |
65 | 62 | #endif | |
66 | /* | 63 | /* |
67 | * force other mappings to Oops if they'll try to access | 64 | * Force other mappings to Oops if they'll try to access this pte |
68 | * this pte without first remap it | 65 | * without first remap it. Keeping stale mappings around is a bad idea |
66 | * also, in case the page changes cacheability attributes or becomes | ||
67 | * a protected page in a hypervisor. | ||
69 | */ | 68 | */ |
70 | pte_clear(&init_mm, vaddr, kmap_pte-idx); | 69 | kpte_clear_flush(kmap_pte-idx, vaddr); |
71 | __flush_tlb_one(vaddr); | ||
72 | #endif | ||
73 | 70 | ||
74 | dec_preempt_count(); | 71 | dec_preempt_count(); |
75 | preempt_check_resched(); | 72 | preempt_check_resched(); |
@@ -88,7 +85,6 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) | |||
88 | idx = type + KM_TYPE_NR*smp_processor_id(); | 85 | idx = type + KM_TYPE_NR*smp_processor_id(); |
89 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | 86 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); |
90 | set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot)); | 87 | set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot)); |
91 | __flush_tlb_one(vaddr); | ||
92 | 88 | ||
93 | return (void*) vaddr; | 89 | return (void*) vaddr; |
94 | } | 90 | } |