author     Jeremy Fitzhardinge <jeremy@goop.org>    2007-05-02 13:27:15 -0400
committer  Andi Kleen <andi@basil.nowhere.org>      2007-05-02 13:27:15 -0400
commit     ce6234b5298902aaec831a67d5f8d9bd2ef5a488 (patch)
tree       939c22684e11a4f5f17abb89c4898f016e878e21 /mm/highmem.c
parent     a27fe809b82c5e18932fcceded28d0d1481ce7bb (diff)
[PATCH] i386: PARAVIRT: add kmap_atomic_pte for mapping highpte pages
Xen and VMI both have special requirements when mapping a highmem pte
page into the kernel address space.  These can be dealt with by adding
a new kmap_atomic_pte() function for mapping highptes, and hooking it
into the paravirt_ops infrastructure.

Xen specifically wants to map the pte page RO, so this patch exposes a
helper function, kmap_atomic_prot, which maps the page with the
specified page protections.

This also adds a kmap_flush_unused() function to clear out the cached
kmap mappings.  Xen needs this to clear out any potential stray RW
mappings of pages which will become part of a pagetable.

[ Zach - vmi.c will need some attention after this patch.  It wasn't
  immediately obvious to me what needs to be done. ]

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Zachary Amsden <zach@vmware.com>
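As a rough sketch (not part of this patch), the fragment below shows how a
paravirt backend could implement its kmap_atomic_pte hook on top of the new
kmap_atomic_prot() helper.  The function name and the PagePinned() test for
pages already wired into a pagetable are assumptions used only for
illustration.

#include <linux/mm.h>
#include <linux/highmem.h>

/*
 * Illustrative only: a backend's kmap_atomic_pte hook built on
 * kmap_atomic_prot().  The hook name and the PagePinned() check are
 * assumptions, not code from this patch.
 */
static void *example_kmap_atomic_pte(struct page *page, enum km_type type)
{
	pgprot_t prot = PAGE_KERNEL;

	/* A page already in use as a pagetable must not gain an RW alias. */
	if (PagePinned(page))
		prot = PAGE_KERNEL_RO;

	/* Map with caller-chosen protections instead of forcing RW. */
	return kmap_atomic_prot(page, type, prot);
}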
Diffstat (limited to 'mm/highmem.c')
-rw-r--r--	mm/highmem.c	9
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/mm/highmem.c b/mm/highmem.c
index 51e1c1995fe..be8f8d36a8b 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -99,6 +99,15 @@ static void flush_all_zero_pkmaps(void)
 	flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
 }
 
+/* Flush all unused kmap mappings in order to remove stray
+   mappings. */
+void kmap_flush_unused(void)
+{
+	spin_lock(&kmap_lock);
+	flush_all_zero_pkmaps();
+	spin_unlock(&kmap_lock);
+}
+
 static inline unsigned long map_new_virtual(struct page *page)
 {
 	unsigned long vaddr;
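For context, a hedged sketch of how a caller such as a hypervisor backend
might use the new kmap_flush_unused() before a page becomes part of a
pagetable; everything except kmap_flush_unused() itself is a made-up name for
illustration.

#include <linux/mm.h>
#include <linux/highmem.h>

/*
 * Illustrative only: flush stale pkmap entries so no leftover RW kernel
 * alias of the page survives before it is made a read-only pagetable
 * page.  Only kmap_flush_unused() comes from this patch.
 */
static void example_prepare_pagetable_page(struct page *page)
{
	/* Drop cached but currently unused high-memory mappings. */
	kmap_flush_unused();

	/* Backend-specific: write-protect and pin 'page' here. */
}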