author	Jeremy Fitzhardinge <jeremy@goop.org>	2007-05-02 13:27:15 -0400
committer	Andi Kleen <andi@basil.nowhere.org>	2007-05-02 13:27:15 -0400
commit	ce6234b5298902aaec831a67d5f8d9bd2ef5a488 (patch)
tree	939c22684e11a4f5f17abb89c4898f016e878e21 /include/linux/highmem.h
parent	a27fe809b82c5e18932fcceded28d0d1481ce7bb (diff)
[PATCH] i386: PARAVIRT: add kmap_atomic_pte for mapping highpte pages
Xen and VMI both have special requirements when mapping a highmem pte page into the kernel address space. These can be dealt with by adding a new kmap_atomic_pte() function for mapping highptes, and hooking it into the paravirt_ops infrastructure.

Xen specifically wants to map the pte page RO, so this patch exposes a helper function, kmap_atomic_prot, which maps the page with the specified page protections.

This also adds a kmap_flush_unused() function to clear out the cached kmap mappings. Xen needs this to clear out any potential stray RW mappings of pages which will become part of a pagetable.

[ Zach - vmi.c will need some attention after this patch. It wasn't immediately obvious to me what needs to be done. ]

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Zachary Amsden <zach@vmware.com>
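Not part of the patch itself, but a rough sketch of how a backend might use the new hooks: a Xen-style kmap_atomic_pte implementation could simply forward to kmap_atomic_prot with a read-only protection. The xen_kmap_atomic_pte name and the unconditional use of PAGE_KERNEL_RO below are illustrative assumptions, not code from this series.

/*
 * Illustrative sketch only -- assumes the i386 kmap_atomic_prot() added
 * elsewhere in this series and the km_type slots of this kernel era.
 */
#include <linux/highmem.h>
#include <asm/pgtable.h>

static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
{
	/*
	 * Map the highmem pte page read-only so the kernel never holds a
	 * writable alias of a page that is part of a pagetable.
	 */
	return kmap_atomic_prot(page, type, PAGE_KERNEL_RO);
}

A native or VMI backend would instead plug plain kmap_atomic() (or an equivalent) into the same paravirt_ops hook point.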
Diffstat (limited to 'include/linux/highmem.h')
-rw-r--r--	include/linux/highmem.h	|  6 ++++++
1 file changed, 6 insertions, 0 deletions
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 645d440807c2..bca8e2dfa355 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -27,6 +27,8 @@ static inline void flush_kernel_dcache_page(struct page *page)
 unsigned int nr_free_highpages(void);
 extern unsigned long totalhigh_pages;
 
+void kmap_flush_unused(void);
+
 #else /* CONFIG_HIGHMEM */
 
 static inline unsigned int nr_free_highpages(void) { return 0; }
@@ -44,9 +46,13 @@ static inline void *kmap(struct page *page)
 
 #define kmap_atomic(page, idx) \
 	({ pagefault_disable(); page_address(page); })
+#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)
+
 #define kunmap_atomic(addr, idx)	do { pagefault_enable(); } while (0)
 #define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
 #define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
+
+#define kmap_flush_unused()	do {} while(0)
 #endif
 
 #endif /* CONFIG_HIGHMEM */
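For the !CONFIG_HIGHMEM side shown above, the new interfaces are deliberately cheap: kmap_atomic_prot() discards the protection argument and kmap_flush_unused() compiles away, so callers can use them unconditionally. A hypothetical caller (not from this patch) might look like the following sketch.

/* Hypothetical caller -- illustrative only, not code from this series. */
#include <linux/highmem.h>
#include <asm/pgtable.h>

static void inspect_pte_page(struct page *page)
{
	pte_t *ptes;

	/*
	 * Flush cached kmap slots so no stray RW alias of an old mapping
	 * lingers; a no-op on kernels without CONFIG_HIGHMEM.
	 */
	kmap_flush_unused();

	/* Request a read-only mapping; prot is ignored without highmem. */
	ptes = kmap_atomic_prot(page, KM_PTE0, PAGE_KERNEL_RO);
	/* ... examine the pte entries here ... */
	kunmap_atomic(ptes, KM_PTE0);
}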