author     Jeremy Fitzhardinge <jeremy@goop.org>    2007-05-02 13:27:15 -0400
committer  Andi Kleen <andi@basil.nowhere.org>      2007-05-02 13:27:15 -0400
commit     ce6234b5298902aaec831a67d5f8d9bd2ef5a488 (patch)
tree       939c22684e11a4f5f17abb89c4898f016e878e21 /arch/i386
parent     a27fe809b82c5e18932fcceded28d0d1481ce7bb (diff)
[PATCH] i386: PARAVIRT: add kmap_atomic_pte for mapping highpte pages
Xen and VMI both have special requirements when mapping a highmem pte page into the kernel address space. These can be dealt with by adding a new kmap_atomic_pte() function for mapping highptes, and hooking it into the paravirt_ops infrastructure.

Xen specifically wants to map the pte page RO, so this patch exposes a helper function, kmap_atomic_prot(), which maps the page with the specified page protections.

This also adds a kmap_flush_unused() function to clear out the cached kmap mappings. Xen needs this to clear out any potential stray RW mappings of pages which will become part of a pagetable.

[ Zach - vmi.c will need some attention after this patch. It wasn't immediately obvious to me what needs to be done. ]

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Zachary Amsden <zach@vmware.com>
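To make the intended split concrete, here is a minimal sketch of how a hypervisor backend might implement the new hook on top of kmap_atomic_prot(). The function name xen_kmap_atomic_pte and the unconditional read-only policy are illustrative assumptions, not part of this patch:

	/* Illustrative backend for paravirt_ops.kmap_atomic_pte (not in
	 * this patch): map a highmem pte page read-only, so the kernel
	 * never holds a stray writable mapping of a pagetable page. */
	static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
	{
		return kmap_atomic_prot(page, type, PAGE_KERNEL_RO);
	}

A backend would install such a function in its paravirt_ops instance in place of the native kmap_atomic default shown in the paravirt.c hunk below.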
Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/kernel/paravirt.c  5
-rw-r--r--  arch/i386/mm/highmem.c       9
2 files changed, 12 insertions(+), 2 deletions(-)
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index 13f41b5c887a..596f382c641c 100644
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -20,6 +20,7 @@
 #include <linux/efi.h>
 #include <linux/bcd.h>
 #include <linux/start_kernel.h>
+#include <linux/highmem.h>
 
 #include <asm/bug.h>
 #include <asm/paravirt.h>
@@ -316,6 +317,10 @@ struct paravirt_ops paravirt_ops = {
 
 	.ptep_get_and_clear = native_ptep_get_and_clear,
 
+#ifdef CONFIG_HIGHPTE
+	.kmap_atomic_pte = kmap_atomic,
+#endif
+
 #ifdef CONFIG_X86_PAE
 	.set_pte_atomic = native_set_pte_atomic,
 	.set_pte_present = native_set_pte_present,
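With CONFIG_HIGHPTE, pte pages may live in highmem, so callers reach them through this hook rather than calling kmap_atomic() directly. As a rough sketch of the call path (reconstructed from the companion paravirt change, so details may differ), pte_offset_map() ends up expanding to something like:

	/* Sketch: map the pte page behind a pmd entry through the
	 * paravirt hook, then index to the pte for 'address'. */
	#define pte_offset_map(dir, address) \
		((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) + \
		 pte_index(address))

The native default is plain kmap_atomic(), so bare-metal behaviour is unchanged; only backends such as Xen substitute a protection-aware mapping.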
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
index ac70d09df7ee..a1a21abf742e 100644
--- a/arch/i386/mm/highmem.c
+++ b/arch/i386/mm/highmem.c
@@ -26,7 +26,7 @@ void kunmap(struct page *page)
  * However when holding an atomic kmap is is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic(struct page *page, enum km_type type)
+void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 {
 	enum fixed_addresses idx;
 	unsigned long vaddr;
@@ -41,12 +41,17 @@ void *kmap_atomic(struct page *page, enum km_type type)
 	return page_address(page);
 
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
+	set_pte(kmap_pte-idx, mk_pte(page, prot));
 	arch_flush_lazy_mmu_mode();
 
 	return (void*) vaddr;
 }
 
+void *kmap_atomic(struct page *page, enum km_type type)
+{
+	return kmap_atomic_prot(page, type, kmap_prot);
+}
+
 void kunmap_atomic(void *kvaddr, enum km_type type)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
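For reference, a minimal usage sketch of the reworked interface (the helper name read_first_word_ro is hypothetical): kmap_atomic() keeps its old behaviour by delegating to kmap_atomic_prot() with the default kmap_prot, while callers with special needs pass their own protections:

	#include <linux/highmem.h>
	#include <asm/pgtable.h>

	/* Hypothetical example: briefly map a (possibly highmem) page
	 * read-only in an atomic kmap slot, read one word, and unmap.
	 * Sleeping is not allowed while the atomic kmap is held. */
	static unsigned long read_first_word_ro(struct page *page)
	{
		unsigned long *p = kmap_atomic_prot(page, KM_USER0,
						    PAGE_KERNEL_RO);
		unsigned long first = p[0];

		kunmap_atomic(p, KM_USER0);
		return first;
	}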