author    Jeremy Fitzhardinge <jeremy@goop.org>  2007-05-02 13:27:15 -0400
committer Andi Kleen <andi@basil.nowhere.org>    2007-05-02 13:27:15 -0400
commit    ce6234b5298902aaec831a67d5f8d9bd2ef5a488 (patch)
tree      939c22684e11a4f5f17abb89c4898f016e878e21
parent    a27fe809b82c5e18932fcceded28d0d1481ce7bb (diff)
[PATCH] i386: PARAVIRT: add kmap_atomic_pte for mapping highpte pages
Xen and VMI both have special requirements when mapping a highmem pte
page into the kernel address space.  These can be dealt with by adding
a new kmap_atomic_pte() function for mapping highptes, and hooking it
into the paravirt_ops infrastructure.

Xen specifically wants to map the pte page RO, so this patch exposes a
helper function, kmap_atomic_prot, which maps the page with the
specified page protections.

This also adds a kmap_flush_unused() function to clear out the cached
kmap mappings.  Xen needs this to clear out any potential stray RW
mappings of pages which will become part of a pagetable.

[ Zach - vmi.c will need some attention after this patch.  It wasn't
  immediately obvious to me what needs to be done. ]

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Zachary Amsden <zach@vmware.com>
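As a sketch only (not part of this patch): a hypervisor backend could
wire up the two new hooks roughly as below.  The function names
xen_kmap_atomic_pte and xen_prepare_pagetable, and the blanket use of
PAGE_KERNEL_RO, are assumptions for illustration; the real Xen and VMI
wiring is left to later patches.

#include <linux/highmem.h>
#include <asm/pgtable.h>

/* Illustrative: map highmem pte pages read-only, since Xen forbids
 * writable kernel mappings of pages it validates as pagetables. */
static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
{
	return kmap_atomic_prot(page, type, PAGE_KERNEL_RO);
}

/* Illustrative: before a page becomes part of a pagetable, flush the
 * cached kmap entries so no stray RW alias of the page survives. */
static void xen_prepare_pagetable(void)
{
	kmap_flush_unused();
	/* hypervisor-specific pinning would follow here */
}

The backend would then install the mapping hook in its paravirt_ops,
e.g. .kmap_atomic_pte = xen_kmap_atomic_pte, under #ifdef CONFIG_HIGHPTE.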
-rw-r--r--  arch/i386/kernel/paravirt.c   5
-rw-r--r--  arch/i386/mm/highmem.c        9
-rw-r--r--  include/asm-i386/highmem.h    6
-rw-r--r--  include/asm-i386/paravirt.h  15
-rw-r--r--  include/asm-i386/pgtable.h    4
-rw-r--r--  include/linux/highmem.h       6
-rw-r--r--  mm/highmem.c                  9
7 files changed, 50 insertions, 4 deletions
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index 13f41b5c887a..596f382c641c 100644
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -20,6 +20,7 @@
 #include <linux/efi.h>
 #include <linux/bcd.h>
 #include <linux/start_kernel.h>
+#include <linux/highmem.h>
 
 #include <asm/bug.h>
 #include <asm/paravirt.h>
@@ -316,6 +317,10 @@ struct paravirt_ops paravirt_ops = {
 
 	.ptep_get_and_clear = native_ptep_get_and_clear,
 
+#ifdef CONFIG_HIGHPTE
+	.kmap_atomic_pte = kmap_atomic,
+#endif
+
 #ifdef CONFIG_X86_PAE
 	.set_pte_atomic = native_set_pte_atomic,
 	.set_pte_present = native_set_pte_present,
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
index ac70d09df7ee..a1a21abf742e 100644
--- a/arch/i386/mm/highmem.c
+++ b/arch/i386/mm/highmem.c
@@ -26,7 +26,7 @@ void kunmap(struct page *page)
  * However when holding an atomic kmap is is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic(struct page *page, enum km_type type)
+void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 {
 	enum fixed_addresses idx;
 	unsigned long vaddr;
@@ -41,12 +41,17 @@ void *kmap_atomic(struct page *page, enum km_type type)
 		return page_address(page);
 
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
+	set_pte(kmap_pte-idx, mk_pte(page, prot));
 	arch_flush_lazy_mmu_mode();
 
 	return (void*) vaddr;
 }
 
+void *kmap_atomic(struct page *page, enum km_type type)
+{
+	return kmap_atomic_prot(page, type, kmap_prot);
+}
+
 void kunmap_atomic(void *kvaddr, enum km_type type)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
diff --git a/include/asm-i386/highmem.h b/include/asm-i386/highmem.h
index e9a34ebc25d5..13cdcd66fff2 100644
--- a/include/asm-i386/highmem.h
+++ b/include/asm-i386/highmem.h
@@ -24,6 +24,7 @@
 #include <linux/threads.h>
 #include <asm/kmap_types.h>
 #include <asm/tlbflush.h>
+#include <asm/paravirt.h>
 
 /* declarations for highmem.c */
 extern unsigned long highstart_pfn, highend_pfn;
@@ -67,11 +68,16 @@ extern void FASTCALL(kunmap_high(struct page *page));
 
 void *kmap(struct page *page);
 void kunmap(struct page *page);
+void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
 void *kmap_atomic(struct page *page, enum km_type type);
 void kunmap_atomic(void *kvaddr, enum km_type type);
 void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
 struct page *kmap_atomic_to_page(void *ptr);
 
+#ifndef CONFIG_PARAVIRT
+#define kmap_atomic_pte(page, type) kmap_atomic(page, type)
+#endif
+
 #define flush_cache_kmaps() do { } while (0)
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index 10f44af76b49..5048b41428fa 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -16,7 +16,9 @@
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
 #include <linux/cpumask.h>
+#include <asm/kmap_types.h>
 
+struct page;
 struct thread_struct;
 struct Xgt_desc_struct;
 struct tss_struct;
@@ -187,6 +189,10 @@ struct paravirt_ops
 
 	pte_t (*ptep_get_and_clear)(pte_t *ptep);
 
+#ifdef CONFIG_HIGHPTE
+	void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
+#endif
+
 #ifdef CONFIG_X86_PAE
 	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
 	void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte);
@@ -884,6 +890,15 @@ static inline void paravirt_release_pd(unsigned pfn)
 	PVOP_VCALL1(release_pd, pfn);
 }
 
+#ifdef CONFIG_HIGHPTE
+static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
+{
+	unsigned long ret;
+	ret = PVOP_CALL2(unsigned long, kmap_atomic_pte, page, type);
+	return (void *)ret;
+}
+#endif
+
 static inline void pte_update(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep)
 {
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 6599f2aa91b7..befc697821e5 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -476,9 +476,9 @@ extern pte_t *lookup_address(unsigned long address);
 
 #if defined(CONFIG_HIGHPTE)
 #define pte_offset_map(dir, address) \
-	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
+	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
 #define pte_offset_map_nested(dir, address) \
-	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
+	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
 #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
 #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
 #else
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 645d440807c2..bca8e2dfa355 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -27,6 +27,8 @@ static inline void flush_kernel_dcache_page(struct page *page)
 unsigned int nr_free_highpages(void);
 extern unsigned long totalhigh_pages;
 
+void kmap_flush_unused(void);
+
 #else /* CONFIG_HIGHMEM */
 
 static inline unsigned int nr_free_highpages(void) { return 0; }
@@ -44,9 +46,13 @@ static inline void *kmap(struct page *page)
 
 #define kmap_atomic(page, idx) \
 	({ pagefault_disable(); page_address(page); })
+#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)
+
 #define kunmap_atomic(addr, idx) do { pagefault_enable(); } while (0)
 #define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx))
 #define kmap_atomic_to_page(ptr) virt_to_page(ptr)
+
+#define kmap_flush_unused() do {} while(0)
 #endif
 
 #endif /* CONFIG_HIGHMEM */
diff --git a/mm/highmem.c b/mm/highmem.c
index 51e1c1995fec..be8f8d36a8b9 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -99,6 +99,15 @@ static void flush_all_zero_pkmaps(void)
 	flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
 }
 
+/* Flush all unused kmap mappings in order to remove stray
+   mappings. */
+void kmap_flush_unused(void)
+{
+	spin_lock(&kmap_lock);
+	flush_all_zero_pkmaps();
+	spin_unlock(&kmap_lock);
+}
+
 static inline unsigned long map_new_virtual(struct page *page)
 {
 	unsigned long vaddr;