 arch/x86/include/asm/cacheflush.h          |  59
 arch/x86/include/asm/fb.h                  |   6
 arch/x86/include/asm/fixmap.h              |   2
 arch/x86/include/asm/highmem.h             |  25
 arch/x86/include/asm/io.h                  |   2
 arch/x86/include/asm/pat.h                 |   7
 arch/x86/include/asm/pgtable.h             |  19
 arch/x86/include/asm/pgtable_32_types.h    |   2
 arch/x86/include/asm/pgtable_types.h       |  96
 arch/x86/mm/dump_pagetables.c              |  24
 arch/x86/mm/init.c                         |  37
 arch/x86/mm/init_64.c                      |  16
 arch/x86/mm/iomap_32.c                     |  12
 arch/x86/mm/ioremap.c                      |  63
 arch/x86/mm/mm_internal.h                  |   2
 arch/x86/mm/pageattr.c                     |  84
 arch/x86/mm/pat.c                          | 245
 arch/x86/mm/pat_internal.h                 |  22
 arch/x86/mm/pat_rbtree.c                   |   8
 arch/x86/pci/i386.c                        |   4
 arch/x86/xen/enlighten.c                   |  25
 arch/x86/xen/mmu.c                         |  47
 arch/x86/xen/xen-ops.h                     |   1
 drivers/video/fbdev/gbefb.c                |   3
 drivers/video/fbdev/vermilion/vermilion.c  |   6
 25 files changed, 474 insertions(+), 343 deletions(-)
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 9863ee3747da..47c8e32f621a 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -5,65 +5,6 @@
 #include <asm-generic/cacheflush.h>
 #include <asm/special_insns.h>
 
-#ifdef CONFIG_X86_PAT
-/*
- * X86 PAT uses page flags WC and Uncached together to keep track of
- * memory type of pages that have backing page struct. X86 PAT supports 3
- * different memory types, _PAGE_CACHE_WB, _PAGE_CACHE_WC and
- * _PAGE_CACHE_UC_MINUS and fourth state where page's memory type has not
- * been changed from its default (value of -1 used to denote this).
- * Note we do not support _PAGE_CACHE_UC here.
- */
-
-#define _PGMT_DEFAULT		0
-#define _PGMT_WC		(1UL << PG_arch_1)
-#define _PGMT_UC_MINUS		(1UL << PG_uncached)
-#define _PGMT_WB		(1UL << PG_uncached | 1UL << PG_arch_1)
-#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
-#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)
-
-static inline unsigned long get_page_memtype(struct page *pg)
-{
-	unsigned long pg_flags = pg->flags & _PGMT_MASK;
-
-	if (pg_flags == _PGMT_DEFAULT)
-		return -1;
-	else if (pg_flags == _PGMT_WC)
-		return _PAGE_CACHE_WC;
-	else if (pg_flags == _PGMT_UC_MINUS)
-		return _PAGE_CACHE_UC_MINUS;
-	else
-		return _PAGE_CACHE_WB;
-}
-
-static inline void set_page_memtype(struct page *pg, unsigned long memtype)
-{
-	unsigned long memtype_flags = _PGMT_DEFAULT;
-	unsigned long old_flags;
-	unsigned long new_flags;
-
-	switch (memtype) {
-	case _PAGE_CACHE_WC:
-		memtype_flags = _PGMT_WC;
-		break;
-	case _PAGE_CACHE_UC_MINUS:
-		memtype_flags = _PGMT_UC_MINUS;
-		break;
-	case _PAGE_CACHE_WB:
-		memtype_flags = _PGMT_WB;
-		break;
-	}
-
-	do {
-		old_flags = pg->flags;
-		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
-	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
-}
-#else
-static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
-static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
-#endif
-
 /*
  * The set_memory_* API can be used to change various attributes of a virtual
  * address range. The attributes include:
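
Note: the get_page_memtype()/set_page_memtype() helpers removed above are not
dropped from the kernel; they reappear later in this diff in arch/x86/mm/pat.c,
retyped to use enum page_cache_mode. The encoding itself is unchanged: two page
flags (PG_uncached and PG_arch_1) together hold one of four states. A
stand-alone sketch of that two-bit encoding (illustrative user-space C with
assumed bit positions, not kernel code; the kernel additionally updates
pg->flags in a cmpxchg() loop so concurrent flag updates are not lost):

    #include <stdio.h>

    /* assumed bit positions for the sketch only */
    #define PG_ARCH_1   0
    #define PG_UNCACHED 1

    #define PGMT_DEFAULT  0UL
    #define PGMT_WC       (1UL << PG_ARCH_1)
    #define PGMT_UC_MINUS (1UL << PG_UNCACHED)
    #define PGMT_WB       ((1UL << PG_UNCACHED) | (1UL << PG_ARCH_1))
    #define PGMT_MASK     ((1UL << PG_UNCACHED) | (1UL << PG_ARCH_1))

    static const char *decode(unsigned long flags)
    {
            switch (flags & PGMT_MASK) {
            case PGMT_WC:       return "WC";
            case PGMT_UC_MINUS: return "UC-";
            case PGMT_WB:       return "WB";
            default:            return "default (-1)";
            }
    }

    int main(void)
    {
            unsigned long flags = 0;

            printf("%s\n", decode(flags));           /* default (-1) */
            flags = (flags & ~PGMT_MASK) | PGMT_WC;  /* set_page_memtype(WC) */
            printf("%s\n", decode(flags));           /* WC */
            return 0;
    }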
diff --git a/arch/x86/include/asm/fb.h b/arch/x86/include/asm/fb.h
index 2519d0679d99..c3dd5e71f439 100644
--- a/arch/x86/include/asm/fb.h
+++ b/arch/x86/include/asm/fb.h
@@ -8,8 +8,12 @@
 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
 				unsigned long off)
 {
+	unsigned long prot;
+
+	prot = pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK;
 	if (boot_cpu_data.x86 > 3)
-		pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
+		pgprot_val(vma->vm_page_prot) =
+			prot | cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS);
 }
 
 extern int fb_is_primary_device(struct fb_info *info);
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index ffb1733ac91f..bf728e49c53c 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -136,9 +136,7 @@ enum fixed_addresses {
 extern void reserve_top_address(unsigned long reserve);
 
 #define FIXADDR_SIZE		(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_BOOT_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
 #define FIXADDR_START		(FIXADDR_TOP - FIXADDR_SIZE)
-#define FIXADDR_BOOT_START	(FIXADDR_TOP - FIXADDR_BOOT_SIZE)
 
 extern int fixmaps_set;
 
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 302a323b3f67..04e9d023168f 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -38,17 +38,20 @@ extern unsigned long highstart_pfn, highend_pfn;
 /*
  * Ordering is:
  *
- * FIXADDR_TOP
- *	fixed_addresses
- * FIXADDR_START
- *	temp fixed addresses
- * FIXADDR_BOOT_START
- *	Persistent kmap area
- * PKMAP_BASE
- * VMALLOC_END
- *	Vmalloc area
- * VMALLOC_START
- * high_memory
+ * high memory on:                             high_memory off:
+ *  FIXADDR_TOP                                 FIXADDR_TOP
+ *   fixed addresses                             fixed addresses
+ *  FIXADDR_START                               FIXADDR_START
+ *   temp fixed addresses/persistent kmap area   VMALLOC_END
+ *  PKMAP_BASE                                   temp fixed addresses/vmalloc area
+ *  VMALLOC_END                                 VMALLOC_START
+ *   vmalloc area                                high_memory
+ *  VMALLOC_START
+ *  high_memory
+ *
+ * The temp fixed area is only used during boot for early_ioremap(), and
+ * it is unused when the ioremap() is functional. vmalloc/pkmap area become
+ * available after early boot so the temp fixed area is available for re-use.
  */
 #define LAST_PKMAP_MASK (LAST_PKMAP-1)
 #define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 0cdbe6e81b45..34a5b93704d3 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -318,7 +318,7 @@ extern void *xlate_dev_mem_ptr(phys_addr_t phys);
 extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
 
 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
-				unsigned long prot_val);
+				enum page_cache_mode pcm);
 extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
 
 extern bool is_early_ioremap_ptep(pte_t *ptep);
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index e2c1668dde7a..91bc4ba95f91 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -11,16 +11,17 @@ static const int pat_enabled;
 #endif
 
 extern void pat_init(void);
+void pat_init_cache_modes(void);
 
 extern int reserve_memtype(u64 start, u64 end,
-		unsigned long req_type, unsigned long *ret_type);
+		enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
 extern int free_memtype(u64 start, u64 end);
 
 extern int kernel_map_sync_memtype(u64 base, unsigned long size,
-		unsigned long flag);
+		enum page_cache_mode pcm);
 
 int io_reserve_memtype(resource_size_t start, resource_size_t end,
-			unsigned long *type);
+			enum page_cache_mode *pcm);
 
 void io_free_memtype(resource_size_t start, resource_size_t end);
 
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index aa97a070f09f..c112ea63f40d 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -9,9 +9,10 @@
 /*
  * Macro to mark a page protection value as UC-
  */
 #define pgprot_noncached(prot)						\
 	((boot_cpu_data.x86 > 3)					\
-	 ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))		\
+	 ? (__pgprot(pgprot_val(prot) |					\
+		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
 	 : (prot))
 
 #ifndef __ASSEMBLY__
@@ -404,8 +405,8 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 #define canon_pgprot(p) __pgprot(massage_pgprot(p))
 
 static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
-					 unsigned long flags,
-					 unsigned long new_flags)
+					 enum page_cache_mode pcm,
+					 enum page_cache_mode new_pcm)
 {
 	/*
 	 * PAT type is always WB for untracked ranges, so no need to check.
@@ -419,10 +420,10 @@ static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
 	 * - request is uncached, return cannot be write-back
 	 * - request is write-combine, return cannot be write-back
 	 */
-	if ((flags == _PAGE_CACHE_UC_MINUS &&
-	     new_flags == _PAGE_CACHE_WB) ||
-	    (flags == _PAGE_CACHE_WC &&
-	     new_flags == _PAGE_CACHE_WB)) {
+	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
+	     new_pcm == _PAGE_CACHE_MODE_WB) ||
+	    (pcm == _PAGE_CACHE_MODE_WC &&
+	     new_pcm == _PAGE_CACHE_MODE_WB)) {
 		return 0;
 	}
 
diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
index ed5903be26fe..9fb2f2bc8245 100644
--- a/arch/x86/include/asm/pgtable_32_types.h
+++ b/arch/x86/include/asm/pgtable_32_types.h
@@ -37,7 +37,7 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
 #define LAST_PKMAP 1024
 #endif
 
-#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1))	\
+#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1))	\
		    & PMD_MASK)
 
 #ifdef CONFIG_HIGHMEM
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 07789647bf33..af447f95e3be 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -128,11 +128,28 @@
			 _PAGE_SOFT_DIRTY | _PAGE_NUMA)
 #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_NUMA)
 
-#define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
-#define _PAGE_CACHE_WB		(0)
-#define _PAGE_CACHE_WC		(_PAGE_PWT)
-#define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)
-#define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)
+/*
+ * The cache modes defined here are used to translate between pure SW usage
+ * and the HW defined cache mode bits and/or PAT entries.
+ *
+ * The resulting bits for PWT, PCD and PAT should be chosen in a way
+ * to have the WB mode at index 0 (all bits clear). This is the default
+ * right now and likely would break too much if changed.
+ */
+#ifndef __ASSEMBLY__
+enum page_cache_mode {
+	_PAGE_CACHE_MODE_WB = 0,
+	_PAGE_CACHE_MODE_WC = 1,
+	_PAGE_CACHE_MODE_UC_MINUS = 2,
+	_PAGE_CACHE_MODE_UC = 3,
+	_PAGE_CACHE_MODE_WT = 4,
+	_PAGE_CACHE_MODE_WP = 5,
+	_PAGE_CACHE_MODE_NUM = 8
+};
+#endif
+
+#define _PAGE_CACHE_MASK	(_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)
+#define _PAGE_NOCACHE		(cachemode2protval(_PAGE_CACHE_MODE_UC))
 
 #define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
 #define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
@@ -156,41 +173,27 @@
 
 #define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
 #define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
-#define __PAGE_KERNEL_EXEC_NOCACHE	(__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
-#define __PAGE_KERNEL_WC		(__PAGE_KERNEL | _PAGE_CACHE_WC)
-#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
-#define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
+#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_NOCACHE)
 #define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
 #define __PAGE_KERNEL_VVAR		(__PAGE_KERNEL_RO | _PAGE_USER)
-#define __PAGE_KERNEL_VVAR_NOCACHE	(__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
 #define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
-#define __PAGE_KERNEL_LARGE_NOCACHE	(__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
 #define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
 
 #define __PAGE_KERNEL_IO		(__PAGE_KERNEL)
 #define __PAGE_KERNEL_IO_NOCACHE	(__PAGE_KERNEL_NOCACHE)
-#define __PAGE_KERNEL_IO_UC_MINUS	(__PAGE_KERNEL_UC_MINUS)
-#define __PAGE_KERNEL_IO_WC		(__PAGE_KERNEL_WC)
 
 #define PAGE_KERNEL			__pgprot(__PAGE_KERNEL)
 #define PAGE_KERNEL_RO			__pgprot(__PAGE_KERNEL_RO)
 #define PAGE_KERNEL_EXEC		__pgprot(__PAGE_KERNEL_EXEC)
 #define PAGE_KERNEL_RX			__pgprot(__PAGE_KERNEL_RX)
-#define PAGE_KERNEL_WC			__pgprot(__PAGE_KERNEL_WC)
 #define PAGE_KERNEL_NOCACHE		__pgprot(__PAGE_KERNEL_NOCACHE)
-#define PAGE_KERNEL_UC_MINUS		__pgprot(__PAGE_KERNEL_UC_MINUS)
-#define PAGE_KERNEL_EXEC_NOCACHE	__pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
 #define PAGE_KERNEL_LARGE		__pgprot(__PAGE_KERNEL_LARGE)
-#define PAGE_KERNEL_LARGE_NOCACHE	__pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
 #define PAGE_KERNEL_LARGE_EXEC		__pgprot(__PAGE_KERNEL_LARGE_EXEC)
 #define PAGE_KERNEL_VSYSCALL		__pgprot(__PAGE_KERNEL_VSYSCALL)
 #define PAGE_KERNEL_VVAR		__pgprot(__PAGE_KERNEL_VVAR)
-#define PAGE_KERNEL_VVAR_NOCACHE	__pgprot(__PAGE_KERNEL_VVAR_NOCACHE)
 
 #define PAGE_KERNEL_IO			__pgprot(__PAGE_KERNEL_IO)
 #define PAGE_KERNEL_IO_NOCACHE		__pgprot(__PAGE_KERNEL_IO_NOCACHE)
-#define PAGE_KERNEL_IO_UC_MINUS		__pgprot(__PAGE_KERNEL_IO_UC_MINUS)
-#define PAGE_KERNEL_IO_WC		__pgprot(__PAGE_KERNEL_IO_WC)
 
 /* xwr */
 #define __P000	PAGE_NONE
@@ -341,6 +344,59 @@ static inline pmdval_t pmdnuma_flags(pmd_t pmd)
 #define pgprot_val(x)	((x).pgprot)
 #define __pgprot(x)	((pgprot_t) { (x) } )
 
+extern uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM];
+extern uint8_t __pte2cachemode_tbl[8];
+
+#define __pte2cm_idx(cb)				\
+	((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) |		\
+	 (((cb) >> (_PAGE_BIT_PCD - 1)) & 2) |		\
+	 (((cb) >> _PAGE_BIT_PWT) & 1))
+#define __cm_idx2pte(i)					\
+	((((i) & 4) << (_PAGE_BIT_PAT - 2)) |		\
+	 (((i) & 2) << (_PAGE_BIT_PCD - 1)) |		\
+	 (((i) & 1) << _PAGE_BIT_PWT))
+
+static inline unsigned long cachemode2protval(enum page_cache_mode pcm)
+{
+	if (likely(pcm == 0))
+		return 0;
+	return __cachemode2pte_tbl[pcm];
+}
+static inline pgprot_t cachemode2pgprot(enum page_cache_mode pcm)
+{
+	return __pgprot(cachemode2protval(pcm));
+}
+static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
+{
+	unsigned long masked;
+
+	masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
+	if (likely(masked == 0))
+		return 0;
+	return __pte2cachemode_tbl[__pte2cm_idx(masked)];
+}
+static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
+{
+	pgprot_t new;
+	unsigned long val;
+
+	val = pgprot_val(pgprot);
+	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
+		((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
+	return new;
+}
+static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
+{
+	pgprot_t new;
+	unsigned long val;
+
+	val = pgprot_val(pgprot);
+	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
+		((val & _PAGE_PAT_LARGE) >>
+		 (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
+	return new;
+}
+
 
 typedef struct page *pgtable_t;
 
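
Note: __pte2cm_idx() compresses the three scattered cache-attribute bits of a
4k PTE (PWT at bit 3, PCD at bit 4, PAT at bit 7) into a dense 0-7 table
index, and __cm_idx2pte() is its inverse. Keeping WB at index 0 lets
cachemode2protval()/pgprot2cachemode() short-circuit the common case without a
table lookup. A stand-alone sketch of the same bit arithmetic (user-space C;
bit positions copied from pgtable_types.h):

    #include <stdio.h>

    #define _PAGE_BIT_PWT 3
    #define _PAGE_BIT_PCD 4
    #define _PAGE_BIT_PAT 7

    /* same expressions as the new __pte2cm_idx()/__cm_idx2pte() macros */
    #define pte2cm_idx(cb)  ((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) | \
                             (((cb) >> (_PAGE_BIT_PCD - 1)) & 2) | \
                             (((cb) >> _PAGE_BIT_PWT) & 1))
    #define cm_idx2pte(i)   ((((i) & 4) << (_PAGE_BIT_PAT - 2)) | \
                             (((i) & 2) << (_PAGE_BIT_PCD - 1)) | \
                             (((i) & 1) << _PAGE_BIT_PWT))

    int main(void)
    {
            unsigned long pwt = 1UL << _PAGE_BIT_PWT;   /* 0x08 */
            unsigned long pcd = 1UL << _PAGE_BIT_PCD;   /* 0x10 */
            unsigned long pat = 1UL << _PAGE_BIT_PAT;   /* 0x80 */

            printf("idx(PWT)         = %d\n", (int)pte2cm_idx(pwt)); /* 1 */
            printf("idx(PCD)         = %d\n", (int)pte2cm_idx(pcd)); /* 2 */
            printf("idx(PAT|PCD|PWT) = %d\n",
                   (int)pte2cm_idx(pat | pcd | pwt));                /* 7 */
            printf("pte(7)           = %#lx\n",
                   (unsigned long)cm_idx2pte(7));                    /* 0x98 */
            return 0;
    }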
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 1a8053d1012e..f0cedf3395af 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -129,7 +129,7 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
 
	if (!pgprot_val(prot)) {
		/* Not present */
-		pt_dump_cont_printf(m, dmsg, "                          ");
+		pt_dump_cont_printf(m, dmsg, "                              ");
	} else {
		if (pr & _PAGE_USER)
			pt_dump_cont_printf(m, dmsg, "USR ");
@@ -148,18 +148,16 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
		else
			pt_dump_cont_printf(m, dmsg, "    ");
 
-		/* Bit 9 has a different meaning on level 3 vs 4 */
-		if (level <= 3) {
-			if (pr & _PAGE_PSE)
-				pt_dump_cont_printf(m, dmsg, "PSE ");
-			else
-				pt_dump_cont_printf(m, dmsg, "    ");
-		} else {
-			if (pr & _PAGE_PAT)
-				pt_dump_cont_printf(m, dmsg, "pat ");
-			else
-				pt_dump_cont_printf(m, dmsg, "    ");
-		}
+		/* Bit 7 has a different meaning on level 3 vs 4 */
+		if (level <= 3 && pr & _PAGE_PSE)
+			pt_dump_cont_printf(m, dmsg, "PSE ");
+		else
+			pt_dump_cont_printf(m, dmsg, "    ");
+		if ((level == 4 && pr & _PAGE_PAT) ||
+		    ((level == 3 || level == 2) && pr & _PAGE_PAT_LARGE))
+			pt_dump_cont_printf(m, dmsg, "pat ");
+		else
+			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_GLOBAL)
			pt_dump_cont_printf(m, dmsg, "GLB ");
		else
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 66dba36f2343..82b41d56bb98 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -27,6 +27,35 @@
 
 #include "mm_internal.h"
 
+/*
+ * Tables translating between page_cache_type_t and pte encoding.
+ * Minimal supported modes are defined statically, modified if more supported
+ * cache modes are available.
+ * Index into __cachemode2pte_tbl is the cachemode.
+ * Index into __pte2cachemode_tbl are the caching attribute bits of the pte
+ * (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at index bit positions 0, 1, 2.
+ */
+uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
+	[_PAGE_CACHE_MODE_WB]		= 0,
+	[_PAGE_CACHE_MODE_WC]		= _PAGE_PWT,
+	[_PAGE_CACHE_MODE_UC_MINUS]	= _PAGE_PCD,
+	[_PAGE_CACHE_MODE_UC]		= _PAGE_PCD | _PAGE_PWT,
+	[_PAGE_CACHE_MODE_WT]		= _PAGE_PCD,
+	[_PAGE_CACHE_MODE_WP]		= _PAGE_PCD,
+};
+EXPORT_SYMBOL_GPL(__cachemode2pte_tbl);
+uint8_t __pte2cachemode_tbl[8] = {
+	[__pte2cm_idx(0)] = _PAGE_CACHE_MODE_WB,
+	[__pte2cm_idx(_PAGE_PWT)] = _PAGE_CACHE_MODE_WC,
+	[__pte2cm_idx(_PAGE_PCD)] = _PAGE_CACHE_MODE_UC_MINUS,
+	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD)] = _PAGE_CACHE_MODE_UC,
+	[__pte2cm_idx(_PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
+	[__pte2cm_idx(_PAGE_PWT | _PAGE_PAT)] = _PAGE_CACHE_MODE_WC,
+	[__pte2cm_idx(_PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
+	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
+};
+EXPORT_SYMBOL_GPL(__pte2cachemode_tbl);
+
 static unsigned long __initdata pgt_buf_start;
 static unsigned long __initdata pgt_buf_end;
 static unsigned long __initdata pgt_buf_top;
@@ -687,3 +716,11 @@ void __init zone_sizes_init(void)
	free_area_init_nodes(max_zone_pfns);
 }
 
+void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
+{
+	/* entry 0 MUST be WB (hardwired to speed up translations) */
+	BUG_ON(!entry && cache != _PAGE_CACHE_MODE_WB);
+
+	__cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
+	__pte2cachemode_tbl[entry] = cache;
+}
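
Note: until pat_init_cache_modes() rewires these tables from the PAT MSR, WT
and WP have no PWT/PCD-only encoding and are conservatively aliased to the
UC- bit pattern, so a WT request round-trips through the tables as UC-. A
stand-alone sketch of that default round-trip (user-space C mirroring the
static tables above, working in the dense index space rather than raw pte
bits):

    #include <stdio.h>

    enum pcm { WB, WC, UC_MINUS, UC, WT, WP, PCM_NUM = 8 };

    #define PWT 1   /* bit 0 of the dense index, per __pte2cm_idx() */
    #define PCD 2   /* bit 1 */
    #define PAT 4   /* bit 2 */

    static const unsigned cm2idx[PCM_NUM] = {
            [WB] = 0,         [WC] = PWT,
            [UC_MINUS] = PCD, [UC] = PCD | PWT,
            [WT] = PCD,       [WP] = PCD, /* no free encoding: degrade */
    };
    static const enum pcm idx2cm[8] = {
            [0] = WB,                [PWT] = WC,
            [PCD] = UC_MINUS,        [PCD | PWT] = UC,
            [PAT] = WB,              [PAT | PWT] = WC,
            [PAT | PCD] = UC_MINUS,  [PAT | PCD | PWT] = UC,
    };

    int main(void)
    {
            /* WT comes back as UC_MINUS until the PAT MSR supplies WT */
            printf("WT -> idx %u -> mode %u (UC_MINUS is %u)\n",
                   cm2idx[WT], (unsigned)idx2cm[cm2idx[WT]],
                   (unsigned)UC_MINUS);
            return 0;
    }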
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 4e5dfec750fc..78e53c80fc12 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -52,7 +52,6 @@
 #include <asm/numa.h>
 #include <asm/cacheflush.h>
 #include <asm/init.h>
-#include <asm/uv/uv.h>
 #include <asm/setup.h>
 
 #include "mm_internal.h"
@@ -338,12 +337,15 @@ pte_t * __init populate_extra_pte(unsigned long vaddr)
  * Create large page table mappings for a range of physical addresses.
  */
 static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
-					pgprot_t prot)
+					enum page_cache_mode cache)
 {
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
+	pgprot_t prot;
 
+	pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
+		pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache)));
	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
@@ -366,12 +368,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
 
 void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
 {
-	__init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
+	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
 }
 
 void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
 {
-	__init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
+	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
 }
 
 /*
@@ -1256,12 +1258,10 @@ static unsigned long probe_memory_block_size(void)
	/* start from 2g */
	unsigned long bz = 1UL<<31;
 
-#ifdef CONFIG_X86_UV
-	if (is_uv_system()) {
-		printk(KERN_INFO "UV: memory block size 2GB\n");
+	if (totalram_pages >= (64ULL << (30 - PAGE_SHIFT))) {
+		pr_info("Using 2GB memory block size for large-memory system\n");
		return 2UL * 1024 * 1024 * 1024;
	}
-#endif
 
	/* less than 64g installed */
	if ((max_pfn << PAGE_SHIFT) < (16UL << 32))
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 7b179b499fa3..9ca35fc60cfe 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -33,17 +33,17 @@ static int is_io_mapping_possible(resource_size_t base, unsigned long size)
 
 int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
 {
-	unsigned long flag = _PAGE_CACHE_WC;
+	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WC;
	int ret;
 
	if (!is_io_mapping_possible(base, size))
		return -EINVAL;
 
-	ret = io_reserve_memtype(base, base + size, &flag);
+	ret = io_reserve_memtype(base, base + size, &pcm);
	if (ret)
		return ret;
 
-	*prot = __pgprot(__PAGE_KERNEL | flag);
+	*prot = __pgprot(__PAGE_KERNEL | cachemode2protval(pcm));
	return 0;
 }
 EXPORT_SYMBOL_GPL(iomap_create_wc);
@@ -82,8 +82,10 @@ iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
	 * MTRR is UC or WC. UC_MINUS gets the real intention, of the
	 * user, which is "WC if the MTRR is WC, UC if you can't do that."
	 */
-	if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
-		prot = PAGE_KERNEL_UC_MINUS;
+	if (!pat_enabled && pgprot_val(prot) ==
+	    (__PAGE_KERNEL | cachemode2protval(_PAGE_CACHE_MODE_WC)))
+		prot = __pgprot(__PAGE_KERNEL |
+				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
 
	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
 }
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index b12f43c192cf..fdf617c00e2f 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -29,20 +29,20 @@
  * conflicts.
  */
 int ioremap_change_attr(unsigned long vaddr, unsigned long size,
-			unsigned long prot_val)
+			enum page_cache_mode pcm)
 {
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;
 
-	switch (prot_val) {
-	case _PAGE_CACHE_UC:
+	switch (pcm) {
+	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
-	case _PAGE_CACHE_WC:
+	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
-	case _PAGE_CACHE_WB:
+	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}
@@ -75,14 +75,14 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
  * caller shouldn't need to know that small detail.
  */
 static void __iomem *__ioremap_caller(resource_size_t phys_addr,
-		unsigned long size, unsigned long prot_val, void *caller)
+		unsigned long size, enum page_cache_mode pcm, void *caller)
 {
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
-	unsigned long new_prot_val;
+	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;
@@ -134,38 +134,40 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
-						prot_val, &new_prot_val);
+						pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}
 
-	if (prot_val != new_prot_val) {
-		if (!is_new_memtype_allowed(phys_addr, size,
-					    prot_val, new_prot_val)) {
+	if (pcm != new_pcm) {
+		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
-		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
+		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
-				prot_val, new_prot_val);
+				pcm, new_pcm);
			goto err_free_memtype;
		}
-		prot_val = new_prot_val;
+		pcm = new_pcm;
	}
 
-	switch (prot_val) {
-	case _PAGE_CACHE_UC:
+	prot = PAGE_KERNEL_IO;
+	switch (pcm) {
+	case _PAGE_CACHE_MODE_UC:
	default:
-		prot = PAGE_KERNEL_IO_NOCACHE;
+		prot = __pgprot(pgprot_val(prot) |
+				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
-	case _PAGE_CACHE_UC_MINUS:
-		prot = PAGE_KERNEL_IO_UC_MINUS;
+	case _PAGE_CACHE_MODE_UC_MINUS:
+		prot = __pgprot(pgprot_val(prot) |
+				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
-	case _PAGE_CACHE_WC:
-		prot = PAGE_KERNEL_IO_WC;
+	case _PAGE_CACHE_MODE_WC:
+		prot = __pgprot(pgprot_val(prot) |
+				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
-	case _PAGE_CACHE_WB:
-		prot = PAGE_KERNEL_IO;
+	case _PAGE_CACHE_MODE_WB:
		break;
	}
 
@@ -178,7 +180,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
 
-	if (kernel_map_sync_memtype(phys_addr, size, prot_val))
+	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;
 
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
@@ -227,14 +229,14 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
	/*
	 * Ideally, this should be:
-	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
+	 *	pat_enabled ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
-	unsigned long val = _PAGE_CACHE_UC_MINUS;
+	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;
 
-	return __ioremap_caller(phys_addr, size, val,
+	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_nocache);
@@ -252,7 +254,7 @@ EXPORT_SYMBOL(ioremap_nocache);
 void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
 {
	if (pat_enabled)
-		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
+		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
@@ -261,7 +263,7 @@ EXPORT_SYMBOL(ioremap_wc);
 
 void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 {
-	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
+	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_cache);
@@ -269,7 +271,8 @@ EXPORT_SYMBOL(ioremap_cache);
 void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
 {
-	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
+	return __ioremap_caller(phys_addr, size,
+				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_prot);
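
Note: __ioremap_caller() may be granted a different cache mode than requested
by reserve_memtype(); is_new_memtype_allowed() (changed in pgtable.h above)
decides whether the substitution is acceptable, the rule being that a UC- or
WC request must never be weakened to a write-back mapping. A stand-alone
sketch of that rule (user-space C; enum values are illustrative):

    #include <stdio.h>

    enum pcm { WB, WC, UC_MINUS, UC };

    /* mirrors the core check in is_new_memtype_allowed() */
    static int new_memtype_allowed(enum pcm req, enum pcm got)
    {
            if ((req == UC_MINUS && got == WB) ||
                (req == WC && got == WB))
                    return 0;
            return 1;
    }

    int main(void)
    {
            printf("%d\n", new_memtype_allowed(WC, WB));       /* 0: rejected */
            printf("%d\n", new_memtype_allowed(WC, UC_MINUS)); /* 1: stricter ok */
            return 0;
    }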
diff --git a/arch/x86/mm/mm_internal.h b/arch/x86/mm/mm_internal.h
index 6b563a118891..62474ba66c8e 100644
--- a/arch/x86/mm/mm_internal.h
+++ b/arch/x86/mm/mm_internal.h
@@ -16,4 +16,6 @@ void zone_sizes_init(void);
 
 extern int after_bootmem;
 
+void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache);
+
 #endif	/* __X86_MM_INTERNAL_H */
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 36de293caf25..a3a5d46605d2 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -485,14 +485,23 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 
	/*
	 * We are safe now. Check whether the new pgprot is the same:
+	 * Convert protection attributes to 4k-format, as cpa->mask* are set
+	 * up accordingly.
	 */
	old_pte = *kpte;
-	old_prot = req_prot = pte_pgprot(old_pte);
+	old_prot = req_prot = pgprot_large_2_4k(pte_pgprot(old_pte));
 
	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
 
	/*
+	 * req_prot is in format of 4k pages. It must be converted to large
+	 * page format: the caching mode includes the PAT bit located at
+	 * different bit positions in the two formats.
+	 */
+	req_prot = pgprot_4k_2_large(req_prot);
+
+	/*
	 * Set the PSE and GLOBAL flags only if the PRESENT flag is
	 * set otherwise pmd_present/pmd_huge will return true even on
	 * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
@@ -585,13 +594,10 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
 
	paravirt_alloc_pte(&init_mm, page_to_pfn(base));
	ref_prot = pte_pgprot(pte_clrhuge(*kpte));
-	/*
-	 * If we ever want to utilize the PAT bit, we need to
-	 * update this function to make sure it's converted from
-	 * bit 12 to bit 7 when we cross from the 2MB level to
-	 * the 4K level:
-	 */
-	WARN_ON_ONCE(pgprot_val(ref_prot) & _PAGE_PAT_LARGE);
+
+	/* promote PAT bit to correct position */
+	if (level == PG_LEVEL_2M)
+		ref_prot = pgprot_large_2_4k(ref_prot);
 
 #ifdef CONFIG_X86_64
	if (level == PG_LEVEL_1G) {
@@ -879,6 +885,7 @@ static int populate_pmd(struct cpa_data *cpa,
 {
	unsigned int cur_pages = 0;
	pmd_t *pmd;
+	pgprot_t pmd_pgprot;
 
	/*
	 * Not on a 2M boundary?
@@ -910,6 +917,8 @@ static int populate_pmd(struct cpa_data *cpa,
	if (num_pages == cur_pages)
		return cur_pages;
 
+	pmd_pgprot = pgprot_4k_2_large(pgprot);
+
	while (end - start >= PMD_SIZE) {
 
		/*
@@ -921,7 +930,8 @@ static int populate_pmd(struct cpa_data *cpa,
 
		pmd = pmd_offset(pud, start);
 
-		set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot)));
+		set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
+				   massage_pgprot(pmd_pgprot)));
 
		start += PMD_SIZE;
		cpa->pfn += PMD_SIZE;
@@ -949,6 +959,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
	pud_t *pud;
	unsigned long end;
	int cur_pages = 0;
+	pgprot_t pud_pgprot;
 
	end = start + (cpa->numpages << PAGE_SHIFT);
 
@@ -986,12 +997,14 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
		return cur_pages;
 
	pud = pud_offset(pgd, start);
+	pud_pgprot = pgprot_4k_2_large(pgprot);
 
	/*
	 * Map everything starting from the Gb boundary, possibly with 1G pages
	 */
	while (end - start >= PUD_SIZE) {
-		set_pud(pud, __pud(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot)));
+		set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
+				   massage_pgprot(pud_pgprot)));
 
		start += PUD_SIZE;
		cpa->pfn += PUD_SIZE;
@@ -1304,12 +1317,6 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
		return 0;
	}
 
-static inline int cache_attr(pgprot_t attr)
-{
-	return pgprot_val(attr) &
-		(_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
-}
-
 static int change_page_attr_set_clr(unsigned long *addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr,
				    int force_split, int in_flag,
@@ -1390,7 +1397,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
	 * No need to flush, when we did not set any of the caching
	 * attributes:
	 */
-	cache = cache_attr(mask_set);
+	cache = !!pgprot2cachemode(mask_set);
 
	/*
	 * On success we use CLFLUSH, when the CPU supports it to
@@ -1445,7 +1452,8 @@ int _set_memory_uc(unsigned long addr, int numpages)
	 * for now UC MINUS. see comments in ioremap_nocache()
	 */
	return change_page_attr_set(&addr, numpages,
-				    __pgprot(_PAGE_CACHE_UC_MINUS), 0);
+				    cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
+				    0);
 }
 
 int set_memory_uc(unsigned long addr, int numpages)
@@ -1456,7 +1464,7 @@ int set_memory_uc(unsigned long addr, int numpages)
	 * for now UC MINUS. see comments in ioremap_nocache()
	 */
	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
-			      _PAGE_CACHE_UC_MINUS, NULL);
+			      _PAGE_CACHE_MODE_UC_MINUS, NULL);
	if (ret)
		goto out_err;
 
@@ -1474,7 +1482,7 @@ out_err:
 EXPORT_SYMBOL(set_memory_uc);
 
 static int _set_memory_array(unsigned long *addr, int addrinarray,
-		unsigned long new_type)
+		enum page_cache_mode new_type)
 {
	int i, j;
	int ret;
@@ -1490,11 +1498,13 @@ static int _set_memory_array(unsigned long *addr, int addrinarray,
	}
 
	ret = change_page_attr_set(addr, addrinarray,
-				   __pgprot(_PAGE_CACHE_UC_MINUS), 1);
+				   cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
+				   1);
 
-	if (!ret && new_type == _PAGE_CACHE_WC)
+	if (!ret && new_type == _PAGE_CACHE_MODE_WC)
		ret = change_page_attr_set_clr(addr, addrinarray,
-					       __pgprot(_PAGE_CACHE_WC),
+					       cachemode2pgprot(
+						_PAGE_CACHE_MODE_WC),
					       __pgprot(_PAGE_CACHE_MASK),
					       0, CPA_ARRAY, NULL);
	if (ret)
@@ -1511,13 +1521,13 @@ out_free:
 
 int set_memory_array_uc(unsigned long *addr, int addrinarray)
 {
-	return _set_memory_array(addr, addrinarray, _PAGE_CACHE_UC_MINUS);
+	return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_UC_MINUS);
 }
 EXPORT_SYMBOL(set_memory_array_uc);
 
 int set_memory_array_wc(unsigned long *addr, int addrinarray)
 {
-	return _set_memory_array(addr, addrinarray, _PAGE_CACHE_WC);
+	return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WC);
 }
 EXPORT_SYMBOL(set_memory_array_wc);
 
@@ -1527,10 +1537,12 @@ int _set_memory_wc(unsigned long addr, int numpages)
	unsigned long addr_copy = addr;
 
	ret = change_page_attr_set(&addr, numpages,
-				   __pgprot(_PAGE_CACHE_UC_MINUS), 0);
+				   cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
+				   0);
	if (!ret) {
		ret = change_page_attr_set_clr(&addr_copy, numpages,
-					       __pgprot(_PAGE_CACHE_WC),
+					       cachemode2pgprot(
+						_PAGE_CACHE_MODE_WC),
					       __pgprot(_PAGE_CACHE_MASK),
					       0, 0, NULL);
	}
@@ -1545,7 +1557,7 @@ int set_memory_wc(unsigned long addr, int numpages)
		return set_memory_uc(addr, numpages);
 
	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
-		_PAGE_CACHE_WC, NULL);
+		_PAGE_CACHE_MODE_WC, NULL);
	if (ret)
		goto out_err;
 
@@ -1564,6 +1576,7 @@ EXPORT_SYMBOL(set_memory_wc);
 
 int _set_memory_wb(unsigned long addr, int numpages)
 {
+	/* WB cache mode is hard wired to all cache attribute bits being 0 */
	return change_page_attr_clear(&addr, numpages,
				      __pgprot(_PAGE_CACHE_MASK), 0);
 }
@@ -1586,6 +1599,7 @@ int set_memory_array_wb(unsigned long *addr, int addrinarray)
	int i;
	int ret;
 
+	/* WB cache mode is hard wired to all cache attribute bits being 0 */
	ret = change_page_attr_clear(addr, addrinarray,
				     __pgprot(_PAGE_CACHE_MASK), 1);
	if (ret)
@@ -1648,7 +1662,7 @@ int set_pages_uc(struct page *page, int numpages)
 EXPORT_SYMBOL(set_pages_uc);
 
 static int _set_pages_array(struct page **pages, int addrinarray,
-		unsigned long new_type)
+		enum page_cache_mode new_type)
 {
	unsigned long start;
	unsigned long end;
@@ -1666,10 +1680,11 @@ static int _set_pages_array(struct page **pages, int addrinarray,
	}
 
	ret = cpa_set_pages_array(pages, addrinarray,
-			__pgprot(_PAGE_CACHE_UC_MINUS));
-	if (!ret && new_type == _PAGE_CACHE_WC)
+			cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS));
+	if (!ret && new_type == _PAGE_CACHE_MODE_WC)
		ret = change_page_attr_set_clr(NULL, addrinarray,
-					       __pgprot(_PAGE_CACHE_WC),
+					       cachemode2pgprot(
+						_PAGE_CACHE_MODE_WC),
					       __pgprot(_PAGE_CACHE_MASK),
					       0, CPA_PAGES_ARRAY, pages);
	if (ret)
@@ -1689,13 +1704,13 @@ err_out:
 
 int set_pages_array_uc(struct page **pages, int addrinarray)
 {
-	return _set_pages_array(pages, addrinarray, _PAGE_CACHE_UC_MINUS);
+	return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_UC_MINUS);
 }
 EXPORT_SYMBOL(set_pages_array_uc);
 
 int set_pages_array_wc(struct page **pages, int addrinarray)
 {
-	return _set_pages_array(pages, addrinarray, _PAGE_CACHE_WC);
+	return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WC);
 }
 EXPORT_SYMBOL(set_pages_array_wc);
 
@@ -1714,6 +1729,7 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
	unsigned long end;
	int i;
 
+	/* WB cache mode is hard wired to all cache attribute bits being 0 */
	retval = cpa_clear_pages_array(pages, addrinarray,
				       __pgprot(_PAGE_CACHE_MASK));
	if (retval)
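
Note: the reason try_preserve_large_page() and __split_large_page() now
convert via pgprot_4k_2_large()/pgprot_large_2_4k() is that the PAT bit sits
at bit 7 in 4k PTEs but at bit 12 in 2M/1G entries, where bit 7 is occupied
by PSE. A stand-alone sketch of the relocation (user-space C; bit positions
as in pgtable_types.h):

    #include <stdio.h>

    #define _PAGE_BIT_PAT        7  /* PAT in 4k PTEs */
    #define _PAGE_BIT_PAT_LARGE 12  /* PAT in 2M/1G entries; bit 7 is PSE */
    #define _PAGE_PAT       (1UL << _PAGE_BIT_PAT)
    #define _PAGE_PAT_LARGE (1UL << _PAGE_BIT_PAT_LARGE)

    /* same transform as the new pgprot_4k_2_large() helper */
    static unsigned long prot_4k_2_large(unsigned long val)
    {
            return (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
                   ((val & _PAGE_PAT) <<
                    (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
    }

    int main(void)
    {
            unsigned long val4k = _PAGE_PAT | 0x3; /* PAT plus low flag bits */

            /* prints 0x83 -> 0x1003: bit 7 moved to bit 12 */
            printf("%#lx -> %#lx\n", val4k, prot_4k_2_large(val4k));
            return 0;
    }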
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index c7eddbe6a612..edf299c8ff6c 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <asm/io.h> | 31 | #include <asm/io.h> |
32 | 32 | ||
33 | #include "pat_internal.h" | 33 | #include "pat_internal.h" |
34 | #include "mm_internal.h" | ||
34 | 35 | ||
35 | #ifdef CONFIG_X86_PAT | 36 | #ifdef CONFIG_X86_PAT |
36 | int __read_mostly pat_enabled = 1; | 37 | int __read_mostly pat_enabled = 1; |
@@ -66,6 +67,75 @@ __setup("debugpat", pat_debug_setup); | |||
66 | 67 | ||
67 | static u64 __read_mostly boot_pat_state; | 68 | static u64 __read_mostly boot_pat_state; |
68 | 69 | ||
70 | #ifdef CONFIG_X86_PAT | ||
71 | /* | ||
72 | * X86 PAT uses page flags WC and Uncached together to keep track of | ||
73 | * memory type of pages that have backing page struct. X86 PAT supports 3 | ||
74 | * different memory types, _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC and | ||
75 | * _PAGE_CACHE_MODE_UC_MINUS and fourth state where page's memory type has not | ||
76 | * been changed from its default (value of -1 used to denote this). | ||
77 | * Note we do not support _PAGE_CACHE_MODE_UC here. | ||
78 | */ | ||
79 | |||
80 | #define _PGMT_DEFAULT 0 | ||
81 | #define _PGMT_WC (1UL << PG_arch_1) | ||
82 | #define _PGMT_UC_MINUS (1UL << PG_uncached) | ||
83 | #define _PGMT_WB (1UL << PG_uncached | 1UL << PG_arch_1) | ||
84 | #define _PGMT_MASK (1UL << PG_uncached | 1UL << PG_arch_1) | ||
85 | #define _PGMT_CLEAR_MASK (~_PGMT_MASK) | ||
86 | |||
87 | static inline enum page_cache_mode get_page_memtype(struct page *pg) | ||
88 | { | ||
89 | unsigned long pg_flags = pg->flags & _PGMT_MASK; | ||
90 | |||
91 | if (pg_flags == _PGMT_DEFAULT) | ||
92 | return -1; | ||
93 | else if (pg_flags == _PGMT_WC) | ||
94 | return _PAGE_CACHE_MODE_WC; | ||
95 | else if (pg_flags == _PGMT_UC_MINUS) | ||
96 | return _PAGE_CACHE_MODE_UC_MINUS; | ||
97 | else | ||
98 | return _PAGE_CACHE_MODE_WB; | ||
99 | } | ||
100 | |||
101 | static inline void set_page_memtype(struct page *pg, | ||
102 | enum page_cache_mode memtype) | ||
103 | { | ||
104 | unsigned long memtype_flags; | ||
105 | unsigned long old_flags; | ||
106 | unsigned long new_flags; | ||
107 | |||
108 | switch (memtype) { | ||
109 | case _PAGE_CACHE_MODE_WC: | ||
110 | memtype_flags = _PGMT_WC; | ||
111 | break; | ||
112 | case _PAGE_CACHE_MODE_UC_MINUS: | ||
113 | memtype_flags = _PGMT_UC_MINUS; | ||
114 | break; | ||
115 | case _PAGE_CACHE_MODE_WB: | ||
116 | memtype_flags = _PGMT_WB; | ||
117 | break; | ||
118 | default: | ||
119 | memtype_flags = _PGMT_DEFAULT; | ||
120 | break; | ||
121 | } | ||
122 | |||
123 | do { | ||
124 | old_flags = pg->flags; | ||
125 | new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags; | ||
126 | } while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags); | ||
127 | } | ||
128 | #else | ||
129 | static inline enum page_cache_mode get_page_memtype(struct page *pg) | ||
130 | { | ||
131 | return -1; | ||
132 | } | ||
133 | static inline void set_page_memtype(struct page *pg, | ||
134 | enum page_cache_mode memtype) | ||
135 | { | ||
136 | } | ||
137 | #endif | ||
138 | |||
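
The two page flags combine into four trackable states: neither flag set means "no memtype recorded" (-1), PG_arch_1 alone means WC, PG_uncached alone means UC-, and both together mean WB. A short sketch of the round trip the two helpers provide (the pfn is an illustrative assumption; kernel context with a valid struct page for it is assumed):

	struct page *pg = pfn_to_page(pfn);

	/* Freshly tracked RAM: no memtype recorded yet. */
	WARN_ON(get_page_memtype(pg) != -1);

	set_page_memtype(pg, _PAGE_CACHE_MODE_WC);
	WARN_ON(get_page_memtype(pg) != _PAGE_CACHE_MODE_WC);

	/*
	 * Any unsupported mode (e.g. -1) falls into the default case of
	 * the switch above and resets the page to the default state.
	 */
	set_page_memtype(pg, -1);
	WARN_ON(get_page_memtype(pg) != -1);
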
69 | enum { | 139 | enum { |
70 | PAT_UC = 0, /* uncached */ | 140 | PAT_UC = 0, /* uncached */ |
71 | PAT_WC = 1, /* Write combining */ | 141 | PAT_WC = 1, /* Write combining */ |
@@ -75,6 +145,52 @@ enum { | |||
75 | PAT_UC_MINUS = 7, /* UC, but can be overridden by MTRR */ | 145 | PAT_UC_MINUS = 7, /* UC, but can be overridden by MTRR */ |
76 | }; | 146 | }; |
77 | 147 | ||
148 | #define CM(c) (_PAGE_CACHE_MODE_ ## c) | ||
149 | |||
150 | static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg) | ||
151 | { | ||
152 | enum page_cache_mode cache; | ||
153 | char *cache_mode; | ||
154 | |||
155 | switch (pat_val) { | ||
156 | case PAT_UC: cache = CM(UC); cache_mode = "UC "; break; | ||
157 | case PAT_WC: cache = CM(WC); cache_mode = "WC "; break; | ||
158 | case PAT_WT: cache = CM(WT); cache_mode = "WT "; break; | ||
159 | case PAT_WP: cache = CM(WP); cache_mode = "WP "; break; | ||
160 | case PAT_WB: cache = CM(WB); cache_mode = "WB "; break; | ||
161 | case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break; | ||
162 | default: cache = CM(WB); cache_mode = "WB "; break; | ||
163 | } | ||
164 | |||
165 | memcpy(msg, cache_mode, 4); | ||
166 | |||
167 | return cache; | ||
168 | } | ||
169 | |||
170 | #undef CM | ||
171 | |||
172 | /* | ||
173 | * Update the cache mode to pgprot translation tables according to PAT | ||
174 | * configuration. | ||
175 | * Using lower indices is preferred, so we start with highest index. | ||
176 | */ | ||
177 | void pat_init_cache_modes(void) | ||
178 | { | ||
179 | int i; | ||
180 | enum page_cache_mode cache; | ||
181 | char pat_msg[33]; | ||
182 | u64 pat; | ||
183 | |||
184 | rdmsrl(MSR_IA32_CR_PAT, pat); | ||
185 | pat_msg[32] = 0; | ||
186 | for (i = 7; i >= 0; i--) { | ||
187 | cache = pat_get_cache_mode((pat >> (i * 8)) & 7, | ||
188 | pat_msg + 4 * i); | ||
189 | update_cache_mode_entry(i, cache); | ||
190 | } | ||
191 | pr_info("PAT configuration [0-7]: %s\n", pat_msg); | ||
192 | } | ||
193 | |||
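
Each of the eight PAT entries is an 8-bit field of which only the low 3 bits are architecturally defined, hence the mask with 7 above. A standalone sketch of the same decode in ordinary C, fed the default Linux PAT value that pat_init() programs (the same value the Xen code later in this patch checks for); pat_entry_name() is an illustrative stand-in for pat_get_cache_mode():

#include <stdio.h>
#include <stdint.h>

static const char *pat_entry_name(unsigned int val)
{
	switch (val) {	/* same mapping as the PAT_* enum above */
	case 0: return "UC ";
	case 1: return "WC ";
	case 4: return "WT ";
	case 5: return "WP ";
	case 6: return "WB ";
	case 7: return "UC-";
	default: return "WB ";	/* reserved encodings fall back to WB */
	}
}

int main(void)
{
	/* Default Linux PAT: WB WC UC- UC, repeated in both halves. */
	uint64_t pat = 0x0007010600070106ull;
	int i;

	printf("PAT configuration [0-7]:");
	for (i = 0; i < 8; i++)
		printf(" %s", pat_entry_name((pat >> (i * 8)) & 7));
	printf("\n");
	return 0;
}

This prints the eight entries as "WB WC UC- UC WB WC UC- UC", the same information pat_init_cache_modes() now logs in place of the old "x86 PAT enabled" line.
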
78 | #define PAT(x, y) ((u64)PAT_ ## y << ((x)*8)) | 194 | #define PAT(x, y) ((u64)PAT_ ## y << ((x)*8)) |
79 | 195 | ||
80 | void pat_init(void) | 196 | void pat_init(void) |
@@ -124,8 +240,7 @@ void pat_init(void) | |||
124 | wrmsrl(MSR_IA32_CR_PAT, pat); | 240 | wrmsrl(MSR_IA32_CR_PAT, pat); |
125 | 241 | ||
126 | if (boot_cpu) | 242 | if (boot_cpu) |
127 | printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n", | 243 | pat_init_cache_modes(); |
128 | smp_processor_id(), boot_pat_state, pat); | ||
129 | } | 244 | } |
130 | 245 | ||
131 | #undef PAT | 246 | #undef PAT |
@@ -139,20 +254,21 @@ static DEFINE_SPINLOCK(memtype_lock); /* protects memtype accesses */ | |||
139 | * The intersection is based on "Effective Memory Type" tables in IA-32 | 254 | * The intersection is based on "Effective Memory Type" tables in IA-32 |
140 | * SDM vol 3a | 255 | * SDM vol 3a |
141 | */ | 256 | */ |
142 | static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type) | 257 | static unsigned long pat_x_mtrr_type(u64 start, u64 end, |
258 | enum page_cache_mode req_type) | ||
143 | { | 259 | { |
144 | /* | 260 | /* |
145 | * Look for MTRR hint to get the effective type in case where PAT | 261 | * Look for MTRR hint to get the effective type in case where PAT |
146 | * request is for WB. | 262 | * request is for WB. |
147 | */ | 263 | */ |
148 | if (req_type == _PAGE_CACHE_WB) { | 264 | if (req_type == _PAGE_CACHE_MODE_WB) { |
149 | u8 mtrr_type; | 265 | u8 mtrr_type; |
150 | 266 | ||
151 | mtrr_type = mtrr_type_lookup(start, end); | 267 | mtrr_type = mtrr_type_lookup(start, end); |
152 | if (mtrr_type != MTRR_TYPE_WRBACK) | 268 | if (mtrr_type != MTRR_TYPE_WRBACK) |
153 | return _PAGE_CACHE_UC_MINUS; | 269 | return _PAGE_CACHE_MODE_UC_MINUS; |
154 | 270 | ||
155 | return _PAGE_CACHE_WB; | 271 | return _PAGE_CACHE_MODE_WB; |
156 | } | 272 | } |
157 | 273 | ||
158 | return req_type; | 274 | return req_type; |
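
The intersection rule above is deliberately narrow: only a WB request consults the MTRRs, and it is honoured only where the covering MTRR is itself write-back; any other requested mode passes through untouched. A minimal restatement of that rule, with mtrr_is_wrback standing in for mtrr_type_lookup() == MTRR_TYPE_WRBACK:

static enum page_cache_mode effective_type(enum page_cache_mode req,
					   bool mtrr_is_wrback)
{
	if (req != _PAGE_CACHE_MODE_WB)
		return req;		/* non-WB requests pass through */

	return mtrr_is_wrback ? _PAGE_CACHE_MODE_WB
			      : _PAGE_CACHE_MODE_UC_MINUS;
}
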
@@ -207,25 +323,26 @@ static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end) | |||
207 | * - Find the memtype of all the pages in the range, look for any conflicts | 323 | * - Find the memtype of all the pages in the range, look for any conflicts |
208 | * - In case of no conflicts, set the new memtype for pages in the range | 324 | * - In case of no conflicts, set the new memtype for pages in the range |
209 | */ | 325 | */ |
210 | static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type, | 326 | static int reserve_ram_pages_type(u64 start, u64 end, |
211 | unsigned long *new_type) | 327 | enum page_cache_mode req_type, |
328 | enum page_cache_mode *new_type) | ||
212 | { | 329 | { |
213 | struct page *page; | 330 | struct page *page; |
214 | u64 pfn; | 331 | u64 pfn; |
215 | 332 | ||
216 | if (req_type == _PAGE_CACHE_UC) { | 333 | if (req_type == _PAGE_CACHE_MODE_UC) { |
217 | /* We do not support strong UC */ | 334 | /* We do not support strong UC */ |
218 | WARN_ON_ONCE(1); | 335 | WARN_ON_ONCE(1); |
219 | req_type = _PAGE_CACHE_UC_MINUS; | 336 | req_type = _PAGE_CACHE_MODE_UC_MINUS; |
220 | } | 337 | } |
221 | 338 | ||
222 | for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { | 339 | for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { |
223 | unsigned long type; | 340 | enum page_cache_mode type; |
224 | 341 | ||
225 | page = pfn_to_page(pfn); | 342 | page = pfn_to_page(pfn); |
226 | type = get_page_memtype(page); | 343 | type = get_page_memtype(page); |
227 | if (type != -1) { | 344 | if (type != -1) { |
228 | printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n", | 345 | pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n", |
229 | start, end - 1, type, req_type); | 346 | start, end - 1, type, req_type); |
230 | if (new_type) | 347 | if (new_type) |
231 | *new_type = type; | 348 | *new_type = type; |
@@ -258,21 +375,21 @@ static int free_ram_pages_type(u64 start, u64 end) | |||
258 | 375 | ||
259 | /* | 376 | /* |
260 | * req_type typically has one of the following: | 377 | * req_type typically has one of the following: |
261 | * - _PAGE_CACHE_WB | 378 | * - _PAGE_CACHE_MODE_WB |
262 | * - _PAGE_CACHE_WC | 379 | * - _PAGE_CACHE_MODE_WC |
263 | * - _PAGE_CACHE_UC_MINUS | 380 | * - _PAGE_CACHE_MODE_UC_MINUS |
264 | * - _PAGE_CACHE_UC | 381 | * - _PAGE_CACHE_MODE_UC |
265 | * | 382 | * |
266 | * If new_type is NULL, function will return an error if it cannot reserve the | 383 | * If new_type is NULL, function will return an error if it cannot reserve the |
267 | * region with req_type. If new_type is non-NULL, function will return | 384 | * region with req_type. If new_type is non-NULL, function will return |
268 | * available type in new_type in case of no error. In case of any error | 385 | * available type in new_type in case of no error. In case of any error |
269 | * it will return a negative return value. | 386 | * it will return a negative return value. |
270 | */ | 387 | */ |
271 | int reserve_memtype(u64 start, u64 end, unsigned long req_type, | 388 | int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type, |
272 | unsigned long *new_type) | 389 | enum page_cache_mode *new_type) |
273 | { | 390 | { |
274 | struct memtype *new; | 391 | struct memtype *new; |
275 | unsigned long actual_type; | 392 | enum page_cache_mode actual_type; |
276 | int is_range_ram; | 393 | int is_range_ram; |
277 | int err = 0; | 394 | int err = 0; |
278 | 395 | ||
@@ -281,10 +398,10 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, | |||
281 | if (!pat_enabled) { | 398 | if (!pat_enabled) { |
282 | /* This is identical to page table setting without PAT */ | 399 | /* This is identical to page table setting without PAT */ |
283 | if (new_type) { | 400 | if (new_type) { |
284 | if (req_type == _PAGE_CACHE_WC) | 401 | if (req_type == _PAGE_CACHE_MODE_WC) |
285 | *new_type = _PAGE_CACHE_UC_MINUS; | 402 | *new_type = _PAGE_CACHE_MODE_UC_MINUS; |
286 | else | 403 | else |
287 | *new_type = req_type & _PAGE_CACHE_MASK; | 404 | *new_type = req_type; |
288 | } | 405 | } |
289 | return 0; | 406 | return 0; |
290 | } | 407 | } |
@@ -292,7 +409,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, | |||
292 | /* Low ISA region is always mapped WB in page table. No need to track */ | 409 | /* Low ISA region is always mapped WB in page table. No need to track */ |
293 | if (x86_platform.is_untracked_pat_range(start, end)) { | 410 | if (x86_platform.is_untracked_pat_range(start, end)) { |
294 | if (new_type) | 411 | if (new_type) |
295 | *new_type = _PAGE_CACHE_WB; | 412 | *new_type = _PAGE_CACHE_MODE_WB; |
296 | return 0; | 413 | return 0; |
297 | } | 414 | } |
298 | 415 | ||
@@ -302,7 +419,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, | |||
302 | * tools and ACPI tools). Use WB request for WB memory and use | 419 | * tools and ACPI tools). Use WB request for WB memory and use |
303 | * UC_MINUS otherwise. | 420 | * UC_MINUS otherwise. |
304 | */ | 421 | */ |
305 | actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK); | 422 | actual_type = pat_x_mtrr_type(start, end, req_type); |
306 | 423 | ||
307 | if (new_type) | 424 | if (new_type) |
308 | *new_type = actual_type; | 425 | *new_type = actual_type; |
@@ -394,12 +511,12 @@ int free_memtype(u64 start, u64 end) | |||
394 | * | 511 | * |
395 | * Only to be called when PAT is enabled | 512 | * Only to be called when PAT is enabled |
396 | * | 513 | * |
397 | * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or | 514 | * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS |
398 | * _PAGE_CACHE_UC | 515 | * or _PAGE_CACHE_MODE_UC |
399 | */ | 516 | */ |
400 | static unsigned long lookup_memtype(u64 paddr) | 517 | static enum page_cache_mode lookup_memtype(u64 paddr) |
401 | { | 518 | { |
402 | int rettype = _PAGE_CACHE_WB; | 519 | enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB; |
403 | struct memtype *entry; | 520 | struct memtype *entry; |
404 | 521 | ||
405 | if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE)) | 522 | if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE)) |
@@ -414,7 +531,7 @@ static unsigned long lookup_memtype(u64 paddr) | |||
414 | * default state and not reserved, and hence of type WB | 531 | * default state and not reserved, and hence of type WB |
415 | */ | 532 | */ |
416 | if (rettype == -1) | 533 | if (rettype == -1) |
417 | rettype = _PAGE_CACHE_WB; | 534 | rettype = _PAGE_CACHE_MODE_WB; |
418 | 535 | ||
419 | return rettype; | 536 | return rettype; |
420 | } | 537 | } |
@@ -425,7 +542,7 @@ static unsigned long lookup_memtype(u64 paddr) | |||
425 | if (entry != NULL) | 542 | if (entry != NULL) |
426 | rettype = entry->type; | 543 | rettype = entry->type; |
427 | else | 544 | else |
428 | rettype = _PAGE_CACHE_UC_MINUS; | 545 | rettype = _PAGE_CACHE_MODE_UC_MINUS; |
429 | 546 | ||
430 | spin_unlock(&memtype_lock); | 547 | spin_unlock(&memtype_lock); |
431 | return rettype; | 548 | return rettype; |
@@ -442,11 +559,11 @@ static unsigned long lookup_memtype(u64 paddr) | |||
442 | * On failure, returns non-zero | 559 | * On failure, returns non-zero |
443 | */ | 560 | */ |
444 | int io_reserve_memtype(resource_size_t start, resource_size_t end, | 561 | int io_reserve_memtype(resource_size_t start, resource_size_t end, |
445 | unsigned long *type) | 562 | enum page_cache_mode *type) |
446 | { | 563 | { |
447 | resource_size_t size = end - start; | 564 | resource_size_t size = end - start; |
448 | unsigned long req_type = *type; | 565 | enum page_cache_mode req_type = *type; |
449 | unsigned long new_type; | 566 | enum page_cache_mode new_type; |
450 | int ret; | 567 | int ret; |
451 | 568 | ||
452 | WARN_ON_ONCE(iomem_map_sanity_check(start, size)); | 569 | WARN_ON_ONCE(iomem_map_sanity_check(start, size)); |
@@ -520,13 +637,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) | |||
520 | int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, | 637 | int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, |
521 | unsigned long size, pgprot_t *vma_prot) | 638 | unsigned long size, pgprot_t *vma_prot) |
522 | { | 639 | { |
523 | unsigned long flags = _PAGE_CACHE_WB; | 640 | enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB; |
524 | 641 | ||
525 | if (!range_is_allowed(pfn, size)) | 642 | if (!range_is_allowed(pfn, size)) |
526 | return 0; | 643 | return 0; |
527 | 644 | ||
528 | if (file->f_flags & O_DSYNC) | 645 | if (file->f_flags & O_DSYNC) |
529 | flags = _PAGE_CACHE_UC_MINUS; | 646 | pcm = _PAGE_CACHE_MODE_UC_MINUS; |
530 | 647 | ||
531 | #ifdef CONFIG_X86_32 | 648 | #ifdef CONFIG_X86_32 |
532 | /* | 649 | /* |
@@ -543,12 +660,12 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, | |||
543 | boot_cpu_has(X86_FEATURE_CYRIX_ARR) || | 660 | boot_cpu_has(X86_FEATURE_CYRIX_ARR) || |
544 | boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) && | 661 | boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) && |
545 | (pfn << PAGE_SHIFT) >= __pa(high_memory)) { | 662 | (pfn << PAGE_SHIFT) >= __pa(high_memory)) { |
546 | flags = _PAGE_CACHE_UC; | 663 | pcm = _PAGE_CACHE_MODE_UC; |
547 | } | 664 | } |
548 | #endif | 665 | #endif |
549 | 666 | ||
550 | *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) | | 667 | *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) | |
551 | flags); | 668 | cachemode2protval(pcm)); |
552 | return 1; | 669 | return 1; |
553 | } | 670 | } |
554 | 671 | ||
@@ -556,7 +673,8 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, | |||
556 | * Change the memory type for the physical address range in kernel identity | 673 | * Change the memory type for the physical address range in kernel identity |
557 | * mapping space if that range is part of the identity map. | 674 | * mapping space if that range is part of the identity map. |
558 | */ | 675 | */ |
559 | int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags) | 676 | int kernel_map_sync_memtype(u64 base, unsigned long size, |
677 | enum page_cache_mode pcm) | ||
560 | { | 678 | { |
561 | unsigned long id_sz; | 679 | unsigned long id_sz; |
562 | 680 | ||
@@ -574,11 +692,11 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags) | |||
574 | __pa(high_memory) - base : | 692 | __pa(high_memory) - base : |
575 | size; | 693 | size; |
576 | 694 | ||
577 | if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) { | 695 | if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) { |
578 | printk(KERN_INFO "%s:%d ioremap_change_attr failed %s " | 696 | printk(KERN_INFO "%s:%d ioremap_change_attr failed %s " |
579 | "for [mem %#010Lx-%#010Lx]\n", | 697 | "for [mem %#010Lx-%#010Lx]\n", |
580 | current->comm, current->pid, | 698 | current->comm, current->pid, |
581 | cattr_name(flags), | 699 | cattr_name(pcm), |
582 | base, (unsigned long long)(base + size-1)); | 700 | base, (unsigned long long)(base + size-1)); |
583 | return -EINVAL; | 701 | return -EINVAL; |
584 | } | 702 | } |
@@ -595,8 +713,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, | |||
595 | { | 713 | { |
596 | int is_ram = 0; | 714 | int is_ram = 0; |
597 | int ret; | 715 | int ret; |
598 | unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK); | 716 | enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot); |
599 | unsigned long flags = want_flags; | 717 | enum page_cache_mode pcm = want_pcm; |
600 | 718 | ||
601 | is_ram = pat_pagerange_is_ram(paddr, paddr + size); | 719 | is_ram = pat_pagerange_is_ram(paddr, paddr + size); |
602 | 720 | ||
@@ -609,36 +727,36 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, | |||
609 | if (!pat_enabled) | 727 | if (!pat_enabled) |
610 | return 0; | 728 | return 0; |
611 | 729 | ||
612 | flags = lookup_memtype(paddr); | 730 | pcm = lookup_memtype(paddr); |
613 | if (want_flags != flags) { | 731 | if (want_pcm != pcm) { |
614 | printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n", | 732 | printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n", |
615 | current->comm, current->pid, | 733 | current->comm, current->pid, |
616 | cattr_name(want_flags), | 734 | cattr_name(want_pcm), |
617 | (unsigned long long)paddr, | 735 | (unsigned long long)paddr, |
618 | (unsigned long long)(paddr + size - 1), | 736 | (unsigned long long)(paddr + size - 1), |
619 | cattr_name(flags)); | 737 | cattr_name(pcm)); |
620 | *vma_prot = __pgprot((pgprot_val(*vma_prot) & | 738 | *vma_prot = __pgprot((pgprot_val(*vma_prot) & |
621 | (~_PAGE_CACHE_MASK)) | | 739 | (~_PAGE_CACHE_MASK)) | |
622 | flags); | 740 | cachemode2protval(pcm)); |
623 | } | 741 | } |
624 | return 0; | 742 | return 0; |
625 | } | 743 | } |
626 | 744 | ||
627 | ret = reserve_memtype(paddr, paddr + size, want_flags, &flags); | 745 | ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm); |
628 | if (ret) | 746 | if (ret) |
629 | return ret; | 747 | return ret; |
630 | 748 | ||
631 | if (flags != want_flags) { | 749 | if (pcm != want_pcm) { |
632 | if (strict_prot || | 750 | if (strict_prot || |
633 | !is_new_memtype_allowed(paddr, size, want_flags, flags)) { | 751 | !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) { |
634 | free_memtype(paddr, paddr + size); | 752 | free_memtype(paddr, paddr + size); |
635 | printk(KERN_ERR "%s:%d map pfn expected mapping type %s" | 753 | printk(KERN_ERR "%s:%d map pfn expected mapping type %s" |
636 | " for [mem %#010Lx-%#010Lx], got %s\n", | 754 | " for [mem %#010Lx-%#010Lx], got %s\n", |
637 | current->comm, current->pid, | 755 | current->comm, current->pid, |
638 | cattr_name(want_flags), | 756 | cattr_name(want_pcm), |
639 | (unsigned long long)paddr, | 757 | (unsigned long long)paddr, |
640 | (unsigned long long)(paddr + size - 1), | 758 | (unsigned long long)(paddr + size - 1), |
641 | cattr_name(flags)); | 759 | cattr_name(pcm)); |
642 | return -EINVAL; | 760 | return -EINVAL; |
643 | } | 761 | } |
644 | /* | 762 | /* |
@@ -647,10 +765,10 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, | |||
647 | */ | 765 | */ |
648 | *vma_prot = __pgprot((pgprot_val(*vma_prot) & | 766 | *vma_prot = __pgprot((pgprot_val(*vma_prot) & |
649 | (~_PAGE_CACHE_MASK)) | | 767 | (~_PAGE_CACHE_MASK)) | |
650 | flags); | 768 | cachemode2protval(pcm)); |
651 | } | 769 | } |
652 | 770 | ||
653 | if (kernel_map_sync_memtype(paddr, size, flags) < 0) { | 771 | if (kernel_map_sync_memtype(paddr, size, pcm) < 0) { |
654 | free_memtype(paddr, paddr + size); | 772 | free_memtype(paddr, paddr + size); |
655 | return -EINVAL; | 773 | return -EINVAL; |
656 | } | 774 | } |
@@ -709,7 +827,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, | |||
709 | unsigned long pfn, unsigned long addr, unsigned long size) | 827 | unsigned long pfn, unsigned long addr, unsigned long size) |
710 | { | 828 | { |
711 | resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT; | 829 | resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT; |
712 | unsigned long flags; | 830 | enum page_cache_mode pcm; |
713 | 831 | ||
714 | /* reserve the whole chunk starting from paddr */ | 832 | /* reserve the whole chunk starting from paddr */ |
715 | if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) { | 833 | if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) { |
@@ -728,18 +846,18 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, | |||
728 | * For anything smaller than the vma size we set prot based on the | 846 | * For anything smaller than the vma size we set prot based on the |
729 | * lookup. | 847 | * lookup. |
730 | */ | 848 | */ |
731 | flags = lookup_memtype(paddr); | 849 | pcm = lookup_memtype(paddr); |
732 | 850 | ||
733 | /* Check memtype for the remaining pages */ | 851 | /* Check memtype for the remaining pages */ |
734 | while (size > PAGE_SIZE) { | 852 | while (size > PAGE_SIZE) { |
735 | size -= PAGE_SIZE; | 853 | size -= PAGE_SIZE; |
736 | paddr += PAGE_SIZE; | 854 | paddr += PAGE_SIZE; |
737 | if (flags != lookup_memtype(paddr)) | 855 | if (pcm != lookup_memtype(paddr)) |
738 | return -EINVAL; | 856 | return -EINVAL; |
739 | } | 857 | } |
740 | 858 | ||
741 | *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) | | 859 | *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) | |
742 | flags); | 860 | cachemode2protval(pcm)); |
743 | 861 | ||
744 | return 0; | 862 | return 0; |
745 | } | 863 | } |
@@ -747,15 +865,15 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, | |||
747 | int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, | 865 | int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, |
748 | unsigned long pfn) | 866 | unsigned long pfn) |
749 | { | 867 | { |
750 | unsigned long flags; | 868 | enum page_cache_mode pcm; |
751 | 869 | ||
752 | if (!pat_enabled) | 870 | if (!pat_enabled) |
753 | return 0; | 871 | return 0; |
754 | 872 | ||
755 | /* Set prot based on lookup */ | 873 | /* Set prot based on lookup */ |
756 | flags = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT); | 874 | pcm = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT); |
757 | *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) | | 875 | *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) | |
758 | flags); | 876 | cachemode2protval(pcm)); |
759 | 877 | ||
760 | return 0; | 878 | return 0; |
761 | } | 879 | } |
@@ -791,7 +909,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, | |||
791 | pgprot_t pgprot_writecombine(pgprot_t prot) | 909 | pgprot_t pgprot_writecombine(pgprot_t prot) |
792 | { | 910 | { |
793 | if (pat_enabled) | 911 | if (pat_enabled) |
794 | return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC); | 912 | return __pgprot(pgprot_val(prot) | |
913 | cachemode2protval(_PAGE_CACHE_MODE_WC)); | ||
795 | else | 914 | else |
796 | return pgprot_noncached(prot); | 915 | return pgprot_noncached(prot); |
797 | } | 916 | } |
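
pgprot_writecombine() is the usual entry point for drivers that want a WC mapping; after this change it composes the protection bits through cachemode2protval() instead of OR-ing in the removed _PAGE_CACHE_WC constant. A hedged sketch of a typical caller, an mmap handler for a framebuffer-style device (demo_dev, vram_phys and vram_size are illustrative assumptions):

#include <linux/fs.h>
#include <linux/mm.h>

/* Illustrative device state; not part of this patch. */
static struct {
	phys_addr_t vram_phys;
	size_t vram_size;
} demo_dev;

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size > demo_dev.vram_size)
		return -EINVAL;

	/* WC when PAT is enabled; pgprot_noncached() fallback otherwise. */
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start,
			       demo_dev.vram_phys >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}
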
diff --git a/arch/x86/mm/pat_internal.h b/arch/x86/mm/pat_internal.h index 77e5ba153fac..f6411620305d 100644 --- a/arch/x86/mm/pat_internal.h +++ b/arch/x86/mm/pat_internal.h | |||
@@ -10,30 +10,32 @@ struct memtype { | |||
10 | u64 start; | 10 | u64 start; |
11 | u64 end; | 11 | u64 end; |
12 | u64 subtree_max_end; | 12 | u64 subtree_max_end; |
13 | unsigned long type; | 13 | enum page_cache_mode type; |
14 | struct rb_node rb; | 14 | struct rb_node rb; |
15 | }; | 15 | }; |
16 | 16 | ||
17 | static inline char *cattr_name(unsigned long flags) | 17 | static inline char *cattr_name(enum page_cache_mode pcm) |
18 | { | 18 | { |
19 | switch (flags & _PAGE_CACHE_MASK) { | 19 | switch (pcm) { |
20 | case _PAGE_CACHE_UC: return "uncached"; | 20 | case _PAGE_CACHE_MODE_UC: return "uncached"; |
21 | case _PAGE_CACHE_UC_MINUS: return "uncached-minus"; | 21 | case _PAGE_CACHE_MODE_UC_MINUS: return "uncached-minus"; |
22 | case _PAGE_CACHE_WB: return "write-back"; | 22 | case _PAGE_CACHE_MODE_WB: return "write-back"; |
23 | case _PAGE_CACHE_WC: return "write-combining"; | 23 | case _PAGE_CACHE_MODE_WC: return "write-combining"; |
24 | default: return "broken"; | 24 | case _PAGE_CACHE_MODE_WT: return "write-through"; |
25 | case _PAGE_CACHE_MODE_WP: return "write-protected"; | ||
26 | default: return "broken"; | ||
25 | } | 27 | } |
26 | } | 28 | } |
27 | 29 | ||
28 | #ifdef CONFIG_X86_PAT | 30 | #ifdef CONFIG_X86_PAT |
29 | extern int rbt_memtype_check_insert(struct memtype *new, | 31 | extern int rbt_memtype_check_insert(struct memtype *new, |
30 | unsigned long *new_type); | 32 | enum page_cache_mode *new_type); |
31 | extern struct memtype *rbt_memtype_erase(u64 start, u64 end); | 33 | extern struct memtype *rbt_memtype_erase(u64 start, u64 end); |
32 | extern struct memtype *rbt_memtype_lookup(u64 addr); | 34 | extern struct memtype *rbt_memtype_lookup(u64 addr); |
33 | extern int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos); | 35 | extern int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos); |
34 | #else | 36 | #else |
35 | static inline int rbt_memtype_check_insert(struct memtype *new, | 37 | static inline int rbt_memtype_check_insert(struct memtype *new, |
36 | unsigned long *new_type) | 38 | enum page_cache_mode *new_type) |
37 | { return 0; } | 39 | { return 0; } |
38 | static inline struct memtype *rbt_memtype_erase(u64 start, u64 end) | 40 | static inline struct memtype *rbt_memtype_erase(u64 start, u64 end) |
39 | { return NULL; } | 41 | { return NULL; } |
diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c index 415f6c4ced36..6582adcc8bd9 100644 --- a/arch/x86/mm/pat_rbtree.c +++ b/arch/x86/mm/pat_rbtree.c | |||
@@ -122,11 +122,12 @@ static struct memtype *memtype_rb_exact_match(struct rb_root *root, | |||
122 | 122 | ||
123 | static int memtype_rb_check_conflict(struct rb_root *root, | 123 | static int memtype_rb_check_conflict(struct rb_root *root, |
124 | u64 start, u64 end, | 124 | u64 start, u64 end, |
125 | unsigned long reqtype, unsigned long *newtype) | 125 | enum page_cache_mode reqtype, |
126 | enum page_cache_mode *newtype) | ||
126 | { | 127 | { |
127 | struct rb_node *node; | 128 | struct rb_node *node; |
128 | struct memtype *match; | 129 | struct memtype *match; |
129 | int found_type = reqtype; | 130 | enum page_cache_mode found_type = reqtype; |
130 | 131 | ||
131 | match = memtype_rb_lowest_match(&memtype_rbroot, start, end); | 132 | match = memtype_rb_lowest_match(&memtype_rbroot, start, end); |
132 | if (match == NULL) | 133 | if (match == NULL) |
@@ -187,7 +188,8 @@ static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata) | |||
187 | rb_insert_augmented(&newdata->rb, root, &memtype_rb_augment_cb); | 188 | rb_insert_augmented(&newdata->rb, root, &memtype_rb_augment_cb); |
188 | } | 189 | } |
189 | 190 | ||
190 | int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type) | 191 | int rbt_memtype_check_insert(struct memtype *new, |
192 | enum page_cache_mode *ret_type) | ||
191 | { | 193 | { |
192 | int err = 0; | 194 | int err = 0; |
193 | 195 | ||
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index 37c1435889ce..9b18ef315a55 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c | |||
@@ -433,14 +433,14 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | |||
433 | return -EINVAL; | 433 | return -EINVAL; |
434 | 434 | ||
435 | if (pat_enabled && write_combine) | 435 | if (pat_enabled && write_combine) |
436 | prot |= _PAGE_CACHE_WC; | 436 | prot |= cachemode2protval(_PAGE_CACHE_MODE_WC); |
437 | else if (pat_enabled || boot_cpu_data.x86 > 3) | 437 | else if (pat_enabled || boot_cpu_data.x86 > 3) |
438 | /* | 438 | /* |
439 | * ioremap() and ioremap_nocache() default to UC MINUS for now. | 439 | * ioremap() and ioremap_nocache() default to UC MINUS for now. |
440 | * To avoid attribute conflicts, request UC MINUS here | 440 | * To avoid attribute conflicts, request UC MINUS here |
441 | * as well. | 441 | * as well. |
442 | */ | 442 | */ |
443 | prot |= _PAGE_CACHE_UC_MINUS; | 443 | prot |= cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS); |
444 | 444 | ||
445 | vma->vm_page_prot = __pgprot(prot); | 445 | vma->vm_page_prot = __pgprot(prot); |
446 | 446 | ||
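
Both branches above now go through cachemode2protval(), the cache-mode to PTE-bits translation added earlier in this series, instead of OR-ing in removed _PAGE_CACHE_* constants. A sketch of what the default (PAT-disabled) translation plausibly looks like; the real table is __cachemode2pte_tbl and is rewritten by pat_init_cache_modes(), so treat the WT/WP rows in particular as assumptions:

/*
 * Assumed default encoding, using only PWT (bit 3) and PCD (bit 4):
 * without PAT only four modes are expressible, so WT and WP are
 * folded onto an uncached encoding here. Illustrative only.
 */
static const unsigned long demo_cachemode2pte[] = {
	[_PAGE_CACHE_MODE_WB]       = 0,
	[_PAGE_CACHE_MODE_WC]       = _PAGE_PWT,
	[_PAGE_CACHE_MODE_UC_MINUS] = _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC]       = _PAGE_PCD | _PAGE_PWT,
	[_PAGE_CACHE_MODE_WT]       = _PAGE_PCD,
	[_PAGE_CACHE_MODE_WP]       = _PAGE_PCD,
};
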
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index fac5e4f9607c..6bf3a13e3e0f 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -1100,12 +1100,6 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) | |||
1100 | /* Fast syscall setup is all done in hypercalls, so | 1100 | /* Fast syscall setup is all done in hypercalls, so |
1101 | these are all ignored. Stub them out here to stop | 1101 | these are all ignored. Stub them out here to stop |
1102 | Xen console noise. */ | 1102 | Xen console noise. */ |
1103 | break; | ||
1104 | |||
1105 | case MSR_IA32_CR_PAT: | ||
1106 | if (smp_processor_id() == 0) | ||
1107 | xen_set_pat(((u64)high << 32) | low); | ||
1108 | break; | ||
1109 | 1103 | ||
1110 | default: | 1104 | default: |
1111 | ret = native_write_msr_safe(msr, low, high); | 1105 | ret = native_write_msr_safe(msr, low, high); |
@@ -1561,10 +1555,6 @@ asmlinkage __visible void __init xen_start_kernel(void) | |||
1561 | 1555 | ||
1562 | /* Prevent unwanted bits from being set in PTEs. */ | 1556 | /* Prevent unwanted bits from being set in PTEs. */ |
1563 | __supported_pte_mask &= ~_PAGE_GLOBAL; | 1557 | __supported_pte_mask &= ~_PAGE_GLOBAL; |
1564 | #if 0 | ||
1565 | if (!xen_initial_domain()) | ||
1566 | #endif | ||
1567 | __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD); | ||
1568 | 1558 | ||
1569 | /* | 1559 | /* |
1570 | * Prevent page tables from being allocated in highmem, even | 1560 | * Prevent page tables from being allocated in highmem, even |
@@ -1618,14 +1608,6 @@ asmlinkage __visible void __init xen_start_kernel(void) | |||
1618 | */ | 1608 | */ |
1619 | acpi_numa = -1; | 1609 | acpi_numa = -1; |
1620 | #endif | 1610 | #endif |
1621 | #ifdef CONFIG_X86_PAT | ||
1622 | /* | ||
1623 | * For right now disable the PAT. We should remove this once | ||
1624 | * git commit 8eaffa67b43e99ae581622c5133e20b0f48bcef1 | ||
1625 | * (xen/pat: Disable PAT support for now) is reverted. | ||
1626 | */ | ||
1627 | pat_enabled = 0; | ||
1628 | #endif | ||
1629 | /* Don't do the full vcpu_info placement stuff until we have a | 1611 | /* Don't do the full vcpu_info placement stuff until we have a |
1630 | possible map and a non-dummy shared_info. */ | 1612 | possible map and a non-dummy shared_info. */ |
1631 | per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; | 1613 | per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; |
@@ -1636,6 +1618,13 @@ asmlinkage __visible void __init xen_start_kernel(void) | |||
1636 | xen_raw_console_write("mapping kernel into physical memory\n"); | 1618 | xen_raw_console_write("mapping kernel into physical memory\n"); |
1637 | xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base, xen_start_info->nr_pages); | 1619 | xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base, xen_start_info->nr_pages); |
1638 | 1620 | ||
1621 | /* | ||
1622 | * Modify the cache mode translation tables to match Xen's PAT | ||
1623 | * configuration. | ||
1624 | */ | ||
1625 | |||
1626 | pat_init_cache_modes(); | ||
1627 | |||
1639 | /* keep using Xen gdt for now; no urgent need to change it */ | 1628 | /* keep using Xen gdt for now; no urgent need to change it */ |
1640 | 1629 | ||
1641 | #ifdef CONFIG_X86_32 | 1630 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index a8a1a3d08d4d..9855eb8ee4b3 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -410,13 +410,7 @@ static pteval_t pte_pfn_to_mfn(pteval_t val) | |||
410 | __visible pteval_t xen_pte_val(pte_t pte) | 410 | __visible pteval_t xen_pte_val(pte_t pte) |
411 | { | 411 | { |
412 | pteval_t pteval = pte.pte; | 412 | pteval_t pteval = pte.pte; |
413 | #if 0 | 413 | |
414 | /* If this is a WC pte, convert back from Xen WC to Linux WC */ | ||
415 | if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) { | ||
416 | WARN_ON(!pat_enabled); | ||
417 | pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT; | ||
418 | } | ||
419 | #endif | ||
420 | return pte_mfn_to_pfn(pteval); | 414 | return pte_mfn_to_pfn(pteval); |
421 | } | 415 | } |
422 | PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val); | 416 | PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val); |
@@ -427,47 +421,8 @@ __visible pgdval_t xen_pgd_val(pgd_t pgd) | |||
427 | } | 421 | } |
428 | PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val); | 422 | PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val); |
429 | 423 | ||
430 | /* | ||
431 | * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7 | ||
432 | * are reserved for now, to correspond to the Intel-reserved PAT | ||
433 | * types. | ||
434 | * | ||
435 | * We expect Linux's PAT set as follows: | ||
436 | * | ||
437 | * Idx PTE flags Linux Xen Default | ||
438 | * 0 WB WB WB | ||
439 | * 1 PWT WC WT WT | ||
440 | * 2 PCD UC- UC- UC- | ||
441 | * 3 PCD PWT UC UC UC | ||
442 | * 4 PAT WB WC WB | ||
443 | * 5 PAT PWT WC WP WT | ||
444 | * 6 PAT PCD UC- rsv UC- | ||
445 | * 7 PAT PCD PWT UC rsv UC | ||
446 | */ | ||
447 | |||
448 | void xen_set_pat(u64 pat) | ||
449 | { | ||
450 | /* We expect Linux to use a PAT setting of | ||
451 | * UC UC- WC WB (ignoring the PAT flag) */ | ||
452 | WARN_ON(pat != 0x0007010600070106ull); | ||
453 | } | ||
454 | |||
455 | __visible pte_t xen_make_pte(pteval_t pte) | 424 | __visible pte_t xen_make_pte(pteval_t pte) |
456 | { | 425 | { |
457 | #if 0 | ||
458 | /* If Linux is trying to set a WC pte, then map to the Xen WC. | ||
459 | * If _PAGE_PAT is set, then it probably means it is really | ||
460 | * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope | ||
461 | * things work out OK... | ||
462 | * | ||
463 | * (We should never see kernel mappings with _PAGE_PSE set, | ||
464 | * but we could see hugetlbfs mappings, I think.). | ||
465 | */ | ||
466 | if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) { | ||
467 | if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT) | ||
468 | pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT; | ||
469 | } | ||
470 | #endif | ||
471 | pte = pte_pfn_to_mfn(pte); | 426 | pte = pte_pfn_to_mfn(pte); |
472 | 427 | ||
473 | return native_make_pte(pte); | 428 | return native_make_pte(pte); |
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 28c7e0be56e4..4ab9298c5e17 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h | |||
@@ -33,7 +33,6 @@ extern unsigned long xen_max_p2m_pfn; | |||
33 | 33 | ||
34 | void xen_mm_pin_all(void); | 34 | void xen_mm_pin_all(void); |
35 | void xen_mm_unpin_all(void); | 35 | void xen_mm_unpin_all(void); |
36 | void xen_set_pat(u64); | ||
37 | 36 | ||
38 | char * __init xen_memory_setup(void); | 37 | char * __init xen_memory_setup(void); |
39 | char * xen_auto_xlated_memory_setup(void); | 38 | char * xen_auto_xlated_memory_setup(void); |
diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c index 4aa56ba78f32..6d9ef39810c8 100644 --- a/drivers/video/fbdev/gbefb.c +++ b/drivers/video/fbdev/gbefb.c | |||
@@ -54,7 +54,8 @@ struct gbefb_par { | |||
54 | #endif | 54 | #endif |
55 | #endif | 55 | #endif |
56 | #ifdef CONFIG_X86 | 56 | #ifdef CONFIG_X86 |
57 | #define pgprot_fb(_prot) ((_prot) | _PAGE_PCD) | 57 | #define pgprot_fb(_prot) (((_prot) & ~_PAGE_CACHE_MASK) | \ |
58 | cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)) | ||
58 | #endif | 59 | #endif |
59 | 60 | ||
60 | /* | 61 | /* |
diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c index 5f930aeccf1f..6b70d7f62b2f 100644 --- a/drivers/video/fbdev/vermilion/vermilion.c +++ b/drivers/video/fbdev/vermilion/vermilion.c | |||
@@ -1003,13 +1003,15 @@ static int vmlfb_mmap(struct fb_info *info, struct vm_area_struct *vma) | |||
1003 | struct vml_info *vinfo = container_of(info, struct vml_info, info); | 1003 | struct vml_info *vinfo = container_of(info, struct vml_info, info); |
1004 | unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; | 1004 | unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; |
1005 | int ret; | 1005 | int ret; |
1006 | unsigned long prot; | ||
1006 | 1007 | ||
1007 | ret = vmlfb_vram_offset(vinfo, offset); | 1008 | ret = vmlfb_vram_offset(vinfo, offset); |
1008 | if (ret) | 1009 | if (ret) |
1009 | return -EINVAL; | 1010 | return -EINVAL; |
1010 | 1011 | ||
1011 | pgprot_val(vma->vm_page_prot) |= _PAGE_PCD; | 1012 | prot = pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK; |
1012 | pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT; | 1013 | pgprot_val(vma->vm_page_prot) = |
1014 | prot | cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS); | ||
1013 | 1015 | ||
1014 | return vm_iomap_memory(vma, vinfo->vram_start, | 1016 | return vm_iomap_memory(vma, vinfo->vram_start, |
1015 | vinfo->vram_contig_size); | 1017 | vinfo->vram_contig_size); |
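
Both fbdev fixes apply the same rule seen throughout this patch: clear all of _PAGE_CACHE_MASK first, then OR in the translated mode, so a stale PWT/PCD/PAT bit left in the pgprot cannot combine with the new bits into an unintended PAT index. A minimal helper capturing the pattern (the name is illustrative):

#include <asm/pgtable_types.h>

/* Sketch: derive a UC- protection value without inheriting stale cache bits. */
static pgprot_t demo_make_uc_minus(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_MASK) |
			cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
}
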