 arch/x86/include/asm/io.h  |  2 +-
 arch/x86/include/asm/pat.h |  2 +-
 arch/x86/mm/ioremap.c      | 65 +++++++++++++++++++++++-----------------
 arch/x86/mm/pat.c          | 12 +++++++-----
 4 files changed, 44 insertions(+), 37 deletions(-)
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index b8237d8a1e0c..71b9e65daf25 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -314,7 +314,7 @@ extern void *xlate_dev_mem_ptr(unsigned long phys);
 extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
 
 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
-				unsigned long prot_val);
+				enum page_cache_mode pcm);
 extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
 
 extern bool is_early_ioremap_ptep(pte_t *ptep);
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index a8438bc06871..d35ee2d976ca 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -17,7 +17,7 @@ extern int reserve_memtype(u64 start, u64 end,
 extern int free_memtype(u64 start, u64 end);
 
 extern int kernel_map_sync_memtype(u64 base, unsigned long size,
-				unsigned long flag);
+				enum page_cache_mode pcm);
 
 int io_reserve_memtype(resource_size_t start, resource_size_t end,
 			enum page_cache_mode *pcm);
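
Both prototypes now take the enum page_cache_mode type introduced earlier in this series. For context only, a minimal sketch of that type and the two conversion helpers the rest of the patch leans on; the real definitions live in arch/x86/include/asm/pgtable_types.h and are not part of this diff:

/* Sketch for context, assuming the definitions from earlier in this
 * series; not part of this patch. */
enum page_cache_mode {
	_PAGE_CACHE_MODE_WB       = 0,
	_PAGE_CACHE_MODE_WC       = 1,
	_PAGE_CACHE_MODE_UC_MINUS = 2,
	_PAGE_CACHE_MODE_UC       = 3,
	_PAGE_CACHE_MODE_WT       = 4,
	_PAGE_CACHE_MODE_WP       = 5,
	_PAGE_CACHE_MODE_NUM      = 8
};

/* Translate a cache mode to the _PAGE_PAT/_PAGE_PCD/_PAGE_PWT pte bits,
 * and back again. */
unsigned long cachemode2protval(enum page_cache_mode pcm);
enum page_cache_mode pgprot2cachemode(pgprot_t pgprot);
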
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 3a81eb9aad78..f31507f6f60b 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -29,20 +29,20 @@
  * conflicts.
  */
 int ioremap_change_attr(unsigned long vaddr, unsigned long size,
-			unsigned long prot_val)
+			enum page_cache_mode pcm)
 {
 	unsigned long nrpages = size >> PAGE_SHIFT;
 	int err;
 
-	switch (prot_val) {
-	case _PAGE_CACHE_UC:
+	switch (pcm) {
+	case _PAGE_CACHE_MODE_UC:
 	default:
 		err = _set_memory_uc(vaddr, nrpages);
 		break;
-	case _PAGE_CACHE_WC:
+	case _PAGE_CACHE_MODE_WC:
 		err = _set_memory_wc(vaddr, nrpages);
 		break;
-	case _PAGE_CACHE_WB:
+	case _PAGE_CACHE_MODE_WB:
 		err = _set_memory_wb(vaddr, nrpages);
 		break;
 	}
@@ -75,13 +75,14 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
  * caller shouldn't need to know that small detail.
  */
 static void __iomem *__ioremap_caller(resource_size_t phys_addr,
-		unsigned long size, unsigned long prot_val, void *caller)
+		unsigned long size, enum page_cache_mode pcm, void *caller)
 {
 	unsigned long offset, vaddr;
 	resource_size_t pfn, last_pfn, last_addr;
 	const resource_size_t unaligned_phys_addr = phys_addr;
 	const unsigned long unaligned_size = size;
 	struct vm_struct *area;
+	enum page_cache_mode new_pcm;
 	unsigned long new_prot_val;
 	pgprot_t prot;
 	int retval;
@@ -134,39 +135,42 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
 	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
-						prot_val, &new_prot_val);
+				cachemode2protval(pcm), &new_prot_val);
 	if (retval) {
 		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
 		return NULL;
 	}
 
-	if (prot_val != new_prot_val) {
-		if (!is_new_memtype_allowed(phys_addr, size,
-					    pgprot2cachemode(__pgprot(prot_val)),
-					    pgprot2cachemode(__pgprot(new_prot_val)))) {
+	new_pcm = pgprot2cachemode(__pgprot(new_prot_val));
+
+	if (pcm != new_pcm) {
+		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
 			printk(KERN_ERR
-		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
+		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
 				(unsigned long long)phys_addr,
 				(unsigned long long)(phys_addr + size),
-				prot_val, new_prot_val);
+				pcm, new_pcm);
 			goto err_free_memtype;
 		}
-		prot_val = new_prot_val;
+		pcm = new_pcm;
 	}
 
-	switch (prot_val) {
-	case _PAGE_CACHE_UC:
+	prot = PAGE_KERNEL_IO;
+	switch (pcm) {
+	case _PAGE_CACHE_MODE_UC:
 	default:
-		prot = PAGE_KERNEL_IO_NOCACHE;
+		prot = __pgprot(pgprot_val(prot) |
+				cachemode2protval(_PAGE_CACHE_MODE_UC));
 		break;
-	case _PAGE_CACHE_UC_MINUS:
-		prot = PAGE_KERNEL_IO_UC_MINUS;
+	case _PAGE_CACHE_MODE_UC_MINUS:
+		prot = __pgprot(pgprot_val(prot) |
+				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
 		break;
-	case _PAGE_CACHE_WC:
-		prot = PAGE_KERNEL_IO_WC;
+	case _PAGE_CACHE_MODE_WC:
+		prot = __pgprot(pgprot_val(prot) |
+				cachemode2protval(_PAGE_CACHE_MODE_WC));
 		break;
-	case _PAGE_CACHE_WB:
-		prot = PAGE_KERNEL_IO;
+	case _PAGE_CACHE_MODE_WB:
 		break;
 	}
 
@@ -179,7 +183,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	area->phys_addr = phys_addr;
 	vaddr = (unsigned long) area->addr;
 
-	if (kernel_map_sync_memtype(phys_addr, size, prot_val))
+	if (kernel_map_sync_memtype(phys_addr, size, pcm))
 		goto err_free_area;
 
 	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
@@ -228,14 +232,14 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
 	/*
 	 * Ideally, this should be:
-	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
+	 *	pat_enabled ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
 	 *
 	 * Till we fix all X drivers to use ioremap_wc(), we will use
 	 * UC MINUS.
 	 */
-	unsigned long val = _PAGE_CACHE_UC_MINUS;
+	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;
 
-	return __ioremap_caller(phys_addr, size, val,
+	return __ioremap_caller(phys_addr, size, pcm,
 				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_nocache);
@@ -253,7 +257,7 @@ EXPORT_SYMBOL(ioremap_nocache);
 void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
 {
 	if (pat_enabled)
-		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
+		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
 					__builtin_return_address(0));
 	else
 		return ioremap_nocache(phys_addr, size);
@@ -262,7 +266,7 @@ EXPORT_SYMBOL(ioremap_wc);
 
 void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 {
-	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
+	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
 				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_cache);
@@ -270,7 +274,8 @@ EXPORT_SYMBOL(ioremap_cache);
 void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
 				unsigned long prot_val)
 {
-	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
+	return __ioremap_caller(phys_addr, size,
+				pgprot2cachemode(__pgprot(prot_val)),
 				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_prot);
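
The rewritten switch in __ioremap_caller() builds the final protection by OR-ing the cache-attribute bits for the requested mode into PAGE_KERNEL_IO, rather than picking one of the precomposed PAGE_KERNEL_IO_* variants. A hypothetical helper (not in the patch) that collapses the new switch, shown only to make the mapping explicit:

/* Hypothetical helper equivalent to the new switch: start from
 * PAGE_KERNEL_IO and OR in the PAT/PCD/PWT bits for the requested
 * cache mode. WB contributes no extra bits; anything the switch does
 * not know about falls back to UC, as the default case does. */
static pgprot_t ioremap_pgprot(enum page_cache_mode pcm)
{
	pgprot_t prot = PAGE_KERNEL_IO;

	switch (pcm) {
	case _PAGE_CACHE_MODE_WB:
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) | cachemode2protval(pcm));
		break;
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	}
	return prot;
}
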
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 2f3744fdc741..8f68a83491ba 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -462,7 +462,7 @@ int io_reserve_memtype(resource_size_t start, resource_size_t end,
 	if (!is_new_memtype_allowed(start, size, req_type, new_type))
 		goto out_free;
 
-	if (kernel_map_sync_memtype(start, size, new_prot) < 0)
+	if (kernel_map_sync_memtype(start, size, new_type) < 0)
 		goto out_free;
 
 	*type = new_type;
@@ -560,7 +560,8 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
  * Change the memory type for the physical address range in kernel identity
  * mapping space if that range is a part of identity map.
  */
-int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
+int kernel_map_sync_memtype(u64 base, unsigned long size,
+			    enum page_cache_mode pcm)
 {
 	unsigned long id_sz;
 
@@ -578,11 +579,11 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
 				__pa(high_memory) - base :
 				size;
 
-	if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
+	if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
 		printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
 			"for [mem %#010Lx-%#010Lx]\n",
 			current->comm, current->pid,
-			cattr_name(flags),
+			cattr_name(cachemode2protval(pcm)),
 			base, (unsigned long long)(base + size-1));
 		return -EINVAL;
 	}
@@ -656,7 +657,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 			flags);
 	}
 
-	if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
+	if (kernel_map_sync_memtype(paddr, size,
+				    pgprot2cachemode(__pgprot(flags))) < 0) {
 		free_memtype(paddr, paddr + size);
 		return -EINVAL;
 	}
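
With kernel_map_sync_memtype() taking an enum page_cache_mode, call sites that still track memory types as raw pte flag bits, as reserve_pfn_range() does above, convert at the call boundary. A minimal, hypothetical caller illustrating the pattern (sync_identity_map and pte_flags are illustrative names, not part of the patch):

/* Hypothetical caller: code still holding raw _PAGE_CACHE_* pte bits
 * translates them with pgprot2cachemode() before calling the reworked
 * interface. */
static int sync_identity_map(u64 paddr, unsigned long size,
			     unsigned long pte_flags)
{
	return kernel_map_sync_memtype(paddr, size,
				       pgprot2cachemode(__pgprot(pte_flags)));
}
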