about summary refs log tree commit diff stats
path: root/arch/x86/mm/pat.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/mm/pat.c')
-rw-r--r--arch/x86/mm/pat.c114
1 files changed, 65 insertions, 49 deletions
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 7b61036427df..05f9aef6818a 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -30,7 +30,7 @@
30#ifdef CONFIG_X86_PAT 30#ifdef CONFIG_X86_PAT
31int __read_mostly pat_enabled = 1; 31int __read_mostly pat_enabled = 1;
32 32
33void __cpuinit pat_disable(char *reason) 33void __cpuinit pat_disable(const char *reason)
34{ 34{
35 pat_enabled = 0; 35 pat_enabled = 0;
36 printk(KERN_INFO "%s\n", reason); 36 printk(KERN_INFO "%s\n", reason);
@@ -42,6 +42,11 @@ static int __init nopat(char *str)
42 return 0; 42 return 0;
43} 43}
44early_param("nopat", nopat); 44early_param("nopat", nopat);
45#else
/*
 * Stub for !CONFIG_X86_PAT builds: PAT is compiled out, so disabling it
 * is a no-op.  Keeps callers (e.g. CPU setup code) building unchanged.
 */
static inline void pat_disable(const char *reason)
{
	(void)reason;	/* silence unused-parameter warnings */
}
45#endif 50#endif
46 51
47 52
@@ -78,16 +83,20 @@ void pat_init(void)
78 if (!pat_enabled) 83 if (!pat_enabled)
79 return; 84 return;
80 85
81 /* Paranoia check. */ 86 if (!cpu_has_pat) {
82 if (!cpu_has_pat && boot_pat_state) { 87 if (!boot_pat_state) {
83 /* 88 pat_disable("PAT not supported by CPU.");
84 * If this happens we are on a secondary CPU, but 89 return;
85 * switched to PAT on the boot CPU. We have no way to 90 } else {
86 * undo PAT. 91 /*
87 */ 92 * If this happens we are on a secondary CPU, but
88 printk(KERN_ERR "PAT enabled, " 93 * switched to PAT on the boot CPU. We have no way to
89 "but not supported by secondary CPU\n"); 94 * undo PAT.
90 BUG(); 95 */
96 printk(KERN_ERR "PAT enabled, "
97 "but not supported by secondary CPU\n");
98 BUG();
99 }
91 } 100 }
92 101
93 /* Set PWT to Write-Combining. All other bits stay the same */ 102 /* Set PWT to Write-Combining. All other bits stay the same */
@@ -211,6 +220,33 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
211static struct memtype *cached_entry; 220static struct memtype *cached_entry;
212static u64 cached_start; 221static u64 cached_start;
213 222
223static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
224{
225 int ram_page = 0, not_rampage = 0;
226 unsigned long page_nr;
227
228 for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
229 ++page_nr) {
230 /*
231 * For legacy reasons, physical address range in the legacy ISA
232 * region is tracked as non-RAM. This will allow users of
233 * /dev/mem to map portions of legacy ISA region, even when
234 * some of those portions are listed(or not even listed) with
235 * different e820 types(RAM/reserved/..)
236 */
237 if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
238 page_is_ram(page_nr))
239 ram_page = 1;
240 else
241 not_rampage = 1;
242
243 if (ram_page == not_rampage)
244 return -1;
245 }
246
247 return ram_page;
248}
249
214/* 250/*
215 * For RAM pages, mark the pages as non WB memory type using 251 * For RAM pages, mark the pages as non WB memory type using
216 * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or 252 * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
@@ -336,20 +372,12 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
336 if (new_type) 372 if (new_type)
337 *new_type = actual_type; 373 *new_type = actual_type;
338 374
339 /* 375 is_range_ram = pat_pagerange_is_ram(start, end);
340 * For legacy reasons, some parts of the physical address range in the 376 if (is_range_ram == 1)
341 * legacy 1MB region is treated as non-RAM (even when listed as RAM in 377 return reserve_ram_pages_type(start, end, req_type,
342 * the e820 tables). So we will track the memory attributes of this 378 new_type);
343 * legacy 1MB region using the linear memtype_list always. 379 else if (is_range_ram < 0)
344 */ 380 return -EINVAL;
345 if (end >= ISA_END_ADDRESS) {
346 is_range_ram = pagerange_is_ram(start, end);
347 if (is_range_ram == 1)
348 return reserve_ram_pages_type(start, end, req_type,
349 new_type);
350 else if (is_range_ram < 0)
351 return -EINVAL;
352 }
353 381
354 new = kmalloc(sizeof(struct memtype), GFP_KERNEL); 382 new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
355 if (!new) 383 if (!new)
@@ -446,19 +474,11 @@ int free_memtype(u64 start, u64 end)
446 if (is_ISA_range(start, end - 1)) 474 if (is_ISA_range(start, end - 1))
447 return 0; 475 return 0;
448 476
449 /* 477 is_range_ram = pat_pagerange_is_ram(start, end);
450 * For legacy reasons, some parts of the physical address range in the 478 if (is_range_ram == 1)
451 * legacy 1MB region is treated as non-RAM (even when listed as RAM in 479 return free_ram_pages_type(start, end);
452 * the e820 tables). So we will track the memory attributes of this 480 else if (is_range_ram < 0)
453 * legacy 1MB region using the linear memtype_list always. 481 return -EINVAL;
454 */
455 if (end >= ISA_END_ADDRESS) {
456 is_range_ram = pagerange_is_ram(start, end);
457 if (is_range_ram == 1)
458 return free_ram_pages_type(start, end);
459 else if (is_range_ram < 0)
460 return -EINVAL;
461 }
462 482
463 spin_lock(&memtype_lock); 483 spin_lock(&memtype_lock);
464 list_for_each_entry(entry, &memtype_list, nd) { 484 list_for_each_entry(entry, &memtype_list, nd) {
@@ -626,17 +646,13 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
626 unsigned long flags; 646 unsigned long flags;
627 unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK); 647 unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
628 648
629 is_ram = pagerange_is_ram(paddr, paddr + size); 649 is_ram = pat_pagerange_is_ram(paddr, paddr + size);
630 650
631 if (is_ram != 0) { 651 /*
632 /* 652 * reserve_pfn_range() doesn't support RAM pages.
633 * For mapping RAM pages, drivers need to call 653 */
634 * set_memory_[uc|wc|wb] directly, for reserve and free, before 654 if (is_ram != 0)
635 * setting up the PTE. 655 return -EINVAL;
636 */
637 WARN_ON_ONCE(1);
638 return 0;
639 }
640 656
641 ret = reserve_memtype(paddr, paddr + size, want_flags, &flags); 657 ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
642 if (ret) 658 if (ret)
@@ -693,7 +709,7 @@ static void free_pfn_range(u64 paddr, unsigned long size)
693{ 709{
694 int is_ram; 710 int is_ram;
695 711
696 is_ram = pagerange_is_ram(paddr, paddr + size); 712 is_ram = pat_pagerange_is_ram(paddr, paddr + size);
697 if (is_ram == 0) 713 if (is_ram == 0)
698 free_memtype(paddr, paddr + size); 714 free_memtype(paddr, paddr + size);
699} 715}