Diffstat (limited to 'arch/x86/mm/pat.c')
-rw-r--r--  arch/x86/mm/pat.c | 76
1 files changed, 49 insertions, 27 deletions
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index ffc88cc00fda..9127e31c7268 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -30,7 +30,7 @@
 #ifdef CONFIG_X86_PAT
 int __read_mostly pat_enabled = 1;
 
-void __cpuinit pat_disable(char *reason)
+void __cpuinit pat_disable(const char *reason)
 {
 	pat_enabled = 0;
 	printk(KERN_INFO "%s\n", reason);
@@ -42,6 +42,11 @@ static int __init nopat(char *str)
 	return 0;
 }
 early_param("nopat", nopat);
+#else
+static inline void pat_disable(const char *reason)
+{
+	(void)reason;
+}
 #endif
 
 
@@ -78,16 +83,20 @@ void pat_init(void)
 	if (!pat_enabled)
 		return;
 
-	/* Paranoia check. */
-	if (!cpu_has_pat && boot_pat_state) {
-		/*
-		 * If this happens we are on a secondary CPU, but
-		 * switched to PAT on the boot CPU. We have no way to
-		 * undo PAT.
-		 */
-		printk(KERN_ERR "PAT enabled, "
-		       "but not supported by secondary CPU\n");
-		BUG();
+	if (!cpu_has_pat) {
+		if (!boot_pat_state) {
+			pat_disable("PAT not supported by CPU.");
+			return;
+		} else {
+			/*
+			 * If this happens we are on a secondary CPU, but
+			 * switched to PAT on the boot CPU. We have no way to
+			 * undo PAT.
+			 */
+			printk(KERN_ERR "PAT enabled, "
+			       "but not supported by secondary CPU\n");
+			BUG();
+		}
 	}
 
 	/* Set PWT to Write-Combining. All other bits stay the same */
@@ -618,12 +627,13 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
  * Reserved non RAM regions only and after successful reserve_memtype,
  * this func also keeps identity mapping (if any) in sync with this new prot.
  */
-static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot)
+static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
+				int strict_prot)
 {
 	int is_ram = 0;
 	int id_sz, ret;
 	unsigned long flags;
-	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
+	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
 
 	is_ram = pagerange_is_ram(paddr, paddr + size);
 
@@ -642,15 +652,24 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot)
 		return ret;
 
 	if (flags != want_flags) {
-		free_memtype(paddr, paddr + size);
-		printk(KERN_ERR
-		"%s:%d map pfn expected mapping type %s for %Lx-%Lx, got %s\n",
-			current->comm, current->pid,
-			cattr_name(want_flags),
-			(unsigned long long)paddr,
-			(unsigned long long)(paddr + size),
-			cattr_name(flags));
-		return -EINVAL;
+		if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
+			free_memtype(paddr, paddr + size);
+			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
+				" for %Lx-%Lx, got %s\n",
+				current->comm, current->pid,
+				cattr_name(want_flags),
+				(unsigned long long)paddr,
+				(unsigned long long)(paddr + size),
+				cattr_name(flags));
+			return -EINVAL;
+		}
+		/*
+		 * We allow returning different type than the one requested in
+		 * non strict case.
+		 */
+		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
+				      (~_PAGE_CACHE_MASK)) |
+				     flags);
 	}
 
 	/* Need to keep identity mapping in sync */
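
For reference, a minimal sketch of the compatibility rule the new non-strict path relies on. is_new_memtype_allowed() is defined outside this file (in the x86 pgtable headers), so the body below is an assumption about its behavior for illustration, not part of this patch:

	/*
	 * Sketch only, assuming the usual PAT compatibility rules: a request
	 * may be satisfied with a different memory type, but an uncached
	 * (UC-) or write-combine (WC) request must not come back as
	 * write-back (WB).
	 */
	static inline int is_new_memtype_allowed(unsigned long flags,
						 unsigned long new_flags)
	{
		if ((flags == _PAGE_CACHE_UC_MINUS && new_flags == _PAGE_CACHE_WB) ||
		    (flags == _PAGE_CACHE_WC && new_flags == _PAGE_CACHE_WB))
			return 0;

		return 1;
	}
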
@@ -706,6 +725,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 	unsigned long vma_start = vma->vm_start;
 	unsigned long vma_end = vma->vm_end;
 	unsigned long vma_size = vma_end - vma_start;
+	pgprot_t pgprot;
 
 	if (!pat_enabled)
 		return 0;
@@ -719,7 +739,8 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 			WARN_ON_ONCE(1);
 			return -EINVAL;
 		}
-		return reserve_pfn_range(paddr, vma_size, __pgprot(prot));
+		pgprot = __pgprot(prot);
+		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
 	}
 
 	/* reserve entire vma page by page, using pfn and prot from pte */
@@ -727,7 +748,8 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
 			continue;
 
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
+		pgprot = __pgprot(prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
 		if (retval)
 			goto cleanup_ret;
 	}
@@ -758,7 +780,7 @@ cleanup_ret:
  * Note that this function can be called with caller trying to map only a
  * subrange/page inside the vma.
  */
-int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size)
 {
 	int retval = 0;
@@ -775,14 +797,14 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
 	if (is_linear_pfn_mapping(vma)) {
 		/* reserve the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
-		return reserve_pfn_range(paddr, vma_size, prot);
+		return reserve_pfn_range(paddr, vma_size, prot, 0);
 	}
 
 	/* reserve page by page using pfn and size */
 	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		paddr = base_paddr + i;
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
 		if (retval)
 			goto cleanup_ret;
 	}
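
For context, a rough sketch of how a caller might use the new pointer-based interface in the non-strict case. The helper below is hypothetical and only track_pfn_vma_new() comes from this patch; the point is that the PAT layer may rewrite the cache attribute in *prot and the caller then maps with the possibly-adjusted protection:

	/* Hypothetical caller, for illustration only. */
	static int example_remap(struct vm_area_struct *vma, unsigned long pfn,
				 unsigned long size)
	{
		pgprot_t prot = vma->vm_page_prot;
		int err;

		/* May adjust the cache attribute bits in prot (non-strict path). */
		err = track_pfn_vma_new(vma, &prot, pfn, size);
		if (err)
			return err;

		vma->vm_page_prot = prot;	/* keep the vma's view in sync */
		return 0;
	}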