Diffstat (limited to 'arch/x86/mm/pat.c')
 arch/x86/mm/pat.c | 127
 1 file changed, 88 insertions(+), 39 deletions(-)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 85cbd3cd3723..3be399013de6 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -333,11 +333,20 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 					req_type & _PAGE_CACHE_MASK);
 	}
 
-	is_range_ram = pagerange_is_ram(start, end);
-	if (is_range_ram == 1)
-		return reserve_ram_pages_type(start, end, req_type, new_type);
-	else if (is_range_ram < 0)
-		return -EINVAL;
+	/*
+	 * For legacy reasons, some parts of the physical address range in the
+	 * legacy 1MB region is treated as non-RAM (even when listed as RAM in
+	 * the e820 tables). So we will track the memory attributes of this
+	 * legacy 1MB region using the linear memtype_list always.
+	 */
+	if (end >= ISA_END_ADDRESS) {
+		is_range_ram = pagerange_is_ram(start, end);
+		if (is_range_ram == 1)
+			return reserve_ram_pages_type(start, end, req_type,
+						      new_type);
+		else if (is_range_ram < 0)
+			return -EINVAL;
+	}
 
 	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
 	if (!new)
@@ -437,11 +446,19 @@ int free_memtype(u64 start, u64 end)
 	if (is_ISA_range(start, end - 1))
 		return 0;
 
-	is_range_ram = pagerange_is_ram(start, end);
-	if (is_range_ram == 1)
-		return free_ram_pages_type(start, end);
-	else if (is_range_ram < 0)
-		return -EINVAL;
+	/*
+	 * For legacy reasons, some parts of the physical address range in the
+	 * legacy 1MB region is treated as non-RAM (even when listed as RAM in
+	 * the e820 tables). So we will track the memory attributes of this
+	 * legacy 1MB region using the linear memtype_list always.
+	 */
+	if (end >= ISA_END_ADDRESS) {
+		is_range_ram = pagerange_is_ram(start, end);
+		if (is_range_ram == 1)
+			return free_ram_pages_type(start, end);
+		else if (is_range_ram < 0)
+			return -EINVAL;
+	}
 
 	spin_lock(&memtype_lock);
 	list_for_each_entry(entry, &memtype_list, nd) {
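
Note: pagerange_is_ram() is assumed here to return 1 when every page in the range is RAM per the e820 map, 0 when none of it is, and a negative value when the range is mixed; ranges ending below ISA_END_ADDRESS (the legacy 1MB boundary) now skip that check and stay on the memtype_list path in both reserve_memtype() and free_memtype(). A minimal sketch of the resulting routing, with handle_ram_pages() and handle_memtype_list() as hypothetical stand-ins for the two tracking paths:

/*
 * Sketch only (assumed semantics). handle_ram_pages() stands in for
 * reserve_ram_pages_type()/free_ram_pages_type(); handle_memtype_list()
 * stands in for the linear memtype_list code that follows.
 */
static int route_memtype_request(u64 start, u64 end)
{
	int is_range_ram;

	if (end < ISA_END_ADDRESS)		/* legacy 1MB region */
		return handle_memtype_list(start, end);

	is_range_ram = pagerange_is_ram(start, end);
	if (is_range_ram == 1)			/* whole range is RAM */
		return handle_ram_pages(start, end);
	if (is_range_ram < 0)			/* mixed RAM/non-RAM */
		return -EINVAL;

	return handle_memtype_list(start, end);	/* pure non-RAM range */
}
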
@@ -505,6 +522,35 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 }
 #endif /* CONFIG_STRICT_DEVMEM */
 
+/*
+ * Change the memory type for the physial address range in kernel identity
+ * mapping space if that range is a part of identity map.
+ */
+static int kernel_map_sync_memtype(u64 base, unsigned long size,
+				   unsigned long flags)
+{
+	unsigned long id_sz;
+	int ret;
+
+	if (!pat_enabled || base >= __pa(high_memory))
+		return 0;
+
+	id_sz = (__pa(high_memory) < base + size) ?
+				__pa(high_memory) - base :
+				size;
+
+	ret = ioremap_change_attr((unsigned long)__va(base), id_sz, flags);
+	/*
+	 * -EFAULT return means that the addr was not valid and did not have
+	 * any identity mapping. That case is a success for
+	 * kernel_map_sync_memtype.
+	 */
+	if (ret == -EFAULT)
+		ret = 0;
+
+	return ret;
+}
+
 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t *vma_prot)
 {
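
kernel_map_sync_memtype() factors out the identity-map fixup that the callers below used to open-code: id_sz clamps the length to the identity-mapped portion (everything below __pa(high_memory)), and an -EFAULT from ioremap_change_attr() is treated as "no identity mapping here", i.e. success. A hedged sketch of how a caller is expected to use it, mirroring the call sites later in this patch:

/*
 * Sketch only: the usage pattern the rest of this patch follows.
 * free_memtype() undoes the reservation when the identity mapping
 * cannot be brought in sync; "flags" is the _PAGE_CACHE_* value
 * actually granted by a preceding reserve_memtype() call.
 */
static int reserve_and_sync_example(u64 paddr, unsigned long size,
				    unsigned long flags)
{
	if (kernel_map_sync_memtype(paddr, size, flags)) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}
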
@@ -555,9 +601,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 	if (retval < 0)
 		return 0;
 
-	if (((pfn < max_low_pfn_mapped) ||
-	     (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
-	    ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
+	if (kernel_map_sync_memtype(offset, size, flags)) {
 		free_memtype(offset, offset + size);
 		printk(KERN_INFO
 		"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
@@ -601,12 +645,13 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
  * Reserved non RAM regions only and after successful reserve_memtype,
  * this func also keeps identity mapping (if any) in sync with this new prot.
  */
-static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot)
+static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
+				int strict_prot)
 {
 	int is_ram = 0;
-	int id_sz, ret;
+	int ret;
 	unsigned long flags;
-	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
+	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
 
 	is_ram = pagerange_is_ram(paddr, paddr + size);
 
@@ -625,26 +670,27 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot)
 		return ret;
 
 	if (flags != want_flags) {
-		free_memtype(paddr, paddr + size);
-		printk(KERN_ERR
-		"%s:%d map pfn expected mapping type %s for %Lx-%Lx, got %s\n",
-			current->comm, current->pid,
-			cattr_name(want_flags),
-			(unsigned long long)paddr,
-			(unsigned long long)(paddr + size),
-			cattr_name(flags));
-		return -EINVAL;
+		if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
+			free_memtype(paddr, paddr + size);
+			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
+				" for %Lx-%Lx, got %s\n",
+				current->comm, current->pid,
+				cattr_name(want_flags),
+				(unsigned long long)paddr,
+				(unsigned long long)(paddr + size),
+				cattr_name(flags));
+			return -EINVAL;
+		}
+		/*
+		 * We allow returning different type than the one requested in
+		 * non strict case.
+		 */
+		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
+				      (~_PAGE_CACHE_MASK)) |
+				     flags);
 	}
 
-	/* Need to keep identity mapping in sync */
-	if (paddr >= __pa(high_memory))
-		return 0;
-
-	id_sz = (__pa(high_memory) < paddr + size) ?
-		__pa(high_memory) - paddr :
-		size;
-
-	if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
+	if (kernel_map_sync_memtype(paddr, size, flags)) {
 		free_memtype(paddr, paddr + size);
 		printk(KERN_ERR
 		"%s:%d reserve_pfn_range ioremap_change_attr failed %s "
@@ -689,6 +735,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 	unsigned long vma_start = vma->vm_start;
 	unsigned long vma_end = vma->vm_end;
 	unsigned long vma_size = vma_end - vma_start;
+	pgprot_t pgprot;
 
 	if (!pat_enabled)
 		return 0;
@@ -702,7 +749,8 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 			WARN_ON_ONCE(1);
 			return -EINVAL;
 		}
-		return reserve_pfn_range(paddr, vma_size, __pgprot(prot));
+		pgprot = __pgprot(prot);
+		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
 	}
 
 	/* reserve entire vma page by page, using pfn and prot from pte */
@@ -710,7 +758,8 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
 			continue;
 
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
+		pgprot = __pgprot(prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
 		if (retval)
 			goto cleanup_ret;
 	}
@@ -741,7 +790,7 @@ cleanup_ret:
  * Note that this function can be called with caller trying to map only a
  * subrange/page inside the vma.
  */
-int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size)
 {
 	int retval = 0;
@@ -758,14 +807,14 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
 	if (is_linear_pfn_mapping(vma)) {
 		/* reserve the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
-		return reserve_pfn_range(paddr, vma_size, prot);
+		return reserve_pfn_range(paddr, vma_size, prot, 0);
 	}
 
 	/* reserve page by page using pfn and size */
 	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		paddr = base_paddr + i;
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
 		if (retval)
 			goto cleanup_ret;
 	}
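
Because track_pfn_vma_new() now takes pgprot_t *prot, a non-strict caller sees any substituted cache attribute reflected back in the protection it will actually install. A hypothetical caller following that pattern (everything except track_pfn_vma_new() below is illustrative, not taken from this patch):

/*
 * Sketch only: a remap-style caller passes its page protection by
 * pointer so track_pfn_vma_new() can replace the cache attribute
 * (e.g. grant UC- where WB was requested) before PTEs are written.
 */
static int map_pfn_range_example(struct vm_area_struct *vma,
				 unsigned long pfn, unsigned long size)
{
	int err;

	err = track_pfn_vma_new(vma, &vma->vm_page_prot, pfn, size);
	if (err)
		return err;

	/* ... set up PTEs using the possibly-updated vma->vm_page_prot ... */
	return 0;
}
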