author	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-13 17:53:16 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-13 17:53:16 -0500
commit	a6525042bfdfcab128bd91fad264de10fd24a55e (patch)
tree	088102ff69846c71c963e970a90d627498202928
parent	b9a0d06a35c382c02f471a0815e2e27cdfe1c7f9 (diff)
parent	58dab916dfb57328d50deb0aa9b3fc92efa248ff (diff)
Merge branch 'x86-pat-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-pat-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86 PAT: remove CPA WARN_ON for zero pte
  x86 PAT: return compatible mapping to remap_pfn_range callers
  x86 PAT: change track_pfn_vma_new to take pgprot_t pointer param
  x86 PAT: consolidate old memtype new memtype check into a function
  x86 PAT: remove PFNMAP type on track_pfn_vma_new() error
-rw-r--r--	arch/x86/include/asm/pgtable.h	19
-rw-r--r--	arch/x86/mm/pageattr.c	10
-rw-r--r--	arch/x86/mm/pat.c	90
-rw-r--r--	arch/x86/pci/i386.c	12
-rw-r--r--	include/asm-generic/pgtable.h	4
-rw-r--r--	mm/memory.c	15
6 files changed, 100 insertions, 50 deletions
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 83e69f4a37f..06bbcbd66e9 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -341,6 +341,25 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 
 #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
 
+static inline int is_new_memtype_allowed(unsigned long flags,
+						unsigned long new_flags)
+{
+	/*
+	 * Certain new memtypes are not allowed with certain
+	 * requested memtype:
+	 * - request is uncached, return cannot be write-back
+	 * - request is write-combine, return cannot be write-back
+	 */
+	if ((flags == _PAGE_CACHE_UC_MINUS &&
+	     new_flags == _PAGE_CACHE_WB) ||
+	    (flags == _PAGE_CACHE_WC &&
+	     new_flags == _PAGE_CACHE_WB)) {
+		return 0;
+	}
+
+	return 1;
+}
+
 #ifndef __ASSEMBLY__
 /* Indicate that x86 has its own track and untrack pfn vma functions */
 #define __HAVE_PFNMAP_TRACKING
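
The helper added above is the consolidation named in the merge summary: one place that decides whether the memtype an earlier reservation established can stand in for the memtype a new caller requested. A rough standalone illustration of that decision table follows; the enum values are stand-ins for this sketch, not the kernel's actual _PAGE_CACHE_* bit encodings.

#include <stdio.h>

/* Stand-in values for illustration only; the kernel derives the real
 * ones from PAT bit encodings in pgtable.h. */
enum memtype { WB, WC, UC_MINUS };

/* Mirrors is_new_memtype_allowed(): an existing write-back mapping
 * cannot satisfy a UC- or WC request; all other combinations pass. */
static int new_memtype_allowed(enum memtype req, enum memtype got)
{
	if ((req == UC_MINUS && got == WB) ||
	    (req == WC && got == WB))
		return 0;
	return 1;
}

int main(void)
{
	const char *name[] = { "WB", "WC", "UC-" };
	for (int req = WB; req <= UC_MINUS; req++)
		for (int got = WB; got <= UC_MINUS; got++)
			printf("request %-3s got %-3s -> %s\n",
			       name[req], name[got],
			       new_memtype_allowed(req, got) ? "ok" : "reject");
	return 0;
}
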
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index e89d24815f2..4cf30dee816 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -555,10 +555,12 @@ repeat:
 	if (!pte_val(old_pte)) {
 		if (!primary)
 			return 0;
-		WARN(1, KERN_WARNING "CPA: called for zero pte. "
-		       "vaddr = %lx cpa->vaddr = %lx\n", address,
-		       *cpa->vaddr);
-		return -EINVAL;
+
+		/*
+		 * Special error value returned, indicating that the mapping
+		 * did not exist at this address.
+		 */
+		return -EFAULT;
 	}
 
 	if (level == PG_LEVEL_4K) {
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 85cbd3cd372..160c42d3eb8 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -505,6 +505,35 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 }
 #endif /* CONFIG_STRICT_DEVMEM */
 
+/*
+ * Change the memory type for the physical address range in kernel identity
+ * mapping space if that range is a part of identity map.
+ */
+static int kernel_map_sync_memtype(u64 base, unsigned long size,
+				unsigned long flags)
+{
+	unsigned long id_sz;
+	int ret;
+
+	if (!pat_enabled || base >= __pa(high_memory))
+		return 0;
+
+	id_sz = (__pa(high_memory) < base + size) ?
+				__pa(high_memory) - base :
+				size;
+
+	ret = ioremap_change_attr((unsigned long)__va(base), id_sz, flags);
+	/*
+	 * -EFAULT return means that the addr was not valid and did not have
+	 * any identity mapping. That case is a success for
+	 * kernel_map_sync_memtype.
+	 */
+	if (ret == -EFAULT)
+		ret = 0;
+
+	return ret;
+}
+
 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t *vma_prot)
 {
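
kernel_map_sync_memtype() above leans on the pageattr.c change earlier in this merge: a -EFAULT from ioremap_change_attr() now means "no mapping existed at this address", which the helper deliberately treats as success. The only arithmetic in it is the clamp of the sync range to the part of [base, base + size) that the identity map actually covers. A small userspace sketch of that clamp, with made-up numbers:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical layout: identity map covers physical [0, high) only. */
static uint64_t clamp_id_sz(uint64_t high, uint64_t base, uint64_t size)
{
	/* Same expression as the id_sz computation above. */
	return (high < base + size) ? high - base : size;
}

int main(void)
{
	uint64_t high = 0x38000000;	/* e.g. an 896 MB direct map */

	/* Range entirely below high_memory: sync the full size. */
	printf("%llx\n", (unsigned long long)clamp_id_sz(high, 0x1000000, 0x2000));
	/* Range straddling high_memory: sync only the mapped part. */
	printf("%llx\n", (unsigned long long)clamp_id_sz(high, 0x37fff000, 0x4000));
	return 0;
}
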
@@ -555,9 +584,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 	if (retval < 0)
 		return 0;
 
-	if (((pfn < max_low_pfn_mapped) ||
-	     (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
-	    ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
+	if (kernel_map_sync_memtype(offset, size, flags)) {
 		free_memtype(offset, offset + size);
 		printk(KERN_INFO
 		"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
@@ -601,12 +628,13 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
  * Reserved non RAM regions only and after successful reserve_memtype,
  * this func also keeps identity mapping (if any) in sync with this new prot.
  */
-static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot)
+static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
+				int strict_prot)
 {
 	int is_ram = 0;
-	int id_sz, ret;
+	int ret;
 	unsigned long flags;
-	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
+	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
 
 	is_ram = pagerange_is_ram(paddr, paddr + size);
 
@@ -625,26 +653,27 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot)
 		return ret;
 
 	if (flags != want_flags) {
-		free_memtype(paddr, paddr + size);
-		printk(KERN_ERR
-		"%s:%d map pfn expected mapping type %s for %Lx-%Lx, got %s\n",
-			current->comm, current->pid,
-			cattr_name(want_flags),
-			(unsigned long long)paddr,
-			(unsigned long long)(paddr + size),
-			cattr_name(flags));
-		return -EINVAL;
+		if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
+			free_memtype(paddr, paddr + size);
+			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
+				" for %Lx-%Lx, got %s\n",
+				current->comm, current->pid,
+				cattr_name(want_flags),
+				(unsigned long long)paddr,
+				(unsigned long long)(paddr + size),
+				cattr_name(flags));
+			return -EINVAL;
+		}
+		/*
+		 * We allow returning different type than the one requested in
+		 * non strict case.
+		 */
+		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
+				      (~_PAGE_CACHE_MASK)) |
+				     flags);
 	}
 
-	/* Need to keep identity mapping in sync */
-	if (paddr >= __pa(high_memory))
-		return 0;
-
-	id_sz = (__pa(high_memory) < paddr + size) ?
-		__pa(high_memory) - paddr :
-		size;
-
-	if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
+	if (kernel_map_sync_memtype(paddr, size, flags)) {
 		free_memtype(paddr, paddr + size);
 		printk(KERN_ERR
 		"%s:%d reserve_pfn_range ioremap_change_attr failed %s "
@@ -689,6 +718,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 	unsigned long vma_start = vma->vm_start;
 	unsigned long vma_end = vma->vm_end;
 	unsigned long vma_size = vma_end - vma_start;
+	pgprot_t pgprot;
 
 	if (!pat_enabled)
 		return 0;
@@ -702,7 +732,8 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 			WARN_ON_ONCE(1);
 			return -EINVAL;
 		}
-		return reserve_pfn_range(paddr, vma_size, __pgprot(prot));
+		pgprot = __pgprot(prot);
+		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
 	}
 
 	/* reserve entire vma page by page, using pfn and prot from pte */
@@ -710,7 +741,8 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
 			continue;
 
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
+		pgprot = __pgprot(prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
 		if (retval)
 			goto cleanup_ret;
 	}
@@ -741,7 +773,7 @@ cleanup_ret:
  * Note that this function can be called with caller trying to map only a
  * subrange/page inside the vma.
  */
-int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size)
 {
 	int retval = 0;
@@ -758,14 +790,14 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
 	if (is_linear_pfn_mapping(vma)) {
 		/* reserve the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
-		return reserve_pfn_range(paddr, vma_size, prot);
+		return reserve_pfn_range(paddr, vma_size, prot, 0);
 	}
 
 	/* reserve page by page using pfn and size */
 	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		paddr = base_paddr + i;
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
 		if (retval)
 			goto cleanup_ret;
 	}
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index f884740da31..5ead808dd70 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -314,17 +314,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 		return retval;
 
 	if (flags != new_flags) {
-		/*
-		 * Do not fallback to certain memory types with certain
-		 * requested type:
-		 * - request is uncached, return cannot be write-back
-		 * - request is uncached, return cannot be write-combine
-		 * - request is write-combine, return cannot be write-back
-		 */
-		if ((flags == _PAGE_CACHE_UC_MINUS &&
-		    (new_flags == _PAGE_CACHE_WB)) ||
-		    (flags == _PAGE_CACHE_WC &&
-		    new_flags == _PAGE_CACHE_WB)) {
+		if (!is_new_memtype_allowed(flags, new_flags)) {
 			free_memtype(addr, addr+len);
 			return -EINVAL;
 		}
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 72ebe91005a..8e6d0ca70ab 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -301,7 +301,7 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
  * track_pfn_vma_new is called when a _new_ pfn mapping is being established
  * for physical range indicated by pfn and size.
  */
-static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size)
 {
 	return 0;
@@ -332,7 +332,7 @@ static inline void untrack_pfn_vma(struct vm_area_struct *vma,
 {
 }
 #else
-extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size);
 extern int track_pfn_vma_copy(struct vm_area_struct *vma);
 extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
diff --git a/mm/memory.c b/mm/memory.c
index c2d4c477e5b..22bfa7a47a0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1511,6 +1511,7 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
 {
 	int ret;
+	pgprot_t pgprot = vma->vm_page_prot;
 	/*
 	 * Technically, architectures with pte_special can avoid all these
 	 * restrictions (same for remap_pfn_range). However we would like
@@ -1525,10 +1526,10 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
-	if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+	if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
 		return -EINVAL;
 
-	ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+	ret = insert_pfn(vma, addr, pfn, pgprot);
 
 	if (ret)
 		untrack_pfn_vma(vma, pfn, PAGE_SIZE);
@@ -1671,9 +1672,15 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 
 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
-	err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
-	if (err)
+	err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
+	if (err) {
+		/*
+		 * To indicate that track_pfn related cleanup is not
+		 * needed from higher level routine calling unmap_vmas
+		 */
+		vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
 		return -EINVAL;
+	}
 
 	BUG_ON(addr >= end);
 	pfn -= addr >> PAGE_SHIFT;
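
Taken together, the mm/memory.c changes mean a driver calling remap_pfn_range() no longer fails outright when PAT can only grant a compatible memtype: track_pfn_vma_new() rewrites the prot through the new pointer parameter and the page tables are built with the granted type. A hedged sketch of what a consuming mmap handler might look like; mydev_mmap and MYDEV_PHYS_BASE are hypothetical and not part of this merge.

#include <linux/fs.h>
#include <linux/mm.h>

#define MYDEV_PHYS_BASE 0xfd000000UL	/* hypothetical MMIO base */

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* Ask for write-combine; PAT may grant something compatible. */
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	/*
	 * remap_pfn_range() now passes &prot down to track_pfn_vma_new();
	 * if this range is already reserved as, say, UC-, the mapping is
	 * created UC- instead of the call failing with -EINVAL.
	 */
	return remap_pfn_range(vma, vma->vm_start,
			       MYDEV_PHYS_BASE >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}
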