author		Pallipadi, Venkatesh <venkatesh.pallipadi@intel.com>	2009-04-08 18:37:16 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-16 18:44:22 -0400
commit		4b065046273afa01ec8e3de7da407e8d3599251d (patch)
tree		36860bac75f5f26db25529c6c279c9727b7281a9
parent		d45b41ae8da0f54aec0eebcc6f893ba5f22a1e8e (diff)
x86, PAT: Remove page granularity tracking for vm_insert_pfn maps
This change resolves the problem of too many single page entries in
pat_memtype_list and "freeing invalid memtype" errors with i915, reported
here:

  http://marc.info/?l=linux-kernel&m=123845244713183&w=2

Remove the page-level granularity track and untrack of vm_insert_pfn.
Memtype tracking at page granularity does not scale; a cleaner approach is
for the driver to request a type for a bigger IO address range or PCI IO
memory range for that device, either at mmap time or driver init time, and
just use that type during vm_insert_pfn.

This patch just removes the track/untrack of vm_insert_pfn. That means we
will be in the same state as 2.6.28 with respect to these APIs. Newer APIs
for drivers to request a memtype for a bigger region are coming soon.

[ Impact: fix Xorg startup warnings and hangs ]

Reported-by: Arkadiusz Miskiewicz <a.miskiewicz@gmail.com>
Tested-by: Arkadiusz Miskiewicz <a.miskiewicz@gmail.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
LKML-Reference: <20090408223716.GC3493@linux-os.sc.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
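To illustrate the approach the changelog recommends, below is a minimal,
hypothetical sketch of a driver that maps its whole IO range at mmap time
(the names mydrv_mmap and mydrv_bar_phys are assumptions, not part of this
patch). Mapping the entire vma with remap_pfn_range() makes it a linear pfn
mapping, so the retained PAT path in the diff below (track_pfn_vma_new()
calling reserve_pfn_range() once) covers the whole range with a single
pat_memtype_list entry instead of one entry per page.

#include <linux/fs.h>
#include <linux/mm.h>

/* Assumed: physical base of the device's IO memory, set at probe time. */
static resource_size_t mydrv_bar_phys;

/*
 * Hypothetical mmap handler: map the whole region in one go. The
 * resulting vma satisfies is_linear_pfn_mapping(), so the PAT code
 * reserves the memtype for the entire range with one call.
 */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* Pick the memtype once for the whole range (write-combining here). */
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start,
			       mydrv_bar_phys >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}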
-rw-r--r--	arch/x86/mm/pat.c	98
1 file changed, 19 insertions(+), 79 deletions(-)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index cc5e0e24e443..41c805718158 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -669,29 +669,28 @@ static void free_pfn_range(u64 paddr, unsigned long size)
  *
  * If the vma has a linear pfn mapping for the entire range, we get the prot
  * from pte and reserve the entire vma range with single reserve_pfn_range call.
- * Otherwise, we reserve the entire vma range, my ging through the PTEs page
- * by page to get physical address and protection.
  */
 int track_pfn_vma_copy(struct vm_area_struct *vma)
 {
-	int retval = 0;
-	unsigned long i, j;
 	resource_size_t paddr;
 	unsigned long prot;
-	unsigned long vma_start = vma->vm_start;
-	unsigned long vma_end = vma->vm_end;
-	unsigned long vma_size = vma_end - vma_start;
+	unsigned long vma_size = vma->vm_end - vma->vm_start;
 	pgprot_t pgprot;
 
 	if (!pat_enabled)
 		return 0;
 
+	/*
+	 * For now, only handle remap_pfn_range() vmas where
+	 * is_linear_pfn_mapping() == TRUE. Handling of
+	 * vm_insert_pfn() is TBD.
+	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/*
 		 * reserve the whole chunk covered by vma. We need the
 		 * starting address and protection from pte.
 		 */
-		if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
+		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
 			WARN_ON_ONCE(1);
 			return -EINVAL;
 		}
@@ -699,28 +698,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
 	}
 
-	/* reserve entire vma page by page, using pfn and prot from pte */
-	for (i = 0; i < vma_size; i += PAGE_SIZE) {
-		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
-			continue;
-
-		pgprot = __pgprot(prot);
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
-		if (retval)
-			goto cleanup_ret;
-	}
 	return 0;
-
-cleanup_ret:
-	/* Reserve error: Cleanup partial reservation and return error */
-	for (j = 0; j < i; j += PAGE_SIZE) {
-		if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
-			continue;
-
-		free_pfn_range(paddr, PAGE_SIZE);
-	}
-
-	return retval;
 }
 
 /*
@@ -730,50 +708,28 @@ cleanup_ret:
  * prot is passed in as a parameter for the new mapping. If the vma has a
  * linear pfn mapping for the entire range reserve the entire vma range with
  * single reserve_pfn_range call.
- * Otherwise, we look t the pfn and size and reserve only the specified range
- * page by page.
- *
- * Note that this function can be called with caller trying to map only a
- * subrange/page inside the vma.
  */
 int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size)
 {
-	int retval = 0;
-	unsigned long i, j;
-	resource_size_t base_paddr;
 	resource_size_t paddr;
-	unsigned long vma_start = vma->vm_start;
-	unsigned long vma_end = vma->vm_end;
-	unsigned long vma_size = vma_end - vma_start;
+	unsigned long vma_size = vma->vm_end - vma->vm_start;
 
 	if (!pat_enabled)
 		return 0;
 
+	/*
+	 * For now, only handle remap_pfn_range() vmas where
+	 * is_linear_pfn_mapping() == TRUE. Handling of
+	 * vm_insert_pfn() is TBD.
+	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/* reserve the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
 		return reserve_pfn_range(paddr, vma_size, prot, 0);
 	}
 
-	/* reserve page by page using pfn and size */
-	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
-	for (i = 0; i < size; i += PAGE_SIZE) {
-		paddr = base_paddr + i;
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
-		if (retval)
-			goto cleanup_ret;
-	}
 	return 0;
-
-cleanup_ret:
-	/* Reserve error: Cleanup partial reservation and return error */
-	for (j = 0; j < i; j += PAGE_SIZE) {
-		paddr = base_paddr + j;
-		free_pfn_range(paddr, PAGE_SIZE);
-	}
-
-	return retval;
 }
 
 /*
@@ -784,39 +740,23 @@ cleanup_ret:
 void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
 			unsigned long size)
 {
-	unsigned long i;
 	resource_size_t paddr;
-	unsigned long prot;
-	unsigned long vma_start = vma->vm_start;
-	unsigned long vma_end = vma->vm_end;
-	unsigned long vma_size = vma_end - vma_start;
+	unsigned long vma_size = vma->vm_end - vma->vm_start;
 
 	if (!pat_enabled)
 		return;
 
+	/*
+	 * For now, only handle remap_pfn_range() vmas where
+	 * is_linear_pfn_mapping() == TRUE. Handling of
+	 * vm_insert_pfn() is TBD.
+	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/* free the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
 		free_pfn_range(paddr, vma_size);
 		return;
 	}
-
-	if (size != 0 && size != vma_size) {
-		/* free page by page, using pfn and size */
-		paddr = (resource_size_t)pfn << PAGE_SHIFT;
-		for (i = 0; i < size; i += PAGE_SIZE) {
-			paddr = paddr + i;
-			free_pfn_range(paddr, PAGE_SIZE);
-		}
-	} else {
-		/* free entire vma, page by page, using the pfn from pte */
-		for (i = 0; i < vma_size; i += PAGE_SIZE) {
-			if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
-				continue;
-
-			free_pfn_range(paddr, PAGE_SIZE);
-		}
-	}
 }
 
 pgprot_t pgprot_writecombine(pgprot_t prot)
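For the vm_insert_pfn() side that this patch stops tracking, a driver fault
handler of this era might look like the hedged sketch below (mydrv_fault and
mydrv_bar_phys are hypothetical names, not from this patch). With the patch
applied, track_pfn_vma_new()/untrack_pfn_vma() no longer add or remove a
pat_memtype_list entry for each inserted page; the memtype for the whole
aperture is expected to be established up front, at mmap or driver init time,
via the range APIs the changelog says are coming.

#include <linux/mm.h>

/* Assumed: physical base of the device aperture, set up at driver init. */
static resource_size_t mydrv_bar_phys;

/*
 * Hypothetical fault handler: insert one PFN per fault. No per-page
 * memtype tracking happens here anymore after this change.
 */
static int mydrv_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long addr = (unsigned long)vmf->virtual_address;
	unsigned long pfn = (mydrv_bar_phys >> PAGE_SHIFT) +
			    ((addr - vma->vm_start) >> PAGE_SHIFT);
	int ret;

	ret = vm_insert_pfn(vma, addr, pfn);
	if (ret == -ENOMEM)
		return VM_FAULT_OOM;
	/* 0 or -EBUSY (pte already present) both mean the fault is handled. */
	return VM_FAULT_NOPAGE;
}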