about summary refs log tree commit diff stats
path: root/arch/x86/mm/pat.c
diff options
context:
space:
mode:
authorVenkatesh Pallipadi <venkatesh.pallipadi@intel.com>2009-07-10 12:57:40 -0400
committerH. Peter Anvin <hpa@zytor.com>2009-08-26 18:41:32 -0400
commit1087637616dd5e96d834164ea462aed6159d039b (patch)
treeb433bda73d2c37dca006f898b5cc42f9d3131ae3 /arch/x86/mm/pat.c
parent637b86e75f4c255a4446bc0b67ce9d914b9d2d42 (diff)
x86, pat: Lookup the protection from memtype list on vm_insert_pfn()
Lookup the reserved memtype during vm_insert_pfn and use that memtype for the new mapping. This takes care of handling of vm_insert_pfn() interface in track_pfn_vma*/untrack_pfn_vma. Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com> Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/x86/mm/pat.c')
-rw-r--r--arch/x86/mm/pat.c24
1 file changed, 9 insertions, 15 deletions
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 71aa6f7246c6..b629f75f73de 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -848,11 +848,6 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
848 unsigned long vma_size = vma->vm_end - vma->vm_start; 848 unsigned long vma_size = vma->vm_end - vma->vm_start;
849 pgprot_t pgprot; 849 pgprot_t pgprot;
850 850
851 /*
852 * For now, only handle remap_pfn_range() vmas where
853 * is_linear_pfn_mapping() == TRUE. Handling of
854 * vm_insert_pfn() is TBD.
855 */
856 if (is_linear_pfn_mapping(vma)) { 851 if (is_linear_pfn_mapping(vma)) {
857 /* 852 /*
858 * reserve the whole chunk covered by vma. We need the 853 * reserve the whole chunk covered by vma. We need the
@@ -880,20 +875,24 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
880int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, 875int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
881 unsigned long pfn, unsigned long size) 876 unsigned long pfn, unsigned long size)
882{ 877{
878 unsigned long flags;
883 resource_size_t paddr; 879 resource_size_t paddr;
884 unsigned long vma_size = vma->vm_end - vma->vm_start; 880 unsigned long vma_size = vma->vm_end - vma->vm_start;
885 881
886 /*
887 * For now, only handle remap_pfn_range() vmas where
888 * is_linear_pfn_mapping() == TRUE. Handling of
889 * vm_insert_pfn() is TBD.
890 */
891 if (is_linear_pfn_mapping(vma)) { 882 if (is_linear_pfn_mapping(vma)) {
892 /* reserve the whole chunk starting from vm_pgoff */ 883 /* reserve the whole chunk starting from vm_pgoff */
893 paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; 884 paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
894 return reserve_pfn_range(paddr, vma_size, prot, 0); 885 return reserve_pfn_range(paddr, vma_size, prot, 0);
895 } 886 }
896 887
888 if (!pat_enabled)
889 return 0;
890
891 /* for vm_insert_pfn and friends, we set prot based on lookup */
892 flags = lookup_memtype(pfn << PAGE_SHIFT);
893 *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
894 flags);
895
897 return 0; 896 return 0;
898} 897}
899 898
@@ -908,11 +907,6 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
908 resource_size_t paddr; 907 resource_size_t paddr;
909 unsigned long vma_size = vma->vm_end - vma->vm_start; 908 unsigned long vma_size = vma->vm_end - vma->vm_start;
910 909
911 /*
912 * For now, only handle remap_pfn_range() vmas where
913 * is_linear_pfn_mapping() == TRUE. Handling of
914 * vm_insert_pfn() is TBD.
915 */
916 if (is_linear_pfn_mapping(vma)) { 910 if (is_linear_pfn_mapping(vma)) {
917 /* free the whole chunk starting from vm_pgoff */ 911 /* free the whole chunk starting from vm_pgoff */
918 paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; 912 paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;