author	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>	2009-07-10 12:57:32 -0400
committer	H. Peter Anvin <hpa@zytor.com>	2009-08-26 18:40:58 -0400
commit	5fc517466dd3d0fc6d2a5180ca6792e60344d8be (patch)
tree	75fd75c8773848359e96dfb68ce37a2872a32a8e /arch/x86/mm/pat.c
parent	5400743db5a06a4e6e298725a2044c40edcb27b9 (diff)
x86, pat: Keep identity maps consistent with mmaps even when pat_disabled
Make reserve_memtype internally take care of the pat_disabled case and fall back to default return values.

Remove the specific pat_disabled checks in the track_* routines.

Change kernel_map_sync_memtype to sync the identity map even when pat_disabled.

This change ensures that, even in the pat_disabled case, we take care of keeping the identity map in sync. Before this patch, in the pat_disabled case, ioremap() kept the identity maps in sync while other APIs such as pci and /dev/mem mmap did not, which is not very consistent behavior.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
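For reference, a minimal sketch of the pat_disabled fast path in reserve_memtype as it would read after the first hunk below. The enclosing !pat_enabled check, the comment, and the early return 0 are assumed surrounding context at the hunk location, not part of this patch; only the _PAGE_CACHE_WC to _PAGE_CACHE_UC_MINUS branch is added here:

	if (!pat_enabled) {
		/* Without PAT, report the cache type the page tables use anyway */
		if (new_type) {
			if (req_type == -1)			/* -1: caller accepts any type */
				*new_type = _PAGE_CACHE_WB;
			else if (req_type == _PAGE_CACHE_WC)	/* WC needs PAT; degrade to UC- */
				*new_type = _PAGE_CACHE_UC_MINUS;
			else
				*new_type = req_type & _PAGE_CACHE_MASK;
		}
		return 0;	/* nothing to track; always succeed when PAT is off */
	}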
Diffstat (limited to 'arch/x86/mm/pat.c')
-rw-r--r--	arch/x86/mm/pat.c	13
1 file changed, 3 insertions(+), 10 deletions(-)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index e6718bb28065..d5af2792d2fd 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -339,6 +339,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		if (new_type) {
 			if (req_type == -1)
 				*new_type = _PAGE_CACHE_WB;
+			else if (req_type == _PAGE_CACHE_WC)
+				*new_type = _PAGE_CACHE_UC_MINUS;
 			else
 				*new_type = req_type & _PAGE_CACHE_MASK;
 		}
@@ -577,7 +579,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
 {
 	unsigned long id_sz;
 
-	if (!pat_enabled || base >= __pa(high_memory))
+	if (base >= __pa(high_memory))
 		return 0;
 
 	id_sz = (__pa(high_memory) < base + size) ?
@@ -677,9 +679,6 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 	unsigned long vma_size = vma->vm_end - vma->vm_start;
 	pgprot_t pgprot;
 
-	if (!pat_enabled)
-		return 0;
-
 	/*
 	 * For now, only handle remap_pfn_range() vmas where
 	 * is_linear_pfn_mapping() == TRUE. Handling of
@@ -715,9 +714,6 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 	resource_size_t paddr;
 	unsigned long vma_size = vma->vm_end - vma->vm_start;
 
-	if (!pat_enabled)
-		return 0;
-
 	/*
 	 * For now, only handle remap_pfn_range() vmas where
 	 * is_linear_pfn_mapping() == TRUE. Handling of
@@ -743,9 +739,6 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
 	resource_size_t paddr;
 	unsigned long vma_size = vma->vm_end - vma->vm_start;
 
-	if (!pat_enabled)
-		return;
-
 	/*
 	 * For now, only handle remap_pfn_range() vmas where
 	 * is_linear_pfn_mapping() == TRUE. Handling of