aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorvenkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com>2008-12-18 14:41:27 -0500
committerH. Peter Anvin <hpa@zytor.com>2008-12-18 16:30:15 -0500
commit3c8bb73ace6249bd089b70c941440441940e3365 (patch)
tree974c8b86fa68c26daa76e5122bab3cf5651a51dd
parent55dac3a5553b13891f0ae4bbd11920619b5436d4 (diff)
x86: PAT: store vm_pgoff for all linear_over_vma_region mappings - v3
Impact: Code transformation, new functions added should have no effect. Drivers use mmap followed by pgprot_* and remap_pfn_range or vm_insert_pfn, in order to export reserved memory to userspace. Currently, such mappings are not tracked and hence not kept consistent with other mappings (/dev/mem, pci resource, ioremap) for the same memory, that may exist in the system. The following patchset adds x86 PAT attribute tracking and untracking for pfnmap related APIs. First three patches in the patchset are changing the generic mm code to fit in this tracking. Last four patches are x86 specific to make things work with x86 PAT code. The patchset also introduces pgprot_writecombine interface, which gives writecombine mapping when enabled, falling back to pgprot_noncached otherwise. This patch: While working on x86 PAT, we faced some hurdles with tracking remap_pfn_range() regions, as we do not have any information to say whether that PFNMAP mapping is linear for the entire vma range or it is smaller granularity regions within the vma. A simple solution to this is to use vm_pgoff as an indicator for linear mapping over the vma region. Currently, remap_pfn_range only sets vm_pgoff for COW mappings. Below patch changes the logic and sets the vm_pgoff irrespective of COW. This will still not be enough for the case where pfn is zero (vma region mapped to physical address zero). But, for all the other cases, we can look at pfnmap VMAs and say whether the mapping is for the entire vma region or not. Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com> Signed-off-by: H. Peter Anvin <hpa@zytor.com>
-rw-r--r--include/linux/mm.h9
-rw-r--r--mm/memory.c7
2 files changed, 12 insertions, 4 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ffee2f743418..2be8d9b5e46f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -145,6 +145,15 @@ extern pgprot_t protection_map[16];
145#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */ 145#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */
146#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */ 146#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */
147 147
148static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
149{
150 return ((vma->vm_flags & VM_PFNMAP) && vma->vm_pgoff);
151}
152
153static inline int is_pfn_mapping(struct vm_area_struct *vma)
154{
155 return (vma->vm_flags & VM_PFNMAP);
156}
148 157
149/* 158/*
150 * vm_fault is filled by the the pagefault handler and passed to the vma's 159 * vm_fault is filled by the the pagefault handler and passed to the vma's
diff --git a/mm/memory.c b/mm/memory.c
index 164951c47305..cef95c8c77fa 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1575,11 +1575,10 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1575 * behaviour that some programs depend on. We mark the "original" 1575 * behaviour that some programs depend on. We mark the "original"
1576 * un-COW'ed pages by matching them up with "vma->vm_pgoff". 1576 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
1577 */ 1577 */
1578 if (is_cow_mapping(vma->vm_flags)) { 1578 if (addr == vma->vm_start && end == vma->vm_end)
1579 if (addr != vma->vm_start || end != vma->vm_end)
1580 return -EINVAL;
1581 vma->vm_pgoff = pfn; 1579 vma->vm_pgoff = pfn;
1582 } 1580 else if (is_cow_mapping(vma->vm_flags))
1581 return -EINVAL;
1583 1582
1584 vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; 1583 vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
1585 1584