aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/mm.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--include/linux/mm.h15
1 files changed, 13 insertions, 2 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 065cdf8c09fb..3daa05feed9f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -98,7 +98,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
 #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
-#define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
+#define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it. Refer note in VM_PFNMAP_AT_MMAP below */
 #define VM_ALWAYSDUMP	0x04000000	/* Always include in core dumps */
 
 #define VM_CAN_NONLINEAR 0x08000000	/* Has ->fault & does nonlinear pages */
@@ -127,6 +127,17 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
 
 /*
+ * pfnmap vmas that are fully mapped at mmap time (not mapped on fault).
+ * Used by x86 PAT to identify such PFNMAP mappings and optimize their handling.
+ * Note VM_INSERTPAGE flag is overloaded here. i.e,
+ * VM_INSERTPAGE && !VM_PFNMAP implies
+ *     The vma has had "vm_insert_page()" done on it
+ * VM_INSERTPAGE && VM_PFNMAP implies
+ *     The vma is PFNMAP with full mapping at mmap time
+ */
+#define VM_PFNMAP_AT_MMAP (VM_INSERTPAGE | VM_PFNMAP)
+
+/*
  * mapping from the currently active vm_flags protection bits (the
  * low four bits) to a page protection mask..
  */
@@ -145,7 +156,7 @@ extern pgprot_t protection_map[16];
  */
 static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
 {
-	return ((vma->vm_flags & VM_PFNMAP) && vma->vm_pgoff);
+	return ((vma->vm_flags & VM_PFNMAP_AT_MMAP) == VM_PFNMAP_AT_MMAP);
 }
 
 static inline int is_pfn_mapping(struct vm_area_struct *vma)