 include/linux/mm.h | 16 +++-------------
 mm/memory.c        |  4 ++--
 2 files changed, 5 insertions(+), 15 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3daa05feed9f..b1ea37fc7a24 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -98,12 +98,13 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
 #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
-#define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it. Refer note in VM_PFNMAP_AT_MMAP below */
+#define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
 #define VM_ALWAYSDUMP	0x04000000	/* Always include in core dumps */
 
 #define VM_CAN_NONLINEAR 0x08000000	/* Has ->fault & does nonlinear pages */
 #define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
 #define VM_SAO		0x20000000	/* Strong Access Ordering (powerpc) */
+#define VM_PFN_AT_MMAP	0x40000000	/* PFNMAP vma that is fully mapped at mmap time */
 
 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
@@ -127,17 +128,6 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
 
 /*
- * pfnmap vmas that are fully mapped at mmap time (not mapped on fault).
- * Used by x86 PAT to identify such PFNMAP mappings and optimize their handling.
- * Note VM_INSERTPAGE flag is overloaded here. i.e,
- *  VM_INSERTPAGE && !VM_PFNMAP implies
- *     The vma has had "vm_insert_page()" done on it
- *  VM_INSERTPAGE && VM_PFNMAP implies
- *     The vma is PFNMAP with full mapping at mmap time
- */
-#define VM_PFNMAP_AT_MMAP (VM_INSERTPAGE | VM_PFNMAP)
-
-/*
  * mapping from the currently active vm_flags protection bits (the
  * low four bits) to a page protection mask..
  */
@@ -156,7 +146,7 @@ extern pgprot_t protection_map[16];
  */
 static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
 {
-	return ((vma->vm_flags & VM_PFNMAP_AT_MMAP) == VM_PFNMAP_AT_MMAP);
+	return (vma->vm_flags & VM_PFN_AT_MMAP);
 }
 
 static inline int is_pfn_mapping(struct vm_area_struct *vma)
diff --git a/mm/memory.c b/mm/memory.c
index d7df5babcba9..2032ad2fc34b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1667,7 +1667,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	 */
 	if (addr == vma->vm_start && end == vma->vm_end) {
 		vma->vm_pgoff = pfn;
-		vma->vm_flags |= VM_PFNMAP_AT_MMAP;
+		vma->vm_flags |= VM_PFN_AT_MMAP;
 	} else if (is_cow_mapping(vma->vm_flags))
 		return -EINVAL;
 
@@ -1680,7 +1680,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	 * needed from higher level routine calling unmap_vmas
 	 */
 	vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
-	vma->vm_flags &= ~VM_PFNMAP_AT_MMAP;
+	vma->vm_flags &= ~VM_PFN_AT_MMAP;
 	return -EINVAL;
 }
 
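For readers skimming the change: the patch stops overloading the combination VM_INSERTPAGE | VM_PFNMAP as a composite "fully mapped at mmap time" marker and spends a dedicated bit, VM_PFN_AT_MMAP, on it instead. Below is a minimal user-space sketch of the two encodings; the flag values are copied from mm.h above, while the old_/new_ helper names and the main() driver are illustrative only, not kernel code.

#include <stdio.h>

/* Flag values as defined in include/linux/mm.h at this point in time. */
#define VM_PFNMAP	0x00000400UL
#define VM_INSERTPAGE	0x02000000UL
#define VM_PFN_AT_MMAP	0x40000000UL	/* the bit added by this patch */

/* Old encoding: two existing bits combined into a composite flag. */
#define VM_PFNMAP_AT_MMAP (VM_INSERTPAGE | VM_PFNMAP)

/* Old check: both bits of the composite must be set (illustrative name). */
static int old_is_linear_pfn_mapping(unsigned long vm_flags)
{
	return (vm_flags & VM_PFNMAP_AT_MMAP) == VM_PFNMAP_AT_MMAP;
}

/* New check: one dedicated bit, one plain mask test (illustrative name). */
static int new_is_linear_pfn_mapping(unsigned long vm_flags)
{
	return !!(vm_flags & VM_PFN_AT_MMAP);
}

int main(void)
{
	/*
	 * A PFNMAP vma that also had vm_insert_page() done on it: under
	 * the old encoding this bit pattern is indistinguishable from
	 * "fully mapped at mmap time"; the dedicated bit is unambiguous.
	 */
	unsigned long vm_flags = VM_PFNMAP | VM_INSERTPAGE;

	printf("old composite test: %d\n", old_is_linear_pfn_mapping(vm_flags)); /* 1 */
	printf("new dedicated test: %d\n", new_is_linear_pfn_mapping(vm_flags)); /* 0 */
	return 0;
}

With the dedicated bit in place, the mm/memory.c hunks become straightforward set and clear operations on VM_PFN_AT_MMAP alone, with no side effect on the meaning of VM_INSERTPAGE.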
