Diffstat (limited to 'include/linux/mm.h')

 -rw-r--r--  include/linux/mm.h | 21 +++++++++++++++++++++

 1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ffee2f743418..aaa8b843be28 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -145,6 +145,23 @@ extern pgprot_t protection_map[16];
 #define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
 #define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
 
+/*
+ * This interface is used by x86 PAT code to identify a pfn mapping that is
+ * linear over entire vma. This is to optimize PAT code that deals with
+ * marking the physical region with a particular prot. This is not for generic
+ * mm use. Note also that this check will not work if the pfn mapping is
+ * linear for a vma starting at physical address 0. In which case PAT code
+ * falls back to slow path of reserving physical range page by page.
+ */
+static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
+{
+	return ((vma->vm_flags & VM_PFNMAP) && vma->vm_pgoff);
+}
+
+static inline int is_pfn_mapping(struct vm_area_struct *vma)
+{
+	return (vma->vm_flags & VM_PFNMAP);
+}
 
 /*
  * vm_fault is filled by the the pagefault handler and passed to the vma's
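For illustration, here is a minimal sketch of the kind of caller these helpers enable, loosely modeled on the x86 PAT path the comment describes. All names below (track_pfn_vma_sketch, reserve_phys_range, reserve_page_by_page) are hypothetical stand-ins, not the real arch/x86/mm/pat.c interfaces:

#include <linux/mm.h>

/*
 * Hypothetical PAT-side helpers, stubbed out so the sketch is
 * self-contained; the real reservation logic lives in the PAT code.
 */
static int reserve_phys_range(resource_size_t paddr, unsigned long size,
			      pgprot_t prot)
{
	return 0;	/* pretend the whole range was reserved at once */
}

static int reserve_page_by_page(struct vm_area_struct *vma, pgprot_t prot)
{
	return 0;	/* pretend each page was reserved individually */
}

static int track_pfn_vma_sketch(struct vm_area_struct *vma, pgprot_t prot)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (is_linear_pfn_mapping(vma)) {
		/*
		 * For a linear pfn mapping, vm_pgoff holds the first pfn,
		 * so the whole physical range can be handled in one call.
		 */
		resource_size_t paddr =
			(resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		return reserve_phys_range(paddr, size, prot);
	}

	/*
	 * Either not a pfn mapping at all, or a linear one starting at
	 * physical address 0 (vm_pgoff == 0 defeats the check above):
	 * fall back to the slow page-by-page path.
	 */
	return reserve_page_by_page(vma, prot);
}

The truth test on vm_pgoff is also why, as the header comment notes, a linear mapping that starts at physical address 0 cannot be distinguished this way and must take the slow path.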
@@ -781,6 +798,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma);
 void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows);
+int follow_phys(struct vm_area_struct *vma, unsigned long address,
+		unsigned int flags, unsigned long *prot, resource_size_t *phys);
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 		void *buf, int len, int write);
 
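A hedged sketch of how the newly declared follow_phys() might be called: vaddr_to_phys is a hypothetical wrapper, and the use of FOLL_WRITE assumes the flags argument follows the follow_page() conventions from this same header.

#include <linux/errno.h>
#include <linux/mm.h>

/*
 * Hypothetical wrapper: resolve a user virtual address inside a
 * pfn-mapped vma to the physical address behind it.  follow_phys()
 * returns 0 on success and fills in *prot and *phys.
 */
static int vaddr_to_phys(struct vm_area_struct *vma, unsigned long addr,
			 resource_size_t *phys)
{
	unsigned long prot = 0;

	if (!is_pfn_mapping(vma))
		return -EINVAL;	/* only meaningful for VM_PFNMAP vmas */

	/* FOLL_WRITE asks for a writable translation */
	return follow_phys(vma, addr, FOLL_WRITE, &prot, phys);
}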
@@ -1286,5 +1305,7 @@ int vmemmap_populate_basepages(struct page *start_page,
 int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
 void vmemmap_populate_print_last(void);
 
+extern void *alloc_locked_buffer(size_t size);
+extern void free_locked_buffer(void *buffer, size_t size);
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
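Finally, a hypothetical usage sketch for the new locked-buffer helpers. That free_locked_buffer() takes the size again suggests the allocation is charged against the caller's locked-memory accounting and must be undone with the same size; do_work_on_locked_buffer is an invented name.

#include <linux/errno.h>
#include <linux/mm.h>

/*
 * Hypothetical caller: work on a buffer whose pages must stay
 * resident for the duration of the operation.
 */
static int do_work_on_locked_buffer(size_t size)
{
	void *buf = alloc_locked_buffer(size);

	if (!buf)
		return -ENOMEM;

	/* ... use buf here without risk of it being paged out ... */

	/* pass the same size back so the accounting can be undone */
	free_locked_buffer(buf, size);
	return 0;
}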