diff options
 include/linux/mm.h |  1 +
 mm/memory.c        | 36 ++++++++++++++++++++++++++++++++--
 2 files changed, 35 insertions(+), 2 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0e73f1539d08..29f02d8513f6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -956,6 +956,7 @@ struct page *vmalloc_to_page(void *addr);
 unsigned long vmalloc_to_pfn(void *addr);
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
 		unsigned long pfn, unsigned long size, pgprot_t);
+int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
 
 struct page *follow_page(struct vm_area_struct *, unsigned long address,
 			unsigned int foll_flags);
diff --git a/mm/memory.c b/mm/memory.c
index 8d10b5540c73..4b4fc3a7ea48 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1172,7 +1172,7 @@ static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *pa
 	spinlock_t *ptl;
 
 	retval = -EINVAL;
-	if (PageAnon(page) || !PageReserved(page))
+	if (PageAnon(page))
 		goto out;
 	retval = -ENOMEM;
 	flush_dcache_page(page);
@@ -1197,6 +1197,35 @@ out:
 }
 
 /*
+ * This allows drivers to insert individual pages they've allocated
+ * into a user vma.
+ *
+ * The page has to be a nice clean _individual_ kernel allocation.
+ * If you allocate a compound page, you need to have marked it as
+ * such (__GFP_COMP), or manually just split the page up yourself
+ * (which is mainly an issue of doing "set_page_count(page, 1)" for
+ * each sub-page, and then freeing them one by one when you free
+ * them rather than freeing it as a compound page).
+ *
+ * NOTE! Traditionally this was done with "remap_pfn_range()" which
+ * took an arbitrary page protection parameter. This doesn't allow
+ * that. Your vma protection will have to be set up correctly, which
+ * means that if you want a shared writable mapping, you'd better
+ * ask for a shared writable mapping!
+ *
+ * The page does not need to be reserved.
+ */
+int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page)
+{
+	if (addr < vma->vm_start || addr >= vma->vm_end)
+		return -EFAULT;
+	if (!page_count(page))
+		return -EINVAL;
+	return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
+}
+EXPORT_SYMBOL_GPL(vm_insert_page);
+
+/*
  * Somebody does a pfn remapping that doesn't actually work as a vma.
  *
  * Do it as individual pages instead, and warn about it. It's bad form,
@@ -1225,8 +1254,11 @@ static int incomplete_pfn_remap(struct vm_area_struct *vma,
 	if (!pfn_valid(pfn))
 		return -EINVAL;
 
-	retval = 0;
 	page = pfn_to_page(pfn);
+	if (!PageReserved(page))
+		return -EINVAL;
+
+	retval = 0;
 	while (start < end) {
 		retval = insert_page(vma->vm_mm, start, page, prot);
 		if (retval < 0)