Diffstat (limited to 'mm')
-rw-r--r--  mm/memory.c  36
1 file changed, 34 insertions(+), 2 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 8d10b5540c73..4b4fc3a7ea48 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1172,7 +1172,7 @@ static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *pa
 	spinlock_t *ptl;
 
 	retval = -EINVAL;
-	if (PageAnon(page) || !PageReserved(page))
+	if (PageAnon(page))
 		goto out;
 	retval = -ENOMEM;
 	flush_dcache_page(page);
@@ -1197,6 +1197,35 @@ out:
 }
 
 /*
+ * This allows drivers to insert individual pages they've allocated
+ * into a user vma.
+ *
+ * The page has to be a nice clean _individual_ kernel allocation.
+ * If you allocate a compound page, you need to have marked it as
+ * such (__GFP_COMP), or manually just split the page up yourself
+ * (which is mainly an issue of doing "set_page_count(page, 1)" for
+ * each sub-page, and then freeing them one by one when you free
+ * them rather than freeing it as a compound page).
+ *
+ * NOTE! Traditionally this was done with "remap_pfn_range()" which
+ * took an arbitrary page protection parameter. This doesn't allow
+ * that. Your vma protection will have to be set up correctly, which
+ * means that if you want a shared writable mapping, you'd better
+ * ask for a shared writable mapping!
+ *
+ * The page does not need to be reserved.
+ */
+int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page)
+{
+	if (addr < vma->vm_start || addr >= vma->vm_end)
+		return -EFAULT;
+	if (!page_count(page))
+		return -EINVAL;
+	return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
+}
+EXPORT_SYMBOL_GPL(vm_insert_page);
+
+/*
  * Somebody does a pfn remapping that doesn't actually work as a vma.
  *
  * Do it as individual pages instead, and warn about it. It's bad form,
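
For context, a minimal usage sketch (not part of the patch) of the new vm_insert_page() helper from a driver's ->mmap handler. The names mydrv_mmap and mydrv_page are hypothetical; the assumption is that the driver has already allocated the page with alloc_page(GFP_KERNEL) and that userspace mmap()s exactly one page with the protection it actually wants.

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical driver state: one ordinary, non-reserved kernel page. */
static struct page *mydrv_page;

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* This sketch only backs a single page. */
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	/*
	 * vm_insert_page() uses vma->vm_page_prot as-is, so a shared
	 * writable mapping must have been requested as such by userspace;
	 * no PageReserved() marking and no remap_pfn_range() needed.
	 */
	return vm_insert_page(vma, vma->vm_start, mydrv_page);
}
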
@@ -1225,8 +1254,11 @@ static int incomplete_pfn_remap(struct vm_area_struct *vma,
 	if (!pfn_valid(pfn))
 		return -EINVAL;
 
-	retval = 0;
 	page = pfn_to_page(pfn);
+	if (!PageReserved(page))
+		return -EINVAL;
+
+	retval = 0;
 	while (start < end) {
 		retval = insert_page(vma->vm_mm, start, page, prot);
 		if (retval < 0)
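
The new comment's advice about high-order allocations could look roughly like the following sketch (a hypothetical helper, not part of the patch): without __GFP_COMP, each sub-page of the block is given its own reference count so it can be inserted and later freed one page at a time.

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Hypothetical helper: allocate a non-compound block of 2^order pages
 * and give every sub-page its own reference, per the comment's
 * "set_page_count(page, 1)" advice.
 */
static struct page *mydrv_alloc_split(unsigned int order)
{
	struct page *block = alloc_pages(GFP_KERNEL, order);
	unsigned int i;

	if (!block)
		return NULL;
	for (i = 1; i < (1U << order); i++)
		set_page_count(block + i, 1);
	return block;
}

Each sub-page would then be passed to vm_insert_page() and eventually released individually with __free_page(), rather than freeing the whole block as one compound page.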