aboutsummaryrefslogtreecommitdiffstats
path: root/mm/memory.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/memory.c')
-rw-r--r--mm/memory.c72
1 file changed, 21 insertions, 51 deletions
diff --git a/mm/memory.c b/mm/memory.c
index aa8af0e20269..d8dde07a3656 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -349,6 +349,11 @@ void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
349 dump_stack(); 349 dump_stack();
350} 350}
351 351
352static inline int is_cow_mapping(unsigned int flags)
353{
354 return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
355}
356
352/* 357/*
353 * This function gets the "struct page" associated with a pte. 358 * This function gets the "struct page" associated with a pte.
354 * 359 *
@@ -377,6 +382,8 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_
377 unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT; 382 unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
378 if (pfn == vma->vm_pgoff + off) 383 if (pfn == vma->vm_pgoff + off)
379 return NULL; 384 return NULL;
385 if (!is_cow_mapping(vma->vm_flags))
386 return NULL;
380 } 387 }
381 388
382 /* 389 /*
@@ -437,7 +444,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
437 * If it's a COW mapping, write protect it both 444 * If it's a COW mapping, write protect it both
438 * in the parent and the child 445 * in the parent and the child
439 */ 446 */
440 if ((vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE) { 447 if (is_cow_mapping(vm_flags)) {
441 ptep_set_wrprotect(src_mm, addr, src_pte); 448 ptep_set_wrprotect(src_mm, addr, src_pte);
442 pte = *src_pte; 449 pte = *src_pte;
443 } 450 }
@@ -567,7 +574,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
567 * readonly mappings. The tradeoff is that copy_page_range is more 574 * readonly mappings. The tradeoff is that copy_page_range is more
568 * efficient than faulting. 575 * efficient than faulting.
569 */ 576 */
570 if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP))) { 577 if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
571 if (!vma->anon_vma) 578 if (!vma->anon_vma)
572 return 0; 579 return 0;
573 } 580 }
@@ -1002,7 +1009,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1002 continue; 1009 continue;
1003 } 1010 }
1004 1011
1005 if (!vma || (vma->vm_flags & VM_IO) 1012 if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
1006 || !(vm_flags & vma->vm_flags)) 1013 || !(vm_flags & vma->vm_flags))
1007 return i ? : -EFAULT; 1014 return i ? : -EFAULT;
1008 1015
@@ -1221,55 +1228,12 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *
1221 return -EFAULT; 1228 return -EFAULT;
1222 if (!page_count(page)) 1229 if (!page_count(page))
1223 return -EINVAL; 1230 return -EINVAL;
1231 vma->vm_flags |= VM_INSERTPAGE;
1224 return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot); 1232 return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
1225} 1233}
1226EXPORT_SYMBOL(vm_insert_page); 1234EXPORT_SYMBOL(vm_insert_page);
1227 1235
1228/* 1236/*
1229 * Somebody does a pfn remapping that doesn't actually work as a vma.
1230 *
1231 * Do it as individual pages instead, and warn about it. It's bad form,
1232 * and very inefficient.
1233 */
1234static int incomplete_pfn_remap(struct vm_area_struct *vma,
1235 unsigned long start, unsigned long end,
1236 unsigned long pfn, pgprot_t prot)
1237{
1238 static int warn = 10;
1239 struct page *page;
1240 int retval;
1241
1242 if (!(vma->vm_flags & VM_INCOMPLETE)) {
1243 if (warn) {
1244 warn--;
1245 printk("%s does an incomplete pfn remapping", current->comm);
1246 dump_stack();
1247 }
1248 }
1249 vma->vm_flags |= VM_INCOMPLETE | VM_IO | VM_RESERVED;
1250
1251 if (start < vma->vm_start || end > vma->vm_end)
1252 return -EINVAL;
1253
1254 if (!pfn_valid(pfn))
1255 return -EINVAL;
1256
1257 page = pfn_to_page(pfn);
1258 if (!PageReserved(page))
1259 return -EINVAL;
1260
1261 retval = 0;
1262 while (start < end) {
1263 retval = insert_page(vma->vm_mm, start, page, prot);
1264 if (retval < 0)
1265 break;
1266 start += PAGE_SIZE;
1267 page++;
1268 }
1269 return retval;
1270}
1271
1272/*
1273 * maps a range of physical memory into the requested pages. the old 1237 * maps a range of physical memory into the requested pages. the old
1274 * mappings are removed. any references to nonexistent pages results 1238 * mappings are removed. any references to nonexistent pages results
1275 * in null mappings (currently treated as "copy-on-access") 1239 * in null mappings (currently treated as "copy-on-access")
@@ -1343,9 +1307,6 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1343 struct mm_struct *mm = vma->vm_mm; 1307 struct mm_struct *mm = vma->vm_mm;
1344 int err; 1308 int err;
1345 1309
1346 if (addr != vma->vm_start || end != vma->vm_end)
1347 return incomplete_pfn_remap(vma, addr, end, pfn, prot);
1348
1349 /* 1310 /*
1350 * Physically remapped pages are special. Tell the 1311 * Physically remapped pages are special. Tell the
1351 * rest of the world about it: 1312 * rest of the world about it:
@@ -1359,9 +1320,18 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1359 * VM_PFNMAP tells the core MM that the base pages are just 1320 * VM_PFNMAP tells the core MM that the base pages are just
1360 * raw PFN mappings, and do not have a "struct page" associated 1321 * raw PFN mappings, and do not have a "struct page" associated
1361 * with them. 1322 * with them.
1323 *
1324 * There's a horrible special case to handle copy-on-write
1325 * behaviour that some programs depend on. We mark the "original"
1326 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
1362 */ 1327 */
1328 if (is_cow_mapping(vma->vm_flags)) {
1329 if (addr != vma->vm_start || end != vma->vm_end)
1330 return -EINVAL;
1331 vma->vm_pgoff = pfn;
1332 }
1333
1363 vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; 1334 vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
1364 vma->vm_pgoff = pfn;
1365 1335
1366 BUG_ON(addr >= end); 1336 BUG_ON(addr >= end);
1367 pfn -= addr >> PAGE_SHIFT; 1337 pfn -= addr >> PAGE_SHIFT;