Diffstat (limited to 'mm')
 -rw-r--r--  mm/bootmem.c         |  2
 -rw-r--r--  mm/memory.c          | 72
 -rw-r--r--  mm/memory_hotplug.c  |  2
 -rw-r--r--  mm/mmap.c            |  2
 -rw-r--r--  mm/mremap.c          |  2
 -rw-r--r--  mm/page_alloc.c      |  2
6 files changed, 27 insertions, 55 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c
index e8c567177dcf..16b9465eb4eb 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -204,6 +204,8 @@ restart_scan:
 		unsigned long j;
 		i = find_next_zero_bit(bdata->node_bootmem_map, eidx, i);
 		i = ALIGN(i, incr);
+		if (i >= eidx)
+			break;
 		if (test_bit(i, bdata->node_bootmem_map))
 			continue;
 		for (j = i + 1; j < i + areasize; ++j) {
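
The hunk above closes an out-of-bounds window: find_next_zero_bit() respects eidx, but the subsequent ALIGN() rounds the bit index up and can push it past the end of the bitmap, after which test_bit(i, ...) reads beyond node_bootmem_map. A minimal userspace sketch of the arithmetic, with made-up values for eidx, i and incr:

#include <stdio.h>

/* same rounding the kernel's ALIGN() performs */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long eidx = 1000;  /* one past the last valid bit index */
	unsigned long i = 998;      /* find_next_zero_bit() result, i < eidx */
	unsigned long incr = 16;    /* allocation alignment, in bits */

	i = ALIGN(i, incr);         /* 998 rounds up to 1008 */
	if (i >= eidx)              /* the check the patch adds */
		printf("i = %lu is past eidx = %lu: break, don't test_bit()\n",
		       i, eidx);
	return 0;
}
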
diff --git a/mm/memory.c b/mm/memory.c
index aa8af0e20269..d8dde07a3656 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -349,6 +349,11 @@ void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
 	dump_stack();
 }
 
+static inline int is_cow_mapping(unsigned int flags)
+{
+	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
+}
+
 /*
  * This function gets the "struct page" associated with a pte.
  *
@@ -377,6 +382,8 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_
 		unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
 		if (pfn == vma->vm_pgoff + off)
 			return NULL;
+		if (!is_cow_mapping(vma->vm_flags))
+			return NULL;
 	}
 
 	/*
@@ -437,7 +444,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	 * If it's a COW mapping, write protect it both
 	 * in the parent and the child
 	 */
-	if ((vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE) {
+	if (is_cow_mapping(vm_flags)) {
 		ptep_set_wrprotect(src_mm, addr, src_pte);
 		pte = *src_pte;
 	}
@@ -567,7 +574,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	 * readonly mappings. The tradeoff is that copy_page_range is more
 	 * efficient than faulting.
 	 */
-	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP))) {
+	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
 		if (!vma->anon_vma)
 			return 0;
 	}
@@ -1002,7 +1009,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			continue;
 		}
 
-		if (!vma || (vma->vm_flags & VM_IO)
+		if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
 				|| !(vm_flags & vma->vm_flags))
 			return i ? : -EFAULT;
 
@@ -1221,55 +1228,12 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *
 		return -EFAULT;
 	if (!page_count(page))
 		return -EINVAL;
+	vma->vm_flags |= VM_INSERTPAGE;
 	return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
 }
 EXPORT_SYMBOL(vm_insert_page);
 
 /*
- * Somebody does a pfn remapping that doesn't actually work as a vma.
- *
- * Do it as individual pages instead, and warn about it. It's bad form,
- * and very inefficient.
- */
-static int incomplete_pfn_remap(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end,
-		unsigned long pfn, pgprot_t prot)
-{
-	static int warn = 10;
-	struct page *page;
-	int retval;
-
-	if (!(vma->vm_flags & VM_INCOMPLETE)) {
-		if (warn) {
-			warn--;
-			printk("%s does an incomplete pfn remapping", current->comm);
-			dump_stack();
-		}
-	}
-	vma->vm_flags |= VM_INCOMPLETE | VM_IO | VM_RESERVED;
-
-	if (start < vma->vm_start || end > vma->vm_end)
-		return -EINVAL;
-
-	if (!pfn_valid(pfn))
-		return -EINVAL;
-
-	page = pfn_to_page(pfn);
-	if (!PageReserved(page))
-		return -EINVAL;
-
-	retval = 0;
-	while (start < end) {
-		retval = insert_page(vma->vm_mm, start, page, prot);
-		if (retval < 0)
-			break;
-		start += PAGE_SIZE;
-		page++;
-	}
-	return retval;
-}
-
-/*
  * maps a range of physical memory into the requested pages. the old
  * mappings are removed. any references to nonexistent pages results
  * in null mappings (currently treated as "copy-on-access")
@@ -1343,9 +1307,6 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	struct mm_struct *mm = vma->vm_mm;
 	int err;
 
-	if (addr != vma->vm_start || end != vma->vm_end)
-		return incomplete_pfn_remap(vma, addr, end, pfn, prot);
-
 	/*
 	 * Physically remapped pages are special. Tell the
 	 * rest of the world about it:
@@ -1359,9 +1320,18 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	 * VM_PFNMAP tells the core MM that the base pages are just
 	 * raw PFN mappings, and do not have a "struct page" associated
 	 * with them.
+	 *
+	 * There's a horrible special case to handle copy-on-write
+	 * behaviour that some programs depend on. We mark the "original"
+	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
 	 */
+	if (is_cow_mapping(vma->vm_flags)) {
+		if (addr != vma->vm_start || end != vma->vm_end)
+			return -EINVAL;
+		vma->vm_pgoff = pfn;
+	}
+
 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
-	vma->vm_pgoff = pfn;
 
 	BUG_ON(addr >= end);
 	pfn -= addr >> PAGE_SHIFT;
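
Taken together, the memory.c hunks remove the incomplete_pfn_remap() fallback and tighten the remap_pfn_range() contract: a COW mapping (private and writable, per the new is_cow_mapping() helper) must now cover the entire VMA, because vma->vm_pgoff is reused to let vm_normal_page() recognize the original, un-COW'ed pfns. A hedged sketch of the typical driver-side caller under the new rules; mydev_phys and mydev_mmap are hypothetical names, while remap_pfn_range() and the vma fields are the ones touched by this patch:

static unsigned long mydev_phys;	/* hypothetical: filled in at probe time */

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/*
	 * Remap the full [vm_start, vm_end) range in one call.  A
	 * partial remap of a COW mapping now fails with -EINVAL
	 * instead of being papered over by incomplete_pfn_remap().
	 */
	return remap_pfn_range(vma, vma->vm_start,
			       mydev_phys >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}
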
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 431a64f021c0..f6d4af8af8a8 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -104,7 +104,7 @@ static void grow_pgdat_span(struct pglist_data *pgdat,
 		pgdat->node_start_pfn = start_pfn;
 
 	if (end_pfn > old_pgdat_end_pfn)
-		pgdat->node_spanned_pages = end_pfn - pgdat->node_spanned_pages;
+		pgdat->node_spanned_pages = end_pfn - pgdat->node_start_pfn;
 }
 
 int online_pages(unsigned long pfn, unsigned long nr_pages)
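
The memory_hotplug.c change fixes a thinko: the old expression subtracted a page count (node_spanned_pages) from a pfn (end_pfn). A standalone check with illustrative numbers shows how far off the old arithmetic could be:

#include <stdio.h>

int main(void)
{
	unsigned long node_start_pfn = 0x1000;     /* node begins at this pfn */
	unsigned long node_spanned_pages = 0x800;  /* span before hotplug */
	unsigned long end_pfn = 0x2000;            /* new end after onlining */

	/* old: pfn minus page count, a unit mismatch */
	printf("old span: %#lx pages\n", end_pfn - node_spanned_pages); /* 0x1800 */
	/* new: span runs from the node's start pfn to end_pfn */
	printf("new span: %#lx pages\n", end_pfn - node_start_pfn);     /* 0x1000 */
	return 0;
}
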
diff --git a/mm/mmap.c b/mm/mmap.c
index 11ca5927d5ff..64ba4dbcb7de 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -611,7 +611,7 @@ again: remove_next = 1 + (end > next->vm_end);
  * If the vma has a ->close operation then the driver probably needs to release
  * per-vma resources, so we don't attempt to merge those.
  */
-#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED)
+#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
 
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
 			struct file *file, unsigned long vm_flags)
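
Adding VM_PFNMAP to VM_SPECIAL keeps PFN mappings out of vma merging: once vm_pgoff holds a raw pfn rather than a linear file offset, the offset-continuity assumption merging relies on no longer applies. A self-contained illustration of the test vma_merge() performs first; the flag values are copied from the mm.h of this era and should be treated as illustrative:

#include <stdio.h>

#define VM_PFNMAP     0x00000400UL
#define VM_IO         0x00004000UL
#define VM_DONTCOPY   0x00020000UL
#define VM_DONTEXPAND 0x00040000UL
#define VM_RESERVED   0x00080000UL

#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)

int main(void)
{
	unsigned long vm_flags = VM_PFNMAP;	/* e.g. set by remap_pfn_range() */

	/* special areas are rejected before any adjacency checks run */
	if (vm_flags & VM_SPECIAL)
		puts("VM_PFNMAP area: never merged with its neighbours");
	return 0;
}
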
diff --git a/mm/mremap.c b/mm/mremap.c
index b535438c363c..ddaeee9a0b69 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -323,7 +323,7 @@ unsigned long do_mremap(unsigned long addr,
 	/* We can't remap across vm area boundaries */
 	if (old_len > vma->vm_end - addr)
 		goto out;
-	if (vma->vm_flags & VM_DONTEXPAND) {
+	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
 		if (new_len > old_len)
 			goto out;
 	}
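
The mremap.c hunk makes PFN mappings behave like VM_DONTEXPAND ones: they may move or shrink, but not grow, since there are no struct pages behind them from which to populate a longer mapping. A userspace sketch of the visible effect; /dev/mydev stands in for any hypothetical driver whose mmap() uses remap_pfn_range():

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/mydev", O_RDWR);	/* hypothetical device node */
	if (fd < 0)
		return 1;

	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	/* growing the mapping now takes the goto out path and fails */
	if (mremap(p, 4096, 8192, MREMAP_MAYMOVE) == MAP_FAILED)
		perror("mremap grow on a VM_PFNMAP area");	/* expected */

	munmap(p, 4096);
	close(fd);
	return 0;
}
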
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3b21a13d841c..fe14a8c87fc2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1896,7 +1896,7 @@ static int __devinit pageset_cpuup_callback(struct notifier_block *nfb,
 static struct notifier_block pageset_notifier =
 	{ &pageset_cpuup_callback, NULL, 0 };
 
-void __init setup_per_cpu_pageset()
+void __init setup_per_cpu_pageset(void)
 {
 	int err;
 
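
The page_alloc.c hunk is a prototype fix, not a behavioural one: in C, unlike C++, an empty parameter list () declares a function with unspecified parameters, so the compiler cannot reject callers that pass arguments; (void) declares a true zero-argument prototype. A standalone demonstration with placeholder names:

#include <stdio.h>

void setup_old() { puts("old style: parameters unspecified"); }     /* like '()' */
void setup_new(void) { puts("prototype: exactly zero arguments"); }

int main(void)
{
	setup_old();	/* setup_old(1, 2) would also compile, unchecked */
	setup_new();	/* setup_new(1) is rejected at compile time */
	return 0;
}
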