Diffstat (limited to 'mm')

 -rw-r--r--  mm/backing-dev.c  |  34
 -rw-r--r--  mm/hugetlb.c      |   7
 -rw-r--r--  mm/ksm.c          |  12
 -rw-r--r--  mm/memcontrol.c   |   6
 -rw-r--r--  mm/mlock.c        |  41
 -rw-r--r--  mm/mmap.c         | 113
 -rw-r--r--  mm/rmap.c         |  42
 -rw-r--r--  mm/slub.c         |   2

 8 files changed, 149 insertions(+), 108 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index f13e067e1467..707d0dc6da0f 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -11,6 +11,8 @@
 #include <linux/writeback.h>
 #include <linux/device.h>
 
+static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
+
 void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
 {
 }
@@ -25,6 +27,11 @@ struct backing_dev_info default_backing_dev_info = {
 };
 EXPORT_SYMBOL_GPL(default_backing_dev_info);
 
+struct backing_dev_info noop_backing_dev_info = {
+	.name	= "noop",
+};
+EXPORT_SYMBOL_GPL(noop_backing_dev_info);
+
 static struct class *bdi_class;
 
 /*
@@ -715,6 +722,33 @@ void bdi_destroy(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL(bdi_destroy);
 
+/*
+ * For use from filesystems to quickly init and register a bdi associated
+ * with dirty writeback
+ */
+int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
+			   unsigned int cap)
+{
+	char tmp[32];
+	int err;
+
+	bdi->name = name;
+	bdi->capabilities = cap;
+	err = bdi_init(bdi);
+	if (err)
+		return err;
+
+	sprintf(tmp, "%.28s%s", name, "-%d");
+	err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
+	if (err) {
+		bdi_destroy(bdi);
+		return err;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(bdi_setup_and_register);
+
 static wait_queue_head_t congestion_wqh[2] = {
 		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
 		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
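
For orientation, a minimal sketch of how a filesystem could adopt the new helper from its fill_super path; the example_* names, the private-info layout, and the BDI_CAP_MAP_COPY capability are illustrative assumptions, not taken from this commit:

	static int example_fill_super(struct super_block *sb, void *data, int silent)
	{
		struct example_sb_info *sbi = sb->s_fs_info;	/* hypothetical */
		int err;

		/* one call replaces separate bdi_init() + bdi_register("example-%d") */
		err = bdi_setup_and_register(&sbi->bdi, "example", BDI_CAP_MAP_COPY);
		if (err)
			return err;
		sb->s_bdi = &sbi->bdi;
		return 0;
	}

The matching teardown would call bdi_destroy(&sbi->bdi) from the put_super path.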
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6034dc9e9796..4c9e6bbf3772 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -546,6 +546,7 @@ static void free_huge_page(struct page *page)
 
 	mapping = (struct address_space *) page_private(page);
 	set_page_private(page, 0);
+	page->mapping = NULL;
 	BUG_ON(page_count(page));
 	INIT_LIST_HEAD(&page->lru);
 
@@ -1038,7 +1039,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 		page = alloc_buddy_huge_page(h, vma, addr);
 		if (!page) {
 			hugetlb_put_quota(inode->i_mapping, chg);
-			return ERR_PTR(-VM_FAULT_OOM);
+			return ERR_PTR(-VM_FAULT_SIGBUS);
 		}
 	}
 
@@ -2447,8 +2448,10 @@ retry:
 		spin_lock(&inode->i_lock);
 		inode->i_blocks += blocks_per_huge_page(h);
 		spin_unlock(&inode->i_lock);
-	} else
+	} else {
 		lock_page(page);
+		page->mapping = HUGETLB_POISON;
+	}
 	}
 
 /*
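
The SIGBUS change travels back to the fault handler through the ERR_PTR encoding. A hedged sketch of the caller side (hugetlb_no_page-style; the exact unwind label is illustrative):

	page = alloc_huge_page(vma, address, 0);
	if (IS_ERR(page)) {
		ret = -PTR_ERR(page);	/* now VM_FAULT_SIGBUS rather than VM_FAULT_OOM */
		goto out;		/* unwind; label illustrative */
	}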
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -365,7 +365,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
 	do {
 		cond_resched();
 		page = follow_page(vma, addr, FOLL_GET);
-		if (!page)
+		if (IS_ERR_OR_NULL(page))
 			break;
 		if (PageKsm(page))
 			ret = handle_mm_fault(vma->vm_mm, vma, addr,
@@ -447,7 +447,7 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item)
 		goto out;
 
 	page = follow_page(vma, addr, FOLL_GET);
-	if (!page)
+	if (IS_ERR_OR_NULL(page))
 		goto out;
 	if (PageAnon(page)) {
 		flush_anon_page(vma, page, addr);
@@ -1086,7 +1086,7 @@ struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
 		cond_resched();
 		tree_rmap_item = rb_entry(*new, struct rmap_item, node);
 		tree_page = get_mergeable_page(tree_rmap_item);
-		if (!tree_page)
+		if (IS_ERR_OR_NULL(tree_page))
 			return NULL;
 
 		/*
@@ -1294,7 +1294,7 @@ next_mm:
 			if (ksm_test_exit(mm))
 				break;
 			*page = follow_page(vma, ksm_scan.address, FOLL_GET);
-			if (*page && PageAnon(*page)) {
+			if (!IS_ERR_OR_NULL(*page) && PageAnon(*page)) {
 				flush_anon_page(vma, *page, ksm_scan.address);
 				flush_dcache_page(*page);
 				rmap_item = get_next_rmap_item(slot,
@@ -1308,7 +1308,7 @@ next_mm:
 				up_read(&mm->mmap_sem);
 				return rmap_item;
 			}
-			if (*page)
+			if (!IS_ERR_OR_NULL(*page))
 				put_page(*page);
 			ksm_scan.address += PAGE_SIZE;
 			cond_resched();
@@ -1367,7 +1367,7 @@ next_mm:
 static void ksm_do_scan(unsigned int scan_npages)
 {
 	struct rmap_item *rmap_item;
-	struct page *page;
+	struct page *uninitialized_var(page);
 
 	while (scan_npages--) {
 		cond_resched();
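
The IS_ERR_OR_NULL() conversions matter because follow_page() can hand back an ERR_PTR() (e.g. ERR_PTR(-EFAULT)) as well as NULL, and a plain !page test would let an error pointer through to PageAnon()/put_page(). For reference, a sketch of the helper along the lines of its include/linux/err.h definition (quoted from memory; verify against the tree):

	static inline long IS_ERR_OR_NULL(const void *ptr)
	{
		return !ptr || IS_ERR_VALUE((unsigned long)ptr);
	}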
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f4ede99c8b9b..8a79a6f0f029 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1601,7 +1601,6 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
			 * There is a small race that "from" or "to" can be
			 * freed by rmdir, so we use css_tryget().
			 */
-			rcu_read_lock();
			from = mc.from;
			to = mc.to;
			if (from && css_tryget(&from->css)) {
@@ -1622,7 +1621,6 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
				do_continue = (to == mem_over_limit);
				css_put(&to->css);
			}
-			rcu_read_unlock();
			if (do_continue) {
				DEFINE_WAIT(wait);
				prepare_to_wait(&mc.waitq, &wait,
@@ -2429,11 +2427,11 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
 	}
 	unlock_page_cgroup(pc);
 
+	*ptr = mem;
 	if (mem) {
-		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
+		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
 		css_put(&mem->css);
 	}
-	*ptr = mem;
 	return ret;
 }
 
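
Both memcontrol.c hunks lean on the css_tryget()/css_put() idiom for cgroup pointers that a concurrent rmdir can free; a minimal sketch of the pattern (the use_memcg() helper is a generic placeholder):

	struct mem_cgroup *memcg = mc.from;	/* may be freed by rmdir */

	if (memcg && css_tryget(&memcg->css)) {
		/* reference held: the group cannot be destroyed here */
		use_memcg(memcg);		/* hypothetical */
		css_put(&memcg->css);
	}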
diff --git a/mm/mlock.c b/mm/mlock.c
index 8f4e2dfceec1..3f82720e0515 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -607,44 +607,3 @@ void user_shm_unlock(size_t size, struct user_struct *user)
 	spin_unlock(&shmlock_user_lock);
 	free_uid(user);
 }
-
-int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
-			  size_t size)
-{
-	unsigned long lim, vm, pgsz;
-	int error = -ENOMEM;
-
-	pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-	down_write(&mm->mmap_sem);
-
-	lim = ACCESS_ONCE(rlim[RLIMIT_AS].rlim_cur) >> PAGE_SHIFT;
-	vm = mm->total_vm + pgsz;
-	if (lim < vm)
-		goto out;
-
-	lim = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur) >> PAGE_SHIFT;
-	vm = mm->locked_vm + pgsz;
-	if (lim < vm)
-		goto out;
-
-	mm->total_vm += pgsz;
-	mm->locked_vm += pgsz;
-
-	error = 0;
-out:
-	up_write(&mm->mmap_sem);
-	return error;
-}
-
-void refund_locked_memory(struct mm_struct *mm, size_t size)
-{
-	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-	down_write(&mm->mmap_sem);
-
-	mm->total_vm -= pgsz;
-	mm->locked_vm -= pgsz;
-
-	up_write(&mm->mmap_sem);
-}
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -507,11 +507,12 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	struct address_space *mapping = NULL;
 	struct prio_tree_root *root = NULL;
 	struct file *file = vma->vm_file;
-	struct anon_vma *anon_vma = NULL;
 	long adjust_next = 0;
 	int remove_next = 0;
 
 	if (next && !insert) {
+		struct vm_area_struct *exporter = NULL;
+
 		if (end >= next->vm_end) {
 			/*
 			 * vma expands, overlapping all the next, and
@@ -519,7 +520,7 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start,
 			 */
 again:			remove_next = 1 + (end > next->vm_end);
 			end = next->vm_end;
-			anon_vma = next->anon_vma;
+			exporter = next;
 			importer = vma;
 		} else if (end > next->vm_start) {
 			/*
@@ -527,7 +528,7 @@ again:		remove_next = 1 + (end > next->vm_end);
 			 * mprotect case 5 shifting the boundary up.
 			 */
 			adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
-			anon_vma = next->anon_vma;
+			exporter = next;
 			importer = vma;
 		} else if (end < vma->vm_end) {
 			/*
@@ -536,28 +537,19 @@ again:		remove_next = 1 + (end > next->vm_end);
 			 * mprotect case 4 shifting the boundary down.
 			 */
 			adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
-			anon_vma = next->anon_vma;
+			exporter = vma;
 			importer = next;
 		}
-	}
 
-	/*
-	 * When changing only vma->vm_end, we don't really need anon_vma lock.
-	 */
-	if (vma->anon_vma && (insert || importer || start != vma->vm_start))
-		anon_vma = vma->anon_vma;
-	if (anon_vma) {
 		/*
 		 * Easily overlooked: when mprotect shifts the boundary,
 		 * make sure the expanding vma has anon_vma set if the
 		 * shrinking vma had, to cover any anon pages imported.
 		 */
-		if (importer && !importer->anon_vma) {
-			/* Block reverse map lookups until things are set up. */
-			if (anon_vma_clone(importer, vma)) {
+		if (exporter && exporter->anon_vma && !importer->anon_vma) {
+			if (anon_vma_clone(importer, exporter))
 				return -ENOMEM;
-			}
-			importer->anon_vma = anon_vma;
+			importer->anon_vma = exporter->anon_vma;
 		}
 	}
 
@@ -825,6 +817,61 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 }
 
 /*
+ * Rough compatibility check to quickly see if it's even worth looking
+ * at sharing an anon_vma.
+ *
+ * They need to have the same vm_file, and the flags can only differ
+ * in things that mprotect may change.
+ *
+ * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
+ * we can merge the two vma's. For example, we refuse to merge a vma if
+ * there is a vm_ops->close() function, because that indicates that the
+ * driver is doing some kind of reference counting. But that doesn't
+ * really matter for the anon_vma sharing case.
+ */
+static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
+{
+	return a->vm_end == b->vm_start &&
+		mpol_equal(vma_policy(a), vma_policy(b)) &&
+		a->vm_file == b->vm_file &&
+		!((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) &&
+		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
+}
+
+/*
+ * Do some basic sanity checking to see if we can re-use the anon_vma
+ * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
+ * the same as 'old', the other will be the new one that is trying
+ * to share the anon_vma.
+ *
+ * NOTE! This runs with mm_sem held for reading, so it is possible that
+ * the anon_vma of 'old' is concurrently in the process of being set up
+ * by another page fault trying to merge _that_. But that's ok: if it
+ * is being set up, that automatically means that it will be a singleton
+ * acceptable for merging, so we can do all of this optimistically. But
+ * we do that ACCESS_ONCE() to make sure that we never re-load the pointer.
+ *
+ * IOW: that the "list_is_singular()" test on the anon_vma_chain only
+ * matters for the 'stable anon_vma' case (ie the thing we want to avoid
+ * is to return an anon_vma that is "complex" due to having gone through
+ * a fork).
+ *
+ * We also make sure that the two vma's are compatible (adjacent,
+ * and with the same memory policies). That's all stable, even with just
+ * a read lock on the mm_sem.
+ */
+static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
+{
+	if (anon_vma_compatible(a, b)) {
+		struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma);
+
+		if (anon_vma && list_is_singular(&old->anon_vma_chain))
+			return anon_vma;
+	}
+	return NULL;
+}
+
+/*
  * find_mergeable_anon_vma is used by anon_vma_prepare, to check
  * neighbouring vmas for a suitable anon_vma, before it goes off
  * to allocate a new anon_vma.  It checks because a repetitive
@@ -834,28 +881,16 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
  */
 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
 {
+	struct anon_vma *anon_vma;
 	struct vm_area_struct *near;
-	unsigned long vm_flags;
 
 	near = vma->vm_next;
 	if (!near)
 		goto try_prev;
 
-	/*
-	 * Since only mprotect tries to remerge vmas, match flags
-	 * which might be mprotected into each other later on.
-	 * Neither mlock nor madvise tries to remerge at present,
-	 * so leave their flags as obstructing a merge.
-	 */
-	vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
-	vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);
-
-	if (near->anon_vma && vma->vm_end == near->vm_start &&
-			mpol_equal(vma_policy(vma), vma_policy(near)) &&
-			can_vma_merge_before(near, vm_flags,
-				NULL, vma->vm_file, vma->vm_pgoff +
-				((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)))
-		return near->anon_vma;
+	anon_vma = reusable_anon_vma(near, vma, near);
+	if (anon_vma)
+		return anon_vma;
 try_prev:
 	/*
 	 * It is potentially slow to have to call find_vma_prev here.
@@ -868,14 +903,9 @@ try_prev:
 	if (!near)
 		goto none;
 
-	vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
-	vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);
-
-	if (near->anon_vma && near->vm_end == vma->vm_start &&
-			mpol_equal(vma_policy(near), vma_policy(vma)) &&
-			can_vma_merge_after(near, vm_flags,
-				NULL, vma->vm_file, vma->vm_pgoff))
-		return near->anon_vma;
+	anon_vma = reusable_anon_vma(near, near, vma);
+	if (anon_vma)
+		return anon_vma;
 none:
 	/*
 	 * There's no absolute need to look only at touching neighbours:
@@ -1947,7 +1977,8 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 		return 0;
 
 	/* Clean everything up if vma_adjust failed. */
-	new->vm_ops->close(new);
+	if (new->vm_ops && new->vm_ops->close)
+		new->vm_ops->close(new);
 	if (new->vm_file) {
 		if (vma->vm_flags & VM_EXECUTABLE)
 			removed_exe_file_vma(mm);
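
A worked example of the vm_pgoff test in anon_vma_compatible(), assuming 4 KiB pages: if vma a spans 0x10000-0x13000 with a->vm_pgoff == 5, a following vma b starting at 0x13000 passes only when b->vm_pgoff == 5 + ((0x13000 - 0x10000) >> 12) == 8, i.e. when the two vmas map consecutive offsets of the same file.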
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -133,8 +133,8 @@ int anon_vma_prepare(struct vm_area_struct *vma)
 				goto out_enomem_free_avc;
 			allocated = anon_vma;
 		}
-		spin_lock(&anon_vma->lock);
 
+		spin_lock(&anon_vma->lock);
 		/* page_table_lock to protect against threads */
 		spin_lock(&mm->page_table_lock);
 		if (likely(!vma->anon_vma)) {
@@ -144,14 +144,15 @@ int anon_vma_prepare(struct vm_area_struct *vma)
 			list_add(&avc->same_vma, &vma->anon_vma_chain);
 			list_add(&avc->same_anon_vma, &anon_vma->head);
 			allocated = NULL;
+			avc = NULL;
 		}
 		spin_unlock(&mm->page_table_lock);
-
 		spin_unlock(&anon_vma->lock);
-		if (unlikely(allocated)) {
+
+		if (unlikely(allocated))
 			anon_vma_free(allocated);
+		if (unlikely(avc))
 			anon_vma_chain_free(avc);
-		}
 	}
 	return 0;
 
@@ -182,7 +183,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
 {
 	struct anon_vma_chain *avc, *pavc;
 
-	list_for_each_entry(pavc, &src->anon_vma_chain, same_vma) {
+	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
 		avc = anon_vma_chain_alloc();
 		if (!avc)
 			goto enomem_failure;
@@ -335,14 +336,13 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 
 /*
  * At what user virtual address is page expected in vma?
- * checking that the page matches the vma.
+ * Caller should check the page is actually part of the vma.
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
-	if (PageAnon(page)) {
-		if (vma->anon_vma != page_anon_vma(page))
-			return -EFAULT;
-	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
+	if (PageAnon(page))
+		;
+	else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
 		if (!vma->vm_file ||
 		    vma->vm_file->f_mapping != page->mapping)
 			return -EFAULT;
@@ -730,13 +730,29 @@ void page_move_anon_rmap(struct page *page,
  * @page: the page to add the mapping to
  * @vma: the vm area in which the mapping is added
  * @address: the user virtual address mapped
+ * @exclusive: the page is exclusively owned by the current process
  */
 static void __page_set_anon_rmap(struct page *page,
-	struct vm_area_struct *vma, unsigned long address)
+	struct vm_area_struct *vma, unsigned long address, int exclusive)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 
 	BUG_ON(!anon_vma);
+
+	/*
+	 * If the page isn't exclusively mapped into this vma,
+	 * we must use the _oldest_ possible anon_vma for the
+	 * page mapping!
+	 *
+	 * So take the last AVC chain entry in the vma, which is
+	 * the deepest ancestor, and use the anon_vma from that.
+	 */
+	if (!exclusive) {
+		struct anon_vma_chain *avc;
+		avc = list_entry(vma->anon_vma_chain.prev, struct anon_vma_chain, same_vma);
+		anon_vma = avc->anon_vma;
+	}
+
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 	page->mapping = (struct address_space *) anon_vma;
 	page->index = linear_page_index(vma, address);
@@ -791,7 +807,7 @@ void page_add_anon_rmap(struct page *page,
 	VM_BUG_ON(!PageLocked(page));
 	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	if (first)
-		__page_set_anon_rmap(page, vma, address);
+		__page_set_anon_rmap(page, vma, address, 0);
 	else
 		__page_check_anon_rmap(page, vma, address);
 }
@@ -813,7 +829,7 @@ void page_add_new_anon_rmap(struct page *page,
 	SetPageSwapBacked(page);
 	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
 	__inc_zone_page_state(page, NR_ANON_PAGES);
-	__page_set_anon_rmap(page, vma, address);
+	__page_set_anon_rmap(page, vma, address, 1);
 	if (page_evictable(page, vma))
 		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
 	else
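
Why vma->anon_vma_chain.prev is the oldest anon_vma: entries are added to the same_vma list with list_add() (head insertion), and anon_vma_clone() now walks the source chain in reverse (the list_for_each_entry_reverse hunk above), so relative order is preserved across fork. A schematic, illustrative rather than quoted from the commit:

	/*
	 * same_vma chain after a chain of forks (head = newest):
	 *
	 *   vma->anon_vma_chain: [own avc] -> [parent avc] -> ... -> [root avc]
	 *                         head                               tail (.prev)
	 *
	 * so list_entry(vma->anon_vma_chain.prev, ...) yields the deepest
	 * ancestor's anon_vma, the one every process in the tree shares.
	 */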
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2153,7 +2153,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 	int local_node;
 
 	if (slab_state >= UP && (s < kmalloc_caches ||
-			s > kmalloc_caches + KMALLOC_CACHES))
+			s >= kmalloc_caches + KMALLOC_CACHES))
 		local_node = page_to_nid(virt_to_page(s));
 	else
 		local_node = 0;
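
The slub change fixes a classic off-by-one in a pointer-range test: for an array of KMALLOC_CACHES elements, the first out-of-range pointer is kmalloc_caches + KMALLOC_CACHES itself, so the exclusion test needs >=, not >. A generic illustration (names are placeholders):

	/* p points into arr[0..N-1] iff: */
	int inside  = (p >= arr) && (p < arr + N);	/* arr + N is one past the end */
	/* hence "p is outside arr" is: */
	int outside = (p < arr) || (p >= arr + N);	/* >=, as in the fix above */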