Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c    |  2 +-
-rw-r--r--  mm/memcontrol.c |  2 --
-rw-r--r--  mm/mlock.c      | 41 -----------------------------------------
-rw-r--r--  mm/rmap.c       |  9 ++++-----
-rw-r--r--  mm/slub.c       |  2 +-
5 files changed, 6 insertions(+), 50 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ffbdfc86aedf..4c9e6bbf3772 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1039,7 +1039,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 		page = alloc_buddy_huge_page(h, vma, addr);
 		if (!page) {
 			hugetlb_put_quota(inode->i_mapping, chg);
-			return ERR_PTR(-VM_FAULT_OOM);
+			return ERR_PTR(-VM_FAULT_SIGBUS);
 		}
 	}
 
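Note on the hugetlb.c hunk: alloc_huge_page() reports failure by encoding a VM fault code into the returned page pointer, so this one-line change makes the caller raise VM_FAULT_SIGBUS (fatal only to the faulting task) instead of VM_FAULT_OOM (which can invoke the OOM killer). Below is a minimal userspace sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() convention the hunk relies on; alloc_page_or_fault() and the fault-code value are illustrative, not kernel code.

#include <stdio.h>

#define MAX_ERRNO	4095
#define VM_FAULT_SIGBUS	0x0002	/* illustrative value */

/* Replicas of the kernel macros: a small negative code is cast into a
 * pointer value in the top MAX_ERRNO bytes of the address space, which
 * can never be a valid object address. */
static inline void *ERR_PTR(long error)   { return (void *)error; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *alloc_page_or_fault(int fail)
{
	static char page[4096];
	/* on failure, encode the fault code instead of returning NULL */
	return fail ? ERR_PTR(-VM_FAULT_SIGBUS) : page;
}

int main(void)
{
	void *p = alloc_page_or_fault(1);
	if (IS_ERR(p))	/* the caller recovers the fault code: */
		printf("fault = %ld\n", -PTR_ERR(p));
	return 0;
}

One pointer thus carries either a usable page or the precise fault to report, which is why changing the OOM code to SIGBUS needs only this single line.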
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6c755de385f7..8a79a6f0f029 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1601,7 +1601,6 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 		 * There is a small race that "from" or "to" can be
 		 * freed by rmdir, so we use css_tryget().
 		 */
-		rcu_read_lock();
 		from = mc.from;
 		to = mc.to;
 		if (from && css_tryget(&from->css)) {
@@ -1622,7 +1621,6 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 			do_continue = (to == mem_over_limit);
 			css_put(&to->css);
 		}
-		rcu_read_unlock();
 		if (do_continue) {
 			DEFINE_WAIT(wait);
 			prepare_to_wait(&mc.waitq, &wait,
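Note on the memcontrol.c hunks: a successful css_tryget() pins "from"/"to" with their own reference counts, so the surrounding rcu_read_lock()/rcu_read_unlock() pair adds nothing once the tryget has succeeded; and the do_continue path goes on to sleep on mc.waitq, which would not be legal inside an RCU read-side critical section anyway. A standalone sketch of the tryget idea follows; struct obj and its helpers are hypothetical stand-ins, not the kernel's css implementation.

#include <stdatomic.h>
#include <stdbool.h>

struct obj { atomic_int refcnt; };

static bool obj_tryget(struct obj *o)
{
	int c = atomic_load(&o->refcnt);
	while (c > 0) {		/* only pin objects not yet released */
		if (atomic_compare_exchange_weak(&o->refcnt, &c, c + 1))
			return true;
	}
	return false;		/* already dead: caller must not touch it */
}

static void obj_put(struct obj *o)
{
	atomic_fetch_sub(&o->refcnt, 1);	/* real code frees at zero */
}

int main(void)
{
	struct obj o;
	atomic_init(&o.refcnt, 1);
	if (obj_tryget(&o)) {
		/* safe to block here: the reference pins the object */
		obj_put(&o);
	}
	return 0;
}

Once the tryget succeeds the holder may block freely, which is exactly what the removed RCU section would have forbidden.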
diff --git a/mm/mlock.c b/mm/mlock.c
index 8f4e2dfceec1..3f82720e0515 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -607,44 +607,3 @@ void user_shm_unlock(size_t size, struct user_struct *user)
 	spin_unlock(&shmlock_user_lock);
 	free_uid(user);
 }
-
-int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
-			  size_t size)
-{
-	unsigned long lim, vm, pgsz;
-	int error = -ENOMEM;
-
-	pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-	down_write(&mm->mmap_sem);
-
-	lim = ACCESS_ONCE(rlim[RLIMIT_AS].rlim_cur) >> PAGE_SHIFT;
-	vm = mm->total_vm + pgsz;
-	if (lim < vm)
-		goto out;
-
-	lim = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur) >> PAGE_SHIFT;
-	vm = mm->locked_vm + pgsz;
-	if (lim < vm)
-		goto out;
-
-	mm->total_vm += pgsz;
-	mm->locked_vm += pgsz;
-
-	error = 0;
-out:
-	up_write(&mm->mmap_sem);
-	return error;
-}
-
-void refund_locked_memory(struct mm_struct *mm, size_t size)
-{
-	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-	down_write(&mm->mmap_sem);
-
-	mm->total_vm -= pgsz;
-	mm->locked_vm -= pgsz;
-
-	up_write(&mm->mmap_sem);
-}
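Note on the mlock.c hunk: account_locked_memory() and refund_locked_memory() are deleted outright (the diffstat shows 41 pure deletions), which implies they no longer had any callers. The arithmetic they used to charge locked memory in whole pages, PAGE_ALIGN(size) >> PAGE_SHIFT, rounds a byte count up to a page count. A standalone illustration, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumption: 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	size_t sizes[] = { 1, 4096, 4097, 10000 };
	for (int i = 0; i < 4; i++)	/* prints 1, 1, 2, 3 pages */
		printf("%5zu bytes -> %lu page(s)\n", sizes[i],
		       (unsigned long)(PAGE_ALIGN(sizes[i]) >> PAGE_SHIFT));
	return 0;
}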
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -336,14 +336,13 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 
 /*
  * At what user virtual address is page expected in vma?
- * checking that the page matches the vma.
+ * Caller should check the page is actually part of the vma.
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
-	if (PageAnon(page)) {
-		if (vma->anon_vma != page_anon_vma(page))
-			return -EFAULT;
-	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
+	if (PageAnon(page))
+		;
+	else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
 		if (!vma->vm_file ||
 		    vma->vm_file->f_mapping != page->mapping)
 			return -EFAULT;
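Note on the rmap.c hunk: the strict vma->anon_vma == page_anon_vma(page) test is dropped and the comment now tells the caller to verify that the page belongs to the vma, likely because a vma can be associated with more than one anon_vma via anon_vma chains, so the equality test could reject pages it should accept. For anonymous pages, what remains is only the address arithmetic done by vma_address() (named in the hunk header): a page at file offset pgoff maps at vm_start plus its distance from the offset the mapping starts at. A standalone sketch of that calculation, with a hypothetical struct vma stand-in and 4 KiB pages assumed:

#include <stdio.h>

#define PAGE_SHIFT 12

struct vma {			/* stand-in for vm_area_struct */
	unsigned long vm_start;	/* first user address of the mapping */
	unsigned long vm_pgoff;	/* file page offset the mapping starts at */
};

static unsigned long vma_address(unsigned long pgoff, const struct vma *v)
{
	return v->vm_start + ((pgoff - v->vm_pgoff) << PAGE_SHIFT);
}

int main(void)
{
	struct vma v = { .vm_start = 0x700000000000UL, .vm_pgoff = 4 };
	/* page 6 of the file sits two pages into the mapping */
	printf("0x%lx\n", vma_address(6, &v));	/* 0x700000002000 */
	return 0;
}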
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2153,7 +2153,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 	int local_node;
 
 	if (slab_state >= UP && (s < kmalloc_caches ||
-			s > kmalloc_caches + KMALLOC_CACHES))
+			s >= kmalloc_caches + KMALLOC_CACHES))
 		local_node = page_to_nid(virt_to_page(s));
 	else
 		local_node = 0;
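Note on the slub.c hunk: this is the standard half-open interval bounds check. An array of N elements starting at base occupies [base, base + N), so a pointer s lies outside it exactly when s < base || s >= base + N; the old "s >" comparison wrongly classified the one-past-the-end address as inside the static kmalloc_caches array. A standalone illustration:

#include <stdio.h>

#define N 8

static int outside(const int *s, const int *base)
{
	/* fixed comparison: one-past-the-end counts as outside */
	return s < base || s >= base + N;
}

int main(void)
{
	int arr[N];
	printf("%d\n", outside(&arr[0], arr));     /* 0: first element  */
	printf("%d\n", outside(&arr[N - 1], arr)); /* 0: last element   */
	printf("%d\n", outside(arr + N, arr));     /* 1: past the end   */
	return 0;
}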