author		Oleg Nesterov <oleg@redhat.com>			2013-09-11 17:20:14 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-11 18:57:00 -0400
commit		ef0855d334e1e4af7c3e0c42146a8479ea14a5ab (patch)
tree		5955b0424bb392e1949acc0ad5066cb461bef867
parent		c07303c0af38ffb1e5fd9b5ff37d0798298a7acf (diff)
mm: mempolicy: turn vma_set_policy() into vma_dup_policy()
Simple cleanup. Every user of vma_set_policy() does the same work, which
looks a bit annoying. Add a new trivial helper, vma_dup_policy(), that does
mpol_dup() + vma_set_policy() to simplify the callers.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/linux/mempolicy.h	 9
-rw-r--r--	kernel/fork.c			 9
-rw-r--r--	mm/mempolicy.c			10
-rw-r--r--	mm/mmap.c			17
4 files changed, 25 insertions(+), 20 deletions(-)
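For readability, here is the caller-side pattern the patch consolidates, condensed
from the kernel/fork.c hunk below (illustrative sketch only, not a standalone build;
tmp and mpnt are the destination and source vmas in dup_mmap()):

	/* Before: each caller open-codes mpol_dup(), the error check, and vma_set_policy(). */
	pol = mpol_dup(vma_policy(mpnt));
	retval = PTR_ERR(pol);
	if (IS_ERR(pol))
		goto fail_nomem_policy;
	vma_set_policy(tmp, pol);

	/* After: one helper duplicates the source vma's policy and assigns it to the new vma. */
	retval = vma_dup_policy(mpnt, tmp);
	if (retval)
		goto fail_nomem_policy;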
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 0d7df39a5885..b2f897789838 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -91,7 +91,6 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
 }
 
 #define vma_policy(vma) ((vma)->vm_policy)
-#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))
 
 static inline void mpol_get(struct mempolicy *pol)
 {
@@ -126,6 +125,7 @@ struct shared_policy {
 	spinlock_t lock;
 };
 
+int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
 int mpol_set_shared_policy(struct shared_policy *info,
 				struct vm_area_struct *vma,
@@ -240,7 +240,12 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
 }
 
 #define vma_policy(vma) NULL
-#define vma_set_policy(vma, pol) do {} while(0)
+
+static inline int
+vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
+{
+	return 0;
+}
 
 static inline void numa_policy_init(void)
 {
diff --git a/kernel/fork.c b/kernel/fork.c
index 84703db06cf3..81ccb4f010c2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -351,7 +351,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 	struct rb_node **rb_link, *rb_parent;
 	int retval;
 	unsigned long charge;
-	struct mempolicy *pol;
 
 	uprobe_start_dup_mmap();
 	down_write(&oldmm->mmap_sem);
@@ -400,11 +399,9 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 			goto fail_nomem;
 		*tmp = *mpnt;
 		INIT_LIST_HEAD(&tmp->anon_vma_chain);
-		pol = mpol_dup(vma_policy(mpnt));
-		retval = PTR_ERR(pol);
-		if (IS_ERR(pol))
+		retval = vma_dup_policy(mpnt, tmp);
+		if (retval)
 			goto fail_nomem_policy;
-		vma_set_policy(tmp, pol);
 		tmp->vm_mm = mm;
 		if (anon_vma_fork(tmp, mpnt))
 			goto fail_nomem_anon_vma_fork;
@@ -472,7 +469,7 @@ out:
 	uprobe_end_dup_mmap();
 	return retval;
 fail_nomem_anon_vma_fork:
-	mpol_put(pol);
+	mpol_put(vma_policy(tmp));
 fail_nomem_policy:
 	kmem_cache_free(vm_area_cachep, tmp);
 fail_nomem:
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4baf12e534d1..6b1d426731ae 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2065,6 +2065,16 @@ retry_cpuset:
 }
 EXPORT_SYMBOL(alloc_pages_current);
 
+int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
+{
+	struct mempolicy *pol = mpol_dup(vma_policy(src));
+
+	if (IS_ERR(pol))
+		return PTR_ERR(pol);
+	dst->vm_policy = pol;
+	return 0;
+}
+
 /*
  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
  * rebinds the mempolicy its copying by calling mpol_rebind_policy()
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2380,7 +2380,6 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 	      unsigned long addr, int new_below)
 {
-	struct mempolicy *pol;
 	struct vm_area_struct *new;
 	int err = -ENOMEM;
 
@@ -2404,12 +2403,9 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
 	}
 
-	pol = mpol_dup(vma_policy(vma));
-	if (IS_ERR(pol)) {
-		err = PTR_ERR(pol);
+	err = vma_dup_policy(vma, new);
+	if (err)
 		goto out_free_vma;
-	}
-	vma_set_policy(new, pol);
 
 	if (anon_vma_clone(new, vma))
 		goto out_free_mpol;
@@ -2437,7 +2433,7 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 		fput(new->vm_file);
 	unlink_anon_vmas(new);
 out_free_mpol:
-	mpol_put(pol);
+	mpol_put(vma_policy(new));
 out_free_vma:
 	kmem_cache_free(vm_area_cachep, new);
 out_err:
@@ -2780,7 +2776,6 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *new_vma, *prev;
 	struct rb_node **rb_link, *rb_parent;
-	struct mempolicy *pol;
 	bool faulted_in_anon_vma = true;
 
 	/*
@@ -2825,10 +2820,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 			new_vma->vm_start = addr;
 			new_vma->vm_end = addr + len;
 			new_vma->vm_pgoff = pgoff;
-			pol = mpol_dup(vma_policy(vma));
-			if (IS_ERR(pol))
+			if (vma_dup_policy(vma, new_vma))
 				goto out_free_vma;
-			vma_set_policy(new_vma, pol);
 			INIT_LIST_HEAD(&new_vma->anon_vma_chain);
 			if (anon_vma_clone(new_vma, vma))
 				goto out_free_mempol;
@@ -2843,7 +2836,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 	return new_vma;
 
 out_free_mempol:
-	mpol_put(pol);
+	mpol_put(vma_policy(new_vma));
 out_free_vma:
 	kmem_cache_free(vm_area_cachep, new_vma);
 	return NULL;