 include/linux/mempolicy.h | 14 +++++++-------
 kernel/cpuset.c           |  4 ++--
 kernel/fork.c             |  4 ++--
 mm/mempolicy.c            |  6 +++---
 mm/mmap.c                 |  4 ++--
 5 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 507bf5e29f24..5e19c2275a6f 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -73,10 +73,10 @@ struct mm_struct;
  * Mempolicy objects are reference counted. A mempolicy will be freed when
  * mpol_put() decrements the reference count to zero.
  *
- * Copying policy objects:
- * mpol_copy() allocates a new mempolicy and copies the specified mempolicy
+ * Duplicating policy objects:
+ * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
  * to the new storage. The reference count of the new object is initialized
- * to 1, representing the caller of mpol_copy().
+ * to 1, representing the caller of mpol_dup().
  */
 struct mempolicy {
 	atomic_t refcnt;
@@ -105,11 +105,11 @@ static inline void mpol_put(struct mempolicy *pol)
 		__mpol_put(pol);
 }
 
-extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
-static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
+extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
+static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
 {
 	if (pol)
-		pol = __mpol_copy(pol);
+		pol = __mpol_dup(pol);
 	return pol;
 }
 
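The inline wrapper above passes NULL straight through and sends any real policy to the slow path, so the reference-count contract documented in the header stays simple: whoever calls mpol_dup() owns exactly one reference on the copy and drops it with mpol_put(). A minimal sketch of that calling convention, assuming only the kernel-internal API above (the wrapper function and the caller-supplied source policy are illustrative, not from this patch):

	#include <linux/mempolicy.h>
	#include <linux/err.h>

	static int with_duplicated_policy(struct mempolicy *src)
	{
		struct mempolicy *pol = mpol_dup(src);	/* NULL in, NULL out */

		if (IS_ERR(pol))
			return PTR_ERR(pol);	/* e.g. -ENOMEM from __mpol_dup() */

		/* ... allocate pages or set up a vma under 'pol' ... */

		mpol_put(pol);	/* drop the reference mpol_dup() handed us */
		return 0;
	}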
@@ -198,7 +198,7 @@ static inline void mpol_get(struct mempolicy *pol)
 {
 }
 
-static inline struct mempolicy *mpol_copy(struct mempolicy *old)
+static inline struct mempolicy *mpol_dup(struct mempolicy *old)
 {
 	return NULL;
 }
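With the !CONFIG_NUMA stub returning NULL, a policy-less build behaves like a NUMA build with no policy set: mpol_dup() yields NULL either way, so a caller such as dup_mmap() below can run the same duplicate-and-check sequence in both configurations.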
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index c9923e3c9a3b..024888bb9814 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -941,7 +941,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 	cs->mems_generation = cpuset_mems_generation++;
 	mutex_unlock(&callback_mutex);
 
-	cpuset_being_rebound = cs;		/* causes mpol_copy() rebind */
+	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */
 
 	fudge = 10;				/* spare mmarray[] slots */
 	fudge += cpus_weight(cs->cpus_allowed);	/* imagine one fork-bomb/cpu */
@@ -992,7 +992,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 	 * rebind the vma mempolicies of each mm in mmarray[] to their
 	 * new cpuset, and release that mm. The mpol_rebind_mm()
 	 * call takes mmap_sem, which we couldn't take while holding
-	 * tasklist_lock. Forks can happen again now - the mpol_copy()
+	 * tasklist_lock. Forks can happen again now - the mpol_dup()
 	 * cpuset_being_rebound check will catch such forks, and rebind
 	 * their vma mempolicies too. Because we still hold the global
 	 * cgroup_mutex, we know that no other rebind effort will
diff --git a/kernel/fork.c b/kernel/fork.c
index 1a5ae2084574..6067e429f281 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -279,7 +279,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		if (!tmp)
 			goto fail_nomem;
 		*tmp = *mpnt;
-		pol = mpol_copy(vma_policy(mpnt));
+		pol = mpol_dup(vma_policy(mpnt));
 		retval = PTR_ERR(pol);
 		if (IS_ERR(pol))
 			goto fail_nomem_policy;
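Note the idiom at this call site: retval takes PTR_ERR(pol) unconditionally, and is only consulted after IS_ERR(pol) confirms the pointer really is an encoded errno. A standalone sketch of the pattern, with a hypothetical make_thing() allocator standing in for mpol_dup():

	#include <linux/err.h>
	#include <linux/slab.h>

	struct thing { int payload; };

	/* Hypothetical allocator: returns a valid pointer or ERR_PTR(-errno). */
	static struct thing *make_thing(void)
	{
		struct thing *t = kmalloc(sizeof(*t), GFP_KERNEL);

		return t ? t : ERR_PTR(-ENOMEM);
	}

	static int use_thing(void)
	{
		struct thing *t = make_thing();
		int retval = PTR_ERR(t);	/* meaningless unless t is an ERR_PTR */

		if (IS_ERR(t))
			return retval;		/* retval already holds the -errno */

		/* ... use t ... */
		kfree(t);
		return 0;
	}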
@@ -1116,7 +1116,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->audit_context = NULL;
 	cgroup_fork(p);
 #ifdef CONFIG_NUMA
-	p->mempolicy = mpol_copy(p->mempolicy);
+	p->mempolicy = mpol_dup(p->mempolicy);
 	if (IS_ERR(p->mempolicy)) {
 		retval = PTR_ERR(p->mempolicy);
 		p->mempolicy = NULL;
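Resetting p->mempolicy to NULL before bailing out matters here: the ERR_PTR cookie is not a real policy, and clearing the field keeps the teardown of the half-built task from handing it to mpol_put().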
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index ce2c5b6bf9f8..e9fc1c1ae66c 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1566,15 +1566,15 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 EXPORT_SYMBOL(alloc_pages_current);
 
 /*
- * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
+ * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
  * rebinds the mempolicy its copying by calling mpol_rebind_policy()
  * with the mems_allowed returned by cpuset_mems_allowed(). This
  * keeps mempolicies cpuset relative after its cpuset moves. See
  * further kernel/cpuset.c update_nodemask().
  */
 
-/* Slow path of a mempolicy copy */
-struct mempolicy *__mpol_copy(struct mempolicy *old)
+/* Slow path of a mempolicy duplicate */
+struct mempolicy *__mpol_dup(struct mempolicy *old)
 {
 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
 
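Only the allocation line of the slow path is visible in this hunk. Pieced together from the comment block above and the refcount contract in mempolicy.h, the rest of __mpol_dup() plausibly reads as follows; this is a reconstruction, not text from the patch, and the current_cpuset_is_being_rebound() helper is assumed from the comment's description of the cpuset_being_rebound check:

	/* Reconstruction of the remainder of __mpol_dup(); not verbatim. */
	struct mempolicy *__mpol_dup(struct mempolicy *old)
	{
		struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

		if (!new)
			return ERR_PTR(-ENOMEM);	/* callers check with IS_ERR() */
		if (current_cpuset_is_being_rebound()) {
			/* keep the policy cpuset-relative; see update_nodemask() */
			nodemask_t mems = cpuset_mems_allowed(current);
			mpol_rebind_policy(old, &mems);
		}
		*new = *old;			/* shallow copy of the old policy */
		atomic_set(&new->refcnt, 1);	/* one reference, owned by the caller */
		return new;
	}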
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1810,7 +1810,7 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
 	}
 
-	pol = mpol_copy(vma_policy(vma));
+	pol = mpol_dup(vma_policy(vma));
 	if (IS_ERR(pol)) {
 		kmem_cache_free(vm_area_cachep, new);
 		return PTR_ERR(pol);
@@ -2126,7 +2126,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 	new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (new_vma) {
 		*new_vma = *vma;
-		pol = mpol_copy(vma_policy(vma));
+		pol = mpol_dup(vma_policy(vma));
 		if (IS_ERR(pol)) {
 			kmem_cache_free(vm_area_cachep, new_vma);
 			return NULL;
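The two mm/mmap.c call sites handle failure differently: split_vma() propagates PTR_ERR(pol) to its caller, while copy_vma() collapses any error into a NULL return. Both free the partially initialized vma first, so the slab object is not leaked either way.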