-rw-r--r--  include/linux/mempolicy.h | 10
-rw-r--r--  kernel/exit.c             |  2
-rw-r--r--  kernel/fork.c             |  2
-rw-r--r--  mm/hugetlb.c              |  2
-rw-r--r--  mm/mempolicy.c            | 26
-rw-r--r--  mm/mmap.c                 |  6
-rw-r--r--  mm/shmem.c                |  4
7 files changed, 26 insertions(+), 26 deletions(-)
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 319fd342b1b7..507bf5e29f24 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -71,7 +71,7 @@ struct mm_struct;
  *
  * Freeing policy:
  * Mempolicy objects are reference counted.  A mempolicy will be freed when
- * mpol_free() decrements the reference count to zero.
+ * mpol_put() decrements the reference count to zero.
  *
  * Copying policy objects:
  * mpol_copy() allocates a new mempolicy and copies the specified mempolicy
@@ -98,11 +98,11 @@ struct mempolicy {
  * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
  */
 
-extern void __mpol_free(struct mempolicy *pol);
-static inline void mpol_free(struct mempolicy *pol)
+extern void __mpol_put(struct mempolicy *pol);
+static inline void mpol_put(struct mempolicy *pol)
 {
 	if (pol)
-		__mpol_free(pol);
+		__mpol_put(pol);
 }
 
 extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
@@ -190,7 +190,7 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
 	return 1;
 }
 
-static inline void mpol_free(struct mempolicy *p)
+static inline void mpol_put(struct mempolicy *p)
 {
 }
 
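
As background for the rename in the header above: mpol_put() follows the kernel's usual get/put reference-counting convention (cf. kref_get()/kref_put()), with a NULL-tolerant inline fast path that only falls through to the slow-path destructor when a policy is actually present. The following is a standalone userspace C sketch of that pattern, not part of this patch; struct policy, policy_get() and policy_put() are purely illustrative names.

#include <stdatomic.h>
#include <stdlib.h>

/* Illustrative stand-in for struct mempolicy and its reference count. */
struct policy {
	atomic_int refcnt;
};

/* Slow path: only reached for a non-NULL pointer; frees the object
 * when the last reference is dropped (cf. __mpol_put()). */
static void __policy_put(struct policy *p)
{
	if (atomic_fetch_sub(&p->refcnt, 1) != 1)
		return;			/* other references remain */
	free(p);			/* last reference gone: destroy */
}

/* Fast path: NULL-safe inline wrapper (cf. mpol_put()). */
static inline void policy_put(struct policy *p)
{
	if (p)
		__policy_put(p);
}

/* Take an extra reference (cf. mpol_get()). */
static inline struct policy *policy_get(struct policy *p)
{
	if (p)
		atomic_fetch_add(&p->refcnt, 1);
	return p;
}

int main(void)
{
	struct policy *p = malloc(sizeof(*p));

	atomic_init(&p->refcnt, 1);	/* creator holds the first reference */
	policy_get(p);			/* a second user takes a reference */
	policy_put(p);			/* ...and drops it: object survives */
	policy_put(p);			/* creator drops the last one: freed */
	policy_put(NULL);		/* NULL is tolerated, like mpol_put() */
	return 0;
}
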
diff --git a/kernel/exit.c b/kernel/exit.c
index 97f609f574b1..2a9d98c641ac 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -967,7 +967,7 @@ NORET_TYPE void do_exit(long code)
 	proc_exit_connector(tsk);
 	exit_notify(tsk, group_dead);
 #ifdef CONFIG_NUMA
-	mpol_free(tsk->mempolicy);
+	mpol_put(tsk->mempolicy);
 	tsk->mempolicy = NULL;
 #endif
 #ifdef CONFIG_FUTEX
diff --git a/kernel/fork.c b/kernel/fork.c
index c674aa8d3c31..1a5ae2084574 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1374,7 +1374,7 @@ bad_fork_cleanup_security:
 	security_task_free(p);
 bad_fork_cleanup_policy:
 #ifdef CONFIG_NUMA
-	mpol_free(p->mempolicy);
+	mpol_put(p->mempolicy);
 bad_fork_cleanup_cgroup:
 #endif
 	cgroup_exit(p, cgroup_callbacks_done);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8deae4eb9696..53afa8c76ada 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -116,7 +116,7 @@ static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
 			break;
 		}
 	}
-	mpol_free(mpol);	/* unref if mpol !NULL */
+	mpol_put(mpol);	/* unref if mpol !NULL */
 	return page;
 }
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index c1b907789d84..ce2c5b6bf9f8 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -529,7 +529,7 @@ static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
 	if (!err) {
 		mpol_get(new);
 		vma->vm_policy = new;
-		mpol_free(old);
+		mpol_put(old);
 	}
 	return err;
 }
@@ -595,7 +595,7 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
 	new = mpol_new(mode, flags, nodes);
 	if (IS_ERR(new))
 		return PTR_ERR(new);
-	mpol_free(current->mempolicy);
+	mpol_put(current->mempolicy);
 	current->mempolicy = new;
 	mpol_set_task_struct_flag();
 	if (new && new->policy == MPOL_INTERLEAVE &&
@@ -948,7 +948,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 	}
 
 	up_write(&mm->mmap_sem);
-	mpol_free(new);
+	mpol_put(new);
 	return err;
 }
 
@@ -1446,14 +1446,14 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
 		nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
 		if (unlikely(pol != &default_policy &&
 				pol != current->mempolicy))
-			__mpol_free(pol);	/* finished with pol */
+			__mpol_put(pol);	/* finished with pol */
 		return node_zonelist(nid, gfp_flags);
 	}
 
 	zl = zonelist_policy(GFP_HIGHUSER, pol);
 	if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
 		if (pol->policy != MPOL_BIND)
-			__mpol_free(pol);	/* finished with pol */
+			__mpol_put(pol);	/* finished with pol */
 		else
 			*mpol = pol;	/* unref needed after allocation */
 	}
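
The two branches above, together with the mm/hugetlb.c hunk earlier, show the one subtle case in this API: when the looked-up policy is a shared or other task's MPOL_BIND policy, huge_zonelist() does not drop the reference itself but hands it back through *mpol ("unref needed after allocation"), and the caller, dequeue_huge_page_vma(), finishes with an unconditional mpol_put(mpol). A standalone userspace C sketch of that reference hand-off, with purely illustrative names (pick_node(), allocate_from(), needs_late_unref) that are not part of the patch, might look like this:

#include <stdatomic.h>
#include <stdlib.h>

struct policy {
	atomic_int refcnt;
	int needs_late_unref;		/* stands in for "policy == MPOL_BIND" */
	int nid;			/* stands in for the zonelist result */
};

static void __policy_put(struct policy *p)
{
	if (atomic_fetch_sub(&p->refcnt, 1) == 1)
		free(p);		/* last reference dropped */
}

static inline void policy_put(struct policy *p)
{
	if (p)
		__policy_put(p);
}

/*
 * Callee (cf. huge_zonelist()): consumes the reference it was given,
 * either by dropping it here or by handing it back through *held for
 * the caller to drop once the allocation is done.
 */
static int pick_node(struct policy *p, struct policy **held)
{
	int nid = p->nid;

	*held = NULL;
	if (p->needs_late_unref)
		*held = p;		/* unref needed after allocation */
	else
		policy_put(p);		/* finished with p */
	return nid;
}

/* Caller (cf. dequeue_huge_page_vma()): always drop what was handed back. */
static int allocate_from(struct policy *p)
{
	struct policy *held;
	int nid = pick_node(p, &held);

	/* ... an allocation that may still consult 'held' would go here ... */
	policy_put(held);		/* unref if held != NULL */
	return nid;
}

int main(void)
{
	struct policy *p = malloc(sizeof(*p));

	atomic_init(&p->refcnt, 1);
	p->needs_late_unref = 1;
	p->nid = 0;
	return allocate_from(p);
}
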
@@ -1512,7 +1512,7 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
 		if (unlikely(pol != &default_policy &&
 				pol != current->mempolicy))
-			__mpol_free(pol);	/* finished with pol */
+			__mpol_put(pol);	/* finished with pol */
 		return alloc_page_interleave(gfp, 0, nid);
 	}
 	zl = zonelist_policy(gfp, pol);
@@ -1522,7 +1522,7 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
 		 */
 		struct page *page = __alloc_pages_nodemask(gfp, 0,
 				zl, nodemask_policy(gfp, pol));
-		__mpol_free(pol);
+		__mpol_put(pol);
 		return page;
 	}
 	/*
@@ -1624,7 +1624,7 @@ int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
 }
 
 /* Slow path of a mpol destructor. */
-void __mpol_free(struct mempolicy *p)
+void __mpol_put(struct mempolicy *p)
 {
 	if (!atomic_dec_and_test(&p->refcnt))
 		return;
@@ -1720,7 +1720,7 @@ static void sp_delete(struct shared_policy *sp, struct sp_node *n)
 {
 	pr_debug("deleting %lx-l%lx\n", n->start, n->end);
 	rb_erase(&n->nd, &sp->root);
-	mpol_free(n->policy);
+	mpol_put(n->policy);
 	kmem_cache_free(sn_cache, n);
 }
 
@@ -1780,7 +1780,7 @@ restart:
 	sp_insert(sp, new);
 	spin_unlock(&sp->lock);
 	if (new2) {
-		mpol_free(new2->policy);
+		mpol_put(new2->policy);
 		kmem_cache_free(sn_cache, new2);
 	}
 	return 0;
@@ -1805,7 +1805,7 @@ void mpol_shared_policy_init(struct shared_policy *info, unsigned short policy,
 			/* Policy covers entire file */
 			pvma.vm_end = TASK_SIZE;
 			mpol_set_shared_policy(info, &pvma, newpol);
-			mpol_free(newpol);
+			mpol_put(newpol);
 		}
 	}
 }
@@ -1848,7 +1848,7 @@ void mpol_free_shared_policy(struct shared_policy *p)
 		n = rb_entry(next, struct sp_node, nd);
 		next = rb_next(&n->nd);
 		rb_erase(&n->nd, &p->root);
-		mpol_free(n->policy);
+		mpol_put(n->policy);
 		kmem_cache_free(sn_cache, n);
 	}
 	spin_unlock(&p->lock);
@@ -2068,7 +2068,7 @@ int show_numa_map(struct seq_file *m, void *v)
 	 * unref shared or other task's mempolicy
 	 */
 	if (pol != &default_policy && pol != current->mempolicy)
-		__mpol_free(pol);
+		__mpol_put(pol);
 
 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
 
diff --git a/mm/mmap.c b/mm/mmap.c
index 6aaf657adb87..36c85e04fa93 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -232,7 +232,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
 		vma->vm_ops->close(vma);
 	if (vma->vm_file)
 		fput(vma->vm_file);
-	mpol_free(vma_policy(vma));
+	mpol_put(vma_policy(vma));
 	kmem_cache_free(vm_area_cachep, vma);
 	return next;
 }
@@ -626,7 +626,7 @@ again: remove_next = 1 + (end > next->vm_end);
 		if (file)
 			fput(file);
 		mm->map_count--;
-		mpol_free(vma_policy(next));
+		mpol_put(vma_policy(next));
 		kmem_cache_free(vm_area_cachep, next);
 		/*
 		 * In mprotect's case 6 (see comments on vma_merge),
@@ -1182,7 +1182,7 @@ munmap_back:
 
 	if (file && vma_merge(mm, prev, addr, vma->vm_end,
 			vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
-		mpol_free(vma_policy(vma));
+		mpol_put(vma_policy(vma));
 		kmem_cache_free(vm_area_cachep, vma);
 		fput(file);
 	} else {
diff --git a/mm/shmem.c b/mm/shmem.c
index 177c7a7d2bb3..5326876d814d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1196,7 +1196,7 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
 	pvma.vm_ops = NULL;
 	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
 	page = swapin_readahead(entry, gfp, &pvma, 0);
-	mpol_free(pvma.vm_policy);
+	mpol_put(pvma.vm_policy);
 	return page;
 }
 
@@ -1212,7 +1212,7 @@ static struct page *shmem_alloc_page(gfp_t gfp,
 	pvma.vm_ops = NULL;
 	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
 	page = alloc_page_vma(gfp, &pvma, 0);
-	mpol_free(pvma.vm_policy);
+	mpol_put(pvma.vm_policy);
 	return page;
 }
 #else	/* !CONFIG_NUMA */