author     Tejun Heo <tj@kernel.org>    2014-06-28 08:10:14 -0400
committer  Tejun Heo <tj@kernel.org>    2014-06-28 08:10:14 -0400
commit     9a1049da9bd2cd83fe11d46433e603c193aa9c71 (patch)
tree       007d2843abf0f064c294659334e69e297ffc2b74
parent     7d742075120deb831c7b94c268ca20d409e91d60 (diff)
percpu-refcount: require percpu_ref to be exited explicitly
Currently, a percpu_ref undoes percpu_ref_init() automatically by
freeing the allocated percpu area when the percpu_ref is killed. While
seemingly convenient, this has the following niggles.

* It's impossible to re-init a released reference counter without going
  through re-allocation.

* In a similar vein, it's impossible to initialize a percpu_ref count
  with static percpu variables.

* We need and have an explicit destructor anyway for failure paths -
  percpu_ref_cancel_init().

This patch removes the automatic percpu counter freeing in
percpu_ref_kill_rcu() and repurposes percpu_ref_cancel_init() into a
generic destructor now named percpu_ref_exit(). percpu_ref_destroy()
was considered but it gets confusing with percpu_ref_kill() while
"exit" clearly indicates that it's the counterpart of
percpu_ref_init().

All percpu_ref_cancel_init() users are updated to invoke
percpu_ref_exit() instead and explicit percpu_ref_exit() calls are
added to the destruction path of all percpu_ref users.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Benjamin LaHaise <bcrl@kvack.org>
Cc: Kent Overstreet <kmo@daterainc.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Cc: Nicholas A. Bellinger <nab@linux-iscsi.org>
Cc: Li Zefan <lizefan@huawei.com>
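
To illustrate the reworked lifecycle, here is a minimal sketch of a
percpu_ref embedded in a dynamically allocated object under the new
rules; struct foo and the foo_*() functions are hypothetical names for
illustration, not part of this patch:

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct foo {
	struct percpu_ref	ref;
	/* ... other members ... */
};

/* Invoked once the last reference is dropped after percpu_ref_kill(). */
static void foo_release(struct percpu_ref *ref)
{
	struct foo *f = container_of(ref, struct foo, ref);

	/* The kill path no longer frees the percpu area, so the
	 * counterpart of percpu_ref_init() is invoked explicitly. */
	percpu_ref_exit(&f->ref);
	kfree(f);
}

static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	if (percpu_ref_init(&f->ref, foo_release)) {
		kfree(f);
		return NULL;
	}
	return f;
}

static void foo_destroy(struct foo *f)
{
	/* Initiate destruction; foo_release() runs asynchronously once
	 * all in-flight references are gone. */
	percpu_ref_kill(&f->ref);
}

Because the percpu area now survives until percpu_ref_exit(), the same
pattern can also apply to a counter embedded in statically allocated
storage, which the automatic freeing previously ruled out.
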
-rw-r--r--  drivers/target/target_core_tpg.c |  4
-rw-r--r--  fs/aio.c                         |  6
-rw-r--r--  include/linux/percpu-refcount.h  |  6
-rw-r--r--  kernel/cgroup.c                  |  8
-rw-r--r--  lib/percpu-refcount.c            | 33
5 files changed, 24 insertions(+), 33 deletions(-)
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index c036595b17cf..fddfae61222f 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -825,7 +825,7 @@ int core_tpg_add_lun(
 
 	ret = core_dev_export(dev, tpg, lun);
 	if (ret < 0) {
-		percpu_ref_cancel_init(&lun->lun_ref);
+		percpu_ref_exit(&lun->lun_ref);
 		return ret;
 	}
 
@@ -880,5 +880,7 @@ int core_tpg_post_dellun(
 	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
 	spin_unlock(&tpg->tpg_lun_lock);
 
+	percpu_ref_exit(&lun->lun_ref);
+
 	return 0;
 }
diff --git a/fs/aio.c b/fs/aio.c
index 5e0d7f9cb693..ea1bc2e8f4f3 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -506,6 +506,8 @@ static void free_ioctx(struct work_struct *work)
 
 	aio_free_ring(ctx);
 	free_percpu(ctx->cpu);
+	percpu_ref_exit(&ctx->reqs);
+	percpu_ref_exit(&ctx->users);
 	kmem_cache_free(kioctx_cachep, ctx);
 }
 
@@ -715,8 +717,8 @@ err_ctx:
 err:
 	mutex_unlock(&ctx->ring_lock);
 	free_percpu(ctx->cpu);
-	percpu_ref_cancel_init(&ctx->reqs);
-	percpu_ref_cancel_init(&ctx->users);
+	percpu_ref_exit(&ctx->reqs);
+	percpu_ref_exit(&ctx->users);
 	kmem_cache_free(kioctx_cachep, ctx);
 	pr_debug("error allocating ioctx %d\n", err);
 	return ERR_PTR(err);
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 6f8cd4c0546c..0ddd2839ca84 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -57,9 +57,7 @@ struct percpu_ref {
 	atomic_t		count;
 	/*
 	 * The low bit of the pointer indicates whether the ref is in percpu
-	 * mode; if set, then get/put will manipulate the atomic_t (this is a
-	 * hack because we need to keep the pointer around for
-	 * percpu_ref_kill_rcu())
+	 * mode; if set, then get/put will manipulate the atomic_t.
 	 */
 	unsigned long		pcpu_count_ptr;
 	percpu_ref_func_t	*release;
@@ -69,7 +67,7 @@ struct percpu_ref {
 
 int __must_check percpu_ref_init(struct percpu_ref *ref,
 				 percpu_ref_func_t *release);
-void percpu_ref_cancel_init(struct percpu_ref *ref);
+void percpu_ref_exit(struct percpu_ref *ref);
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill);
 
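
For comparison, the init-failure use that percpu_ref_cancel_init() used
to cover now looks roughly like the sketch below; struct bar,
bar_release(), and the bar_setup() step are illustrative assumptions,
not code from this patch:

#include <linux/percpu-refcount.h>

struct bar {
	struct percpu_ref	ref;
	/* ... */
};

static void bar_release(struct percpu_ref *ref)
{
	/* normal shutdown path, reached via percpu_ref_kill() */
}

static int bar_setup(struct bar *b);	/* hypothetical later init step */

static int bar_init(struct bar *b)
{
	int ret;

	ret = percpu_ref_init(&b->ref, bar_release);
	if (ret)
		return ret;

	ret = bar_setup(b);
	if (ret) {
		/* percpu_ref_init() succeeded but a later step failed:
		 * the generic destructor undoes it without ever
		 * invoking bar_release(). */
		percpu_ref_exit(&b->ref);
		return ret;
	}
	return 0;
}
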
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 7868fc3c0bc5..c06aa5e257a8 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1638,7 +1638,7 @@ destroy_root:
 exit_root_id:
 	cgroup_exit_root_id(root);
 cancel_ref:
-	percpu_ref_cancel_init(&root_cgrp->self.refcnt);
+	percpu_ref_exit(&root_cgrp->self.refcnt);
 out:
 	free_cgrp_cset_links(&tmp_links);
 	return ret;
@@ -4133,6 +4133,8 @@ static void css_free_work_fn(struct work_struct *work)
 		container_of(work, struct cgroup_subsys_state, destroy_work);
 	struct cgroup *cgrp = css->cgroup;
 
+	percpu_ref_exit(&css->refcnt);
+
 	if (css->ss) {
 		/* css free path */
 		if (css->parent)
@@ -4330,7 +4332,7 @@ err_list_del:
 err_free_id:
 	cgroup_idr_remove(&ss->css_idr, css->id);
 err_free_percpu_ref:
-	percpu_ref_cancel_init(&css->refcnt);
+	percpu_ref_exit(&css->refcnt);
 err_free_css:
 	call_rcu(&css->rcu_head, css_free_rcu_fn);
 	return err;
@@ -4441,7 +4443,7 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
 out_free_id:
 	cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
 out_cancel_ref:
-	percpu_ref_cancel_init(&cgrp->self.refcnt);
+	percpu_ref_exit(&cgrp->self.refcnt);
 out_free_cgrp:
 	kfree(cgrp);
 out_unlock:
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 94e5b624de64..ac4299120087 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -61,36 +61,25 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
 EXPORT_SYMBOL_GPL(percpu_ref_init);
 
 /**
- * percpu_ref_cancel_init - cancel percpu_ref_init()
- * @ref: percpu_ref to cancel init for
+ * percpu_ref_exit - undo percpu_ref_init()
+ * @ref: percpu_ref to exit
  *
- * Once a percpu_ref is initialized, its destruction is initiated by
- * percpu_ref_kill() and completes asynchronously, which can be painful to
- * do when destroying a half-constructed object in init failure path.
- *
- * This function destroys @ref without invoking @ref->release and the
- * memory area containing it can be freed immediately on return. To
- * prevent accidental misuse, it's required that @ref has finished
- * percpu_ref_init(), whether successful or not, but never used.
- *
- * The weird name and usage restriction are to prevent people from using
- * this function by mistake for normal shutdown instead of
- * percpu_ref_kill().
+ * This function exits @ref. The caller is responsible for ensuring that
+ * @ref is no longer in active use. The usual places to invoke this
+ * function from are the @ref->release() callback or in init failure path
+ * where percpu_ref_init() succeeded but other parts of the initialization
+ * of the embedding object failed.
  */
-void percpu_ref_cancel_init(struct percpu_ref *ref)
+void percpu_ref_exit(struct percpu_ref *ref)
 {
 	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
-	int cpu;
-
-	WARN_ON_ONCE(atomic_read(&ref->count) != 1 + PCPU_COUNT_BIAS);
 
 	if (pcpu_count) {
-		for_each_possible_cpu(cpu)
-			WARN_ON_ONCE(*per_cpu_ptr(pcpu_count, cpu));
 		free_percpu(pcpu_count);
+		ref->pcpu_count_ptr = PCPU_REF_DEAD;
 	}
 }
-EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);
+EXPORT_SYMBOL_GPL(percpu_ref_exit);
 
 static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 {
@@ -102,8 +91,6 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 	for_each_possible_cpu(cpu)
 		count += *per_cpu_ptr(pcpu_count, cpu);
 
-	free_percpu(pcpu_count);
-
 	pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);
 
 	/*