about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2014-09-24 13:31:50 -0400
committerTejun Heo <tj@kernel.org>2014-09-24 13:31:50 -0400
commit2aad2a86f6685c10360ec8a5a55eb9ab7059cb72 (patch)
tree85da25f36ba0c8158becdc8ba940201652cf30ce
parentf47ad45784611297b699f3dffb6c7222b76afe64 (diff)
percpu_ref: add PERCPU_REF_INIT_* flags
With the recent addition of percpu_ref_reinit(), percpu_ref now can be used as a persistent switch which can be turned on and off repeatedly where turning off maps to killing the ref and waiting for it to drain; however, there currently isn't a way to initialize a percpu_ref in its off (killed and drained) state, which can be inconvenient for certain persistent switch use cases.

Similarly, percpu_ref_switch_to_atomic/percpu() allow dynamic selection of operation mode; however, currently a newly initialized percpu_ref is always in percpu mode making it impossible to avoid the latency overhead of switching to atomic mode.

This patch adds @flags to percpu_ref_init() and implements the following flags.

* PERCPU_REF_INIT_ATOMIC : start ref in atomic mode
* PERCPU_REF_INIT_DEAD : start ref killed and drained

These flags should be able to serve the above two use cases.

v2: target_core_tpg.c conversion was missing. Fixed.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
-rw-r--r--block/blk-mq.c2
-rw-r--r--drivers/target/target_core_tpg.c2
-rw-r--r--fs/aio.c4
-rw-r--r--include/linux/percpu-refcount.h18
-rw-r--r--kernel/cgroup.c7
-rw-r--r--lib/percpu-refcount.c23
6 files changed, 43 insertions, 13 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 44a78ae3f899..d85fe01c44ef 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1796,7 +1796,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1796 goto err_hctxs; 1796 goto err_hctxs;
1797 1797
1798 if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release, 1798 if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
1799 GFP_KERNEL)) 1799 0, GFP_KERNEL))
1800 goto err_map; 1800 goto err_map;
1801 1801
1802 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); 1802 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 4ab6da338585..be783f717f19 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -819,7 +819,7 @@ int core_tpg_add_lun(
819{ 819{
820 int ret; 820 int ret;
821 821
822 ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 822 ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
823 GFP_KERNEL); 823 GFP_KERNEL);
824 if (ret < 0) 824 if (ret < 0)
825 return ret; 825 return ret;
diff --git a/fs/aio.c b/fs/aio.c
index 8d217ed04e6e..84a751005f5b 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -661,10 +661,10 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
661 661
662 INIT_LIST_HEAD(&ctx->active_reqs); 662 INIT_LIST_HEAD(&ctx->active_reqs);
663 663
664 if (percpu_ref_init(&ctx->users, free_ioctx_users, GFP_KERNEL)) 664 if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
665 goto err; 665 goto err;
666 666
667 if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, GFP_KERNEL)) 667 if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
668 goto err; 668 goto err;
669 669
670 ctx->cpu = alloc_percpu(struct kioctx_cpu); 670 ctx->cpu = alloc_percpu(struct kioctx_cpu);
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index cd7e20f0fe47..b0293f268cd2 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -63,6 +63,21 @@ enum {
63 __PERCPU_REF_FLAG_BITS = 2, 63 __PERCPU_REF_FLAG_BITS = 2,
64}; 64};
65 65
66/* @flags for percpu_ref_init() */
67enum {
68 /*
69 * Start w/ ref == 1 in atomic mode. Can be switched to percpu
70 * operation using percpu_ref_switch_to_percpu().
71 */
72 PERCPU_REF_INIT_ATOMIC = 1 << 0,
73
74 /*
75 * Start dead w/ ref == 0 in atomic mode. Must be revived with
76 * percpu_ref_reinit() before used. Implies INIT_ATOMIC.
77 */
78 PERCPU_REF_INIT_DEAD = 1 << 1,
79};
80
66struct percpu_ref { 81struct percpu_ref {
67 atomic_long_t count; 82 atomic_long_t count;
68 /* 83 /*
@@ -76,7 +91,8 @@ struct percpu_ref {
76}; 91};
77 92
78int __must_check percpu_ref_init(struct percpu_ref *ref, 93int __must_check percpu_ref_init(struct percpu_ref *ref,
79 percpu_ref_func_t *release, gfp_t gfp); 94 percpu_ref_func_t *release, unsigned int flags,
95 gfp_t gfp);
80void percpu_ref_exit(struct percpu_ref *ref); 96void percpu_ref_exit(struct percpu_ref *ref);
81void percpu_ref_switch_to_atomic(struct percpu_ref *ref, 97void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
82 percpu_ref_func_t *confirm_switch); 98 percpu_ref_func_t *confirm_switch);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index a99d504294de..753df01a9831 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1634,7 +1634,8 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned int ss_mask)
1634 goto out; 1634 goto out;
1635 root_cgrp->id = ret; 1635 root_cgrp->id = ret;
1636 1636
1637 ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release, GFP_KERNEL); 1637 ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release, 0,
1638 GFP_KERNEL);
1638 if (ret) 1639 if (ret)
1639 goto out; 1640 goto out;
1640 1641
@@ -4510,7 +4511,7 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
4510 4511
4511 init_and_link_css(css, ss, cgrp); 4512 init_and_link_css(css, ss, cgrp);
4512 4513
4513 err = percpu_ref_init(&css->refcnt, css_release, GFP_KERNEL); 4514 err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL);
4514 if (err) 4515 if (err)
4515 goto err_free_css; 4516 goto err_free_css;
4516 4517
@@ -4583,7 +4584,7 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
4583 goto out_unlock; 4584 goto out_unlock;
4584 } 4585 }
4585 4586
4586 ret = percpu_ref_init(&cgrp->self.refcnt, css_release, GFP_KERNEL); 4587 ret = percpu_ref_init(&cgrp->self.refcnt, css_release, 0, GFP_KERNEL);
4587 if (ret) 4588 if (ret)
4588 goto out_free_cgrp; 4589 goto out_free_cgrp;
4589 4590
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 5a6d43baccc5..ed280fb1e5b5 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -45,27 +45,40 @@ static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
45 * percpu_ref_init - initialize a percpu refcount 45 * percpu_ref_init - initialize a percpu refcount
46 * @ref: percpu_ref to initialize 46 * @ref: percpu_ref to initialize
47 * @release: function which will be called when refcount hits 0 47 * @release: function which will be called when refcount hits 0
48 * @flags: PERCPU_REF_INIT_* flags
48 * @gfp: allocation mask to use 49 * @gfp: allocation mask to use
49 * 50 *
50 * Initializes the refcount in single atomic counter mode with a refcount of 1; 51 * Initializes @ref. If @flags is zero, @ref starts in percpu mode with a
51 * analagous to atomic_long_set(ref, 1). 52 * refcount of 1; analagous to atomic_long_set(ref, 1). See the
53 * definitions of PERCPU_REF_INIT_* flags for flag behaviors.
52 * 54 *
53 * Note that @release must not sleep - it may potentially be called from RCU 55 * Note that @release must not sleep - it may potentially be called from RCU
54 * callback context by percpu_ref_kill(). 56 * callback context by percpu_ref_kill().
55 */ 57 */
56int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release, 58int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
57 gfp_t gfp) 59 unsigned int flags, gfp_t gfp)
58{ 60{
59 size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS, 61 size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
60 __alignof__(unsigned long)); 62 __alignof__(unsigned long));
61 63 unsigned long start_count = 0;
62 atomic_long_set(&ref->count, 1 + PERCPU_COUNT_BIAS);
63 64
64 ref->percpu_count_ptr = (unsigned long) 65 ref->percpu_count_ptr = (unsigned long)
65 __alloc_percpu_gfp(sizeof(unsigned long), align, gfp); 66 __alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
66 if (!ref->percpu_count_ptr) 67 if (!ref->percpu_count_ptr)
67 return -ENOMEM; 68 return -ENOMEM;
68 69
70 if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD))
71 ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
72 else
73 start_count += PERCPU_COUNT_BIAS;
74
75 if (flags & PERCPU_REF_INIT_DEAD)
76 ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
77 else
78 start_count++;
79
80 atomic_long_set(&ref->count, start_count);
81
69 ref->release = release; 82 ref->release = release;
70 return 0; 83 return 0;
71} 84}