Diffstat (limited to 'lib/percpu-refcount.c')
-rw-r--r-- | lib/percpu-refcount.c | 102 ++++++++++++++++++++++-----------
1 file changed, 70 insertions(+), 32 deletions(-)
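In short, this patch turns the percpu counter pointer into an unsigned long (pcpu_count_ptr) whose low bits carry the PCPU_REF_DEAD flag, replaces percpu_ref_cancel_init() with a general percpu_ref_exit(), and adds percpu_ref_reinit() so a killed and drained ref can be switched back to percpu operation. The exported surface after the patch, as a sketch assembled from the hunks below (presumably declared in include/linux/percpu-refcount.h):

	int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release);
	void percpu_ref_reinit(struct percpu_ref *ref);		/* new */
	void percpu_ref_exit(struct percpu_ref *ref);		/* new; replaces percpu_ref_cancel_init() */
	void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
					 percpu_ref_func_t *confirm_kill);
	void __percpu_ref_kill_expedited(struct percpu_ref *ref);	/* new; temporary blk-mq workaround */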
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 963b7034a51b..a89cf09a8268 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -31,6 +31,11 @@
 
 #define PCPU_COUNT_BIAS	(1U << 31)
 
+static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
+{
+	return (unsigned __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+}
+
 /**
  * percpu_ref_init - initialize a percpu refcount
  * @ref: percpu_ref to initialize
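The new pcpu_count_ptr() helper works because alloc_percpu() returns an address that is at least machine-word aligned, leaving the low bit free to carry the PCPU_REF_DEAD flag. A minimal stand-alone illustration of the same tagged-pointer trick (plain C, not kernel code; DEAD stands in for PCPU_REF_DEAD):

	#include <assert.h>
	#include <stdint.h>
	#include <stdlib.h>

	#define DEAD 1UL				/* flag kept in the pointer's low bit */

	int main(void)
	{
		unsigned *count = malloc(sizeof(*count));	/* malloc is suitably aligned */
		uintptr_t tagged = (uintptr_t)count | DEAD;	/* mark dead, pointer still recoverable */

		assert((unsigned *)(tagged & ~DEAD) == count);	/* masking recovers the pointer */
		free((unsigned *)(tagged & ~DEAD));
		return 0;
	}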
@@ -46,8 +51,8 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
 {
 	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
 
-	ref->pcpu_count = alloc_percpu(unsigned);
-	if (!ref->pcpu_count)
+	ref->pcpu_count_ptr = (unsigned long)alloc_percpu(unsigned);
+	if (!ref->pcpu_count_ptr)
 		return -ENOMEM;
 
 	ref->release = release;
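percpu_ref_init() itself is unchanged apart from the new field type. For context, a hedged sketch of the usual embedding pattern, with a hypothetical struct foo and release callback, showing where the new percpu_ref_exit() slots in:

	#include <linux/percpu-refcount.h>
	#include <linux/slab.h>

	struct foo {				/* hypothetical embedding object */
		struct percpu_ref ref;
	};

	static void foo_release(struct percpu_ref *ref)
	{
		struct foo *foo = container_of(ref, struct foo, ref);

		percpu_ref_exit(&foo->ref);	/* frees the percpu counters */
		kfree(foo);
	}

	static struct foo *foo_create(void)
	{
		struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

		if (!foo)
			return NULL;
		if (percpu_ref_init(&foo->ref, foo_release)) {
			kfree(foo);	/* init itself failed: no percpu_ref_exit() needed */
			return NULL;
		}
		return foo;
	}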
@@ -56,53 +61,71 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
 EXPORT_SYMBOL_GPL(percpu_ref_init);
 
 /**
- * percpu_ref_cancel_init - cancel percpu_ref_init()
- * @ref: percpu_ref to cancel init for
- *
- * Once a percpu_ref is initialized, its destruction is initiated by
- * percpu_ref_kill() and completes asynchronously, which can be painful to
- * do when destroying a half-constructed object in init failure path.
+ * percpu_ref_reinit - re-initialize a percpu refcount
+ * @ref: percpu_ref to re-initialize
  *
- * This function destroys @ref without invoking @ref->release and the
- * memory area containing it can be freed immediately on return. To
- * prevent accidental misuse, it's required that @ref has finished
- * percpu_ref_init(), whether successful or not, but never used.
+ * Re-initialize @ref so that it's in the same state as when it finished
+ * percpu_ref_init(). @ref must have been initialized successfully, killed
+ * and reached 0 but not exited.
  *
- * The weird name and usage restriction are to prevent people from using
- * this function by mistake for normal shutdown instead of
- * percpu_ref_kill().
+ * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
+ * this function is in progress.
  */
-void percpu_ref_cancel_init(struct percpu_ref *ref)
+void percpu_ref_reinit(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count = ref->pcpu_count;
+	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
 	int cpu;
 
-	WARN_ON_ONCE(atomic_read(&ref->count) != 1 + PCPU_COUNT_BIAS);
+	BUG_ON(!pcpu_count);
+	WARN_ON(!percpu_ref_is_zero(ref));
+
+	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+
+	/*
+	 * Restore per-cpu operation. smp_store_release() is paired with
+	 * smp_read_barrier_depends() in __pcpu_ref_alive() and guarantees
+	 * that the zeroing is visible to all percpu accesses which can see
+	 * the following PCPU_REF_DEAD clearing.
+	 */
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(pcpu_count, cpu) = 0;
+
+	smp_store_release(&ref->pcpu_count_ptr,
+			  ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_reinit);
+
+/**
+ * percpu_ref_exit - undo percpu_ref_init()
+ * @ref: percpu_ref to exit
+ *
+ * This function exits @ref. The caller is responsible for ensuring that
+ * @ref is no longer in active use. The usual places to invoke this
+ * function are the @ref->release() callback or the init failure path
+ * where percpu_ref_init() succeeded but other parts of the initialization
+ * of the embedding object failed.
+ */
+void percpu_ref_exit(struct percpu_ref *ref)
+{
+	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
 
 	if (pcpu_count) {
-		for_each_possible_cpu(cpu)
-			WARN_ON_ONCE(*per_cpu_ptr(pcpu_count, cpu));
-		free_percpu(ref->pcpu_count);
+		free_percpu(pcpu_count);
+		ref->pcpu_count_ptr = PCPU_REF_DEAD;
 	}
 }
-EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);
+EXPORT_SYMBOL_GPL(percpu_ref_exit);
 
 static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 {
 	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
-	unsigned __percpu *pcpu_count = ref->pcpu_count;
+	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
 	unsigned count = 0;
 	int cpu;
 
-	/* Mask out PCPU_REF_DEAD */
-	pcpu_count = (unsigned __percpu *)
-		(((unsigned long) pcpu_count) & ~PCPU_STATUS_MASK);
-
 	for_each_possible_cpu(cpu)
 		count += *per_cpu_ptr(pcpu_count, cpu);
 
-	free_percpu(pcpu_count);
-
 	pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);
 
 	/*
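Per the new kernel-doc, percpu_ref_reinit() is only legal on a ref that was initialized, killed, and drained to zero but never exited. A hedged sketch of that freeze/thaw cycle, modeled on the blk_mq_freeze_queue() caller this series targets (the wait queue and function names are illustrative):

	#include <linux/percpu-refcount.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(foo_zero_wq);

	static void foo_ref_release(struct percpu_ref *ref)
	{
		wake_up_all(&foo_zero_wq);	/* count hit 0: freeze is complete */
	}

	static void foo_freeze(struct percpu_ref *ref)
	{
		percpu_ref_kill(ref);		/* fail all further tryget_live() */
		wait_event(foo_zero_wq, percpu_ref_is_zero(ref));
	}

	static void foo_thaw(struct percpu_ref *ref)
	{
		percpu_ref_reinit(ref);		/* killed, drained to 0, not exited: safe */
	}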
@@ -152,13 +175,28 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill)
 {
-	WARN_ONCE(REF_STATUS(ref->pcpu_count) == PCPU_REF_DEAD,
+	WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
 		  "percpu_ref_kill() called more than once!\n");
 
-	ref->pcpu_count = (unsigned __percpu *)
-		(((unsigned long) ref->pcpu_count)|PCPU_REF_DEAD);
+	ref->pcpu_count_ptr |= PCPU_REF_DEAD;
 	ref->confirm_kill = confirm_kill;
 
 	call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
+
+/*
+ * XXX: Temporary kludge to work around SCSI blk-mq stall. Used only by
+ * block/blk-mq.c::blk_mq_freeze_queue(). Will be removed during v3.18
+ * devel cycle. Do not use anywhere else.
+ */
+void __percpu_ref_kill_expedited(struct percpu_ref *ref)
+{
+	WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
+		  "percpu_ref_kill() called more than once on %pf!",
+		  ref->release);
+
+	ref->pcpu_count_ptr |= PCPU_REF_DEAD;
+	synchronize_sched_expedited();
+	percpu_ref_kill_rcu(&ref->rcu);
+}
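For completeness, the confirm-kill path: call_rcu_sched() defers percpu_ref_kill_rcu() past an RCU-sched grace period, after which every CPU is guaranteed to see PCPU_REF_DEAD and @confirm_kill fires. A hedged sketch of waiting for that point synchronously (the completion and names are illustrative, not from this patch):

	#include <linux/percpu-refcount.h>
	#include <linux/completion.h>

	static DECLARE_COMPLETION(foo_kill_confirmed);

	static void foo_confirm_kill(struct percpu_ref *ref)
	{
		complete(&foo_kill_confirmed);	/* all CPUs now see the ref as dead */
	}

	static void foo_kill_sync(struct percpu_ref *ref)
	{
		percpu_ref_kill_and_confirm(ref, foo_confirm_kill);
		wait_for_completion(&foo_kill_confirmed);
	}

The new __percpu_ref_kill_expedited() short-circuits the same transition by calling synchronize_sched_expedited() and then percpu_ref_kill_rcu() directly, which is why its comment restricts it to blk_mq_freeze_queue() until its planned removal in the v3.18 cycle.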