-rw-r--r--  include/linux/percpu-refcount.h | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index d5c89e0dd0e6..51ce60c35f4c 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -133,7 +133,13 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
 	/* paired with smp_store_release() in percpu_ref_reinit() */
 	smp_read_barrier_depends();
 
-	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC))
+	/*
+	 * Theoretically, the following could test just ATOMIC; however,
+	 * then we'd have to mask off DEAD separately as DEAD may be
+	 * visible without ATOMIC if we race with percpu_ref_kill().  DEAD
+	 * implies ATOMIC anyway.  Test them together.
+	 */
+	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
 		return false;
 
 	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
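
As the new comment notes, DEAD may become visible without ATOMIC while racing with percpu_ref_kill(), so testing ATOMIC alone would mis-classify a dying ref as still operating in percpu mode. The following is a minimal user-space sketch of that flag logic, not kernel code: the flag layout mirrors the enum in include/linux/percpu-refcount.h, but the snapshot value and printouts are purely illustrative assumptions.

#include <stdio.h>

/* Flag layout mirrors the enum in include/linux/percpu-refcount.h. */
#define __PERCPU_REF_ATOMIC		(1UL << 0)
#define __PERCPU_REF_DEAD		(1UL << 1)
#define __PERCPU_REF_ATOMIC_DEAD	(__PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD)

int main(void)
{
	/*
	 * Hypothetical snapshot a racing reader might take mid-kill:
	 * DEAD already set, ATOMIC not yet visible.
	 */
	unsigned long percpu_ptr = __PERCPU_REF_DEAD;

	/* Testing ATOMIC alone misses the DEAD-only window ... */
	printf("ATOMIC only   -> %s\n",
	       (percpu_ptr & __PERCPU_REF_ATOMIC) ? "atomic" : "percpu");

	/* ... while the combined mask catches it, which is what the
	 * patched __ref_is_percpu() relies on. */
	printf("ATOMIC | DEAD -> %s\n",
	       (percpu_ptr & __PERCPU_REF_ATOMIC_DEAD) ? "atomic" : "percpu");

	return 0;
}

The alternative mentioned in the comment, testing ATOMIC and masking DEAD off the pointer separately, would add work on this hot path; since DEAD implies ATOMIC, the single combined test is sufficient.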