author    Linus Torvalds <torvalds@linux-foundation.org>  2014-11-23 14:33:49 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-11-23 14:33:49 -0500
commit    9f2e0f6370b5f37cea3deee47299c8dfb0228eeb (patch)
tree      1f7d684f6e84876806fa1f3cc5fab7029d7cf0d3
parent    d038a63ace6cf2ce3aeafa741b73d542ffb65163 (diff)
parent    4aab3b5b3ccf94fc907e66233e6ca4d8675759a6 (diff)
Merge branch 'for-3.18-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
Pull percpu fix from Tejun Heo:
 "This contains one patch to fix a race condition which can lead to
  percpu_ref using a percpu pointer which is corrupted with a set DEAD
  bit.  The bug was introduced while separating out the ATOMIC mode
  flag from the DEAD flag.  The fix is pretty straightforward.

  I just committed the patch to the percpu tree but am sending out the
  pull request early as I'll be on vacation for a week.  The patch
  should be fairly safe and while the latency will be higher I'll be
  checking emails"

* 'for-3.18-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
  percpu-ref: fix DEAD flag contamination of percpu pointer
 include/linux/percpu-refcount.h | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index d5c89e0dd0e6..51ce60c35f4c 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -133,7 +133,13 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
 	/* paired with smp_store_release() in percpu_ref_reinit() */
 	smp_read_barrier_depends();
 
-	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC))
+	/*
+	 * Theoretically, the following could test just ATOMIC; however,
+	 * then we'd have to mask off DEAD separately as DEAD may be
+	 * visible without ATOMIC if we race with percpu_ref_kill().  DEAD
+	 * implies ATOMIC anyway.  Test them together.
+	 */
+	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
 		return false;
 
 	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
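
For reference, a minimal userspace sketch of the flag layout the fix relies on: percpu_ref keeps the ATOMIC and DEAD flags in the low bits of the stored percpu pointer word, so a reader must reject both bits before treating the word as a real pointer, because DEAD can become visible before ATOMIC when racing with percpu_ref_kill(). This is not the kernel implementation; ref_is_percpu() below is a hypothetical stand-in that only mirrors the check made in __ref_is_percpu().

	/* Userspace sketch only -- mirrors the kernel's flag values, not its code. */
	#include <stdbool.h>
	#include <stdio.h>

	#define __PERCPU_REF_ATOMIC      (1UL << 0)
	#define __PERCPU_REF_DEAD        (1UL << 1)
	#define __PERCPU_REF_ATOMIC_DEAD (__PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD)

	/* Hypothetical helper: treat the word as a percpu pointer only if no flag bit is set. */
	static bool ref_is_percpu(unsigned long percpu_ptr, unsigned long **countp)
	{
		/*
		 * Testing only ATOMIC would let through a word that carries DEAD
		 * but not yet ATOMIC (the percpu_ref_kill() race); testing the
		 * combined mask rejects both cases.
		 */
		if (percpu_ptr & __PERCPU_REF_ATOMIC_DEAD)
			return false;

		*countp = (unsigned long *)percpu_ptr;
		return true;
	}

	int main(void)
	{
		unsigned long counter = 0;
		unsigned long *p = NULL;

		/* Live ref: plain pointer, no flag bits set -> usable as percpu. */
		printf("live ref is percpu: %d\n",
		       ref_is_percpu((unsigned long)&counter, &p));

		/* Killed ref: DEAD bit set -> must not be dereferenced as percpu. */
		printf("killed ref is percpu: %d\n",
		       ref_is_percpu((unsigned long)&counter | __PERCPU_REF_DEAD, &p));
		return 0;
	}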