| author | Waiman Long <Waiman.Long@hpe.com> | 2016-05-17 21:26:20 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2016-06-08 09:16:59 -0400 |
| commit | fb6a44f33be542fd81575ff93a4e8118d6a58592 (patch) | |
| tree | 6750c84b596d15ac1e9c30b1a57de0cd31efdedd /kernel/locking | |
| parent | 19c5d690e41697fcdd19379ab9d10d8d37818414 (diff) | |
locking/rwsem: Protect all writes to owner by WRITE_ONCE()
Without using WRITE_ONCE(), the compiler can potentially break a
write into multiple smaller ones (store tearing), so a concurrent
read of the same data by another task may return a partial result.
This can cause a kernel crash if the data is a memory address that
is then dereferenced.

This patch changes all writes to rwsem->owner to use WRITE_ONCE()
to make sure that store tearing cannot happen. READ_ONCE() may not
be needed for rwsem->owner as long as the value is only used for
comparison and is not dereferenced.
Signed-off-by: Waiman Long <Waiman.Long@hpe.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Douglas Hatch <doug.hatch@hpe.com>
Cc: Jason Low <jason.low2@hp.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Hurley <peter@hurleysoftware.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Scott J Norton <scott.norton@hpe.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1463534783-38814-3-git-send-email-Waiman.Long@hpe.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
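To make the hazard concrete, here is a minimal sketch of the pattern the patch enforces, assuming the stock WRITE_ONCE() from <linux/compiler.h>; the struct and function names (tear_demo, set_owner_unsafe, set_owner_safe) are hypothetical illustrations, not code from the patch:

```c
#include <linux/compiler.h>	/* WRITE_ONCE() */
#include <linux/sched.h>	/* struct task_struct, current */

struct tear_demo {
	struct task_struct *owner;
};

/*
 * UNSAFE: the compiler may legally split this plain store into
 * several narrower stores (store tearing), so a task reading
 * ->owner concurrently can observe a half-written pointer.
 */
static void set_owner_unsafe(struct tear_demo *d)
{
	d->owner = current;
}

/*
 * SAFE: WRITE_ONCE() emits a single volatile store of the whole
 * pointer, so concurrent readers never see a torn value.
 */
static void set_owner_safe(struct tear_demo *d)
{
	WRITE_ONCE(d->owner, current);
}
```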
Diffstat (limited to 'kernel/locking')
| -rw-r--r-- | kernel/locking/rwsem.h | 13 |
1 file changed, 10 insertions, 3 deletions
```diff
diff --git a/kernel/locking/rwsem.h b/kernel/locking/rwsem.h
index 8f43ba234787..a699f4048ba1 100644
--- a/kernel/locking/rwsem.h
+++ b/kernel/locking/rwsem.h
@@ -16,14 +16,21 @@
 #define RWSEM_READER_OWNED	((struct task_struct *)1UL)
 
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+/*
+ * All writes to owner are protected by WRITE_ONCE() to make sure that
+ * store tearing can't happen as optimistic spinners may read and use
+ * the owner value concurrently without lock. Read from owner, however,
+ * may not need READ_ONCE() as long as the pointer value is only used
+ * for comparison and isn't being dereferenced.
+ */
 static inline void rwsem_set_owner(struct rw_semaphore *sem)
 {
-	sem->owner = current;
+	WRITE_ONCE(sem->owner, current);
 }
 
 static inline void rwsem_clear_owner(struct rw_semaphore *sem)
 {
-	sem->owner = NULL;
+	WRITE_ONCE(sem->owner, NULL);
 }
 
 static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
@@ -34,7 +41,7 @@ static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
 	 * to minimize cacheline contention.
 	 */
 	if (sem->owner != RWSEM_READER_OWNED)
-		sem->owner = RWSEM_READER_OWNED;
+		WRITE_ONCE(sem->owner, RWSEM_READER_OWNED);
 }
 
 static inline bool rwsem_owner_is_writer(struct task_struct *owner)
```
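For the other side of the contract, here is a hedged sketch of the kind of concurrent reader these writes protect, loosely modeled on the optimistic-spinning path of that era; rwsem_owner_running() is a hypothetical name, ->on_cpu assumes CONFIG_SMP, and the real spinner additionally guards the owner task's lifetime (e.g. with rcu_read_lock()):

```c
static inline bool rwsem_owner_running(struct rw_semaphore *sem)
{
	/*
	 * The value is dereferenced below, so it must be loaded with
	 * READ_ONCE(): dereferencing a torn pointer could crash.
	 */
	struct task_struct *owner = READ_ONCE(sem->owner);

	/*
	 * Comparison-only checks like these are what the new header
	 * comment means by reads that can tolerate a plain load: a
	 * torn pointer merely compares unequal, it is never
	 * dereferenced.
	 */
	if (owner == NULL || owner == RWSEM_READER_OWNED)
		return false;

	/*
	 * Dereference: safe against tearing only because writers now
	 * use WRITE_ONCE() and the load above used READ_ONCE().
	 */
	return owner->on_cpu;
}
```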
