diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-03 15:57:53 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-03 15:57:53 -0400 |
commit | 776edb59317ada867dfcddde40b55648beeb0078 (patch) | |
tree | f6a6136374642323cfefd7d6399ea429f9018ade /include/linux/sunrpc/sched.h | |
parent | 59a3d4c3631e553357b7305dc09db1990aa6757c (diff) | |
parent | 3cf2f34e1a3d4d5ff209d087925cf950e52f4805 (diff) |
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull core locking updates from Ingo Molnar:
"The main changes in this cycle were:
- reduced/streamlined smp_mb__*() interface that allows more usecases
and makes the existing ones less buggy, especially in rarer
architectures
- add rwsem implementation comments
- bump up lockdep limits"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (33 commits)
rwsem: Add comments to explain the meaning of the rwsem's count field
lockdep: Increase static allocations
arch: Mass conversion of smp_mb__*()
arch,doc: Convert smp_mb__*()
arch,xtensa: Convert smp_mb__*()
arch,x86: Convert smp_mb__*()
arch,tile: Convert smp_mb__*()
arch,sparc: Convert smp_mb__*()
arch,sh: Convert smp_mb__*()
arch,score: Convert smp_mb__*()
arch,s390: Convert smp_mb__*()
arch,powerpc: Convert smp_mb__*()
arch,parisc: Convert smp_mb__*()
arch,openrisc: Convert smp_mb__*()
arch,mn10300: Convert smp_mb__*()
arch,mips: Convert smp_mb__*()
arch,metag: Convert smp_mb__*()
arch,m68k: Convert smp_mb__*()
arch,m32r: Convert smp_mb__*()
arch,ia64: Convert smp_mb__*()
...
Diffstat (limited to 'include/linux/sunrpc/sched.h')
-rw-r--r-- | include/linux/sunrpc/sched.h | 8 |
1 file changed, 4 insertions, 4 deletions
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 3a847de83fab..ad7dbe2cfecd 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h | |||
@@ -142,18 +142,18 @@ struct rpc_task_setup { | |||
142 | test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) | 142 | test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) |
143 | #define rpc_clear_running(t) \ | 143 | #define rpc_clear_running(t) \ |
144 | do { \ | 144 | do { \ |
145 | smp_mb__before_clear_bit(); \ | 145 | smp_mb__before_atomic(); \ |
146 | clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \ | 146 | clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \ |
147 | smp_mb__after_clear_bit(); \ | 147 | smp_mb__after_atomic(); \ |
148 | } while (0) | 148 | } while (0) |
149 | 149 | ||
150 | #define RPC_IS_QUEUED(t) test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate) | 150 | #define RPC_IS_QUEUED(t) test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate) |
151 | #define rpc_set_queued(t) set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate) | 151 | #define rpc_set_queued(t) set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate) |
152 | #define rpc_clear_queued(t) \ | 152 | #define rpc_clear_queued(t) \ |
153 | do { \ | 153 | do { \ |
154 | smp_mb__before_clear_bit(); \ | 154 | smp_mb__before_atomic(); \ |
155 | clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \ | 155 | clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \ |
156 | smp_mb__after_clear_bit(); \ | 156 | smp_mb__after_atomic(); \ |
157 | } while (0) | 157 | } while (0) |
158 | 158 | ||
159 | #define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate) | 159 | #define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate) |