author		Linus Torvalds <torvalds@linux-foundation.org>	2014-06-03 15:57:53 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-03 15:57:53 -0400
commit		776edb59317ada867dfcddde40b55648beeb0078 (patch)
tree		f6a6136374642323cfefd7d6399ea429f9018ade /include
parent		59a3d4c3631e553357b7305dc09db1990aa6757c (diff)
parent		3cf2f34e1a3d4d5ff209d087925cf950e52f4805 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull core locking updates from Ingo Molnar:
"The main changes in this cycle were:
- reduced/streamlined smp_mb__*() interface that allows more use
  cases and makes the existing ones less buggy, especially on rarer
  architectures
- add rwsem implementation comments
- bump up lockdep limits"
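
As a concrete illustration of the first item, here is a minimal before/after sketch of the renamed barrier interface. The struct, flag, and function names are hypothetical; only the smp_mb__*() calls come from this series.

#include <linux/atomic.h>
#include <linux/bitops.h>

/* Hypothetical object used only for this sketch. */
struct foo {
	unsigned long	flags;		/* FOO_PENDING is an assumed bit */
	atomic_t	users;
};
#define FOO_PENDING	0

/* Old spelling: one barrier name per operation family. */
static void foo_complete_old(struct foo *f)
{
	smp_mb__before_clear_bit();
	clear_bit(FOO_PENDING, &f->flags);
	smp_mb__after_clear_bit();
	smp_mb__before_atomic_dec();
	atomic_dec(&f->users);
}

/* New spelling: one smp_mb__{before,after}_atomic() pair covers all
 * non-value-returning atomics and bitops. */
static void foo_complete_new(struct foo *f)
{
	smp_mb__before_atomic();
	clear_bit(FOO_PENDING, &f->flags);
	smp_mb__after_atomic();
	smp_mb__before_atomic();
	atomic_dec(&f->users);
}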
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (33 commits)
rwsem: Add comments to explain the meaning of the rwsem's count field
lockdep: Increase static allocations
arch: Mass conversion of smp_mb__*()
arch,doc: Convert smp_mb__*()
arch,xtensa: Convert smp_mb__*()
arch,x86: Convert smp_mb__*()
arch,tile: Convert smp_mb__*()
arch,sparc: Convert smp_mb__*()
arch,sh: Convert smp_mb__*()
arch,score: Convert smp_mb__*()
arch,s390: Convert smp_mb__*()
arch,powerpc: Convert smp_mb__*()
arch,parisc: Convert smp_mb__*()
arch,openrisc: Convert smp_mb__*()
arch,mn10300: Convert smp_mb__*()
arch,mips: Convert smp_mb__*()
arch,metag: Convert smp_mb__*()
arch,m68k: Convert smp_mb__*()
arch,m32r: Convert smp_mb__*()
arch,ia64: Convert smp_mb__*()
...
Diffstat (limited to 'include')
-rw-r--r--	include/asm-generic/atomic.h		 7
-rw-r--r--	include/asm-generic/barrier.h		 8
-rw-r--r--	include/asm-generic/bitops.h		 9
-rw-r--r--	include/asm-generic/bitops/atomic.h	 2
-rw-r--r--	include/asm-generic/bitops/lock.h	 2
-rw-r--r--	include/linux/atomic.h			36
-rw-r--r--	include/linux/bitops.h			20
-rw-r--r--	include/linux/buffer_head.h		 2
-rw-r--r--	include/linux/genhd.h			 2
-rw-r--r--	include/linux/interrupt.h		 8
-rw-r--r--	include/linux/netdevice.h		 2
-rw-r--r--	include/linux/sched.h			 6
-rw-r--r--	include/linux/sunrpc/sched.h		 8
-rw-r--r--	include/linux/sunrpc/xprt.h		 8
-rw-r--r--	include/linux/tracehook.h		 2
-rw-r--r--	include/net/ip_vs.h			 4
16 files changed, 88 insertions, 38 deletions
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 33bd2de3bc1e..9c79e7603459 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -16,6 +16,7 @@
 #define __ASM_GENERIC_ATOMIC_H
 
 #include <asm/cmpxchg.h>
+#include <asm/barrier.h>
 
 #ifdef CONFIG_SMP
 /* Force people to define core atomics */
@@ -182,11 +183,5 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 }
 #endif
 
-/* Assume that atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
 #endif /* __KERNEL__ */
 #endif /* __ASM_GENERIC_ATOMIC_H */
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 6f692f8ac664..1402fa855388 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -62,6 +62,14 @@
 #define set_mb(var, value) do { (var) = (value); mb(); } while (0)
 #endif
 
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic()	smp_mb()
+#endif
+
+#ifndef smp_mb__after_atomic
+#define smp_mb__after_atomic()	smp_mb()
+#endif
+
 #define smp_store_release(p, v)						\
 do {									\
 	compiletime_assert_atomic_type(*p);				\
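
The two fallbacks added above default to a full smp_mb(). An architecture whose atomic RMW operations and bitops already imply full ordering can pre-define the names to a plain compiler barrier in its own <asm/barrier.h> before including this file, so the #ifndef blocks are skipped. A sketch of that kind of override; the per-arch details live in the arch patches of this series, not in this hunk:

/* In an arch's <asm/barrier.h>: atomics are already fully ordered here,
 * so a compiler barrier is enough. Assumed example, not taken from this diff. */
#define smp_mb__before_atomic()	barrier()
#define smp_mb__after_atomic()	barrier()

#include <asm-generic/barrier.h>	/* the generic fallbacks stay unused */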
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
index 280ca7a96f75..dcdcacf2fd2b 100644
--- a/include/asm-generic/bitops.h
+++ b/include/asm-generic/bitops.h
@@ -11,14 +11,7 @@
 
 #include <linux/irqflags.h>
 #include <linux/compiler.h>
-
-/*
- * clear_bit may not imply a memory barrier
- */
-#ifndef smp_mb__before_clear_bit
-#define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	smp_mb()
-#endif
+#include <asm/barrier.h>
 
 #include <asm-generic/bitops/__ffs.h>
 #include <asm-generic/bitops/ffz.h>
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index 9ae6c34dc191..49673510b484 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -80,7 +80,7 @@ static inline void set_bit(int nr, volatile unsigned long *addr)
  *
  * clear_bit() is atomic and may not be reordered.  However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static inline void clear_bit(int nr, volatile unsigned long *addr)
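
As the updated comment says, clear_bit() is atomic but is not a barrier, so callers that use it to publish state add the barriers themselves. A minimal sketch of that pattern, with a hypothetical flag bit and wait queue:

#include <linux/bitops.h>
#include <linux/wait.h>

#define MY_BUSY_BIT	0	/* assumed flag bit, not from this patch */

static void release_slot(unsigned long *flags, wait_queue_head_t *wq)
{
	smp_mb__before_atomic();	/* earlier stores visible before the clear */
	clear_bit(MY_BUSY_BIT, flags);
	smp_mb__after_atomic();		/* the clear visible before the wakeup */
	wake_up(wq);
}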
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index 308a9e22c802..c30266e94806 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -20,7 +20,7 @@
  */
 #define clear_bit_unlock(nr, addr)	\
 do {					\
-	smp_mb__before_clear_bit();	\
+	smp_mb__before_atomic();	\
 	clear_bit(nr, addr);		\
 } while (0)
 
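
clear_bit_unlock() pairs with test_and_set_bit_lock() from the same header, giving a simple bit lock with acquire/release semantics. A usage sketch; the lock word and bit number here are made up for illustration:

#include <linux/bitops.h>
#include <asm/processor.h>	/* cpu_relax() */

static unsigned long my_lock_word;	/* bit 0 used as a hypothetical lock */

static void my_lock(void)
{
	while (test_and_set_bit_lock(0, &my_lock_word))
		cpu_relax();		/* spin until the owner calls my_unlock() */
}

static void my_unlock(void)
{
	clear_bit_unlock(0, &my_lock_word);	/* release: barrier, then clear */
}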
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 5b08a8540ecf..fef3a809e7cf 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -3,6 +3,42 @@
 #define _LINUX_ATOMIC_H
 #include <asm/atomic.h>
 
+/*
+ * Provide __deprecated wrappers for the new interface, avoid flag day changes.
+ * We need the ugly external functions to break header recursion hell.
+ */
+#ifndef smp_mb__before_atomic_inc
+static inline void __deprecated smp_mb__before_atomic_inc(void)
+{
+	extern void __smp_mb__before_atomic(void);
+	__smp_mb__before_atomic();
+}
+#endif
+
+#ifndef smp_mb__after_atomic_inc
+static inline void __deprecated smp_mb__after_atomic_inc(void)
+{
+	extern void __smp_mb__after_atomic(void);
+	__smp_mb__after_atomic();
+}
+#endif
+
+#ifndef smp_mb__before_atomic_dec
+static inline void __deprecated smp_mb__before_atomic_dec(void)
+{
+	extern void __smp_mb__before_atomic(void);
+	__smp_mb__before_atomic();
+}
+#endif
+
+#ifndef smp_mb__after_atomic_dec
+static inline void __deprecated smp_mb__after_atomic_dec(void)
+{
+	extern void __smp_mb__after_atomic(void);
+	__smp_mb__after_atomic();
+}
+#endif
+
 /**
  * atomic_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
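
The wrappers above deliberately call out-of-line __smp_mb__before_atomic()/__smp_mb__after_atomic() helpers instead of the new macros, so this header does not need to pull in <asm/barrier.h> (the "header recursion hell" the comment refers to). Those helpers are expected to be defined once in non-header kernel code elsewhere in the series; since this diffstat is limited to include/, they are not shown here, but they would look roughly like this (location and export are assumptions):

void __smp_mb__before_atomic(void)
{
	smp_mb__before_atomic();
}
EXPORT_SYMBOL(__smp_mb__before_atomic);

void __smp_mb__after_atomic(void)
{
	smp_mb__after_atomic();
}
EXPORT_SYMBOL(__smp_mb__after_atomic);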
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index be5fd38bd5a0..cbc5833fb221 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -32,6 +32,26 @@ extern unsigned long __sw_hweight64(__u64 w);
  */
 #include <asm/bitops.h>
 
+/*
+ * Provide __deprecated wrappers for the new interface, avoid flag day changes.
+ * We need the ugly external functions to break header recursion hell.
+ */
+#ifndef smp_mb__before_clear_bit
+static inline void __deprecated smp_mb__before_clear_bit(void)
+{
+	extern void __smp_mb__before_atomic(void);
+	__smp_mb__before_atomic();
+}
+#endif
+
+#ifndef smp_mb__after_clear_bit
+static inline void __deprecated smp_mb__after_clear_bit(void)
+{
+	extern void __smp_mb__after_atomic(void);
+	__smp_mb__after_atomic();
+}
+#endif
+
 #define for_each_set_bit(bit, addr, size) \
 	for ((bit) = find_first_bit((addr), (size));		\
 	     (bit) < (size);					\
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index c40302f909ce..7cbf837a279c 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -278,7 +278,7 @@ static inline void get_bh(struct buffer_head *bh)
 
 static inline void put_bh(struct buffer_head *bh)
 {
-	smp_mb__before_atomic_dec();
+	smp_mb__before_atomic();
 	atomic_dec(&bh->b_count);
 }
 
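
put_bh() is a release-style reference drop: the barrier makes the caller's earlier stores to the buffer visible before the count goes down, because atomic_dec(), being a non-value-returning atomic, carries no ordering of its own. The same shape sketched with a hypothetical object:

#include <linux/atomic.h>

struct obj {
	atomic_t refs;
};

static void obj_put(struct obj *o)
{
	smp_mb__before_atomic();	/* prior stores visible before the drop */
	atomic_dec(&o->refs);		/* unordered by itself, hence the barrier */
}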
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 9f3c275e053e..ec274e0f4ed2 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -649,7 +649,7 @@ static inline void hd_ref_init(struct hd_struct *part)
 static inline void hd_struct_get(struct hd_struct *part)
 {
 	atomic_inc(&part->ref);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 }
 
 static inline int hd_struct_try_get(struct hd_struct *part)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 051c85032f48..cb19f09d7e3e 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -491,7 +491,7 @@ static inline int tasklet_trylock(struct tasklet_struct *t)
 
 static inline void tasklet_unlock(struct tasklet_struct *t)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(TASKLET_STATE_RUN, &(t)->state);
 }
 
@@ -539,7 +539,7 @@ static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
 static inline void tasklet_disable_nosync(struct tasklet_struct *t)
 {
 	atomic_inc(&t->count);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 }
 
 static inline void tasklet_disable(struct tasklet_struct *t)
@@ -551,13 +551,13 @@ static inline void tasklet_disable(struct tasklet_struct *t)
 
 static inline void tasklet_enable(struct tasklet_struct *t)
 {
-	smp_mb__before_atomic_dec();
+	smp_mb__before_atomic();
 	atomic_dec(&t->count);
 }
 
 static inline void tasklet_hi_enable(struct tasklet_struct *t)
 {
-	smp_mb__before_atomic_dec();
+	smp_mb__before_atomic();
 	atomic_dec(&t->count);
 }
 
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b42d07b0390b..6c1ae9fd9505 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -493,7 +493,7 @@ static inline void napi_disable(struct napi_struct *n)
 static inline void napi_enable(struct napi_struct *n)
 {
 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(NAPI_STATE_SCHED, &n->state);
 }
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 221b2bde3723..4dce5d844b74 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2785,10 +2785,8 @@ static inline bool __must_check current_set_polling_and_test(void)
 	/*
 	 * Polling state must be visible before we test NEED_RESCHED,
 	 * paired by resched_task()
-	 *
-	 * XXX: assumes set/clear bit are identical barrier wise.
 	 */
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	return unlikely(tif_need_resched());
 }
@@ -2806,7 +2804,7 @@ static inline bool __must_check current_clr_polling_and_test(void)
 	 * Polling state must be visible before we test NEED_RESCHED,
 	 * paired by resched_task()
 	 */
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	return unlikely(tif_need_resched());
 }
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 3a847de83fab..ad7dbe2cfecd 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -142,18 +142,18 @@ struct rpc_task_setup {
 		test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
 #define rpc_clear_running(t)	\
 	do { \
-		smp_mb__before_clear_bit(); \
+		smp_mb__before_atomic(); \
 		clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \
-		smp_mb__after_clear_bit(); \
+		smp_mb__after_atomic(); \
 	} while (0)
 
 #define RPC_IS_QUEUED(t)	test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
 #define rpc_set_queued(t)	set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
 #define rpc_clear_queued(t)	\
 	do { \
-		smp_mb__before_clear_bit(); \
+		smp_mb__before_atomic(); \
 		clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \
-		smp_mb__after_clear_bit(); \
+		smp_mb__after_atomic(); \
 	} while (0)
 
 #define RPC_IS_ACTIVATED(t)	test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 3e5efb2b236e..3876f0f1dfd3 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -379,9 +379,9 @@ static inline int xprt_test_and_clear_connected(struct rpc_xprt *xprt)
 
 static inline void xprt_clear_connecting(struct rpc_xprt *xprt)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(XPRT_CONNECTING, &xprt->state);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 }
 
 static inline int xprt_connecting(struct rpc_xprt *xprt)
@@ -411,9 +411,9 @@ static inline void xprt_clear_bound(struct rpc_xprt *xprt)
 
 static inline void xprt_clear_binding(struct rpc_xprt *xprt)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(XPRT_BINDING, &xprt->state);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 }
 
 static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt)
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 1e98b5530425..6f8ab7da27c4 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -191,7 +191,7 @@ static inline void tracehook_notify_resume(struct pt_regs *regs)
 	 * pairs with task_work_add()->set_notify_resume() after
 	 * hlist_add_head(task->task_works);
 	 */
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 	if (unlikely(current->task_works))
 		task_work_run();
 }
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 5679d927562b..624a8a54806d 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -1204,7 +1204,7 @@ static inline bool __ip_vs_conn_get(struct ip_vs_conn *cp)
 /* put back the conn without restarting its timer */
 static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
 {
-	smp_mb__before_atomic_dec();
+	smp_mb__before_atomic();
 	atomic_dec(&cp->refcnt);
 }
 void ip_vs_conn_put(struct ip_vs_conn *cp);
@@ -1408,7 +1408,7 @@ static inline void ip_vs_dest_hold(struct ip_vs_dest *dest)
 
 static inline void ip_vs_dest_put(struct ip_vs_dest *dest)
 {
-	smp_mb__before_atomic_dec();
+	smp_mb__before_atomic();
 	atomic_dec(&dest->refcnt);
 }
 