 44 files changed, 401 insertions(+), 319 deletions(-)
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index a61be39c7b51..904ee42d078e 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -1655,17 +1655,18 @@ macro is a good place to start looking.
 SMP memory barriers are reduced to compiler barriers on uniprocessor compiled
 systems because it is assumed that a CPU will appear to be self-consistent,
 and will order overlapping accesses correctly with respect to itself.
+However, see the subsection on "Virtual Machine Guests" below.
 
 [!] Note that SMP memory barriers _must_ be used to control the ordering of
 references to shared memory on SMP systems, though the use of locking instead
 is sufficient.
 
 Mandatory barriers should not be used to control SMP effects, since mandatory
-barriers unnecessarily impose overhead on UP systems. They may, however, be
-used to control MMIO effects on accesses through relaxed memory I/O windows.
-These are required even on non-SMP systems as they affect the order in which
-memory operations appear to a device by prohibiting both the compiler and the
-CPU from reordering them.
+barriers impose unnecessary overhead on both SMP and UP systems. They may,
+however, be used to control MMIO effects on accesses through relaxed memory I/O
+windows. These barriers are required even on non-SMP systems as they affect
+the order in which memory operations appear to a device by prohibiting both the
+compiler and the CPU from reordering them.
 
 
 There are some more advanced barrier functions:
@@ -2948,6 +2949,23 @@ The Alpha defines the Linux kernel's memory barrier model.
 
 See the subsection on "Cache Coherency" above.
 
+VIRTUAL MACHINE GUESTS
+----------------------
+
+Guests running within virtual machines might be affected by SMP effects even if
+the guest itself is compiled without SMP support.  This is an artifact of
+interfacing with an SMP host while running a UP kernel.  Using mandatory
+barriers for this use-case would be possible but is often suboptimal.
+
+To handle this case optimally, low-level virt_mb() etc. macros are available.
+These have the same effect as smp_mb() etc. when SMP is enabled, but generate
+identical code for SMP and non-SMP systems.  For example, virtual machine
+guests should use virt_mb() rather than smp_mb() when synchronizing against a
+(possibly SMP) host.
+
+These are equivalent to their smp_mb() etc. counterparts in all other respects;
+in particular, they do not control MMIO effects: to control MMIO effects, use
+mandatory barriers.
+
 
 ============
 EXAMPLE USES
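The new "Virtual Machine Guests" subsection above is the rationale for the rest
of this series.  As a minimal sketch of the intended use, consider a
hypothetical guest-side producer for a ring shared with a (possibly SMP) host;
demo_ring, DEMO_RING_SIZE and demo_notify_host() are invented for illustration:

	/* virt_wmb()/virt_mb() compile to real CPU barriers even in a
	 * CONFIG_SMP=n guest kernel, unlike the smp_*() versions, which
	 * would collapse to compiler-only barriers there. */
	static void guest_ring_publish(struct demo_ring *r, u32 val)
	{
		r->buf[r->prod % DEMO_RING_SIZE] = val;
		virt_wmb();	/* data visible before the index update */
		WRITE_ONCE(r->prod, r->prod + 1);
		virt_mb();	/* index visible before reading host_waiting */
		if (READ_ONCE(r->host_waiting))
			demo_notify_host(r);
	}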
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 3ff5642d9788..112cc1a5d47f 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -60,38 +60,11 @@ extern void arm_heavy_mb(void);
 #define dma_wmb()	barrier()
 #endif
 
-#ifndef CONFIG_SMP
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#else
-#define smp_mb()	dmb(ish)
-#define smp_rmb()	smp_mb()
-#define smp_wmb()	dmb(ishst)
-#endif
-
-#define smp_store_release(p, v)						\
-do {									\
-	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
-	WRITE_ONCE(*p, v);						\
-} while (0)
-
-#define smp_load_acquire(p)						\
-({									\
-	typeof(*p) ___p1 = READ_ONCE(*p);				\
-	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
-	___p1;								\
-})
-
-#define read_barrier_depends()		do { } while(0)
-#define smp_read_barrier_depends()	do { } while(0)
-
-#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
+#define __smp_mb()	dmb(ish)
+#define __smp_rmb()	__smp_mb()
+#define __smp_wmb()	dmb(ishst)
 
-#define smp_mb__before_atomic()	smp_mb()
-#define smp_mb__after_atomic()	smp_mb()
+#include <asm-generic/barrier.h>
 
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_BARRIER_H */
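The arm conversion above is the template every architecture below follows: the
arch keeps only its real hardware barriers, under __smp_*() names, and pulls in
asm-generic/barrier.h for everything else.  In outline (the full derivation is
in the asm-generic/barrier.h hunk at the end of this patch; the virt_mb() line
reflects the behaviour documented above rather than a hunk shown here):

	arch header:   #define __smp_mb()  dmb(ish)    (always a real barrier)
	CONFIG_SMP=y:  #define smp_mb()    __smp_mb()
	CONFIG_SMP=n:  #define smp_mb()    barrier()   (compiler barrier only)
	virt users:    #define virt_mb()   __smp_mb()  (real barrier either way)

The last line is the point of the exercise: UP guest kernels get real barriers
when talking to an SMP host, without paying the mandatory-barrier cost
everywhere else.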
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 9622eb48f894..dae5c49618db 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -35,11 +35,11 @@
 #define dma_rmb()	dmb(oshld)
 #define dma_wmb()	dmb(oshst)
 
-#define smp_mb()	dmb(ish)
-#define smp_rmb()	dmb(ishld)
-#define smp_wmb()	dmb(ishst)
+#define __smp_mb()	dmb(ish)
+#define __smp_rmb()	dmb(ishld)
+#define __smp_wmb()	dmb(ishst)
 
-#define smp_store_release(p, v)						\
+#define __smp_store_release(p, v)					\
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	switch (sizeof(*p)) {						\
@@ -62,7 +62,7 @@ do {									\
 	}								\
 } while (0)
 
-#define smp_load_acquire(p)						\
+#define __smp_load_acquire(p)						\
 ({									\
 	union { typeof(*p) __val; char __c[1]; } __u;			\
 	compiletime_assert_atomic_type(*p);				\
@@ -91,14 +91,7 @@ do {									\
 	__u.__val;							\
 })
 
-#define read_barrier_depends()		do { } while(0)
-#define smp_read_barrier_depends()	do { } while(0)
-
-#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-#define nop()		asm volatile("nop");
-
-#define smp_mb__before_atomic()	smp_mb()
-#define smp_mb__after_atomic()	smp_mb()
+#include <asm-generic/barrier.h>
 
 #endif	/* __ASSEMBLY__ */
 
diff --git a/arch/blackfin/include/asm/barrier.h b/arch/blackfin/include/asm/barrier.h
index dfb66fe88b34..7cca51cae5ff 100644
--- a/arch/blackfin/include/asm/barrier.h
+++ b/arch/blackfin/include/asm/barrier.h
@@ -78,8 +78,8 @@
 
 #endif /* !CONFIG_SMP */
 
-#define smp_mb__before_atomic()	barrier()
-#define smp_mb__after_atomic()	barrier()
+#define __smp_mb__before_atomic()	barrier()
+#define __smp_mb__after_atomic()	barrier()
 
 #include <asm-generic/barrier.h>
 
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index 209c4b817c95..588f1614cafc 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -42,34 +42,24 @@
 #define dma_rmb()	mb()
 #define dma_wmb()	mb()
 
-#ifdef CONFIG_SMP
-# define smp_mb()	mb()
-#else
-# define smp_mb()	barrier()
-#endif
+# define __smp_mb()	mb()
 
-#define smp_rmb()	smp_mb()
-#define smp_wmb()	smp_mb()
-
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
-
-#define smp_mb__before_atomic()	barrier()
-#define smp_mb__after_atomic()	barrier()
+#define __smp_mb__before_atomic()	barrier()
+#define __smp_mb__after_atomic()	barrier()
 
 /*
  * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no
  * need for asm trickery!
  */
 
-#define smp_store_release(p, v)						\
+#define __smp_store_release(p, v)					\
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
 	WRITE_ONCE(*p, v);						\
 } while (0)
 
-#define smp_load_acquire(p)						\
+#define __smp_load_acquire(p)						\
 ({									\
 	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
@@ -77,12 +67,12 @@ do {									\
 	___p1;								\
 })
 
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
 /*
  * The group barrier in front of the rsm & ssm are necessary to ensure
  * that none of the previous instructions in the same group are
  * affected by the rsm/ssm.
  */
 
+#include <asm-generic/barrier.h>
+
 #endif /* _ASM_IA64_BARRIER_H */
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index d2fae054d988..90fde5b8669d 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -256,7 +256,7 @@ set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask)
 }
 
 static void
-nop (struct irq_data *data)
+iosapic_nop (struct irq_data *data)
 {
 	/* do nothing... */
 }
@@ -415,7 +415,7 @@ iosapic_unmask_level_irq (struct irq_data *data)
 #define iosapic_shutdown_level_irq	mask_irq
 #define iosapic_enable_level_irq	unmask_irq
 #define iosapic_disable_level_irq	mask_irq
-#define iosapic_ack_level_irq		nop
+#define iosapic_ack_level_irq		iosapic_nop
 
 static struct irq_chip irq_type_iosapic_level = {
 	.name =		"IO-SAPIC-level",
@@ -453,7 +453,7 @@ iosapic_ack_edge_irq (struct irq_data *data)
 }
 
 #define iosapic_enable_edge_irq		unmask_irq
-#define iosapic_disable_edge_irq	nop
+#define iosapic_disable_edge_irq	iosapic_nop
 
 static struct irq_chip irq_type_iosapic_edge = {
 	.name =		"IO-SAPIC-edge",
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
index 172b7e5efc53..5418517aa5eb 100644
--- a/arch/metag/include/asm/barrier.h
+++ b/arch/metag/include/asm/barrier.h
@@ -44,16 +44,6 @@ static inline void wr_fence(void)
 #define rmb()		barrier()
 #define wmb()		mb()
 
-#define dma_rmb()	rmb()
-#define dma_wmb()	wmb()
-
-#ifndef CONFIG_SMP
-#define fence()		do { } while (0)
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#else
-
 #ifdef CONFIG_METAG_SMP_WRITE_REORDERING
 /*
  * Write to the atomic memory unlock system event register (command 0). This is
@@ -63,45 +53,32 @@ static inline void wr_fence(void)
  * incoherence). It is therefore ineffective if used after and on the same
  * thread as a write.
  */
-static inline void fence(void)
+static inline void metag_fence(void)
 {
 	volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK;
 	barrier();
 	*flushptr = 0;
 	barrier();
 }
-#define smp_mb()	fence()
-#define smp_rmb()	fence()
-#define smp_wmb()	barrier()
+#define __smp_mb()	metag_fence()
+#define __smp_rmb()	metag_fence()
+#define __smp_wmb()	barrier()
 #else
-#define fence()		do { } while (0)
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#endif
+#define metag_fence()	do { } while (0)
+#define __smp_mb()	barrier()
+#define __smp_rmb()	barrier()
+#define __smp_wmb()	barrier()
 #endif
 
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
-
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
-#define smp_store_release(p, v)						\
-do {									\
-	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
-	WRITE_ONCE(*p, v);						\
-} while (0)
+#ifdef CONFIG_SMP
+#define fence()		metag_fence()
+#else
+#define fence()		do { } while (0)
+#endif
 
-#define smp_load_acquire(p)						\
-({									\
-	typeof(*p) ___p1 = READ_ONCE(*p);				\
-	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
-	___p1;								\
-})
+#define __smp_mb__before_atomic()	barrier()
+#define __smp_mb__after_atomic()	barrier()
 
-#define smp_mb__before_atomic()	barrier()
-#define smp_mb__after_atomic()	barrier()
+#include <asm-generic/barrier.h>
 
 #endif /* _ASM_METAG_BARRIER_H */
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index 752e0b86c171..d296633d890e 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -10,9 +10,6 @@
 
 #include <asm/addrspace.h>
 
-#define read_barrier_depends()		do { } while(0)
-#define smp_read_barrier_depends()	do { } while(0)
-
 #ifdef CONFIG_CPU_HAS_SYNC
 #define __sync()				\
 	__asm__ __volatile__(			\
@@ -87,23 +84,21 @@
 
 #define wmb()		fast_wmb()
 #define rmb()		fast_rmb()
-#define dma_wmb()	fast_wmb()
-#define dma_rmb()	fast_rmb()
 
-#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
+#if defined(CONFIG_WEAK_ORDERING)
 # ifdef CONFIG_CPU_CAVIUM_OCTEON
-#  define smp_mb()	__sync()
-#  define smp_rmb()	barrier()
-#  define smp_wmb()	__syncw()
+#  define __smp_mb()	__sync()
+#  define __smp_rmb()	barrier()
+#  define __smp_wmb()	__syncw()
 # else
-#  define smp_mb()	__asm__ __volatile__("sync" : : :"memory")
-#  define smp_rmb()	__asm__ __volatile__("sync" : : :"memory")
-#  define smp_wmb()	__asm__ __volatile__("sync" : : :"memory")
+#  define __smp_mb()	__asm__ __volatile__("sync" : : :"memory")
+#  define __smp_rmb()	__asm__ __volatile__("sync" : : :"memory")
+#  define __smp_wmb()	__asm__ __volatile__("sync" : : :"memory")
 # endif
 #else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
+#define __smp_mb()	barrier()
+#define __smp_rmb()	barrier()
+#define __smp_wmb()	barrier()
 #endif
 
 #if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
@@ -112,13 +107,11 @@
 #define __WEAK_LLSC_MB		"		\n"
 #endif
 
-#define smp_store_mb(var, value) \
-	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
 #define smp_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
 
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 #define smp_mb__before_llsc() smp_wmb()
+#define __smp_mb__before_llsc() __smp_wmb()
 /* Cause previous writes to become visible on all CPUs as soon as possible */
 #define nudge_writes() __asm__ __volatile__(".set push\n\t"		\
 					    ".set arch=octeon\n\t"	\
@@ -126,25 +119,13 @@
 					    ".set pop" : : : "memory")
 #else
 #define smp_mb__before_llsc() smp_llsc_mb()
+#define __smp_mb__before_llsc() smp_llsc_mb()
 #define nudge_writes() mb()
 #endif
 
-#define smp_store_release(p, v)						\
-do {									\
-	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
-	WRITE_ONCE(*p, v);						\
-} while (0)
-
-#define smp_load_acquire(p)						\
-({									\
-	typeof(*p) ___p1 = READ_ONCE(*p);				\
-	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
-	___p1;								\
-})
-
-#define smp_mb__before_atomic()	smp_mb__before_llsc()
-#define smp_mb__after_atomic()	smp_llsc_mb()
+#define __smp_mb__before_atomic()	__smp_mb__before_llsc()
+#define __smp_mb__after_atomic()	smp_llsc_mb()
+
+#include <asm-generic/barrier.h>
 
 #endif /* __ASM_BARRIER_H */
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index a7af5fb7b914..c0deafc212b8 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -34,8 +34,6 @@
 #define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
 #define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
 
-#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
 #ifdef __SUBARCH_HAS_LWSYNC
 #    define SMPWMB      LWSYNC
 #else
@@ -46,22 +44,11 @@
 #define dma_rmb()	__lwsync()
 #define dma_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
 
-#ifdef CONFIG_SMP
-#define smp_lwsync()	__lwsync()
-
-#define smp_mb()	mb()
-#define smp_rmb()	__lwsync()
-#define smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
-#else
-#define smp_lwsync()	barrier()
+#define __smp_lwsync()	__lwsync()
 
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#endif /* CONFIG_SMP */
-
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
+#define __smp_mb()	mb()
+#define __smp_rmb()	__lwsync()
+#define __smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
 
 /*
  * This is a barrier which prevents following instructions from being
@@ -72,23 +59,23 @@
 #define data_barrier(x)	\
 	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
 
-#define smp_store_release(p, v)						\
+#define __smp_store_release(p, v)					\
 do {									\
 	compiletime_assert_atomic_type(*p);				\
-	smp_lwsync();							\
+	__smp_lwsync();							\
 	WRITE_ONCE(*p, v);						\
 } while (0)
 
-#define smp_load_acquire(p)						\
+#define __smp_load_acquire(p)						\
 ({									\
 	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
-	smp_lwsync();							\
+	__smp_lwsync();							\
 	___p1;								\
 })
 
-#define smp_mb__before_atomic()     smp_mb()
-#define smp_mb__after_atomic()      smp_mb()
 #define smp_mb__before_spinlock()   smp_mb()
 
+#include <asm-generic/barrier.h>
+
 #endif /* _ASM_POWERPC_BARRIER_H */
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 7ffd0b19135c..5c8db3ce61c8 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -26,26 +26,18 @@
 #define wmb()				barrier()
 #define dma_rmb()			mb()
 #define dma_wmb()			mb()
-#define smp_mb()			mb()
-#define smp_rmb()			rmb()
-#define smp_wmb()			wmb()
+#define __smp_mb()			mb()
+#define __smp_rmb()			rmb()
+#define __smp_wmb()			wmb()
 
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
-
-#define smp_mb__before_atomic()		smp_mb()
-#define smp_mb__after_atomic()		smp_mb()
-
-#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
-#define smp_store_release(p, v)						\
+#define __smp_store_release(p, v)					\
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
 	WRITE_ONCE(*p, v);						\
 } while (0)
 
-#define smp_load_acquire(p)						\
+#define __smp_load_acquire(p)						\
 ({									\
 	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
@@ -53,4 +45,9 @@ do {									\
 	___p1;								\
 })
 
+#define __smp_mb__before_atomic()	barrier()
+#define __smp_mb__after_atomic()	barrier()
+
+#include <asm-generic/barrier.h>
+
 #endif /* __ASM_BARRIER_H */
diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h
index bf91037db4e0..f887c6465a82 100644
--- a/arch/sh/include/asm/barrier.h
+++ b/arch/sh/include/asm/barrier.h
@@ -32,7 +32,8 @@
 #define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
 #endif
 
-#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#define smp_store_mb(var, value) __smp_store_mb(var, value)
 
 #include <asm-generic/barrier.h>
 
diff --git a/arch/sh/include/asm/cmpxchg-grb.h b/arch/sh/include/asm/cmpxchg-grb.h
index f848dec9e483..2ed557b31bd9 100644
--- a/arch/sh/include/asm/cmpxchg-grb.h
+++ b/arch/sh/include/asm/cmpxchg-grb.h
@@ -23,6 +23,28 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 	return retval;
 }
 
+static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
+{
+	unsigned long retval;
+
+	__asm__ __volatile__ (
+		"   .align  2             \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-6,   r15     \n\t" /* LOGIN */
+		"   mov.w  @%1,   %0      \n\t" /* load  old value */
+		"   extu.w  %0,   %0      \n\t" /* extend as unsigned */
+		"   mov.w   %2,   @%1     \n\t" /* store new value */
+		"1: mov     r1,   r15     \n\t" /* LOGOUT */
+		: "=&r" (retval),
+		  "+r"  (m),
+		  "+r"  (val)		/* inhibit r15 overloading */
+		:
+		: "memory" , "r0", "r1");
+
+	return retval;
+}
+
 static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
 {
 	unsigned long retval;
diff --git a/arch/sh/include/asm/cmpxchg-irq.h b/arch/sh/include/asm/cmpxchg-irq.h
index bd11f630414a..f88877257171 100644
--- a/arch/sh/include/asm/cmpxchg-irq.h
+++ b/arch/sh/include/asm/cmpxchg-irq.h
@@ -14,6 +14,17 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 	return retval;
 }
 
+static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
+{
+	unsigned long flags, retval;
+
+	local_irq_save(flags);
+	retval = *m;
+	*m = val;
+	local_irq_restore(flags);
+	return retval;
+}
+
 static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
 {
 	unsigned long flags, retval;
diff --git a/arch/sh/include/asm/cmpxchg-llsc.h b/arch/sh/include/asm/cmpxchg-llsc.h
index 47136661a203..fcfd32271bff 100644
--- a/arch/sh/include/asm/cmpxchg-llsc.h
+++ b/arch/sh/include/asm/cmpxchg-llsc.h
@@ -22,29 +22,8 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 	return retval;
 }
 
-static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
-{
-	unsigned long retval;
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-		"1:					\n\t"
-		"movli.l	@%2, %0	! xchg_u8	\n\t"
-		"mov		%0, %1			\n\t"
-		"mov		%3, %0			\n\t"
-		"movco.l	%0, @%2			\n\t"
-		"bf		1b			\n\t"
-		"synco					\n\t"
-		: "=&z"(tmp), "=&r" (retval)
-		: "r" (m), "r" (val & 0xff)
-		: "t", "memory"
-	);
-
-	return retval;
-}
-
 static inline unsigned long
-__cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
+__cmpxchg_u32(volatile u32 *m, unsigned long old, unsigned long new)
 {
 	unsigned long retval;
 	unsigned long tmp;
@@ -68,4 +47,6 @@ __cmpxchg_u32(volatile u32 *m, unsigned long old, unsigned long new)
 	return retval;
 }
 
+#include <asm/cmpxchg-xchg.h>
+
 #endif /* __ASM_SH_CMPXCHG_LLSC_H */
diff --git a/arch/sh/include/asm/cmpxchg-xchg.h b/arch/sh/include/asm/cmpxchg-xchg.h
new file mode 100644
index 000000000000..7219719c23a3
--- /dev/null
+++ b/arch/sh/include/asm/cmpxchg-xchg.h
@@ -0,0 +1,51 @@
+#ifndef __ASM_SH_CMPXCHG_XCHG_H
+#define __ASM_SH_CMPXCHG_XCHG_H
+
+/*
+ * Copyright (C) 2016 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See the
+ * file "COPYING" in the main directory of this archive for more details.
+ */
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
+
+/*
+ * Portable implementations of 1 and 2 byte xchg using a 4 byte cmpxchg.
+ * Note: this header isn't self-contained: before including it, __cmpxchg_u32
+ * must be defined first.
+ */
+static inline u32 __xchg_cmpxchg(volatile void *ptr, u32 x, int size)
+{
+	int off = (unsigned long)ptr % sizeof(u32);
+	volatile u32 *p = ptr - off;
+#ifdef __BIG_ENDIAN
+	int bitoff = (sizeof(u32) - 1 - off) * BITS_PER_BYTE;
+#else
+	int bitoff = off * BITS_PER_BYTE;
+#endif
+	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
+	u32 oldv, newv;
+	u32 ret;
+
+	do {
+		oldv = READ_ONCE(*p);
+		ret = (oldv & bitmask) >> bitoff;
+		newv = (oldv & ~bitmask) | (x << bitoff);
+	} while (__cmpxchg_u32(p, oldv, newv) != oldv);
+
+	return ret;
+}
+
+static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
+{
+	return __xchg_cmpxchg(m, val, sizeof *m);
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+	return __xchg_cmpxchg(m, val, sizeof *m);
+}
+
+#endif /* __ASM_SH_CMPXCHG_XCHG_H */
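To see the index arithmetic in __xchg_cmpxchg() concretely, take a
hypothetical xchg_u16() call on a little-endian CPU at address 0x1002, i.e.
the upper halfword of the aligned word at 0x1000:

	off     = 0x1002 % 4            = 2
	p       = 0x1002 - 2            = 0x1000      /* containing u32 */
	bitoff  = 2 * BITS_PER_BYTE     = 16
	bitmask = ((1 << 16) - 1) << 16 = 0xffff0000
	ret     = (oldv & 0xffff0000) >> 16           /* old halfword */
	newv    = (oldv & 0x0000ffff) | (x << 16)     /* splice new one in */

The loop retries whenever __cmpxchg_u32() observes that another writer touched
any byte of the containing word between the READ_ONCE() and the cmpxchg, so
the adjacent halfword is never corrupted.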
diff --git a/arch/sh/include/asm/cmpxchg.h b/arch/sh/include/asm/cmpxchg.h
index 85c97b188d71..5225916c1057 100644
--- a/arch/sh/include/asm/cmpxchg.h
+++ b/arch/sh/include/asm/cmpxchg.h
@@ -27,6 +27,9 @@ extern void __xchg_called_with_bad_pointer(void);
 	case 4:						\
 		__xchg__res = xchg_u32(__xchg_ptr, x);	\
 		break;					\
+	case 2:						\
+		__xchg__res = xchg_u16(__xchg_ptr, x);	\
+		break;					\
 	case 1:						\
 		__xchg__res = xchg_u8(__xchg_ptr, x);	\
 		break;					\
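With the new case 2, the generic xchg() accepts 16-bit operands on sh, which
is what the xchg-based __smp_store_mb() above needs for 16-bit variables such
as virtio's event-index fields.  A trivial usage sketch:

	u16 flag = 0;
	unsigned long old;

	old = xchg(&flag, 1);	/* now dispatches to xchg_u16() via "case 2" */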
diff --git a/arch/sparc/include/asm/barrier_32.h b/arch/sparc/include/asm/barrier_32.h
index ae69eda288f4..8059130a6cee 100644
--- a/arch/sparc/include/asm/barrier_32.h
+++ b/arch/sparc/include/asm/barrier_32.h
@@ -1,7 +1,6 @@
 #ifndef __SPARC_BARRIER_H
 #define __SPARC_BARRIER_H
 
-#include <asm/processor.h> /* for nop() */
 #include <asm-generic/barrier.h>
 
 #endif /* !(__SPARC_BARRIER_H) */
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
index 14a928601657..c9f6ee64f41d 100644
--- a/arch/sparc/include/asm/barrier_64.h
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -37,33 +37,14 @@ do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
 #define rmb()	__asm__ __volatile__("":::"memory")
 #define wmb()	__asm__ __volatile__("":::"memory")
 
-#define dma_rmb()	rmb()
-#define dma_wmb()	wmb()
-
-#define smp_store_mb(__var, __value) \
-	do { WRITE_ONCE(__var, __value); membar_safe("#StoreLoad"); } while(0)
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#else
-#define smp_mb()	__asm__ __volatile__("":::"memory")
-#define smp_rmb()	__asm__ __volatile__("":::"memory")
-#define smp_wmb()	__asm__ __volatile__("":::"memory")
-#endif
-
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
-
-#define smp_store_release(p, v)						\
+#define __smp_store_release(p, v)					\
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
 	WRITE_ONCE(*p, v);						\
 } while (0)
 
-#define smp_load_acquire(p)						\
+#define __smp_load_acquire(p)						\
 ({									\
 	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
@@ -71,7 +52,9 @@ do {									\
 	___p1;								\
 })
 
-#define smp_mb__before_atomic()	barrier()
-#define smp_mb__after_atomic()	barrier()
+#define __smp_mb__before_atomic()	barrier()
+#define __smp_mb__after_atomic()	barrier()
+
+#include <asm-generic/barrier.h>
 
 #endif /* !(__SPARC64_BARRIER_H) */
diff --git a/arch/sparc/include/asm/processor.h b/arch/sparc/include/asm/processor.h
index 2fe99e66e760..9da9646bf6c6 100644
--- a/arch/sparc/include/asm/processor.h
+++ b/arch/sparc/include/asm/processor.h
@@ -5,7 +5,4 @@
 #else
 #include <asm/processor_32.h>
 #endif
-
-#define nop() 		__asm__ __volatile__ ("nop")
-
 #endif
diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h
index 96a42ae79f4d..d55222806c2f 100644
--- a/arch/tile/include/asm/barrier.h
+++ b/arch/tile/include/asm/barrier.h
@@ -79,11 +79,12 @@ mb_incoherent(void)
  * But after the word is updated, the routine issues an "mf" before returning,
  * and since it's a function call, we don't even need a compiler barrier.
  */
-#define smp_mb__before_atomic()	smp_mb()
-#define smp_mb__after_atomic()	do { } while (0)
+#define __smp_mb__before_atomic()	__smp_mb()
+#define __smp_mb__after_atomic()	do { } while (0)
+#define smp_mb__after_atomic()	__smp_mb__after_atomic()
 #else /* 64 bit */
-#define smp_mb__before_atomic()	smp_mb()
-#define smp_mb__after_atomic()	smp_mb()
+#define __smp_mb__before_atomic()	__smp_mb()
+#define __smp_mb__after_atomic()	__smp_mb()
 #endif
 
 #include <asm-generic/barrier.h>
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 0681d2532527..a584e1c50918 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -31,20 +31,10 @@
 #endif
 #define dma_wmb()	barrier()
 
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	dma_rmb()
-#define smp_wmb()	barrier()
-#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#else /* !SMP */
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
-#endif /* SMP */
-
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
+#define __smp_mb()	mb()
+#define __smp_rmb()	dma_rmb()
+#define __smp_wmb()	barrier()
+#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
 
 #if defined(CONFIG_X86_PPRO_FENCE)
 
@@ -53,31 +43,31 @@
  * model and we should fall back to full barriers.
  */
 
-#define smp_store_release(p, v)						\
+#define __smp_store_release(p, v)					\
 do {									\
 	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
+	__smp_mb();							\
 	WRITE_ONCE(*p, v);						\
 } while (0)
 
-#define smp_load_acquire(p)						\
+#define __smp_load_acquire(p)						\
 ({									\
 	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
+	__smp_mb();							\
 	___p1;								\
 })
 
 #else /* regular x86 TSO memory ordering */
 
-#define smp_store_release(p, v)						\
+#define __smp_store_release(p, v)					\
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
 	WRITE_ONCE(*p, v);						\
 } while (0)
 
-#define smp_load_acquire(p)						\
+#define __smp_load_acquire(p)						\
 ({									\
 	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
@@ -88,7 +78,9 @@ do {									\
 #endif
 
 /* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic()	barrier()
-#define smp_mb__after_atomic()	barrier()
+#define __smp_mb__before_atomic()	barrier()
+#define __smp_mb__after_atomic()	barrier()
+
+#include <asm-generic/barrier.h>
 
 #endif /* _ASM_X86_BARRIER_H */
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index 755481f14d90..174781a404ff 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -36,13 +36,6 @@
 #endif /* CONFIG_X86_PPRO_FENCE */
 #define dma_wmb()	barrier()
 
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
-
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
+#include <asm-generic/barrier.h>
 
 #endif
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
index 5b88774c75ab..956596e4d437 100644
--- a/arch/xtensa/include/asm/barrier.h
+++ b/arch/xtensa/include/asm/barrier.h
@@ -13,8 +13,8 @@
 #define rmb() barrier()
 #define wmb() mb()
 
-#define smp_mb__before_atomic()		barrier()
-#define smp_mb__after_atomic()		barrier()
+#define __smp_mb__before_atomic()		barrier()
+#define __smp_mb__after_atomic()		barrier()
 
 #include <asm-generic/barrier.h>
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 06496a128162..4150873d432e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -130,7 +130,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
 	static vq_callback_t *callbacks[] = {
 		virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
 	};
-	static const char *names[] = { "control", "cursor" };
+	static const char * const names[] = { "control", "cursor" };
 
 	struct virtio_gpu_device *vgdev;
 	/* this will expand later */
diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c
index e486a0c26267..f6ed57d3125c 100644
--- a/drivers/misc/mic/card/mic_virtio.c
+++ b/drivers/misc/mic/card/mic_virtio.c
@@ -311,7 +311,7 @@ unmap:
 static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 			struct virtqueue *vqs[],
 			vq_callback_t *callbacks[],
-			const char *names[])
+			const char * const names[])
 {
 	struct mic_vdev *mvdev = to_micvdev(vdev);
 	struct mic_device_ctrl __iomem *dc = mvdev->dc;
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index e1a10232a943..e44872fb9e5e 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -147,7 +147,7 @@ static void rproc_virtio_del_vqs(struct virtio_device *vdev)
 static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 				 struct virtqueue *vqs[],
 				 vq_callback_t *callbacks[],
-				 const char *names[])
+				 const char * const names[])
 {
 	struct rproc *rproc = vdev_to_rproc(vdev);
 	int i, ret;
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 73354ee27877..1fcd27c1f183 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -945,7 +945,7 @@ static void rpmsg_ns_cb(struct rpmsg_channel *rpdev, void *data, int len,
 static int rpmsg_probe(struct virtio_device *vdev)
 {
 	vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
-	const char *names[] = { "input", "output" };
+	static const char * const names[] = { "input", "output" };
 	struct virtqueue *vqs[2];
 	struct virtproc_info *vrp;
 	void *bufs_va;
diff --git a/drivers/s390/virtio/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c
index 53fb975c404b..1d060fd293a3 100644
--- a/drivers/s390/virtio/kvm_virtio.c
+++ b/drivers/s390/virtio/kvm_virtio.c
@@ -255,7 +255,7 @@ static void kvm_del_vqs(struct virtio_device *vdev)
 static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 			struct virtqueue *vqs[],
 			vq_callback_t *callbacks[],
-			const char *names[])
+			const char * const names[])
 {
 	struct kvm_device *kdev = to_kvmdev(vdev);
 	int i;
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 1b831598df7c..bf2d1300a957 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -635,7 +635,7 @@ out:
 static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 			       struct virtqueue *vqs[],
 			       vq_callback_t *callbacks[],
-			       const char *names[])
+			       const char * const names[])
 {
 	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
 	unsigned long *indicatorp = NULL;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 7efc32945810..0c3691f46575 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -209,8 +209,8 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
 	 */
 	if (vb->num_pfns != 0)
 		tell_host(vb, vb->deflate_vq);
-	mutex_unlock(&vb->balloon_lock);
 	release_pages_balloon(vb);
+	mutex_unlock(&vb->balloon_lock);
 	return num_freed_pages;
 }
 
@@ -388,7 +388,7 @@ static int init_vqs(struct virtio_balloon *vb)
 {
 	struct virtqueue *vqs[3];
 	vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
-	const char *names[] = { "inflate", "deflate", "stats" };
+	static const char * const names[] = { "inflate", "deflate", "stats" };
 	int err, nvqs;
 
 	/*
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index c96944b59856..350a2a5a49db 100644
--- a/drivers/virtio/virtio_input.c
+++ b/drivers/virtio/virtio_input.c
@@ -170,7 +170,7 @@ static int virtinput_init_vqs(struct virtio_input *vi)
 	struct virtqueue *vqs[2];
 	vq_callback_t *cbs[] = { virtinput_recv_events,
 				 virtinput_recv_status };
-	static const char *names[] = { "events", "status" };
+	static const char * const names[] = { "events", "status" };
 	int err;
 
 	err = vi->vdev->config->find_vqs(vi->vdev, 2, vqs, cbs, names);
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index f499d9da7237..745c6ee1bb3e 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -482,7 +482,7 @@ error_available:
 static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 		       struct virtqueue *vqs[],
 		       vq_callback_t *callbacks[],
-		       const char *names[])
+		       const char * const names[])
 {
 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
 	unsigned int irq = platform_get_irq(vm_dev->pdev, 0);
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 78f804af6c20..36205c27c4d0 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -296,7 +296,7 @@ void vp_del_vqs(struct virtio_device *vdev)
 static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 			      struct virtqueue *vqs[],
 			      vq_callback_t *callbacks[],
-			      const char *names[],
+			      const char * const names[],
 			      bool use_msix,
 			      bool per_vq_vectors)
 {
@@ -376,7 +376,7 @@ error_find:
 int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 		struct virtqueue *vqs[],
 		vq_callback_t *callbacks[],
-		const char *names[])
+		const char * const names[])
 {
 	int err;
 
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index b976d968e793..2cc252270b2d 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -139,7 +139,7 @@ void vp_del_vqs(struct virtio_device *vdev);
 int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 		struct virtqueue *vqs[],
 		vq_callback_t *callbacks[],
-		const char *names[]);
+		const char * const names[]);
 const char *vp_bus_name(struct virtio_device *vdev);
 
 /* Setup the affinity for a virtqueue:
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 8e5cf194cc0b..c0c11fad4611 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -418,7 +418,7 @@ err_new_queue:
 static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 			      struct virtqueue *vqs[],
 			      vq_callback_t *callbacks[],
-			      const char *names[])
+			      const char * const names[])
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	struct virtqueue *vq;
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index ee663c458b20..e12e385f7ac3 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -517,10 +517,10 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
 	/* If we expect an interrupt for the next entry, tell host
 	 * by writing event index and flush out the write before
 	 * the read in the next get_buf call. */
-	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
-		vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx);
-		virtio_mb(vq->weak_barriers);
-	}
+	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
+		virtio_store_mb(vq->weak_barriers,
+				&vring_used_event(&vq->vring),
+				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
 
 #ifdef DEBUG
 	vq->last_add_time_valid = false;
@@ -653,8 +653,11 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
 	}
 	/* TODO: tune this threshold */
 	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
-	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs);
-	virtio_mb(vq->weak_barriers);
+
+	virtio_store_mb(vq->weak_barriers,
+			&vring_used_event(&vq->vring),
+			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
+
 	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
 		END_USE(vq);
 		return false;
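Both call sites above fuse what used to be a plain store followed by
virtio_mb() into a single virtio_store_mb() call.  That helper is added in a
companion hunk of this series (include/linux/virtio_ring.h, not shown in this
section); its shape is roughly:

	static inline void virtio_store_mb(bool weak_barriers,
					   __virtio16 *p, __virtio16 v)
	{
		if (weak_barriers) {
			virt_store_mb(*p, v);	/* store + virt_mb() */
		} else {
			WRITE_ONCE(*p, v);
			mb();
		}
	}

Expressing store-plus-barrier as one operation lets architectures such as sh
implement it with a single xchg (see the sh __smp_store_mb() hunk earlier in
this patch) instead of a store followed by a separate heavyweight fence.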
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 96a1b8da5371..eff2b88003d9 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -41,6 +41,7 @@
 #include <linux/percpu.h>
 #include <linux/cpu.h>
 
+#include <asm/barrier.h>
 #include <asm/sync_bitops.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -296,7 +297,7 @@ static void consume_one_event(unsigned cpu,
 	 * control block.
 	 */
 	if (head == 0) {
-		rmb(); /* Ensure word is up-to-date before reading head. */
+		virt_rmb(); /* Ensure word is up-to-date before reading head. */
 		head = control_block->head[priority];
 	}
 
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index fdb0f339d0a7..ecdecce80a6c 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -123,14 +123,14 @@ int xb_write(const void *data, unsigned len)
 		avail = len;
 
 		/* Must write data /after/ reading the consumer index. */
-		mb();
+		virt_mb();
 
 		memcpy(dst, data, avail);
 		data += avail;
 		len -= avail;
 
 		/* Other side must not see new producer until data is there. */
-		wmb();
+		virt_wmb();
 		intf->req_prod += avail;
 
 		/* Implies mb(): other side will see the updated producer. */
@@ -180,14 +180,14 @@ int xb_read(void *data, unsigned len)
 		avail = len;
 
 		/* Must read data /after/ reading the producer index. */
-		rmb();
+		virt_rmb();
 
 		memcpy(data, src, avail);
 		data += avail;
 		len -= avail;
 
 		/* Other side must not see free space until we've copied out */
-		mb();
+		virt_mb();
 		intf->rsp_cons += avail;
 
 		pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);
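
The xenbus barrier placement is the usual single-producer/single-consumer ring discipline, now expressed with barriers that remain real barriers on UP guests. A minimal producer-side sketch, assuming a hypothetical power-of-two byte ring (not the actual xenstore interface):

	#define RING_SIZE 1024			/* assumed power of two */

	struct byte_ring {
		unsigned int prod, cons;	/* free-running indices */
		char buf[RING_SIZE];
	};

	static void ring_put_byte(struct byte_ring *r, char c)
	{
		while (r->prod - READ_ONCE(r->cons) == RING_SIZE)
			cpu_relax();		/* ring full: wait for consumer */

		/* Must write data /after/ reading the consumer index. */
		virt_mb();

		r->buf[r->prod & (RING_SIZE - 1)] = c;

		/* Other side must not see new producer until data is there. */
		virt_wmb();
		WRITE_ONCE(r->prod, r->prod + 1);
	}
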
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 0f45f93ef692..1cceca146905 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -54,22 +54,38 @@
 #define read_barrier_depends()		do { } while (0)
 #endif
 
+#ifndef __smp_mb
+#define __smp_mb()	mb()
+#endif
+
+#ifndef __smp_rmb
+#define __smp_rmb()	rmb()
+#endif
+
+#ifndef __smp_wmb
+#define __smp_wmb()	wmb()
+#endif
+
+#ifndef __smp_read_barrier_depends
+#define __smp_read_barrier_depends()	read_barrier_depends()
+#endif
+
 #ifdef CONFIG_SMP
 
 #ifndef smp_mb
-#define smp_mb()	mb()
+#define smp_mb()	__smp_mb()
 #endif
 
 #ifndef smp_rmb
-#define smp_rmb()	rmb()
+#define smp_rmb()	__smp_rmb()
 #endif
 
 #ifndef smp_wmb
-#define smp_wmb()	wmb()
+#define smp_wmb()	__smp_wmb()
 #endif
 
 #ifndef smp_read_barrier_depends
-#define smp_read_barrier_depends()	read_barrier_depends()
+#define smp_read_barrier_depends()	__smp_read_barrier_depends()
 #endif
 
 #else	/* !CONFIG_SMP */
@@ -92,32 +108,104 @@
 
 #endif	/* CONFIG_SMP */
 
+#ifndef __smp_store_mb
+#define __smp_store_mb(var, value)  do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
+#endif
+
+#ifndef __smp_mb__before_atomic
+#define __smp_mb__before_atomic()	__smp_mb()
+#endif
+
+#ifndef __smp_mb__after_atomic
+#define __smp_mb__after_atomic()	__smp_mb()
+#endif
+
+#ifndef __smp_store_release
+#define __smp_store_release(p, v)					\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	__smp_mb();							\
+	WRITE_ONCE(*p, v);						\
+} while (0)
+#endif
+
+#ifndef __smp_load_acquire
+#define __smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	__smp_mb();							\
+	___p1;								\
+})
+#endif
+
+#ifdef CONFIG_SMP
+
+#ifndef smp_store_mb
+#define smp_store_mb(var, value)  __smp_store_mb(var, value)
+#endif
+
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic()	__smp_mb__before_atomic()
+#endif
+
+#ifndef smp_mb__after_atomic
+#define smp_mb__after_atomic()	__smp_mb__after_atomic()
+#endif
+
+#ifndef smp_store_release
+#define smp_store_release(p, v) __smp_store_release(p, v)
+#endif
+
+#ifndef smp_load_acquire
+#define smp_load_acquire(p) __smp_load_acquire(p)
+#endif
+
+#else	/* !CONFIG_SMP */
+
 #ifndef smp_store_mb
-#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); smp_mb(); } while (0)
+#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); barrier(); } while (0)
 #endif
 
 #ifndef smp_mb__before_atomic
-#define smp_mb__before_atomic()	smp_mb()
+#define smp_mb__before_atomic()	barrier()
 #endif
 
 #ifndef smp_mb__after_atomic
-#define smp_mb__after_atomic()	smp_mb()
+#define smp_mb__after_atomic()	barrier()
 #endif
 
+#ifndef smp_store_release
 #define smp_store_release(p, v)						\
 do {									\
 	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
+	barrier();							\
 	WRITE_ONCE(*p, v);						\
 } while (0)
+#endif
 
+#ifndef smp_load_acquire
 #define smp_load_acquire(p)						\
 ({									\
 	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
+	barrier();							\
 	___p1;								\
 })
+#endif
+
+#endif
+
+/* Barriers for virtual machine guests when talking to an SMP host */
+#define virt_mb() __smp_mb()
+#define virt_rmb() __smp_rmb()
+#define virt_wmb() __smp_wmb()
+#define virt_read_barrier_depends() __smp_read_barrier_depends()
+#define virt_store_mb(var, value) __smp_store_mb(var, value)
+#define virt_mb__before_atomic() __smp_mb__before_atomic()
+#define virt_mb__after_atomic() __smp_mb__after_atomic()
+#define virt_store_release(p, v) __smp_store_release(p, v)
+#define virt_load_acquire(p) __smp_load_acquire(p)
 
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_GENERIC_BARRIER_H */
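
The layering above is: each architecture supplies (or inherits) the __smp_* primitives; the public smp_* names bind to __smp_* under CONFIG_SMP and weaken to barrier() on UP builds; the virt_* names bind to __smp_* unconditionally, so they stay real barriers even in a UP guest. A sketch of why that distinction matters, assuming a hypothetical structure shared with the host:

	struct shared {
		int data;
		int ready;
	};

	/* UP guest publishing to a possibly-SMP host: virt_store_release()
	 * always expands to a real release, never to a plain barrier(). */
	static void guest_publish(struct shared *s, int v)
	{
		s->data = v;				/* payload first */
		virt_store_release(&s->ready, 1);	/* then the flag */
	}

	/* Guest consuming host-written data; pairs with a host-side release. */
	static int guest_consume(struct shared *s)
	{
		if (!virt_load_acquire(&s->ready))
			return -1;
		return s->data;			/* ordered after the flag load */
	}
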
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index e5ce8ab0b8b0..6e6cb0c9d7cb 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -70,7 +70,7 @@ struct virtio_config_ops {
 	int (*find_vqs)(struct virtio_device *, unsigned nvqs,
 			struct virtqueue *vqs[],
 			vq_callback_t *callbacks[],
-			const char *names[]);
+			const char * const names[]);
 	void (*del_vqs)(struct virtio_device *);
 	u64 (*get_features)(struct virtio_device *vdev);
 	int (*finalize_features)(struct virtio_device *vdev);
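
The find_vqs() change is plain const-correctness: with the old prototype, a fully-const name table could not be passed without a cast, because `const char *names[]` still leaves the pointer slots themselves writable. A hypothetical caller that now type-checks directly (vdev, vqs and callbacks assumed declared elsewhere):

	static const char * const names[] = { "rx", "tx", "ctrl" };

	err = vdev->config->find_vqs(vdev, ARRAY_SIZE(names),
				     vqs, callbacks, names);
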
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index 8e50888a6d59..a156e2b6ccfe 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -12,7 +12,7 @@
  * anyone care?
  *
  * For virtio_pci on SMP, we don't need to order with respect to MMIO
- * accesses through relaxed memory I/O windows, so smp_mb() et al are
+ * accesses through relaxed memory I/O windows, so virt_mb() et al are
  * sufficient.
  *
  * For using virtio to talk to real devices (eg. other heterogeneous
@@ -23,18 +23,16 @@
 
 static inline void virtio_mb(bool weak_barriers)
 {
-#ifdef CONFIG_SMP
 	if (weak_barriers)
-		smp_mb();
+		virt_mb();
 	else
-#endif
 		mb();
 }
 
 static inline void virtio_rmb(bool weak_barriers)
 {
 	if (weak_barriers)
-		dma_rmb();
+		virt_rmb();
 	else
 		rmb();
 }
@@ -42,11 +40,22 @@ static inline void virtio_rmb(bool weak_barriers)
 static inline void virtio_wmb(bool weak_barriers)
 {
 	if (weak_barriers)
-		dma_wmb();
+		virt_wmb();
 	else
 		wmb();
 }
 
+static inline void virtio_store_mb(bool weak_barriers,
+				   __virtio16 *p, __virtio16 v)
+{
+	if (weak_barriers) {
+		virt_store_mb(*p, v);
+	} else {
+		WRITE_ONCE(*p, v);
+		mb();
+	}
+}
+
 struct virtio_device;
 struct virtqueue;
 
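
virtio_store_mb() selects at run time between the two memory models virtio has to serve. Roughly, with the generic definitions above it expands to (sketch, not code in this patch):

	/* weak_barriers: a hypervisor is the other side */
	WRITE_ONCE(*p, v);
	__smp_mb();		/* a real barrier even on a UP guest */

	/* !weak_barriers: a real, possibly cache-incoherent device */
	WRITE_ONCE(*p, v);
	mb();			/* mandatory barrier */
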
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
index 7dc685b4057d..21f4fbd55e48 100644
--- a/include/xen/interface/io/ring.h
+++ b/include/xen/interface/io/ring.h
@@ -208,12 +208,12 @@ struct __name##_back_ring {				\
 
 
 #define RING_PUSH_REQUESTS(_r) do {				\
-	wmb(); /* back sees requests /before/ updated producer index */ \
+	virt_wmb(); /* back sees requests /before/ updated producer index */ \
 	(_r)->sring->req_prod = (_r)->req_prod_pvt;		\
 } while (0)
 
 #define RING_PUSH_RESPONSES(_r) do {				\
-	wmb(); /* front sees responses /before/ updated producer index */ \
+	virt_wmb(); /* front sees responses /before/ updated producer index */ \
 	(_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;		\
 } while (0)
 
@@ -250,9 +250,9 @@ struct __name##_back_ring {				\
 #define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {	\
 	RING_IDX __old = (_r)->sring->req_prod;			\
 	RING_IDX __new = (_r)->req_prod_pvt;			\
-	wmb(); /* back sees requests /before/ updated producer index */ \
+	virt_wmb(); /* back sees requests /before/ updated producer index */ \
 	(_r)->sring->req_prod = __new;				\
-	mb(); /* back sees new requests /before/ we check req_event */ \
+	virt_mb(); /* back sees new requests /before/ we check req_event */ \
 	(_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
 		     (RING_IDX)(__new - __old));		\
 } while (0)
@@ -260,9 +260,9 @@ struct __name##_back_ring {				\
 #define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
 	RING_IDX __old = (_r)->sring->rsp_prod;			\
 	RING_IDX __new = (_r)->rsp_prod_pvt;			\
-	wmb(); /* front sees responses /before/ updated producer index */ \
+	virt_wmb(); /* front sees responses /before/ updated producer index */ \
 	(_r)->sring->rsp_prod = __new;				\
-	mb(); /* front sees new responses /before/ we check rsp_event */ \
+	virt_mb(); /* front sees new responses /before/ we check rsp_event */ \
 	(_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
 		     (RING_IDX)(__new - __old));		\
 } while (0)
@@ -271,7 +271,7 @@ struct __name##_back_ring {				\
 	(_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);	\
 	if (_work_to_do) break;					\
 	(_r)->sring->req_event = (_r)->req_cons + 1;		\
-	mb();							\
+	virt_mb();						\
 	(_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);	\
 } while (0)
 
@@ -279,7 +279,7 @@ struct __name##_back_ring {				\
 	(_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);	\
 	if (_work_to_do) break;					\
 	(_r)->sring->rsp_event = (_r)->rsp_cons + 1;		\
-	mb();							\
+	virt_mb();						\
 	(_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);	\
 } while (0)
 
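
All of the ring.h changes follow one pattern: order the payload before the published index with virt_wmb(), then issue virt_mb() before re-reading the peer's event index. The notify test itself relies on unsigned modulo arithmetic; a hypothetical helper equivalent to the macro's comparison makes the window explicit:

	/* Notify iff 'event' lies in the half-open window (old, new]; the
	 * RING_IDX subtraction wraps modulo 2^32, so this also holds
	 * across index wraparound. */
	static bool ring_needs_notify(RING_IDX old_idx, RING_IDX new_idx,
				      RING_IDX event)
	{
		return (RING_IDX)(new_idx - event) < (RING_IDX)(new_idx - old_idx);
	}
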
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index d3116be5a00f..300117f1a08f 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -61,6 +61,7 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
 	bool dequeued_page;
 
 	dequeued_page = false;
+	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
 	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
 		/*
 		 * Block others from accessing the 'page' while we get around
@@ -75,15 +76,14 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
 				continue;
 			}
 #endif
-			spin_lock_irqsave(&b_dev_info->pages_lock, flags);
 			balloon_page_delete(page);
 			__count_vm_event(BALLOON_DEFLATE);
-			spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 			unlock_page(page);
 			dequeued_page = true;
 			break;
 		}
 	}
+	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 
 	if (!dequeued_page) {
 		/*
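
The balloon_compaction.c hunks widen pages_lock so it covers the whole list walk rather than just the deletion: previously a concurrent enqueue or dequeue could mutate the list while list_for_each_entry_safe() was traversing it unlocked. The resulting shape is the standard locked-iteration pattern (sketch):

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
		/* per-page work; may break out with the page claimed */
	}
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
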
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 2b3c22808c3b..c7bf1aa2eeb3 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -5116,13 +5116,44 @@ sub process {
 			}
 		}
 # check for memory barriers without a comment.
-		if ($line =~ /\b(mb|rmb|wmb|read_barrier_depends|smp_mb|smp_rmb|smp_wmb|smp_read_barrier_depends)\(/) {
+
+		my $barriers = qr{
+			mb|
+			rmb|
+			wmb|
+			read_barrier_depends
+		}x;
+		my $barrier_stems = qr{
+			mb__before_atomic|
+			mb__after_atomic|
+			store_release|
+			load_acquire|
+			store_mb|
+			(?:$barriers)
+		}x;
+		my $all_barriers = qr{
+			(?:$barriers)|
+			smp_(?:$barrier_stems)|
+			virt_(?:$barrier_stems)
+		}x;
+
+		if ($line =~ /\b(?:$all_barriers)\s*\(/) {
 			if (!ctx_has_comment($first_line, $linenr)) {
 				WARN("MEMORY_BARRIER",
 				     "memory barrier without comment\n" . $herecurr);
 			}
 		}
 
+		my $underscore_smp_barriers = qr{__smp_(?:$barrier_stems)}x;
+
+		if ($realfile !~ m@^include/asm-generic/@ &&
+		    $realfile !~ m@/barrier\.h$@ &&
+		    $line =~ m/\b(?:$underscore_smp_barriers)\s*\(/ &&
+		    $line !~ m/^.\s*\#\s*define\s+(?:$underscore_smp_barriers)\s*\(/) {
+			WARN("MEMORY_BARRIER",
+			     "__smp memory barriers shouldn't be used outside barrier.h and asm-generic\n" . $herecurr);
+		}
+
 # check for waitqueue_active without a comment.
 		if ($line =~ /\bwaitqueue_active\s*\(/) {
 			if (!ctx_has_comment($first_line, $linenr)) {
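
With the composed regexes, the existing "memory barrier without comment" warning now covers the whole smp_* and virt_* families, including the compound stems (store_release, load_acquire, store_mb, mb__before/after_atomic), and a second check rejects the low-level __smp_* names in ordinary code. For example (illustrative), a bare `virt_wmb();` in a driver now draws the same warning that a bare `smp_wmb();` always did, while a line such as

	__smp_mb();	/* fully documented, but still flagged */

warns regardless of its comment, because __smp_* is reserved for barrier.h and the asm-generic headers.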