author     H. Peter Anvin <hpa@zytor.com>        2016-06-08 15:38:38 -0400
committer  H. Peter Anvin <hpa@linux.intel.com>  2016-06-08 15:41:20 -0400
commit     117780eef7740729e803bdcc0d5f2f48137ea8e3 (patch)
tree       63bd519e1d8b115d332fcb6e63ee4f94ee235161
parent     2823d4da5d8a0c222747b24eceb65f5b30717d02 (diff)
x86, asm: use bool for bitops and other assembly outputs
The gcc people have confirmed that using "bool" when combined with inline
assembly always is treated as a byte-sized operand that can be assumed to be
0 or 1, which is exactly what the SET instruction emits.  Change the output
types and intermediate variables of as many operations as practical to "bool".

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Link: http://lkml.kernel.org/r/1465414726-197858-3-git-send-email-hpa@linux.intel.com
Reviewed-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Borislav Petkov <bp@suse.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
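For context, here is a minimal user-space sketch of the pattern this patch
applies, adapted from the patch's own variable_test_bit() in
arch/x86/boot/bitops.h (the stdbool.h/stdint.h types and the bit_is_set/main
names are illustrative stand-ins for the kernel's bool and u32; x86 with gcc
or clang is assumed).  SETC stores exactly one byte holding 0 or 1, which is
precisely the representation gcc guarantees for a "bool" asm output, so the
flag no longer needs a u8 temporary or an explicit "!= 0" conversion:

    #include <stdbool.h>
    #include <stdint.h>

    /* Test bit 'nr' of the bitmap at 'addr'.  BTL copies the selected bit
     * into the carry flag; SETC then stores a single byte that is exactly
     * 0 or 1 into 'v'.  Because gcc treats a byte-sized asm output bound
     * to a bool as already being 0 or 1, no widening (movzbl) or masking
     * (& 1) is needed before the caller uses the result.
     */
    static inline bool bit_is_set(int nr, const void *addr)
    {
            bool v;
            const uint32_t *p = (const uint32_t *)addr;

            asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
            return v;
    }

    int main(void)
    {
            uint32_t bitmap[2] = { 0x10, 0 };       /* only bit 4 is set */
            return bit_is_set(4, bitmap) ? 0 : 1;   /* exits 0: bit found */
    }

With the earlier int/u8 versions the compiler had to widen the SETC byte
(movzbl) before the caller could branch on it; binding the output straight to
a bool lets that widening disappear.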
-rw-r--r--  arch/x86/boot/bitops.h              |  8
-rw-r--r--  arch/x86/boot/boot.h                |  8
-rw-r--r--  arch/x86/boot/string.c              |  2
-rw-r--r--  arch/x86/include/asm/apm.h          |  6
-rw-r--r--  arch/x86/include/asm/archrandom.h   | 16
-rw-r--r--  arch/x86/include/asm/atomic.h       |  8
-rw-r--r--  arch/x86/include/asm/atomic64_64.h  | 10
-rw-r--r--  arch/x86/include/asm/bitops.h       | 28
-rw-r--r--  arch/x86/include/asm/local.h        |  8
-rw-r--r--  arch/x86/include/asm/percpu.h       |  8
-rw-r--r--  arch/x86/include/asm/rmwcc.h        |  4
-rw-r--r--  arch/x86/include/asm/rwsem.h        | 17
-rw-r--r--  include/linux/random.h              | 12
13 files changed, 69 insertions, 66 deletions
diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
index 878e4b9940d9..0d41d68131cc 100644
--- a/arch/x86/boot/bitops.h
+++ b/arch/x86/boot/bitops.h
@@ -16,14 +16,16 @@
 #define BOOT_BITOPS_H
 #define _LINUX_BITOPS_H         /* Inhibit inclusion of <linux/bitops.h> */
 
-static inline int constant_test_bit(int nr, const void *addr)
+#include <linux/types.h>
+
+static inline bool constant_test_bit(int nr, const void *addr)
 {
         const u32 *p = (const u32 *)addr;
         return ((1UL << (nr & 31)) & (p[nr >> 5])) != 0;
 }
-static inline int variable_test_bit(int nr, const void *addr)
+static inline bool variable_test_bit(int nr, const void *addr)
 {
-        u8 v;
+        bool v;
         const u32 *p = (const u32 *)addr;
 
         asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index 9011a88353de..2edb2d53c3a2 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -176,16 +176,16 @@ static inline void wrgs32(u32 v, addr_t addr)
 }
 
 /* Note: these only return true/false, not a signed return value! */
-static inline int memcmp_fs(const void *s1, addr_t s2, size_t len)
+static inline bool memcmp_fs(const void *s1, addr_t s2, size_t len)
 {
-        u8 diff;
+        bool diff;
         asm volatile("fs; repe; cmpsb; setnz %0"
                      : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
         return diff;
 }
-static inline int memcmp_gs(const void *s1, addr_t s2, size_t len)
+static inline bool memcmp_gs(const void *s1, addr_t s2, size_t len)
 {
-        u8 diff;
+        bool diff;
         asm volatile("gs; repe; cmpsb; setnz %0"
                      : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
         return diff;
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index 318b8465d302..cc3bd583dce1 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -17,7 +17,7 @@
 
 int memcmp(const void *s1, const void *s2, size_t len)
 {
-        u8 diff;
+        bool diff;
         asm("repe; cmpsb; setnz %0"
             : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
         return diff;
diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
index 20370c6db74b..93eebc636c76 100644
--- a/arch/x86/include/asm/apm.h
+++ b/arch/x86/include/asm/apm.h
@@ -45,11 +45,11 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
45 : "memory", "cc"); 45 : "memory", "cc");
46} 46}
47 47
48static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in, 48static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
49 u32 ecx_in, u32 *eax) 49 u32 ecx_in, u32 *eax)
50{ 50{
51 int cx, dx, si; 51 int cx, dx, si;
52 u8 error; 52 bool error;
53 53
54 /* 54 /*
55 * N.B. We do NOT need a cld after the BIOS call 55 * N.B. We do NOT need a cld after the BIOS call
diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h
index 69f1366f1aa3..ab6f599ce2fd 100644
--- a/arch/x86/include/asm/archrandom.h
+++ b/arch/x86/include/asm/archrandom.h
@@ -43,7 +43,7 @@
 #ifdef CONFIG_ARCH_RANDOM
 
 /* Instead of arch_get_random_long() when alternatives haven't run. */
-static inline int rdrand_long(unsigned long *v)
+static inline bool rdrand_long(unsigned long *v)
 {
         int ok;
         asm volatile("1: " RDRAND_LONG "\n\t"
@@ -53,13 +53,13 @@ static inline int rdrand_long(unsigned long *v)
53 "2:" 53 "2:"
54 : "=r" (ok), "=a" (*v) 54 : "=r" (ok), "=a" (*v)
55 : "0" (RDRAND_RETRY_LOOPS)); 55 : "0" (RDRAND_RETRY_LOOPS));
56 return ok; 56 return !!ok;
57} 57}
58 58
59/* A single attempt at RDSEED */ 59/* A single attempt at RDSEED */
60static inline bool rdseed_long(unsigned long *v) 60static inline bool rdseed_long(unsigned long *v)
61{ 61{
62 unsigned char ok; 62 bool ok;
63 asm volatile(RDSEED_LONG "\n\t" 63 asm volatile(RDSEED_LONG "\n\t"
64 "setc %0" 64 "setc %0"
65 : "=qm" (ok), "=a" (*v)); 65 : "=qm" (ok), "=a" (*v));
@@ -67,7 +67,7 @@ static inline bool rdseed_long(unsigned long *v)
 }
 
 #define GET_RANDOM(name, type, rdrand, nop)                     \
-static inline int name(type *v)                                 \
+static inline bool name(type *v)                                \
 {                                                               \
         int ok;                                                 \
         alternative_io("movl $0, %0\n\t"                        \
@@ -80,13 +80,13 @@ static inline int name(type *v) \
                        X86_FEATURE_RDRAND,                      \
                        ASM_OUTPUT2("=r" (ok), "=a" (*v)),       \
                        "0" (RDRAND_RETRY_LOOPS));               \
-        return ok;                                              \
+        return !!ok;                                            \
 }
 
 #define GET_SEED(name, type, rdseed, nop)                       \
-static inline int name(type *v)                                 \
+static inline bool name(type *v)                                \
 {                                                               \
-        unsigned char ok;                                       \
+        bool ok;                                                \
         alternative_io("movb $0, %0\n\t"                        \
                        nop,                                     \
                        rdseed "\n\t"                            \
@@ -119,7 +119,7 @@ GET_SEED(arch_get_random_seed_int, unsigned int, RDSEED_INT, ASM_NOP4);
 
 #else
 
-static inline int rdrand_long(unsigned long *v)
+static inline bool rdrand_long(unsigned long *v)
 {
         return 0;
 }
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 3e8674288198..17d881248e6c 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -75,7 +75,7 @@ static __always_inline void atomic_sub(int i, atomic_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
-static __always_inline int atomic_sub_and_test(int i, atomic_t *v)
+static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
 {
         GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
 }
@@ -112,7 +112,7 @@ static __always_inline void atomic_dec(atomic_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static __always_inline int atomic_dec_and_test(atomic_t *v)
+static __always_inline bool atomic_dec_and_test(atomic_t *v)
 {
         GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
 }
@@ -125,7 +125,7 @@ static __always_inline int atomic_dec_and_test(atomic_t *v)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static __always_inline int atomic_inc_and_test(atomic_t *v)
+static __always_inline bool atomic_inc_and_test(atomic_t *v)
 {
         GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
 }
@@ -139,7 +139,7 @@ static __always_inline int atomic_inc_and_test(atomic_t *v)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static __always_inline int atomic_add_negative(int i, atomic_t *v)
+static __always_inline bool atomic_add_negative(int i, atomic_t *v)
 {
         GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
 }
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 037351022f54..4f881d7f0c39 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -70,7 +70,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
-static inline int atomic64_sub_and_test(long i, atomic64_t *v)
+static inline bool atomic64_sub_and_test(long i, atomic64_t *v)
 {
         GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
 }
@@ -109,7 +109,7 @@ static __always_inline void atomic64_dec(atomic64_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static inline int atomic64_dec_and_test(atomic64_t *v)
+static inline bool atomic64_dec_and_test(atomic64_t *v)
 {
         GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
 }
@@ -122,7 +122,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static inline int atomic64_inc_and_test(atomic64_t *v)
+static inline bool atomic64_inc_and_test(atomic64_t *v)
 {
         GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
 }
@@ -136,7 +136,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static inline int atomic64_add_negative(long i, atomic64_t *v)
+static inline bool atomic64_add_negative(long i, atomic64_t *v)
 {
         GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
 }
@@ -180,7 +180,7 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
 {
         long c, old;
         c = atomic64_read(v);
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index b2b797d1f49a..8cbb7f495546 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -201,7 +201,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static __always_inline int test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
 {
         GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
 }
@@ -213,7 +213,7 @@ static __always_inline int test_and_set_bit(long nr, volatile unsigned long *add
  *
  * This is the same as test_and_set_bit on x86.
  */
-static __always_inline int
+static __always_inline bool
 test_and_set_bit_lock(long nr, volatile unsigned long *addr)
 {
         return test_and_set_bit(nr, addr);
@@ -228,9 +228,9 @@ test_and_set_bit_lock(long nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static __always_inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-        unsigned char oldbit;
+        bool oldbit;
 
         asm("bts %2,%1\n\t"
             "setc %0"
@@ -247,7 +247,7 @@ static __always_inline int __test_and_set_bit(long nr, volatile unsigned long *a
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static __always_inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
         GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
 }
@@ -268,9 +268,9 @@ static __always_inline int test_and_clear_bit(long nr, volatile unsigned long *a
  * accessed from a hypervisor on the same CPU if running in a VM: don't change
  * this without also updating arch/x86/kernel/kvm.c
  */
-static __always_inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-        unsigned char oldbit;
+        bool oldbit;
 
         asm volatile("btr %2,%1\n\t"
                      "setc %0"
@@ -280,9 +280,9 @@ static __always_inline int __test_and_clear_bit(long nr, volatile unsigned long
 }
 
 /* WARNING: non atomic and it can be reordered! */
-static __always_inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-        unsigned char oldbit;
+        bool oldbit;
 
         asm volatile("btc %2,%1\n\t"
                      "setc %0"
@@ -300,20 +300,20 @@ static __always_inline int __test_and_change_bit(long nr, volatile unsigned long
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static __always_inline int test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
 {
         GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
 }
 
-static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
+static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
 {
         return ((1UL << (nr & (BITS_PER_LONG-1))) &
                 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
 }
 
-static __always_inline int variable_test_bit(long nr, volatile const unsigned long *addr)
+static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
 {
-        unsigned char oldbit;
+        bool oldbit;
 
         asm volatile("bt %2,%1\n\t"
                      "setc %0"
@@ -329,7 +329,7 @@ static __always_inline int variable_test_bit(long nr, volatile const unsigned lo
  * @nr: bit number to test
  * @addr: Address to start counting from
  */
-static int test_bit(int nr, const volatile unsigned long *addr);
+static bool test_bit(int nr, const volatile unsigned long *addr);
 #endif
 
 #define test_bit(nr, addr)                      \
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 4ad6560847b1..0cdc65b0d14d 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -50,7 +50,7 @@ static inline void local_sub(long i, local_t *l)
  * true if the result is zero, or false for all
  * other cases.
  */
-static inline int local_sub_and_test(long i, local_t *l)
+static inline bool local_sub_and_test(long i, local_t *l)
 {
         GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
 }
@@ -63,7 +63,7 @@ static inline int local_sub_and_test(long i, local_t *l)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static inline int local_dec_and_test(local_t *l)
+static inline bool local_dec_and_test(local_t *l)
 {
         GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
 }
@@ -76,7 +76,7 @@ static inline int local_dec_and_test(local_t *l)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static inline int local_inc_and_test(local_t *l)
+static inline bool local_inc_and_test(local_t *l)
 {
         GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
 }
@@ -90,7 +90,7 @@ static inline int local_inc_and_test(local_t *l)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static inline int local_add_negative(long i, local_t *l)
+static inline bool local_add_negative(long i, local_t *l)
 {
         GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
 }
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 65039e9571db..184d7f3ecb9f 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -510,14 +510,14 @@ do { \
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
 #define x86_test_and_clear_bit_percpu(bit, var)                         \
 ({                                                                      \
-        unsigned char old__;                                            \
+        bool old__;                                                     \
         asm volatile("btr %2,"__percpu_arg(1)"\n\tsetc %0"              \
                      : "=qm" (old__), "+m" (var)                        \
                      : "dIr" (bit));                                    \
         old__;                                                          \
 })
 
-static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
+static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr,
                         const unsigned long __percpu *addr)
 {
         unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;
@@ -529,10 +529,10 @@ static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
 #endif
 }
 
-static inline int x86_this_cpu_variable_test_bit(int nr,
+static inline bool x86_this_cpu_variable_test_bit(int nr,
                         const unsigned long __percpu *addr)
 {
-        unsigned char oldbit;
+        bool oldbit;
 
         asm volatile("bt "__percpu_arg(2)",%1\n\t"
                      "setc %0"
diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
index 8f7866a5b9a4..a15b73d90be3 100644
--- a/arch/x86/include/asm/rmwcc.h
+++ b/arch/x86/include/asm/rmwcc.h
@@ -23,11 +23,11 @@ cc_label: \
 
 #define __GEN_RMWcc(fullop, var, cc, ...)                               \
 do {                                                                    \
-        char c;                                                         \
+        bool c;                                                         \
         asm volatile (fullop "; set" cc " %1"                           \
                       : "+m" (var), "=qm" (c)                           \
                       : __VA_ARGS__ : "memory");                        \
-        return c != 0;                                                  \
+        return c;                                                       \
 } while (0)
 
 #define GEN_UNARY_RMWcc(op, var, arg0, cc)                              \
32 32
33#define GEN_UNARY_RMWcc(op, var, arg0, cc) \ 33#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 453744c1d347..c5087706c02e 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -77,7 +77,7 @@ static inline void __down_read(struct rw_semaphore *sem)
 /*
  * trylock for reading -- returns 1 if successful, 0 if contention
  */
-static inline int __down_read_trylock(struct rw_semaphore *sem)
+static inline bool __down_read_trylock(struct rw_semaphore *sem)
 {
         long result, tmp;
         asm volatile("# beginning __down_read_trylock\n\t"
@@ -93,7 +93,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
93 : "+m" (sem->count), "=&a" (result), "=&r" (tmp) 93 : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
94 : "i" (RWSEM_ACTIVE_READ_BIAS) 94 : "i" (RWSEM_ACTIVE_READ_BIAS)
95 : "memory", "cc"); 95 : "memory", "cc");
96 return result >= 0 ? 1 : 0; 96 return result >= 0;
97} 97}
98 98
99/* 99/*
@@ -134,9 +134,10 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
  */
-static inline int __down_write_trylock(struct rw_semaphore *sem)
+static inline bool __down_write_trylock(struct rw_semaphore *sem)
 {
-        long result, tmp;
+        bool result;
+        long tmp0, tmp1;
         asm volatile("# beginning __down_write_trylock\n\t"
                      "  mov %0,%1\n\t"
                      "1:\n\t"
@@ -144,14 +145,14 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
                      /* was the active mask 0 before? */
                      "  jnz 2f\n\t"
                      "  mov %1,%2\n\t"
-                     "  add %3,%2\n\t"
+                     "  add %4,%2\n\t"
                      LOCK_PREFIX "  cmpxchg %2,%0\n\t"
                      "  jnz 1b\n\t"
                      "2:\n\t"
-                     "  sete %b1\n\t"
-                     "  movzbl %b1, %k1\n\t"
+                     "  sete %3\n\t"
                      "# ending __down_write_trylock\n\t"
-                     : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
+                     : "+m" (sem->count), "=&a" (tmp0), "=&r" (tmp1),
+                       "=qm" (result)
                      : "er" (RWSEM_ACTIVE_WRITE_BIAS)
                      : "memory", "cc");
         return result;
diff --git a/include/linux/random.h b/include/linux/random.h
index e47e533742b5..3d6e9815cd85 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -95,27 +95,27 @@ static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
 #ifdef CONFIG_ARCH_RANDOM
 # include <asm/archrandom.h>
 #else
-static inline int arch_get_random_long(unsigned long *v)
+static inline bool arch_get_random_long(unsigned long *v)
 {
         return 0;
 }
-static inline int arch_get_random_int(unsigned int *v)
+static inline bool arch_get_random_int(unsigned int *v)
 {
         return 0;
 }
-static inline int arch_has_random(void)
+static inline bool arch_has_random(void)
 {
         return 0;
 }
-static inline int arch_get_random_seed_long(unsigned long *v)
+static inline bool arch_get_random_seed_long(unsigned long *v)
 {
         return 0;
 }
-static inline int arch_get_random_seed_int(unsigned int *v)
+static inline bool arch_get_random_seed_int(unsigned int *v)
 {
         return 0;
 }
-static inline int arch_has_random_seed(void)
+static inline bool arch_has_random_seed(void)
 {
         return 0;
 }