author:    Joe Perches <joe@perches.com>    2008-03-23 04:03:38 -0400
committer: Ingo Molnar <mingo@elte.hu>      2008-04-17 11:41:27 -0400
commit:    26b7fcc4bde28237a906597a809b149fb06713b0 (patch)
tree:      dc500a327ff45c650a9858ad89d2cdd10bc8d00f /include
parent:    a4c2d7d9285500a9b229bb7ddc7abe0212a0dab0 (diff)
include/asm-x86/sync_bitops.h: checkpatch cleanups - formatting only
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include')
-rw-r--r--  include/asm-x86/sync_bitops.h | 56 ++++++++++++++++++++++++++++----------------------------
1 file changed, 28 insertions(+), 28 deletions(-)
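For reference, the cleanups are the usual checkpatch.pl style points: "asm volatile" instead of the older "__asm__ __volatile__" spelling (GCC treats the two identically), the "*" of a pointer parameter bound to the name with no intervening space ("*addr", not "* addr"), and a space after each ":" that introduces an inline-asm constraint list. A minimal sketch of the preferred layout, using a hypothetical example_bit_op() rather than code from the patch:

	/* checkpatch-preferred inline-asm layout: "asm volatile", "*addr",
	 * and a space after each ':' starting an output/input/clobber list. */
	static inline void example_bit_op(int nr, volatile unsigned long *addr)
	{
		asm volatile("lock; btsl %1,%0"
			     : "+m" (*(volatile long *)addr)
			     : "Ir" (nr)
			     : "memory");
	}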
diff --git a/include/asm-x86/sync_bitops.h b/include/asm-x86/sync_bitops.h
index bc249f40e0ee..f1078a5e4ed7 100644
--- a/include/asm-x86/sync_bitops.h
+++ b/include/asm-x86/sync_bitops.h
@@ -13,7 +13,7 @@
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  */
 
-#define ADDR (*(volatile long *) addr)
+#define ADDR (*(volatile long *)addr)
 
 /**
  * sync_set_bit - Atomically set a bit in memory
@@ -26,12 +26,12 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void sync_set_bit(int nr, volatile unsigned long * addr)
+static inline void sync_set_bit(int nr, volatile unsigned long *addr)
 {
-	__asm__ __volatile__("lock; btsl %1,%0"
-			     :"+m" (ADDR)
-			     :"Ir" (nr)
-			     : "memory");
+	asm volatile("lock; btsl %1,%0"
+		     : "+m" (ADDR)
+		     : "Ir" (nr)
+		     : "memory");
 }
 
 /**
@@ -44,12 +44,12 @@ static inline void sync_set_bit(int nr, volatile unsigned long * addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
+static inline void sync_clear_bit(int nr, volatile unsigned long *addr)
 {
-	__asm__ __volatile__("lock; btrl %1,%0"
-			     :"+m" (ADDR)
-			     :"Ir" (nr)
-			     : "memory");
+	asm volatile("lock; btrl %1,%0"
+		     : "+m" (ADDR)
+		     : "Ir" (nr)
+		     : "memory");
 }
 
 /**
@@ -61,12 +61,12 @@ static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void sync_change_bit(int nr, volatile unsigned long * addr)
+static inline void sync_change_bit(int nr, volatile unsigned long *addr)
 {
-	__asm__ __volatile__("lock; btcl %1,%0"
-			     :"+m" (ADDR)
-			     :"Ir" (nr)
-			     : "memory");
+	asm volatile("lock; btcl %1,%0"
+		     : "+m" (ADDR)
+		     : "Ir" (nr)
+		     : "memory");
 }
 
 /**
@@ -77,13 +77,13 @@ static inline void sync_change_bit(int nr, volatile unsigned long * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
+static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	__asm__ __volatile__("lock; btsl %2,%1\n\tsbbl %0,%0"
-			     :"=r" (oldbit),"+m" (ADDR)
-			     :"Ir" (nr) : "memory");
+	asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0"
+		     : "=r" (oldbit), "+m" (ADDR)
+		     : "Ir" (nr) : "memory");
 	return oldbit;
 }
 
@@ -95,13 +95,13 @@ static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
+static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	__asm__ __volatile__("lock; btrl %2,%1\n\tsbbl %0,%0"
-			     :"=r" (oldbit),"+m" (ADDR)
-			     :"Ir" (nr) : "memory");
+	asm volatile("lock; btrl %2,%1\n\tsbbl %0,%0"
+		     : "=r" (oldbit), "+m" (ADDR)
+		     : "Ir" (nr) : "memory");
 	return oldbit;
 }
 
@@ -113,13 +113,13 @@ static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_change_bit(int nr, volatile unsigned long* addr)
+static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	__asm__ __volatile__("lock; btcl %2,%1\n\tsbbl %0,%0"
-			     :"=r" (oldbit),"+m" (ADDR)
-			     :"Ir" (nr) : "memory");
+	asm volatile("lock; btcl %2,%1\n\tsbbl %0,%0"
+		     : "=r" (oldbit), "+m" (ADDR)
+		     : "Ir" (nr) : "memory");
 	return oldbit;
 }
 
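A note on the (unchanged) logic in the test-and-* variants: btsl/btrl/btcl latch the bit's previous value in the carry flag, and "sbbl %0,%0" (subtract-with-borrow of a register from itself) turns CF into 0 or -1, so the returned oldbit is nonzero exactly when the bit was previously set. A standalone userspace sketch of the same idiom, using a hypothetical demo_test_and_set_bit() built with GCC on x86 (an illustration, not kernel code):

	#include <stdio.h>

	/* Same pattern as sync_test_and_set_bit(): the locked btsl sets bit nr
	 * and copies the bit's old value into CF; sbbl then materializes CF
	 * as 0 (bit was clear) or -1 (bit was set) in oldbit. */
	static inline int demo_test_and_set_bit(int nr, volatile unsigned long *addr)
	{
		int oldbit;

		asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0"
			     : "=r" (oldbit), "+m" (*(volatile long *)addr)
			     : "Ir" (nr) : "memory");
		return oldbit;
	}

	int main(void)
	{
		volatile unsigned long word = 0;

		printf("first call:  %d\n", demo_test_and_set_bit(3, &word)); /* 0: bit was clear */
		printf("second call: %d\n", demo_test_and_set_bit(3, &word)); /* -1: bit was set */
		printf("word = %#lx\n", word); /* 0x8 */
		return 0;
	}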