author	Thomas Gleixner <tglx@linutronix.de>	2009-11-07 01:16:32 -0500
committer	David S. Miller <davem@davemloft.net>	2009-11-07 01:16:32 -0500
commit	4df286e52917c95c415400367cfd523dfbb0f93a (patch)
tree	70a3339e7e1d615331e5ab0a845cb24303209f68 /arch/sparc
parent	ddaf1b27edf72372242d752730d526b79312a44e (diff)
sparc: Make atomic locks raw
SPIN_LOCK_UNLOCKED is deprecated, the locks which protect the atomic operations have no dependency on other locks, and the code is well tested, so the conversion to a raw lock is safe. Make the lock array static while at it.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
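For context, the __atomic_hash[] array touched below implements a hashed-lock scheme: ATOMIC_HASH() takes the address of the atomic_t, shifts it right by 8 bits and masks it into a small array, so a fixed number of locks serializes all of the emulated atomic operations. A minimal user-space sketch of the same idea, using pthread mutexes instead of kernel spinlocks (the names HASH_SIZE, LOCK_FOR and emulated_add_return are illustrative only, not taken from the kernel source):

#include <pthread.h>
#include <stdint.h>

#define HASH_SIZE 4
/* Same idea as ATOMIC_HASH(): drop the low 8 bits of the address, then
 * mask into a small array so nearby objects share one lock. */
#define LOCK_FOR(addr) (&lock_hash[(((uintptr_t)(addr)) >> 8) & (HASH_SIZE - 1)])

static pthread_mutex_t lock_hash[HASH_SIZE] = {
	[0 ... (HASH_SIZE - 1)] = PTHREAD_MUTEX_INITIALIZER
};

/* Roughly what __atomic_add_return() does below: lock the bucket that
 * covers v, perform the read-modify-write, unlock, return the new value. */
static int emulated_add_return(int i, int *v)
{
	int ret;

	pthread_mutex_lock(LOCK_FOR(v));
	ret = (*v += i);
	pthread_mutex_unlock(LOCK_FOR(v));
	return ret;
}

The [0 ... (HASH_SIZE - 1)] range initializer mirrors the one in the patch; it is a GCC extension, so the sketch assumes GCC or Clang.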
Diffstat (limited to 'arch/sparc')
-rw-r--r--	arch/sparc/lib/atomic32.c	36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index cbddeb38ffda..080b7c26e0fd 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -15,8 +15,8 @@
 #define ATOMIC_HASH_SIZE	4
 #define ATOMIC_HASH(a)	(&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])
 
-spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
-	[0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
+static raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
+	[0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED
 };
 
 #else /* SMP */
@@ -31,11 +31,11 @@ int __atomic_add_return(int i, atomic_t *v)
 {
 	int ret;
 	unsigned long flags;
-	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(v), flags);
 
 	ret = (v->counter += i);
 
-	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 	return ret;
 }
 EXPORT_SYMBOL(__atomic_add_return);
@@ -45,12 +45,12 @@ int atomic_cmpxchg(atomic_t *v, int old, int new)
 	int ret;
 	unsigned long flags;
 
-	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(v), flags);
 	ret = v->counter;
 	if (likely(ret == old))
 		v->counter = new;
 
-	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 	return ret;
 }
 EXPORT_SYMBOL(atomic_cmpxchg);
@@ -60,11 +60,11 @@ int atomic_add_unless(atomic_t *v, int a, int u)
 	int ret;
 	unsigned long flags;
 
-	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(v), flags);
 	ret = v->counter;
 	if (ret != u)
 		v->counter += a;
-	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 	return ret != u;
 }
 EXPORT_SYMBOL(atomic_add_unless);
@@ -74,9 +74,9 @@ void atomic_set(atomic_t *v, int i)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(v), flags);
 	v->counter = i;
-	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 }
 EXPORT_SYMBOL(atomic_set);
 
@@ -84,10 +84,10 @@ unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
 {
 	unsigned long old, flags;
 
-	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(addr), flags);
 	old = *addr;
 	*addr = old | mask;
-	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
 
 	return old & mask;
 }
@@ -97,10 +97,10 @@ unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
 {
 	unsigned long old, flags;
 
-	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(addr), flags);
 	old = *addr;
 	*addr = old & ~mask;
-	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
 
 	return old & mask;
 }
@@ -110,10 +110,10 @@ unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
 {
 	unsigned long old, flags;
 
-	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(addr), flags);
 	old = *addr;
 	*addr = old ^ mask;
-	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
 
 	return old & mask;
 }
@@ -124,10 +124,10 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
 	unsigned long flags;
 	u32 prev;
 
-	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
+	__raw_spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
 	if ((prev = *ptr) == old)
 		*ptr = new;
-	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
+	__raw_spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
 
 	return (unsigned long)prev;
 }