author    Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
committer Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
commit    8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree      a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /lib/atomic64.c
parent    406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'lib/atomic64.c')
-rw-r--r--  lib/atomic64.c  83
1 file changed, 45 insertions, 38 deletions
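The patch below moves the generic 64-bit atomic fallback from raw spinlocks with a static lock-array initializer to ordinary spinlocks initialized at boot by an initcall, and swaps the <linux/export.h> include for the older <linux/module.h>. A sketch of the hashed-lock scheme the file is built on follows the diff.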
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 08a4f068e61..e12ae0dd08a 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -13,7 +13,7 @@
 #include <linux/cache.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
-#include <linux/export.h>
+#include <linux/module.h>
 #include <linux/atomic.h>
 
 /*
@@ -29,15 +29,11 @@
  * Ensure each lock is in a separate cacheline.
  */
 static union {
-	raw_spinlock_t lock;
+	spinlock_t lock;
 	char pad[L1_CACHE_BYTES];
-} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
-	[0 ... (NR_LOCKS - 1)] = {
-		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
-	},
-};
+} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
 
-static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
+static inline spinlock_t *lock_addr(const atomic64_t *v)
 {
 	unsigned long addr = (unsigned long) v;
 
@@ -49,12 +45,12 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
 long long atomic64_read(const atomic64_t *v)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	raw_spin_lock_irqsave(lock, flags);
+	spin_lock_irqsave(lock, flags);
 	val = v->counter;
-	raw_spin_unlock_irqrestore(lock, flags);
+	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_read);
@@ -62,34 +58,34 @@ EXPORT_SYMBOL(atomic64_read);
 void atomic64_set(atomic64_t *v, long long i)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	spinlock_t *lock = lock_addr(v);
 
-	raw_spin_lock_irqsave(lock, flags);
+	spin_lock_irqsave(lock, flags);
 	v->counter = i;
-	raw_spin_unlock_irqrestore(lock, flags);
+	spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(atomic64_set);
 
 void atomic64_add(long long a, atomic64_t *v)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	spinlock_t *lock = lock_addr(v);
 
-	raw_spin_lock_irqsave(lock, flags);
+	spin_lock_irqsave(lock, flags);
 	v->counter += a;
-	raw_spin_unlock_irqrestore(lock, flags);
+	spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(atomic64_add);
 
 long long atomic64_add_return(long long a, atomic64_t *v)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	raw_spin_lock_irqsave(lock, flags);
+	spin_lock_irqsave(lock, flags);
 	val = v->counter += a;
-	raw_spin_unlock_irqrestore(lock, flags);
+	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_add_return);
@@ -97,23 +93,23 @@ EXPORT_SYMBOL(atomic64_add_return);
 void atomic64_sub(long long a, atomic64_t *v)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	spinlock_t *lock = lock_addr(v);
 
-	raw_spin_lock_irqsave(lock, flags);
+	spin_lock_irqsave(lock, flags);
 	v->counter -= a;
-	raw_spin_unlock_irqrestore(lock, flags);
+	spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(atomic64_sub);
 
 long long atomic64_sub_return(long long a, atomic64_t *v)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	raw_spin_lock_irqsave(lock, flags);
+	spin_lock_irqsave(lock, flags);
 	val = v->counter -= a;
-	raw_spin_unlock_irqrestore(lock, flags);
+	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_sub_return);
@@ -121,14 +117,14 @@ EXPORT_SYMBOL(atomic64_sub_return);
 long long atomic64_dec_if_positive(atomic64_t *v)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	raw_spin_lock_irqsave(lock, flags);
+	spin_lock_irqsave(lock, flags);
 	val = v->counter - 1;
 	if (val >= 0)
 		v->counter = val;
-	raw_spin_unlock_irqrestore(lock, flags);
+	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_dec_if_positive);
@@ -136,14 +132,14 @@ EXPORT_SYMBOL(atomic64_dec_if_positive);
 long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	raw_spin_lock_irqsave(lock, flags);
+	spin_lock_irqsave(lock, flags);
 	val = v->counter;
 	if (val == o)
 		v->counter = n;
-	raw_spin_unlock_irqrestore(lock, flags);
+	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_cmpxchg);
@@ -151,13 +147,13 @@ EXPORT_SYMBOL(atomic64_cmpxchg);
 long long atomic64_xchg(atomic64_t *v, long long new)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	raw_spin_lock_irqsave(lock, flags);
+	spin_lock_irqsave(lock, flags);
 	val = v->counter;
 	v->counter = new;
-	raw_spin_unlock_irqrestore(lock, flags);
+	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_xchg);
@@ -165,15 +161,26 @@ EXPORT_SYMBOL(atomic64_xchg);
 int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	spinlock_t *lock = lock_addr(v);
 	int ret = 0;
 
-	raw_spin_lock_irqsave(lock, flags);
+	spin_lock_irqsave(lock, flags);
 	if (v->counter != u) {
 		v->counter += a;
 		ret = 1;
 	}
-	raw_spin_unlock_irqrestore(lock, flags);
+	spin_unlock_irqrestore(lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(atomic64_add_unless);
+
+static int init_atomic64_lock(void)
+{
+	int i;
+
+	for (i = 0; i < NR_LOCKS; ++i)
+		spin_lock_init(&atomic64_lock[i].lock);
+	return 0;
+}
+
+pure_initcall(init_atomic64_lock);
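
For context, every operation in this file funnels through lock_addr(), which hashes the address of the atomic64_t into a small array of cacheline-padded spinlocks: unrelated counters usually take different locks, while counters sharing a cacheline deliberately share one. The hash body falls outside the hunk context above, so the sketch below is modeled on mainline's lib/atomic64.c; NR_LOCKS and the exact mixing steps are assumptions, not quotes from this tree.

/*
 * Sketch of the hashed-lock scheme (assumed from mainline lib/atomic64.c;
 * NR_LOCKS and the hash may differ in this tree).
 */
#define NR_LOCKS	16

static union {
	spinlock_t lock;
	char pad[L1_CACHE_BYTES];	/* pad so each lock owns a cacheline */
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;

static inline spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;		/* same cacheline -> same lock */
	addr ^= (addr >> 8) ^ (addr >> 16);	/* fold in higher address bits */
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

Since plain spinlock_t locks are set up at run time rather than statically, the new pure_initcall(init_atomic64_lock) runs at the earliest initcall level (level 0, ahead of core_initcall), so the lock array is ready before ordinary initcall-time users of atomic64_t. Code running before initcalls would still see uninitialized locks, which is one reason later mainline kernels prefer the statically initialized raw_spinlock form on the '-' side of the hunks above.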