author	Paul Mackerras <paulus@samba.org>	2005-11-09 23:54:00 -0500
committer	Paul Mackerras <paulus@samba.org>	2005-11-09 23:54:00 -0500
commit	0c95fbb25602caa02ef697c3852cd61f9829e6ff (patch)
tree	cdab09f60b9bf00864af24fcf1da408b07b91aa1
parent	49b09853df1a303876b82a6480efb2f7b45ef041 (diff)
parent	06a98dba0d4b4f2f9b1f35f636beb166d6cbde34 (diff)

    Merge git://oak/home/sfr/kernels/iseries/work
-rw-r--r--	arch/powerpc/kernel/prom.c		6
-rw-r--r--	arch/powerpc/platforms/iseries/setup.c	1
-rw-r--r--	include/asm-powerpc/atomic.h		178
3 files changed, 181 insertions, 4 deletions
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 6391a4a0709a..6a5b468edb4d 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -1080,9 +1080,9 @@ void __init unflatten_device_tree(void)
 static int __init early_init_dt_scan_cpus(unsigned long node,
 					  const char *uname, int depth, void *data)
 {
-	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
 	u32 *prop;
-	unsigned long size = 0;
+	unsigned long size;
+	char *type = of_get_flat_dt_prop(node, "device_type", &size);
 
 	/* We are scanning "cpu" nodes only */
 	if (type == NULL || strcmp(type, "cpu") != 0)
@@ -1108,7 +1108,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 
 #ifdef CONFIG_ALTIVEC
 	/* Check if we have a VMX and eventually update CPU features */
-	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", &size);
+	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
 	if (prop && (*prop) > 0) {
 		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
 		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
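(For context: early_init_dt_scan_cpus() is a callback run over the flattened device tree during early boot, before the unflattened tree exists, so of_get_flat_dt_prop() is the only way to read properties at that point. Below is a minimal sketch of the scan pattern this callback fits into, assuming the 2005-era helpers of_scan_flat_dt() and of_get_flat_dt_prop(); the callback name and the counter are hypothetical, not from this patch:

/* Sketch: count "cpu" nodes while walking the flattened device tree.
 * Assumes the flat-DT API of this era of arch/powerpc/kernel/prom.c. */
static int __init count_cpu_nodes(unsigned long node, const char *uname,
				  int depth, void *data)
{
	int *count = data;
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;	/* not a cpu node: keep scanning */

	(*count)++;
	return 0;		/* a non-zero return would stop the scan */
}

static int __init how_many_cpus(void)
{
	int count = 0;

	of_scan_flat_dt(count_cpu_nodes, &count);
	return count;
}

Returning 0 from the callback tells of_scan_flat_dt() to keep walking, which is why the real callback above bails out with 0 for non-cpu nodes.)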
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index 1f338341d8f2..6a29f301436b 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -704,7 +704,6 @@ static void iseries_shared_idle(void)
 
 static void iseries_dedicated_idle(void)
 {
-	long oldval;
 	set_thread_flag(TIF_POLLING_NRFLAG);
 
 	while (1) {
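(This hunk only drops a local that is no longer used. For orientation, the surrounding function follows the common polling-idle shape; a rough sketch of that pattern is below, with the body assumed rather than copied from this file, since the hunk does not show it:

/* Sketch of a polling dedicated-idle loop, assuming the 2005-era
 * powerpc helpers HMT_low()/HMT_medium(); not the exact iSeries body. */
static void sketch_dedicated_idle(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);	/* scheduler can skip the resched IPI */

	while (1) {
		while (!need_resched())
			HMT_low();		/* lower SMT priority while spinning */
		HMT_medium();			/* restore priority before real work */
		schedule();			/* a task became runnable */
	}
}

With TIF_POLLING_NRFLAG set, the scheduler knows this CPU polls need_resched() and can wake it without sending an interrupt.)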
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index c5b12fd2b46b..9c0b372a46e1 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -197,5 +197,183 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 #define smp_mb__before_atomic_inc()	smp_mb()
 #define smp_mb__after_atomic_inc()	smp_mb()
 
+#ifdef __powerpc64__
+
+typedef struct { volatile long counter; } atomic64_t;
+
+#define ATOMIC64_INIT(i)	{ (i) }
+
+#define atomic64_read(v)	((v)->counter)
+#define atomic64_set(v,i)	(((v)->counter) = (i))
+
+static __inline__ void atomic64_add(long a, atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%3		# atomic64_add\n\
+	add	%0,%2,%0\n\
+	stdcx.	%0,0,%3 \n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (a), "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ long atomic64_add_return(long a, atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%2		# atomic64_add_return\n\
+	add	%0,%1,%0\n\
+	stdcx.	%0,0,%2 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (a), "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
+
+static __inline__ void atomic64_sub(long a, atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%3		# atomic64_sub\n\
+	subf	%0,%2,%0\n\
+	stdcx.	%0,0,%3 \n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (a), "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
+	subf	%0,%1,%0\n\
+	stdcx.	%0,0,%2 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (a), "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+static __inline__ void atomic64_inc(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%2		# atomic64_inc\n\
+	addic	%0,%0,1\n\
+	stdcx.	%0,0,%2 \n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ long atomic64_inc_return(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%1		# atomic64_inc_return\n\
+	addic	%0,%0,1\n\
+	stdcx.	%0,0,%1 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+/*
+ * atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+
+static __inline__ void atomic64_dec(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%2		# atomic64_dec\n\
+	addic	%0,%0,-1\n\
+	stdcx.	%0,0,%2\n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ long atomic64_dec_return(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%1		# atomic64_dec_return\n\
+	addic	%0,%0,-1\n\
+	stdcx.	%0,0,%1\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
+
+/*
+ * Atomically test *v and decrement if it is greater than 0.
+ * The function returns the old value of *v minus 1.
+ */
+static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
+	addic.	%0,%0,-1\n\
+	blt-	2f\n\
+	stdcx.	%0,0,%1\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	"\n\
+2:"	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+#endif /* __powerpc64__ */
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_ATOMIC_H_ */
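(The new 64-bit ops mirror the existing 32-bit atomic_t API earlier in this header: the plain atomic64_add()/atomic64_sub()/atomic64_inc()/atomic64_dec() forms have no ordering guarantees, while the *_return variants are bracketed by EIEIO_ON_SMP/ISYNC_ON_SMP and act as full barriers. A minimal usage sketch built only from the operations added above; the counter and both functions are hypothetical:

/* Sketch: a 64-bit statistics counter that cannot overflow at 32 bits. */
static atomic64_t total_bytes = ATOMIC64_INIT(0);

static void account_rx(long len)
{
	atomic64_add(len, &total_bytes);	/* unordered, no return value */
}

static int drained(long len)
{
	/* atomic64_sub_and_test() wraps atomic64_sub_return(), which is
	 * fully ordered on SMP and yields the post-subtraction value;
	 * true when the counter has reached zero. */
	return atomic64_sub_and_test(len, &total_bytes);
}

Each op is an ldarx/stdcx. (load-and-reserve / store-conditional) retry loop, so the read-modify-write is atomic without a lock; only __powerpc64__ gets these, since ppc32 has no 64-bit ldarx.)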