author     David S. Miller <davem@davemloft.net>   2005-07-24 22:35:42 -0400
committer  David S. Miller <davem@davemloft.net>   2005-07-24 22:35:42 -0400
commit     620de54675e76f1369326bbe1fc75aff88917063 (patch)
tree       e50b449c94843e15c4dfa964ada5b50f1876e497 /include/asm-sparc64
parent     6593eaed814d50214d6056c683d52ac63153730e (diff)
[SPARC64]: Simplify asm/rwsem.h slightly.
rwsem_atomic_update and rwsem_atomic_add can be implemented directly using the atomic_*() routines. Also, rwsem_cmpxchgw() is totally unused, so kill it.

Signed-off-by: David S. Miller <davem@davemloft.net>
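The equivalence asserted above can be checked outside the kernel. The following is a minimal user-space sketch, not the kernel implementation: it uses C11 <stdatomic.h> in place of the kernel's atomic_t API, and the demo_* names are hypothetical. The old hand-rolled CAS loop returns the post-add value of the counter, which is exactly what atomic_add_return() provides (modeled here as atomic_fetch_add() plus delta).

#include <stdatomic.h>
#include <stdio.h>

/* Hand-rolled CAS loop, mirroring what the deleted sparc64 assembly
 * did: retry until the compare-and-swap lands, then return the
 * updated value of the counter. */
static int demo_cas_update(atomic_int *count, int delta)
{
	int old = atomic_load(count);

	while (!atomic_compare_exchange_weak(count, &old, old + delta))
		;	/* 'old' is refreshed on each failed attempt */
	return old + delta;
}

/* The replacement: a single fetch-and-add with the same return
 * value, standing in for the kernel's atomic_add_return(). */
static int demo_add_return(atomic_int *count, int delta)
{
	return atomic_fetch_add(count, delta) + delta;
}

int main(void)
{
	atomic_int a = 5, b = 5;

	/* Both calls return 8 and leave the counters at 8. */
	printf("%d %d\n", demo_cas_update(&a, 3), demo_add_return(&b, 3));
	return 0;
}

Besides being shorter, delegating to the shared atomic_*() routines keeps the memory-barrier placement in one place instead of repeating membar sequences in every helper.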
Diffstat (limited to 'include/asm-sparc64')
-rw-r--r--  include/asm-sparc64/rwsem.h | 48
1 file changed, 4 insertions(+), 44 deletions(-)
diff --git a/include/asm-sparc64/rwsem.h b/include/asm-sparc64/rwsem.h
index a1cc94f95984..4568ee4022df 100644
--- a/include/asm-sparc64/rwsem.h
+++ b/include/asm-sparc64/rwsem.h
@@ -46,54 +46,14 @@ extern void __up_read(struct rw_semaphore *sem);
 extern void __up_write(struct rw_semaphore *sem);
 extern void __downgrade_write(struct rw_semaphore *sem);
 
-static __inline__ int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
 {
-	int tmp = delta;
-
-	__asm__ __volatile__(
-	"1:\tlduw	[%2], %%g1\n\t"
-	"add		%%g1, %1, %%g7\n\t"
-	"cas		[%2], %%g1, %%g7\n\t"
-	"cmp		%%g1, %%g7\n\t"
-	"membar	#StoreLoad | #StoreStore\n\t"
-	"bne,pn	%%icc, 1b\n\t"
-	" nop\n\t"
-	"mov		%%g7, %0\n\t"
-	: "=&r" (tmp)
-	: "0" (tmp), "r" (sem)
-	: "g1", "g7", "memory", "cc");
-
-	return tmp + delta;
-}
-
-#define rwsem_atomic_add rwsem_atomic_update
-
-static __inline__ __u16 rwsem_cmpxchgw(struct rw_semaphore *sem, __u16 __old, __u16 __new)
-{
-	u32 old = (sem->count & 0xffff0000) | (u32) __old;
-	u32 new = (old & 0xffff0000) | (u32) __new;
-	u32 prev;
-
-again:
-	__asm__ __volatile__("cas	[%2], %3, %0\n\t"
-			     "membar #StoreLoad | #StoreStore"
-			     : "=&r" (prev)
-			     : "0" (new), "r" (sem), "r" (old)
-			     : "memory");
-
-	/* To give the same semantics as x86 cmpxchgw, keep trying
-	 * if only the upper 16-bits changed.
-	 */
-	if (prev != old &&
-	    ((prev & 0xffff) == (old & 0xffff)))
-		goto again;
-
-	return prev & 0xffff;
+	return atomic_add_return(delta, (atomic_t *)(&sem->count));
 }
 
-static __inline__ signed long rwsem_cmpxchg(struct rw_semaphore *sem, signed long old, signed long new)
+static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
 {
-	return cmpxchg(&sem->count,old,new);
+	atomic_add(delta, (atomic_t *)(&sem->count));
 }
 
 #endif /* __KERNEL__ */
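For context on how the surviving helper is consumed, here is a hedged sketch of the reader fast path that the generic rwsem code builds on top of rwsem_atomic_update(): a reader adds the active-reader bias and checks the sign of the result, a negative count signalling writer activity and a fall-through to the contended slow path. This is a user-space illustration under assumed constants (RWSEM_ACTIVE_READ_BIAS mirroring the common 32-bit rwsem layout), not the actual lib/rwsem.c code; the demo_* names are hypothetical.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the bias constant defined near the top
 * of rwsem.h; the value mirrors the common 32-bit rwsem layout. */
#define RWSEM_ACTIVE_READ_BIAS	0x00000001

struct demo_rwsem {
	atomic_int count;		/* stands in for sem->count */
};

/* Mirrors rwsem_atomic_update(): add delta, return the new count. */
static int demo_rwsem_atomic_update(int delta, struct demo_rwsem *sem)
{
	return atomic_fetch_add(&sem->count, delta) + delta;
}

/* Reader fast path: bump the count by the read bias.  A negative
 * result means a writer is active or waiting, and the real kernel
 * would branch to the contended slow path instead of succeeding. */
static bool demo_down_read_fast(struct demo_rwsem *sem)
{
	return demo_rwsem_atomic_update(RWSEM_ACTIVE_READ_BIAS, sem) >= 0;
}

int main(void)
{
	struct demo_rwsem sem = { .count = 0 };

	printf("read lock acquired: %d\n", demo_down_read_fast(&sem));
	return 0;
}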