Diffstat (limited to 'arch/blackfin/include/asm/spinlock.h')
-rw-r--r--	arch/blackfin/include/asm/spinlock.h	| 87
1 file changed, 85 insertions(+), 2 deletions(-)
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index 64e908a50646..0249ac319476 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -1,6 +1,89 @@
 #ifndef __BFIN_SPINLOCK_H
 #define __BFIN_SPINLOCK_H
 
-#error blackfin architecture does not support SMP spin lock yet
+#include <asm/atomic.h>
 
-#endif
+asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
+asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
+asmlinkage int __raw_spin_trylock_asm(volatile int *ptr);
+asmlinkage void __raw_spin_unlock_asm(volatile int *ptr);
+asmlinkage void __raw_read_lock_asm(volatile int *ptr);
+asmlinkage int __raw_read_trylock_asm(volatile int *ptr);
+asmlinkage void __raw_read_unlock_asm(volatile int *ptr);
+asmlinkage void __raw_write_lock_asm(volatile int *ptr);
+asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
+asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
+
+static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+{
+	return __raw_spin_is_locked_asm(&lock->lock);
+}
+
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+	__raw_spin_lock_asm(&lock->lock);
+}
+
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+	return __raw_spin_trylock_asm(&lock->lock);
+}
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+	__raw_spin_unlock_asm(&lock->lock);
+}
+
+static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+{
+	while (__raw_spin_is_locked(lock))
+		cpu_relax();
+}
+
+static inline int __raw_read_can_lock(raw_rwlock_t *rw)
+{
+	return __raw_uncached_fetch_asm(&rw->lock) > 0;
+}
+
+static inline int __raw_write_can_lock(raw_rwlock_t *rw)
+{
+	return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
+}
+
+static inline void __raw_read_lock(raw_rwlock_t *rw)
+{
+	__raw_read_lock_asm(&rw->lock);
+}
+
+static inline int __raw_read_trylock(raw_rwlock_t *rw)
+{
+	return __raw_read_trylock_asm(&rw->lock);
+}
+
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+	__raw_read_unlock_asm(&rw->lock);
+}
+
+static inline void __raw_write_lock(raw_rwlock_t *rw)
+{
+	__raw_write_lock_asm(&rw->lock);
+}
+
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
+{
+	return __raw_write_trylock_asm(&rw->lock);
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+	__raw_write_unlock_asm(&rw->lock);
+}
+
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
+#endif /* !__BFIN_SPINLOCK_H */
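
Note: the header only supplies the inline wrappers that the generic SMP spinlock/rwlock layer expects; the actual atomic work is delegated to the out-of-line __raw_*_asm routines (and to __raw_uncached_fetch_asm, pulled in via asm/atomic.h). Ordinary kernel code never calls these wrappers directly; it goes through the usual spinlock API, which on a non-debug SMP build boils down to them. A minimal usage sketch follows; the bfin_demo_* names are made up for illustration.

	/*
	 * Illustration only, not part of the patch: generic spinlock usage
	 * that, on an SMP Blackfin kernel, ultimately reaches the
	 * __raw_spin_lock()/__raw_spin_unlock() wrappers declared above.
	 */
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(bfin_demo_lock);
	static unsigned int bfin_demo_count;

	static void bfin_demo_bump(void)
	{
		spin_lock(&bfin_demo_lock);	/* -> __raw_spin_lock() */
		bfin_demo_count++;
		spin_unlock(&bfin_demo_lock);	/* -> __raw_spin_unlock() */
	}

The read/write-lock predicates follow the usual biased-counter convention: the lock word starts at RW_LOCK_BIAS, each reader takes one unit and a writer takes the whole bias, so a value greater than zero means a reader may enter and a value still equal to RW_LOCK_BIAS means a writer may. Below is a conceptual, non-atomic sketch of that bookkeeping; the real updates happen atomically in the *_asm routines, and the demo_* names and bias value are assumptions for illustration.

	/*
	 * Conceptual sketch of the biased rwlock counter tested by
	 * __raw_read_can_lock()/__raw_write_can_lock(); not the real
	 * (atomic) implementation.
	 */
	#define DEMO_RW_LOCK_BIAS 0x01000000	/* assumed bias value */

	static int demo_read_trylock(int *lock)
	{
		if (*lock > 0) {		/* no writer holds the lock */
			--*lock;		/* account for one more reader */
			return 1;
		}
		return 0;
	}

	static int demo_write_trylock(int *lock)
	{
		if (*lock == DEMO_RW_LOCK_BIAS) {	/* no readers, no writer */
			*lock -= DEMO_RW_LOCK_BIAS;	/* claim the whole bias */
			return 1;
		}
		return 0;
	}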