author     Thomas Gleixner <tglx@linutronix.de>    2007-10-11 05:20:03 -0400
committer  Thomas Gleixner <tglx@linutronix.de>    2007-10-11 05:20:03 -0400
commit     96a388de5dc53a8b234b3fd41f3ae2cedc9ffd42
tree       d947a467aa2da3140279617bc4b9b101640d7bf4 /include/asm-x86/spinlock_64.h
parent     27bd0c955648646abf2a353a8371d28c37bcd982
i386/x86_64: move headers to include/asm-x86
Move the headers to include/asm-x86 and fixup the
header install make rules
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-x86/spinlock_64.h')
 -rw-r--r--  include/asm-x86/spinlock_64.h  167
 1 file changed, 167 insertions, 0 deletions
diff --git a/include/asm-x86/spinlock_64.h b/include/asm-x86/spinlock_64.h
new file mode 100644
index 000000000000..88bf981e73cf
--- /dev/null
+++ b/include/asm-x86/spinlock_64.h
@@ -0,0 +1,167 @@
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations. There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	return *(volatile signed int *)(&(lock)->slock) <= 0;
}
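
/*
 * Note on the encoding (added commentary, not in the original header):
 * slock starts at 1 (unlocked).  __raw_spin_lock() decrements it, so 0
 * means "held" and negative values mean "held, with other CPUs spinning";
 * that is why "<= 0" above reads as "locked".
 */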

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	asm volatile(
		"\n1:\t"
		LOCK_PREFIX " ; decl %0\n\t"
		"jns 2f\n"
		"3:\n"
		"rep;nop\n\t"
		"cmpl $0,%0\n\t"
		"jle 3b\n\t"
		"jmp 1b\n"
		"2:\t" : "=m" (lock->slock) : : "memory");
}
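
/*
 * Added commentary (not in the original): the locked "decl" is the only
 * atomic step.  If the result is non-negative (jns) we own the lock;
 * otherwise we spin read-only on "cmpl" until slock goes positive, then
 * retry the atomic decrement.  "rep;nop" encodes the PAUSE instruction,
 * which eases the busy-wait loop on SMT/hyper-threaded CPUs.
 */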

/*
 * Same as __raw_spin_lock, but reenable interrupts during spinning.
 */
#ifndef CONFIG_PROVE_LOCKING
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	asm volatile(
		"\n1:\t"
		LOCK_PREFIX " ; decl %0\n\t"
		"jns 5f\n"
		"testl $0x200, %1\n\t"	/* interrupts were disabled? */
		"jz 4f\n\t"
		"sti\n"
		"3:\t"
		"rep;nop\n\t"
		"cmpl $0, %0\n\t"
		"jle 3b\n\t"
		"cli\n\t"
		"jmp 1b\n"
		"4:\t"
		"rep;nop\n\t"
		"cmpl $0, %0\n\t"
		"jg 1b\n\t"
		"jmp 4b\n"
		"5:\n\t"
		: "+m" (lock->slock) : "r" ((unsigned)flags) : "memory");
}
#endif
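
/*
 * Added commentary (not in the original): 0x200 is the IF (interrupt
 * enable) bit of EFLAGS, i.e. bit 9.  If the caller's saved flags had
 * interrupts enabled, the wait loop runs after "sti" so pending
 * interrupts can be serviced while we spin, and "cli" is issued again
 * just before each retry of the atomic decrement.  If interrupts were
 * already disabled, the 4:/jg loop spins without touching the interrupt
 * flag at all.
 */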

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	int oldval;

	asm volatile(
		"xchgl %0,%1"
		:"=q" (oldval), "=m" (lock->slock)
		:"0" (0) : "memory");

	return oldval > 0;
}
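
/*
 * Added commentary (not in the original): "xchgl" with a memory operand
 * is implicitly locked on x86, so no LOCK_PREFIX is needed.  We swap 0
 * into slock unconditionally; if the old value was positive (1, i.e.
 * unlocked) we now own the lock.  Otherwise the lock stays held (a 0
 * written over a non-positive value still reads as locked to spinners)
 * and we report failure.
 */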

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile("movl $1,%0" :"=m" (lock->slock) :: "memory");
}
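
/*
 * Added commentary (not in the original): a plain store suffices here
 * because x86 stores are not reordered with earlier loads or stores, so
 * everything done inside the critical section is visible before slock
 * becomes 1 again; the "memory" clobber keeps the compiler from
 * reordering across the release as well.
 */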

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
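
/*
 * Added worked example (not in the original; it assumes RW_LOCK_BIAS is
 * 0x01000000, its historical value in asm/rwlock.h):
 *
 *   lock == RW_LOCK_BIAS        no readers, no writer
 *   lock == RW_LOCK_BIAS - n    n readers, no writer (still positive)
 *   lock == 0                   one writer, no readers
 *   lock <  0                   contended: a writer holds the lock or
 *                               is trying to take it
 *
 * A reader subtracts 1 and a writer subtracts the whole bias, so the
 * sign bit distinguishes "reader-owned" from "writer-owned or contended".
 */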

static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}

static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n"
		     "1:\n"
		     ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
}
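
/*
 * Added commentary (not in the original): the fast path is a single
 * locked decrement; a negative result means a writer is involved, and
 * the out-of-line helper __read_lock_failed (which is expected to undo
 * the decrement and spin before retrying) is called with the lock
 * pointer in %rdi, hence the "D" constraint.
 */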

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "subl %1,(%0)\n\t"
		     "jz 1f\n"
		     "\tcall __write_lock_failed\n\t"
		     "1:\n"
		     ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
}
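
/*
 * Added commentary (not in the original): subtracting the full bias
 * yields exactly zero only when the counter was RW_LOCK_BIAS, i.e. no
 * readers and no writer; any other result (jz not taken) means
 * contention and goes through the __write_lock_failed slow path.
 */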

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}
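
/*
 * Added commentary (not in the original): the decrement and the read are
 * two separate atomics, so a writer arriving in between can make the
 * read see a negative value and fail the trylock spuriously.  That is
 * acceptable for a trylock, which is always allowed to fail.
 */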

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
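
/*
 * Added commentary (not in the original): atomic_sub_and_test() returns
 * true when the subtraction leaves the counter at zero, matching the
 * "jz" fast path of __raw_write_lock above; on failure the bias is added
 * back so waiting readers and writers see a consistent count again.
 */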

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0"
		     : "=m" (rw->lock) : : "memory");
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */
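
For readers who want to poke at the slock encoding outside the kernel, here is a
minimal, hedged user-space model of the spinlock fast path, using GCC's __atomic
builtins in place of the inline assembly. It illustrates the 1/0/negative
encoding only and is not the kernel's implementation: the model_* names are
invented for this sketch, and the PAUSE hint is approximated with GCC's
x86-specific __builtin_ia32_pause().

	#include <stdio.h>

	/* Toy model of the slock encoding: 1 = unlocked, <= 0 = locked. */
	typedef struct { volatile int slock; } model_spinlock_t;

	#define MODEL_SPINLOCK_UNLOCKED { 1 }

	static int model_is_locked(model_spinlock_t *lock)
	{
		return __atomic_load_n(&lock->slock, __ATOMIC_RELAXED) <= 0;
	}

	static void model_lock(model_spinlock_t *lock)
	{
		/* Atomic decrement stands in for the locked "decl"/"jns". */
		while (__atomic_sub_fetch(&lock->slock, 1, __ATOMIC_ACQUIRE) < 0) {
			/* Read-only spin, like the cmpl/jle loop. */
			while (__atomic_load_n(&lock->slock, __ATOMIC_RELAXED) <= 0)
				__builtin_ia32_pause();	/* rep;nop */
		}
	}

	static void model_unlock(model_spinlock_t *lock)
	{
		/* Release store of 1, like "movl $1,%0". */
		__atomic_store_n(&lock->slock, 1, __ATOMIC_RELEASE);
	}

	int main(void)
	{
		model_spinlock_t lock = MODEL_SPINLOCK_UNLOCKED;

		model_lock(&lock);
		printf("locked: %d\n", model_is_locked(&lock));	/* prints 1 */
		model_unlock(&lock);
		printf("locked: %d\n", model_is_locked(&lock));	/* prints 0 */
		return 0;
	}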