author		Ingo Molnar <mingo@elte.hu>	2008-07-21 10:45:56 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-21 10:45:56 -0400
commit		2e2dcc7631e331cf2e8396ce452e7f01e35f1182 (patch)
tree		5a02c9602db66bc8c8db9660899c0c4455d7464f /include
parent		acee709cab689ec7703770e8b8cb5cc3a4abcb31 (diff)
parent		1c29dd9a9e2f83ffb02e50bb3619c3b9db8fd526 (diff)
Merge branch 'x86/paravirt-spinlocks' into x86/for-linus
Diffstat (limited to 'include')
 include/asm-x86/paravirt.h       |  43
 include/asm-x86/spinlock.h       | 118
 include/asm-x86/spinlock_types.h |   2
 include/asm-x86/xen/events.h     |   1
 include/xen/events.h             |   7

 5 files changed, 155 insertions(+), 16 deletions(-)
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index 695ce9383f52..aec9767836b6 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -325,6 +325,15 @@ struct pv_mmu_ops {
 			   unsigned long phys, pgprot_t flags);
 };
 
+struct raw_spinlock;
+struct pv_lock_ops {
+	int (*spin_is_locked)(struct raw_spinlock *lock);
+	int (*spin_is_contended)(struct raw_spinlock *lock);
+	void (*spin_lock)(struct raw_spinlock *lock);
+	int (*spin_trylock)(struct raw_spinlock *lock);
+	void (*spin_unlock)(struct raw_spinlock *lock);
+};
+
 /* This contains all the paravirt structures: we get a convenient
  * number for each function using the offset which we use to indicate
  * what to patch. */
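
The default table backing this struct lives in arch/x86/kernel/paravirt.c, outside this include/-limited diffstat. A sketch of what it plausibly contains, assuming the natives are simply the ticket-lock routines renamed below in spinlock.h:

	struct pv_lock_ops pv_lock_ops = {
	#ifdef CONFIG_SMP
		.spin_is_locked = __ticket_spin_is_locked,
		.spin_is_contended = __ticket_spin_is_contended,
		.spin_lock = __ticket_spin_lock,
		.spin_trylock = __ticket_spin_trylock,
		.spin_unlock = __ticket_spin_unlock,
	#endif
	};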
@@ -335,6 +344,7 @@ struct paravirt_patch_template {
 	struct pv_irq_ops pv_irq_ops;
 	struct pv_apic_ops pv_apic_ops;
 	struct pv_mmu_ops pv_mmu_ops;
+	struct pv_lock_ops pv_lock_ops;
 };
 
 extern struct pv_info pv_info;
@@ -344,6 +354,7 @@ extern struct pv_cpu_ops pv_cpu_ops;
 extern struct pv_irq_ops pv_irq_ops;
 extern struct pv_apic_ops pv_apic_ops;
 extern struct pv_mmu_ops pv_mmu_ops;
+extern struct pv_lock_ops pv_lock_ops;
 
 #define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
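
The "convenient number for each function" mentioned in the comment earlier can be made concrete with a small worked example (illustrative, not part of the patch):

	/* Each op gets a stable index derived from its offset in the
	 * template.  If pv_lock_ops.spin_lock sits N pointers into
	 * struct paravirt_patch_template, then:
	 *
	 *	PARAVIRT_PATCH(pv_lock_ops.spin_lock)
	 *	  == offsetof(struct paravirt_patch_template,
	 *		      pv_lock_ops.spin_lock) / sizeof(void *)
	 *	  == N
	 *
	 * and that index is what the patching machinery records
	 * per call site.
	 */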
@@ -1368,6 +1379,37 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 void _paravirt_nop(void);
 #define paravirt_nop	((void *)_paravirt_nop)
 
+void paravirt_use_bytelocks(void);
+
+#ifdef CONFIG_SMP
+
+static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
+{
+	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
+}
+
+static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
+{
+	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
+}
+
+static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
+{
+	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
+}
+
+static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
+{
+	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
+}
+
+static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
+{
+	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
+}
+
+#endif
+
 /* These all sit in the .parainstructions section to tell us what to patch. */
 struct paravirt_patch_site {
 	u8 *instr; /* original instructions */
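
paravirt_use_bytelocks(), declared above, is implemented outside this include/-limited view (in arch/x86/kernel/paravirt.c). A sketch of what it does, assuming it simply repoints the ops at the byte-lock variants added to spinlock.h below:

	void __init paravirt_use_bytelocks(void)
	{
	#ifdef CONFIG_SMP
		pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
		pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
		pv_lock_ops.spin_lock = __byte_spin_lock;
		pv_lock_ops.spin_trylock = __byte_spin_trylock;
		pv_lock_ops.spin_unlock = __byte_spin_unlock;
	#endif
	}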
@@ -1452,6 +1494,7 @@ static inline unsigned long __raw_local_irq_save(void)
 	return f;
 }
 
+
 /* Make sure as little as possible of this mess escapes. */
 #undef PARAVIRT_CALL
 #undef __PVOP_CALL
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 21e89bf92f1c..4f9a9861799a 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -6,7 +6,7 @@
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <linux/compiler.h>
-
+#include <asm/paravirt.h>
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  *
@@ -54,21 +54,21 @@
  * much between them in performance though, especially as locks are out of line.
  */
 #if (NR_CPUS < 256)
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 8) & 0xff) != (tmp & 0xff));
 }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
 }
 
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 {
 	short inc = 0x0100;
 
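
As an aside on how these helpers read the lock word: with NR_CPUS < 256 the 32-bit slock packs two 8-bit ticket counters, which the bit-twiddling above compares. A comment-only restatement (illustrative, not part of the patch):

	/* Conceptual layout of lock->slock when NR_CPUS < 256:
	 *	bits 0..7  -- "owner": ticket currently being served
	 *	bits 8..15 -- "next":  next ticket to be handed out
	 *
	 * Locked:    next != owner (a CPU holds the ticket being served).
	 * Contended: next - owner > 1 (someone waits behind the holder).
	 */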
@@ -87,9 +87,7 @@ static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
 		     : "memory", "cc");
 }
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 {
 	int tmp;
 	short new;
@@ -110,7 +108,7 @@ static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }
 
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
 		     : "+m" (lock->slock)
@@ -118,21 +116,21 @@ static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
 		     : "memory", "cc");
 }
 #else
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
 }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
 }
 
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 {
 	int inc = 0x00010000;
 	int tmp;
@@ -153,9 +151,7 @@ static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
 		     : "memory", "cc");
 }
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 {
 	int tmp;
 	int new;
@@ -177,7 +173,7 @@ static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }
 
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
 		     : "+m" (lock->slock)
@@ -186,6 +182,98 @@ static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
 }
 #endif
 
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+
+#ifdef CONFIG_PARAVIRT
+/*
+ * Define virtualization-friendly old-style lock byte lock, for use in
+ * pv_lock_ops if desired.
+ *
+ * This differs from the pre-2.6.24 spinlock by always using xchgb
+ * rather than decb to take the lock; this allows it to use a
+ * zero-initialized lock structure.  It also maintains a 1-byte
+ * contention counter, so that we can implement
+ * __byte_spin_is_contended.
+ */
+struct __byte_spinlock {
+	s8 lock;
+	s8 spinners;
+};
+
+static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
+{
+	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+	return bl->lock != 0;
+}
+
+static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
+{
+	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+	return bl->spinners != 0;
+}
+
+static inline void __byte_spin_lock(raw_spinlock_t *lock)
+{
+	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+	s8 val = 1;
+
+	asm("1: xchgb %1, %0\n"
+	    "   test %1,%1\n"
+	    "   jz 3f\n"
+	    "   " LOCK_PREFIX "incb %2\n"
+	    "2: rep;nop\n"
+	    "   cmpb $1, %0\n"
+	    "   je 2b\n"
+	    "   " LOCK_PREFIX "decb %2\n"
+	    "   jmp 1b\n"
+	    "3:"
+	    : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
+}
+
+static inline int __byte_spin_trylock(raw_spinlock_t *lock)
+{
+	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+	u8 old = 1;
+
+	asm("xchgb %1,%0"
+	    : "+m" (bl->lock), "+q" (old) : : "memory");
+
+	return old == 0;
+}
+
+static inline void __byte_spin_unlock(raw_spinlock_t *lock)
+{
+	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
+	smp_wmb();
+	bl->lock = 0;
+}
+#else  /* !CONFIG_PARAVIRT */
+static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+{
+	return __ticket_spin_is_locked(lock);
+}
+
+static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+{
+	return __ticket_spin_is_contended(lock);
+}
+
+static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+	__ticket_spin_lock(lock);
+}
+
+static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+	return __ticket_spin_trylock(lock);
+}
+
+static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+	__ticket_spin_unlock(lock);
+}
+#endif	/* CONFIG_PARAVIRT */
+
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
 	while (__raw_spin_is_locked(lock))
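
The xchgb/incb/decb dance in __byte_spin_lock above is easier to follow as structured C. A rough equivalent using GCC atomic builtins (a sketch only; the kernel uses inline asm so it controls the exact instructions and LOCK prefixes):

	static inline void byte_spin_lock_sketch(struct __byte_spinlock *bl)
	{
		/* xchgb: atomically set the lock byte; a nonzero return
		 * means some other CPU already held it. */
		while (__sync_lock_test_and_set(&bl->lock, 1)) {
			__sync_fetch_and_add(&bl->spinners, 1);	/* lock incb */
			while (bl->lock)			/* cmpb $1 / je */
				cpu_relax();			/* rep;nop */
			__sync_fetch_and_sub(&bl->spinners, 1);	/* lock decb */
		}						/* jmp 1b: retry */
	}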
diff --git a/include/asm-x86/spinlock_types.h b/include/asm-x86/spinlock_types.h
index 9029cf78cf5d..06c071c9eee9 100644
--- a/include/asm-x86/spinlock_types.h
+++ b/include/asm-x86/spinlock_types.h
@@ -5,7 +5,7 @@
 # error "please don't include this file directly"
 #endif
 
-typedef struct {
+typedef struct raw_spinlock {
 	unsigned int slock;
 } raw_spinlock_t;
 
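
Giving the previously anonymous struct a tag is what lets paravirt.h refer to the type without pulling in this header, via the forward declaration the merge adds there:

	struct raw_spinlock;	/* legal now that the struct is named */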
diff --git a/include/asm-x86/xen/events.h b/include/asm-x86/xen/events.h
index f8d57ea1f05f..8ded74720024 100644
--- a/include/asm-x86/xen/events.h
+++ b/include/asm-x86/xen/events.h
@@ -5,6 +5,7 @@ enum ipi_vector {
 	XEN_RESCHEDULE_VECTOR,
 	XEN_CALL_FUNCTION_VECTOR,
 	XEN_CALL_FUNCTION_SINGLE_VECTOR,
+	XEN_SPIN_UNLOCK_VECTOR,
 
 	XEN_NR_IPIS,
 };
diff --git a/include/xen/events.h b/include/xen/events.h
index 67c4436554a9..4680ff3fbc91 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -44,4 +44,11 @@ extern void notify_remote_via_irq(int irq);
 
 extern void xen_irq_resume(void);
 
+/* Clear an irq's pending state, in preparation for polling on it */
+void xen_clear_irq_pending(int irq);
+
+/* Poll waiting for an irq to become pending.  In the usual case, the
+   irq will be disabled so it won't deliver an interrupt. */
+void xen_poll_irq(int irq);
+
 #endif /* _XEN_EVENTS_H */
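
These two calls are the building blocks for a blocking spinlock slow path: clear any stale event, then poll until the lock holder's unlock IPI arrives. A hypothetical caller (everything except the two functions declared above is illustrative) might look like:

	static void spin_wait_for_kick(int irq)
	{
		/* Don't consume an event left over from an earlier wait. */
		xen_clear_irq_pending(irq);

		/* Block until the holder sends XEN_SPIN_UNLOCK_VECTOR (or a
		 * spurious wakeup occurs); the caller re-checks the lock. */
		xen_poll_irq(irq);
	}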