author     Linus Torvalds <torvalds@linux-foundation.org>  2015-09-01 11:40:25 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-09-01 11:40:25 -0400
commit     5778077d03cb25aac9b6a428e18970642fc019e3 (patch)
tree       2e3f3da1fb99c3646da5ed9a09644696ca5f2309 /include/linux/spinlock.h
parent     65a99597f044c083983f4274ab049c9ec3b9d764 (diff)
parent     7e01ebffffedec22cea86ebe94802f909e4579ca (diff)
Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 asm changes from Ingo Molnar:
"The biggest changes in this cycle were:
- Revamp, simplify (and in some cases fix) Time Stamp Counter (TSC)
primitives. (Andy Lutomirski)
- Add new, comprehensible entry and exit handlers written in C.
(Andy Lutomirski)
- vm86 mode cleanups and fixes. (Brian Gerst)
- 32-bit compat code cleanups. (Brian Gerst)
The amount of simplification in low level assembly code is already
palpable:
arch/x86/entry/entry_32.S | 130 +----
arch/x86/entry/entry_64.S | 197 ++-----
but more simplifications are planned.
There's also the usual laundry mix of low level changes - see the
changelog for details"
* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (83 commits)
x86/asm: Drop repeated macro of X86_EFLAGS_AC definition
x86/asm/msr: Make wrmsrl() a function
x86/asm/delay: Introduce an MWAITX-based delay with a configurable timer
x86/asm: Add MONITORX/MWAITX instruction support
x86/traps: Weaken context tracking entry assertions
x86/asm/tsc: Add rdtscll() merge helper
selftests/x86: Add syscall_nt selftest
selftests/x86: Disable sigreturn_64
x86/vdso: Emit a GNU hash
x86/entry: Remove do_notify_resume(), syscall_trace_leave(), and their TIF masks
x86/entry/32: Migrate to C exit path
x86/entry/32: Remove 32-bit syscall audit optimizations
x86/vm86: Rename vm86->v86flags and v86mask
x86/vm86: Rename vm86->vm86_info to user_vm86
x86/vm86: Clean up vm86.h includes
x86/vm86: Move the vm86 IRQ definitions to vm86.h
x86/vm86: Use the normal pt_regs area for vm86
x86/vm86: Eliminate 'struct kernel_vm86_struct'
x86/vm86: Move fields from 'struct kernel_vm86_struct' to 'struct vm86'
x86/vm86: Move vm86 fields out of 'thread_struct'
...
Diffstat (limited to 'include/linux/spinlock.h')
-rw-r--r--  include/linux/spinlock.h | 30
1 file changed, 15 insertions, 15 deletions
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 16c5ed5a627c..47dd0cebd204 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -286,7 +286,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  */
 
-static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
 {
 	return &lock->rlock;
 }
@@ -297,17 +297,17 @@ do { \
 	raw_spin_lock_init(&(_lock)->rlock);	\
 } while (0)
 
-static inline void spin_lock(spinlock_t *lock)
+static __always_inline void spin_lock(spinlock_t *lock)
 {
 	raw_spin_lock(&lock->rlock);
 }
 
-static inline void spin_lock_bh(spinlock_t *lock)
+static __always_inline void spin_lock_bh(spinlock_t *lock)
 {
 	raw_spin_lock_bh(&lock->rlock);
 }
 
-static inline int spin_trylock(spinlock_t *lock)
+static __always_inline int spin_trylock(spinlock_t *lock)
 {
 	return raw_spin_trylock(&lock->rlock);
 }
@@ -327,7 +327,7 @@ do { \
 	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
 } while (0)
 
-static inline void spin_lock_irq(spinlock_t *lock)
+static __always_inline void spin_lock_irq(spinlock_t *lock)
 {
 	raw_spin_lock_irq(&lock->rlock);
 }
@@ -342,32 +342,32 @@ do { \
 	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
 } while (0)
 
-static inline void spin_unlock(spinlock_t *lock)
+static __always_inline void spin_unlock(spinlock_t *lock)
 {
 	raw_spin_unlock(&lock->rlock);
 }
 
-static inline void spin_unlock_bh(spinlock_t *lock)
+static __always_inline void spin_unlock_bh(spinlock_t *lock)
 {
 	raw_spin_unlock_bh(&lock->rlock);
 }
 
-static inline void spin_unlock_irq(spinlock_t *lock)
+static __always_inline void spin_unlock_irq(spinlock_t *lock)
 {
 	raw_spin_unlock_irq(&lock->rlock);
 }
 
-static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 {
 	raw_spin_unlock_irqrestore(&lock->rlock, flags);
 }
 
-static inline int spin_trylock_bh(spinlock_t *lock)
+static __always_inline int spin_trylock_bh(spinlock_t *lock)
 {
 	return raw_spin_trylock_bh(&lock->rlock);
 }
 
-static inline int spin_trylock_irq(spinlock_t *lock)
+static __always_inline int spin_trylock_irq(spinlock_t *lock)
 {
 	return raw_spin_trylock_irq(&lock->rlock);
 }
@@ -377,22 +377,22 @@ static inline int spin_trylock_irq(spinlock_t *lock)
 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
 })
 
-static inline void spin_unlock_wait(spinlock_t *lock)
+static __always_inline void spin_unlock_wait(spinlock_t *lock)
 {
 	raw_spin_unlock_wait(&lock->rlock);
 }
 
-static inline int spin_is_locked(spinlock_t *lock)
+static __always_inline int spin_is_locked(spinlock_t *lock)
 {
 	return raw_spin_is_locked(&lock->rlock);
 }
 
-static inline int spin_is_contended(spinlock_t *lock)
+static __always_inline int spin_is_contended(spinlock_t *lock)
 {
 	return raw_spin_is_contended(&lock->rlock);
 }
 
-static inline int spin_can_lock(spinlock_t *lock)
+static __always_inline int spin_can_lock(spinlock_t *lock)
 {
 	return raw_spin_can_lock(&lock->rlock);
 }
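
The entire change to this header is the inline -> __always_inline conversion on the spin_lock()/spin_unlock() family, which (for PREEMPT_RT=n, as the comment in the hunk above notes) are one-line trampolines onto the raw_spin_* variants. For readers who want to poke at the pattern outside the kernel, here is a minimal userspace sketch, not kernel code: the raw lock is a toy built on C11 atomic_flag, and __always_inline is spelled out via the GCC/Clang attribute rather than the kernel's compiler.h macro. The point it illustrates is that __always_inline forces the compiler to flatten the wrappers even when its inlining heuristics (or an unoptimized debug build) would otherwise leave a real function call behind.

#include <stdatomic.h>
#include <stdio.h>

/* Spelled the same way the kernel defines it, but via the plain GCC/Clang attribute. */
#define __always_inline inline __attribute__((__always_inline__))

/* Toy stand-ins for the kernel types: spinlock_t is just a wrapper
 * around the underlying raw lock, mirroring the layout used above. */
typedef struct { atomic_flag locked; } raw_spinlock_t;
typedef struct { raw_spinlock_t rlock; } spinlock_t;

static __always_inline void raw_spin_lock(raw_spinlock_t *lock)
{
	/* Spin until we are the ones who set the flag. */
	while (atomic_flag_test_and_set_explicit(&lock->locked, memory_order_acquire))
		;
}

static __always_inline void raw_spin_unlock(raw_spinlock_t *lock)
{
	atomic_flag_clear_explicit(&lock->locked, memory_order_release);
}

/* The wrappers the patch touches: one-line trampolines onto the raw
 * variants. Marking them __always_inline (instead of plain inline)
 * guarantees the compiler flattens them, so the wrapper layer costs
 * nothing at runtime. */
static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

int main(void)
{
	spinlock_t lock = { .rlock = { ATOMIC_FLAG_INIT } };

	spin_lock(&lock);
	puts("critical section");
	spin_unlock(&lock);
	return 0;
}

Building this with something like gcc -std=c11 -O2 and disassembling main should show the atomic operations emitted inline, with no spin_lock/spin_unlock call frames left over.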