| author | Denys Vlasenko <dvlasenk@redhat.com> | 2015-07-13 14:31:03 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2015-07-21 04:14:07 -0400 |
| commit | 3490565b633c705d2fb1f6ede51228952664663d | |
| tree | 264e6e7228a755b9890efec4db19449f5718c29f /include/linux/spinlock.h | |
| parent | 9dea5dc921b5f4045a18c63eb92e84dc274d17eb | |
locking/spinlocks: Force inlining of spinlock ops
With both GCC 4.7.2 and 4.9.2, GCC sometimes mysteriously
fails to inline very small functions that we expect to be inlined.
See:
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66122
In particular, with this config:
http://busybox.net/~vda/kernel_config
there are more than a thousand copies of tiny spinlock-related
functions:
$ nm --size-sort vmlinux | grep -iF ' t ' | uniq -c | grep -v '^ *1 ' | sort -rn | grep ' spin'
473 000000000000000b t spin_unlock_irqrestore
292 000000000000000b t spin_unlock
215 000000000000000b t spin_lock
134 000000000000000b t spin_unlock_irq
130 000000000000000b t spin_unlock_bh
120 000000000000000b t spin_lock_irq
106 000000000000000b t spin_lock_bh
Disassembly:
ffffffff81004720 <spin_lock>:
ffffffff81004720: 55 push %rbp
ffffffff81004721: 48 89 e5 mov %rsp,%rbp
ffffffff81004724: e8 f8 4e e2 02 callq <_raw_spin_lock>
ffffffff81004729: 5d pop %rbp
ffffffff8100472a: c3 retq
This patch fixes the problem via s/inline/__always_inline/ in
spinlock.h, which decreases vmlinux by about 40k:
    text     data      bss       dec     hex filename
82375570 22255544 20627456 125258570 7774b4a vmlinux.before
82335059 22255416 20627456 125217931 776ac8b vmlinux
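For reference, __always_inline maps to GCC's always_inline function
attribute, which forces inlining regardless of the compiler's size
heuristics. Below is a minimal standalone sketch of the technique: the
#define mirrors the kernel's GCC compiler header, and toy_lock_t,
toy_lock and toy_unlock are hypothetical illustrations, not kernel code.

/* Illustrative only: this mirrors how the kernel's compiler headers
 * force inlining on GCC; the toy lock below is a hypothetical example,
 * not the kernel's spinlock implementation. */
#define __always_inline inline __attribute__((always_inline))

typedef struct { volatile int locked; } toy_lock_t;

/* With plain 'inline', GCC is free to emit an out-of-line copy of a
 * wrapper this small; the always_inline attribute removes that freedom. */
static __always_inline void toy_lock(toy_lock_t *lock)
{
	while (__sync_lock_test_and_set(&lock->locked, 1))
		;	/* spin until the previous holder releases */
}

static __always_inline void toy_unlock(toy_lock_t *lock)
{
	__sync_lock_release(&lock->locked);	/* store 0 with release semantics */
}

Compiled with gcc -O2, the toy_lock/toy_unlock bodies should be emitted
at every call site rather than as standalone functions, which is the
behavior this patch restores for the spin_* wrappers.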
Signed-off-by: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Bart Van Assche <bvanassche@acm.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Rientjes <rientjes@google.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Graf <tgraf@suug.ch>
Link: http://lkml.kernel.org/r/1436812263-15243-1-git-send-email-dvlasenk@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/spinlock.h')
-rw-r--r-- include/linux/spinlock.h | 30 +++++++++++++++---------------
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 0063b24b4f36..ffcd053ca89a 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -296,7 +296,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  */
 
-static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
 {
 	return &lock->rlock;
 }
@@ -307,17 +307,17 @@ do { \
 	raw_spin_lock_init(&(_lock)->rlock); \
 } while (0)
 
-static inline void spin_lock(spinlock_t *lock)
+static __always_inline void spin_lock(spinlock_t *lock)
 {
 	raw_spin_lock(&lock->rlock);
 }
 
-static inline void spin_lock_bh(spinlock_t *lock)
+static __always_inline void spin_lock_bh(spinlock_t *lock)
 {
 	raw_spin_lock_bh(&lock->rlock);
 }
 
-static inline int spin_trylock(spinlock_t *lock)
+static __always_inline int spin_trylock(spinlock_t *lock)
 {
 	return raw_spin_trylock(&lock->rlock);
 }
@@ -337,7 +337,7 @@ do { \
 	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
 } while (0)
 
-static inline void spin_lock_irq(spinlock_t *lock)
+static __always_inline void spin_lock_irq(spinlock_t *lock)
 {
 	raw_spin_lock_irq(&lock->rlock);
 }
@@ -352,32 +352,32 @@ do { \
 	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
 } while (0)
 
-static inline void spin_unlock(spinlock_t *lock)
+static __always_inline void spin_unlock(spinlock_t *lock)
 {
 	raw_spin_unlock(&lock->rlock);
 }
 
-static inline void spin_unlock_bh(spinlock_t *lock)
+static __always_inline void spin_unlock_bh(spinlock_t *lock)
 {
 	raw_spin_unlock_bh(&lock->rlock);
 }
 
-static inline void spin_unlock_irq(spinlock_t *lock)
+static __always_inline void spin_unlock_irq(spinlock_t *lock)
 {
 	raw_spin_unlock_irq(&lock->rlock);
 }
 
-static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 {
 	raw_spin_unlock_irqrestore(&lock->rlock, flags);
 }
 
-static inline int spin_trylock_bh(spinlock_t *lock)
+static __always_inline int spin_trylock_bh(spinlock_t *lock)
 {
 	return raw_spin_trylock_bh(&lock->rlock);
 }
 
-static inline int spin_trylock_irq(spinlock_t *lock)
+static __always_inline int spin_trylock_irq(spinlock_t *lock)
 {
 	return raw_spin_trylock_irq(&lock->rlock);
 }
@@ -387,22 +387,22 @@ static inline int spin_trylock_irq(spinlock_t *lock)
 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
 })
 
-static inline void spin_unlock_wait(spinlock_t *lock)
+static __always_inline void spin_unlock_wait(spinlock_t *lock)
 {
 	raw_spin_unlock_wait(&lock->rlock);
 }
 
-static inline int spin_is_locked(spinlock_t *lock)
+static __always_inline int spin_is_locked(spinlock_t *lock)
 {
 	return raw_spin_is_locked(&lock->rlock);
 }
 
-static inline int spin_is_contended(spinlock_t *lock)
+static __always_inline int spin_is_contended(spinlock_t *lock)
 {
 	return raw_spin_is_contended(&lock->rlock);
 }
 
-static inline int spin_can_lock(spinlock_t *lock)
+static __always_inline int spin_can_lock(spinlock_t *lock)
 {
 	return raw_spin_can_lock(&lock->rlock);
 }
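After this change, rerunning the nm pipeline from the commit message
should no longer report duplicated out-of-line spin_* wrappers
(assuming the same config and toolchain as above):

$ nm --size-sort vmlinux | grep -iF ' t ' | uniq -c | grep -v '^ *1 ' | sort -rn | grep ' spin'

An empty result here would indicate that every wrapper was inlined at
its call sites.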