author    Linus Torvalds <torvalds@linux-foundation.org>  2014-03-31 17:13:25 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-03-31 17:13:25 -0400
commit    176ab02d4916f09d5d8cb63372d142df4378cdea (patch)
tree      7039b1282e7b34341789a5416fbd7ca5da0e557a /kernel/locking
parent    e06df6a7eae1ab1ef4deb076aeeaed90e948e5c0 (diff)
parent    ef178f9238b142cc1020265e176b20d27fd02ba9 (diff)
Merge branch 'x86-asmlinkage-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 LTO changes from Peter Anvin:
 "More infrastructure work in preparation for link-time optimization
  (LTO). Most of these changes are to make sure symbols accessed from
  assembly code are properly marked as visible so the linker doesn't
  remove them.

  My understanding is that the changes to support LTO are still not
  upstream in binutils, but are on the way there.

  This patchset should conclude the x86-specific changes, and remaining
  patches to actually enable LTO will be fed through the Kbuild tree
  (other than keeping up with changes to the x86 code base, of course),
  although not necessarily in this merge window"

* 'x86-asmlinkage-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (25 commits)
  Kbuild, lto: Handle basic LTO in modpost
  Kbuild, lto: Disable LTO for asm-offsets.c
  Kbuild, lto: Add a gcc-ld script to let run gcc as ld
  Kbuild, lto: add ld-version and ld-ifversion macros
  Kbuild, lto: Drop .number postfixes in modpost
  Kbuild, lto, workaround: Don't warn for initcall_reference in modpost
  lto: Disable LTO for sys_ni
  lto: Handle LTO common symbols in module loader
  lto, workaround: Add workaround for initcall reordering
  lto: Make asmlinkage __visible
  x86, lto: Disable LTO for the x86 VDSO
  initconst, x86: Fix initconst mistake in ts5500 code
  initconst: Fix initconst mistake in dcdbas
  asmlinkage: Make trace_hardirqs_on/off_caller visible
  asmlinkage, x86: Fix 32bit memcpy for LTO
  asmlinkage Make __stack_chk_failed and memcmp visible
  asmlinkage: Mark rwsem functions that can be called from assembler asmlinkage
  asmlinkage: Make main_extable_sort_needed visible
  asmlinkage, mutex: Mark __visible
  asmlinkage: Make trace_hardirq visible
  ...
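For reference, a minimal sketch of what the two annotations used throughout
this series expand to on gcc, paraphrased from the kernel headers of this era
(include/linux/compiler-gcc4.h and include/linux/linkage.h); an illustration,
not the verbatim headers:

/*
 * Tell the optimizer that this symbol is referenced from code it cannot
 * see (assembly files, other link units), so whole-program LTO must
 * neither drop it nor localize/rename it.
 */
#define __visible __attribute__((externally_visible))

/*
 * asmlinkage marks C functions called from assembly; after
 * "lto: Make asmlinkage __visible" (in this series) it implies
 * __visible as well. CPP_ASMLINKAGE is empty in C, `extern "C"` in C++.
 */
#define CPP_ASMLINKAGE
#define asmlinkage CPP_ASMLINKAGE __visible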
Diffstat (limited to 'kernel/locking')
-rw-r--r--  kernel/locking/lockdep.c     |  6
-rw-r--r--  kernel/locking/mutex.c       | 10
-rw-r--r--  kernel/locking/rwsem-xadd.c  |  4
3 files changed, 12 insertions(+), 8 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index bf0c6b0dd9c5..b0e9467922e1 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2557,7 +2557,7 @@ static void __trace_hardirqs_on_caller(unsigned long ip)
 	debug_atomic_inc(hardirqs_on_events);
 }
 
-void trace_hardirqs_on_caller(unsigned long ip)
+__visible void trace_hardirqs_on_caller(unsigned long ip)
 {
 	time_hardirqs_on(CALLER_ADDR0, ip);
 
@@ -2610,7 +2610,7 @@ EXPORT_SYMBOL(trace_hardirqs_on);
 /*
  * Hardirqs were disabled:
  */
-void trace_hardirqs_off_caller(unsigned long ip)
+__visible void trace_hardirqs_off_caller(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
@@ -4188,7 +4188,7 @@ void debug_show_held_locks(struct task_struct *task)
 }
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
 
-void lockdep_sys_exit(void)
+asmlinkage void lockdep_sys_exit(void)
 {
 	struct task_struct *curr = current;
 
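Note on the lockdep changes: in the x86 code of this era these functions are
also reached from assembly thunks (trace_hardirqs_on_thunk,
trace_hardirqs_off_thunk, lockdep_sys_exit_thunk), callers the compiler never
sees. A standalone sketch of the failure mode being fixed (hypothetical file
and symbol names, not kernel code):

/* visible_demo.c -- compile with: gcc -O2 -flto -c visible_demo.c
 * Suppose the only caller lives in a .S file the compiler never sees
 * (say, a startup.S doing "call called_from_asm"). Without the
 * attribute, link-time optimization may discard or rename the function
 * and the final link of the assembly reference breaks. */
__attribute__((externally_visible))
void called_from_asm(void)
{
	/* body irrelevant; what matters is that the symbol survives LTO */
}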
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 14fe72cc8ce7..bc73d33c6760 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -75,8 +75,7 @@ EXPORT_SYMBOL(__mutex_init);
  * We also put the fastpath first in the kernel image, to make sure the
  * branch is predicted by the CPU as default-untaken.
  */
-static __used noinline void __sched
-__mutex_lock_slowpath(atomic_t *lock_count);
+__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
 
 /**
  * mutex_lock - acquire the mutex
@@ -189,7 +188,8 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 }
 #endif
 
-static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
+__visible __used noinline
+void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
 /**
  * mutex_unlock - release the mutex
@@ -716,7 +716,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 /*
  * Release the lock, slowpath:
  */
-static __used noinline void
+__visible void
 __mutex_unlock_slowpath(atomic_t *lock_count)
 {
 	__mutex_unlock_common_slowpath(lock_count, 1);
@@ -773,7 +773,7 @@ int __sched mutex_lock_killable(struct mutex *lock)
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
-static __used noinline void __sched
+__visible void __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
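Note on the mutex changes: the slowpaths are invoked from the architecture
fastpath, which on x86 is (inline) assembly, so a whole-program optimizer sees
no C caller. A simplified, hypothetical sketch of the fastpath/slowpath split
(my_* names are invented; this is not the kernel's implementation):

#include <stdatomic.h>

/* Slowpath lives out of line; in the kernel the call comes from the
 * assembly fastpath, hence the visibility annotation. */
__attribute__((externally_visible, noinline))
void my_lock_slowpath(atomic_int *count)
{
	/* queue up, sleep, retry ... elided */
}

/* Counter convention: 1 = unlocked, 0 = locked, < 0 = locked, waiters. */
static inline void my_lock(atomic_int *count)
{
	/* Fastpath: a single atomic decrement takes an uncontended lock. */
	if (atomic_fetch_sub_explicit(count, 1, memory_order_acquire) != 1)
		my_lock_slowpath(count);	/* contended: slow road */
}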
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 19c5fa95e0b4..1d66e08e897d 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -143,6 +143,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
 /*
  * wait for the read lock to be granted
  */
+__visible
 struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
 {
 	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
@@ -190,6 +191,7 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
 /*
  * wait until we successfully acquire the write lock
  */
+__visible
 struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 {
 	long count, adjustment = -RWSEM_ACTIVE_WRITE_BIAS;
@@ -252,6 +254,7 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
  * handle waking up a waiter on the semaphore
  * - up_read/up_write has decremented the active part of count if we come here
  */
+__visible
 struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 {
 	unsigned long flags;
@@ -272,6 +275,7 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
  * - caller incremented waiting part of count and discovered it still negative
  * - just wake up any readers at the front of the queue
  */
+__visible
 struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
 {
 	unsigned long flags;
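Note on the rwsem changes: these four functions are the C failure/wake paths
behind the per-arch fastpaths; on x86 of this era they are reached via
register-saving assembly stubs (call_rwsem_down_read_failed and friends in
arch/x86/lib/rwsem.S), again callers LTO cannot see. A hypothetical sketch of
the xadd-style reader fastpath (my_* names invented, semantics simplified):

#include <stdatomic.h>

#define MY_ACTIVE_READ_BIAS 1L

struct my_rwsem { atomic_long count; };

/* C slowpath; in the kernel the caller is an assembly stub, hence the
 * visibility annotation. */
__attribute__((externally_visible))
struct my_rwsem *my_down_read_failed(struct my_rwsem *sem);

static inline void my_down_read(struct my_rwsem *sem)
{
	/* Optimistically add the reader bias (the "xadd")... */
	long old = atomic_fetch_add_explicit(&sem->count,
					     MY_ACTIVE_READ_BIAS,
					     memory_order_acquire);
	/* ...and fall into the slowpath if the resulting count is
	 * negative, i.e. a writer is active or queued. */
	if (old + MY_ACTIVE_READ_BIAS < 0)
		my_down_read_failed(sem);
}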