path: root/kernel/locking/mutex.c
author     Andi Kleen <ak@linux.intel.com>        2014-02-08 02:52:03 -0500
committer  H. Peter Anvin <hpa@linux.intel.com>   2014-02-13 21:13:19 -0500
commit     22d9fd3411c693ccae5f5c2280fb1f9bb106ad4f (patch)
tree       8af37fe84d323a59c9864eeb81309991c725878d /kernel/locking/mutex.c
parent     b35f8305339f1ba3070fe606c6ef0d86ef093dee (diff)
asmlinkage, mutex: Mark __visible
Various kernel/mutex.c functions can be called from inline assembler,
so they should be all global and __visible.

Cc: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Link: http://lkml.kernel.org/r/1391845930-28580-7-git-send-email-ak@linux.intel.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
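For context: in gcc builds of this era __visible expands to __attribute__((externally_visible)), which tells the optimizer that a symbol is referenced from outside the translation units it can see, so link-time optimization must not drop or localize it. The mutex fastpath invokes the slowpath from an asm() block, so the compiler never sees a C-level caller of __mutex_lock_slowpath(). The user-space sketch below only illustrates that situation; the names (slowpath_note, fastpath) are made up for the demo, and the register clobbers assume the x86-64 SysV ABI (real kernel fastpath macros handle the calling-convention details themselves).

/*
 * Illustrative sketch only (not kernel code): a function whose sole
 * caller is hidden inside an asm() string.  Assumes gcc on x86-64,
 * SysV ABI; build e.g. with:  gcc -O2 -flto demo.c -o demo
 */
#include <stdio.h>

/* In the kernel this comes from <linux/compiler.h>; repeated here for the demo. */
#define __visible __attribute__((externally_visible))

static volatile long slowpath_hits;

/* Must keep its global symbol: the only reference is the "call" string below. */
__visible void slowpath_note(void)
{
	slowpath_hits++;
}

static void fastpath(void)
{
	/*
	 * The compiler sees only an opaque string here, not a function
	 * call, so the caller-saved registers the callee may use are
	 * clobbered explicitly.
	 */
	asm volatile("call slowpath_note"
		     : : : "rax", "rcx", "rdx", "rsi", "rdi",
		           "r8", "r9", "r10", "r11", "memory");
}

int main(void)
{
	fastpath();
	printf("slowpath hits: %ld\n", slowpath_hits);
	return 0;
}

Without the attribute, -flto (or -fwhole-program) is free to conclude that slowpath_note is unreferenced and discard it, and the link then fails because the reference inside the assembler string cannot be resolved; the same reasoning applies to the mutex slowpaths once the kernel is built with LTO.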
Diffstat (limited to 'kernel/locking/mutex.c')
-rw-r--r--  kernel/locking/mutex.c | 10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 4dd6e4c219de..adbc0d0f314b 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -67,8 +67,7 @@ EXPORT_SYMBOL(__mutex_init);
  * We also put the fastpath first in the kernel image, to make sure the
  * branch is predicted by the CPU as default-untaken.
  */
-static __used noinline void __sched
-__mutex_lock_slowpath(atomic_t *lock_count);
+__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
 
 /**
  * mutex_lock - acquire the mutex
@@ -225,7 +224,8 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 }
 #endif
 
-static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
+__visible __used noinline
+void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
 /**
  * mutex_unlock - release the mutex
@@ -746,7 +746,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 /*
  * Release the lock, slowpath:
  */
-static __used noinline void
+__visible void
 __mutex_unlock_slowpath(atomic_t *lock_count)
 {
 	__mutex_unlock_common_slowpath(lock_count, 1);
@@ -803,7 +803,7 @@ int __sched mutex_lock_killable(struct mutex *lock)
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
-static __used noinline void __sched
+__visible void __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);