author     Andi Kleen <ak@linux.intel.com>        2014-02-08 02:52:03 -0500
committer  H. Peter Anvin <hpa@linux.intel.com>   2014-02-13 21:13:19 -0500
commit     22d9fd3411c693ccae5f5c2280fb1f9bb106ad4f
tree       8af37fe84d323a59c9864eeb81309991c725878d /kernel/locking/mutex.c
parent     b35f8305339f1ba3070fe606c6ef0d86ef093dee
asmlinkage, mutex: Mark __visible
Various kernel/locking/mutex.c functions can be called from
inline assembler, so they should all be global and
__visible.
Cc: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Link: http://lkml.kernel.org/r/1391845930-28580-7-git-send-email-ak@linux.intel.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
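
For context (not part of this patch): __visible is the kernel's annotation for
symbols that are referenced from outside what the compiler can see, e.g. from
inline assembly, so that LTO / whole-program optimization neither drops them nor
treats them as local. A minimal sketch of the definition used on gcc >= 4.6 in
this era, from include/linux/compiler-gcc4.h as best recalled here:

	/* tell the optimizer that something outside this unit uses the symbol */
	#define __visible	__attribute__((externally_visible))

Together with dropping "static", this keeps the slowpath symbols global and
resolvable for code that names them directly in assembly.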
Diffstat (limited to 'kernel/locking/mutex.c')
-rw-r--r--  kernel/locking/mutex.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 4dd6e4c219de..adbc0d0f314b 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -67,8 +67,7 @@ EXPORT_SYMBOL(__mutex_init);
  * We also put the fastpath first in the kernel image, to make sure the
  * branch is predicted by the CPU as default-untaken.
  */
-static __used noinline void __sched
-__mutex_lock_slowpath(atomic_t *lock_count);
+__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
 
 /**
  * mutex_lock - acquire the mutex
@@ -225,7 +224,8 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 }
 #endif
 
-static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
+__visible __used noinline
+void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
 /**
  * mutex_unlock - release the mutex
@@ -746,7 +746,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 /*
  * Release the lock, slowpath:
  */
-static __used noinline void
+__visible void
 __mutex_unlock_slowpath(atomic_t *lock_count)
 {
 	__mutex_unlock_common_slowpath(lock_count, 1);
@@ -803,7 +803,7 @@ int __sched mutex_lock_killable(struct mutex *lock)
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
-static __used noinline void __sched
+__visible void __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
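
Why "static" alone is not enough here (illustration, not part of this patch):
on x86-64 the mutex fastpath can emit the call to the slowpath straight from an
asm statement, so the optimizer's call graph never sees the reference, and an
LTO build may localize or discard the function, leaving the assembled call
unresolved. A paraphrased sketch of the non-asm-goto fastpath macro from
arch/x86/include/asm/mutex_64.h of this period (simplified, not verbatim):

#define __mutex_fastpath_lock(v, fail_fn)				\
do {									\
	unsigned long dummy;						\
									\
	asm volatile(LOCK_PREFIX "   decl (%%rdi)\n"			\
		     "   jns 1f\n"					\
		     /* slowpath is named only inside the string */	\
		     "   call " #fail_fn "\n"				\
		     "1:"						\
		     : "=D" (dummy)					\
		     : "D" (v)						\
		     : "rax", "rsi", "rdx", "rcx",			\
		       "r8", "r9", "r10", "r11", "memory");		\
} while (0)

mutex_lock() expands this as __mutex_fastpath_lock(&lock->count,
__mutex_lock_slowpath), so the only reference the toolchain emits to
__mutex_lock_slowpath on this path is the textual "call" above; making the
slowpaths global and __visible guarantees that the symbol still exists and
resolves at link time.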