Diffstat (limited to 'kernel')
-rw-r--r--	kernel/mutex.c          | 46 +++++++++++++++++++++++++
-rw-r--r--	kernel/sched/core.c     | 45 ----------------------
-rw-r--r--	kernel/sched/features.h |  7 ----
3 files changed, 46 insertions, 52 deletions
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 52f23011b6e0..262d7177adad 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -95,6 +95,52 @@ void __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 #endif
 
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+/*
+ * Mutex spinning code migrated from kernel/sched/core.c
+ */
+
+static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
+{
+	if (lock->owner != owner)
+		return false;
+
+	/*
+	 * Ensure we emit the owner->on_cpu dereference _after_ checking that
+	 * lock->owner still matches owner. If that fails, owner might point
+	 * to free()d memory; if it still matches, the rcu_read_lock()
+	 * ensures the memory stays valid.
+	 */
+	barrier();
+
+	return owner->on_cpu;
+}
+
+/*
+ * Look out! "owner" is an entirely speculative pointer
+ * access and not reliable.
+ */
+static noinline
+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
+{
+	rcu_read_lock();
+	while (owner_running(lock, owner)) {
+		if (need_resched())
+			break;
+
+		arch_mutex_cpu_relax();
+	}
+	rcu_read_unlock();
+
+	/*
+	 * We break out of the loop above on need_resched() and when the
+	 * owner changed, which is a sign of heavy contention. Return
+	 * success only when lock->owner is NULL.
+	 */
+	return lock->owner == NULL;
+}
+#endif
+
 static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
 /**
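The helpers above pair a validity check with a speculative dereference: spin only while the task that owned the lock when we started is still the owner and still on a CPU. A hypothetical userspace analogue of that pattern follows; C11 atomics stand in for the kernel's barrier()/ACCESS_ONCE discipline, should_resched() stands in for need_resched(), and all names are illustrative rather than taken from the patch.

#include <stdatomic.h>
#include <stdbool.h>

struct owner { atomic_bool on_cpu; };
struct mlock { _Atomic(struct owner *) owner; };

bool should_resched(void);                  /* need_resched() stand-in */
#define cpu_relax() __builtin_ia32_pause()  /* x86-only stand-in for arch_mutex_cpu_relax() */

/* Spin only while the same task still owns the lock *and* is on a CPU.
 * In the kernel this runs under rcu_read_lock(), which is what keeps
 * *owner from being freed while we peek at it. */
static bool spin_on_owner(struct mlock *lock, struct owner *owner)
{
	while (atomic_load(&lock->owner) == owner &&
	       atomic_load(&owner->on_cpu)) {
		if (should_resched())
			break;
		cpu_relax();
	}

	/* NULL owner means the lock was released: report likely success. */
	return atomic_load(&lock->owner) == NULL;
}

The atomic loads here make the check-then-dereference ordering explicit; the kernel version instead relies on barrier() to stop the compiler from hoisting the owner->on_cpu load above the lock->owner comparison.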
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7f12624a393c..b37a22b99e0e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2997,51 +2997,6 @@ void __sched schedule_preempt_disabled(void)
 	preempt_disable();
 }
 
-#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-
-static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
-{
-	if (lock->owner != owner)
-		return false;
-
-	/*
-	 * Ensure we emit the owner->on_cpu dereference _after_ checking that
-	 * lock->owner still matches owner. If that fails, owner might point
-	 * to free()d memory; if it still matches, the rcu_read_lock()
-	 * ensures the memory stays valid.
-	 */
-	barrier();
-
-	return owner->on_cpu;
-}
-
-/*
- * Look out! "owner" is an entirely speculative pointer
- * access and not reliable.
- */
-int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
-{
-	if (!sched_feat(OWNER_SPIN))
-		return 0;
-
-	rcu_read_lock();
-	while (owner_running(lock, owner)) {
-		if (need_resched())
-			break;
-
-		arch_mutex_cpu_relax();
-	}
-	rcu_read_unlock();
-
-	/*
-	 * We break out of the loop above on need_resched() and when the
-	 * owner changed, which is a sign of heavy contention. Return
-	 * success only when lock->owner is NULL.
-	 */
-	return lock->owner == NULL;
-}
-#endif
-
 #ifdef CONFIG_PREEMPT
 /*
  * this is the entry point to schedule() from in-kernel preemption
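Not shown in either hunk is the caller in the mutex slowpath, which loops: spin on the owner, then retry the lock's cmpxchg fast path once spinning reports the lock may be free. The return-value convention (nonzero only when lock->owner went NULL) exists for that caller's benefit. A rough, hypothetical userspace sketch of that shape, in the same analogue style as above and simplified from memory rather than copied from the kernel's __mutex_lock_common:

#include <stdatomic.h>
#include <stdbool.h>

struct owner { atomic_bool on_cpu; };
struct mlock { _Atomic(struct owner *) owner; atomic_int count; };

bool spin_on_owner(struct mlock *lock, struct owner *owner); /* as sketched earlier */
bool should_resched(void);                                   /* need_resched() stand-in */

/* Optimistic spin: retrying the fast path is worthwhile only while the
 * holder is actively running; otherwise give up and block. */
static bool optimistic_spin(struct mlock *lock)
{
	for (;;) {
		struct owner *owner = atomic_load(&lock->owner);

		/* Holder scheduled out, or we must reschedule: stop spinning. */
		if (owner && !spin_on_owner(lock, owner))
			return false;

		/* Lock looks free: try to grab it (1 = unlocked, 0 = locked). */
		int expected = 1;
		if (atomic_compare_exchange_strong(&lock->count, &expected, 0))
			return true;

		if (should_resched())
			return false;
	}
}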
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 1ad1d2b5395f..99399f8e4799 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -46,13 +46,6 @@ SCHED_FEAT(DOUBLE_TICK, false)
 SCHED_FEAT(LB_BIAS, true)
 
 /*
- * Spin-wait on mutex acquisition when the mutex owner is running on
- * another cpu -- assumes that when the owner is running, it will soon
- * release the lock. Decreases scheduling overhead.
- */
-SCHED_FEAT(OWNER_SPIN, true)
-
-/*
  * Decrement CPU power based on time not spent running tasks
  */
 SCHED_FEAT(NONTASK_POWER, true)
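For context on what a SCHED_FEAT() line buys: features.h is re-included under different definitions of the macro to build a bit index and a default mask, and with CONFIG_SCHED_DEBUG those bits can be flipped at runtime through /sys/kernel/debug/sched_features. Deleting the OWNER_SPIN line therefore removes both the compile-time default and the runtime knob, which is why the sched_feat(OWNER_SPIN) test disappears from the code instead of moving with it. A hypothetical miniature of that machinery (the __FEAT_* names are illustrative, not the kernel's):

#include <stdbool.h>

/* Pass 1: each SCHED_FEAT() line becomes an enum bit position. */
#define SCHED_FEAT(name, enabled) __FEAT_##name,
enum {
	SCHED_FEAT(LB_BIAS, true)
	SCHED_FEAT(NONTASK_POWER, true)
	__FEAT_NR,
};
#undef SCHED_FEAT

/* Pass 2: OR every default-enabled feature into a bitmask. */
#define SCHED_FEAT(name, enabled) ((unsigned long)(enabled) << __FEAT_##name) |
static unsigned long sched_features =
	SCHED_FEAT(LB_BIAS, true)
	SCHED_FEAT(NONTASK_POWER, true)
	0;
#undef SCHED_FEAT

/* Runtime gate, as the removed !sched_feat(OWNER_SPIN) check used it. */
#define sched_feat(x) (sched_features & (1UL << __FEAT_##x))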