path: root/kernel/sched
author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-07-06 08:20:14 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-12-06 14:51:26 -0500
commit	f8b6d1cc7dc15cf3de538b864eefaedad7a84d85 (patch)
tree	084948d4bebc74a66231d95e03daaeb808640d66 /kernel/sched
parent	be726ffd1ef291c04c4d6632ac277afa1c281712 (diff)
sched: Use jump_labels for sched_feat
Now that we initialize jump_labels before sched_init() we can use them
for the debug features without having to worry about a window where
they have the wrong setting.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-vpreo4hal9e0kzqmg5y0io2k@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	| 46
-rw-r--r--	kernel/sched/features.h	| 30
-rw-r--r--	kernel/sched/sched.h	| 27
3 files changed, 81 insertions, 22 deletions
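The patch leans on a preprocessor generator trick: kernel/sched/features.h is #included several times, each time with a different definition of SCHED_FEAT(), so one feature list produces the __SCHED_FEAT_* enum, the sched_feat_names[] table and the sched_feat_keys[] jump-label array in lockstep. Below is a minimal, standalone user-space sketch of that pattern, offered only as an illustration: the DEMO_* names are invented here, the feature list is a macro where the kernel re-#includes a header, and a plain bool stands in for the kernel's struct jump_label_key.

/*
 * Standalone user-space sketch of the X-macro pattern this patch relies on.
 * Not kernel code; names and layout are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

/* One feature list, consumed three times below. */
#define DEMO_FEATURES				\
	DEMO_FEAT(GENTLE_FAIR_SLEEPERS, true)	\
	DEMO_FEAT(NEXT_BUDDY, false)		\
	DEMO_FEAT(HRTICK, false)

/* Pass 1: build the enum, terminated by __DEMO_FEAT_NR. */
#define DEMO_FEAT(name, enabled) __DEMO_FEAT_##name,
enum { DEMO_FEATURES __DEMO_FEAT_NR };
#undef DEMO_FEAT

/* Pass 2: build the name table used by the show/write handlers. */
#define DEMO_FEAT(name, enabled) #name,
static const char * const demo_feat_names[] = { DEMO_FEATURES };
#undef DEMO_FEAT

/* Pass 3: build the per-feature state table (the kernel builds
 * sched_feat_keys[] from the same list in kernel/sched/core.c). */
#define DEMO_FEAT(name, enabled) enabled,
static bool demo_feat_state[] = { DEMO_FEATURES };
#undef DEMO_FEAT

/* Counterpart of the non-jump-label sched_feat(x) fallback. */
#define demo_feat(x) (demo_feat_state[__DEMO_FEAT_##x])

int main(void)
{
	/* Mirrors sched_feat_show(): print "NO_" for disabled features. */
	for (int i = 0; i < __DEMO_FEAT_NR; i++)
		printf("%s%s\n", demo_feat_state[i] ? "" : "NO_",
		       demo_feat_names[i]);

	if (demo_feat(HRTICK))
		puts("HRTICK enabled");
	return 0;
}

Because every table is generated from the same list, adding an entry to features.h keeps the enum, the names and the keys in sync automatically. That is what lets sched_feat(x) become a jump-label test under HAVE_JUMP_LABEL, while the fallback keeps reading sysctl_sched_features as before.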
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9ac22d2b0dd3..3c5b21e2ef20 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -149,7 +149,7 @@ static int sched_feat_show(struct seq_file *m, void *v)
 {
 	int i;
 
-	for (i = 0; sched_feat_names[i]; i++) {
+	for (i = 0; i < __SCHED_FEAT_NR; i++) {
 		if (!(sysctl_sched_features & (1UL << i)))
 			seq_puts(m, "NO_");
 		seq_printf(m, "%s ", sched_feat_names[i]);
@@ -159,6 +159,36 @@ static int sched_feat_show(struct seq_file *m, void *v)
 	return 0;
 }
 
+#ifdef HAVE_JUMP_LABEL
+
+#define jump_label_key__true  jump_label_key_enabled
+#define jump_label_key__false jump_label_key_disabled
+
+#define SCHED_FEAT(name, enabled)	\
+	jump_label_key__##enabled ,
+
+struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR] = {
+#include "features.h"
+};
+
+#undef SCHED_FEAT
+
+static void sched_feat_disable(int i)
+{
+	if (jump_label_enabled(&sched_feat_keys[i]))
+		jump_label_dec(&sched_feat_keys[i]);
+}
+
+static void sched_feat_enable(int i)
+{
+	if (!jump_label_enabled(&sched_feat_keys[i]))
+		jump_label_inc(&sched_feat_keys[i]);
+}
+#else
+static void sched_feat_disable(int i) { };
+static void sched_feat_enable(int i) { };
+#endif /* HAVE_JUMP_LABEL */
+
 static ssize_t
 sched_feat_write(struct file *filp, const char __user *ubuf,
 		size_t cnt, loff_t *ppos)
@@ -182,17 +212,20 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
 		cmp += 3;
 	}
 
-	for (i = 0; sched_feat_names[i]; i++) {
+	for (i = 0; i < __SCHED_FEAT_NR; i++) {
 		if (strcmp(cmp, sched_feat_names[i]) == 0) {
-			if (neg)
+			if (neg) {
 				sysctl_sched_features &= ~(1UL << i);
-			else
+				sched_feat_disable(i);
+			} else {
 				sysctl_sched_features |= (1UL << i);
+				sched_feat_enable(i);
+			}
 			break;
 		}
 	}
 
-	if (!sched_feat_names[i])
+	if (i == __SCHED_FEAT_NR)
 		return -EINVAL;
 
 	*ppos += cnt;
@@ -221,8 +254,7 @@ static __init int sched_init_debug(void)
 	return 0;
 }
 late_initcall(sched_init_debug);
-
-#endif
+#endif /* CONFIG_SCHED_DEBUG */
 
 /*
  * Number of tasks to iterate in a single balance run.
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 84802245abd2..e61fd73913d0 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -3,13 +3,13 @@
  * them to run sooner, but does not allow tons of sleepers to
  * rip the spread apart.
  */
-SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1)
+SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
 
 /*
  * Place new tasks ahead so that they do not starve already running
  * tasks
  */
-SCHED_FEAT(START_DEBIT, 1)
+SCHED_FEAT(START_DEBIT, true)
 
 /*
  * Based on load and program behaviour, see if it makes sense to place
@@ -17,54 +17,54 @@ SCHED_FEAT(START_DEBIT, 1)
  * improve cache locality. Typically used with SYNC wakeups as
  * generated by pipes and the like, see also SYNC_WAKEUPS.
  */
-SCHED_FEAT(AFFINE_WAKEUPS, 1)
+SCHED_FEAT(AFFINE_WAKEUPS, true)
 
 /*
  * Prefer to schedule the task we woke last (assuming it failed
  * wakeup-preemption), since its likely going to consume data we
  * touched, increases cache locality.
  */
-SCHED_FEAT(NEXT_BUDDY, 0)
+SCHED_FEAT(NEXT_BUDDY, false)
 
 /*
  * Prefer to schedule the task that ran last (when we did
  * wake-preempt) as that likely will touch the same data, increases
  * cache locality.
  */
-SCHED_FEAT(LAST_BUDDY, 1)
+SCHED_FEAT(LAST_BUDDY, true)
 
 /*
  * Consider buddies to be cache hot, decreases the likelyness of a
  * cache buddy being migrated away, increases cache locality.
  */
-SCHED_FEAT(CACHE_HOT_BUDDY, 1)
+SCHED_FEAT(CACHE_HOT_BUDDY, true)
 
 /*
  * Use arch dependent cpu power functions
  */
-SCHED_FEAT(ARCH_POWER, 0)
+SCHED_FEAT(ARCH_POWER, false)
 
-SCHED_FEAT(HRTICK, 0)
-SCHED_FEAT(DOUBLE_TICK, 0)
-SCHED_FEAT(LB_BIAS, 1)
+SCHED_FEAT(HRTICK, false)
+SCHED_FEAT(DOUBLE_TICK, false)
+SCHED_FEAT(LB_BIAS, true)
 
 /*
  * Spin-wait on mutex acquisition when the mutex owner is running on
  * another cpu -- assumes that when the owner is running, it will soon
  * release the lock. Decreases scheduling overhead.
  */
-SCHED_FEAT(OWNER_SPIN, 1)
+SCHED_FEAT(OWNER_SPIN, true)
 
 /*
  * Decrement CPU power based on time not spent running tasks
  */
-SCHED_FEAT(NONTASK_POWER, 1)
+SCHED_FEAT(NONTASK_POWER, true)
 
 /*
  * Queue remote wakeups on the target CPU and process them
  * using the scheduler IPI. Reduces rq->lock contention/bounces.
  */
-SCHED_FEAT(TTWU_QUEUE, 1)
+SCHED_FEAT(TTWU_QUEUE, true)
 
-SCHED_FEAT(FORCE_SD_OVERLAP, 0)
-SCHED_FEAT(RT_RUNTIME_SHARE, 1)
+SCHED_FEAT(FORCE_SD_OVERLAP, false)
+SCHED_FEAT(RT_RUNTIME_SHARE, true)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c24801636219..d8d3613a4055 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -581,6 +581,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
  */
 #ifdef CONFIG_SCHED_DEBUG
+# include <linux/jump_label.h>
 # define const_debug __read_mostly
 #else
 # define const_debug const
@@ -593,11 +594,37 @@ extern const_debug unsigned int sysctl_sched_features;
 
 enum {
 #include "features.h"
+	__SCHED_FEAT_NR,
 };
 
 #undef SCHED_FEAT
 
+#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
+static __always_inline bool static_branch__true(struct jump_label_key *key)
+{
+	return likely(static_branch(key)); /* Not out of line branch. */
+}
+
+static __always_inline bool static_branch__false(struct jump_label_key *key)
+{
+	return unlikely(static_branch(key)); /* Out of line branch. */
+}
+
+#define SCHED_FEAT(name, enabled)					\
+static __always_inline bool static_branch_##name(struct jump_label_key *key) \
+{									\
+	return static_branch__##enabled(key);				\
+}
+
+#include "features.h"
+
+#undef SCHED_FEAT
+
+extern struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR];
+#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
+#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
 #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
+#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
 
 static inline u64 global_rt_period(void)
 {