Diffstat (limited to 'kernel/sched/sched.h')

 kernel/sched/sched.h | 32 ++++++++++++--------------------
 1 file changed, 12 insertions(+), 20 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 98c0c2623db8..fb3acba4d52e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -36,11 +36,7 @@ extern __read_mostly int scheduler_running;
 
 /*
  * These are the 'tuning knobs' of the scheduler:
- *
- * default timeslice is 100 msecs (used only for SCHED_RR tasks).
- * Timeslices get refilled after they expire.
  */
-#define DEF_TIMESLICE		(100 * HZ / 1000)
 
 /*
  * single value that denotes runtime == period, ie unlimited time.
@@ -216,9 +212,6 @@ struct cfs_rq {
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
 
-	struct list_head tasks;
-	struct list_head *balance_iterator;
-
 	/*
 	 * 'curr' points to currently running entity on this cfs_rq.
 	 * It is set to NULL otherwise (i.e when none are currently running).
@@ -246,11 +239,6 @@ struct cfs_rq {
 
 #ifdef CONFIG_SMP
 	/*
-	 * the part of load.weight contributed by tasks
-	 */
-	unsigned long task_weight;
-
-	/*
 	 * h_load = weight * f(tg)
 	 *
 	 * Where f(tg) is the recursive weight fraction assigned to
@@ -424,6 +412,8 @@ struct rq {
 	int cpu;
 	int online;
 
+	struct list_head cfs_tasks;
+
 	u64 rt_avg;
 	u64 age_stamp;
 	u64 idle_stamp;
@@ -462,7 +452,6 @@ struct rq {
 	unsigned int yld_count;
 
 	/* schedule() stats */
-	unsigned int sched_switch;
 	unsigned int sched_count;
 	unsigned int sched_goidle;
 
@@ -611,7 +600,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
  */
 #ifdef CONFIG_SCHED_DEBUG
-# include <linux/jump_label.h>
+# include <linux/static_key.h>
 # define const_debug __read_mostly
 #else
 # define const_debug const
@@ -630,18 +619,18 @@ enum {
 #undef SCHED_FEAT
 
 #if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
-static __always_inline bool static_branch__true(struct jump_label_key *key)
+static __always_inline bool static_branch__true(struct static_key *key)
 {
-	return likely(static_branch(key)); /* Not out of line branch. */
+	return static_key_true(key); /* Not out of line branch. */
 }
 
-static __always_inline bool static_branch__false(struct jump_label_key *key)
+static __always_inline bool static_branch__false(struct static_key *key)
 {
-	return unlikely(static_branch(key)); /* Out of line branch. */
+	return static_key_false(key); /* Out of line branch. */
 }
 
 #define SCHED_FEAT(name, enabled)					\
-static __always_inline bool static_branch_##name(struct jump_label_key *key) \
+static __always_inline bool static_branch_##name(struct static_key *key) \
 {									\
 	return static_branch__##enabled(key);				\
 }
@@ -650,7 +639,7 @@ static __always_inline bool static_branch_##name(struct jump_label_key *key) \
 
 #undef SCHED_FEAT
 
-extern struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR];
+extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
 #else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
 #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
@@ -692,6 +681,9 @@ static inline int task_running(struct rq *rq, struct task_struct *p)
 #ifndef finish_arch_switch
 # define finish_arch_switch(prev)	do { } while (0)
 #endif
+#ifndef finish_arch_post_lock_switch
+# define finish_arch_post_lock_switch()	do { } while (0)
+#endif
 
 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
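
Note on the rq->cfs_tasks hunk: the diff drops the per-cfs_rq 'tasks' list and
its 'balance_iterator' in favor of a single per-runqueue list of all CFS tasks
that the load balancer can walk directly. Below is a minimal userspace sketch
of the intrusive-list pattern this relies on; 'struct task', the list helpers,
and the 'group_node' field name are simplified stand-ins for illustration, not
the kernel's <linux/list.h> or task_struct layout. In the kernel, the list
appears to be maintained on the entity accounting (enqueue/dequeue) paths.

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

struct task {
	int pid;
	struct list_head group_node;	/* link into rq->cfs_tasks */
};

struct rq {
	struct list_head cfs_tasks;	/* all CFS tasks on this runqueue */
};

/* Recover the containing struct from a pointer to its embedded node. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct rq rq;
	struct task a = { .pid = 1 }, b = { .pid = 2 };
	struct list_head *pos;

	INIT_LIST_HEAD(&rq.cfs_tasks);
	list_add_tail(&a.group_node, &rq.cfs_tasks);
	list_add_tail(&b.group_node, &rq.cfs_tasks);

	/* Walk the runqueue's task list, as the load balancer would. */
	for (pos = rq.cfs_tasks.next; pos != &rq.cfs_tasks; pos = pos->next)
		printf("pid %d\n", container_of(pos, struct task, group_node)->pid);
	return 0;
}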
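
Note on the jump_label hunks: the renames track the kernel-wide conversion of
'struct jump_label_key' to 'struct static_key', replacing the single
static_branch() test with the explicit static_key_true()/static_key_false()
pair so the default branch direction is encoded at the call site. The sketch
below mocks that pattern in ordinary C so the SCHED_FEAT() macro expansion can
be followed end to end; here 'struct static_key' is a plain bool stand-in (the
real kernel type patches branch instructions at runtime), and the two-entry
feature list merely stands in for kernel/sched/features.h.

#include <stdbool.h>
#include <stdio.h>

/* Mock: the real struct static_key lives in <linux/static_key.h>. */
struct static_key { bool enabled; };

static inline bool static_key_true(struct static_key *key)
{
	return key->enabled;	/* kernel: inline (likely) branch */
}

static inline bool static_key_false(struct static_key *key)
{
	return key->enabled;	/* kernel: out-of-line (unlikely) branch */
}

static inline bool static_branch__true(struct static_key *key)
{
	return static_key_true(key);	/* feature defaults to enabled */
}

static inline bool static_branch__false(struct static_key *key)
{
	return static_key_false(key);	/* feature defaults to disabled */
}

/* Illustrative feature list standing in for kernel/sched/features.h. */
#define SCHED_FEAT_LIST(F)			\
	F(GENTLE_FAIR_SLEEPERS, true)		\
	F(START_DEBIT, false)

enum {
#define SCHED_FEAT(name, enabled) __SCHED_FEAT_##name,
	SCHED_FEAT_LIST(SCHED_FEAT)
#undef SCHED_FEAT
	__SCHED_FEAT_NR,
};

/* Stamp out one inline predicate per feature, as the hunk above does. */
#define SCHED_FEAT(name, enabled)				\
static inline bool static_branch_##name(struct static_key *key) \
{								\
	return static_branch__##enabled(key);			\
}
SCHED_FEAT_LIST(SCHED_FEAT)
#undef SCHED_FEAT

static struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
	{ .enabled = true },	/* GENTLE_FAIR_SLEEPERS */
	{ .enabled = false },	/* START_DEBIT */
};

#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))

int main(void)
{
	printf("GENTLE_FAIR_SLEEPERS: %d\n", sched_feat(GENTLE_FAIR_SLEEPERS));
	printf("START_DEBIT:          %d\n", sched_feat(START_DEBIT));
	return 0;
}

Because 'enabled' is pasted with ## rather than evaluated, each sched_feat(x)
call resolves at compile time to the predicate matching that feature's default,
which is what lets the kernel version turn the test into a patched jump.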