Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--	kernel/sched/sched.h	71
1 file changed, 29 insertions(+), 42 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ce39224d6155..ef0a7b2439dd 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -10,8 +10,16 @@
 #include "cpupri.h"
 #include "cpuacct.h"
 
+struct rq;
+
 extern __read_mostly int scheduler_running;
 
+extern unsigned long calc_load_update;
+extern atomic_long_t calc_load_tasks;
+
+extern long calc_load_fold_active(struct rq *this_rq);
+extern void update_cpu_load_active(struct rq *this_rq);
+
 /*
  * Convert user-nice values [ -20 ... 0 ... 19 ]
  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
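A note on the hunk above: the new prototypes only pass a pointer, so the incomplete `struct rq;` forward declaration is all they need. A minimal sketch of that pattern, not taken from the patch (demo_fold is a hypothetical caller):

/* Sketch: an incomplete type suffices for pointer-only prototypes. */
struct rq;					/* forward declaration, no definition needed here */

extern long calc_load_fold_active(struct rq *this_rq);
extern void update_cpu_load_active(struct rq *this_rq);

static inline long demo_fold(struct rq *rq)	/* hypothetical caller */
{
	return calc_load_fold_active(rq);	/* only the pointer is passed through */
}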
@@ -140,10 +148,11 @@ struct task_group {
 	struct cfs_rq **cfs_rq;
 	unsigned long shares;
 
-	atomic_t load_weight;
-	atomic64_t load_avg;
+#ifdef CONFIG_SMP
+	atomic_long_t load_avg;
 	atomic_t runnable_avg;
 #endif
+#endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
 	struct sched_rt_entity **rt_se;
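For context on the type change above: a group-wide counter declared as atomic_long_t can be folded into and read with the plain atomic_long_* helpers. A hedged sketch with illustrative names (demo_group, demo_add_contrib, demo_read_load are not part of the patch):

#include <linux/atomic.h>

struct demo_group {
	atomic_long_t load_avg;			/* mirrors task_group::load_avg */
};

static inline void demo_add_contrib(struct demo_group *tg, long delta)
{
	atomic_long_add(delta, &tg->load_avg);	/* lock-free accumulation from each cfs_rq */
}

static inline long demo_read_load(struct demo_group *tg)
{
	return atomic_long_read(&tg->load_avg);
}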
@@ -261,26 +270,21 @@ struct cfs_rq {
 #endif
 
 #ifdef CONFIG_SMP
-/*
- * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
- * removed when useful for applications beyond shares distribution (e.g.
- * load-balance).
- */
-#ifdef CONFIG_FAIR_GROUP_SCHED
 	/*
 	 * CFS Load tracking
 	 * Under CFS, load is tracked on a per-entity basis and aggregated up.
 	 * This allows for the description of both thread and group usage (in
 	 * the FAIR_GROUP_SCHED case).
 	 */
-	u64 runnable_load_avg, blocked_load_avg;
-	atomic64_t decay_counter, removed_load;
+	unsigned long runnable_load_avg, blocked_load_avg;
+	atomic64_t decay_counter;
 	u64 last_decay;
-#endif /* CONFIG_FAIR_GROUP_SCHED */
-/* These always depend on CONFIG_FAIR_GROUP_SCHED */
+	atomic_long_t removed_load;
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
+	/* Required to track per-cpu representation of a task_group */
 	u32 tg_runnable_contrib;
-	u64 tg_load_contrib;
+	unsigned long tg_load_contrib;
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 	/*
@@ -353,7 +357,6 @@ struct rt_rq {
 	unsigned long rt_nr_boosted;
 
 	struct rq *rq;
-	struct list_head leaf_rt_rq_list;
 	struct task_group *tg;
 #endif
 };
@@ -540,6 +543,16 @@ DECLARE_PER_CPU(struct rq, runqueues);
 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
 #define raw_rq() (&__raw_get_cpu_var(runqueues))
 
+static inline u64 rq_clock(struct rq *rq)
+{
+	return rq->clock;
+}
+
+static inline u64 rq_clock_task(struct rq *rq)
+{
+	return rq->clock_task;
+}
+
 #ifdef CONFIG_SMP
 
 #define rcu_dereference_check_sched_domain(p) \
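The two accessors added above wrap direct reads of rq->clock and rq->clock_task. A hedged usage sketch, assuming a caller that tracks an execution start timestamp (demo_exec_delta is illustrative, not from the patch):

static inline u64 demo_exec_delta(struct rq *rq, u64 exec_start)
{
	u64 now = rq_clock_task(rq);	/* go through the accessor, not rq->clock_task */

	return now - exec_start;	/* elapsed task time since exec_start */
}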
@@ -884,24 +897,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) | |||
884 | #define WF_FORK 0x02 /* child wakeup after fork */ | 897 | #define WF_FORK 0x02 /* child wakeup after fork */ |
885 | #define WF_MIGRATED 0x4 /* internal use, task got migrated */ | 898 | #define WF_MIGRATED 0x4 /* internal use, task got migrated */ |
886 | 899 | ||
887 | static inline void update_load_add(struct load_weight *lw, unsigned long inc) | ||
888 | { | ||
889 | lw->weight += inc; | ||
890 | lw->inv_weight = 0; | ||
891 | } | ||
892 | |||
893 | static inline void update_load_sub(struct load_weight *lw, unsigned long dec) | ||
894 | { | ||
895 | lw->weight -= dec; | ||
896 | lw->inv_weight = 0; | ||
897 | } | ||
898 | |||
899 | static inline void update_load_set(struct load_weight *lw, unsigned long w) | ||
900 | { | ||
901 | lw->weight = w; | ||
902 | lw->inv_weight = 0; | ||
903 | } | ||
904 | |||
905 | /* | 900 | /* |
906 | * To aid in avoiding the subversion of "niceness" due to uneven distribution | 901 | * To aid in avoiding the subversion of "niceness" due to uneven distribution |
907 | * of tasks with abnormal "nice" values across CPUs the contribution that | 902 | * of tasks with abnormal "nice" values across CPUs the contribution that |
@@ -1028,17 +1023,8 @@ extern void update_group_power(struct sched_domain *sd, int cpu);
 extern void trigger_load_balance(struct rq *rq, int cpu);
 extern void idle_balance(int this_cpu, struct rq *this_rq);
 
-/*
- * Only depends on SMP, FAIR_GROUP_SCHED may be removed when runnable_avg
- * becomes useful in lb
- */
-#if defined(CONFIG_FAIR_GROUP_SCHED)
 extern void idle_enter_fair(struct rq *this_rq);
 extern void idle_exit_fair(struct rq *this_rq);
-#else
-static inline void idle_enter_fair(struct rq *this_rq) {}
-static inline void idle_exit_fair(struct rq *this_rq) {}
-#endif
 
 #else /* CONFIG_SMP */
 
@@ -1051,7 +1037,6 @@ static inline void idle_balance(int cpu, struct rq *rq)
 extern void sysrq_sched_debug_show(void);
 extern void sched_init_granularity(void);
 extern void update_max_interval(void);
-extern int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu);
 extern void init_sched_rt_class(void);
 extern void init_sched_fair_class(void);
 
@@ -1063,6 +1048,8 @@ extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime
 
 extern void update_idle_cpu_load(struct rq *this_rq);
 
+extern void init_task_runnable_average(struct task_struct *p);
+
 #ifdef CONFIG_PARAVIRT
 static inline u64 steal_ticks(u64 steal)
 {