Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--	kernel/sched/sched.h	75
1 file changed, 34 insertions(+), 41 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index f964add50f38..c9007f28d3a2 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -24,24 +24,6 @@ extern long calc_load_fold_active(struct rq *this_rq);
 extern void update_cpu_load_active(struct rq *this_rq);
 
 /*
- * Convert user-nice values [ -20 ... 0 ... 19 ]
- * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
- * and back.
- */
-#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
-#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
-#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)
-
-/*
- * 'User priority' is the nice value converted to something we
- * can work with better when scaling various scheduler parameters,
- * it's a [ 0 ... 39 ] range.
- */
-#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
-#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
-#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))
-
-/*
  * Helpers for converting nanosecond timing to jiffy resolution
  */
 #define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
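
Note: the nice<->priority macros removed above appear to move to
include/linux/sched/prio.h in this series rather than vanish. For
reference, with MAX_RT_PRIO == 100 and MAX_PRIO == 140 they map:

	NICE_TO_PRIO(-20) == 100,  NICE_TO_PRIO(0) == 120,  NICE_TO_PRIO(19) == 139
	PRIO_TO_NICE(120) == 0
	MAX_USER_PRIO == USER_PRIO(MAX_PRIO) == 140 - 100 == 40
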
@@ -441,6 +423,18 @@ struct rt_rq {
 #endif
 };
 
+#ifdef CONFIG_RT_GROUP_SCHED
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
+}
+#else
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+	return rt_rq->rt_throttled;
+}
+#endif
+
 /* Deadline class' related fields in a runqueue */
 struct dl_rq {
 	/* runqueue is an rbtree, ordered by deadline */
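
Note: with CONFIG_RT_GROUP_SCHED, a bandwidth-throttled rt_rq does not
count as throttled while it still holds priority-boosted tasks
(rt_nr_boosted), so PI-boosted tasks keep running. A hypothetical caller
(not from this patch) would use it along these lines:

	static bool example_rt_rq_runnable(struct rt_rq *rt_rq)
	{
		/* skip runqueues whose RT bandwidth is exhausted */
		return rt_rq->rt_nr_running && !rt_rq_throttled(rt_rq);
	}
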
@@ -558,11 +552,9 @@ struct rq {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* list of leaf cfs_rq on this cpu: */
 	struct list_head leaf_cfs_rq_list;
-#endif /* CONFIG_FAIR_GROUP_SCHED */
 
-#ifdef CONFIG_RT_GROUP_SCHED
-	struct list_head leaf_rt_rq_list;
-#endif
+	struct sched_avg avg;
+#endif /* CONFIG_FAIR_GROUP_SCHED */
 
 	/*
 	 * This is part of a global counter where only the total sum
@@ -651,8 +643,6 @@ struct rq {
 #ifdef CONFIG_SMP
 	struct llist_head wake_list;
 #endif
-
-	struct sched_avg avg;
 };
 
 static inline int cpu_of(struct rq *rq)
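
Note: the two hunks above are one move: rq->avg is consumed only by the
fair-group load-tracking code, so it leaves the tail of struct rq for the
CONFIG_FAIR_GROUP_SCHED block, and the apparently unused leaf_rt_rq_list
goes away with it. Abridged result, assuming CONFIG_FAIR_GROUP_SCHED=y:

	struct rq {
		/* ... */
	#ifdef CONFIG_FAIR_GROUP_SCHED
		/* list of leaf cfs_rq on this cpu: */
		struct list_head leaf_cfs_rq_list;

		struct sched_avg avg;
	#endif /* CONFIG_FAIR_GROUP_SCHED */
		/* ... */
	};
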
@@ -1112,6 +1102,8 @@ static const u32 prio_to_wmult[40] = {
 
 #define DEQUEUE_SLEEP		1
 
+#define RETRY_TASK		((void *)-1UL)
+
 struct sched_class {
 	const struct sched_class *next;
 
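
Note: RETRY_TASK is a sentinel, not a task pointer: (void *)-1UL can never
alias a real struct task_struct, so a pick method can signal "restart the
class scan" distinctly from both NULL ("nothing here, try a lower class")
and a genuine pick. A call site checks it before dereferencing anything:

	if (p == RETRY_TASK)	/* neither NULL nor a valid pointer */
		goto again;

See the sketches after the next two hunks for both sides of the contract.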
@@ -1122,14 +1114,22 @@ struct sched_class {
 
 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
 
-	struct task_struct * (*pick_next_task) (struct rq *rq);
+	/*
+	 * It is the responsibility of the pick_next_task() method that will
+	 * return the next task to call put_prev_task() on the @prev task or
+	 * something equivalent.
+	 *
+	 * May return RETRY_TASK when it finds a higher prio class has runnable
+	 * tasks.
+	 */
+	struct task_struct * (*pick_next_task) (struct rq *rq,
+						struct task_struct *prev);
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
 	int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
 	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
 
-	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
 	void (*task_waking) (struct task_struct *task);
 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
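
Note: a rough sketch of how a class method might satisfy the new contract;
example_find_next() is a hypothetical stand-in for the class's actual
selection logic (compare pick_next_task_rt() for the real pattern):

	static struct task_struct *
	example_pick_next_task(struct rq *rq, struct task_struct *prev)
	{
		struct task_struct *next = example_find_next(rq);

		if (!next)
			return NULL;	/* let a lower class try */

		put_prev_task(rq, prev);	/* retire @prev before returning */
		return next;
	}
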
@@ -1159,6 +1159,11 @@ struct sched_class {
 #endif
 };
 
+static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
+{
+	prev->sched_class->put_prev_task(rq, prev);
+}
+
 #define sched_class_highest (&stop_sched_class)
 #define for_each_class(class) \
 	for (class = sched_class_highest; class; class = class->next)
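
Note: on the caller's side, the pieces above compose into a pick loop along
these lines (a simplified sketch; the real loop in kernel/sched/core.c also
short-circuits the common all-fair case):

	static struct task_struct *
	example_core_pick(struct rq *rq, struct task_struct *prev)
	{
		const struct sched_class *class;
		struct task_struct *p;
	again:
		for_each_class(class) {
			p = class->pick_next_task(rq, prev);
			if (p) {
				if (unlikely(p == RETRY_TASK))
					goto again;	/* a higher class woke up */
				return p;	/* class did put_prev_task(prev) */
			}
		}
		BUG();	/* the idle class should always return a task */
	}
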
@@ -1175,16 +1180,14 @@ extern const struct sched_class idle_sched_class;
 extern void update_group_power(struct sched_domain *sd, int cpu);
 
 extern void trigger_load_balance(struct rq *rq);
-extern void idle_balance(int this_cpu, struct rq *this_rq);
 
 extern void idle_enter_fair(struct rq *this_rq);
 extern void idle_exit_fair(struct rq *this_rq);
 
-#else	/* CONFIG_SMP */
+#else
 
-static inline void idle_balance(int cpu, struct rq *rq)
-{
-}
+static inline void idle_enter_fair(struct rq *rq) { }
+static inline void idle_exit_fair(struct rq *rq) { }
 
 #endif
 
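
Note: idle_balance() loses its declaration here because this series appears
to push it, together with the pre_schedule() hook removed above, down into
the classes' pick_next_task() methods. The new !CONFIG_SMP stubs keep call
sites free of #ifdefs:

	idle_enter_fair(rq);	/* real function on SMP, empty inline on UP */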
@@ -1213,16 +1216,6 @@ extern void update_idle_cpu_load(struct rq *this_rq);
 
 extern void init_task_runnable_average(struct task_struct *p);
 
-#ifdef CONFIG_PARAVIRT
-static inline u64 steal_ticks(u64 steal)
-{
-	if (unlikely(steal > NSEC_PER_SEC))
-		return div_u64(steal, TICK_NSEC);
-
-	return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
-}
-#endif
-
 static inline void inc_nr_running(struct rq *rq)
 {
 	rq->nr_running++;
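
Note: the removed steal_ticks() converted stolen time in nanoseconds to
ticks, using __iter_div_u64_rem() (cheap for small quotients) below one
second and a full div_u64() above it. For reference, assuming HZ == 1000,
i.e. TICK_NSEC == 1000000:

	steal_ticks(2500000)    == 2	/* iterative path */
	steal_ticks(3000000000) == 3000	/* > NSEC_PER_SEC, div_u64() path */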