diff options
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r-- | include/linux/sched.h | 26 |
1 files changed, 25 insertions, 1 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f0a50b20e8a0..1d19c025f9d2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -998,6 +998,7 @@ struct sched_class {
998 | struct rq *busiest, struct sched_domain *sd, | 998 | struct rq *busiest, struct sched_domain *sd, |
999 | enum cpu_idle_type idle); | 999 | enum cpu_idle_type idle); |
1000 | void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); | 1000 | void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); |
1001 | int (*needs_post_schedule) (struct rq *this_rq); | ||
1001 | void (*post_schedule) (struct rq *this_rq); | 1002 | void (*post_schedule) (struct rq *this_rq); |
1002 | void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); | 1003 | void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); |
1003 | 1004 | ||
@@ -1052,6 +1053,10 @@ struct sched_entity {
1052 | u64 last_wakeup; | 1053 | u64 last_wakeup; |
1053 | u64 avg_overlap; | 1054 | u64 avg_overlap; |
1054 | 1055 | ||
1056 | u64 start_runtime; | ||
1057 | u64 avg_wakeup; | ||
1058 | u64 nr_migrations; | ||
1059 | |||
1055 | #ifdef CONFIG_SCHEDSTATS | 1060 | #ifdef CONFIG_SCHEDSTATS |
1056 | u64 wait_start; | 1061 | u64 wait_start; |
1057 | u64 wait_max; | 1062 | u64 wait_max; |
@@ -1067,7 +1072,6 @@ struct sched_entity {
1067 | u64 exec_max; | 1072 | u64 exec_max; |
1068 | u64 slice_max; | 1073 | u64 slice_max; |
1069 | 1074 | ||
1070 | u64 nr_migrations; | ||
1071 | u64 nr_migrations_cold; | 1075 | u64 nr_migrations_cold; |
1072 | u64 nr_failed_migrations_affine; | 1076 | u64 nr_failed_migrations_affine; |
1073 | u64 nr_failed_migrations_running; | 1077 | u64 nr_failed_migrations_running; |
@@ -1164,6 +1168,7 @@ struct task_struct {
1164 | #endif | 1168 | #endif |
1165 | 1169 | ||
1166 | struct list_head tasks; | 1170 | struct list_head tasks; |
1171 | struct plist_node pushable_tasks; | ||
1167 | 1172 | ||
1168 | struct mm_struct *mm, *active_mm; | 1173 | struct mm_struct *mm, *active_mm; |
1169 | 1174 | ||
@@ -1175,6 +1180,8 @@ struct task_struct {
1175 | /* ??? */ | 1180 | /* ??? */ |
1176 | unsigned int personality; | 1181 | unsigned int personality; |
1177 | unsigned did_exec:1; | 1182 | unsigned did_exec:1; |
1183 | unsigned in_execve:1; /* Tell the LSMs that the process is doing an | ||
1184 | * execve */ | ||
1178 | pid_t pid; | 1185 | pid_t pid; |
1179 | pid_t tgid; | 1186 | pid_t tgid; |
1180 | 1187 | ||
@@ -1418,6 +1425,9 @@ struct task_struct {
1418 | #endif | 1425 | #endif |
1419 | }; | 1426 | }; |
1420 | 1427 | ||
1428 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ | ||
1429 | #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed) | ||
1430 | |||
1421 | /* | 1431 | /* |
1422 | * Priority of a process goes from 0..MAX_PRIO-1, valid RT | 1432 | * Priority of a process goes from 0..MAX_PRIO-1, valid RT |
1423 | * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH | 1433 | * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH |
@@ -1669,6 +1679,16 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1669 | return set_cpus_allowed_ptr(p, &new_mask); | 1679 | return set_cpus_allowed_ptr(p, &new_mask); |
1670 | } | 1680 | } |
1671 | 1681 | ||
1682 | /* | ||
1683 | * Architectures can set this to 1 if they have specified | ||
1684 | * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, | ||
1685 | * but then during bootup it turns out that sched_clock() | ||
1686 | * is reliable after all: | ||
1687 | */ | ||
1688 | #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK | ||
1689 | extern int sched_clock_stable; | ||
1690 | #endif | ||
1691 | |||
1672 | extern unsigned long long sched_clock(void); | 1692 | extern unsigned long long sched_clock(void); |
1673 | 1693 | ||
1674 | extern void sched_clock_init(void); | 1694 | extern void sched_clock_init(void); |
@@ -2303,9 +2323,13 @@ extern long sched_group_rt_runtime(struct task_group *tg);
2303 | extern int sched_group_set_rt_period(struct task_group *tg, | 2323 | extern int sched_group_set_rt_period(struct task_group *tg, |
2304 | long rt_period_us); | 2324 | long rt_period_us); |
2305 | extern long sched_group_rt_period(struct task_group *tg); | 2325 | extern long sched_group_rt_period(struct task_group *tg); |
2326 | extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk); | ||
2306 | #endif | 2327 | #endif |
2307 | #endif | 2328 | #endif |
2308 | 2329 | ||
2330 | extern int task_can_switch_user(struct user_struct *up, | ||
2331 | struct task_struct *tsk); | ||
2332 | |||
2309 | #ifdef CONFIG_TASK_XACCT | 2333 | #ifdef CONFIG_TASK_XACCT |
2310 | static inline void add_rchar(struct task_struct *tsk, ssize_t amt) | 2334 | static inline void add_rchar(struct task_struct *tsk, ssize_t amt) |
2311 | { | 2335 | { |