Diffstat (limited to 'include')
-rw-r--r--  include/linux/init_task.h  |  1
-rw-r--r--  include/linux/latencytop.h | 10
-rw-r--r--  include/linux/plist.h      |  9
-rw-r--r--  include/linux/sched.h      | 21
4 files changed, 36 insertions(+), 5 deletions(-)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index e752d973fa21..af1de95e711e 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -147,6 +147,7 @@ extern struct cred init_cred;
 		.nr_cpus_allowed = NR_CPUS,				\
 	},								\
 	.tasks		= LIST_HEAD_INIT(tsk.tasks),			\
+	.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), \
 	.ptraced	= LIST_HEAD_INIT(tsk.ptraced),			\
 	.ptrace_entry	= LIST_HEAD_INIT(tsk.ptrace_entry),		\
 	.real_parent	= &tsk,						\
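
The new .pushable_tasks entry gives the boot task a statically initialized plist node before the scheduler ever touches it. As a rough sketch of how such a statically initialized plist node is used (demo_lock, demo_head, demo_node and demo_queue are illustrative names, not part of this patch):

    #include <linux/plist.h>
    #include <linux/sched.h>	/* MAX_PRIO */
    #include <linux/spinlock.h>

    /* Illustrative only: a priority-sorted list with its own lock, and a
     * node statically initialized at MAX_PRIO, mirroring what INIT_TASK
     * now does for .pushable_tasks. */
    static DEFINE_SPINLOCK(demo_lock);
    static struct plist_head demo_head = PLIST_HEAD_INIT(demo_head, demo_lock);
    static struct plist_node demo_node = PLIST_NODE_INIT(demo_node, MAX_PRIO);

    static void demo_queue(void)
    {
    	spin_lock(&demo_lock);
    	plist_add(&demo_node, &demo_head);	/* insert, sorted by ->prio */
    	plist_del(&demo_node, &demo_head);	/* and remove again */
    	spin_unlock(&demo_lock);
    }
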
diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h
index 901c2d6377a8..b0e99898527c 100644
--- a/include/linux/latencytop.h
+++ b/include/linux/latencytop.h
@@ -9,6 +9,7 @@
 #ifndef _INCLUDE_GUARD_LATENCYTOP_H_
 #define _INCLUDE_GUARD_LATENCYTOP_H_
 
+#include <linux/compiler.h>
 #ifdef CONFIG_LATENCYTOP
 
 #define LT_SAVECOUNT		32
@@ -24,7 +25,14 @@ struct latency_record {
 
 struct task_struct;
 
-void account_scheduler_latency(struct task_struct *task, int usecs, int inter);
+extern int latencytop_enabled;
+void __account_scheduler_latency(struct task_struct *task, int usecs, int inter);
+static inline void
+account_scheduler_latency(struct task_struct *task, int usecs, int inter)
+{
+	if (unlikely(latencytop_enabled))
+		__account_scheduler_latency(task, usecs, inter);
+}
 
 void clear_all_latency_tracing(struct task_struct *p);
 
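
This turns account_scheduler_latency() into a static inline gate: callers now pay only for an unlikely() branch on latencytop_enabled (hence the new <linux/compiler.h> include), and the out-of-line __account_scheduler_latency() is reached only when latencytop is actually enabled. A minimal sketch of the same fast-path/slow-path split in isolation, with hypothetical names:

    /* Hypothetical names, sketching the pattern the patch applies: the
     * inline wrapper costs one predictable branch, the out-of-line
     * function does the real accounting work. */
    extern int tracing_enabled;			/* analogous to latencytop_enabled */
    void __record_latency(int usecs);		/* analogous to __account_scheduler_latency() */

    static inline void record_latency(int usecs)
    {
    	if (__builtin_expect(tracing_enabled, 0))	/* i.e. unlikely() */
    		__record_latency(usecs);
    }
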
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 85de2f055874..45926d77d6ac 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -96,6 +96,10 @@ struct plist_node {
 # define PLIST_HEAD_LOCK_INIT(_lock)
 #endif
 
+#define _PLIST_HEAD_INIT(head)				\
+	.prio_list = LIST_HEAD_INIT((head).prio_list),	\
+	.node_list = LIST_HEAD_INIT((head).node_list)
+
 /**
  * PLIST_HEAD_INIT - static struct plist_head initializer
  * @head:	struct plist_head variable name
@@ -103,8 +107,7 @@ struct plist_node {
  */
 #define PLIST_HEAD_INIT(head, _lock)			\
 { \
-	.prio_list = LIST_HEAD_INIT((head).prio_list),	\
-	.node_list = LIST_HEAD_INIT((head).node_list),	\
+	_PLIST_HEAD_INIT(head),				\
 	PLIST_HEAD_LOCK_INIT(&(_lock))			\
 }
 
@@ -116,7 +119,7 @@ struct plist_node {
 #define PLIST_NODE_INIT(node, __prio)			\
 { \
 	.prio  = (__prio),				\
-	.plist = PLIST_HEAD_INIT((node).plist, NULL),	\
+	.plist = { _PLIST_HEAD_INIT((node).plist) },	\
 }
 
 /**
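
Factoring the two list initializers into _PLIST_HEAD_INIT() lets PLIST_NODE_INIT() initialize the embedded plist member without passing a fake NULL lock through PLIST_HEAD_INIT(). With the new helper, a node initializer looks like this ('example' is an illustrative name):

    #include <linux/plist.h>
    #include <linux/sched.h>	/* MAX_PRIO */

    static struct plist_node example = PLIST_NODE_INIT(example, MAX_PRIO);

    /* ...which now expands roughly to:
     *
     *	static struct plist_node example = {
     *		.prio  = (MAX_PRIO),
     *		.plist = {
     *			.prio_list = LIST_HEAD_INIT(example.plist.prio_list),
     *			.node_list = LIST_HEAD_INIT(example.plist.node_list),
     *		},
     *	};
     *
     * i.e. no lock pointer is named for the embedded head any more. */
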
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8981e52c714f..75bf49291c60 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -998,6 +998,7 @@ struct sched_class {
 			      struct rq *busiest, struct sched_domain *sd,
 			      enum cpu_idle_type idle);
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
+	int  (*needs_post_schedule) (struct rq *this_rq);
 	void (*post_schedule) (struct rq *this_rq);
 	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 
@@ -1052,6 +1053,10 @@ struct sched_entity {
 	u64			last_wakeup;
 	u64			avg_overlap;
 
+	u64			start_runtime;
+	u64			avg_wakeup;
+	u64			nr_migrations;
+
 #ifdef CONFIG_SCHEDSTATS
 	u64			wait_start;
 	u64			wait_max;
@@ -1067,7 +1072,6 @@ struct sched_entity {
 	u64			exec_max;
 	u64			slice_max;
 
-	u64			nr_migrations;
 	u64			nr_migrations_cold;
 	u64			nr_failed_migrations_affine;
 	u64			nr_failed_migrations_running;
@@ -1164,6 +1168,7 @@ struct task_struct {
 #endif
 
 	struct list_head tasks;
+	struct plist_node pushable_tasks;
 
 	struct mm_struct *mm, *active_mm;
 
@@ -1670,6 +1675,16 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 	return set_cpus_allowed_ptr(p, &new_mask);
 }
 
+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+extern int sched_clock_stable;
+#endif
+
 extern unsigned long long sched_clock(void);
 
 extern void sched_clock_init(void);
@@ -2291,9 +2306,13 @@ extern long sched_group_rt_runtime(struct task_group *tg);
 extern int sched_group_set_rt_period(struct task_group *tg,
 				      long rt_period_us);
 extern long sched_group_rt_period(struct task_group *tg);
+extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 #endif
 #endif
 
+extern int task_can_switch_user(struct user_struct *up,
+				struct task_struct *tsk);
+
 #ifdef CONFIG_TASK_XACCT
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
 {
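
Among the sched.h additions above, sched_clock_stable lets an architecture that selects CONFIG_HAVE_UNSTABLE_SCHED_CLOCK report, once boot-time calibration succeeds, that sched_clock() is reliable after all, as the new comment in the hunk describes. A rough arch-side sketch of flipping the flag (the calibration helper and predicate below are hypothetical, not part of this patch):

    #include <linux/init.h>
    #include <linux/sched.h>	/* declares sched_clock_stable after this patch */

    extern int arch_clock_is_invariant(void);	/* hypothetical predicate */

    #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
    /* Hypothetical arch hook: once early calibration shows the clock
     * source is invariant, declare sched_clock() stable. */
    static void __init arch_mark_sched_clock_stable(void)
    {
    	if (arch_clock_is_invariant())
    		sched_clock_stable = 1;
    }
    #endif
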