path: root/include/linux/sched.h
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  88
1 file changed, 42 insertions(+), 46 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 46c6f8d5dc06..b55e988988b5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -99,7 +99,6 @@ struct futex_pi_state;
 struct robust_list_head;
 struct bio_list;
 struct fs_struct;
-struct bts_context;
 struct perf_event_context;
 
 /*
@@ -258,6 +257,10 @@ extern spinlock_t mmlist_lock;
 
 struct task_struct;
 
+#ifdef CONFIG_PROVE_RCU
+extern int lockdep_tasklist_lock_is_held(void);
+#endif /* #ifdef CONFIG_PROVE_RCU */
+
 extern void sched_init(void);
 extern void sched_init_smp(void);
 extern asmlinkage void schedule_tail(struct task_struct *prev);
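
Note: this hunk only adds the declaration; the definition of
lockdep_tasklist_lock_is_held() is not part of this header. A minimal
sketch of what such a CONFIG_PROVE_RCU helper typically looks like
(assuming the usual lockdep_is_held() pattern against tasklist_lock;
the actual definition site is not shown in this diff):

    #ifdef CONFIG_PROVE_RCU
    /* Tell RCU/lockdep whether tasklist_lock is held, so tasklist
     * traversals can be validated via rcu_dereference_check(). */
    int lockdep_tasklist_lock_is_held(void)
    {
    	return lockdep_is_held(&tasklist_lock);
    }
    EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
    #endif /* #ifdef CONFIG_PROVE_RCU */
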
@@ -271,11 +274,17 @@ extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
 extern int get_nohz_load_balancer(void);
+extern int nohz_ratelimit(int cpu);
 #else
 static inline int select_nohz_load_balancer(int cpu)
 {
 	return 0;
 }
+
+static inline int nohz_ratelimit(int cpu)
+{
+	return 0;
+}
 #endif
 
 /*
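
Note: the stub above makes nohz_ratelimit() always report "no" on
builds without SMP+NO_HZ; the real implementation lives in the
scheduler proper. A hypothetical caller sketch (function name assumed),
showing the intended contract that a non-zero return means "this CPU is
waking too often, keep the tick running":

    /* Hypothetical: consult the scheduler before stopping the tick. */
    static bool can_stop_sched_tick(int cpu)
    {
    	if (nohz_ratelimit(cpu))	/* recent wakeup burst */
    		return false;
    	return true;
    }
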
@@ -950,6 +959,7 @@ struct sched_domain {
 	char *name;
 #endif
 
+	unsigned int span_weight;
 	/*
 	 * Span of all CPUs in this domain.
 	 *
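
Note: span_weight is only declared here; nothing in this header fills
it in. Presumably it is computed once while the domains are built,
along these lines (initialization site assumed, not shown in the diff):

    /* Hypothetical domain-setup snippet: cache the CPU count so hot
     * wakeup/balance paths avoid a cpumask scan per access. */
    static void init_span_weight(struct sched_domain *sd)
    {
    	sd->span_weight = cpumask_weight(sched_domain_span(sd));
    }
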
@@ -1022,12 +1032,17 @@ struct sched_domain;
 #define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
 #define WF_FORK		0x02		/* child wakeup after fork */
 
+#define ENQUEUE_WAKEUP	1
+#define ENQUEUE_WAKING	2
+#define ENQUEUE_HEAD	4
+
+#define DEQUEUE_SLEEP	1
+
 struct sched_class {
 	const struct sched_class *next;
 
-	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
-			      bool head);
-	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
+	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
+	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
 	void (*yield_task) (struct rq *rq);
 
 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
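
Note: the ENQUEUE_*/DEQUEUE_* bits replace the old (int wakeup, bool
head) and (int sleep) parameters with a single flags word. A sketch of
what a core wrapper and a call site might look like after this change
(wrapper body assumed from the usual kernel/sched.c shape, not part of
this header):

    /* Assumed wrapper: forward the combined flags to the class hook. */
    static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
    {
    	update_rq_clock(rq);
    	sched_info_queued(p);
    	p->sched_class->enqueue_task(rq, p, flags);
    	p->se.on_rq = 1;
    }

    /* A wakeup that also wants head-of-runqueue placement: */
    enqueue_task(rq, p, ENQUEUE_WAKEUP | ENQUEUE_HEAD);
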
@@ -1036,7 +1051,8 @@ struct sched_class {
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
+	int  (*select_task_rq)(struct rq *rq, struct task_struct *p,
+			       int sd_flag, int flags);
 
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
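
Note: select_task_rq() now receives the runqueue explicitly instead of
re-deriving it from the task. A hypothetical call-site sketch (helper
name assumed; SD_BALANCE_WAKE is an existing sd_flag):

    /* Hypothetical wakeup-path helper: pass the caller's rq through. */
    static int wake_cpu(struct rq *rq, struct task_struct *p, int wake_flags)
    {
    	return p->sched_class->select_task_rq(rq, p, SD_BALANCE_WAKE,
    					      wake_flags);
    }
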
@@ -1073,36 +1089,8 @@ struct load_weight {
 	unsigned long weight, inv_weight;
 };
 
-/*
- * CFS stats for a schedulable entity (task, task-group etc)
- *
- * Current field usage histogram:
- *
- *     4 se->block_start
- *     4 se->run_node
- *     4 se->sleep_start
- *     6 se->load.weight
- */
-struct sched_entity {
-	struct load_weight	load;		/* for load-balancing */
-	struct rb_node		run_node;
-	struct list_head	group_node;
-	unsigned int		on_rq;
-
-	u64			exec_start;
-	u64			sum_exec_runtime;
-	u64			vruntime;
-	u64			prev_sum_exec_runtime;
-
-	u64			last_wakeup;
-	u64			avg_overlap;
-
-	u64			nr_migrations;
-
-	u64			start_runtime;
-	u64			avg_wakeup;
-
 #ifdef CONFIG_SCHEDSTATS
+struct sched_statistics {
 	u64			wait_start;
 	u64			wait_max;
 	u64			wait_count;
@@ -1134,6 +1122,24 @@ struct sched_entity {
 	u64			nr_wakeups_affine_attempts;
 	u64			nr_wakeups_passive;
 	u64			nr_wakeups_idle;
+};
+#endif
+
+struct sched_entity {
+	struct load_weight	load;		/* for load-balancing */
+	struct rb_node		run_node;
+	struct list_head	group_node;
+	unsigned int		on_rq;
+
+	u64			exec_start;
+	u64			sum_exec_runtime;
+	u64			vruntime;
+	u64			prev_sum_exec_runtime;
+
+	u64			nr_migrations;
+
+#ifdef CONFIG_SCHEDSTATS
+	struct sched_statistics statistics;
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
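
Note: the schedstat fields keep their names; only their address
changes, one level down into the embedded struct. A sketch of the
corresponding call-site rewrite (function shape and rq->clock usage
assumed from the fair-class update helpers, not shown in this patch):

    /* Before the split:  se->wait_start = rq->clock;
     * After the split, the same update goes through se->statistics: */
    static void update_stats_wait_start(struct rq *rq, struct sched_entity *se)
    {
    	se->statistics.wait_start = rq->clock;
    }
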
@@ -1268,12 +1274,6 @@ struct task_struct {
 	struct list_head ptraced;
 	struct list_head ptrace_entry;
 
-	/*
-	 * This is the tracer handle for the ptrace BTS extension.
-	 * This field actually belongs to the ptracer task.
-	 */
-	struct bts_context *bts;
-
 	/* PID/PID hash table linkage. */
 	struct pid_link pids[PIDTYPE_MAX];
 	struct list_head thread_group;
@@ -1473,7 +1473,7 @@ struct task_struct {
 
 	struct list_head *scm_work_list;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	/* Index of current stored adress in ret_stack */
+	/* Index of current stored address in ret_stack */
 	int curr_ret_stack;
 	/* Stack of return addresses for return function tracing */
 	struct ftrace_ret_stack	*ret_stack;
@@ -1493,7 +1493,6 @@ struct task_struct {
 	/* bitmask of trace recursion */
 	unsigned long trace_recursion;
 #endif /* CONFIG_TRACING */
-	unsigned long stack_start;
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
 	struct memcg_batch_info {
 		int do_batch;	/* incremented when batch uncharge started */
@@ -1843,6 +1842,7 @@ extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
 
 #ifdef CONFIG_HOTPLUG_CPU
+extern void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p);
 extern void idle_task_exit(void);
 #else
 static inline void idle_task_exit(void) {}
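
Note: this hunk only publishes the declaration for CONFIG_HOTPLUG_CPU
users; move_task_off_dead_cpu() itself is defined in the scheduler. A
sketch of the kind of hotplug loop that would call it (loop shape
assumed from the usual tasklist walk, not taken from this patch):

    /* Hypothetical: push every task off a CPU that is going down. */
    static void migrate_tasks_off(int dead_cpu)
    {
    	struct task_struct *g, *p;

    	read_lock(&tasklist_lock);
    	do_each_thread(g, p) {
    		if (task_cpu(p) == dead_cpu)
    			move_task_off_dead_cpu(dead_cpu, p);
    	} while_each_thread(g, p);
    	read_unlock(&tasklist_lock);
    }
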
@@ -2119,10 +2119,8 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
-extern void wait_task_context_switch(struct task_struct *p);
 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
-static inline void wait_task_context_switch(struct task_struct *p) {}
 static inline unsigned long wait_task_inactive(struct task_struct *p,
 					       long match_state)
 {
@@ -2391,9 +2389,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
 
 static inline void thread_group_cputime_init(struct signal_struct *sig)
 {
-	sig->cputimer.cputime = INIT_CPUTIME;
 	spin_lock_init(&sig->cputimer.lock);
-	sig->cputimer.running = 0;
 }
 
 static inline void thread_group_cputime_free(struct signal_struct *sig)
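
Note: dropping the INIT_CPUTIME and running = 0 stores is only safe if
those fields are already zero when this helper runs; presumably the
containing signal_struct now reaches this point zeroed by its
allocator. A sketch of that assumption (allocation site not shown in
this patch; kmem_cache_zalloc() stands in for whatever the fork path
actually uses):

    /* Assumed guarantee: a zeroing allocator leaves cputimer.cputime
     * and cputimer.running at 0, so only the lock needs explicit init. */
    sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
    thread_group_cputime_init(sig);	/* now just spin_lock_init() */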