Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  128
1 files changed, 87 insertions, 41 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e4921aad4063..6c333579d9da 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -27,6 +27,7 @@
 #define CLONE_NEWUSER		0x10000000	/* New user namespace */
 #define CLONE_NEWPID		0x20000000	/* New pid namespace */
 #define CLONE_NEWNET		0x40000000	/* New network namespace */
+#define CLONE_IO		0x80000000	/* Clone io context */
 
 /*
  * Scheduling policies
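
CLONE_IO lets a clone(2) child share its parent's io_context, so the block I/O scheduler can treat both tasks as one stream of requests. A minimal userspace sketch of how the new flag might be used (the fallback #define, the worker body and the stack handling are illustrative assumptions, not part of this patch):

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

#ifndef CLONE_IO
#define CLONE_IO 0x80000000	/* value introduced by the hunk above */
#endif

static int worker(void *arg)
{
	/* I/O submitted here is accounted to the shared io_context */
	return 0;
}

int main(void)
{
	const size_t stack_size = 64 * 1024;
	char *stack = malloc(stack_size);

	if (!stack)
		return 1;
	/* the child stack grows down on most architectures, so pass its top */
	if (clone(worker, stack + stack_size, CLONE_IO | SIGCHLD, NULL) == -1) {
		perror("clone");
		return 1;
	}
	wait(NULL);
	free(stack);
	return 0;
}
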
@@ -78,7 +79,6 @@ struct sched_param {
 #include <linux/proportions.h>
 #include <linux/seccomp.h>
 #include <linux/rcupdate.h>
-#include <linux/futex.h>
 #include <linux/rtmutex.h>
 
 #include <linux/time.h>
@@ -88,11 +88,13 @@ struct sched_param {
 #include <linux/hrtimer.h>
 #include <linux/task_io_accounting.h>
 #include <linux/kobject.h>
+#include <linux/latencytop.h>
 
 #include <asm/processor.h>
 
 struct exec_domain;
 struct futex_pi_state;
+struct robust_list_head;
 struct bio;
 
 /*
@@ -252,6 +254,8 @@ static inline int select_nohz_load_balancer(int cpu)
 }
 #endif
 
+extern unsigned long rt_needs_cpu(int cpu);
+
 /*
  * Only dump TASK_* tasks. (0 for all tasks)
  */
@@ -279,13 +283,19 @@ extern void trap_init(void);
 extern void account_process_tick(struct task_struct *task, int user);
 extern void update_process_times(int user);
 extern void scheduler_tick(void);
+extern void hrtick_resched(void);
+
+extern void sched_show_task(struct task_struct *p);
 
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 extern void softlockup_tick(void);
 extern void spawn_softlockup_task(void);
 extern void touch_softlockup_watchdog(void);
 extern void touch_all_softlockup_watchdogs(void);
-extern int softlockup_thresh;
+extern unsigned long softlockup_thresh;
+extern unsigned long sysctl_hung_task_check_count;
+extern unsigned long sysctl_hung_task_timeout_secs;
+extern unsigned long sysctl_hung_task_warnings;
 #else
 static inline void softlockup_tick(void)
 {
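
The sysctl_hung_task_* knobs above back the new hung-task check in the softlockup watchdog. They are typically reachable under /proc/sys/kernel/; the exact file name used in this sketch (hung_task_timeout_secs) is an assumption based on the usual sysctl naming rather than something this header guarantees:

#include <stdio.h>

int main(void)
{
	unsigned long secs = 0;
	FILE *f = fopen("/proc/sys/kernel/hung_task_timeout_secs", "r");

	if (!f) {
		perror("hung_task_timeout_secs");
		return 1;
	}
	if (fscanf(f, "%lu", &secs) == 1)
		printf("hung task timeout: %lu seconds (0 = disabled)\n", secs);
	fclose(f);
	return 0;
}
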
@@ -575,18 +585,13 @@ struct user_struct {
 #ifdef CONFIG_FAIR_USER_SCHED
 	struct task_group *tg;
 #ifdef CONFIG_SYSFS
-	struct kset kset;
-	struct subsys_attribute user_attr;
+	struct kobject kobj;
 	struct work_struct work;
 #endif
 #endif
 };
 
-#ifdef CONFIG_FAIR_USER_SCHED
-extern int uids_kobject_init(void);
-#else
-static inline int uids_kobject_init(void) { return 0; }
-#endif
+extern int uids_sysfs_init(void);
 
 extern struct user_struct *find_user(uid_t);
 
@@ -850,6 +855,7 @@ struct sched_class {
 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
 	void (*yield_task) (struct rq *rq);
+	int (*select_task_rq)(struct task_struct *p, int sync);
 
 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
 
@@ -865,11 +871,25 @@ struct sched_class {
 	int (*move_one_task) (struct rq *this_rq, int this_cpu,
 			      struct rq *busiest, struct sched_domain *sd,
 			      enum cpu_idle_type idle);
+	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
+	void (*post_schedule) (struct rq *this_rq);
+	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 #endif
 
 	void (*set_curr_task) (struct rq *rq);
-	void (*task_tick) (struct rq *rq, struct task_struct *p);
+	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
 	void (*task_new) (struct rq *rq, struct task_struct *p);
+	void (*set_cpus_allowed)(struct task_struct *p, cpumask_t *newmask);
+
+	void (*join_domain)(struct rq *rq);
+	void (*leave_domain)(struct rq *rq);
+
+	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
+			       int running);
+	void (*switched_to) (struct rq *this_rq, struct task_struct *task,
+			     int running);
+	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
+			     int oldprio, int running);
 };
 
 struct load_weight {
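
For orientation, this is roughly how a scheduling class plugs into the hooks added above. The demo_* names are hypothetical; the real users of these callbacks are kernel/sched_fair.c and kernel/sched_rt.c, and most hooks are omitted here:

static void demo_enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
{
	/* put p on this class's runqueue */
}

static int demo_select_task_rq(struct task_struct *p, int sync)
{
	/* new hook: pick a runqueue for a waking or newly forked task */
	return task_cpu(p);
}

static void demo_task_tick(struct rq *rq, struct task_struct *p, int queued)
{
	/* "queued" is the new argument: non-zero when the tick was driven
	 * by the high-resolution tick rather than the periodic timer */
}

static const struct sched_class demo_sched_class = {
	.enqueue_task	= demo_enqueue_task,
	.select_task_rq	= demo_select_task_rq,
	.task_tick	= demo_task_tick,
	/* .pre_schedule, .post_schedule, .task_wake_up, .switched_to,
	 * .prio_changed and the rest are left out of this sketch */
};
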
@@ -899,6 +919,8 @@ struct sched_entity {
 #ifdef CONFIG_SCHEDSTATS
 	u64 wait_start;
 	u64 wait_max;
+	u64 wait_count;
+	u64 wait_sum;
 
 	u64 sleep_start;
 	u64 sleep_max;
@@ -937,6 +959,21 @@ struct sched_entity {
 #endif
 };
 
+struct sched_rt_entity {
+	struct list_head run_list;
+	unsigned int time_slice;
+	unsigned long timeout;
+	int nr_cpus_allowed;
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	struct sched_rt_entity *parent;
+	/* rq on which this entity is (to be) queued: */
+	struct rt_rq *rt_rq;
+	/* rq "owned" by this entity/group: */
+	struct rt_rq *my_q;
+#endif
+};
+
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
 	void *stack;
@@ -953,16 +990,15 @@ struct task_struct {
 #endif
 
 	int prio, static_prio, normal_prio;
-	struct list_head run_list;
 	const struct sched_class *sched_class;
 	struct sched_entity se;
+	struct sched_rt_entity rt;
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	/* list of struct preempt_notifier: */
 	struct hlist_head preempt_notifiers;
 #endif
 
-	unsigned short ioprio;
 	/*
 	 * fpu_counter contains the number of consecutive context switches
 	 * that the FPU is used. If this is over a threshold, the lazy fpu
@@ -979,7 +1015,11 @@ struct task_struct {
 
 	unsigned int policy;
 	cpumask_t cpus_allowed;
-	unsigned int time_slice;
+
+#ifdef CONFIG_PREEMPT_RCU
+	int rcu_read_lock_nesting;
+	int rcu_flipctr_idx;
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	struct sched_info sched_info;
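
Taken together, the hunks above move the RT-specific per-task state (run_list, time_slice) out of task_struct and into the embedded sched_rt_entity, mirroring how CFS state already lives in sched_entity. Call sites change along these lines; the helper below is only a sketch, and DEF_TIMESLICE and struct rt_prio_array are kernel/sched.c internals assumed here for illustration:

static void demo_requeue_rt(struct task_struct *p, struct rt_prio_array *array)
{
	/* formerly p->time_slice */
	p->rt.time_slice = DEF_TIMESLICE;

	/* formerly p->run_list */
	list_move_tail(&p->rt.run_list, array->queue + p->rt_priority);
}
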
@@ -1069,6 +1109,11 @@ struct task_struct {
 /* ipc stuff */
 	struct sysv_sem sysvsem;
 #endif
+#ifdef CONFIG_DETECT_SOFTLOCKUP
+/* hung task detection */
+	unsigned long last_switch_timestamp;
+	unsigned long last_switch_count;
+#endif
 /* CPU-specific state of this task */
 	struct thread_struct thread;
 /* filesystem information */
@@ -1201,6 +1246,10 @@ struct task_struct {
 	int make_it_fail;
 #endif
 	struct prop_local_single dirties;
+#ifdef CONFIG_LATENCYTOP
+	int latency_record_count;
+	struct latency_record latency_record[LT_SAVECOUNT];
+#endif
 };
 
 /*
@@ -1278,13 +1327,6 @@ struct pid_namespace;
  *
  * set_task_vxid()	: assigns a virtual id to a task;
  *
- * task_ppid_nr_ns()	: the parent's id as seen from the namespace specified.
- *			  the result depends on the namespace and whether the
- *			  task in question is the namespace's init. e.g. for the
- *			  namespace's init this will return 0 when called from
- *			  the namespace of this init, or appropriate id otherwise.
- *
- *
  * see also pid_nr() etc in include/linux/pid.h
  */
 
@@ -1340,12 +1382,6 @@ static inline pid_t task_session_vnr(struct task_struct *tsk)
 }
 
 
-static inline pid_t task_ppid_nr_ns(struct task_struct *tsk,
-					struct pid_namespace *ns)
-{
-	return pid_nr_ns(task_pid(rcu_dereference(tsk->real_parent)), ns);
-}
-
 /**
  * pid_alive - check that a task structure is not stale
  * @p: Task structure to be checked.
@@ -1494,6 +1530,12 @@ extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
+extern unsigned int sysctl_sched_rt_period;
+extern unsigned int sysctl_sched_rt_ratio;
+#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
+extern unsigned int sysctl_sched_min_bal_int_shares;
+extern unsigned int sysctl_sched_max_bal_int_shares;
+#endif
 
 int sched_nr_latency_handler(struct ctl_table *table, int write,
 		struct file *file, void __user *buffer, size_t *length,
@@ -1893,29 +1935,33 @@ static inline int need_resched(void)
  * cond_resched_lock() will drop the spinlock before scheduling,
  * cond_resched_softirq() will enable bhs before scheduling.
  */
-extern int cond_resched(void);
-extern int cond_resched_lock(spinlock_t * lock);
-extern int cond_resched_softirq(void);
-
-/*
- * Does a critical section need to be broken due to another
- * task waiting?:
- */
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
-# define need_lockbreak(lock) ((lock)->break_lock)
+#ifdef CONFIG_PREEMPT
+static inline int cond_resched(void)
+{
+	return 0;
+}
 #else
-# define need_lockbreak(lock) 0
+extern int _cond_resched(void);
+static inline int cond_resched(void)
+{
+	return _cond_resched();
+}
 #endif
+extern int cond_resched_lock(spinlock_t * lock);
+extern int cond_resched_softirq(void);
 
 /*
  * Does a critical section need to be broken due to another
- * task waiting or preemption being signalled:
+ * task waiting?: (technically does not depend on CONFIG_PREEMPT,
+ * but a general need for low latency)
  */
-static inline int lock_need_resched(spinlock_t *lock)
+static inline int spin_needbreak(spinlock_t *lock)
 {
-	if (need_lockbreak(lock) || need_resched())
-		return 1;
+#ifdef CONFIG_PREEMPT
+	return spin_is_contended(lock);
+#else
 	return 0;
+#endif
 }
 
 /*
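
spin_needbreak() replaces the old need_lockbreak()/lock_need_resched() pair: instead of peeking at ->break_lock it asks whether the lock is actually contended, and only under CONFIG_PREEMPT. One assumed (not shown in this header) way to pair it with cond_resched_lock() to break up a long critical section; demo_queue, demo_item and demo_handle() are hypothetical:

static void demo_drain(struct demo_queue *q)
{
	spin_lock(&q->lock);
	while (!list_empty(&q->items)) {
		struct demo_item *it = list_first_entry(&q->items,
							struct demo_item, node);

		demo_handle(it);	/* unlinks "it" from q->items */

		/* drop the lock if someone else is spinning on it or a
		 * reschedule is due; cond_resched_lock() unlocks,
		 * schedules and re-acquires q->lock */
		if (spin_needbreak(&q->lock) || need_resched())
			cond_resched_lock(&q->lock);
	}
	spin_unlock(&q->lock);
}
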