Diffstat (limited to 'include/linux/sched.h')
-rw-r--r-- | include/linux/sched.h | 99
1 file changed, 62 insertions(+), 37 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 75e6e60bf583..f2f842db03ce 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -145,7 +145,6 @@ extern unsigned long this_cpu_load(void);
 
 
 extern void calc_global_load(void);
-extern u64 cpu_nr_migrations(int cpu);
 
 extern unsigned long get_parent_ip(unsigned long addr);
 
@@ -171,8 +170,6 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 }
 #endif
 
-extern unsigned long long time_sync_thresh;
-
 /*
  * Task state bitmask. NOTE! These bits are also
  * encoded in fs/proc/array.c: get_task_state().
@@ -195,6 +192,12 @@ extern unsigned long long time_sync_thresh;
 #define TASK_DEAD		64
 #define TASK_WAKEKILL		128
 #define TASK_WAKING		256
+#define TASK_STATE_MAX		512
+
+#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
+
+extern char ___assert_task_state[1 - 2*!!(
+		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
 
 /* Convenience macros for the sake of set_task_state */
 #define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
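The new ___assert_task_state declaration is a build-time check that TASK_STATE_TO_CHAR_STR has exactly one character per state bit (ilog2(512) + 1 = 10). If the lengths disagree, the array size evaluates to -1 and the build fails; if they agree, the extern array has size 1 and is never defined, so it costs nothing at run time. A minimal standalone sketch of the same idiom (all names below are illustrative, not from the kernel):

/* build_assert.c - the negative-array-size compile-time assert idiom.
 * MY_STATE_STR must have one character per state bit; with
 * MY_STATE_MAX = 512 that is log2(512) + 1 = 10 characters. */
#define MY_STATE_MAX	512
#define MY_STATE_STR	"RSDTtZXxKW"

/* If the length check fails, the array size is 1 - 2*1 = -1: a compile
 * error. If it holds, the size is 1 and the never-defined extern is free. */
extern char my_assert[1 - 2*!!(sizeof(MY_STATE_STR) - 1 != 10)];

int main(void)
{
	return 0;
}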
@@ -349,7 +352,6 @@ extern signed long schedule_timeout(signed long timeout);
 extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
-asmlinkage void __schedule(void);
 asmlinkage void schedule(void);
 extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
 
@@ -628,6 +630,9 @@ struct signal_struct {
 	cputime_t utime, stime, cutime, cstime;
 	cputime_t gtime;
 	cputime_t cgtime;
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+	cputime_t prev_utime, prev_stime;
+#endif
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
 	unsigned long inblock, oublock, cinblock, coublock;
@@ -1013,9 +1018,13 @@ static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
 	return to_cpumask(sd->span);
 }
 
-extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
+extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 				    struct sched_domain_attr *dattr_new);
 
+/* Allocate an array of sched domains, for partition_sched_domains(). */
+cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
+void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
+
 /* Test a flag in parent sched domain */
 static inline int test_sd_parent(struct sched_domain *sd, int flag)
 {
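With partition_sched_domains() now taking an array of cpumask_var_t, the two helpers above manage that array's allocation. A kernel-context sketch (not a standalone program) of the calling pattern a user such as the cpuset code might follow; the single-domain setup is illustrative, error handling is abbreviated, and the ownership comment reflects my reading of the API rather than anything stated in this diff:

/* Build one sched domain spanning all active CPUs and hand it to the
 * scheduler; the scheduler keeps the array and releases the old one. */
cpumask_var_t *doms;

doms = alloc_sched_domains(1);
if (doms) {
	cpumask_copy(doms[0], cpu_active_mask);
	partition_sched_domains(1, doms, NULL);
}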
@@ -1033,7 +1042,7 @@ unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
 struct sched_domain_attr;
 
 static inline void
-partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
+partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 			struct sched_domain_attr *dattr_new)
 {
 }
@@ -1088,7 +1097,8 @@ struct sched_class {
 			      enum cpu_idle_type idle);
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
-	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
+	void (*task_waking) (struct rq *this_rq, struct task_struct *task);
+	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
 				 const struct cpumask *newmask);
@@ -1099,7 +1109,7 @@ struct sched_class {
 
 	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
-	void (*task_new) (struct rq *rq, struct task_struct *p);
+	void (*task_fork) (struct task_struct *p);
 
 	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
 			       int running);
@@ -1108,10 +1118,11 @@ struct sched_class {
 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
 			     int oldprio, int running);
 
-	unsigned int (*get_rr_interval) (struct task_struct *task);
+	unsigned int (*get_rr_interval) (struct rq *rq,
+					 struct task_struct *task);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	void (*moved_group) (struct task_struct *p);
+	void (*moved_group) (struct task_struct *p, int on_rq);
 #endif
 };
 
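Taken together, the sched_class hunks rename the wakeup hook into a pair (task_waking while the wakeup target is being chosen, task_woken once the task is back on a runqueue), replace task_new with task_fork (no runqueue argument), and widen get_rr_interval and moved_group. A hedged sketch of how a class might fill in the new hooks; the example_* functions are hypothetical and all other members are omitted:

static const struct sched_class example_sched_class = {
	.task_waking	 = example_task_waking,	/* wakeup target being chosen */
	.task_woken	 = example_task_woken,	/* task is on a runqueue again */
	.task_fork	 = example_task_fork,	/* was task_new; no rq argument */
	.get_rr_interval = example_get_rr_interval, /* now also takes the rq */
};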
@@ -1148,8 +1159,6 @@ struct sched_entity {
 	u64			start_runtime;
 	u64			avg_wakeup;
 
-	u64			avg_running;
-
 #ifdef CONFIG_SCHEDSTATS
 	u64			wait_start;
 	u64			wait_max;
@@ -1172,7 +1181,6 @@ struct sched_entity {
 	u64			nr_failed_migrations_running;
 	u64			nr_failed_migrations_hot;
 	u64			nr_forced_migrations;
-	u64			nr_forced2_migrations;
 
 	u64			nr_wakeups;
 	u64			nr_wakeups_sync;
@@ -1331,7 +1339,9 @@ struct task_struct {
 
 	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
 	cputime_t prev_utime, prev_stime;
+#endif
 	unsigned long nvcsw, nivcsw; /* context switch counts */
 	struct timespec start_time;		/* monotonic time */
 	struct timespec real_start_time;	/* boot based time */
@@ -1406,7 +1416,7 @@ struct task_struct {
 #endif
 
 	/* Protection of the PI data structures: */
-	spinlock_t pi_lock;
+	raw_spinlock_t pi_lock;
 
 #ifdef CONFIG_RT_MUTEXES
 	/* PI waiters blocked on a rt_mutex held by this task */
@@ -1421,17 +1431,17 @@ struct task_struct {
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	unsigned int irq_events;
-	int hardirqs_enabled;
 	unsigned long hardirq_enable_ip;
-	unsigned int hardirq_enable_event;
 	unsigned long hardirq_disable_ip;
+	unsigned int hardirq_enable_event;
 	unsigned int hardirq_disable_event;
-	int softirqs_enabled;
+	int hardirqs_enabled;
+	int hardirq_context;
 	unsigned long softirq_disable_ip;
-	unsigned int softirq_disable_event;
 	unsigned long softirq_enable_ip;
+	unsigned int softirq_disable_event;
 	unsigned int softirq_enable_event;
-	int hardirq_context;
+	int softirqs_enabled;
 	int softirq_context;
 #endif
 #ifdef CONFIG_LOCKDEP
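This hunk only reorders the CONFIG_TRACE_IRQFLAGS fields so that the unsigned long members sit together and the int members close each hardirq/softirq block. On LP64 targets the regrouping happens to eliminate padding holes; a standalone sketch of the effect (field names copied from the diff, struct names invented):

#include <stdio.h>

struct irqflags_old {		/* interleaved int/long layout, pre-patch */
	unsigned int irq_events;
	int hardirqs_enabled;
	unsigned long hardirq_enable_ip;
	unsigned int hardirq_enable_event;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_disable_event;
	int softirqs_enabled;
	unsigned long softirq_disable_ip;
	unsigned int softirq_disable_event;
	unsigned long softirq_enable_ip;
	unsigned int softirq_enable_event;
	int hardirq_context;
	int softirq_context;
};

struct irqflags_new {		/* longs grouped, ints grouped, post-patch */
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
};

int main(void)
{
	/* prints 80 vs 72 bytes on x86-64: 8 bytes of padding saved */
	printf("old: %zu bytes, new: %zu bytes\n",
	       sizeof(struct irqflags_old), sizeof(struct irqflags_new));
	return 0;
}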
@@ -1539,10 +1549,18 @@ struct task_struct {
 	unsigned long trace_recursion;
 #endif /* CONFIG_TRACING */
 	unsigned long stack_start;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
+	struct memcg_batch_info {
+		int do_batch;	/* incremented when batch uncharge started */
+		struct mem_cgroup *memcg; /* target memcg of uncharge */
+		unsigned long bytes;		/* uncharged usage */
+		unsigned long memsw_bytes; /* uncharged mem+swap usage */
+	} memcg_batch;
+#endif
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
-#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
+#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
 
 /*
  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
@@ -1720,9 +1738,8 @@ static inline void put_task_struct(struct task_struct *t)
 		__put_task_struct(t);
 }
 
-extern cputime_t task_utime(struct task_struct *p);
-extern cputime_t task_stime(struct task_struct *p);
-extern cputime_t task_gtime(struct task_struct *p);
+extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
+extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
 
 /*
  * Per process flags
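The per-field accessors task_utime(), task_stime(), and task_gtime() give way to calls that return user and system time as a pair, plus a thread-group variant. A call-site sketch in kernel context, assuming p is a valid task_struct pointer:

cputime_t ut, st;

/* was: ut = task_utime(p); st = task_stime(p); */
task_times(p, &ut, &st);

/* totals across the whole thread group */
thread_group_times(p, &ut, &st);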
@@ -1836,7 +1853,8 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 extern int sched_clock_stable;
 #endif
 
-extern unsigned long long sched_clock(void);
+/* ftrace calls sched_clock() directly */
+extern unsigned long long notrace sched_clock(void);
 
 extern void sched_clock_init(void);
 extern u64 sched_clock_cpu(int cpu);
@@ -1899,14 +1917,22 @@ extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_shares_ratelimit;
 extern unsigned int sysctl_sched_shares_thresh;
 extern unsigned int sysctl_sched_child_runs_first;
+
+enum sched_tunable_scaling {
+	SCHED_TUNABLESCALING_NONE,
+	SCHED_TUNABLESCALING_LOG,
+	SCHED_TUNABLESCALING_LINEAR,
+	SCHED_TUNABLESCALING_END,
+};
+extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
+
 #ifdef CONFIG_SCHED_DEBUG
-extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
 extern unsigned int sysctl_sched_time_avg;
 extern unsigned int sysctl_timer_migration;
 
-int sched_nr_latency_handler(struct ctl_table *table, int write,
+int sched_proc_update_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *length,
 		loff_t *ppos);
 #endif
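The new enum selects how a base scheduler tunable is scaled as the CPU count grows. A standalone sketch of one plausible interpretation: leave the value alone, scale it by 1 + log2(ncpus), or scale it linearly. The base value, helper names, and demo loop are illustrative assumptions, not taken from this diff:

#include <stdio.h>

enum sched_tunable_scaling {
	SCHED_TUNABLESCALING_NONE,
	SCHED_TUNABLESCALING_LOG,
	SCHED_TUNABLESCALING_LINEAR,
	SCHED_TUNABLESCALING_END,
};

static unsigned int ilog2_u(unsigned int v)	/* floor(log2(v)), v >= 1 */
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static unsigned int scale_factor(enum sched_tunable_scaling mode,
				 unsigned int ncpus)
{
	switch (mode) {
	case SCHED_TUNABLESCALING_LINEAR:
		return ncpus;
	case SCHED_TUNABLESCALING_LOG:
		return 1 + ilog2_u(ncpus);
	case SCHED_TUNABLESCALING_NONE:
	default:
		return 1;
	}
}

int main(void)
{
	unsigned int base_ns = 6000000;	/* e.g. a 6ms base latency */
	unsigned int cpus;

	for (cpus = 1; cpus <= 8; cpus *= 2)
		printf("%u cpus: log-scaled latency = %u ns\n", cpus,
		       base_ns * scale_factor(SCHED_TUNABLESCALING_LOG, cpus));
	return 0;
}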
@@ -2062,7 +2088,6 @@ extern int kill_proc_info(int, struct siginfo *, pid_t);
 extern int do_notify_parent(struct task_struct *, int);
 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
 extern void force_sig(int, struct task_struct *);
-extern void force_sig_specific(int, struct task_struct *);
 extern int send_sig(int, struct task_struct *, int);
 extern void zap_other_threads(struct task_struct *p);
 extern struct sigqueue *sigqueue_alloc(void);
@@ -2081,16 +2106,18 @@ static inline int kill_cad_pid(int sig, int priv)
 #define SEND_SIG_PRIV	((struct siginfo *) 1)
 #define SEND_SIG_FORCED	((struct siginfo *) 2)
 
-static inline int is_si_special(const struct siginfo *info)
-{
-	return info <= SEND_SIG_FORCED;
-}
-
-/* True if we are on the alternate signal stack. */
-
+/*
+ * True if we are on the alternate signal stack.
+ */
 static inline int on_sig_stack(unsigned long sp)
 {
-	return (sp - current->sas_ss_sp < current->sas_ss_size);
+#ifdef CONFIG_STACK_GROWSUP
+	return sp >= current->sas_ss_sp &&
+		sp - current->sas_ss_sp < current->sas_ss_size;
+#else
+	return sp > current->sas_ss_sp &&
+		sp - current->sas_ss_sp <= current->sas_ss_size;
+#endif
 }
 
 static inline int sas_ss_flags(unsigned long sp)
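The rewritten on_sig_stack() tightens the boundary cases: on a grows-down stack the initial stack pointer of an empty alternate stack is sas_ss_sp + sas_ss_size and must count as on-stack, while sas_ss_sp itself must not; the new CONFIG_STACK_GROWSUP branch mirrors this for upward-growing stacks. A standalone sketch of the grows-down check with made-up constants:

#include <stdio.h>

#define SS_SP	0x1000UL	/* hypothetical sas_ss_sp */
#define SS_SIZE	0x2000UL	/* hypothetical sas_ss_size */

static int on_sig_stack_growsdown(unsigned long sp)
{
	return sp > SS_SP && sp - SS_SP <= SS_SIZE;
}

int main(void)
{
	printf("%d\n", on_sig_stack_growsdown(SS_SP));		/* 0: base is off-stack */
	printf("%d\n", on_sig_stack_growsdown(SS_SP + SS_SIZE)); /* 1: empty-stack sp */
	printf("%d\n", on_sig_stack_growsdown(SS_SP + 1));	/* 1: deepest on-stack byte */
	return 0;
}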
@@ -2574,8 +2601,6 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
 }
 #endif /* CONFIG_MM_OWNER */
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
-
 #endif /* __KERNEL__ */
 
 #endif