Diffstat (limited to 'include/linux/sched.h')

 include/linux/sched.h | 81 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 60 insertions(+), 21 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 89115ec7d43f..6f7bba93929b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -192,6 +192,12 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #define TASK_DEAD		64
 #define TASK_WAKEKILL		128
 #define TASK_WAKING		256
+#define TASK_STATE_MAX		512
+
+#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
+
+extern char ___assert_task_state[1 - 2*!!(
+		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
 
 /* Convenience macros for the sake of set_task_state */
 #define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
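The ___assert_task_state declaration is a build-time assertion: TASK_STATE_TO_CHAR_STR must supply exactly one character per state bit, i.e. sizeof(...)-1 == ilog2(TASK_STATE_MAX)+1, which is ten for TASK_STATE_MAX = 512. The string moves up here from the bottom of the file and gains 'x', 'K' and 'W' for TASK_DEAD, TASK_WAKEKILL and TASK_WAKING. If the string and the state bits ever drift apart, the array size evaluates to 1 - 2*1 = -1 and the build fails. A minimal standalone sketch of the same negative-array-size trick (macro name is illustrative, not from this header):

	/* Compilation fails when cond is false: the array size becomes -1. */
	#define BUILD_TIME_ASSERT(cond) \
		extern char build_time_assert_failed[1 - 2*!!(!(cond))]

	BUILD_TIME_ASSERT(sizeof(unsigned long long) == 8);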
@@ -371,6 +377,8 @@ extern int sysctl_max_map_count;
 
 #include <linux/aio.h>
 
+#ifdef CONFIG_MMU
+extern void arch_pick_mmap_layout(struct mm_struct *mm);
 extern unsigned long
 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
@@ -380,6 +388,9 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long flags);
 extern void arch_unmap_area(struct mm_struct *, unsigned long);
 extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
+#else
+static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
+#endif
 
 #if USE_SPLIT_PTLOCKS
 /*
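arch_pick_mmap_layout() moves under CONFIG_MMU next to the other mmap-layout hooks (its old free-standing declaration later in the file is dropped by this patch), with an empty static inline stub in the !CONFIG_MMU branch. This is the standard header idiom for optional features: callers compile unchanged either way, and the stub costs nothing. The same pattern in miniature, with a hypothetical feature name:

	#ifdef CONFIG_MY_FEATURE
	extern void my_feature_setup(struct mm_struct *mm);
	#else
	static inline void my_feature_setup(struct mm_struct *mm) {}
	#endif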
@@ -1091,7 +1102,8 @@ struct sched_class {
			      enum cpu_idle_type idle);
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
-	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
+	void (*task_waking) (struct rq *this_rq, struct task_struct *task);
+	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);
@@ -1102,7 +1114,7 @@ struct sched_class {
 
 	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
-	void (*task_new) (struct rq *rq, struct task_struct *p);
+	void (*task_fork) (struct task_struct *p);
 
 	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
			       int running);
@@ -1111,10 +1123,11 @@ struct sched_class {
 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio, int running);
 
-	unsigned int (*get_rr_interval) (struct task_struct *task);
+	unsigned int (*get_rr_interval) (struct rq *rq,
+					 struct task_struct *task);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	void (*moved_group) (struct task_struct *p);
+	void (*moved_group) (struct task_struct *p, int on_rq);
 #endif
 };
 
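The single task_wake_up hook splits in two: task_waking fires while the wakeup is being placed and task_woken once the task is on its destination runqueue. task_new becomes task_fork and no longer takes a runqueue argument, while get_rr_interval and moved_group gain explicit rq/on_rq parameters. A scheduling class opts into the hooks it needs by filling in this ops table; a sketch with hypothetical handlers (the real tables live in kernel/sched_fair.c and kernel/sched_rt.c):

	static void dummy_task_fork(struct task_struct *p)
	{
		/* initialize per-entity state for the new child here */
	}

	static const struct sched_class dummy_sched_class = {
		.task_fork	= dummy_task_fork,
		/* hooks left NULL (task_waking, task_woken, ...) are
		 * simply skipped by the core scheduler */
	};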
@@ -1151,8 +1164,6 @@ struct sched_entity { | |||
1151 | u64 start_runtime; | 1164 | u64 start_runtime; |
1152 | u64 avg_wakeup; | 1165 | u64 avg_wakeup; |
1153 | 1166 | ||
1154 | u64 avg_running; | ||
1155 | |||
1156 | #ifdef CONFIG_SCHEDSTATS | 1167 | #ifdef CONFIG_SCHEDSTATS |
1157 | u64 wait_start; | 1168 | u64 wait_start; |
1158 | u64 wait_max; | 1169 | u64 wait_max; |
@@ -1175,7 +1186,6 @@ struct sched_entity { | |||
1175 | u64 nr_failed_migrations_running; | 1186 | u64 nr_failed_migrations_running; |
1176 | u64 nr_failed_migrations_hot; | 1187 | u64 nr_failed_migrations_hot; |
1177 | u64 nr_forced_migrations; | 1188 | u64 nr_forced_migrations; |
1178 | u64 nr_forced2_migrations; | ||
1179 | 1189 | ||
1180 | u64 nr_wakeups; | 1190 | u64 nr_wakeups; |
1181 | u64 nr_wakeups_sync; | 1191 | u64 nr_wakeups_sync; |
@@ -1411,7 +1421,7 @@ struct task_struct { | |||
1411 | #endif | 1421 | #endif |
1412 | 1422 | ||
1413 | /* Protection of the PI data structures: */ | 1423 | /* Protection of the PI data structures: */ |
1414 | spinlock_t pi_lock; | 1424 | raw_spinlock_t pi_lock; |
1415 | 1425 | ||
1416 | #ifdef CONFIG_RT_MUTEXES | 1426 | #ifdef CONFIG_RT_MUTEXES |
1417 | /* PI waiters blocked on a rt_mutex held by this task */ | 1427 | /* PI waiters blocked on a rt_mutex held by this task */ |
@@ -1544,10 +1554,18 @@ struct task_struct { | |||
1544 | unsigned long trace_recursion; | 1554 | unsigned long trace_recursion; |
1545 | #endif /* CONFIG_TRACING */ | 1555 | #endif /* CONFIG_TRACING */ |
1546 | unsigned long stack_start; | 1556 | unsigned long stack_start; |
1557 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */ | ||
1558 | struct memcg_batch_info { | ||
1559 | int do_batch; /* incremented when batch uncharge started */ | ||
1560 | struct mem_cgroup *memcg; /* target memcg of uncharge */ | ||
1561 | unsigned long bytes; /* uncharged usage */ | ||
1562 | unsigned long memsw_bytes; /* uncharged mem+swap usage */ | ||
1563 | } memcg_batch; | ||
1564 | #endif | ||
1547 | }; | 1565 | }; |
1548 | 1566 | ||
1549 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ | 1567 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ |
1550 | #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed) | 1568 | #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) |
1551 | 1569 | ||
1552 | /* | 1570 | /* |
1553 | * Priority of a process goes from 0..MAX_PRIO-1, valid RT | 1571 | * Priority of a process goes from 0..MAX_PRIO-1, valid RT |
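tsk_cpus_allowed() is the renamed, more accurately named accessor for a task's allowed-CPU mask; going through the macro instead of dereferencing &p->cpus_allowed directly keeps callers working if the field's storage changes later, which is what the "future-safe" comment promises. A hedged usage sketch:

	/* pick some CPU the task is actually allowed to run on */
	int cpu = cpumask_any(tsk_cpus_allowed(p));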
@@ -1840,7 +1858,8 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) | |||
1840 | extern int sched_clock_stable; | 1858 | extern int sched_clock_stable; |
1841 | #endif | 1859 | #endif |
1842 | 1860 | ||
1843 | extern unsigned long long sched_clock(void); | 1861 | /* ftrace calls sched_clock() directly */ |
1862 | extern unsigned long long notrace sched_clock(void); | ||
1844 | 1863 | ||
1845 | extern void sched_clock_init(void); | 1864 | extern void sched_clock_init(void); |
1846 | extern u64 sched_clock_cpu(int cpu); | 1865 | extern u64 sched_clock_cpu(int cpu); |
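ftrace timestamps its events with sched_clock(), so letting the function tracer hook the function itself would recurse; the notrace annotation on the declaration keeps every implementation of it out of the tracer. In this era, include/linux/compiler.h defines the annotation as:

	#define notrace __attribute__((no_instrument_function))

which tells gcc not to emit profiling/instrumentation calls for the function.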
@@ -1903,14 +1922,22 @@ extern unsigned int sysctl_sched_wakeup_granularity; | |||
1903 | extern unsigned int sysctl_sched_shares_ratelimit; | 1922 | extern unsigned int sysctl_sched_shares_ratelimit; |
1904 | extern unsigned int sysctl_sched_shares_thresh; | 1923 | extern unsigned int sysctl_sched_shares_thresh; |
1905 | extern unsigned int sysctl_sched_child_runs_first; | 1924 | extern unsigned int sysctl_sched_child_runs_first; |
1925 | |||
1926 | enum sched_tunable_scaling { | ||
1927 | SCHED_TUNABLESCALING_NONE, | ||
1928 | SCHED_TUNABLESCALING_LOG, | ||
1929 | SCHED_TUNABLESCALING_LINEAR, | ||
1930 | SCHED_TUNABLESCALING_END, | ||
1931 | }; | ||
1932 | extern enum sched_tunable_scaling sysctl_sched_tunable_scaling; | ||
1933 | |||
1906 | #ifdef CONFIG_SCHED_DEBUG | 1934 | #ifdef CONFIG_SCHED_DEBUG |
1907 | extern unsigned int sysctl_sched_features; | ||
1908 | extern unsigned int sysctl_sched_migration_cost; | 1935 | extern unsigned int sysctl_sched_migration_cost; |
1909 | extern unsigned int sysctl_sched_nr_migrate; | 1936 | extern unsigned int sysctl_sched_nr_migrate; |
1910 | extern unsigned int sysctl_sched_time_avg; | 1937 | extern unsigned int sysctl_sched_time_avg; |
1911 | extern unsigned int sysctl_timer_migration; | 1938 | extern unsigned int sysctl_timer_migration; |
1912 | 1939 | ||
1913 | int sched_nr_latency_handler(struct ctl_table *table, int write, | 1940 | int sched_proc_update_handler(struct ctl_table *table, int write, |
1914 | void __user *buffer, size_t *length, | 1941 | void __user *buffer, size_t *length, |
1915 | loff_t *ppos); | 1942 | loff_t *ppos); |
1916 | #endif | 1943 | #endif |
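sched_tunable_scaling selects how the base CFS granularity tunables (latency, minimum granularity, wakeup granularity) are adjusted for the number of online CPUs: left alone, scaled logarithmically, or scaled linearly. A sketch of the factor computation under that reading (function name is illustrative; the real logic lives in the scheduler proper, behind the renamed sched_proc_update_handler sysctl handler):

	static unsigned int tunable_scale_factor(enum sched_tunable_scaling mode,
						 unsigned int ncpus)
	{
		switch (mode) {
		case SCHED_TUNABLESCALING_NONE:
			return 1;
		case SCHED_TUNABLESCALING_LINEAR:
			return ncpus;
		case SCHED_TUNABLESCALING_LOG:
		default:
			return 1 + ilog2(ncpus);	/* grows slowly with CPU count */
		}
	}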
@@ -2066,7 +2093,6 @@ extern int kill_proc_info(int, struct siginfo *, pid_t);
 extern int do_notify_parent(struct task_struct *, int);
 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
 extern void force_sig(int, struct task_struct *);
-extern void force_sig_specific(int, struct task_struct *);
 extern int send_sig(int, struct task_struct *, int);
 extern void zap_other_threads(struct task_struct *p);
 extern struct sigqueue *sigqueue_alloc(void);
@@ -2085,11 +2111,6 @@ static inline int kill_cad_pid(int sig, int priv)
 #define SEND_SIG_PRIV	((struct siginfo *) 1)
 #define SEND_SIG_FORCED	((struct siginfo *) 2)
 
-static inline int is_si_special(const struct siginfo *info)
-{
-	return info <= SEND_SIG_FORCED;
-}
-
 /*
  * True if we are on the alternate signal stack.
  */
@@ -2475,8 +2496,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 #endif /* CONFIG_SMP */
 
-extern void arch_pick_mmap_layout(struct mm_struct *mm);
-
 #ifdef CONFIG_TRACING
 extern void
 __trace_special(void *__tr, void *__data,
@@ -2585,7 +2604,27 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
 }
 #endif /* CONFIG_MM_OWNER */
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
+static inline unsigned long task_rlimit(const struct task_struct *tsk,
+		unsigned int limit)
+{
+	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
+}
+
+static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
+		unsigned int limit)
+{
+	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
+}
+
+static inline unsigned long rlimit(unsigned int limit)
+{
+	return task_rlimit(current, limit);
+}
+
+static inline unsigned long rlimit_max(unsigned int limit)
+{
+	return task_rlimit_max(current, limit);
+}
 
 #endif /* __KERNEL__ */
 
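The new rlimit helpers give a single, ACCESS_ONCE-protected way to read a task's resource limits instead of open-coding tsk->signal->rlim[...].rlim_cur at every call site; the one-shot read matters because another thread can change the limit concurrently, and ACCESS_ONCE prevents the compiler from re-reading a value that might change under the caller. Typical intended use, sketched:

	/* refuse a mapping that would exceed the caller's stack limit */
	if (len > rlimit(RLIMIT_STACK))
		return -ENOMEM;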