Diffstat (limited to 'include/linux/sched.h')
 -rw-r--r--	include/linux/sched.h	77
 1 file changed, 58 insertions(+), 19 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ce160d68f5e7..d0036e52a24a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -336,6 +336,9 @@ extern unsigned long sysctl_hung_task_warnings;
 extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
					 void __user *buffer,
					 size_t *lenp, loff_t *ppos);
+#else
+/* Avoid need for ifdefs elsewhere in the code */
+enum { sysctl_hung_task_timeout_secs = 0 };
 #endif
 
 /* Attach to any functions which should be ignored in wchan output. */
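
A note on the new #else branch: defining sysctl_hung_task_timeout_secs as an
enum constant keeps the symbol visible when CONFIG_DETECT_HUNG_TASK is off, so
callers can test it without #ifdefs and the compiler discards the dead branch.
A minimal sketch of such a caller (the function itself is invented for
illustration):

	/* Hypothetical caller: builds with or without CONFIG_DETECT_HUNG_TASK,
	 * because the #else branch still defines the symbol (as constant 0). */
	static unsigned long hung_task_poll_jiffies(void)
	{
		if (!sysctl_hung_task_timeout_secs)	/* disabled or not configured */
			return MAX_SCHEDULE_TIMEOUT;
		return sysctl_hung_task_timeout_secs * HZ;
	}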
@@ -623,6 +626,10 @@ struct signal_struct {
 
	int oom_adj;		/* OOM kill score adjustment (bit shift) */
	int oom_score_adj;	/* OOM kill score adjustment */
+
+	struct mutex cred_guard_mutex;	/* guard against foreign influences on
+					 * credential calculations
+					 * (notably. ptrace) */
 };
 
 /* Context switch must be unlocked if interrupts are to be enabled */
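
The mutex is moving here from task_struct (see the removal further down in
this diff), so credential changes are now serialized per thread group rather
than per thread. A sketch of the resulting locking pattern, as an illustration
rather than the actual exec path:

	/* Illustrative only: take the group-wide guard before credential
	 * calculations so a concurrent ptrace attach cannot race them. */
	if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
		return -ERESTARTNOINTR;
	/* ... compute and commit new credentials ... */
	mutex_unlock(&current->signal->cred_guard_mutex);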
@@ -665,6 +672,9 @@ struct user_struct {
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
 #endif
+#ifdef CONFIG_FANOTIFY
+	atomic_t fanotify_listeners;
+#endif
 #ifdef CONFIG_EPOLL
	atomic_t epoll_watches; /* The number of file descriptors currently watched */
 #endif
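
fanotify gets a per-user listener count next to the inotify counters. A hedged
sketch of how such a counter is typically charged; the limit constant and
helper below are invented for illustration, not actual fanotify code:

	#define FANOTIFY_MAX_LISTENERS	128	/* assumed cap, not a real constant here */

	static int fanotify_charge_listener(struct user_struct *user)
	{
		if (atomic_read(&user->fanotify_listeners) >= FANOTIFY_MAX_LISTENERS)
			return -EMFILE;		/* user already has too many groups */
		atomic_inc(&user->fanotify_listeners);
		return 0;
	}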
@@ -875,6 +885,7 @@ enum sched_domain_level {
	SD_LV_NONE = 0,
	SD_LV_SIBLING,
	SD_LV_MC,
+	SD_LV_BOOK,
	SD_LV_CPU,
	SD_LV_NODE,
	SD_LV_ALLNODES,
@@ -1072,7 +1083,7 @@ struct sched_class {
			 struct task_struct *task);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	void (*moved_group) (struct task_struct *p, int on_rq);
+	void (*task_move_group) (struct task_struct *p, int on_rq);
 #endif
 };
 
@@ -1160,6 +1171,13 @@ struct sched_rt_entity {
 
 struct rcu_node;
 
+enum perf_event_task_context {
+	perf_invalid_context = -1,
+	perf_hw_context = 0,
+	perf_sw_context,
+	perf_nr_task_contexts,
+};
+
 struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
@@ -1202,11 +1220,13 @@ struct task_struct {
	unsigned int policy;
	cpumask_t cpus_allowed;
 
-#ifdef CONFIG_TREE_PREEMPT_RCU
+#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	char rcu_read_unlock_special;
-	struct rcu_node *rcu_blocked_node;
	struct list_head rcu_node_entry;
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_TREE_PREEMPT_RCU
+	struct rcu_node *rcu_blocked_node;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
@@ -1288,13 +1308,10 @@ struct task_struct {
	struct list_head cpu_timers[3];
 
 /* process credentials */
-	const struct cred *real_cred;	/* objective and real subjective task
+	const struct cred __rcu *real_cred; /* objective and real subjective task
					 * credentials (COW) */
-	const struct cred *cred;	/* effective (overridable) subjective task
+	const struct cred __rcu *cred;	/* effective (overridable) subjective task
					 * credentials (COW) */
-	struct mutex cred_guard_mutex;	/* guard against foreign influences on
-					 * credential calculations
-					 * (notably. ptrace) */
	struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
 
	char comm[TASK_COMM_LEN]; /* executable name excluding path
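
__rcu is a sparse annotation: it records that these pointers are published via
RCU, so sparse can warn when they are read without rcu_dereference() or
written without rcu_assign_pointer(). A minimal reader sketch (the helper name
is made up; the kernel's own __task_cred()/task_uid() wrappers do the same):

	static uid_t task_uid_sketch(struct task_struct *task)
	{
		const struct cred *cred;
		uid_t uid;

		rcu_read_lock();
		cred = rcu_dereference(task->real_cred);
		uid = cred->uid;	/* only valid inside the read-side section */
		rcu_read_unlock();
		return uid;
	}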
@@ -1418,7 +1435,7 @@ struct task_struct {
 #endif
 #ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
-	struct css_set *cgroups;
+	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
 #endif
@@ -1431,7 +1448,7 @@ struct task_struct {
	struct futex_pi_state *pi_state_cache;
 #endif
 #ifdef CONFIG_PERF_EVENTS
-	struct perf_event_context *perf_event_ctxp;
+	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
 #endif
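
perf_event_ctxp becomes an array indexed by the perf_event_task_context enum
added earlier in this diff, giving each task one context per PMU class
(hardware and software). Code that used to touch the single context now
iterates over all of them, roughly like this sketch:

	/* Sketch of the iteration pattern the array enables. */
	struct perf_event_context *ctx;
	int ctxn;

	for (ctxn = 0; ctxn < perf_nr_task_contexts; ctxn++) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;
		/* ... schedule in/out, inherit, or tear down ctx ... */
	}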
@@ -1681,8 +1698,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 /*
  * Per process flags
  */
-#define PF_ALIGNWARN	0x00000001	/* Print alignment warning msgs */
-					/* Not implemented yet, only for 486*/
+#define PF_KSOFTIRQD	0x00000001	/* I am ksoftirqd */
 #define PF_STARTING	0x00000002	/* being created */
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
@@ -1694,7 +1710,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_DUMPCORE	0x00000200	/* dumped core */
 #define PF_SIGNALED	0x00000400	/* killed by a signal */
 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
-#define PF_FLUSHER	0x00001000	/* responsible for disk writeback */
 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
 #define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
@@ -1740,7 +1755,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
 #define used_math() tsk_used_math(current)
 
-#ifdef CONFIG_TREE_PREEMPT_RCU
+#ifdef CONFIG_PREEMPT_RCU
 
 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
 #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
@@ -1749,7 +1764,9 @@ static inline void rcu_copy_process(struct task_struct *p)
 {
	p->rcu_read_lock_nesting = 0;
	p->rcu_read_unlock_special = 0;
+#ifdef CONFIG_TREE_PREEMPT_RCU
	p->rcu_blocked_node = NULL;
+#endif
	INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
@@ -1826,6 +1843,19 @@ extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
 #endif
 
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+/*
+ * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
+ * The reason for this explicit opt-in is not to have perf penalty with
+ * slow sched_clocks.
+ */
+extern void enable_sched_clock_irqtime(void);
+extern void disable_sched_clock_irqtime(void);
+#else
+static inline void enable_sched_clock_irqtime(void) {}
+static inline void disable_sched_clock_irqtime(void) {}
+#endif
+
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
 extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
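
Since the !CONFIG_IRQ_TIME_ACCOUNTING stubs compile to nothing, callers never
need an #ifdef; an architecture simply opts in at boot once it knows its
sched_clock() is cheap. A sketch under that assumption (the init hook and flag
below are hypothetical):

	/* Hypothetical arch setup: opt in only when a fast, stable
	 * sched_clock source is available. */
	static int __init my_arch_time_init(void)
	{
		if (have_fast_sched_clock)	/* assumed arch-specific flag */
			enable_sched_clock_irqtime();
		return 0;
	}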
@@ -2109,7 +2139,9 @@ extern void daemonize(const char *, ...);
 extern int allow_signal(int);
 extern int disallow_signal(int);
 
-extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
+extern int do_execve(const char *,
+		     const char __user * const __user *,
+		     const char __user * const __user *, struct pt_regs *);
 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
 struct task_struct *fork_idle(int);
 
@@ -2208,9 +2240,16 @@ static inline void task_unlock(struct task_struct *p)
	spin_unlock(&p->alloc_lock);
 }
 
-extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
							unsigned long *flags);
 
+#define lock_task_sighand(tsk, flags)					\
+({	struct sighand_struct *__ss;					\
+	__cond_lock(&(tsk)->sighand->siglock,				\
+		    (__ss = __lock_task_sighand(tsk, flags)));		\
+	__ss;								\
+})									\
+
 static inline void unlock_task_sighand(struct task_struct *tsk,
					unsigned long *flags)
 {
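
The rename to __lock_task_sighand() plus the wrapper macro is for sparse:
__cond_lock() tells it the siglock is acquired exactly when the returned
pointer is non-NULL, silencing context-imbalance warnings in the usual
calling pattern, which looks like this:

	/* Typical caller: the lock is held only on a non-NULL return. */
	unsigned long flags;
	struct sighand_struct *sighand;

	sighand = lock_task_sighand(task, &flags);
	if (sighand) {
		/* ... touch siglock-protected signal state ... */
		unlock_task_sighand(task, &flags);
	}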
@@ -2365,9 +2404,9 @@ extern int __cond_resched_lock(spinlock_t *lock);
 
 extern int __cond_resched_softirq(void);
 
-#define cond_resched_softirq() ({				\
-	__might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET);	\
-	__cond_resched_softirq();				\
+#define cond_resched_softirq() ({					\
+	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
+	__cond_resched_softirq();					\
 })
 
 /*
