Diffstat (limited to 'include/linux/sched.h')
 -rw-r--r--  include/linux/sched.h | 60
 1 file changed, 47 insertions, 13 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ed1a9bc52b2..d8005503cc6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -143,7 +143,7 @@ extern unsigned long nr_iowait_cpu(int cpu);
 extern unsigned long this_cpu_load(void);
 
 
-extern void calc_global_load(void);
+extern void calc_global_load(unsigned long ticks);
 
 extern unsigned long get_parent_ip(unsigned long addr);
 
@@ -336,6 +336,9 @@ extern unsigned long sysctl_hung_task_warnings;
 extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
                                          void __user *buffer,
                                          size_t *lenp, loff_t *ppos);
+#else
+/* Avoid need for ifdefs elsewhere in the code */
+enum { sysctl_hung_task_timeout_secs = 0 };
 #endif
 
 /* Attach to any functions which should be ignored in wchan output. */
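
Note on the hunk above: defining sysctl_hung_task_timeout_secs as a zero-valued enum constant when the hung-task detector is compiled out lets callers test the knob unconditionally; the compiler sees a constant 0 and drops the dependent code, so no #ifdef is needed at the call site. A minimal sketch of a hypothetical caller (not part of this diff):

    /* Hypothetical caller: with the detector compiled out the constant is 0,
     * so the branch below is dead code and needs no #ifdef. */
    static void example_warn_if_past_hung_timeout(unsigned long sleep_secs)
    {
            if (sysctl_hung_task_timeout_secs &&
                sleep_secs > sysctl_hung_task_timeout_secs)
                    printk(KERN_WARNING "sleep exceeds hung-task timeout\n");
    }
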
@@ -623,6 +626,10 @@ struct signal_struct {
 
         int oom_adj;            /* OOM kill score adjustment (bit shift) */
         int oom_score_adj;      /* OOM kill score adjustment */
+
+        struct mutex cred_guard_mutex;  /* guard against foreign influences on
+                                         * credential calculations
+                                         * (notably. ptrace) */
 };
 
 /* Context switch must be unlocked if interrupts are to be enabled */
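
Note on the hunk above: cred_guard_mutex becomes per-process by moving into signal_struct (the matching removal from task_struct appears further down), so users now reach it through tsk->signal. A sketch of the resulting usage pattern, with a hypothetical function name:

    /* Sketch only: serialize a credential check against exec/ptrace by
     * taking the per-process mutex via the signal_struct. */
    static int example_inspect_creds(struct task_struct *task)
    {
            int ret;
 
            ret = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
            if (ret)
                    return ret;
            /* ... examine task credentials while exec is held off ... */
            mutex_unlock(&task->signal->cred_guard_mutex);
            return 0;
    }
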
@@ -665,6 +672,9 @@ struct user_struct {
         atomic_t inotify_watches; /* How many inotify watches does this user have? */
         atomic_t inotify_devs;  /* How many inotify devs does this user have opened? */
 #endif
+#ifdef CONFIG_FANOTIFY
+        atomic_t fanotify_listeners;
+#endif
 #ifdef CONFIG_EPOLL
         atomic_t epoll_watches; /* The number of file descriptors currently watched */
 #endif
@@ -852,6 +862,7 @@ struct sched_group {
          * single CPU.
          */
         unsigned int cpu_power, cpu_power_orig;
+        unsigned int group_weight;
 
         /*
          * The CPUs this group covers.
@@ -875,6 +886,7 @@ enum sched_domain_level {
         SD_LV_NONE = 0,
         SD_LV_SIBLING,
         SD_LV_MC,
+        SD_LV_BOOK,
         SD_LV_CPU,
         SD_LV_NODE,
         SD_LV_ALLNODES,
@@ -1072,7 +1084,7 @@ struct sched_class {
                                struct task_struct *task);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-        void (*moved_group) (struct task_struct *p, int on_rq);
+        void (*task_move_group) (struct task_struct *p, int on_rq);
 #endif
 };
 
@@ -1160,6 +1172,13 @@ struct sched_rt_entity {
 
 struct rcu_node;
 
+enum perf_event_task_context {
+        perf_invalid_context = -1,
+        perf_hw_context = 0,
+        perf_sw_context,
+        perf_nr_task_contexts,
+};
+
 struct task_struct {
         volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
         void *stack;
@@ -1297,9 +1316,6 @@ struct task_struct {
                                          * credentials (COW) */
         const struct cred __rcu *cred;  /* effective (overridable) subjective task
                                          * credentials (COW) */
-        struct mutex cred_guard_mutex;  /* guard against foreign influences on
-                                         * credential calculations
-                                         * (notably. ptrace) */
         struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
 
         char comm[TASK_COMM_LEN]; /* executable name excluding path
@@ -1436,7 +1452,7 @@ struct task_struct {
         struct futex_pi_state *pi_state_cache;
 #endif
 #ifdef CONFIG_PERF_EVENTS
-        struct perf_event_context *perf_event_ctxp;
+        struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
         struct mutex perf_event_mutex;
         struct list_head perf_event_list;
 #endif
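
Note on the two perf hunks above: perf_event_ctxp becomes an array indexed by the new perf_event_task_context enum, giving hardware and software events separate per-task contexts. An illustrative walk over the array (the function is hypothetical, not code from this diff):

    /* Illustrative: visit each per-type perf context a task may own. */
    static void example_visit_perf_contexts(struct task_struct *p)
    {
            int ctxn;
 
            for (ctxn = 0; ctxn < perf_nr_task_contexts; ctxn++) {
                    struct perf_event_context *ctx = p->perf_event_ctxp[ctxn];
 
                    if (!ctx)
                            continue;
                    /* ... operate on ctx, e.g. under ctx->lock ... */
            }
    }
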
@@ -1686,8 +1702,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 /*
  * Per process flags
  */
-#define PF_ALIGNWARN    0x00000001      /* Print alignment warning msgs */
-                                        /* Not implemented yet, only for 486*/
+#define PF_KSOFTIRQD    0x00000001      /* I am ksoftirqd */
 #define PF_STARTING     0x00000002      /* being created */
 #define PF_EXITING      0x00000004      /* getting shut down */
 #define PF_EXITPIDONE   0x00000008      /* pi exit done on shut down */
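
Note on the hunk above: the long-unused PF_ALIGNWARN bit is reused as PF_KSOFTIRQD so that code (e.g. the softirq time accounting work in the same series) can tell when work is running in ksoftirqd itself. A trivial sketch of such a check, with a hypothetical helper name:

    /* Sketch: is this task the per-CPU ksoftirqd thread? */
    static inline bool example_task_is_ksoftirqd(struct task_struct *p)
    {
            return p->flags & PF_KSOFTIRQD;
    }
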
@@ -1699,7 +1714,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_DUMPCORE     0x00000200      /* dumped core */
 #define PF_SIGNALED     0x00000400      /* killed by a signal */
 #define PF_MEMALLOC     0x00000800      /* Allocating memory */
-#define PF_FLUSHER      0x00001000      /* responsible for disk writeback */
 #define PF_USED_MATH    0x00002000      /* if unset the fpu must be initialized before use */
 #define PF_FREEZING     0x00004000      /* freeze in progress. do not account to load */
 #define PF_NOFREEZE     0x00008000      /* this thread should not be frozen */
@@ -1837,6 +1851,19 @@ extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
 #endif
 
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+/*
+ * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
+ * The reason for this explicit opt-in is not to have perf penalty with
+ * slow sched_clocks.
+ */
+extern void enable_sched_clock_irqtime(void);
+extern void disable_sched_clock_irqtime(void);
+#else
+static inline void enable_sched_clock_irqtime(void) {}
+static inline void disable_sched_clock_irqtime(void) {}
+#endif
+
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
 extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
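
Note on the hunk above: IRQ time accounting is an explicit runtime opt-in so that platforms with a slow sched_clock() pay no overhead; an architecture whose clock is cheap calls enable_sched_clock_irqtime() once during setup (x86 does this from its TSC code, for example). A sketch under that assumption, with hypothetical helper names:

    /* Sketch: opt in to IRQ time accounting only when the clock is cheap.
     * example_sched_clock_is_fast() is a hypothetical platform helper. */
    static int __init example_irqtime_setup(void)
    {
            if (example_sched_clock_is_fast())
                    enable_sched_clock_irqtime();
            return 0;
    }
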
@@ -2221,9 +2248,16 @@ static inline void task_unlock(struct task_struct *p)
         spin_unlock(&p->alloc_lock);
 }
 
-extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
                                                         unsigned long *flags);
 
+#define lock_task_sighand(tsk, flags)                                  \
+({      struct sighand_struct *__ss;                                   \
+        __cond_lock(&(tsk)->sighand->siglock,                          \
+                    (__ss = __lock_task_sighand(tsk, flags)));         \
+        __ss;                                                          \
+})                                                                     \
+
 static inline void unlock_task_sighand(struct task_struct *tsk,
                                        unsigned long *flags)
 {
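
Note on the hunk above: turning lock_task_sighand() into a macro around __lock_task_sighand() lets the __cond_lock() annotation tell sparse that the siglock is held exactly when the return value is non-NULL. The usual caller pattern looks roughly like this (sketch, hypothetical function name):

    /* Sketch: take the sighand lock if the task still has one. */
    static int example_signal_op(struct task_struct *tsk)
    {
            struct sighand_struct *sighand;
            unsigned long flags;
 
            sighand = lock_task_sighand(tsk, &flags);
            if (!sighand)
                    return -ESRCH;  /* task is already exiting */
 
            /* ... inspect or modify signal state under siglock ... */
 
            unlock_task_sighand(tsk, &flags);
            return 0;
    }
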
@@ -2378,9 +2412,9 @@ extern int __cond_resched_lock(spinlock_t *lock);
 
 extern int __cond_resched_softirq(void);
 
-#define cond_resched_softirq() ({                               \
-        __might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET);      \
-        __cond_resched_softirq();                               \
+#define cond_resched_softirq() ({                                       \
+        __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);      \
+        __cond_resched_softirq();                                       \
 })
 
 /*