Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 74 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 56 insertions(+), 18 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1e2a6db2d7dd..2c79e921a68b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -336,6 +336,9 @@ extern unsigned long sysctl_hung_task_warnings;
 extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
					 void __user *buffer,
					 size_t *lenp, loff_t *ppos);
+#else
+/* Avoid need for ifdefs elsewhere in the code */
+enum { sysctl_hung_task_timeout_secs = 0 };
 #endif
 
 /* Attach to any functions which should be ignored in wchan output. */
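
Note: the enum in the #else branch above makes sysctl_hung_task_timeout_secs a compile-time constant 0 when CONFIG_DETECT_HUNG_TASK is off, so callers can test it unconditionally and let the compiler discard the dead branch. A minimal sketch of the calling pattern (the surrounding code is illustrative, not part of this patch):

	/* constant-folds to "if (0)" when the detector is compiled out */
	if (sysctl_hung_task_timeout_secs)
		schedule_timeout_interruptible(sysctl_hung_task_timeout_secs * HZ);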
@@ -623,6 +626,10 @@ struct signal_struct {
 
 	int oom_adj;		/* OOM kill score adjustment (bit shift) */
 	int oom_score_adj;	/* OOM kill score adjustment */
+
+	struct mutex cred_guard_mutex;	/* guard against foreign influences on
+					 * credential calculations
+					 * (notably. ptrace) */
 };
 
 /* Context switch must be unlocked if interrupts are to be enabled */
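
Note: cred_guard_mutex moves here from task_struct (see the matching deletion further down), making it per thread group rather than per thread. A hedged sketch of the classic taker, in the style of ptrace_attach(); only the ->signal-> indirection is new:

	/* serialize credential computations against an in-flight exec */
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		return -ERESTARTNOINTR;
	/* ... permission checks that must not race with exec ... */
	mutex_unlock(&task->signal->cred_guard_mutex);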
@@ -665,6 +672,9 @@ struct user_struct {
 	atomic_t inotify_watches; /* How many inotify watches does this user have? */
 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
 #endif
+#ifdef CONFIG_FANOTIFY
+	atomic_t fanotify_listeners;
+#endif
 #ifdef CONFIG_EPOLL
 	atomic_t epoll_watches; /* The number of file descriptors currently watched */
 #endif
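
Note: fanotify_listeners gives fanotify a per-user count of listener groups so a cap can be enforced at group-creation time. A sketch of the intended accounting (the cap constant and error path are assumptions, not part of this header change):

	struct user_struct *user = get_current_user();

	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
		free_uid(user);
		return -EMFILE;
	}
	atomic_inc(&user->fanotify_listeners);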
@@ -852,6 +862,7 @@ struct sched_group {
 	 * single CPU.
 	 */
 	unsigned int cpu_power, cpu_power_orig;
+	unsigned int group_weight;
 
 	/*
 	 * The CPUs this group covers.
@@ -875,6 +886,7 @@ enum sched_domain_level {
 	SD_LV_NONE = 0,
 	SD_LV_SIBLING,
 	SD_LV_MC,
+	SD_LV_BOOK,
 	SD_LV_CPU,
 	SD_LV_NODE,
 	SD_LV_ALLNODES,
@@ -1072,7 +1084,7 @@ struct sched_class {
 					     struct task_struct *task);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	void (*moved_group) (struct task_struct *p, int on_rq);
+	void (*task_move_group) (struct task_struct *p, int on_rq);
 #endif
 };
 
@@ -1160,6 +1172,13 @@ struct sched_rt_entity {
 
 struct rcu_node;
 
+enum perf_event_task_context {
+	perf_invalid_context = -1,
+	perf_hw_context = 0,
+	perf_sw_context,
+	perf_nr_task_contexts,
+};
+
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
 	void *stack;
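
Note: this enum sizes the perf_event_ctxp[] array added to task_struct further down, splitting the per-task event state into separate hardware and software contexts. Code iterating the contexts then follows this pattern (sketch, names illustrative):

	int ctxn;
	struct perf_event_context *ctx;

	for (ctxn = 0; ctxn < perf_nr_task_contexts; ctxn++) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;
		/* ... schedule this context's events in or out ... */
	}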
@@ -1202,11 +1221,13 @@ struct task_struct {
 	unsigned int policy;
 	cpumask_t cpus_allowed;
 
-#ifdef CONFIG_TREE_PREEMPT_RCU
+#ifdef CONFIG_PREEMPT_RCU
 	int rcu_read_lock_nesting;
 	char rcu_read_unlock_special;
-	struct rcu_node *rcu_blocked_node;
 	struct list_head rcu_node_entry;
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_TREE_PREEMPT_RCU
+	struct rcu_node *rcu_blocked_node;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
@@ -1288,13 +1309,10 @@ struct task_struct {
 	struct list_head cpu_timers[3];
 
 /* process credentials */
-	const struct cred *real_cred;	/* objective and real subjective task
+	const struct cred __rcu *real_cred; /* objective and real subjective task
 					 * credentials (COW) */
-	const struct cred *cred;	/* effective (overridable) subjective task
+	const struct cred __rcu *cred;	/* effective (overridable) subjective task
 					 * credentials (COW) */
-	struct mutex cred_guard_mutex;	/* guard against foreign influences on
-					 * credential calculations
-					 * (notably. ptrace) */
 	struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
 
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
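
Note: __rcu is a sparse annotation, not a compiler-enforced type; it obliges readers to use rcu_dereference() inside an RCU read-side critical section. A minimal sketch of reading another task's objective credentials (essentially what the __task_cred() helper wraps):

	const struct cred *cred;
	uid_t uid;

	rcu_read_lock();
	cred = rcu_dereference(task->real_cred);
	uid = cred->uid;
	rcu_read_unlock();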
@@ -1418,7 +1436,7 @@ struct task_struct {
 #endif
 #ifdef CONFIG_CGROUPS
 	/* Control Group info protected by css_set_lock */
-	struct css_set *cgroups;
+	struct css_set __rcu *cgroups;
 	/* cg_list protected by css_set_lock and tsk->alloc_lock */
 	struct list_head cg_list;
 #endif
@@ -1431,7 +1449,7 @@ struct task_struct {
 	struct futex_pi_state *pi_state_cache;
 #endif
 #ifdef CONFIG_PERF_EVENTS
-	struct perf_event_context *perf_event_ctxp;
+	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
 	struct mutex perf_event_mutex;
 	struct list_head perf_event_list;
 #endif
@@ -1681,8 +1699,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 /*
  * Per process flags
  */
-#define PF_ALIGNWARN	0x00000001	/* Print alignment warning msgs */
-					/* Not implemented yet, only for 486*/
+#define PF_KSOFTIRQD	0x00000001	/* I am ksoftirqd */
 #define PF_STARTING	0x00000002	/* being created */
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
@@ -1694,7 +1711,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_DUMPCORE	0x00000200	/* dumped core */
 #define PF_SIGNALED	0x00000400	/* killed by a signal */
 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
-#define PF_FLUSHER	0x00001000	/* responsible for disk writeback */
 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
 #define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
@@ -1740,7 +1756,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
 #define used_math() tsk_used_math(current)
 
-#ifdef CONFIG_TREE_PREEMPT_RCU
+#ifdef CONFIG_PREEMPT_RCU
 
 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
 #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
@@ -1749,7 +1765,9 @@ static inline void rcu_copy_process(struct task_struct *p)
 {
 	p->rcu_read_lock_nesting = 0;
 	p->rcu_read_unlock_special = 0;
+#ifdef CONFIG_TREE_PREEMPT_RCU
 	p->rcu_blocked_node = NULL;
+#endif
 	INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
@@ -1826,6 +1844,19 @@ extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
 #endif
 
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+/*
+ * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
+ * The reason for this explicit opt-in is not to have perf penalty with
+ * slow sched_clocks.
+ */
+extern void enable_sched_clock_irqtime(void);
+extern void disable_sched_clock_irqtime(void);
+#else
+static inline void enable_sched_clock_irqtime(void) {}
+static inline void disable_sched_clock_irqtime(void) {}
+#endif
+
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
 extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
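
Note: the opt-in is made from architecture boot code once sched_clock() is known to be cheap and reliable, e.g. after TSC validation on x86. A hedged sketch (the command-line guard shown is illustrative):

	/* arch init: enable irq time accounting only on a fast sched_clock() */
	if (!no_sched_irq_time)
		enable_sched_clock_irqtime();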
@@ -2210,9 +2241,16 @@ static inline void task_unlock(struct task_struct *p)
 	spin_unlock(&p->alloc_lock);
 }
 
-extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
 							unsigned long *flags);
 
+#define lock_task_sighand(tsk, flags)					\
+({	struct sighand_struct *__ss;					\
+	__cond_lock(&(tsk)->sighand->siglock,				\
+		    (__ss = __lock_task_sighand(tsk, flags)));		\
+	__ss;								\
+})									\
+
 static inline void unlock_task_sighand(struct task_struct *tsk,
 					unsigned long *flags)
 {
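
Note: the rename to __lock_task_sighand() plus the wrapper macro exists for sparse: __cond_lock() records that the siglock is held exactly when the return value is non-NULL. Call sites keep their familiar shape:

	unsigned long flags;
	struct sighand_struct *sighand;

	sighand = lock_task_sighand(task, &flags);
	if (sighand) {
		/* task->sighand->siglock is held here */
		unlock_task_sighand(task, &flags);
	}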
@@ -2367,9 +2405,9 @@ extern int __cond_resched_lock(spinlock_t *lock);
 
 extern int __cond_resched_softirq(void);
 
 #define cond_resched_softirq() ({					\
-	__might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET);		\
+	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
 	__cond_resched_softirq();					\
 })
 
 /*
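
Note: SOFTIRQ_DISABLE_OFFSET comes from the same irq-time accounting series, which splits the softirq bits of preempt_count so that "bh disabled" and "actually serving a softirq" can be distinguished when charging time. Roughly, per that series' hardirq.h changes:

	#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
	#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)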