Diffstat (limited to 'include/linux/sched.h'):

 include/linux/sched.h | 48 ++++++++++++++++++++++++++++++++++++------------
 1 file changed, 36 insertions(+), 12 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1e2a6db2d7dd..0383601a927c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -875,6 +875,7 @@ enum sched_domain_level {
         SD_LV_NONE = 0,
         SD_LV_SIBLING,
         SD_LV_MC,
+        SD_LV_BOOK,
         SD_LV_CPU,
         SD_LV_NODE,
         SD_LV_ALLNODES,
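
The new SD_LV_BOOK level slots in between the multi-core and physical-package levels; it backs the s390 "book" scheduling domain (CONFIG_SCHED_BOOK). A minimal sketch of how a domain's level is consumed, assuming the scheduler-private for_each_domain() iterator and a local cpu variable:

/* Sketch: walk up cpu's sched-domain hierarchy to the book level;
 * sd->level holds one of the enum sched_domain_level values. */
struct sched_domain *sd;

for_each_domain(cpu, sd) {
        if (sd->level == SD_LV_BOOK)
                break;          /* sd now spans the CPUs of one book */
}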
@@ -1160,6 +1161,13 @@ struct sched_rt_entity {
 
 struct rcu_node;
 
+enum perf_event_task_context {
+        perf_invalid_context = -1,
+        perf_hw_context = 0,
+        perf_sw_context,
+        perf_nr_task_contexts,
+};
+
 struct task_struct {
         volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
         void *stack;
@@ -1202,11 +1210,13 @@ struct task_struct {
         unsigned int policy;
         cpumask_t cpus_allowed;
 
-#ifdef CONFIG_TREE_PREEMPT_RCU
+#ifdef CONFIG_PREEMPT_RCU
         int rcu_read_lock_nesting;
         char rcu_read_unlock_special;
-        struct rcu_node *rcu_blocked_node;
         struct list_head rcu_node_entry;
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_TREE_PREEMPT_RCU
+        struct rcu_node *rcu_blocked_node;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
@@ -1288,9 +1298,9 @@ struct task_struct {
         struct list_head cpu_timers[3];
 
 /* process credentials */
-        const struct cred *real_cred;   /* objective and real subjective task
+        const struct cred __rcu *real_cred; /* objective and real subjective task
                                          * credentials (COW) */
-        const struct cred *cred;        /* effective (overridable) subjective task
+        const struct cred __rcu *cred;  /* effective (overridable) subjective task
                                          * credentials (COW) */
         struct mutex cred_guard_mutex;  /* guard against foreign influences on
                                          * credential calculations
@@ -1418,7 +1428,7 @@ struct task_struct {
 #endif
 #ifdef CONFIG_CGROUPS
         /* Control Group info protected by css_set_lock */
-        struct css_set *cgroups;
+        struct css_set __rcu *cgroups;
         /* cg_list protected by css_set_lock and tsk->alloc_lock */
         struct list_head cg_list;
 #endif
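
The __rcu annotations on real_cred, cred and cgroups in the two hunks above do not change the generated code; they place the pointers in sparse's RCU address space so that unprotected dereferences are flagged at C=1 build time. A minimal sketch of the checked access pattern, assuming a task_struct pointer named task:

/* Sketch: reading an __rcu-annotated field under RCU protection.
 * rcu_dereference() both orders the load and satisfies sparse's
 * address-space check; a bare task->real_cred would now warn. */
const struct cred *cred;
uid_t uid;

rcu_read_lock();
cred = rcu_dereference(task->real_cred);
uid = cred->uid;                /* read-only use inside the read section */
rcu_read_unlock();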
@@ -1431,7 +1441,7 @@ struct task_struct {
         struct futex_pi_state *pi_state_cache;
 #endif
 #ifdef CONFIG_PERF_EVENTS
-        struct perf_event_context *perf_event_ctxp;
+        struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
         struct mutex perf_event_mutex;
         struct list_head perf_event_list;
 #endif
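
This is where the perf_event_task_context enum added earlier pays off: the single perf_event_ctxp pointer becomes an array with one slot per class of task context (hardware PMU vs. software events), sized by perf_nr_task_contexts. A minimal sketch of the iteration this enables in the perf core, again assuming a task pointer named task:

/* Sketch: visit each per-task perf context the task may own. */
struct perf_event_context *ctx;
int ctxn;

for (ctxn = 0; ctxn < perf_nr_task_contexts; ctxn++) {
        ctx = task->perf_event_ctxp[ctxn];
        if (!ctx)
                continue;
        /* ... operate on this context's events ... */
}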
@@ -1681,8 +1691,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 /*
  * Per process flags
  */
-#define PF_ALIGNWARN    0x00000001      /* Print alignment warning msgs */
-                                        /* Not implemented yet, only for 486*/
+#define PF_KSOFTIRQD    0x00000001      /* I am ksoftirqd */
 #define PF_STARTING     0x00000002      /* being created */
 #define PF_EXITING      0x00000004      /* getting shut down */
 #define PF_EXITPIDONE   0x00000008      /* pi exit done on shut down */
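
PF_ALIGNWARN was never implemented, so its bit is recycled: PF_KSOFTIRQD marks the per-CPU ksoftirqd threads, letting the irq-time accounting added further down attribute their runtime as softirq time rather than ordinary system time. A sketch of the kind of predicate this flag enables:

/* Sketch: the check the irqtime series wants in the accounting path. */
static inline int task_is_ksoftirqd(struct task_struct *p)
{
        return (p->flags & PF_KSOFTIRQD) != 0;
}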
@@ -1740,7 +1749,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
 #define used_math() tsk_used_math(current)
 
-#ifdef CONFIG_TREE_PREEMPT_RCU
+#ifdef CONFIG_PREEMPT_RCU
 
 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
 #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
@@ -1749,7 +1758,9 @@ static inline void rcu_copy_process(struct task_struct *p)
 {
         p->rcu_read_lock_nesting = 0;
         p->rcu_read_unlock_special = 0;
+#ifdef CONFIG_TREE_PREEMPT_RCU
         p->rcu_blocked_node = NULL;
+#endif
         INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
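rcu_copy_process() now clears rcu_blocked_node only for tree RCU, since tiny preemptible RCU has no rcu_node hierarchy; the shared fields stay under the common CONFIG_PREEMPT_RCU guard, matching the task_struct hunk above. For context, a heavily abridged sketch of its call site in kernel/fork.c:

/* Sketch: copy_process() resets RCU state so a child never inherits
 * the parent's read-side critical-section bookkeeping. */
static struct task_struct *copy_process(/* many args elided */)
{
        struct task_struct *p;

        p = dup_task_struct(current);   /* simplified; error paths elided */
        rcu_copy_process(p);            /* child starts outside any RCU read section */
        /* ... */
        return p;
}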
@@ -1826,6 +1837,19 @@ extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
 #endif
 
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+/*
+ * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
+ * The reason for this explicit opt-in is not to have perf penalty with
+ * slow sched_clocks.
+ */
+extern void enable_sched_clock_irqtime(void);
+extern void disable_sched_clock_irqtime(void);
+#else
+static inline void enable_sched_clock_irqtime(void) {}
+static inline void disable_sched_clock_irqtime(void) {}
+#endif
+
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
 extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
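
The opt-in is deliberately a runtime call rather than a Kconfig-only switch: irq-time accounting reads sched_clock() on every hardirq and softirq transition, so an architecture should enable it only once it knows its clock is cheap and reliable (x86, for instance, flips it on after validating the TSC). A sketch of the intended call site, with hypothetical names:

/* Sketch: an architecture opting in from its clock setup code. */
void __init hypothetical_arch_clock_init(void)
{
        if (arch_clock_is_fast_and_stable())    /* hypothetical predicate */
                enable_sched_clock_irqtime();
}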
@@ -2367,9 +2391,9 @@ extern int __cond_resched_lock(spinlock_t *lock);
 
 extern int __cond_resched_softirq(void);
 
 #define cond_resched_softirq() ({                                       \
-        __might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET);              \
+        __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);      \
         __cond_resched_softirq();                                       \
 })
 
 /*
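
The final hunk follows from the softirq-time accounting rework in the same series: preempt_count now distinguishes "softirqs disabled" from "softirq actually executing". local_bh_disable() adds SOFTIRQ_DISABLE_OFFSET while softirq execution adds SOFTIRQ_OFFSET, so __might_sleep() must test against the disable offset. The relationship between the two, as that series defines it in hardirq.h:

/* Two distinct offsets now live in the softirq bits of preempt_count. */
#define SOFTIRQ_DISABLE_OFFSET  (2 * SOFTIRQ_OFFSET)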