Diffstat (limited to 'include/linux/sched.h'):
 include/linux/sched.h | 81
 1 file changed, 62 insertions(+), 19 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4032ec1cf836..0c3854b0d4b1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -361,6 +361,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
+extern void schedule_preempt_disabled(void);
 extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
 
 struct nsproxy;
@@ -552,6 +553,18 @@ struct signal_struct {
 	int			group_stop_count;
 	unsigned int		flags; /* see SIGNAL_* flags below */
 
+	/*
+	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
+	 * manager, to re-parent orphan (double-forking) child processes
+	 * to this process instead of 'init'. The service manager is
+	 * able to receive SIGCHLD signals and is able to investigate
+	 * the process until it calls wait(). All children of this
+	 * process will inherit a flag if they should look for a
+	 * child_subreaper process at exit.
+	 */
+	unsigned int		is_child_subreaper:1;
+	unsigned int		has_child_subreaper:1;
+
 	/* POSIX.1b Interval Timers */
 	struct list_head posix_timers;
 
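For reference, the new subreaper flag is driven from userspace through prctl(). Below is a minimal sketch (not part of the patch) of a service manager marking itself as a child subreaper so that double-forked descendants are re-parented to it; the PR_SET_CHILD_SUBREAPER value is assumed from this series.

#include <sys/prctl.h>
#include <sys/wait.h>
#include <stdio.h>

#ifndef PR_SET_CHILD_SUBREAPER
#define PR_SET_CHILD_SUBREAPER 36	/* value assumed from this series */
#endif

int main(void)
{
	/* Re-parent orphaned descendants to us instead of init. */
	if (prctl(PR_SET_CHILD_SUBREAPER, 1) < 0) {
		perror("prctl");
		return 1;
	}

	/* ... spawn services; double-forked children now reach our wait() ... */
	while (wait(NULL) > 0)
		;
	return 0;
}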
@@ -905,6 +918,7 @@ struct sched_group_power {
 	 * single CPU.
 	 */
 	unsigned int power, power_orig;
+	unsigned long next_update;
 	/*
 	 * Number of busy cpus in this group.
 	 */
@@ -1052,6 +1066,8 @@ static inline int test_sd_parent(struct sched_domain *sd, int flag)
 unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
 unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
 
+bool cpus_share_cache(int this_cpu, int that_cpu);
+
 #else /* CONFIG_SMP */
 
 struct sched_domain_attr;
@@ -1061,6 +1077,12 @@ partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 			struct sched_domain_attr *dattr_new)
 {
 }
+
+static inline bool cpus_share_cache(int this_cpu, int that_cpu)
+{
+	return true;
+}
+
 #endif	/* !CONFIG_SMP */
 
 
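A hedged sketch of the kind of caller this pair enables (illustrative only, not taken from this patch): a wakeup path can ask whether two CPUs share a last-level cache before deciding how to queue work. The function and parameter names below are hypothetical; on !CONFIG_SMP builds the stub above makes the answer trivially true.

/* Illustrative only: prefer the cheap local path when caches are shared. */
static bool wakeup_can_stay_local(int waker_cpu, int target_cpu)
{
	return cpus_share_cache(waker_cpu, target_cpu);
}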
@@ -1225,6 +1247,12 @@ struct sched_rt_entity {
 #endif
 };
 
+/*
+ * default timeslice is 100 msecs (used only for SCHED_RR tasks).
+ * Timeslices get refilled after they expire.
+ */
+#define RR_TIMESLICE		(100 * HZ / 1000)
+
 struct rcu_node;
 
 enum perf_event_task_context {
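RR_TIMESLICE is expressed in jiffies, so 100 * HZ / 1000 is 100 jiffies with HZ=1000 and 25 with HZ=250. From userspace the effective SCHED_RR quantum can be read back with sched_rr_get_interval(); a small sketch, independent of this patch:

#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* 0 means the calling thread; for a SCHED_RR thread this reflects RR_TIMESLICE */
	if (sched_rr_get_interval(0, &ts) == 0)
		printf("RR quantum: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}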
@@ -1319,6 +1347,11 @@ struct task_struct {
 	unsigned sched_reset_on_fork:1;
 	unsigned sched_contributes_to_load:1;
 
+#ifdef CONFIG_GENERIC_HARDIRQS
+	/* IRQ handler threads */
+	unsigned irq_thread:1;
+#endif
+
 	pid_t pid;
 	pid_t tgid;
 
@@ -1427,11 +1460,6 @@ struct task_struct {
 				 * mempolicy */
 	spinlock_t alloc_lock;
 
-#ifdef CONFIG_GENERIC_HARDIRQS
-	/* IRQ handler threads */
-	struct irqaction *irqaction;
-#endif
-
 	/* Protection of the PI data structures: */
 	raw_spinlock_t pi_lock;
 
@@ -1498,7 +1526,7 @@ struct task_struct {
 #endif
 #ifdef CONFIG_CPUSETS
 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
-	int mems_allowed_change_disable;
+	seqcount_t mems_allowed_seq;	/* Seqence no to catch updates */
 	int cpuset_mem_spread_rotor;
 	int cpuset_slab_spread_rotor;
 #endif
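The seqcount replaces the old change-disable counter: readers sample mems_allowed inside a retry loop instead of blocking updaters. A minimal reader sketch under that assumption (the helper name is hypothetical; only the field comes from this patch):

/* Illustrative reader pattern for the new seqcount field. */
static nodemask_t sample_mems_allowed(struct task_struct *tsk)
{
	nodemask_t nodes;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&tsk->mems_allowed_seq);
		nodes = tsk->mems_allowed;
	} while (read_seqcount_retry(&tsk->mems_allowed_seq, seq));

	return nodes;
}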
@@ -1777,7 +1805,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 /*
  * Per process flags
  */
-#define PF_STARTING	0x00000002	/* being created */
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
@@ -1864,8 +1891,7 @@ extern void task_clear_jobctl_pending(struct task_struct *task,
 #ifdef CONFIG_PREEMPT_RCU
 
 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
-#define RCU_READ_UNLOCK_BOOSTED (1 << 1) /* boosted while in RCU read-side. */
-#define RCU_READ_UNLOCK_NEED_QS (1 << 2) /* RCU core needs CPU response. */
+#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
 
 static inline void rcu_copy_process(struct task_struct *p)
 {
@@ -2049,7 +2075,7 @@ extern void sched_autogroup_fork(struct signal_struct *sig);
 extern void sched_autogroup_exit(struct signal_struct *sig);
 #ifdef CONFIG_PROC_FS
 extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
-extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice);
+extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
 #endif
 #else
 static inline void sched_autogroup_create_attach(struct task_struct *p) { }
@@ -2066,12 +2092,20 @@ extern unsigned int sysctl_sched_cfs_bandwidth_slice;
 extern int rt_mutex_getprio(struct task_struct *p);
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
 extern void rt_mutex_adjust_pi(struct task_struct *p);
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+	return tsk->pi_blocked_on != NULL;
+}
 #else
 static inline int rt_mutex_getprio(struct task_struct *p)
 {
 	return p->normal_prio;
 }
 # define rt_mutex_adjust_pi(p)		do { } while (0)
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+	return false;
+}
 #endif
 
 extern bool yield_to(struct task_struct *p, bool preempt);
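The new helper gives scheduler code a cheap way to ask whether a task is currently blocked on a priority-inheritance rtmutex. A hypothetical caller sketch, illustrating the intent rather than quoting this series; the function name and the skipped work are assumptions:

/* Illustrative: skip optional pre-schedule work for a PI-blocked task. */
static void maybe_flush_io_before_schedule(struct task_struct *tsk)
{
	if (tsk_is_pi_blocked(tsk))
		return;	/* blocked on an rtmutex; keep the schedule path minimal */

	/* ... flush plugged block I/O, etc. ... */
}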
@@ -2088,9 +2122,9 @@ extern int sched_setscheduler_nocheck(struct task_struct *, int,
 extern struct task_struct *idle_task(int cpu);
 /**
  * is_idle_task - is the specified task an idle task?
- * @tsk: the task in question.
+ * @p: the task in question.
  */
-static inline bool is_idle_task(struct task_struct *p)
+static inline bool is_idle_task(const struct task_struct *p)
 {
 	return p->pid == 0;
 }
@@ -2259,6 +2293,12 @@ static inline void mmdrop(struct mm_struct * mm)
 extern void mmput(struct mm_struct *);
 /* Grab a reference to a task's mm, if it is not already going away */
 extern struct mm_struct *get_task_mm(struct task_struct *task);
+/*
+ * Grab a reference to a task's mm, if it is not already going away
+ * and ptrace_may_access with the mode parameter passed to it
+ * succeeds.
+ */
+extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
 /* Remove the current tasks stale references to the old mm_struct */
 extern void mm_release(struct task_struct *, struct mm_struct *);
 /* Allocate a new mm structure and copy contents from tsk->mm */
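A hedged sketch of how mm_access() is meant to be consumed (illustrative, not from this patch): callers pass a ptrace access mode, check the result, and drop the reference with mmput(). The exact error-return convention shown here is an assumption.

/* Illustrative: inspect another task's mm only if the access check passes. */
static int inspect_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	mm = mm_access(task, PTRACE_MODE_READ);	/* mode assumed */
	if (IS_ERR_OR_NULL(mm))
		return mm ? PTR_ERR(mm) : -ESRCH;	/* convention assumed */

	/* ... use mm ... */

	mmput(mm);
	return 0;
}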
@@ -2365,7 +2405,7 @@ static inline int thread_group_empty(struct task_struct *p)
  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
  * subscriptions and synchronises with wait4(). Also used in procfs. Also
  * pins the final release of task.io_context. Also protects ->cpuset and
- * ->cgroup.subsys[].
+ * ->cgroup.subsys[]. And ->vfork_done.
  *
  * Nests both inside and outside of read_lock(&tasklist_lock).
  * It must not be nested with write_lock_irq(&tasklist_lock),
@@ -2384,12 +2424,15 @@ static inline void task_unlock(struct task_struct *p)
 extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
 							unsigned long *flags);
 
-#define lock_task_sighand(tsk, flags)					\
-({	struct sighand_struct *__ss;					\
-	__cond_lock(&(tsk)->sighand->siglock,				\
-		    (__ss = __lock_task_sighand(tsk, flags)));		\
-	__ss;								\
-})								\
+static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+						       unsigned long *flags)
+{
+	struct sighand_struct *ret;
+
+	ret = __lock_task_sighand(tsk, flags);
+	(void)__cond_lock(&tsk->sighand->siglock, ret);
+	return ret;
+}
 
 static inline void unlock_task_sighand(struct task_struct *tsk,
 						unsigned long *flags)
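Behaviour of the converted helper is unchanged: it returns NULL if the task's sighand is already gone, otherwise returns it with ->siglock held and interrupt state saved into *flags. The usual calling pattern, sketched for reference (not part of the patch; the caller name is hypothetical):

/* Illustrative caller: take and release the sighand lock safely. */
static void poke_signals(struct task_struct *tsk)
{
	unsigned long flags;

	if (!lock_task_sighand(tsk, &flags))
		return;		/* task is exiting, no sighand */

	/* ... inspect or modify tsk->signal / pending signals here ... */

	unlock_task_sighand(tsk, &flags);
}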