about summary refs log tree commit diff stats
path: root/include/linux/sched.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--include/linux/sched.h47
1 files changed, 38 insertions, 9 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a99d735db3df..777cd01e240e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -513,6 +513,8 @@ struct thread_group_cputimer {
513 spinlock_t lock; 513 spinlock_t lock;
514}; 514};
515 515
516struct autogroup;
517
516/* 518/*
517 * NOTE! "signal_struct" does not have it's own 519 * NOTE! "signal_struct" does not have it's own
518 * locking, because a shared signal_struct always 520 * locking, because a shared signal_struct always
@@ -580,6 +582,9 @@ struct signal_struct {
580 582
581 struct tty_struct *tty; /* NULL if no tty */ 583 struct tty_struct *tty; /* NULL if no tty */
582 584
585#ifdef CONFIG_SCHED_AUTOGROUP
586 struct autogroup *autogroup;
587#endif
583 /* 588 /*
584 * Cumulative resource counters for dead threads in the group, 589 * Cumulative resource counters for dead threads in the group,
585 * and for reaped dead child processes forked by this group. 590 * and for reaped dead child processes forked by this group.
@@ -1233,13 +1238,18 @@ struct task_struct {
1233#ifdef CONFIG_TREE_PREEMPT_RCU 1238#ifdef CONFIG_TREE_PREEMPT_RCU
1234 struct rcu_node *rcu_blocked_node; 1239 struct rcu_node *rcu_blocked_node;
1235#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 1240#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1241#ifdef CONFIG_RCU_BOOST
1242 struct rt_mutex *rcu_boost_mutex;
1243#endif /* #ifdef CONFIG_RCU_BOOST */
1236 1244
1237#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 1245#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1238 struct sched_info sched_info; 1246 struct sched_info sched_info;
1239#endif 1247#endif
1240 1248
1241 struct list_head tasks; 1249 struct list_head tasks;
1250#ifdef CONFIG_SMP
1242 struct plist_node pushable_tasks; 1251 struct plist_node pushable_tasks;
1252#endif
1243 1253
1244 struct mm_struct *mm, *active_mm; 1254 struct mm_struct *mm, *active_mm;
1245#if defined(SPLIT_RSS_COUNTING) 1255#if defined(SPLIT_RSS_COUNTING)
@@ -1763,7 +1773,8 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
1763#ifdef CONFIG_PREEMPT_RCU 1773#ifdef CONFIG_PREEMPT_RCU
1764 1774
1765#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ 1775#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
1766#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */ 1776#define RCU_READ_UNLOCK_BOOSTED (1 << 1) /* boosted while in RCU read-side. */
1777#define RCU_READ_UNLOCK_NEED_QS (1 << 2) /* RCU core needs CPU response. */
1767 1778
1768static inline void rcu_copy_process(struct task_struct *p) 1779static inline void rcu_copy_process(struct task_struct *p)
1769{ 1780{
@@ -1771,7 +1782,10 @@ static inline void rcu_copy_process(struct task_struct *p)
1771 p->rcu_read_unlock_special = 0; 1782 p->rcu_read_unlock_special = 0;
1772#ifdef CONFIG_TREE_PREEMPT_RCU 1783#ifdef CONFIG_TREE_PREEMPT_RCU
1773 p->rcu_blocked_node = NULL; 1784 p->rcu_blocked_node = NULL;
1774#endif 1785#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1786#ifdef CONFIG_RCU_BOOST
1787 p->rcu_boost_mutex = NULL;
1788#endif /* #ifdef CONFIG_RCU_BOOST */
1775 INIT_LIST_HEAD(&p->rcu_node_entry); 1789 INIT_LIST_HEAD(&p->rcu_node_entry);
1776} 1790}
1777 1791
@@ -1876,14 +1890,11 @@ extern void sched_clock_idle_sleep_event(void);
1876extern void sched_clock_idle_wakeup_event(u64 delta_ns); 1890extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1877 1891
1878#ifdef CONFIG_HOTPLUG_CPU 1892#ifdef CONFIG_HOTPLUG_CPU
1879extern void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p);
1880extern void idle_task_exit(void); 1893extern void idle_task_exit(void);
1881#else 1894#else
1882static inline void idle_task_exit(void) {} 1895static inline void idle_task_exit(void) {}
1883#endif 1896#endif
1884 1897
1885extern void sched_idle_next(void);
1886
1887#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP) 1898#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
1888extern void wake_up_idle_cpu(int cpu); 1899extern void wake_up_idle_cpu(int cpu);
1889#else 1900#else
@@ -1893,8 +1904,6 @@ static inline void wake_up_idle_cpu(int cpu) { }
1893extern unsigned int sysctl_sched_latency; 1904extern unsigned int sysctl_sched_latency;
1894extern unsigned int sysctl_sched_min_granularity; 1905extern unsigned int sysctl_sched_min_granularity;
1895extern unsigned int sysctl_sched_wakeup_granularity; 1906extern unsigned int sysctl_sched_wakeup_granularity;
1896extern unsigned int sysctl_sched_shares_ratelimit;
1897extern unsigned int sysctl_sched_shares_thresh;
1898extern unsigned int sysctl_sched_child_runs_first; 1907extern unsigned int sysctl_sched_child_runs_first;
1899 1908
1900enum sched_tunable_scaling { 1909enum sched_tunable_scaling {
@@ -1910,6 +1919,7 @@ extern unsigned int sysctl_sched_migration_cost;
1910extern unsigned int sysctl_sched_nr_migrate; 1919extern unsigned int sysctl_sched_nr_migrate;
1911extern unsigned int sysctl_sched_time_avg; 1920extern unsigned int sysctl_sched_time_avg;
1912extern unsigned int sysctl_timer_migration; 1921extern unsigned int sysctl_timer_migration;
1922extern unsigned int sysctl_sched_shares_window;
1913 1923
1914int sched_proc_update_handler(struct ctl_table *table, int write, 1924int sched_proc_update_handler(struct ctl_table *table, int write,
1915 void __user *buffer, size_t *length, 1925 void __user *buffer, size_t *length,
@@ -1935,6 +1945,24 @@ int sched_rt_handler(struct ctl_table *table, int write,
1935 1945
1936extern unsigned int sysctl_sched_compat_yield; 1946extern unsigned int sysctl_sched_compat_yield;
1937 1947
1948#ifdef CONFIG_SCHED_AUTOGROUP
1949extern unsigned int sysctl_sched_autogroup_enabled;
1950
1951extern void sched_autogroup_create_attach(struct task_struct *p);
1952extern void sched_autogroup_detach(struct task_struct *p);
1953extern void sched_autogroup_fork(struct signal_struct *sig);
1954extern void sched_autogroup_exit(struct signal_struct *sig);
1955#ifdef CONFIG_PROC_FS
1956extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
1957extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice);
1958#endif
1959#else
1960static inline void sched_autogroup_create_attach(struct task_struct *p) { }
1961static inline void sched_autogroup_detach(struct task_struct *p) { }
1962static inline void sched_autogroup_fork(struct signal_struct *sig) { }
1963static inline void sched_autogroup_exit(struct signal_struct *sig) { }
1964#endif
1965
1938#ifdef CONFIG_RT_MUTEXES 1966#ifdef CONFIG_RT_MUTEXES
1939extern int rt_mutex_getprio(struct task_struct *p); 1967extern int rt_mutex_getprio(struct task_struct *p);
1940extern void rt_mutex_setprio(struct task_struct *p, int prio); 1968extern void rt_mutex_setprio(struct task_struct *p, int prio);
@@ -1953,9 +1981,10 @@ extern int task_nice(const struct task_struct *p);
1953extern int can_nice(const struct task_struct *p, const int nice); 1981extern int can_nice(const struct task_struct *p, const int nice);
1954extern int task_curr(const struct task_struct *p); 1982extern int task_curr(const struct task_struct *p);
1955extern int idle_cpu(int cpu); 1983extern int idle_cpu(int cpu);
1956extern int sched_setscheduler(struct task_struct *, int, struct sched_param *); 1984extern int sched_setscheduler(struct task_struct *, int,
1985 const struct sched_param *);
1957extern int sched_setscheduler_nocheck(struct task_struct *, int, 1986extern int sched_setscheduler_nocheck(struct task_struct *, int,
1958 struct sched_param *); 1987 const struct sched_param *);
1959extern struct task_struct *idle_task(int cpu); 1988extern struct task_struct *idle_task(int cpu);
1960extern struct task_struct *curr_task(int cpu); 1989extern struct task_struct *curr_task(int cpu);
1961extern void set_curr_task(int cpu, struct task_struct *p); 1990extern void set_curr_task(int cpu, struct task_struct *p);