Diffstat (limited to 'include/linux/sched.h')
 -rw-r--r--  include/linux/sched.h | 47 ++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 38 insertions(+), 9 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a99d735db3df..777cd01e240e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -513,6 +513,8 @@ struct thread_group_cputimer {
 	spinlock_t lock;
 };
 
+struct autogroup;
+
 /*
  * NOTE! "signal_struct" does not have it's own
  * locking, because a shared signal_struct always
@@ -580,6 +582,9 @@ struct signal_struct {
 
 	struct tty_struct *tty; /* NULL if no tty */
 
+#ifdef CONFIG_SCHED_AUTOGROUP
+	struct autogroup *autogroup;
+#endif
 	/*
 	 * Cumulative resource counters for dead threads in the group,
 	 * and for reaped dead child processes forked by this group.
@@ -1233,13 +1238,18 @@ struct task_struct {
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	struct rcu_node *rcu_blocked_node;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+#ifdef CONFIG_RCU_BOOST
+	struct rt_mutex *rcu_boost_mutex;
+#endif /* #ifdef CONFIG_RCU_BOOST */
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	struct sched_info sched_info;
 #endif
 
 	struct list_head tasks;
+#ifdef CONFIG_SMP
 	struct plist_node pushable_tasks;
+#endif
 
 	struct mm_struct *mm, *active_mm;
 #if defined(SPLIT_RSS_COUNTING)
@@ -1763,7 +1773,8 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #ifdef CONFIG_PREEMPT_RCU
 
 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
-#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
+#define RCU_READ_UNLOCK_BOOSTED (1 << 1) /* boosted while in RCU read-side. */
+#define RCU_READ_UNLOCK_NEED_QS (1 << 2) /* RCU core needs CPU response. */
 
 static inline void rcu_copy_process(struct task_struct *p)
 {
@@ -1771,7 +1782,10 @@ static inline void rcu_copy_process(struct task_struct *p)
 	p->rcu_read_unlock_special = 0;
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	p->rcu_blocked_node = NULL;
-#endif
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+#ifdef CONFIG_RCU_BOOST
+	p->rcu_boost_mutex = NULL;
+#endif /* #ifdef CONFIG_RCU_BOOST */
 	INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
@@ -1876,14 +1890,11 @@ extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
 
 #ifdef CONFIG_HOTPLUG_CPU
-extern void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p);
 extern void idle_task_exit(void);
 #else
 static inline void idle_task_exit(void) {}
 #endif
 
-extern void sched_idle_next(void);
-
 #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
 extern void wake_up_idle_cpu(int cpu);
 #else
@@ -1893,8 +1904,6 @@ static inline void wake_up_idle_cpu(int cpu) { }
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
-extern unsigned int sysctl_sched_shares_ratelimit;
-extern unsigned int sysctl_sched_shares_thresh;
 extern unsigned int sysctl_sched_child_runs_first;
 
 enum sched_tunable_scaling {
@@ -1910,6 +1919,7 @@ extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
 extern unsigned int sysctl_sched_time_avg;
 extern unsigned int sysctl_timer_migration;
+extern unsigned int sysctl_sched_shares_window;
 
 int sched_proc_update_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *length,
@@ -1935,6 +1945,24 @@ int sched_rt_handler(struct ctl_table *table, int write,
 
 extern unsigned int sysctl_sched_compat_yield;
 
+#ifdef CONFIG_SCHED_AUTOGROUP
+extern unsigned int sysctl_sched_autogroup_enabled;
+
+extern void sched_autogroup_create_attach(struct task_struct *p);
+extern void sched_autogroup_detach(struct task_struct *p);
+extern void sched_autogroup_fork(struct signal_struct *sig);
+extern void sched_autogroup_exit(struct signal_struct *sig);
+#ifdef CONFIG_PROC_FS
+extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
+extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice);
+#endif
+#else
+static inline void sched_autogroup_create_attach(struct task_struct *p) { }
+static inline void sched_autogroup_detach(struct task_struct *p) { }
+static inline void sched_autogroup_fork(struct signal_struct *sig) { }
+static inline void sched_autogroup_exit(struct signal_struct *sig) { }
+#endif
+
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(struct task_struct *p);
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
@@ -1953,9 +1981,10 @@ extern int task_nice(const struct task_struct *p);
 extern int can_nice(const struct task_struct *p, const int nice);
 extern int task_curr(const struct task_struct *p);
 extern int idle_cpu(int cpu);
-extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
+extern int sched_setscheduler(struct task_struct *, int,
+			      const struct sched_param *);
 extern int sched_setscheduler_nocheck(struct task_struct *, int,
-				      struct sched_param *);
+				      const struct sched_param *);
 extern struct task_struct *idle_task(int cpu);
 extern struct task_struct *curr_task(int cpu);
 extern void set_curr_task(int cpu, struct task_struct *p);
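Note on the last hunk: with the prototypes now taking const struct sched_param *, in-kernel callers can pass a pointer to read-only parameters without a cast. A minimal illustrative sketch of such a call site follows; the helper function is hypothetical and not part of this patch, while sched_setscheduler_nocheck(), SCHED_FIFO and struct sched_param are the kernel's own.

#include <linux/sched.h>

/* Hypothetical helper, for illustration only: switch a task to SCHED_FIFO. */
static int make_task_fifo(struct task_struct *p)
{
	/* A read-only parameter block is fine now that the argument is const-qualified. */
	static const struct sched_param param = { .sched_priority = 1 };

	return sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
}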