Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--   include/linux/sched.h | 63
1 file changed, 49 insertions(+), 14 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 223874538b33..d747f948b34e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -21,7 +21,8 @@
 #define CLONE_DETACHED		0x00400000	/* Unused, ignored */
 #define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
 #define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
-#define CLONE_STOPPED		0x02000000	/* Start in stopped state */
+/* 0x02000000 was previously the unused CLONE_STOPPED (Start in stopped state)
+   and is now available for re-use. */
 #define CLONE_NEWUTS		0x04000000	/* New utsname group? */
 #define CLONE_NEWIPC		0x08000000	/* New ipcs */
 #define CLONE_NEWUSER		0x10000000	/* New user namespace */
@@ -70,7 +71,6 @@ struct sched_param {
 #include <linux/smp.h>
 #include <linux/sem.h>
 #include <linux/signal.h>
-#include <linux/path.h>
 #include <linux/compiler.h>
 #include <linux/completion.h>
 #include <linux/pid.h>
@@ -88,7 +88,6 @@ struct sched_param {
 #include <linux/timer.h>
 #include <linux/hrtimer.h>
 #include <linux/task_io_accounting.h>
-#include <linux/kobject.h>
 #include <linux/latencytop.h>
 #include <linux/cred.h>
 
@@ -316,6 +315,7 @@ extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
 				  size_t *lenp, loff_t *ppos);
 extern unsigned int softlockup_panic;
 extern int softlockup_thresh;
+void lockup_detector_init(void);
 #else
 static inline void touch_softlockup_watchdog(void)
 {
@@ -326,6 +326,9 @@ static inline void touch_softlockup_watchdog_sync(void)
 static inline void touch_all_softlockup_watchdogs(void)
 {
 }
+static inline void lockup_detector_init(void)
+{
+}
 #endif
 
 #ifdef CONFIG_DETECT_HUNG_TASK
@@ -431,6 +434,7 @@ extern int get_dumpable(struct mm_struct *mm);
 #endif
 					/* leave room for more dump flags */
 #define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
+#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
 
 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
 
@@ -509,6 +513,8 @@ struct thread_group_cputimer {
 	spinlock_t lock;
 };
 
+struct autogroup;
+
 /*
  * NOTE! "signal_struct" does not have it's own
  * locking, because a shared signal_struct always
@@ -576,6 +582,9 @@ struct signal_struct {
 
 	struct tty_struct *tty; /* NULL if no tty */
 
+#ifdef CONFIG_SCHED_AUTOGROUP
+	struct autogroup *autogroup;
+#endif
 	/*
 	 * Cumulative resource counters for dead threads in the group,
 	 * and for reaped dead child processes forked by this group.
@@ -626,6 +635,8 @@ struct signal_struct {
 
 	int oom_adj;		/* OOM kill score adjustment (bit shift) */
 	int oom_score_adj;	/* OOM kill score adjustment */
+	int oom_score_adj_min;	/* OOM kill score adjustment minimum value.
+				 * Only settable by CAP_SYS_RESOURCE. */
 
 	struct mutex cred_guard_mutex;	/* guard against foreign influences on
 					 * credential calculations
@@ -676,7 +687,7 @@ struct user_struct {
 	atomic_t fanotify_listeners;
 #endif
 #ifdef CONFIG_EPOLL
-	atomic_t epoll_watches;	/* The number of file descriptors currently watched */
+	atomic_long_t epoll_watches;	/* The number of file descriptors currently watched */
 #endif
 #ifdef CONFIG_POSIX_MQUEUE
 	/* protected by mq_lock	*/
@@ -1229,13 +1240,18 @@ struct task_struct {
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	struct rcu_node *rcu_blocked_node;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+#ifdef CONFIG_RCU_BOOST
+	struct rt_mutex *rcu_boost_mutex;
+#endif /* #ifdef CONFIG_RCU_BOOST */
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	struct sched_info sched_info;
 #endif
 
 	struct list_head tasks;
+#ifdef CONFIG_SMP
 	struct plist_node pushable_tasks;
+#endif
 
 	struct mm_struct *mm, *active_mm;
 #if defined(SPLIT_RSS_COUNTING)
@@ -1759,7 +1775,8 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #ifdef CONFIG_PREEMPT_RCU
 
 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
-#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
+#define RCU_READ_UNLOCK_BOOSTED (1 << 1) /* boosted while in RCU read-side. */
+#define RCU_READ_UNLOCK_NEED_QS (1 << 2) /* RCU core needs CPU response. */
 
 static inline void rcu_copy_process(struct task_struct *p)
 {
@@ -1767,7 +1784,10 @@ static inline void rcu_copy_process(struct task_struct *p)
 	p->rcu_read_unlock_special = 0;
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	p->rcu_blocked_node = NULL;
-#endif
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+#ifdef CONFIG_RCU_BOOST
+	p->rcu_boost_mutex = NULL;
+#endif /* #ifdef CONFIG_RCU_BOOST */
 	INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
@@ -1872,14 +1892,11 @@ extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
 
 #ifdef CONFIG_HOTPLUG_CPU
-extern void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p);
 extern void idle_task_exit(void);
 #else
 static inline void idle_task_exit(void) {}
 #endif
 
-extern void sched_idle_next(void);
-
 #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
 extern void wake_up_idle_cpu(int cpu);
 #else
@@ -1889,8 +1906,6 @@ static inline void wake_up_idle_cpu(int cpu) { }
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
-extern unsigned int sysctl_sched_shares_ratelimit;
-extern unsigned int sysctl_sched_shares_thresh;
 extern unsigned int sysctl_sched_child_runs_first;
 
 enum sched_tunable_scaling {
@@ -1906,6 +1921,7 @@ extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
 extern unsigned int sysctl_sched_time_avg;
 extern unsigned int sysctl_timer_migration;
+extern unsigned int sysctl_sched_shares_window;
 
 int sched_proc_update_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *length,
@@ -1931,6 +1947,24 @@ int sched_rt_handler(struct ctl_table *table, int write,
 
 extern unsigned int sysctl_sched_compat_yield;
 
+#ifdef CONFIG_SCHED_AUTOGROUP
+extern unsigned int sysctl_sched_autogroup_enabled;
+
+extern void sched_autogroup_create_attach(struct task_struct *p);
+extern void sched_autogroup_detach(struct task_struct *p);
+extern void sched_autogroup_fork(struct signal_struct *sig);
+extern void sched_autogroup_exit(struct signal_struct *sig);
+#ifdef CONFIG_PROC_FS
+extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
+extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice);
+#endif
+#else
+static inline void sched_autogroup_create_attach(struct task_struct *p) { }
+static inline void sched_autogroup_detach(struct task_struct *p) { }
+static inline void sched_autogroup_fork(struct signal_struct *sig) { }
+static inline void sched_autogroup_exit(struct signal_struct *sig) { }
+#endif
+
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(struct task_struct *p);
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
@@ -1949,9 +1983,10 @@ extern int task_nice(const struct task_struct *p);
 extern int can_nice(const struct task_struct *p, const int nice);
 extern int task_curr(const struct task_struct *p);
 extern int idle_cpu(int cpu);
-extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
+extern int sched_setscheduler(struct task_struct *, int,
+			      const struct sched_param *);
 extern int sched_setscheduler_nocheck(struct task_struct *, int,
-				      struct sched_param *);
+				      const struct sched_param *);
 extern struct task_struct *idle_task(int cpu);
 extern struct task_struct *curr_task(int cpu);
 extern void set_curr_task(int cpu, struct task_struct *p);
@@ -2478,7 +2513,7 @@ extern void normalize_rt_tasks(void);
 
 #ifdef CONFIG_CGROUP_SCHED
 
-extern struct task_group init_task_group;
+extern struct task_group root_task_group;
 
 extern struct task_group *sched_create_group(struct task_group *parent);
 extern void sched_destroy_group(struct task_group *tg);
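
Note: the constified sched_setscheduler()/sched_setscheduler_nocheck() prototypes in the hunk above allow callers to keep their scheduling parameters in read-only data. The sketch below is illustrative only and is not part of this patch; the module name, thread name, and identifiers (sched_param_demo, demo_thread_fn) are made up. It shows a kernel thread passing a const struct sched_param to the new prototype.

/*
 * sched_param_demo: hypothetical module showing a caller that benefits
 * from the const-qualified sched_setscheduler() prototype.
 */
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

static struct task_struct *demo_task;

static int demo_thread_fn(void *data)
{
	/* With the const prototype this initializer can live in rodata. */
	static const struct sched_param fifo_param = {
		.sched_priority = MAX_RT_PRIO - 1,
	};

	sched_setscheduler(current, SCHED_FIFO, &fifo_param);

	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);

	return 0;
}

static int __init demo_init(void)
{
	demo_task = kthread_run(demo_thread_fn, NULL, "sched-param-demo");
	return IS_ERR(demo_task) ? PTR_ERR(demo_task) : 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");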