path: root/include/linux/sched.h
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  228
1 file changed, 78 insertions(+), 150 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d2112477ff5e..d35d2b6ddbfb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -51,6 +51,7 @@ struct sched_param {
 #include <linux/cred.h>
 #include <linux/llist.h>
 #include <linux/uidgid.h>
+#include <linux/gfp.h>
 
 #include <asm/processor.h>
 
@@ -98,7 +99,6 @@ extern int nr_threads;
 DECLARE_PER_CPU(unsigned long, process_counts);
 extern int nr_processes(void);
 extern unsigned long nr_running(void);
-extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_iowait(void);
 extern unsigned long nr_iowait_cpu(int cpu);
 extern unsigned long this_cpu_load(void);
@@ -304,19 +304,6 @@ static inline void lockup_detector_init(void)
 }
 #endif
 
-#ifdef CONFIG_DETECT_HUNG_TASK
-extern unsigned int sysctl_hung_task_panic;
-extern unsigned long sysctl_hung_task_check_count;
-extern unsigned long sysctl_hung_task_timeout_secs;
-extern unsigned long sysctl_hung_task_warnings;
-extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
-					 void __user *buffer,
-					 size_t *lenp, loff_t *ppos);
-#else
-/* Avoid need for ifdefs elsewhere in the code */
-enum { sysctl_hung_task_timeout_secs = 0 };
-#endif
-
 /* Attach to any functions which should be ignored in wchan output. */
 #define __sched __attribute__((__section__(".sched.text")))
 
@@ -338,23 +325,6 @@ extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
 struct nsproxy;
 struct user_namespace;
 
-/*
- * Default maximum number of active map areas, this limits the number of vmas
- * per mm struct. Users can overwrite this number by sysctl but there is a
- * problem.
- *
- * When a program's coredump is generated as ELF format, a section is created
- * per a vma. In ELF, the number of sections is represented in unsigned short.
- * This means the number of sections should be smaller than 65535 at coredump.
- * Because the kernel adds some informative sections to a image of program at
- * generating coredump, we need some margin. The number of extra sections is
- * 1-3 now and depends on arch. We use "5" as safe margin, here.
- */
-#define MAPCOUNT_ELF_CORE_MARGIN	(5)
-#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
-
-extern int sysctl_max_map_count;
-
 #include <linux/aio.h>
 
 #ifdef CONFIG_MMU
@@ -376,11 +346,6 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
 extern void set_dumpable(struct mm_struct *mm, int value);
 extern int get_dumpable(struct mm_struct *mm);
 
-/* get/set_dumpable() values */
-#define SUID_DUMPABLE_DISABLED	0
-#define SUID_DUMPABLE_ENABLED	1
-#define SUID_DUMPABLE_SAFE	2
-
 /* mm flags */
 /* dumpable bits */
 #define MMF_DUMPABLE	0	/* core dump is permitted */
@@ -1194,6 +1159,7 @@ struct sched_entity {
 	/* rq "owned" by this entity/group: */
 	struct cfs_rq *my_q;
 #endif
+
 /*
  * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
  * removed when useful for applications beyond shares distribution (e.g.
@@ -1208,6 +1174,7 @@ struct sched_entity {
 struct sched_rt_entity {
 	struct list_head run_list;
 	unsigned long timeout;
+	unsigned long watchdog_stamp;
 	unsigned int time_slice;
 
 	struct sched_rt_entity *back;
@@ -1220,11 +1187,6 @@ struct sched_rt_entity {
 #endif
 };
 
-/*
- * default timeslice is 100 msecs (used only for SCHED_RR tasks).
- * Timeslices get refilled after they expire.
- */
-#define RR_TIMESLICE		(100 * HZ / 1000)
 
 struct rcu_node;
 
@@ -1368,6 +1330,15 @@ struct task_struct {
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
 	struct cputime prev_cputime;
 #endif
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+	seqlock_t vtime_seqlock;
+	unsigned long long vtime_snap;
+	enum {
+		VTIME_SLEEPING = 0,
+		VTIME_USER,
+		VTIME_SYS,
+	} vtime_snap_whence;
+#endif
 	unsigned long nvcsw, nivcsw; /* context switch counts */
 	struct timespec start_time; /* monotonic time */
 	struct timespec real_start_time; /* boot based time */
@@ -1622,37 +1593,6 @@ static inline void set_numabalancing_state(bool enabled)
 }
 #endif
 
-/*
- * Priority of a process goes from 0..MAX_PRIO-1, valid RT
- * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
- * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
- * values are inverted: lower p->prio value means higher priority.
- *
- * The MAX_USER_RT_PRIO value allows the actual maximum
- * RT priority to be separate from the value exported to
- * user-space. This allows kernel threads to set their
- * priority to a value higher than any user task. Note:
- * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
- */
-
-#define MAX_USER_RT_PRIO	100
-#define MAX_RT_PRIO		MAX_USER_RT_PRIO
-
-#define MAX_PRIO		(MAX_RT_PRIO + 40)
-#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
-
-static inline int rt_prio(int prio)
-{
-	if (unlikely(prio < MAX_RT_PRIO))
-		return 1;
-	return 0;
-}
-
-static inline int rt_task(struct task_struct *p)
-{
-	return rt_prio(p->prio);
-}
-
 static inline struct pid *task_pid(struct task_struct *task)
 {
 	return task->pids[PIDTYPE_PID].pid;
@@ -1792,6 +1732,37 @@ static inline void put_task_struct(struct task_struct *t)
 		__put_task_struct(t);
 }
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+extern void task_cputime(struct task_struct *t,
+			 cputime_t *utime, cputime_t *stime);
+extern void task_cputime_scaled(struct task_struct *t,
+				cputime_t *utimescaled, cputime_t *stimescaled);
+extern cputime_t task_gtime(struct task_struct *t);
+#else
+static inline void task_cputime(struct task_struct *t,
+				cputime_t *utime, cputime_t *stime)
+{
+	if (utime)
+		*utime = t->utime;
+	if (stime)
+		*stime = t->stime;
+}
+
+static inline void task_cputime_scaled(struct task_struct *t,
+					cputime_t *utimescaled,
+					cputime_t *stimescaled)
+{
+	if (utimescaled)
+		*utimescaled = t->utimescaled;
+	if (stimescaled)
+		*stimescaled = t->stimescaled;
+}
+
+static inline cputime_t task_gtime(struct task_struct *t)
+{
+	return t->gtime;
+}
+#endif
 extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
 extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
 
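Note on the accessors added above: callers are expected to go through task_cputime()/task_cputime_scaled()/task_gtime() rather than reading t->utime and friends directly, so that a CONFIG_VIRT_CPU_ACCOUNTING_GEN build can fold in the time accrued since the last vtime snapshot. A minimal, hypothetical caller (not part of this patch) might look like:

/* Hypothetical example, not in this patch: print a task's CPU time via the
 * new accessors. With CONFIG_VIRT_CPU_ACCOUNTING_GEN the out-of-line
 * task_cputime() computes up-to-date values; otherwise the inline fallback
 * above simply copies t->utime/t->stime.
 */
#include <linux/sched.h>
#include <linux/printk.h>

static void report_task_cputime(struct task_struct *t)
{
	cputime_t utime, stime;

	task_cputime(t, &utime, &stime);
	pr_info("%s[%d]: utime=%lu stime=%lu gtime=%lu\n",
		t->comm, task_pid_nr(t),
		(unsigned long)utime, (unsigned long)stime,
		(unsigned long)task_gtime(t));
}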
@@ -1815,6 +1786,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
 #define PF_KSWAPD	0x00040000	/* I am kswapd */
+#define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
 #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
 #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
@@ -1852,6 +1824,26 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
 #define used_math() tsk_used_math(current)
 
+/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
+static inline gfp_t memalloc_noio_flags(gfp_t flags)
+{
+	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
+		flags &= ~__GFP_IO;
+	return flags;
+}
+
+static inline unsigned int memalloc_noio_save(void)
+{
+	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
+	current->flags |= PF_MEMALLOC_NOIO;
+	return flags;
+}
+
+static inline void memalloc_noio_restore(unsigned int flags)
+{
+	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
+}
+
 /*
  * task->jobctl flags
  */
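A short usage sketch for the helpers added above (illustrative only, not part of this patch): code that must not recurse into block I/O, for example a device's resume path, brackets its allocations with memalloc_noio_save()/memalloc_noio_restore(); the page allocator is then expected to strip __GFP_IO via memalloc_noio_flags() while PF_MEMALLOC_NOIO is set.

/* Illustrative sketch, assuming a caller that cannot tolerate I/O recursion. */
#include <linux/sched.h>
#include <linux/slab.h>

static void *alloc_buffer_noio(size_t size)
{
	unsigned int noio_flags;
	void *buf;

	noio_flags = memalloc_noio_save();	/* sets PF_MEMALLOC_NOIO */
	/* A GFP_KERNEL allocation here effectively behaves like GFP_NOIO
	 * while the flag is set, because the allocator masks __GFP_IO. */
	buf = kmalloc(size, GFP_KERNEL);
	memalloc_noio_restore(noio_flags);	/* put back the previous state */

	return buf;
}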
@@ -2033,58 +2025,7 @@ extern void wake_up_idle_cpu(int cpu);
 static inline void wake_up_idle_cpu(int cpu) { }
 #endif
 
-extern unsigned int sysctl_sched_latency;
-extern unsigned int sysctl_sched_min_granularity;
-extern unsigned int sysctl_sched_wakeup_granularity;
-extern unsigned int sysctl_sched_child_runs_first;
-
-enum sched_tunable_scaling {
-	SCHED_TUNABLESCALING_NONE,
-	SCHED_TUNABLESCALING_LOG,
-	SCHED_TUNABLESCALING_LINEAR,
-	SCHED_TUNABLESCALING_END,
-};
-extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
-
-extern unsigned int sysctl_numa_balancing_scan_delay;
-extern unsigned int sysctl_numa_balancing_scan_period_min;
-extern unsigned int sysctl_numa_balancing_scan_period_max;
-extern unsigned int sysctl_numa_balancing_scan_period_reset;
-extern unsigned int sysctl_numa_balancing_scan_size;
-extern unsigned int sysctl_numa_balancing_settle_count;
-
-#ifdef CONFIG_SCHED_DEBUG
-extern unsigned int sysctl_sched_migration_cost;
-extern unsigned int sysctl_sched_nr_migrate;
-extern unsigned int sysctl_sched_time_avg;
-extern unsigned int sysctl_timer_migration;
-extern unsigned int sysctl_sched_shares_window;
-
-int sched_proc_update_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *length,
-		loff_t *ppos);
-#endif
-#ifdef CONFIG_SCHED_DEBUG
-static inline unsigned int get_sysctl_timer_migration(void)
-{
-	return sysctl_timer_migration;
-}
-#else
-static inline unsigned int get_sysctl_timer_migration(void)
-{
-	return 1;
-}
-#endif
-extern unsigned int sysctl_sched_rt_period;
-extern int sysctl_sched_rt_runtime;
-
-int sched_rt_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *lenp,
-		loff_t *ppos);
-
 #ifdef CONFIG_SCHED_AUTOGROUP
-extern unsigned int sysctl_sched_autogroup_enabled;
-
 extern void sched_autogroup_create_attach(struct task_struct *p);
 extern void sched_autogroup_detach(struct task_struct *p);
 extern void sched_autogroup_fork(struct signal_struct *sig);
@@ -2100,30 +2041,6 @@ static inline void sched_autogroup_fork(struct signal_struct *sig) { }
 static inline void sched_autogroup_exit(struct signal_struct *sig) { }
 #endif
 
-#ifdef CONFIG_CFS_BANDWIDTH
-extern unsigned int sysctl_sched_cfs_bandwidth_slice;
-#endif
-
-#ifdef CONFIG_RT_MUTEXES
-extern int rt_mutex_getprio(struct task_struct *p);
-extern void rt_mutex_setprio(struct task_struct *p, int prio);
-extern void rt_mutex_adjust_pi(struct task_struct *p);
-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-{
-	return tsk->pi_blocked_on != NULL;
-}
-#else
-static inline int rt_mutex_getprio(struct task_struct *p)
-{
-	return p->normal_prio;
-}
-# define rt_mutex_adjust_pi(p)		do { } while (0)
-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-{
-	return false;
-}
-#endif
-
 extern bool yield_to(struct task_struct *p, bool preempt);
 extern void set_user_nice(struct task_struct *p, long nice);
 extern int task_prio(const struct task_struct *p);
@@ -2259,7 +2176,6 @@ extern struct sigqueue *sigqueue_alloc(void);
 extern void sigqueue_free(struct sigqueue *);
 extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
-extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
 
 static inline void restore_saved_sigmask(void)
 {
@@ -2305,6 +2221,17 @@ static inline int sas_ss_flags(unsigned long sp)
 	       : on_sig_stack(sp) ? SS_ONSTACK : 0);
 }
 
+static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
+{
+	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
+#ifdef CONFIG_STACK_GROWSUP
+		return current->sas_ss_sp;
+#else
+		return current->sas_ss_sp + current->sas_ss_size;
+#endif
+	return sp;
+}
+
 /*
  * Routines for handling mm_structs
  */
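For context on sigsp() above: arch signal-delivery code typically uses it when deciding where to build the signal frame. A hedged sketch of such a helper follows; it is hypothetical, arch-agnostic, and not taken from this patch.

/* Hypothetical helper in the style of an arch get_sigframe(): sigsp()
 * returns the alternate signal stack when SA_ONSTACK applies and the task
 * is not already on it, otherwise the unmodified sp. Frame layout and
 * alignment are arch-specific; a downward-growing, 16-byte-aligned frame
 * is assumed here purely for illustration.
 */
#include <linux/sched.h>
#include <linux/signal.h>

static unsigned long pick_sigframe_sp(struct ksignal *ksig,
				      unsigned long usp, size_t frame_size)
{
	unsigned long sp = sigsp(usp, ksig);

	return (sp - frame_size) & ~15UL;
}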
@@ -2753,14 +2680,15 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
-extern void normalize_rt_tasks(void);
-
 #ifdef CONFIG_CGROUP_SCHED
 
 extern struct task_group root_task_group;
 
 extern struct task_group *sched_create_group(struct task_group *parent);
+extern void sched_online_group(struct task_group *tg,
+			       struct task_group *parent);
 extern void sched_destroy_group(struct task_group *tg);
+extern void sched_offline_group(struct task_group *tg);
 extern void sched_move_task(struct task_struct *tsk);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);