Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	222
1 files changed, 78 insertions, 144 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d2112477ff5e..0655570c67eb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -51,6 +51,7 @@ struct sched_param {
 #include <linux/cred.h>
 #include <linux/llist.h>
 #include <linux/uidgid.h>
+#include <linux/gfp.h>
 
 #include <asm/processor.h>
 
@@ -304,19 +305,6 @@ static inline void lockup_detector_init(void)
 }
 #endif
 
-#ifdef CONFIG_DETECT_HUNG_TASK
-extern unsigned int sysctl_hung_task_panic;
-extern unsigned long sysctl_hung_task_check_count;
-extern unsigned long sysctl_hung_task_timeout_secs;
-extern unsigned long sysctl_hung_task_warnings;
-extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
-					 void __user *buffer,
-					 size_t *lenp, loff_t *ppos);
-#else
-/* Avoid need for ifdefs elsewhere in the code */
-enum { sysctl_hung_task_timeout_secs = 0 };
-#endif
-
 /* Attach to any functions which should be ignored in wchan output. */
 #define __sched		__attribute__((__section__(".sched.text")))
 
@@ -338,23 +326,6 @@ extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
 struct nsproxy;
 struct user_namespace;
 
-/*
- * Default maximum number of active map areas, this limits the number of vmas
- * per mm struct. Users can overwrite this number by sysctl but there is a
- * problem.
- *
- * When a program's coredump is generated as ELF format, a section is created
- * per a vma. In ELF, the number of sections is represented in unsigned short.
- * This means the number of sections should be smaller than 65535 at coredump.
- * Because the kernel adds some informative sections to a image of program at
- * generating coredump, we need some margin. The number of extra sections is
- * 1-3 now and depends on arch. We use "5" as safe margin, here.
- */
-#define MAPCOUNT_ELF_CORE_MARGIN	(5)
-#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
-
-extern int sysctl_max_map_count;
-
 #include <linux/aio.h>
 
 #ifdef CONFIG_MMU
@@ -1194,6 +1165,7 @@ struct sched_entity {
 	/* rq "owned" by this entity/group: */
 	struct cfs_rq		*my_q;
 #endif
+
 /*
  * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
  * removed when useful for applications beyond shares distribution (e.g.
@@ -1208,6 +1180,7 @@ struct sched_entity {
 struct sched_rt_entity {
 	struct list_head run_list;
 	unsigned long timeout;
+	unsigned long watchdog_stamp;
 	unsigned int time_slice;
 
 	struct sched_rt_entity *back;
@@ -1220,11 +1193,6 @@ struct sched_rt_entity {
 #endif
 };
 
-/*
- * default timeslice is 100 msecs (used only for SCHED_RR tasks).
- * Timeslices get refilled after they expire.
- */
-#define RR_TIMESLICE		(100 * HZ / 1000)
 
 struct rcu_node;
 
@@ -1368,6 +1336,15 @@ struct task_struct {
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
 	struct cputime prev_cputime;
 #endif
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+	seqlock_t vtime_seqlock;
+	unsigned long long vtime_snap;
+	enum {
+		VTIME_SLEEPING = 0,
+		VTIME_USER,
+		VTIME_SYS,
+	} vtime_snap_whence;
+#endif
 	unsigned long nvcsw, nivcsw; /* context switch counts */
 	struct timespec start_time;		/* monotonic time */
 	struct timespec real_start_time;	/* boot based time */
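Review note: the new vtime_seqlock is meant to let readers take a consistent snapshot of vtime_snap and vtime_snap_whence. A minimal sketch, assuming the standard read_seqbegin()/read_seqretry() pattern; the helper below is hypothetical and not part of this patch.

/* Hypothetical reader: sample @t's vtime snapshot consistently. */
static inline unsigned long long vtime_snap_sample(struct task_struct *t)
{
	unsigned int seq;
	unsigned long long snap;

	do {
		seq = read_seqbegin(&t->vtime_seqlock);
		snap = t->vtime_snap;
	} while (read_seqretry(&t->vtime_seqlock, seq));

	return snap;
}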
@@ -1622,37 +1599,6 @@ static inline void set_numabalancing_state(bool enabled)
 }
 #endif
 
-/*
- * Priority of a process goes from 0..MAX_PRIO-1, valid RT
- * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
- * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
- * values are inverted: lower p->prio value means higher priority.
- *
- * The MAX_USER_RT_PRIO value allows the actual maximum
- * RT priority to be separate from the value exported to
- * user-space. This allows kernel threads to set their
- * priority to a value higher than any user task. Note:
- * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
- */
-
-#define MAX_USER_RT_PRIO	100
-#define MAX_RT_PRIO		MAX_USER_RT_PRIO
-
-#define MAX_PRIO		(MAX_RT_PRIO + 40)
-#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
-
-static inline int rt_prio(int prio)
-{
-	if (unlikely(prio < MAX_RT_PRIO))
-		return 1;
-	return 0;
-}
-
-static inline int rt_task(struct task_struct *p)
-{
-	return rt_prio(p->prio);
-}
-
 static inline struct pid *task_pid(struct task_struct *task)
 {
 	return task->pids[PIDTYPE_PID].pid;
@@ -1792,6 +1738,37 @@ static inline void put_task_struct(struct task_struct *t)
 		__put_task_struct(t);
 }
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+extern void task_cputime(struct task_struct *t,
+			 cputime_t *utime, cputime_t *stime);
+extern void task_cputime_scaled(struct task_struct *t,
+				cputime_t *utimescaled, cputime_t *stimescaled);
+extern cputime_t task_gtime(struct task_struct *t);
+#else
+static inline void task_cputime(struct task_struct *t,
+				cputime_t *utime, cputime_t *stime)
+{
+	if (utime)
+		*utime = t->utime;
+	if (stime)
+		*stime = t->stime;
+}
+
+static inline void task_cputime_scaled(struct task_struct *t,
+				       cputime_t *utimescaled,
+				       cputime_t *stimescaled)
+{
+	if (utimescaled)
+		*utimescaled = t->utimescaled;
+	if (stimescaled)
+		*stimescaled = t->stimescaled;
+}
+
+static inline cputime_t task_gtime(struct task_struct *t)
+{
+	return t->gtime;
+}
+#endif
 extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
 extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
 
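Review note: a minimal usage sketch of the new accessors; the reporting function below is hypothetical. The point is that callers read the times through task_cputime() and task_gtime() rather than touching t->utime/t->stime/t->gtime directly.

/* Hypothetical caller of the new accessors. */
static void report_task_times(struct task_struct *t)
{
	cputime_t utime, stime;

	task_cputime(t, &utime, &stime);
	pr_info("utime=%llu stime=%llu gtime=%llu\n",
		(unsigned long long)utime,
		(unsigned long long)stime,
		(unsigned long long)task_gtime(t));
}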
@@ -1815,6 +1792,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
 #define PF_KSWAPD	0x00040000	/* I am kswapd */
+#define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
 #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
 #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
@@ -1852,6 +1830,26 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
 #define used_math() tsk_used_math(current)
 
+/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
+static inline gfp_t memalloc_noio_flags(gfp_t flags)
+{
+	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
+		flags &= ~__GFP_IO;
+	return flags;
+}
+
+static inline unsigned int memalloc_noio_save(void)
+{
+	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
+	current->flags |= PF_MEMALLOC_NOIO;
+	return flags;
+}
+
+static inline void memalloc_noio_restore(unsigned int flags)
+{
+	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
+}
+
 /*
  * task->jobctl flags
  */
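Review note: a minimal usage sketch of the save/restore pair, assuming the page allocator filters the gfp mask through memalloc_noio_flags() (that hook lives in the mm code, not in this header). The driver function below is hypothetical and assumes <linux/slab.h> and <linux/sched.h>.

/* Hypothetical allocation on a path that must not recurse into block I/O. */
static void *alloc_buffer_noio(size_t size)
{
	unsigned int noio_flags;
	void *buf;

	noio_flags = memalloc_noio_save();	/* set PF_MEMALLOC_NOIO, remember old state */
	buf = kmalloc(size, GFP_KERNEL);	/* reclaim here is expected to drop __GFP_IO */
	memalloc_noio_restore(noio_flags);	/* restore previous flag state (nestable) */

	return buf;
}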
@@ -2033,58 +2031,7 @@ extern void wake_up_idle_cpu(int cpu);
 static inline void wake_up_idle_cpu(int cpu) { }
 #endif
 
-extern unsigned int sysctl_sched_latency;
-extern unsigned int sysctl_sched_min_granularity;
-extern unsigned int sysctl_sched_wakeup_granularity;
-extern unsigned int sysctl_sched_child_runs_first;
-
-enum sched_tunable_scaling {
-	SCHED_TUNABLESCALING_NONE,
-	SCHED_TUNABLESCALING_LOG,
-	SCHED_TUNABLESCALING_LINEAR,
-	SCHED_TUNABLESCALING_END,
-};
-extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
-
-extern unsigned int sysctl_numa_balancing_scan_delay;
-extern unsigned int sysctl_numa_balancing_scan_period_min;
-extern unsigned int sysctl_numa_balancing_scan_period_max;
-extern unsigned int sysctl_numa_balancing_scan_period_reset;
-extern unsigned int sysctl_numa_balancing_scan_size;
-extern unsigned int sysctl_numa_balancing_settle_count;
-
-#ifdef CONFIG_SCHED_DEBUG
-extern unsigned int sysctl_sched_migration_cost;
-extern unsigned int sysctl_sched_nr_migrate;
-extern unsigned int sysctl_sched_time_avg;
-extern unsigned int sysctl_timer_migration;
-extern unsigned int sysctl_sched_shares_window;
-
-int sched_proc_update_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *length,
-		loff_t *ppos);
-#endif
-#ifdef CONFIG_SCHED_DEBUG
-static inline unsigned int get_sysctl_timer_migration(void)
-{
-	return sysctl_timer_migration;
-}
-#else
-static inline unsigned int get_sysctl_timer_migration(void)
-{
-	return 1;
-}
-#endif
-extern unsigned int sysctl_sched_rt_period;
-extern int sysctl_sched_rt_runtime;
-
-int sched_rt_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *lenp,
-		loff_t *ppos);
-
 #ifdef CONFIG_SCHED_AUTOGROUP
-extern unsigned int sysctl_sched_autogroup_enabled;
-
 extern void sched_autogroup_create_attach(struct task_struct *p);
 extern void sched_autogroup_detach(struct task_struct *p);
 extern void sched_autogroup_fork(struct signal_struct *sig);
@@ -2100,30 +2047,6 @@ static inline void sched_autogroup_fork(struct signal_struct *sig) { }
 static inline void sched_autogroup_exit(struct signal_struct *sig) { }
 #endif
 
-#ifdef CONFIG_CFS_BANDWIDTH
-extern unsigned int sysctl_sched_cfs_bandwidth_slice;
-#endif
-
-#ifdef CONFIG_RT_MUTEXES
-extern int rt_mutex_getprio(struct task_struct *p);
-extern void rt_mutex_setprio(struct task_struct *p, int prio);
-extern void rt_mutex_adjust_pi(struct task_struct *p);
-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-{
-	return tsk->pi_blocked_on != NULL;
-}
-#else
-static inline int rt_mutex_getprio(struct task_struct *p)
-{
-	return p->normal_prio;
-}
-# define rt_mutex_adjust_pi(p)		do { } while (0)
-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-{
-	return false;
-}
-#endif
-
 extern bool yield_to(struct task_struct *p, bool preempt);
 extern void set_user_nice(struct task_struct *p, long nice);
 extern int task_prio(const struct task_struct *p);
@@ -2259,7 +2182,6 @@ extern struct sigqueue *sigqueue_alloc(void);
 extern void sigqueue_free(struct sigqueue *);
 extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
-extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
 
 static inline void restore_saved_sigmask(void)
 {
@@ -2305,6 +2227,17 @@ static inline int sas_ss_flags(unsigned long sp)
 		: on_sig_stack(sp) ? SS_ONSTACK : 0);
 }
 
+static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
+{
+	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
+#ifdef CONFIG_STACK_GROWSUP
+		return current->sas_ss_sp;
+#else
+		return current->sas_ss_sp + current->sas_ss_size;
+#endif
+	return sp;
+}
+
 /*
  * Routines for handling mm_structs
  */
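Review note: sigsp() is the stack-selection step of signal frame setup. A hedged sketch of how an arch might use it on a downward-growing stack; the helper, the usp parameter and the 16-byte alignment are illustrative only, not part of this patch.

/* Hypothetical arch helper: choose and align the signal frame address. */
static unsigned long get_sigframe_sp(struct ksignal *ksig, unsigned long usp,
				     size_t frame_size)
{
	unsigned long sp = sigsp(usp, ksig);	/* switch to the sas_ss stack if SA_ONSTACK applies */

	return round_down(sp - frame_size, 16);	/* alignment is arch-specific; 16 is illustrative */
}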
@@ -2753,14 +2686,15 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
-extern void normalize_rt_tasks(void);
-
 #ifdef CONFIG_CGROUP_SCHED
 
 extern struct task_group root_task_group;
 
 extern struct task_group *sched_create_group(struct task_group *parent);
+extern void sched_online_group(struct task_group *tg,
+			       struct task_group *parent);
 extern void sched_destroy_group(struct task_group *tg);
+extern void sched_offline_group(struct task_group *tg);
 extern void sched_move_task(struct task_struct *tsk);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
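Review note: a hedged sketch of the expected task-group lifecycle with the new online/offline hooks, mirroring how a cgroup-style controller would be expected to drive them; the wrapper functions are hypothetical and error handling is reduced to the minimum.

/* Hypothetical wrappers showing the expected call order. */
static struct task_group *example_group_create(void)
{
	struct task_group *tg;

	tg = sched_create_group(&root_task_group);
	if (IS_ERR(tg))
		return tg;

	sched_online_group(tg, &root_task_group);	/* make the group visible to the scheduler */
	return tg;
}

static void example_group_destroy(struct task_group *tg)
{
	sched_offline_group(tg);	/* detach from the scheduling hierarchy first */
	sched_destroy_group(tg);	/* then release it */
}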