Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	212
1 file changed, 128 insertions, 84 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b483f39a7112..4cae9b81a1f8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -96,6 +96,7 @@ struct exec_domain;
 struct futex_pi_state;
 struct robust_list_head;
 struct bio;
+struct bts_tracer;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -247,8 +248,9 @@ extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
 extern int runqueue_is_locked(void);
+extern void task_rq_unlock_wait(struct task_struct *p);
 
-extern cpumask_t nohz_cpu_mask;
+extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
 #else
@@ -258,8 +260,6 @@ static inline int select_nohz_load_balancer(int cpu)
 }
 #endif
 
-extern unsigned long rt_needs_cpu(int cpu);
-
 /*
  * Only dump TASK_* tasks. (0 for all tasks)
  */
@@ -284,7 +284,6 @@ long io_schedule_timeout(long timeout);
 
 extern void cpu_init (void);
 extern void trap_init(void);
-extern void account_process_tick(struct task_struct *task, int user);
 extern void update_process_times(int user);
 extern void scheduler_tick(void);
 
@@ -387,6 +386,9 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 		(mm)->hiwater_vm = (mm)->total_vm;	\
 } while (0)
 
+#define get_mm_hiwater_rss(mm)	max((mm)->hiwater_rss, get_mm_rss(mm))
+#define get_mm_hiwater_vm(mm)	max((mm)->hiwater_vm, (mm)->total_vm)
+
 extern void set_dumpable(struct mm_struct *mm, int value);
 extern int get_dumpable(struct mm_struct *mm);
 
@@ -571,12 +573,6 @@ struct signal_struct {
 	 */
 	struct rlimit rlim[RLIM_NLIMITS];
 
-	/* keep the process-shared keyrings here so that they do the right
-	 * thing in threads created with CLONE_THREAD */
-#ifdef CONFIG_KEYS
-	struct key *session_keyring;	/* keyring inherited over fork */
-	struct key *process_keyring;	/* keyring private to this process */
-#endif
 #ifdef CONFIG_BSD_PROCESS_ACCT
 	struct pacct_struct pacct;	/* per-process accounting information */
 #endif
@@ -629,6 +625,10 @@ struct user_struct {
 	atomic_t inotify_watches;	/* How many inotify watches does this user have? */
 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
 #endif
+#ifdef CONFIG_EPOLL
+	atomic_t epoll_devs;	/* The number of epoll descriptors currently open */
+	atomic_t epoll_watches;	/* The number of file descriptors currently watched */
+#endif
 #ifdef CONFIG_POSIX_MQUEUE
 	/* protected by mq_lock	*/
 	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
@@ -643,6 +643,7 @@ struct user_struct {
 	/* Hash table maintenance information */
 	struct hlist_node uidhash_node;
 	uid_t uid;
+	struct user_namespace *user_ns;
 
 #ifdef CONFIG_USER_SCHED
 	struct task_group *tg;
@@ -660,6 +661,7 @@ extern struct user_struct *find_user(uid_t);
 extern struct user_struct root_user;
 #define INIT_USER (&root_user)
 
+
 struct backing_dev_info;
 struct reclaim_state;
 
@@ -667,8 +669,7 @@ struct reclaim_state;
 struct sched_info {
 	/* cumulative counters */
 	unsigned long pcount;	      /* # of times run on this cpu */
-	unsigned long long cpu_time,  /* time spent on the cpu */
-			   run_delay; /* time spent waiting on a runqueue */
+	unsigned long long run_delay; /* time spent waiting on a runqueue */
 
 	/* timestamps */
 	unsigned long long last_arrival,/* when we last ran on a cpu */
@@ -759,20 +760,51 @@ enum cpu_idle_type {
 #define SD_SERIALIZE		1024	/* Only a single load balancing instance */
 #define SD_WAKE_IDLE_FAR	2048	/* Gain latency sacrificing cache hit */
 
-#define BALANCE_FOR_MC_POWER	\
-	(sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
+enum powersavings_balance_level {
+	POWERSAVINGS_BALANCE_NONE = 0,	/* No power saving load balance */
+	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
+					 * first for long running threads
+					 */
+	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
+					 * cpu package for power savings
+					 */
+	MAX_POWERSAVINGS_BALANCE_LEVELS
+};
+
+extern int sched_mc_power_savings, sched_smt_power_savings;
+
+static inline int sd_balance_for_mc_power(void)
+{
+	if (sched_smt_power_savings)
+		return SD_POWERSAVINGS_BALANCE;
+
+	return 0;
+}
+
+static inline int sd_balance_for_package_power(void)
+{
+	if (sched_mc_power_savings | sched_smt_power_savings)
+		return SD_POWERSAVINGS_BALANCE;
+
+	return 0;
+}
 
-#define BALANCE_FOR_PKG_POWER	\
-	((sched_mc_power_savings || sched_smt_power_savings) ? \
-	 SD_POWERSAVINGS_BALANCE : 0)
+/*
+ * Optimise SD flags for power savings:
+ * SD_BALANCE_NEWIDLE helps agressive task consolidation and power savings.
+ * Keep default SD flags if sched_{smt,mc}_power_saving=0
+ */
 
-#define test_sd_parent(sd, flag) ((sd->parent &&	\
-			 (sd->parent->flags & flag)) ? 1 : 0)
+static inline int sd_power_saving_flags(void)
+{
+	if (sched_mc_power_savings | sched_smt_power_savings)
+		return SD_BALANCE_NEWIDLE;
 
+	return 0;
+}
 
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
-	cpumask_t cpumask;
 
 	/*
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
@@ -785,8 +817,15 @@ struct sched_group {
 	 * (see include/linux/reciprocal_div.h)
 	 */
 	u32 reciprocal_cpu_power;
+
+	unsigned long cpumask[];
 };
 
+static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+{
+	return to_cpumask(sg->cpumask);
+}
+
 enum sched_domain_level {
 	SD_LV_NONE = 0,
 	SD_LV_SIBLING,
@@ -810,7 +849,6 @@ struct sched_domain {
 	struct sched_domain *parent;	/* top domain must be null terminated */
 	struct sched_domain *child;	/* bottom domain must be null terminated */
 	struct sched_group *groups;	/* the balancing groups of the domain */
-	cpumask_t span;			/* span of all CPUs in this domain */
 	unsigned long min_interval;	/* Minimum balance interval ms */
 	unsigned long max_interval;	/* Maximum balance interval ms */
 	unsigned int busy_factor;	/* less balancing by factor if busy */
@@ -865,56 +903,41 @@ struct sched_domain {
 #ifdef CONFIG_SCHED_DEBUG
 	char *name;
 #endif
+
+	/* span of all CPUs in this domain */
+	unsigned long span[];
 };
 
-extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
+{
+	return to_cpumask(sd->span);
+}
+
+extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 				    struct sched_domain_attr *dattr_new);
-extern int arch_reinit_sched_domains(void);
+
+/* Test a flag in parent sched domain */
+static inline int test_sd_parent(struct sched_domain *sd, int flag)
+{
+	if (sd->parent && (sd->parent->flags & flag))
+		return 1;
+
+	return 0;
+}
 
 #else /* CONFIG_SMP */
 
 struct sched_domain_attr;
 
 static inline void
-partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 			struct sched_domain_attr *dattr_new)
 {
 }
 #endif /* !CONFIG_SMP */
 
 struct io_context;	/* See blkdev.h */
-#define NGROUPS_SMALL		32
-#define NGROUPS_PER_BLOCK	((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
-struct group_info {
-	int ngroups;
-	atomic_t usage;
-	gid_t small_block[NGROUPS_SMALL];
-	int nblocks;
-	gid_t *blocks[0];
-};
-
-/*
- * get_group_info() must be called with the owning task locked (via task_lock())
- * when task != current. The reason being that the vast majority of callers are
- * looking at current->group_info, which can not be changed except by the
- * current task. Changing current->group_info requires the task lock, too.
- */
-#define get_group_info(group_info) do { \
-	atomic_inc(&(group_info)->usage); \
-} while (0)
-
-#define put_group_info(group_info) do { \
-	if (atomic_dec_and_test(&(group_info)->usage)) \
-		groups_free(group_info); \
-} while (0)
 
-extern struct group_info *groups_alloc(int gidsetsize);
-extern void groups_free(struct group_info *group_info);
-extern int set_current_groups(struct group_info *group_info);
-extern int groups_search(struct group_info *group_info, gid_t grp);
-/* access the groups "array" with this macro */
-#define GROUP_AT(gi, i) \
-	((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
 
 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
 extern void prefetch_stack(struct task_struct *t);
@@ -958,7 +981,7 @@ struct sched_class {
 	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
-				 const cpumask_t *newmask);
+				 const struct cpumask *newmask);
 
 	void (*rq_online)(struct rq *rq);
 	void (*rq_offline)(struct rq *rq);
@@ -1160,6 +1183,19 @@ struct task_struct {
 	struct list_head ptraced;
 	struct list_head ptrace_entry;
 
+#ifdef CONFIG_X86_PTRACE_BTS
+	/*
+	 * This is the tracer handle for the ptrace BTS extension.
+	 * This field actually belongs to the ptracer task.
+	 */
+	struct bts_tracer *bts;
+	/*
+	 * The buffer to hold the BTS data.
+	 */
+	void *bts_buffer;
+	size_t bts_size;
+#endif /* CONFIG_X86_PTRACE_BTS */
+
 	/* PID/PID hash table linkage. */
 	struct pid_link pids[PIDTYPE_MAX];
 	struct list_head thread_group;
@@ -1181,17 +1217,12 @@ struct task_struct {
 	struct list_head cpu_timers[3];
 
 /* process credentials */
-	uid_t uid,euid,suid,fsuid;
-	gid_t gid,egid,sgid,fsgid;
-	struct group_info *group_info;
-	kernel_cap_t   cap_effective, cap_inheritable, cap_permitted, cap_bset;
-	struct user_struct *user;
-	unsigned securebits;
-#ifdef CONFIG_KEYS
-	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
-	struct key *request_key_auth;	/* assumed request_key authority */
-	struct key *thread_keyring;	/* keyring private to this thread */
-#endif
+	const struct cred *real_cred;	/* objective and real subjective task
+					 * credentials (COW) */
+	const struct cred *cred;	/* effective (overridable) subjective task
+					 * credentials (COW) */
+	struct mutex cred_exec_mutex;	/* execve vs ptrace cred calculation mutex */
+
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
 				     it with task_lock())
@@ -1228,9 +1259,6 @@ struct task_struct {
 	int (*notifier)(void *priv);
 	void *notifier_data;
 	sigset_t *notifier_mask;
-#ifdef CONFIG_SECURITY
-	void *security;
-#endif
 	struct audit_context *audit_context;
 #ifdef CONFIG_AUDITSYSCALL
 	uid_t loginuid;
@@ -1349,6 +1377,25 @@ struct task_struct {
 	 */
 	unsigned long timer_slack_ns;
 	unsigned long default_timer_slack_ns;
+
+	struct list_head	*scm_work_list;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/* Index of current stored adress in ret_stack */
+	int curr_ret_stack;
+	/* Stack of return addresses for return function tracing */
+	struct ftrace_ret_stack	*ret_stack;
+	/*
+	 * Number of functions that haven't been traced
+	 * because of depth overrun.
+	 */
+	atomic_t trace_overrun;
+	/* Pause for the tracing */
+	atomic_t tracing_graph_pause;
+#endif
+#ifdef CONFIG_TRACING
+	/* state flags for use by tracers */
+	unsigned long trace;
+#endif
 };
 
 /*
@@ -1587,12 +1634,12 @@ extern cputime_t task_gtime(struct task_struct *p);
 
 #ifdef CONFIG_SMP
 extern int set_cpus_allowed_ptr(struct task_struct *p,
-				const cpumask_t *new_mask);
+				const struct cpumask *new_mask);
 #else
 static inline int set_cpus_allowed_ptr(struct task_struct *p,
-				       const cpumask_t *new_mask)
+				       const struct cpumask *new_mask)
 {
-	if (!cpu_isset(0, *new_mask))
+	if (!cpumask_test_cpu(0, new_mask))
 		return -EINVAL;
 	return 0;
 }
@@ -1659,16 +1706,16 @@ extern void wake_up_idle_cpu(int cpu);
 static inline void wake_up_idle_cpu(int cpu) { }
 #endif
 
-#ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
+extern unsigned int sysctl_sched_shares_ratelimit;
+extern unsigned int sysctl_sched_shares_thresh;
+#ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
-extern unsigned int sysctl_sched_shares_ratelimit;
-extern unsigned int sysctl_sched_shares_thresh;
 
 int sched_nr_latency_handler(struct ctl_table *table, int write,
 		struct file *file, void __user *buffer, size_t *length,
@@ -1768,7 +1815,6 @@ static inline struct user_struct *get_uid(struct user_struct *u)
 	return u;
 }
 extern void free_uid(struct user_struct *);
-extern void switch_uid(struct user_struct *);
 extern void release_uids(struct user_namespace *ns);
 
 #include <asm/current.h>
@@ -1787,9 +1833,6 @@ extern void wake_up_new_task(struct task_struct *tsk,
 extern void sched_fork(struct task_struct *p, int clone_flags);
 extern void sched_dead(struct task_struct *p);
 
-extern int in_group_p(gid_t);
-extern int in_egroup_p(gid_t);
-
 extern void proc_caches_init(void);
 extern void flush_signals(struct task_struct *);
 extern void ignore_signals(struct task_struct *);
@@ -1921,6 +1964,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
 #define for_each_process(p) \
 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
 
+extern bool is_single_threaded(struct task_struct *);
+
 /*
  * Careful: do_each_thread/while_each_thread is a double loop so
  * 'break' will not work as expected - use goto instead.
@@ -2205,10 +2250,8 @@ __trace_special(void *__tr, void *__data,
 }
 #endif
 
-extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
-extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
-
-extern int sched_mc_power_savings, sched_smt_power_savings;
+extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
 extern void normalize_rt_tasks(void);
 
@@ -2217,6 +2260,7 @@ extern void normalize_rt_tasks(void);
 extern struct task_group init_task_group;
 #ifdef CONFIG_USER_SCHED
 extern struct task_group root_task_group;
+extern void set_tg_uid(struct user_struct *user);
 #endif
 
 extern struct task_group *sched_create_group(struct task_group *parent);
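
Note on the cpumask conversion in the hunks above: struct sched_group and struct sched_domain lose their fixed-size cpumask_t members in favour of trailing unsigned long arrays, reached through the new sched_group_cpus() and sched_domain_span() accessors. A minimal sketch of how a caller would iterate those masks with the struct-cpumask API this patch moves to; the function walk_domain_cpus() and its printk output are illustrative assumptions, not part of the patch:

/*
 * Illustrative sketch only: walk the CPUs of a sched_domain and its
 * groups through the accessors introduced above (kernel context,
 * CONFIG_SMP assumed).
 */
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/sched.h>

static void walk_domain_cpus(struct sched_domain *sd)
{
	struct sched_group *sg = sd->groups;
	int cpu;

	/* every CPU covered by this domain */
	for_each_cpu(cpu, sched_domain_span(sd))
		printk(KERN_DEBUG "domain cpu %d\n", cpu);

	/* the group list is circular, so stop after one full pass */
	do {
		printk(KERN_DEBUG "group weight %u\n",
		       cpumask_weight(sched_group_cpus(sg)));
		sg = sg->next;
	} while (sg != sd->groups);
}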