author    Linus Torvalds <torvalds@linux-foundation.org>  2013-04-30 10:43:28 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-04-30 10:43:28 -0400
commit    16fa94b532b1958f508e07eca1a9256351241fbc (patch)
tree      90012a7b7fe2b8cf96f6f5ec12490e0c5e152291 /include/linux/sched.h
parent    e0972916e8fe943f342b0dd1c9d43dbf5bc261c2 (diff)
parent    25f55d9d01ad7a7ad248fd5af1d22675ffd202c5 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes from Ingo Molnar:
 "The main changes in this development cycle were:

   - full dynticks preparatory work by Frederic Weisbecker

   - factor out the cpu time accounting code better, by Li Zefan

   - multi-CPU load balancer cleanups and improvements by Joonsoo Kim

   - various smaller fixes and cleanups"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (45 commits)
  sched: Fix init NOHZ_IDLE flag
  sched: Prevent to re-select dst-cpu in load_balance()
  sched: Rename load_balance_tmpmask to load_balance_mask
  sched: Move up affinity check to mitigate useless redoing overhead
  sched: Don't consider other cpus in our group in case of NEWLY_IDLE
  sched: Explicitly cpu_idle_type checking in rebalance_domains()
  sched: Change position of resched_cpu() in load_balance()
  sched: Fix wrong rq's runnable_avg update with rt tasks
  sched: Document task_struct::personality field
  sched/cpuacct/UML: Fix header file dependency bug on the UML build
  cgroup: Kill subsys.active flag
  sched/cpuacct: No need to check subsys active state
  sched/cpuacct: Initialize cpuacct subsystem earlier
  sched/cpuacct: Initialize root cpuacct earlier
  sched/cpuacct: Allocate per_cpu cpuusage for root cpuacct statically
  sched/cpuacct: Clean up cpuacct.h
  sched/cpuacct: Remove redundant NULL checks in cpuacct_acount_field()
  sched/cpuacct: Remove redundant NULL checks in cpuacct_charge()
  sched/cpuacct: Add cpuacct_acount_field()
  sched/cpuacct: Add cpuacct_init()
  ...
Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 204
 1 file changed, 9 insertions(+), 195 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index bcbc30397f23..01c7d85bcaa7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -127,18 +127,6 @@ extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
 extern void proc_sched_set_task(struct task_struct *p);
 extern void
 print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
-#else
-static inline void
-proc_sched_show_task(struct task_struct *p, struct seq_file *m)
-{
-}
-static inline void proc_sched_set_task(struct task_struct *p)
-{
-}
-static inline void
-print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
-{
-}
 #endif
 
 /*
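The lines removed above are an instance of the usual config-stub idiom: with CONFIG_SCHED_DEBUG disabled, callers get empty static inlines in place of the real debug hooks, so call sites need no #ifdefs of their own. A minimal sketch of the idiom, with hypothetical names (foo_dump_state and CONFIG_FOO_DEBUG are not sched.h symbols):

    struct foo;

    #ifdef CONFIG_FOO_DEBUG
    extern void foo_dump_state(struct foo *f);
    #else
    /* No-op stub: with the option off, calls compile away entirely. */
    static inline void foo_dump_state(struct foo *f) { }
    #endif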
@@ -570,7 +558,7 @@ struct signal_struct {
 	cputime_t utime, stime, cutime, cstime;
 	cputime_t gtime;
 	cputime_t cgtime;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	struct cputime prev_cputime;
 #endif
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
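prev_cputime exists because tick-sampled accounting can produce utime/stime values momentarily below what was previously reported, so the kernel keeps the last snapshot and clamps against it. The guard is renamed here because this cycle split CONFIG_VIRT_CPU_ACCOUNTING into _NATIVE and _GEN variants, and only native (precise) accounting can skip the snapshot. A simplified, self-contained sketch of the invariant, not the kernel's actual cputime_adjust():

    struct cputime { unsigned long long utime, stime; };

    /* Clamp a fresh sample against the previous one so the values a
     * reader sees never go backwards between two reads. */
    static void sample_monotonic(struct cputime *prev,
                                 unsigned long long utime,
                                 unsigned long long stime)
    {
            if (utime > prev->utime)
                    prev->utime = utime;
            if (stime > prev->stime)
                    prev->stime = stime;
    }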
@@ -768,31 +756,6 @@ enum cpu_idle_type {
 };
 
 /*
- * Increase resolution of nice-level calculations for 64-bit architectures.
- * The extra resolution improves shares distribution and load balancing of
- * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
- * hierarchies, especially on larger systems. This is not a user-visible change
- * and does not change the user-interface for setting shares/weights.
- *
- * We increase resolution only if we have enough bits to allow this increased
- * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
- * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
- * increased costs.
- */
-#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
-# define SCHED_LOAD_RESOLUTION	10
-# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
-# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
-#else
-# define SCHED_LOAD_RESOLUTION	0
-# define scale_load(w)		(w)
-# define scale_load_down(w)	(w)
-#endif
-
-#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
-#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
-
-/*
  * Increase resolution of cpu_power calculations
  */
 #define SCHED_POWER_SHIFT	10
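The block deleted above is plain fixed-point scaling: task weights are shifted left by SCHED_LOAD_RESOLUTION bits on the way into the load-tracking code and shifted back down on the way out, giving low-weight groups more usable resolution. With the resolution at 10 bits, the conventional nice-0 weight of 1024 becomes 1024 << 10 = 1048576 internally. A standalone sketch of the arithmetic (assuming the usual NICE_0_LOAD value of 1024):

    #include <stdio.h>

    #define SCHED_LOAD_RESOLUTION  10
    #define scale_load(w)          ((w) << SCHED_LOAD_RESOLUTION)
    #define scale_load_down(w)     ((w) >> SCHED_LOAD_RESOLUTION)

    int main(void)
    {
            unsigned long nice_0 = 1024;    /* assumed NICE_0_LOAD */
            unsigned long internal = scale_load(nice_0);

            /* prints: 1024 -> 1048576 -> 1024 */
            printf("%lu -> %lu -> %lu\n",
                   nice_0, internal, scale_load_down(internal));
            return 0;
    }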
@@ -817,62 +780,6 @@ enum cpu_idle_type {
 
 extern int __weak arch_sd_sibiling_asym_packing(void);
 
-struct sched_group_power {
-	atomic_t ref;
-	/*
-	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
-	 * single CPU.
-	 */
-	unsigned int power, power_orig;
-	unsigned long next_update;
-	/*
-	 * Number of busy cpus in this group.
-	 */
-	atomic_t nr_busy_cpus;
-
-	unsigned long cpumask[0]; /* iteration mask */
-};
-
-struct sched_group {
-	struct sched_group *next;	/* Must be a circular list */
-	atomic_t ref;
-
-	unsigned int group_weight;
-	struct sched_group_power *sgp;
-
-	/*
-	 * The CPUs this group covers.
-	 *
-	 * NOTE: this field is variable length. (Allocated dynamically
-	 * by attaching extra space to the end of the structure,
-	 * depending on how many CPUs the kernel has booted up with)
-	 */
-	unsigned long cpumask[0];
-};
-
-static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
-{
-	return to_cpumask(sg->cpumask);
-}
-
-/*
- * cpumask masking which cpus in the group are allowed to iterate up the domain
- * tree.
- */
-static inline struct cpumask *sched_group_mask(struct sched_group *sg)
-{
-	return to_cpumask(sg->sgp->cpumask);
-}
-
-/**
- * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
- * @group: The group whose first cpu is to be returned.
- */
-static inline unsigned int group_first_cpu(struct sched_group *group)
-{
-	return cpumask_first(sched_group_cpus(group));
-}
-
 struct sched_domain_attr {
 	int relax_domain_level;
 };
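struct sched_group and its helpers leave the public header here (the pulled series moves them into kernel/sched/sched.h; a bare forward declaration is added in the next hunk). The one contract worth keeping in mind from the removed comments is that ->next must form a circular list, so walks use do/while rather than a NULL test. A detached sketch of that iteration pattern (sched_group_sketch is a stand-in type for illustration, not kernel code):

    /* The circular-list contract: sg->next eventually wraps back to
     * the starting group, so iterate with do/while, not while. */
    struct sched_group_sketch {
            struct sched_group_sketch *next;  /* must be a circular list */
            unsigned int group_weight;
    };

    static unsigned int total_weight(struct sched_group_sketch *first)
    {
            struct sched_group_sketch *sg = first;
            unsigned int sum = 0;

            do {
                    sum += sg->group_weight;
                    sg = sg->next;
            } while (sg != first);

            return sum;
    }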
@@ -883,6 +790,8 @@ struct sched_domain_attr {
 
 extern int sched_domain_level_max;
 
+struct sched_group;
+
 struct sched_domain {
 	/* These fields must be setup */
 	struct sched_domain *parent;	/* top domain must be null terminated */
@@ -899,6 +808,8 @@ struct sched_domain {
 	unsigned int wake_idx;
 	unsigned int forkexec_idx;
 	unsigned int smt_gain;
+
+	int nohz_idle;			/* NOHZ IDLE status */
 	int flags;			/* See SD_* */
 	int level;
 
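The new nohz_idle field backs "sched: Fix init NOHZ_IDLE flag" from the pulled series: rather than a shared flag, each CPU records its NOHZ-idle state per scheduler domain. A hedged sketch of how such a flag gets toggled when a CPU goes idle; for_each_domain() and the RCU locking follow kernel/sched usage, but the real transition code and its nr_busy_cpus bookkeeping live in kernel/sched/fair.c, not in this header:

    /* Illustrative only: mark this CPU's domains as NOHZ-idle. */
    static void set_cpu_sd_state_idle_sketch(int cpu)
    {
            struct sched_domain *sd;

            rcu_read_lock();
            for_each_domain(cpu, sd) {
                    if (sd->nohz_idle)
                            break;          /* already marked idle */
                    sd->nohz_idle = 1;
            }
            rcu_read_unlock();
    }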
@@ -971,18 +882,6 @@ extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
 
-/* Test a flag in parent sched domain */
-static inline int test_sd_parent(struct sched_domain *sd, int flag)
-{
-	if (sd->parent && (sd->parent->flags & flag))
-		return 1;
-
-	return 0;
-}
-
-unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
-unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
-
 bool cpus_share_cache(int this_cpu, int that_cpu);
 
 #else /* CONFIG_SMP */
@@ -1017,72 +916,6 @@ struct mempolicy;
 struct pipe_inode_info;
 struct uts_namespace;
 
-struct rq;
-struct sched_domain;
-
-/*
- * wake flags
- */
-#define WF_SYNC		0x01		/* waker goes to sleep after wakup */
-#define WF_FORK		0x02		/* child wakeup after fork */
-#define WF_MIGRATED	0x04		/* internal use, task got migrated */
-
-#define ENQUEUE_WAKEUP		1
-#define ENQUEUE_HEAD		2
-#ifdef CONFIG_SMP
-#define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
-#else
-#define ENQUEUE_WAKING		0
-#endif
-
-#define DEQUEUE_SLEEP		1
-
-struct sched_class {
-	const struct sched_class *next;
-
-	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
-	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
-	void (*yield_task) (struct rq *rq);
-	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
-
-	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
-
-	struct task_struct * (*pick_next_task) (struct rq *rq);
-	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
-
-#ifdef CONFIG_SMP
-	int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
-	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
-
-	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
-	void (*post_schedule) (struct rq *this_rq);
-	void (*task_waking) (struct task_struct *task);
-	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
-
-	void (*set_cpus_allowed)(struct task_struct *p,
-				 const struct cpumask *newmask);
-
-	void (*rq_online)(struct rq *rq);
-	void (*rq_offline)(struct rq *rq);
-#endif
-
-	void (*set_curr_task) (struct rq *rq);
-	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
-	void (*task_fork) (struct task_struct *p);
-
-	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
-	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
-	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
-			      int oldprio);
-
-	unsigned int (*get_rr_interval) (struct rq *rq,
-					 struct task_struct *task);
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	void (*task_move_group) (struct task_struct *p, int on_rq);
-#endif
-};
-
 struct load_weight {
 	unsigned long weight, inv_weight;
 };
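struct sched_class likewise moves out of the public header. It is the scheduler's internal vtable: classes are chained by ->next in fixed priority order (stop, rt, fair, idle in this era), and the core asks each class in turn for a runnable task. A simplified sketch of that dispatch, modelled on pick_next_task() in kernel/sched/core.c (the highest class is passed in rather than referenced, and the fair-class fast path is omitted):

    /* Walk the priority-ordered class list and return the first task
     * any class offers; illustrative, not the kernel's exact code. */
    static struct task_struct *
    pick_next_task_sketch(struct rq *rq, const struct sched_class *highest)
    {
            const struct sched_class *class;
            struct task_struct *p;

            for (class = highest; class; class = class->next) {
                    p = class->pick_next_task(rq);
                    if (p)
                            return p;
            }
            return NULL;    /* unreachable: the idle class always picks */
    }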
@@ -1274,8 +1107,10 @@ struct task_struct {
 	int exit_code, exit_signal;
 	int pdeath_signal;	/* The signal sent when the parent dies */
 	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
-	/* ??? */
+
+	/* Used for emulating ABI behavior of previous Linux versions */
 	unsigned int personality;
+
 	unsigned did_exec:1;
 	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
 				 * execve */
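The "???" placeholder gives way to real documentation ("sched: Document task_struct::personality field" in the list above). Userspace sets this field through the personality(2) syscall; a small example, using the query-only 0xffffffff argument documented in the man page:

    #include <stdio.h>
    #include <sys/personality.h>

    int main(void)
    {
            /* 0xffffffff queries the current personality unchanged. */
            int old = personality(0xffffffff);

            /* Opt into legacy ABI behavior: 32-bit emulation with
             * address-space randomization disabled. */
            if (personality(PER_LINUX32 | ADDR_NO_RANDOMIZE) == -1)
                    perror("personality");

            printf("previous personality: %#x\n", old);
            return 0;
    }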
@@ -1327,7 +1162,7 @@ struct task_struct {
 
 	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	struct cputime prev_cputime;
 #endif
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
@@ -2681,28 +2516,7 @@ extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
 #ifdef CONFIG_CGROUP_SCHED
-
 extern struct task_group root_task_group;
-
-extern struct task_group *sched_create_group(struct task_group *parent);
-extern void sched_online_group(struct task_group *tg,
-			       struct task_group *parent);
-extern void sched_destroy_group(struct task_group *tg);
-extern void sched_offline_group(struct task_group *tg);
-extern void sched_move_task(struct task_struct *tsk);
-#ifdef CONFIG_FAIR_GROUP_SCHED
-extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
-extern unsigned long sched_group_shares(struct task_group *tg);
-#endif
-#ifdef CONFIG_RT_GROUP_SCHED
-extern int sched_group_set_rt_runtime(struct task_group *tg,
-				      long rt_runtime_us);
-extern long sched_group_rt_runtime(struct task_group *tg);
-extern int sched_group_set_rt_period(struct task_group *tg,
-				     long rt_period_us);
-extern long sched_group_rt_period(struct task_group *tg);
-extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
-#endif
 #endif /* CONFIG_CGROUP_SCHED */
 
 extern int task_can_switch_user(struct user_struct *up,
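The task-group prototypes deleted here (sched_create_group(), sched_group_set_shares(), the RT bandwidth setters) also move into kernel/sched/sched.h in this series; they were never a userspace interface. The same knobs are reachable from userspace through the cpu cgroup controller. A hedged example, assuming a cgroup-v1 cpu hierarchy mounted at /sys/fs/cgroup/cpu with an existing group named "demo" (both are assumptions for the example):

    #include <stdio.h>

    int main(void)
    {
            /* Writing cpu.shares lands in sched_group_set_shares()
             * in-kernel; 512 is half the default weight of 1024. */
            FILE *f = fopen("/sys/fs/cgroup/cpu/demo/cpu.shares", "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            fprintf(f, "512\n");
            return fclose(f) ? 1 : 0;
    }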