Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 204
 1 file changed, 9 insertions(+), 195 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index bcbc30397f23..01c7d85bcaa7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -127,18 +127,6 @@ extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
 extern void proc_sched_set_task(struct task_struct *p);
 extern void
 print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
-#else
-static inline void
-proc_sched_show_task(struct task_struct *p, struct seq_file *m)
-{
-}
-static inline void proc_sched_set_task(struct task_struct *p)
-{
-}
-static inline void
-print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
-{
-}
 #endif
 
 /*
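Note: the hunk above drops the !CONFIG_SCHED_DEBUG fallbacks from the public header. Those empty static inlines are the standard Kconfig stub pattern: call sites stay free of #ifdefs because the disabled variant compiles to nothing. A minimal sketch of the pattern, using a hypothetical foo_debug() rather than anything from this diff:

	#ifdef CONFIG_FOO_DEBUG
	extern void foo_debug(struct foo *f);		/* real version in foo.c */
	#else
	static inline void foo_debug(struct foo *f) { }	/* optimizes away */
	#endif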
@@ -570,7 +558,7 @@ struct signal_struct {
 	cputime_t utime, stime, cutime, cstime;
 	cputime_t gtime;
 	cputime_t cgtime;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	struct cputime prev_cputime;
 #endif
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
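Note: the CONFIG_VIRT_CPU_ACCOUNTING -> CONFIG_VIRT_CPU_ACCOUNTING_NATIVE rename narrows the guard. prev_cputime is a snapshot used to keep reported utime/stime monotonic when they are scaled from tick samples, which native virtual CPU accounting does not need. A hedged sketch of the monotonicity clamp it supports (loosely modeled on cputime_adjust(); the helper name is illustrative, not from this diff):

	static void clamp_cputime(struct cputime *prev,
				  cputime_t *ut, cputime_t *st)
	{
		/* never report less than the last snapshot */
		prev->utime = max(prev->utime, *ut);
		prev->stime = max(prev->stime, *st);
		*ut = prev->utime;
		*st = prev->stime;
	}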
@@ -768,31 +756,6 @@ enum cpu_idle_type {
 };
 
 /*
- * Increase resolution of nice-level calculations for 64-bit architectures.
- * The extra resolution improves shares distribution and load balancing of
- * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
- * hierarchies, especially on larger systems. This is not a user-visible change
- * and does not change the user-interface for setting shares/weights.
- *
- * We increase resolution only if we have enough bits to allow this increased
- * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
- * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
- * increased costs.
- */
-#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
-# define SCHED_LOAD_RESOLUTION	10
-# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
-# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
-#else
-# define SCHED_LOAD_RESOLUTION	0
-# define scale_load(w)		(w)
-# define scale_load_down(w)	(w)
-#endif
-
-#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
-#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
-
-/*
  * Increase resolution of cpu_power calculations
  */
 #define SCHED_POWER_SHIFT	10
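Note: for reference, the scaling removed above works as follows. With SCHED_LOAD_RESOLUTION set to 10, the nice-0 weight of 1024 is stored internally as 1024 << 10 = 1048576 (exactly one SCHED_LOAD_SCALE = 1 << 20 unit), and scale_load_down() undoes the shift; with resolution 0 both macros are the identity. A standalone sketch of the arithmetic (plain C, macro names copied from the removed block, main() added for illustration):

	#include <stdio.h>

	#define SCHED_LOAD_RESOLUTION	10
	#define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
	#define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
	#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
	#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

	int main(void)
	{
		unsigned long w = 1024;	/* nice-0 weight */

		printf("scaled:   %lu\n", scale_load(w));	/* 1048576 */
		printf("scale:    %ld\n", SCHED_LOAD_SCALE);	/* 1048576 */
		printf("restored: %lu\n",
		       scale_load_down(scale_load(w)));		/* 1024 */
		return 0;
	}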
@@ -817,62 +780,6 @@ enum cpu_idle_type {
 
 extern int __weak arch_sd_sibiling_asym_packing(void);
 
-struct sched_group_power {
-	atomic_t ref;
-	/*
-	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
-	 * single CPU.
-	 */
-	unsigned int power, power_orig;
-	unsigned long next_update;
-	/*
-	 * Number of busy cpus in this group.
-	 */
-	atomic_t nr_busy_cpus;
-
-	unsigned long cpumask[0]; /* iteration mask */
-};
-
-struct sched_group {
-	struct sched_group *next;	/* Must be a circular list */
-	atomic_t ref;
-
-	unsigned int group_weight;
-	struct sched_group_power *sgp;
-
-	/*
-	 * The CPUs this group covers.
-	 *
-	 * NOTE: this field is variable length. (Allocated dynamically
-	 * by attaching extra space to the end of the structure,
-	 * depending on how many CPUs the kernel has booted up with)
-	 */
-	unsigned long cpumask[0];
-};
-
-static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
-{
-	return to_cpumask(sg->cpumask);
-}
-
-/*
- * cpumask masking which cpus in the group are allowed to iterate up the domain
- * tree.
- */
-static inline struct cpumask *sched_group_mask(struct sched_group *sg)
-{
-	return to_cpumask(sg->sgp->cpumask);
-}
-
-/**
- * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
- * @group: The group whose first cpu is to be returned.
- */
-static inline unsigned int group_first_cpu(struct sched_group *group)
-{
-	return cpumask_first(sched_group_cpus(group));
-}
-
 struct sched_domain_attr {
 	int relax_domain_level;
 };
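Note: the cpumask[0] members in the removed structs are the zero-length trailing-array idiom the NOTE describes: each object is allocated with extra space for however many CPUs the kernel booted with, and to_cpumask() reinterprets that storage. A hedged sketch of such an allocation (modeled on the domain-building code in kernel/sched/core.c, not quoted from this diff):

	/* struct plus one boot-sized cpumask of trailing storage */
	sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
			  GFP_KERNEL, cpu_to_node(cpu));
	if (sg)
		cpumask_copy(sched_group_cpus(sg), cpumask_of(cpu));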
@@ -883,6 +790,8 @@ struct sched_domain_attr {
 
 extern int sched_domain_level_max;
 
+struct sched_group;
+
 struct sched_domain {
 	/* These fields must be setup */
 	struct sched_domain *parent;	/* top domain must be null terminated */
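Note: the added line is a plain forward declaration. struct sched_domain only holds a pointer to a sched_group, so the full definition can stay private to kernel/sched/sched.h. In general (generic illustration, not from this diff):

	struct bar;		/* incomplete type suffices ... */

	struct foo {
		struct bar *b;	/* ... for a pointer member */
	};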
@@ -899,6 +808,8 @@ struct sched_domain {
 	unsigned int wake_idx;
 	unsigned int forkexec_idx;
 	unsigned int smt_gain;
+
+	int nohz_idle;			/* NOHZ IDLE status */
 	int flags;			/* See SD_* */
 	int level;
 
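Note: the new nohz_idle field gives each domain level a cheap marker for NOHZ idle bookkeeping. A heavily hedged sketch of the kind of RCU-protected walk that would flip it (loosely modeled on set_cpu_sd_state_idle() in kernel/sched/fair.c; the helper name is hypothetical):

	static void mark_domains_idle(int cpu)
	{
		struct sched_domain *sd;

		rcu_read_lock();	/* the domain tree is RCU-protected */
		for_each_domain(cpu, sd)
			sd->nohz_idle = 1;
		rcu_read_unlock();
	}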
@@ -971,18 +882,6 @@ extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
 
-/* Test a flag in parent sched domain */
-static inline int test_sd_parent(struct sched_domain *sd, int flag)
-{
-	if (sd->parent && (sd->parent->flags & flag))
-		return 1;
-
-	return 0;
-}
-
-unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
-unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
-
 bool cpus_share_cache(int this_cpu, int that_cpu);
 
 #else /* CONFIG_SMP */
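Note: the removed test_sd_parent() merely wraps a NULL-safe flag test one level up the domain tree; after this patch a caller writes the check directly. Illustrative before/after (the SD_BALANCE_EXEC call site is hypothetical):

	/* before: */
	if (test_sd_parent(sd, SD_BALANCE_EXEC))
		/* ... balance in the parent domain ... */;

	/* after, open-coded: */
	if (sd->parent && (sd->parent->flags & SD_BALANCE_EXEC))
		/* ... balance in the parent domain ... */;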
@@ -1017,72 +916,6 @@ struct mempolicy;
 struct pipe_inode_info;
 struct uts_namespace;
 
-struct rq;
-struct sched_domain;
-
-/*
- * wake flags
- */
-#define WF_SYNC		0x01		/* waker goes to sleep after wakup */
-#define WF_FORK		0x02		/* child wakeup after fork */
-#define WF_MIGRATED	0x04		/* internal use, task got migrated */
-
-#define ENQUEUE_WAKEUP	1
-#define ENQUEUE_HEAD	2
-#ifdef CONFIG_SMP
-#define ENQUEUE_WAKING	4	/* sched_class::task_waking was called */
-#else
-#define ENQUEUE_WAKING	0
-#endif
-
-#define DEQUEUE_SLEEP	1
-
-struct sched_class {
-	const struct sched_class *next;
-
-	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
-	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
-	void (*yield_task) (struct rq *rq);
-	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
-
-	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
-
-	struct task_struct * (*pick_next_task) (struct rq *rq);
-	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
-
-#ifdef CONFIG_SMP
-	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
-	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
-
-	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
-	void (*post_schedule) (struct rq *this_rq);
-	void (*task_waking) (struct task_struct *task);
-	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
-
-	void (*set_cpus_allowed)(struct task_struct *p,
-				 const struct cpumask *newmask);
-
-	void (*rq_online)(struct rq *rq);
-	void (*rq_offline)(struct rq *rq);
-#endif
-
-	void (*set_curr_task) (struct rq *rq);
-	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
-	void (*task_fork) (struct task_struct *p);
-
-	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
-	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
-	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
-			      int oldprio);
-
-	unsigned int (*get_rr_interval) (struct rq *rq,
-					 struct task_struct *task);
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	void (*task_move_group) (struct task_struct *p, int on_rq);
-#endif
-};
-
 struct load_weight {
 	unsigned long weight, inv_weight;
 };
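Note: struct sched_class, now private to kernel/sched/sched.h, is the scheduler's per-policy ops table: each scheduling class fills in the callbacks it implements, and the classes chain through .next. A trimmed sketch in the style of fair_sched_class from kernel/sched/fair.c (field set abridged; treat the exact names as illustrative of this era, not quoted from this diff):

	const struct sched_class fair_sched_class = {
		.next			= &idle_sched_class,
		.enqueue_task		= enqueue_task_fair,
		.dequeue_task		= dequeue_task_fair,
		.yield_task		= yield_task_fair,
		.pick_next_task		= pick_next_task_fair,
		.put_prev_task		= put_prev_task_fair,
		.task_tick		= task_tick_fair,
		/* ... remaining callbacks elided ... */
	};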
@@ -1274,8 +1107,10 @@ struct task_struct {
 	int exit_code, exit_signal;
 	int pdeath_signal;	/* The signal sent when the parent dies */
 	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
-	/* ??? */
+
+	/* Used for emulating ABI behavior of previous Linux versions */
 	unsigned int personality;
+
 	unsigned did_exec:1;
 	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
 				 * execve */
@@ -1327,7 +1162,7 @@ struct task_struct {
 
 	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	struct cputime prev_cputime;
 #endif
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
@@ -2681,28 +2516,7 @@ extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
 #ifdef CONFIG_CGROUP_SCHED
-
 extern struct task_group root_task_group;
-
-extern struct task_group *sched_create_group(struct task_group *parent);
-extern void sched_online_group(struct task_group *tg,
-			       struct task_group *parent);
-extern void sched_destroy_group(struct task_group *tg);
-extern void sched_offline_group(struct task_group *tg);
-extern void sched_move_task(struct task_struct *tsk);
-#ifdef CONFIG_FAIR_GROUP_SCHED
-extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
-extern unsigned long sched_group_shares(struct task_group *tg);
-#endif
-#ifdef CONFIG_RT_GROUP_SCHED
-extern int sched_group_set_rt_runtime(struct task_group *tg,
-				      long rt_runtime_us);
-extern long sched_group_rt_runtime(struct task_group *tg);
-extern int sched_group_set_rt_period(struct task_group *tg,
-				     long rt_period_us);
-extern long sched_group_rt_period(struct task_group *tg);
-extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
-#endif
 #endif /* CONFIG_CGROUP_SCHED */
 
 extern int task_can_switch_user(struct user_struct *up,