Diffstat (limited to 'include')
-rw-r--r--  include/linux/cgroup.h            |   1
-rw-r--r--  include/linux/context_tracking.h  |  24
-rw-r--r--  include/linux/math64.h            |  19
-rw-r--r--  include/linux/sched.h             | 204
4 files changed, 49 insertions(+), 199 deletions(-)
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index d86e215ca2b8..646ab9d15e42 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -586,7 +586,6 @@ struct cgroup_subsys {
 	void (*bind)(struct cgroup *root);
 
 	int subsys_id;
-	int active;
 	int disabled;
 	int early_init;
 	/*
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index b28d161c1091..365f4a61bf04 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -1,9 +1,9 @@
 #ifndef _LINUX_CONTEXT_TRACKING_H
 #define _LINUX_CONTEXT_TRACKING_H
 
-#ifdef CONFIG_CONTEXT_TRACKING
 #include <linux/sched.h>
 #include <linux/percpu.h>
+#include <asm/ptrace.h>
 
 struct context_tracking {
 	/*
@@ -13,12 +13,13 @@ struct context_tracking {
 	 * may be further optimized using static keys.
 	 */
 	bool active;
-	enum {
+	enum ctx_state {
 		IN_KERNEL = 0,
 		IN_USER,
 	} state;
 };
 
+#ifdef CONFIG_CONTEXT_TRACKING
 DECLARE_PER_CPU(struct context_tracking, context_tracking);
 
 static inline bool context_tracking_in_user(void)
@@ -33,12 +34,31 @@ static inline bool context_tracking_active(void)
 
 extern void user_enter(void);
 extern void user_exit(void);
+
+static inline enum ctx_state exception_enter(void)
+{
+	enum ctx_state prev_ctx;
+
+	prev_ctx = this_cpu_read(context_tracking.state);
+	user_exit();
+
+	return prev_ctx;
+}
+
+static inline void exception_exit(enum ctx_state prev_ctx)
+{
+	if (prev_ctx == IN_USER)
+		user_enter();
+}
+
 extern void context_tracking_task_switch(struct task_struct *prev,
 					 struct task_struct *next);
 #else
 static inline bool context_tracking_in_user(void) { return false; }
 static inline void user_enter(void) { }
 static inline void user_exit(void) { }
+static inline enum ctx_state exception_enter(void) { return 0; }
+static inline void exception_exit(enum ctx_state prev_ctx) { }
 static inline void context_tracking_task_switch(struct task_struct *prev,
 						struct task_struct *next) { }
 #endif /* !CONFIG_CONTEXT_TRACKING */
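
The new exception_enter()/exception_exit() pair lets an architecture's
exception handlers drop out of user context tracking on entry and restore
the saved state on return. A minimal sketch of a caller, assuming a
hypothetical handler (do_demo_fault and its body are illustrative, not
part of this patch):

	/* Hypothetical arch fault handler; only exception_enter() and
	 * exception_exit() come from this header. */
	void do_demo_fault(struct pt_regs *regs)
	{
		enum ctx_state prev_state;

		prev_state = exception_enter();	/* track kernel mode, remember old state */
		/* ... handle the fault; RCU and cputime accounting see kernel mode ... */
		exception_exit(prev_state);	/* re-enter IN_USER only if we came from it */
	}
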
diff --git a/include/linux/math64.h b/include/linux/math64.h
index b8ba85544721..931a619407bf 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -30,6 +30,15 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
 }
 
 /**
+ * div64_u64_rem - unsigned 64bit divide with 64bit divisor and 64bit remainder
+ */
+static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
+{
+	*remainder = dividend % divisor;
+	return dividend / divisor;
+}
+
+/**
  * div64_u64 - unsigned 64bit divide with 64bit divisor
  */
 static inline u64 div64_u64(u64 dividend, u64 divisor)
@@ -61,8 +70,16 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
 #endif
 
+#ifndef div64_u64_rem
+extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
+#endif
+
 #ifndef div64_u64
-extern u64 div64_u64(u64 dividend, u64 divisor);
+static inline u64 div64_u64(u64 dividend, u64 divisor)
+{
+	u64 remainder;
+	return div64_u64_rem(dividend, divisor, &remainder);
+}
 #endif
 
 #ifndef div64_s64
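
div64_u64_rem() produces quotient and remainder from a single division,
which is the point on 32-bit architectures where each 64-bit divide is
expensive; the generic div64_u64() fallback above now simply discards the
remainder. A minimal usage sketch (values are illustrative):

	u64 ns = 3000000123ULL;		/* a nanosecond count */
	u64 rem;
	u64 sec = div64_u64_rem(ns, 1000000000ULL, &rem);
	/* sec == 3 and rem == 123: both results from one division. */
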
diff --git a/include/linux/sched.h b/include/linux/sched.h
index bcbc30397f23..01c7d85bcaa7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -127,18 +127,6 @@ extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
 extern void proc_sched_set_task(struct task_struct *p);
 extern void
 print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
-#else
-static inline void
-proc_sched_show_task(struct task_struct *p, struct seq_file *m)
-{
-}
-static inline void proc_sched_set_task(struct task_struct *p)
-{
-}
-static inline void
-print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
-{
-}
 #endif
 
 /*
@@ -570,7 +558,7 @@ struct signal_struct {
 	cputime_t utime, stime, cutime, cstime;
 	cputime_t gtime;
 	cputime_t cgtime;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	struct cputime prev_cputime;
 #endif
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
@@ -768,31 +756,6 @@ enum cpu_idle_type {
 };
 
 /*
- * Increase resolution of nice-level calculations for 64-bit architectures.
- * The extra resolution improves shares distribution and load balancing of
- * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
- * hierarchies, especially on larger systems. This is not a user-visible change
- * and does not change the user-interface for setting shares/weights.
- *
- * We increase resolution only if we have enough bits to allow this increased
- * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
- * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
- * increased costs.
- */
-#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
-# define SCHED_LOAD_RESOLUTION	10
-# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
-# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
-#else
-# define SCHED_LOAD_RESOLUTION	0
-# define scale_load(w)		(w)
-# define scale_load_down(w)	(w)
-#endif
-
-#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
-#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
-
-/*
  * Increase resolution of cpu_power calculations
  */
 #define SCHED_POWER_SHIFT	10
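
The removed block implemented fixed-point scaling of task weights: shifting
weights left by SCHED_LOAD_RESOLUTION bits internally reduces rounding error
when low-weight groups are divided up. An illustrative sketch of the
arithmetic (DEMO_RESOLUTION stands in for the macro, which now lives outside
this header):

	#define DEMO_RESOLUTION 10

	/* scale_load(): internal weight, e.g. 1024 -> 1048576 for nice 0 */
	static unsigned long demo_scale_load(unsigned long w)
	{
		return w << DEMO_RESOLUTION;
	}

	/* scale_load_down(): back to the user-visible weight */
	static unsigned long demo_scale_load_down(unsigned long w)
	{
		return w >> DEMO_RESOLUTION;
	}
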
@@ -817,62 +780,6 @@ enum cpu_idle_type {
 
 extern int __weak arch_sd_sibiling_asym_packing(void);
 
-struct sched_group_power {
-	atomic_t ref;
-	/*
-	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
-	 * single CPU.
-	 */
-	unsigned int power, power_orig;
-	unsigned long next_update;
-	/*
-	 * Number of busy cpus in this group.
-	 */
-	atomic_t nr_busy_cpus;
-
-	unsigned long cpumask[0]; /* iteration mask */
-};
-
-struct sched_group {
-	struct sched_group *next;	/* Must be a circular list */
-	atomic_t ref;
-
-	unsigned int group_weight;
-	struct sched_group_power *sgp;
-
-	/*
-	 * The CPUs this group covers.
-	 *
-	 * NOTE: this field is variable length. (Allocated dynamically
-	 * by attaching extra space to the end of the structure,
-	 * depending on how many CPUs the kernel has booted up with)
-	 */
-	unsigned long cpumask[0];
-};
-
-static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
-{
-	return to_cpumask(sg->cpumask);
-}
-
-/*
- * cpumask masking which cpus in the group are allowed to iterate up the domain
- * tree.
- */
-static inline struct cpumask *sched_group_mask(struct sched_group *sg)
-{
-	return to_cpumask(sg->sgp->cpumask);
-}
-
-/**
- * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
- * @group: The group whose first cpu is to be returned.
- */
-static inline unsigned int group_first_cpu(struct sched_group *group)
-{
-	return cpumask_first(sched_group_cpus(group));
-}
-
 struct sched_domain_attr {
 	int relax_domain_level;
 };
@@ -883,6 +790,8 @@ struct sched_domain_attr {
 
 extern int sched_domain_level_max;
 
+struct sched_group;
+
 struct sched_domain {
 	/* These fields must be setup */
 	struct sched_domain *parent;	/* top domain must be null terminated */
@@ -899,6 +808,8 @@ struct sched_domain {
 	unsigned int wake_idx;
 	unsigned int forkexec_idx;
 	unsigned int smt_gain;
+
+	int nohz_idle;			/* NOHZ IDLE status */
 	int flags;			/* See SD_* */
 	int level;
 
@@ -971,18 +882,6 @@ extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
 
-/* Test a flag in parent sched domain */
-static inline int test_sd_parent(struct sched_domain *sd, int flag)
-{
-	if (sd->parent && (sd->parent->flags & flag))
-		return 1;
-
-	return 0;
-}
-
-unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
-unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
-
 bool cpus_share_cache(int this_cpu, int that_cpu);
 
 #else /* CONFIG_SMP */
@@ -1017,72 +916,6 @@ struct mempolicy;
 struct pipe_inode_info;
 struct uts_namespace;
 
-struct rq;
-struct sched_domain;
-
-/*
- * wake flags
- */
-#define WF_SYNC		0x01		/* waker goes to sleep after wakup */
-#define WF_FORK		0x02		/* child wakeup after fork */
-#define WF_MIGRATED	0x04		/* internal use, task got migrated */
-
-#define ENQUEUE_WAKEUP		1
-#define ENQUEUE_HEAD		2
-#ifdef CONFIG_SMP
-#define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
-#else
-#define ENQUEUE_WAKING		0
-#endif
-
-#define DEQUEUE_SLEEP		1
-
-struct sched_class {
-	const struct sched_class *next;
-
-	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
-	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
-	void (*yield_task) (struct rq *rq);
-	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
-
-	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
-
-	struct task_struct * (*pick_next_task) (struct rq *rq);
-	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
-
-#ifdef CONFIG_SMP
-	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
-	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
-
-	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
-	void (*post_schedule) (struct rq *this_rq);
-	void (*task_waking) (struct task_struct *task);
-	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
-
-	void (*set_cpus_allowed)(struct task_struct *p,
-				 const struct cpumask *newmask);
-
-	void (*rq_online)(struct rq *rq);
-	void (*rq_offline)(struct rq *rq);
-#endif
-
-	void (*set_curr_task) (struct rq *rq);
-	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
-	void (*task_fork) (struct task_struct *p);
-
-	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
-	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
-	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
-			     int oldprio);
-
-	unsigned int (*get_rr_interval) (struct rq *rq,
-					 struct task_struct *task);
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	void (*task_move_group) (struct task_struct *p, int on_rq);
-#endif
-};
-
 struct load_weight {
 	unsigned long weight, inv_weight;
 };
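
struct sched_class is the per-policy callback table; moving it out of the
public header makes it private to kernel/sched/. For orientation, a hedged
sketch of how a policy wires its callbacks into such a table (all demo_*
symbols are hypothetical, not from this patch):

	static const struct sched_class demo_sched_class = {
		.next		= &idle_sched_class,	/* classes form a priority list */
		.enqueue_task	= demo_enqueue_task,
		.dequeue_task	= demo_dequeue_task,
		.pick_next_task	= demo_pick_next_task,
		.put_prev_task	= demo_put_prev_task,
		.task_tick	= demo_task_tick,
	};
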
@@ -1274,8 +1107,10 @@ struct task_struct {
 	int exit_code, exit_signal;
 	int pdeath_signal;	/*  The signal sent when the parent dies  */
 	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
-	/* ??? */
+
+	/* Used for emulating ABI behavior of previous Linux versions */
 	unsigned int personality;
+
 	unsigned did_exec:1;
 	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
 				 * execve */
@@ -1327,7 +1162,7 @@ struct task_struct {
 
 	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	struct cputime prev_cputime;
 #endif
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
@@ -2681,28 +2516,7 @@ extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
 #ifdef CONFIG_CGROUP_SCHED
-
 extern struct task_group root_task_group;
-
-extern struct task_group *sched_create_group(struct task_group *parent);
-extern void sched_online_group(struct task_group *tg,
-			       struct task_group *parent);
-extern void sched_destroy_group(struct task_group *tg);
-extern void sched_offline_group(struct task_group *tg);
-extern void sched_move_task(struct task_struct *tsk);
-#ifdef CONFIG_FAIR_GROUP_SCHED
-extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
-extern unsigned long sched_group_shares(struct task_group *tg);
-#endif
-#ifdef CONFIG_RT_GROUP_SCHED
-extern int sched_group_set_rt_runtime(struct task_group *tg,
-				      long rt_runtime_us);
-extern long sched_group_rt_runtime(struct task_group *tg);
-extern int sched_group_set_rt_period(struct task_group *tg,
-				     long rt_period_us);
-extern long sched_group_rt_period(struct task_group *tg);
-extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
-#endif
 #endif /* CONFIG_CGROUP_SCHED */
 
 extern int task_can_switch_user(struct user_struct *up,