author		Linus Torvalds <torvalds@linux-foundation.org>	2010-02-28 13:31:01 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-02-28 13:31:01 -0500
commit		f66ffdedbf0fc059a92219bb08c1dbcac88f074b (patch)
tree		9db4ad51764455123130e82fb7acf4f0a0be58ce /include/linux/sched.h
parent		2531216f236cb2a1f39ffa12a4a9339541e52191 (diff)
parent		dd5feea14a7de4edbd9f36db1a2db785de91b88d (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (25 commits)
  sched: Fix SCHED_MC regression caused by change in sched cpu_power
  sched: Don't use possibly stale sched_class
  kthread, sched: Remove reference to kthread_create_on_cpu
  sched: cpuacct: Use bigger percpu counter batch values for stats counters
  percpu_counter: Make __percpu_counter_add an inline function on UP
  sched: Remove member rt_se from struct rt_rq
  sched: Change usage of rt_rq->rt_se to rt_rq->tg->rt_se[cpu]
  sched: Remove unused update_shares_locked()
  sched: Use for_each_bit
  sched: Queue a deboosted task to the head of the RT prio queue
  sched: Implement head queueing for sched_rt
  sched: Extend enqueue_task to allow head queueing
  sched: Remove USER_SCHED
  sched: Fix the place where group powers are updated
  sched: Assume *balance is valid
  sched: Remove load_balance_newidle()
  sched: Unify load_balance{,_newidle}()
  sched: Add a lock break for PREEMPT=y
  sched: Remove from fwd decls
  sched: Remove rq_iterator from move_one_task
  ...

Fix up trivial conflicts in kernel/sched.c
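Among the commits pulled in above, the head-queueing series ("Extend enqueue_task to allow head queueing", "Queue a deboosted task to the head of the RT prio queue") changes where a requeued task lands on its run list. Below is a minimal caller-side sketch written against the new sched_class prototypes shown in the diff further down; the helper name my_change_prio and its surrounding logic are hypothetical illustrations, not the actual kernel/sched.c deboost path.

#include <linux/sched.h>

/*
 * Illustrative only: a hypothetical priority-change helper showing why the
 * new "head" argument exists.  When a runnable task is deboosted, requeueing
 * it at the head of its new priority list keeps it ahead of peers it was
 * already running in front of, instead of sending it to the back of the line.
 */
struct rq;	/* opaque runqueue; the real type is private to kernel/sched.c */

static void my_change_prio(struct rq *rq, struct task_struct *p, int newprio)
{
	int oldprio = p->prio;
	bool deboosted = newprio > oldprio;	/* larger value == lower priority */

	p->sched_class->dequeue_task(rq, p, 0);
	p->prio = newprio;
	/* Only a deboosted task asks for head queueing. */
	p->sched_class->enqueue_task(rq, p, 0, deboosted);
}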
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	25
1 file changed, 3 insertions(+), 22 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1f5fa53b46b1..0eef87b58ea5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -740,14 +740,6 @@ struct user_struct {
 	uid_t uid;
 	struct user_namespace *user_ns;
 
-#ifdef CONFIG_USER_SCHED
-	struct task_group *tg;
-#ifdef CONFIG_SYSFS
-	struct kobject kobj;
-	struct delayed_work work;
-#endif
-#endif
-
 #ifdef CONFIG_PERF_EVENTS
 	atomic_long_t locked_vm;
 #endif
@@ -1087,7 +1079,8 @@ struct sched_domain;
 struct sched_class {
 	const struct sched_class *next;
 
-	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
+	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
+			      bool head);
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
 	void (*yield_task) (struct rq *rq);
 
@@ -1099,14 +1092,6 @@ struct sched_class {
 #ifdef CONFIG_SMP
 	int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
 
-	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
-			struct rq *busiest, unsigned long max_load_move,
-			struct sched_domain *sd, enum cpu_idle_type idle,
-			int *all_pinned, int *this_best_prio);
-
-	int (*move_one_task) (struct rq *this_rq, int this_cpu,
-			      struct rq *busiest, struct sched_domain *sd,
-			      enum cpu_idle_type idle);
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
 	void (*task_waking) (struct rq *this_rq, struct task_struct *task);
@@ -2520,13 +2505,9 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
 extern void normalize_rt_tasks(void);
 
-#ifdef CONFIG_GROUP_SCHED
+#ifdef CONFIG_CGROUP_SCHED
 
 extern struct task_group init_task_group;
-#ifdef CONFIG_USER_SCHED
-extern struct task_group root_task_group;
-extern void set_tg_uid(struct user_struct *user);
-#endif
 
 extern struct task_group *sched_create_group(struct task_group *parent);
 extern void sched_destroy_group(struct task_group *tg);
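
On the implementation side, each scheduling class now receives the head flag through its enqueue_task hook. A minimal, self-contained sketch of what honoring that flag can look like for a linked-list run queue follows; the my_prio_queue/my_entity types and my_enqueue_entity helper are made up for illustration and are not the kernel's struct rt_rq / sched_rt_entity code.

#include <linux/list.h>
#include <linux/types.h>

/* Hypothetical per-priority run list, standing in for the real rt_rq lists. */
struct my_prio_queue {
	struct list_head tasks;
};

struct my_entity {
	struct list_head run_list;
};

/* Honor the new "head" flag: front of the list runs next, tail is the default. */
static void my_enqueue_entity(struct my_prio_queue *q, struct my_entity *se,
			      bool head)
{
	if (head)
		list_add(&se->run_list, &q->tasks);
	else
		list_add_tail(&se->run_list, &q->tasks);
}

Judging from the commit titles in this pull ("Implement head queueing for sched_rt"), the distinction matters mainly for the RT class; a class with no use for head queueing can simply ignore the extra argument.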