author		Linus Torvalds <torvalds@linux-foundation.org>	2012-03-20 13:31:44 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-20 13:31:44 -0400
commit		2ba68940c893c8f0bfc8573c041254251bb6aeab (patch)
tree		fa83ebb01d32abd98123fa28f9f6f0b3eaeee25d /kernel/sched/sched.h
parent		9c2b957db1772ebf942ae7a9346b14eba6c8ca66 (diff)
parent		600e145882802d6ccbfe2c4aea243d97caeb91a9 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes for v3.4 from Ingo Molnar

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
  printk: Make it compile with !CONFIG_PRINTK
  sched/x86: Fix overflow in cyc2ns_offset
  sched: Fix nohz load accounting -- again!
  sched: Update yield() docs
  printk/sched: Introduce special printk_sched() for those awkward moments
  sched/nohz: Correctly initialize 'next_balance' in 'nohz' idle balancer
  sched: Cleanup cpu_active madness
  sched: Fix load-balance wreckage
  sched: Clean up parameter passing of proc_sched_autogroup_set_nice()
  sched: Ditch per cgroup task lists for load-balancing
  sched: Rename load-balancing fields
  sched: Move load-balancing arguments into helper struct
  sched/rt: Do not submit new work when PI-blocked
  sched/rt: Prevent idle task boosting
  sched/wait: Add __wake_up_all_locked() API
  sched/rt: Document scheduler related skip-resched-check sites
  sched/rt: Use schedule_preempt_disabled()
  sched/rt: Add schedule_preempt_disabled()
  sched/rt: Do not throttle when PI boosting
  sched/rt: Keep period timer ticking when rt throttling is active
  ...
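Among the commits above, 'sched/rt: Add schedule_preempt_disabled()' introduces a helper for callers that must schedule while preemption is disabled. A minimal sketch of the pattern it consolidates, inferred from the commit titles rather than taken from the merged tree (only the helper's name comes from this pull; the body shown is an assumption):

/*
 * Sketch, not the merged implementation: schedule with preemption
 * disabled on entry and return with it disabled again. The three
 * calls used here are long-standing kernel APIs; their use in this
 * exact order is assumed for illustration.
 */
void schedule_preempt_disabled(void)
{
	preempt_enable_no_resched();	/* drop the preempt count without a resched check */
	schedule();			/* let the scheduler pick the next task */
	preempt_disable();		/* callers expect preemption off on return */
}

Idle loops that previously open-coded these three steps can then call the helper instead, which is what 'sched/rt: Use schedule_preempt_disabled()' suggests.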
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--	kernel/sched/sched.h	15
1 file changed, 2 insertions(+), 13 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b4cd6d8ea150..42b1f304b044 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -36,11 +36,7 @@ extern __read_mostly int scheduler_running;
 
 /*
  * These are the 'tuning knobs' of the scheduler:
- *
- * default timeslice is 100 msecs (used only for SCHED_RR tasks).
- * Timeslices get refilled after they expire.
  */
-#define DEF_TIMESLICE		(100 * HZ / 1000)
 
 /*
  * single value that denotes runtime == period, ie unlimited time.
@@ -216,9 +212,6 @@ struct cfs_rq {
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
 
-	struct list_head tasks;
-	struct list_head *balance_iterator;
-
 	/*
 	 * 'curr' points to currently running entity on this cfs_rq.
 	 * It is set to NULL otherwise (i.e when none are currently running).
@@ -246,11 +239,6 @@ struct cfs_rq {
 
 #ifdef CONFIG_SMP
 	/*
-	 * the part of load.weight contributed by tasks
-	 */
-	unsigned long task_weight;
-
-	/*
 	 * h_load = weight * f(tg)
 	 *
 	 * Where f(tg) is the recursive weight fraction assigned to
@@ -424,6 +412,8 @@ struct rq {
 	int cpu;
 	int online;
 
+	struct list_head cfs_tasks;
+
 	u64 rt_avg;
 	u64 age_stamp;
 	u64 idle_stamp;
@@ -462,7 +452,6 @@ struct rq {
 	unsigned int yld_count;
 
 	/* schedule() stats */
-	unsigned int sched_switch;
 	unsigned int sched_count;
 	unsigned int sched_goidle;
 
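Taken together, these hunks replace the per-cfs_rq load-balancing bookkeeping (tasks, balance_iterator, task_weight) with a single per-CPU list, rq->cfs_tasks, matching 'sched: Ditch per cgroup task lists for load-balancing' above. A rough sketch of how a balancer could walk the new list; the cfs_tasks field comes from the diff, but the se.group_node linkage and the helper below are assumptions for illustration:

/*
 * Sketch only: iterate every CFS task queued on this runqueue via the
 * flat per-rq list added above, instead of walking one list per task
 * group's cfs_rq. The se.group_node linkage member is assumed.
 */
static void walk_cfs_tasks(struct rq *rq, void (*fn)(struct task_struct *p))
{
	struct task_struct *p;

	list_for_each_entry(p, &rq->cfs_tasks, se.group_node)
		fn(p);
}

The list itself would be set up once per CPU (e.g. INIT_LIST_HEAD(&rq->cfs_tasks) during scheduler init), with the enqueue/dequeue paths keeping it current; those hooks are likewise assumed here.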