author    Ingo Molnar <mingo@elte.hu>  2007-07-09 12:51:58 -0400
committer Ingo Molnar <mingo@elte.hu>  2007-07-09 12:51:58 -0400
commit    6aa645ea5f7a246702e07f29edc7075d487ae4a3 (patch)
tree      e59f5426c6668262ef082eb46f49c963822b0d36 /kernel
parent    20b8a59f2461e1be911dce2cfafefab9d22e4eee (diff)
sched: cfs rq data types

Add the CFS rq data types to sched.c. (The old scheduler fields are still
intact; they are removed by a later patch.)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	84
1 file changed, 78 insertions(+), 6 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f8b8eda4494d..085418bedccd 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -91,6 +91,9 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 #define NS_TO_JIFFIES(TIME)	((TIME) / (1000000000 / HZ))
 #define JIFFIES_TO_NS(TIME)	((TIME) * (1000000000 / HZ))
 
+#define NICE_0_LOAD		SCHED_LOAD_SCALE
+#define NICE_0_SHIFT		SCHED_LOAD_SHIFT
+
 /*
  * These are the 'tuning knobs' of the scheduler:
  *
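
The two new defines anchor CFS's fixed-point load arithmetic: a nice-0 task
weighs exactly SCHED_LOAD_SCALE units (1 << 10 at this point in the tree), so
task weights behave like 10-bit fixed-point fractions of a whole CPU. A
minimal userspace sketch of the idea; the constants mirror the kernel's, but
the task weights below are made up for the demo:

#include <stdio.h>

/* Mirrors the kernel constants: SCHED_LOAD_SHIFT is 10, so a nice-0
 * task contributes exactly 1024 units of load (1.0 in fixed point). */
#define SCHED_LOAD_SHIFT	10
#define SCHED_LOAD_SCALE	(1UL << SCHED_LOAD_SHIFT)
#define NICE_0_LOAD		SCHED_LOAD_SCALE

int main(void)
{
	/* Hypothetical weights: a nice-0 task and a twice-as-heavy task. */
	unsigned long w_nice0 = NICE_0_LOAD;
	unsigned long w_heavy = 2 * NICE_0_LOAD;

	/* CPU share each weight implies, computed in integer fixed point. */
	unsigned long total = w_nice0 + w_heavy;
	printf("nice-0 share: %lu%%\n", 100UL * w_nice0 / total);
	printf("heavy  share: %lu%%\n", 100UL * w_heavy / total);
	return 0;
}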
@@ -218,9 +221,61 @@ static inline unsigned int task_timeslice(struct task_struct *p)
 }
 
 /*
- * These are the runqueue data structures:
+ * This is the priority-queue data structure of the RT scheduling class:
  */
+struct rt_prio_array {
+	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
+	struct list_head queue[MAX_RT_PRIO];
+};
+
+struct load_stat {
+	struct load_weight load;
+	u64 load_update_start, load_update_last;
+	unsigned long delta_fair, delta_exec, delta_stat;
+};
+
+/* CFS-related fields in a runqueue */
+struct cfs_rq {
+	struct load_weight load;
+	unsigned long nr_running;
+
+	s64 fair_clock;
+	u64 exec_clock;
+	s64 wait_runtime;
+	u64 sleeper_bonus;
+	unsigned long wait_runtime_overruns, wait_runtime_underruns;
+
+	struct rb_root tasks_timeline;
+	struct rb_node *rb_leftmost;
+	struct rb_node *rb_load_balance_curr;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	/* 'curr' points to the currently running entity on this cfs_rq.
+	 * It is set to NULL otherwise (i.e. when none are currently running).
+	 */
+	struct sched_entity *curr;
+	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
+
+	/* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
+	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
+	 * (like users, containers etc.)
+	 *
+	 * leaf_cfs_rq_list ties together the list of leaf cfs_rqs on a cpu.
+	 * This list is used during load balancing.
+	 */
+	struct list_head leaf_cfs_rq_list; /* Better name: task_cfs_rq_list? */
+#endif
+};
 
+/* Real-Time classes' related field in a runqueue: */
+struct rt_rq {
+	struct rt_prio_array active;
+	int rt_load_balance_idx;
+	struct list_head *rt_load_balance_head, *rt_load_balance_curr;
+};
+
+/*
+ * The prio-array type of the old scheduler:
+ */
 struct prio_array {
 	unsigned int nr_active;
 	DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */
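
struct cfs_rq carries the new rbtree timeline (tasks_timeline, with
rb_leftmost caching the next entity to run), while struct rt_prio_array keeps
the O(1) scheduler's trick for the RT class: one FIFO list per priority plus a
bitmap, so picking the next RT task is a find-first-bit followed by taking
that list's head. A self-contained sketch of the bitmap mechanism, with a byte
array and fixed-size queues standing in for the kernel's DECLARE_BITMAP and
list_head machinery (the toy_* names are ours, not the kernel's):

#include <stdio.h>
#include <string.h>

#define MAX_RT_PRIO	100	/* matches the kernel's value */
#define NTASKS		8	/* toy capacity per priority level */

/* Toy stand-in for rt_prio_array: a flag per priority plus FIFO queues. */
struct toy_rt_prio_array {
	unsigned char bitmap[MAX_RT_PRIO + 1];	/* 1 slot per prio + delimiter */
	int queue[MAX_RT_PRIO][NTASKS];		/* task ids, FIFO per prio */
	int count[MAX_RT_PRIO];
};

static void enqueue(struct toy_rt_prio_array *a, int task, int prio)
{
	a->queue[prio][a->count[prio]++] = task;	/* no overflow check: toy */
	a->bitmap[prio] = 1;
}

/* Pick the runnable task with the numerically lowest (= highest) priority:
 * scan for the first set bit, then take that queue's head. */
static int pick_next(const struct toy_rt_prio_array *a)
{
	for (int prio = 0; prio < MAX_RT_PRIO; prio++)
		if (a->bitmap[prio])
			return a->queue[prio][0];
	return -1;	/* nothing runnable */
}

int main(void)
{
	struct toy_rt_prio_array a;
	memset(&a, 0, sizeof(a));

	enqueue(&a, /*task=*/1, /*prio=*/50);
	enqueue(&a, /*task=*/2, /*prio=*/10);
	printf("next task: %d\n", pick_next(&a));	/* prints 2 */
	return 0;
}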
@@ -235,7 +290,7 @@ struct prio_array {
  * acquire operations must be ordered by ascending &runqueue.
  */
 struct rq {
-	spinlock_t lock;
+	spinlock_t lock;	/* runqueue lock */
 
 	/*
 	 * nr_running and cpu_load should be in the same cacheline because
@@ -243,14 +298,21 @@ struct rq {
 	 */
 	unsigned long nr_running;
 	unsigned long raw_weighted_load;
-#ifdef CONFIG_SMP
-	unsigned long cpu_load[3];
+	#define CPU_LOAD_IDX_MAX	5
+	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 	unsigned char idle_at_tick;
 #ifdef CONFIG_NO_HZ
 	unsigned char in_nohz_recently;
 #endif
+	struct load_stat ls;	/* capture load from *all* tasks on this cpu */
+	unsigned long nr_load_updates;
+	u64 nr_switches;
+
+	struct cfs_rq cfs;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	struct list_head leaf_cfs_rq_list; /* list of leaf cfs_rqs on this cpu */
 #endif
-	unsigned long long nr_switches;
+	struct rt_rq rt;
 
 	/*
 	 * This is part of a global counter where only the total sum
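
The structural shift in this hunk: struct rq stops being synonymous with the
old prio-array and instead embeds one sub-runqueue per scheduling class, cfs
and rt side by side. A hedged sketch of the resulting shape and the pick order
it implies (RT before CFS); the toy_* types and the pick_class helper are
illustrative, not kernel code:

#include <stdio.h>

/* Toy per-class runqueues, mirroring how struct rq embeds cfs and rt. */
struct toy_cfs_rq { unsigned long nr_running; };
struct toy_rt_rq  { unsigned long nr_running; };

struct toy_rq {
	unsigned long nr_running;	/* sum over all classes */
	struct toy_rt_rq  rt;		/* higher-priority class */
	struct toy_cfs_rq cfs;		/* fair class */
};

/* RT beats CFS: consult the higher-priority class's queue first. */
static const char *pick_class(const struct toy_rq *rq)
{
	if (rq->rt.nr_running)
		return "rt";
	if (rq->cfs.nr_running)
		return "cfs";
	return "idle";
}

int main(void)
{
	struct toy_rq rq = { .nr_running = 3, .rt = { 1 }, .cfs = { 2 } };
	printf("next class: %s\n", pick_class(&rq));	/* prints "rt" */
	return 0;
}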
@@ -261,13 +323,23 @@ struct rq {
 	unsigned long nr_uninterruptible;
 
 	unsigned long expired_timestamp;
-	/* Cached timestamp set by update_cpu_clock() */
 	unsigned long long most_recent_timestamp;
+
 	struct task_struct *curr, *idle;
 	unsigned long next_balance;
 	struct mm_struct *prev_mm;
+
 	struct prio_array *active, *expired, arrays[2];
 	int best_expired_prio;
+
+	u64 clock, prev_clock_raw;
+	s64 clock_max_delta;
+
+	unsigned int clock_warps, clock_overflows;
+	unsigned int clock_unstable_events;
+
+	struct sched_class *load_balance_class;
+
 	atomic_t nr_iowait;
 
 #ifdef CONFIG_SMP
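
The new clock fields turn sched_clock() into a filtered, monotonic per-CPU
clock, with counters for the pathologies being filtered out: backward jumps
(clock_warps) and implausibly large forward jumps (clock_overflows). The
update routine itself arrives later in the series; the sketch below is one
plausible shape for it, assuming a 2*TICK_NSEC sanity bound, and every
specific here is an assumption rather than the committed code:

#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC	1000000ULL	/* assumed 1 ms tick for the demo */

struct toy_rq_clock {
	uint64_t clock, prev_clock_raw;
	int64_t  clock_max_delta;
	unsigned int clock_warps, clock_overflows;
};

/* Fold a new raw sched_clock() reading into the filtered rq clock:
 * clamp backward jumps (warps) and implausibly large forward jumps
 * (overflows), and remember the largest delta ever accepted. */
static void update_clock(struct toy_rq_clock *c, uint64_t now)
{
	int64_t delta = (int64_t)(now - c->prev_clock_raw);

	if (delta < 0) {				/* time went backwards */
		c->clock_warps++;
		delta = 0;
	} else if (delta > 2 * (int64_t)TICK_NSEC) {	/* absurd jump */
		c->clock_overflows++;
		delta = TICK_NSEC;			/* credit one tick at most */
	}
	if (delta > c->clock_max_delta)
		c->clock_max_delta = delta;

	c->clock += delta;
	c->prev_clock_raw = now;
}

int main(void)
{
	struct toy_rq_clock c = { 0 };

	update_clock(&c, 500000);	/* normal forward progress */
	update_clock(&c, 400000);	/* a warp: counted, clamped to 0 */
	update_clock(&c, 90000000);	/* an overflow: clamped to one tick */
	printf("clock=%llu warps=%u overflows=%u\n",
	       (unsigned long long)c.clock, c.clock_warps, c.clock_overflows);
	return 0;
}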