author		Ingo Molnar <mingo@elte.hu>	2007-10-15 11:00:12 -0400
committer	Ingo Molnar <mingo@elte.hu>	2007-10-15 11:00:12 -0400
commit		2d72376b3af1e7d4d4515ebfd0f4383f2e92c343
tree		a9f36173883f1309640f3a1e58b03a53422262d8 /kernel/sched.c
parent		2b1e315dd2822c99793485f9e53a73459fb399c1
sched: clean up schedstats, cnt -> count
rename all 'cnt' fields and variables to the less yucky 'count' name.
yuckage noticed by Andrew Morton.
no change in code, other than the /proc/sched_debug bkl_count string
getting a bit larger:
   text	   data	    bss	    dec	    hex	filename
  38236	   3506	     24	  41766	   a326	sched.o.before
  38240	   3506	     24	  41770	   a32a	sched.o.after
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
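
[Context for the diff below: schedstat_inc(), used at every call site in
this patch, only bumps the named field when CONFIG_SCHEDSTATS is enabled
and compiles away otherwise. A minimal sketch of the helper, modeled on
the kernel/stats.h definition of this era; treat it as illustrative, not
a verbatim quote:]

/* Sketch of the schedstat helpers (modeled on kernel/stats.h of this
 * era). The field name is substituted textually into (rq)->field++,
 * so the cnt -> count rename is purely mechanical. */
#ifdef CONFIG_SCHEDSTATS
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
#else
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
#endif

[Because the rename only touches the field names passed to these macros,
generated code cannot change, which matches the near-identical size
numbers above.]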
Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index cd2b4942fe35..ba9fa6c0ab65 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -349,19 +349,19 @@ struct rq {
 	unsigned long yld_exp_empty;
 	unsigned long yld_act_empty;
 	unsigned long yld_both_empty;
-	unsigned long yld_cnt;
+	unsigned long yld_count;
 
 	/* schedule() stats */
 	unsigned long sched_switch;
-	unsigned long sched_cnt;
+	unsigned long sched_count;
 	unsigned long sched_goidle;
 
 	/* try_to_wake_up() stats */
-	unsigned long ttwu_cnt;
+	unsigned long ttwu_count;
 	unsigned long ttwu_local;
 
 	/* BKL stats */
-	unsigned long bkl_cnt;
+	unsigned long bkl_count;
 #endif
 	struct lock_class_key rq_lock_key;
 };
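
[For orientation: the rq counters renamed above are the per-CPU numbers
exported through /proc/schedstat. A loose, illustrative sketch of such
an emitter follows; the real function is show_schedstat() in
kernel/sched_stats.h, where the field order and format version differ:]

/* Illustrative sketch only -- not the real show_schedstat(): how the
 * renamed per-rq counters could be printed for /proc/schedstat.
 * Field order is simplified here for readability. */
static void show_rq_counters_sketch(struct seq_file *seq, struct rq *rq, int cpu)
{
	seq_printf(seq, "cpu%d %lu %lu %lu %lu\n", cpu,
		   rq->yld_count,	/* sys_sched_yield() invocations */
		   rq->sched_count,	/* schedule() invocations */
		   rq->ttwu_count,	/* try_to_wake_up() invocations */
		   rq->bkl_count);	/* schedules entered with the BKL held */
}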
@@ -1481,7 +1481,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 
 	new_cpu = cpu;
 
-	schedstat_inc(rq, ttwu_cnt);
+	schedstat_inc(rq, ttwu_count);
 	if (cpu == this_cpu) {
 		schedstat_inc(rq, ttwu_local);
 		goto out_set_cpu;
@@ -2637,7 +2637,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 		sd_idle = 1;
 
-	schedstat_inc(sd, lb_cnt[idle]);
+	schedstat_inc(sd, lb_count[idle]);
 
 redo:
 	group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
@@ -2790,7 +2790,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 		sd_idle = 1;
 
-	schedstat_inc(sd, lb_cnt[CPU_NEWLY_IDLE]);
+	schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
 redo:
 	group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
 					&sd_idle, &cpus, NULL);
@@ -2924,7 +2924,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 	}
 
 	if (likely(sd)) {
-		schedstat_inc(sd, alb_cnt);
+		schedstat_inc(sd, alb_count);
 
 		if (move_one_task(target_rq, target_cpu, busiest_rq,
 				  sd, CPU_IDLE))
@@ -3414,11 +3414,11 @@ static inline void schedule_debug(struct task_struct *prev)
 
 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
 
-	schedstat_inc(this_rq(), sched_cnt);
+	schedstat_inc(this_rq(), sched_count);
 #ifdef CONFIG_SCHEDSTATS
 	if (unlikely(prev->lock_depth >= 0)) {
-		schedstat_inc(this_rq(), bkl_cnt);
-		schedstat_inc(prev, sched_info.bkl_cnt);
+		schedstat_inc(this_rq(), bkl_count);
+		schedstat_inc(prev, sched_info.bkl_count);
 	}
 #endif
 }
@@ -4558,7 +4558,7 @@ asmlinkage long sys_sched_yield(void)
 {
 	struct rq *rq = this_rq_lock();
 
-	schedstat_inc(rq, yld_cnt);
+	schedstat_inc(rq, yld_count);
 	current->sched_class->yield_task(rq);
 
 	/*
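
[The changelog's /proc/sched_debug note can be sanity-checked from
userspace once this patch is applied. A small, hypothetical checker
follows; the path is real, but the program and the exact line layout
(which varies by kernel version and needs CONFIG_SCHED_DEBUG plus
CONFIG_SCHEDSTATS) are illustrative:]

/* Hypothetical helper: print every /proc/sched_debug line mentioning
 * the renamed bkl_count field. Illustrative only; the file's layout
 * varies across kernel versions. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/sched_debug", "r");

	if (!f) {
		perror("fopen /proc/sched_debug");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "bkl_count"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}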