From 2d72376b3af1e7d4d4515ebfd0f4383f2e92c343 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 15 Oct 2007 17:00:12 +0200
Subject: sched: clean up schedstats, cnt -> count

rename all 'cnt' fields and variables to the less yucky 'count' name.

yuckage noticed by Andrew Morton.

no change in code, other than the /proc/sched_debug bkl_count string got
a bit larger:

   text    data     bss     dec     hex filename
  38236    3506      24   41766    a326 sched.o.before
  38240    3506      24   41770    a32a sched.o.after

Signed-off-by: Ingo Molnar
Reviewed-by: Thomas Gleixner
---
 kernel/sched.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index cd2b4942fe35..ba9fa6c0ab65 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -349,19 +349,19 @@ struct rq {
 	unsigned long yld_exp_empty;
 	unsigned long yld_act_empty;
 	unsigned long yld_both_empty;
-	unsigned long yld_cnt;
+	unsigned long yld_count;
 
 	/* schedule() stats */
 	unsigned long sched_switch;
-	unsigned long sched_cnt;
+	unsigned long sched_count;
 	unsigned long sched_goidle;
 
 	/* try_to_wake_up() stats */
-	unsigned long ttwu_cnt;
+	unsigned long ttwu_count;
 	unsigned long ttwu_local;
 
 	/* BKL stats */
-	unsigned long bkl_cnt;
+	unsigned long bkl_count;
 #endif
 	struct lock_class_key rq_lock_key;
 };
@@ -1481,7 +1481,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 
 	new_cpu = cpu;
 
-	schedstat_inc(rq, ttwu_cnt);
+	schedstat_inc(rq, ttwu_count);
 	if (cpu == this_cpu) {
 		schedstat_inc(rq, ttwu_local);
 		goto out_set_cpu;
@@ -2637,7 +2637,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 		sd_idle = 1;
 
-	schedstat_inc(sd, lb_cnt[idle]);
+	schedstat_inc(sd, lb_count[idle]);
 
 redo:
 	group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
@@ -2790,7 +2790,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 		sd_idle = 1;
 
-	schedstat_inc(sd, lb_cnt[CPU_NEWLY_IDLE]);
+	schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
 redo:
 	group = find_busiest_group(sd, this_cpu, &imbalance,
 				   CPU_NEWLY_IDLE, &sd_idle, &cpus, NULL);
@@ -2924,7 +2924,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 	}
 
 	if (likely(sd)) {
-		schedstat_inc(sd, alb_cnt);
+		schedstat_inc(sd, alb_count);
 
 		if (move_one_task(target_rq, target_cpu, busiest_rq,
 				  sd, CPU_IDLE))
@@ -3414,11 +3414,11 @@ static inline void schedule_debug(struct task_struct *prev)
 
 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
 
-	schedstat_inc(this_rq(), sched_cnt);
+	schedstat_inc(this_rq(), sched_count);
 #ifdef CONFIG_SCHEDSTATS
 	if (unlikely(prev->lock_depth >= 0)) {
-		schedstat_inc(this_rq(), bkl_cnt);
-		schedstat_inc(prev, sched_info.bkl_cnt);
+		schedstat_inc(this_rq(), bkl_count);
+		schedstat_inc(prev, sched_info.bkl_count);
 	}
 #endif
 }
@@ -4558,7 +4558,7 @@ asmlinkage long sys_sched_yield(void)
 {
 	struct rq *rq = this_rq_lock();
 
-	schedstat_inc(rq, yld_cnt);
+	schedstat_inc(rq, yld_count);
 	current->sched_class->yield_task(rq);
 
 	/*
--
cgit v1.2.2
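
For context (not part of the patch above): schedstat_inc() is just a counter
bump compiled in only under CONFIG_SCHEDSTATS, which is why this rename is
purely cosmetic. A minimal sketch of how such a macro is commonly defined in
kernels of this era follows; the exact definition in kernel/sched.c may
differ slightly:

	/* Sketch only: bump a schedstats field on the given object when enabled. */
	#ifdef CONFIG_SCHEDSTATS
	# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
	#else
	# define schedstat_inc(rq, field)	do { } while (0)
	#endif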