 fs/proc/base.c        |  2 +-
 include/linux/sched.h | 12 ++++++------
 kernel/delayacct.c    |  2 +-
 kernel/sched.c        | 24 ++++++++++++------------
 kernel/sched_debug.c  |  8 ++++----
 kernel/sched_stats.h  | 24 ++++++++++++------------
 6 files changed, 36 insertions(+), 36 deletions(-)
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 19489b0d5554..e5d0953d4db1 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -304,7 +304,7 @@ static int proc_pid_schedstat(struct task_struct *task, char *buffer)
 	return sprintf(buffer, "%llu %llu %lu\n",
 			task->sched_info.cpu_time,
 			task->sched_info.run_delay,
-			task->sched_info.pcnt);
+			task->sched_info.pcount);
 }
 #endif
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2c33227b0f82..d5daca4bcc6b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -614,7 +614,7 @@ struct reclaim_state;
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 struct sched_info {
 	/* cumulative counters */
-	unsigned long pcnt;	      /* # of times run on this cpu */
+	unsigned long pcount;	      /* # of times run on this cpu */
 	unsigned long long cpu_time,  /* time spent on the cpu */
 			   run_delay; /* time spent waiting on a runqueue */
 
@@ -623,7 +623,7 @@ struct sched_info {
 			   last_queued; /* when we were last queued to run */
 #ifdef CONFIG_SCHEDSTATS
 	/* BKL stats */
-	unsigned long bkl_cnt;
+	unsigned long bkl_count;
 #endif
 };
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
@@ -759,7 +759,7 @@ struct sched_domain {
 
 #ifdef CONFIG_SCHEDSTATS
 	/* load_balance() stats */
-	unsigned long lb_cnt[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_count[CPU_MAX_IDLE_TYPES];
 	unsigned long lb_failed[CPU_MAX_IDLE_TYPES];
 	unsigned long lb_balanced[CPU_MAX_IDLE_TYPES];
 	unsigned long lb_imbalance[CPU_MAX_IDLE_TYPES];
@@ -769,17 +769,17 @@ struct sched_domain {
 	unsigned long lb_nobusyq[CPU_MAX_IDLE_TYPES];
 
 	/* Active load balancing */
-	unsigned long alb_cnt;
+	unsigned long alb_count;
 	unsigned long alb_failed;
 	unsigned long alb_pushed;
 
 	/* SD_BALANCE_EXEC stats */
-	unsigned long sbe_cnt;
+	unsigned long sbe_count;
 	unsigned long sbe_balanced;
 	unsigned long sbe_pushed;
 
 	/* SD_BALANCE_FORK stats */
-	unsigned long sbf_cnt;
+	unsigned long sbf_count;
 	unsigned long sbf_balanced;
 	unsigned long sbf_pushed;
 
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 81e697829633..09e9574eeb26 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -119,7 +119,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
 	 * No locking available for sched_info (and too expensive to add one)
 	 * Mitigate by taking snapshot of values
 	 */
-	t1 = tsk->sched_info.pcnt;
+	t1 = tsk->sched_info.pcount;
 	t2 = tsk->sched_info.run_delay;
 	t3 = tsk->sched_info.cpu_time;
 
diff --git a/kernel/sched.c b/kernel/sched.c
index cd2b4942fe35..ba9fa6c0ab65 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -349,19 +349,19 @@ struct rq {
 	unsigned long yld_exp_empty;
 	unsigned long yld_act_empty;
 	unsigned long yld_both_empty;
-	unsigned long yld_cnt;
+	unsigned long yld_count;
 
 	/* schedule() stats */
 	unsigned long sched_switch;
-	unsigned long sched_cnt;
+	unsigned long sched_count;
 	unsigned long sched_goidle;
 
 	/* try_to_wake_up() stats */
-	unsigned long ttwu_cnt;
+	unsigned long ttwu_count;
 	unsigned long ttwu_local;
 
 	/* BKL stats */
-	unsigned long bkl_cnt;
+	unsigned long bkl_count;
 #endif
 	struct lock_class_key rq_lock_key;
 };
@@ -1481,7 +1481,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 
 	new_cpu = cpu;
 
-	schedstat_inc(rq, ttwu_cnt);
+	schedstat_inc(rq, ttwu_count);
 	if (cpu == this_cpu) {
 		schedstat_inc(rq, ttwu_local);
 		goto out_set_cpu;
@@ -2637,7 +2637,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 		sd_idle = 1;
 
-	schedstat_inc(sd, lb_cnt[idle]);
+	schedstat_inc(sd, lb_count[idle]);
 
 redo:
 	group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
@@ -2790,7 +2790,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 		sd_idle = 1;
 
-	schedstat_inc(sd, lb_cnt[CPU_NEWLY_IDLE]);
+	schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
 redo:
 	group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
 				   &sd_idle, &cpus, NULL);
@@ -2924,7 +2924,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 	}
 
 	if (likely(sd)) {
-		schedstat_inc(sd, alb_cnt);
+		schedstat_inc(sd, alb_count);
 
 		if (move_one_task(target_rq, target_cpu, busiest_rq,
 				  sd, CPU_IDLE))
@@ -3414,11 +3414,11 @@ static inline void schedule_debug(struct task_struct *prev)
 
 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
 
-	schedstat_inc(this_rq(), sched_cnt);
+	schedstat_inc(this_rq(), sched_count);
 #ifdef CONFIG_SCHEDSTATS
 	if (unlikely(prev->lock_depth >= 0)) {
-		schedstat_inc(this_rq(), bkl_cnt);
-		schedstat_inc(prev, sched_info.bkl_cnt);
+		schedstat_inc(this_rq(), bkl_count);
+		schedstat_inc(prev, sched_info.bkl_count);
 	}
 #endif
 }
@@ -4558,7 +4558,7 @@ asmlinkage long sys_sched_yield(void)
 {
 	struct rq *rq = this_rq_lock();
 
-	schedstat_inc(rq, yld_cnt);
+	schedstat_inc(rq, yld_count);
 	current->sched_class->yield_task(rq);
 
 	/*
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 4659c90c3418..be79cd6d9e80 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -137,8 +137,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, "  .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_SCHEDSTATS
-	SEQ_printf(m, "  .%-30s: %ld\n", "bkl_cnt",
-			rq->bkl_cnt);
+	SEQ_printf(m, "  .%-30s: %ld\n", "bkl_count",
+			rq->bkl_count);
 #endif
 	SEQ_printf(m, "  .%-30s: %ld\n", "nr_spread_over",
 			cfs_rq->nr_spread_over);
@@ -342,7 +342,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	PN(se.exec_max);
 	PN(se.slice_max);
 	PN(se.wait_max);
-	P(sched_info.bkl_cnt);
+	P(sched_info.bkl_count);
 #endif
 	SEQ_printf(m, "%-25s:%20Ld\n",
 		   "nr_switches", (long long)(p->nvcsw + p->nivcsw));
@@ -370,7 +370,7 @@ void proc_sched_set_task(struct task_struct *p)
 	p->se.exec_max = 0;
 	p->se.slice_max = 0;
 	p->se.wait_max = 0;
-	p->sched_info.bkl_cnt = 0;
+	p->sched_info.bkl_count = 0;
 #endif
 	p->se.sum_exec_runtime = 0;
 	p->se.prev_sum_exec_runtime = 0;
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 1d9ec98c38de..1c084842c3e7 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -16,18 +16,18 @@ static int show_schedstat(struct seq_file *seq, void *v)
 	struct rq *rq = cpu_rq(cpu);
 #ifdef CONFIG_SMP
 	struct sched_domain *sd;
-	int dcnt = 0;
+	int dcount = 0;
 #endif
 
 	/* runqueue-specific stats */
 	seq_printf(seq,
 		"cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %llu %llu %lu",
 		cpu, rq->yld_both_empty,
-		rq->yld_act_empty, rq->yld_exp_empty, rq->yld_cnt,
-		rq->sched_switch, rq->sched_cnt, rq->sched_goidle,
-		rq->ttwu_cnt, rq->ttwu_local,
+		rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
+		rq->sched_switch, rq->sched_count, rq->sched_goidle,
+		rq->ttwu_count, rq->ttwu_local,
 		rq->rq_sched_info.cpu_time,
-		rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt);
+		rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
 
 	seq_printf(seq, "\n");
 
@@ -39,12 +39,12 @@ static int show_schedstat(struct seq_file *seq, void *v)
 		char mask_str[NR_CPUS];
 
 		cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
-		seq_printf(seq, "domain%d %s", dcnt++, mask_str);
+		seq_printf(seq, "domain%d %s", dcount++, mask_str);
 		for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
 				itype++) {
 			seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu "
 					"%lu",
-				sd->lb_cnt[itype],
+				sd->lb_count[itype],
 				sd->lb_balanced[itype],
 				sd->lb_failed[itype],
 				sd->lb_imbalance[itype],
@@ -55,9 +55,9 @@ static int show_schedstat(struct seq_file *seq, void *v)
 		}
 		seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu"
 				" %lu %lu %lu\n",
-			sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
-			sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
-			sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
+			sd->alb_count, sd->alb_failed, sd->alb_pushed,
+			sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
+			sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
 			sd->ttwu_wake_remote, sd->ttwu_move_affine,
 			sd->ttwu_move_balance);
 	}
@@ -101,7 +101,7 @@ rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
 {
 	if (rq) {
 		rq->rq_sched_info.run_delay += delta;
-		rq->rq_sched_info.pcnt++;
+		rq->rq_sched_info.pcount++;
 	}
 }
 
@@ -164,7 +164,7 @@ static void sched_info_arrive(struct task_struct *t)
 	sched_info_dequeued(t);
 	t->sched_info.run_delay += delta;
 	t->sched_info.last_arrival = now;
-	t->sched_info.pcnt++;
+	t->sched_info.pcount++;
 
 	rq_sched_info_arrive(task_rq(t), delta);
 }