author     Linus Torvalds <torvalds@linux-foundation.org>   2009-12-05 18:30:49 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-12-05 18:30:49 -0500
commit     897e81bea1fcfcd2c5cdb720c9efdb25da9ff374
tree       92cf33ed2c35c1ece633f09365702f1c8e24d415 /fs/proc
parent     c3fa27d1367fac63ac8533d6f20ea851d0d70a10
parent     0cf55e1ec08bb5a22e068309e2d8ba1180ab4239
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (35 commits)
sched, cputime: Introduce thread_group_times()
sched, cputime: Cleanups related to task_times()
Revert "sched, x86: Optimize branch hint in __switch_to()"
sched: Fix isolcpus boot option
sched: Revert 498657a478c60be092208422fefa9c7b248729c2
sched, time: Define nsecs_to_jiffies()
sched: Remove task_{u,s,g}time()
sched: Introduce task_times() to replace task_{u,s}time() pair
sched: Limit the number of scheduler debug messages
sched.c: Call debug_show_all_locks() when dumping all tasks
sched, x86: Optimize branch hint in __switch_to()
sched: Optimize branch hint in context_switch()
sched: Optimize branch hint in pick_next_task_fair()
sched_feat_write(): Update ppos instead of file->f_pos
sched: Sched_rt_periodic_timer vs cpu hotplug
sched, kvm: Fix race condition involving sched_in_preempt_notifers
sched: More generic WAKE_AFFINE vs select_idle_sibling()
sched: Cleanup select_task_rq_fair()
sched: Fix granularity of task_u/stime()
sched: Fix/add missing update_rq_clock() calls
...
Diffstat (limited to 'fs/proc')
-rw-r--r--   fs/proc/array.c | 23
-rw-r--r--   fs/proc/stat.c  | 19
2 files changed, 28 insertions, 14 deletions
```diff
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 822c2d506518..4badde179b18 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -410,6 +410,16 @@ static void task_show_stack_usage(struct seq_file *m, struct task_struct *task)
 }
 #endif /* CONFIG_MMU */
 
+static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
+{
+	seq_printf(m, "Cpus_allowed:\t");
+	seq_cpumask(m, &task->cpus_allowed);
+	seq_printf(m, "\n");
+	seq_printf(m, "Cpus_allowed_list:\t");
+	seq_cpumask_list(m, &task->cpus_allowed);
+	seq_printf(m, "\n");
+}
+
 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
 		struct pid *pid, struct task_struct *task)
 {
@@ -424,6 +434,7 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
 	}
 	task_sig(m, task);
 	task_cap(m, task);
+	task_cpus_allowed(m, task);
 	cpuset_task_status_allowed(m, task);
 #if defined(CONFIG_S390)
 	task_show_regs(m, task);
@@ -495,20 +506,17 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
 	/* add up live thread stats at the group level */
 	if (whole) {
-		struct task_cputime cputime;
 		struct task_struct *t = task;
 		do {
 			min_flt += t->min_flt;
 			maj_flt += t->maj_flt;
-			gtime = cputime_add(gtime, task_gtime(t));
+			gtime = cputime_add(gtime, t->gtime);
 			t = next_thread(t);
 		} while (t != task);
 
 		min_flt += sig->min_flt;
 		maj_flt += sig->maj_flt;
-		thread_group_cputime(task, &cputime);
-		utime = cputime.utime;
-		stime = cputime.stime;
+		thread_group_times(task, &utime, &stime);
 		gtime = cputime_add(gtime, sig->gtime);
 	}
 
@@ -524,9 +532,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 	if (!whole) {
 		min_flt = task->min_flt;
 		maj_flt = task->maj_flt;
-		utime = task_utime(task);
-		stime = task_stime(task);
-		gtime = task_gtime(task);
+		task_times(task, &utime, &stime);
+		gtime = task->gtime;
 	}
 
 	/* scale priority and nice values from timeslices to -20..20 */
```
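The new task_cpus_allowed() helper emits two fields into /proc/<pid>/status: Cpus_allowed (the affinity mask in hex) and Cpus_allowed_list (the same mask as a CPU range list). As a rough illustration only, not part of the patch, a userspace C sketch that reads those two fields back could look like this:

```c
/*
 * Illustration (not from this commit): print the Cpus_allowed and
 * Cpus_allowed_list fields of the current process from /proc/self/status.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/self/status", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* "Cpus_allowed" is a prefix of both field names */
		if (strncmp(line, "Cpus_allowed", 12) == 0)
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}
```

On a 4-CPU machine with no affinity restriction, the output would look something like `Cpus_allowed: f` followed by `Cpus_allowed_list: 0-3`.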
```diff
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 7cc726c6d70a..b9b7aad2003d 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -27,7 +27,7 @@ static int show_stat(struct seq_file *p, void *v)
 	int i, j;
 	unsigned long jif;
 	cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
-	cputime64_t guest;
+	cputime64_t guest, guest_nice;
 	u64 sum = 0;
 	u64 sum_softirq = 0;
 	unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
@@ -36,7 +36,7 @@ static int show_stat(struct seq_file *p, void *v)
 
 	user = nice = system = idle = iowait =
 		irq = softirq = steal = cputime64_zero;
-	guest = cputime64_zero;
+	guest = guest_nice = cputime64_zero;
 	getboottime(&boottime);
 	jif = boottime.tv_sec;
 
@@ -51,6 +51,8 @@ static int show_stat(struct seq_file *p, void *v)
 		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
 		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
 		guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
+		guest_nice = cputime64_add(guest_nice,
+			kstat_cpu(i).cpustat.guest_nice);
 		for_each_irq_nr(j) {
 			sum += kstat_irqs_cpu(j, i);
 		}
@@ -65,7 +67,8 @@ static int show_stat(struct seq_file *p, void *v)
 	}
 	sum += arch_irq_stat();
 
-	seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
+	seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu "
+		"%llu\n",
 		(unsigned long long)cputime64_to_clock_t(user),
 		(unsigned long long)cputime64_to_clock_t(nice),
 		(unsigned long long)cputime64_to_clock_t(system),
@@ -74,7 +77,8 @@ static int show_stat(struct seq_file *p, void *v)
 		(unsigned long long)cputime64_to_clock_t(irq),
 		(unsigned long long)cputime64_to_clock_t(softirq),
 		(unsigned long long)cputime64_to_clock_t(steal),
-		(unsigned long long)cputime64_to_clock_t(guest));
+		(unsigned long long)cputime64_to_clock_t(guest),
+		(unsigned long long)cputime64_to_clock_t(guest_nice));
 	for_each_online_cpu(i) {
 
 		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
@@ -88,8 +92,10 @@ static int show_stat(struct seq_file *p, void *v)
 		softirq = kstat_cpu(i).cpustat.softirq;
 		steal = kstat_cpu(i).cpustat.steal;
 		guest = kstat_cpu(i).cpustat.guest;
+		guest_nice = kstat_cpu(i).cpustat.guest_nice;
 		seq_printf(p,
-			"cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
+			"cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu "
+			"%llu\n",
			i,
			(unsigned long long)cputime64_to_clock_t(user),
			(unsigned long long)cputime64_to_clock_t(nice),
@@ -99,7 +105,8 @@ static int show_stat(struct seq_file *p, void *v)
			(unsigned long long)cputime64_to_clock_t(irq),
			(unsigned long long)cputime64_to_clock_t(softirq),
			(unsigned long long)cputime64_to_clock_t(steal),
-			(unsigned long long)cputime64_to_clock_t(guest));
+			(unsigned long long)cputime64_to_clock_t(guest),
+			(unsigned long long)cputime64_to_clock_t(guest_nice));
 	}
 	seq_printf(p, "intr %llu", (unsigned long long)sum);
 
```
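With guest_nice accumulated and printed, the aggregate `cpu` line of /proc/stat (and each per-CPU `cpuN` line) grows from nine to ten columns. A minimal userspace sketch, not part of this commit, showing how a parser can accept both the old nine-column and the new ten-column layout:

```c
/*
 * Illustration (not from this commit): parse the aggregate "cpu" line of
 * /proc/stat. Older kernels emit nine columns; this change appends a tenth,
 * guest_nice, so the last field is treated as optional.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long user, nice, system, idle, iowait,
			   irq, softirq, steal, guest, guest_nice = 0;
	FILE *f = fopen("/proc/stat", "r");
	int n;

	if (!f) {
		perror("fopen");
		return 1;
	}
	n = fscanf(f, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
		   &user, &nice, &system, &idle, &iowait,
		   &irq, &softirq, &steal, &guest, &guest_nice);
	fclose(f);
	if (n < 9) {
		fprintf(stderr, "unexpected /proc/stat format\n");
		return 1;
	}
	printf("guest=%llu guest_nice=%llu (%d fields)\n", guest, guest_nice, n);
	return 0;
}
```

Treating the tenth field as optional keeps the parser working on kernels that predate this change: fscanf() simply returns 9 when the guest_nice column is absent, and guest_nice stays at its initial value of 0.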