about summary refs log tree commit diff stats
path: root/fs/proc
diff options
context:
space:
mode:
authorRavikiran G Thirumalai <kiran@scalex86.org>2007-07-19 04:47:53 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-07-19 13:04:43 -0400
commit4004c69ad68dd03733179277280ea2946990ba36 (patch)
tree521f206229832f757c0f7652101d187a7a6ff5c8 /fs/proc
parenta0a9983509f45b2225ca87fdcf7b40ea916834ed (diff)
Avoid too many remote cpu references due to /proc/stat
Optimize show_stat to collect per-irq information just once. On x86_64, with newer kernel versions, kstat_irqs is a bit of a problem. On every call to kstat_irqs, the process brings in per-cpu data from all online cpus. Doing this for NR_IRQS, which is now 256 + 32 * NR_CPUS results in (256+32*63) * 63 remote cpu references on a 64 cpu config. Considering the fact that we already compute this value per-cpu, we can save on the remote references as below. Signed-off-by: Alok N Kataria <alok.kataria@calsoftinc.com> Signed-off-by: Ravikiran Thirumalai <kiran@scalex86.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/proc')
-rw-r--r--	fs/proc/proc_misc.c	15
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index d24b8d46059a..f133afebed7a 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -445,6 +445,11 @@ static int show_stat(struct seq_file *p, void *v)
 	cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
 	u64 sum = 0;
 	struct timespec boottime;
+	unsigned int *per_irq_sum;
+
+	per_irq_sum = kzalloc(sizeof(unsigned int)*NR_IRQS, GFP_KERNEL);
+	if (!per_irq_sum)
+		return -ENOMEM;
 
 	user = nice = system = idle = iowait =
 		irq = softirq = steal = cputime64_zero;
@@ -462,8 +467,11 @@ static int show_stat(struct seq_file *p, void *v)
 		irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
 		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
 		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
-		for (j = 0 ; j < NR_IRQS ; j++)
-			sum += kstat_cpu(i).irqs[j];
+		for (j = 0; j < NR_IRQS; j++) {
+			unsigned int temp = kstat_cpu(i).irqs[j];
+			sum += temp;
+			per_irq_sum[j] += temp;
+		}
 	}
 
 	seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu\n",
@@ -501,7 +509,7 @@ static int show_stat(struct seq_file *p, void *v)
 
 #if !defined(CONFIG_PPC64) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
 	for (i = 0; i < NR_IRQS; i++)
-		seq_printf(p, " %u", kstat_irqs(i));
+		seq_printf(p, " %u", per_irq_sum[i]);
 #endif
 
 	seq_printf(p,
@@ -516,6 +524,7 @@ static int show_stat(struct seq_file *p, void *v)
 		nr_running(),
 		nr_iowait());
 
+	kfree(per_irq_sum);
 	return 0;
 }
 