From f2c66cd8eeddedb440f33bc0f5cec1ed7ae376cb Mon Sep 17 00:00:00 2001
From: KAMEZAWA Hiroyuki
Date: Wed, 27 Oct 2010 15:34:13 -0700
Subject: /proc/stat: scalability of irq num per cpu

/proc/stat shows the total number of all interrupts delivered to each
cpu.  But when the number of IRQs is very large, reading it takes a
very long time: 'cat /proc/stat' takes more than 10 seconds.  This is
because the sum of all irq events is computed every time /proc/stat is
read.  This patch adds a per-cpu "sum of all irqs" counter, which
reduces the read cost.

The cost of reading /proc/stat matters because it is used by major
applications such as 'top', 'ps', 'w', etc.

A test on a machine (4096 cpus, 256 nodes, 4592 irqs) shows

  %time cat /proc/stat > /dev/null
  Before Patch:  12.627 sec
  After  Patch:   2.459 sec

Signed-off-by: KAMEZAWA Hiroyuki
Tested-by: Jack Steiner
Acked-by: Jack Steiner
Cc: Yinghai Lu
Cc: Ingo Molnar
Cc: Thomas Gleixner
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/kernel_stat.h | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

(limited to 'include/linux/kernel_stat.h')

diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index c059044bc6dc..8b9b89085530 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -33,6 +33,7 @@ struct kernel_stat {
 #ifndef CONFIG_GENERIC_HARDIRQS
 	unsigned int irqs[NR_IRQS];
 #endif
+	unsigned long irqs_sum;
 	unsigned int softirqs[NR_SOFTIRQS];
 };
 
@@ -54,6 +55,7 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
 					    struct irq_desc *desc)
 {
 	kstat_this_cpu.irqs[irq]++;
+	kstat_this_cpu.irqs_sum++;
 }
 
 static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
@@ -65,8 +67,9 @@ static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
 #define kstat_irqs_this_cpu(DESC) \
 	((DESC)->kstat_irqs[smp_processor_id()])
-#define kstat_incr_irqs_this_cpu(irqno, DESC) \
-	((DESC)->kstat_irqs[smp_processor_id()]++)
+#define kstat_incr_irqs_this_cpu(irqno, DESC) do {\
+	((DESC)->kstat_irqs[smp_processor_id()]++);\
+	kstat_this_cpu.irqs_sum++; } while (0)
 
 #endif
 
@@ -94,6 +97,13 @@ static inline unsigned int kstat_irqs(unsigned int irq)
 	return sum;
 }
 
+/*
+ * Number of interrupts per cpu, since bootup
+ */
+static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
+{
+	return kstat_cpu(cpu).irqs_sum;
+}
 
 /*
  * Lock/unlock the current runqueue - to extract task statistics:
--
cgit v1.2.2
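
Note on the change above: this cgit view is limited to
include/linux/kernel_stat.h, so only the new per-cpu irqs_sum counter
and the kstat_cpu_irqs_sum() accessor are visible; the /proc/stat code
that consumes them is changed elsewhere in the same series.  As a
rough sketch (not taken from the commit), a /proc/stat-style reader
could total interrupts as below; proc_stat_irq_total() is a made-up
helper name, and the real consumer is show_stat() in fs/proc/stat.c.

	/*
	 * Sketch only: sum interrupts with the per-cpu counter added by
	 * this patch, touching each cpu once instead of walking every
	 * IRQ on every cpu.
	 */
	static u64 proc_stat_irq_total(void)
	{
		u64 sum = 0;
		int cpu;

		/* O(nr_cpus) rather than O(nr_cpus * nr_irqs) */
		for_each_possible_cpu(cpu)
			sum += kstat_cpu_irqs_sum(cpu);
		return sum;
	}

The hot-path cost of maintaining the counter is a single non-atomic
per-cpu increment in kstat_incr_irqs_this_cpu(), as shown in the diff.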
From 478735e38887077ac77a9756121b6ce0cb956e2f Mon Sep 17 00:00:00 2001
From: KAMEZAWA Hiroyuki
Date: Wed, 27 Oct 2010 15:34:15 -0700
Subject: /proc/stat: fix scalability of irq sum of all cpu

In /proc/stat, the per-IRQ event count is shown by summing each irq's
events over all cpus.  We can make use of kstat_irqs() for this, since
kstat_irqs() does the same calculation.  If !CONFIG_GENERIC_HARDIRQS,
that is not a big cost, because both the number of cpus and the number
of irqs are small.

But if a system is very big and CONFIG_GENERIC_HARDIRQS is set, the
current code does

  for_each_irq()
      for_each_cpu()
          - look up a radix tree
          - read desc->irq_stat[cpu]

which is not efficient.  This patch adds a kstat_irqs() for
CONFIG_GENERIC_HARDIRQS and changes the calculation to

  for_each_irq()
      look up the radix tree
      for_each_cpu()
          - read desc->irq_stat[cpu]

This reduces the cost.

A test on a (4096 cpus, 256 nodes, 4592 irqs) host (by Jack Steiner) shows

  %time cat /proc/stat > /dev/null
  Before Patch:  2.459 sec
  After  Patch:  0.561 sec

[akpm@linux-foundation.org: unexport kstat_irqs, coding-style tweaks]
[akpm@linux-foundation.org: fix unused variable 'per_irq_sum']
Signed-off-by: KAMEZAWA Hiroyuki
Tested-by: Jack Steiner
Acked-by: Jack Steiner
Cc: Yinghai Lu
Cc: Ingo Molnar
Cc: Thomas Gleixner
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/kernel_stat.h | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'include/linux/kernel_stat.h')

diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 8b9b89085530..ad54c846911b 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -86,6 +86,7 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
 /*
  * Number of interrupts per specific IRQ source, since bootup
  */
+#ifndef CONFIG_GENERIC_HARDIRQS
 static inline unsigned int kstat_irqs(unsigned int irq)
 {
 	unsigned int sum = 0;
@@ -96,6 +97,9 @@ static inline unsigned int kstat_irqs(unsigned int irq)
 
 	return sum;
 }
+#else
+extern unsigned int kstat_irqs(unsigned int irq);
+#endif
 
 /*
  * Number of interrupts per cpu, since bootup
--
cgit v1.2.2
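
Note on the change above: the diff only switches the header to an
extern declaration under CONFIG_GENERIC_HARDIRQS; the out-of-line
definition lives in the generic irq code (kernel/irq/), which this
view does not show.  Following the changelog (one radix-tree lookup
per irq, then plain per-cpu reads), a sketch of what that definition
looks like is given below; field and helper names are taken from the
kstat_irqs_this_cpu() macro earlier in this header, and the merged
version may differ in detail.

	unsigned int kstat_irqs(unsigned int irq)
	{
		/* one radix-tree lookup per irq ... */
		struct irq_desc *desc = irq_to_desc(irq);
		unsigned int sum = 0;
		int cpu;

		if (!desc)
			return 0;
		/* ... then cheap per-cpu array reads */
		for_each_possible_cpu(cpu)
			sum += desc->kstat_irqs[cpu];
		return sum;
	}

Per the "[akpm: unexport kstat_irqs]" note in the changelog, the
function is not exported to modules.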