author     Yinghai Lu <yinghai@kernel.org>    2008-12-05 21:58:31 -0500
committer  Ingo Molnar <mingo@elte.hu>        2008-12-08 08:31:51 -0500
commit     0b8f1efad30bd58f89961b82dfe68b9edf8fd2ac (patch)
tree       239251bad791fd60af8c0f2ba365b7188395c83f /fs
parent     218d11a8b071b23b76c484fd5f72a4fe3306801e (diff)
sparse irq_desc[] array: core kernel and x86 changes
Impact: new feature
Problem on distro kernels: irq_desc[NR_IRQS] takes megabytes of RAM with
NR_CPUS set to large values. The goal is to be able to scale up to a much
larger NR_IRQS value without impacting the (important) common case.
To solve this, we generalize irq_desc[NR_IRQS] to an (optional) array of
irq_desc pointers.
When CONFIG_SPARSE_IRQ=y is used, we use kzalloc_node() to allocate each
irq_desc; this also makes the IRQ descriptors NUMA-local (to the site that
calls request_irq()).
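The core-kernel side of the patch is not part of this fs/-limited diff, but a minimal sketch of the idea, assuming a hypothetical sparse_irqs[] pointer array and helper name, looks roughly like this:

/*
 * Illustrative sketch only (not the patch's actual implementation):
 * the dense irq_desc[NR_IRQS] array becomes an array of pointers that
 * are filled in on demand with kzalloc_node(), so each descriptor is
 * allocated on the NUMA node that first asks for that IRQ.  The array
 * name and helper are assumptions; real code also needs locking.
 */
static struct irq_desc *sparse_irqs[NR_IRQS];	/* mostly NULL */

static struct irq_desc *irq_desc_alloc_node(unsigned int irq, int node)
{
	struct irq_desc *desc = sparse_irqs[irq];

	if (desc)
		return desc;			/* already allocated */

	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
	if (!desc)
		return NULL;			/* caller must handle failure */

	sparse_irqs[irq] = desc;
	return desc;
}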
This gets rid of the static irq_cfg[] array on x86 as well: x86 now stores
its per-IRQ irq_cfg in desc->chip_data.
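The arch side is likewise outside this fs/-limited diff; a sketch of the access pattern, with a hypothetical helper name, could look like this:

/*
 * Sketch only: instead of indexing a static irq_cfg[NR_IRQS] array,
 * x86 code can look up the per-IRQ configuration through the
 * descriptor.  The helper name is an assumption for illustration.
 */
static inline struct irq_cfg *desc_to_cfg(struct irq_desc *desc)
{
	/* chip_data is a void * set when the descriptor is allocated */
	return desc ? desc->chip_data : NULL;
}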
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'fs')
-rw-r--r--  fs/proc/stat.c  17
1 file changed, 11 insertions, 6 deletions
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 81904f07679d..a13431ab7c65 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -27,6 +27,7 @@ static int show_stat(struct seq_file *p, void *v)
 	u64 sum = 0;
 	struct timespec boottime;
 	unsigned int per_irq_sum;
+	struct irq_desc *desc;
 
 	user = nice = system = idle = iowait =
 		irq = softirq = steal = cputime64_zero;
@@ -44,10 +45,11 @@ static int show_stat(struct seq_file *p, void *v)
 		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
 		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
 		guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
-
-		for_each_irq_nr(j)
+		for_each_irq_desc(j, desc) {
+			if (!desc)
+				continue;
 			sum += kstat_irqs_cpu(j, i);
-
+		}
 		sum += arch_irq_stat_cpu(i);
 	}
 	sum += arch_irq_stat();
@@ -90,11 +92,14 @@ static int show_stat(struct seq_file *p, void *v)
 	seq_printf(p, "intr %llu", (unsigned long long)sum);
 
 	/* sum again ? it could be updated? */
-	for_each_irq_nr(j) {
+	for (j = 0; j < NR_IRQS; j++) {
+		desc = irq_to_desc(j);
 		per_irq_sum = 0;
 
-		for_each_possible_cpu(i)
-			per_irq_sum += kstat_irqs_cpu(j, i);
+		if (desc) {
+			for_each_possible_cpu(i)
+				per_irq_sum += kstat_irqs_cpu(j, i);
+		}
 
 		seq_printf(p, " %u", per_irq_sum);
 	}