author     Brian Gerst <brgerst@gmail.com>   2009-01-18 10:38:57 -0500
committer  Tejun Heo <tj@kernel.org>         2009-01-18 10:38:57 -0500
commit     1b437c8c73a36daa471dd54a63c426d72af5723d (patch)
tree       2b713669c050fe52610959a7d68b53a2da75181d
parent     74e7904559a10cbb9fbf9139c5c42fc87c0f62a4 (diff)
x86-64: Move irq stats from PDA to per-cpu and consolidate with 32-bit.
Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
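
For context (editorial note, not part of the original commit message): after this patch, 32-bit and 64-bit x86 keep their interrupt statistics in the same per-cpu irq_cpustat_t, so handlers bump counters through inc_irq_stat() and summary code reads them through per_cpu(irq_stat, cpu) on either architecture. A minimal sketch of that usage pattern, assuming the definitions introduced by the diff below; the function names here are hypothetical:

	/* Illustrative only -- not part of this commit. */
	#include <linux/interrupt.h>	/* irqreturn_t, IRQ_HANDLED */
	#include <linux/hardirq.h>	/* inc_irq_stat() via asm/hardirq.h */
	#include <linux/percpu.h>	/* per_cpu() */
	#include <linux/cpumask.h>	/* for_each_possible_cpu() */

	static irqreturn_t example_interrupt(int irq, void *dev_id)
	{
		/* On x86-64 this now expands to percpu_add(irq_stat.irq_resched_count, 1). */
		inc_irq_stat(irq_resched_count);
		return IRQ_HANDLED;
	}

	static unsigned int example_resched_total(void)
	{
		unsigned int cpu, sum = 0;

		/* Same irq_cpustat_t layout on 32-bit and 64-bit after this change. */
		for_each_possible_cpu(cpu)
			sum += per_cpu(irq_stat, cpu).irq_resched_count;
		return sum;
	}
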
-rw-r--r--   arch/x86/include/asm/hardirq_64.h | 24
-rw-r--r--   arch/x86/include/asm/pda.h        | 10
-rw-r--r--   arch/x86/kernel/irq.c             |  6
-rw-r--r--   arch/x86/kernel/irq_64.c          |  3
-rw-r--r--   arch/x86/kernel/nmi.c             | 10
-rw-r--r--   arch/x86/xen/smp.c                | 18
6 files changed, 27 insertions(+), 44 deletions(-)
diff --git a/arch/x86/include/asm/hardirq_64.h b/arch/x86/include/asm/hardirq_64.h
index b5a6b5d56704..a65bab20f6ce 100644
--- a/arch/x86/include/asm/hardirq_64.h
+++ b/arch/x86/include/asm/hardirq_64.h
@@ -3,22 +3,36 @@
 
 #include <linux/threads.h>
 #include <linux/irq.h>
-#include <asm/pda.h>
 #include <asm/apic.h>
 
+typedef struct {
+	unsigned int __softirq_pending;
+	unsigned int __nmi_count;	/* arch dependent */
+	unsigned int apic_timer_irqs;	/* arch dependent */
+	unsigned int irq0_irqs;
+	unsigned int irq_resched_count;
+	unsigned int irq_call_count;
+	unsigned int irq_tlb_count;
+	unsigned int irq_thermal_count;
+	unsigned int irq_spurious_count;
+	unsigned int irq_threshold_count;
+} ____cacheline_aligned irq_cpustat_t;
+
+DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
+
 /* We can have at most NR_VECTORS irqs routed to a cpu at a time */
 #define MAX_HARDIRQS_PER_CPU NR_VECTORS
 
 #define __ARCH_IRQ_STAT 1
 
-#define inc_irq_stat(member)	add_pda(member, 1)
+#define inc_irq_stat(member)	percpu_add(irq_stat.member, 1)
 
-#define local_softirq_pending()	read_pda(__softirq_pending)
+#define local_softirq_pending()	percpu_read(irq_stat.__softirq_pending)
 
 #define __ARCH_SET_SOFTIRQ_PENDING 1
 
-#define set_softirq_pending(x)	write_pda(__softirq_pending, (x))
-#define or_softirq_pending(x)	or_pda(__softirq_pending, (x))
+#define set_softirq_pending(x)	percpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x)	percpu_or(irq_stat.__softirq_pending, (x))
 
 extern void ack_bad_irq(unsigned int irq);
 
diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h
index 47f274fe6953..69a40757e217 100644
--- a/arch/x86/include/asm/pda.h
+++ b/arch/x86/include/asm/pda.h
@@ -25,19 +25,9 @@ struct x8664_pda {
 	char *irqstackptr;
 	short nodenumber;		/* number of current node (32k max) */
 	short in_bootmem;		/* pda lives in bootmem */
-	unsigned int __softirq_pending;
-	unsigned int __nmi_count;	/* number of NMI on this CPUs */
 	short mmu_state;
 	short isidle;
 	struct mm_struct *active_mm;
-	unsigned apic_timer_irqs;
-	unsigned irq0_irqs;
-	unsigned irq_resched_count;
-	unsigned irq_call_count;
-	unsigned irq_tlb_count;
-	unsigned irq_thermal_count;
-	unsigned irq_threshold_count;
-	unsigned irq_spurious_count;
 } ____cacheline_aligned_in_smp;
 
 DECLARE_PER_CPU(struct x8664_pda, __pda);
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 3973e2df7f87..8b30d0c2512c 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -36,11 +36,7 @@ void ack_bad_irq(unsigned int irq)
 #endif
 }
 
-#ifdef CONFIG_X86_32
-# define irq_stats(x)		(&per_cpu(irq_stat, x))
-#else
-# define irq_stats(x)		cpu_pda(x)
-#endif
+#define irq_stats(x)		(&per_cpu(irq_stat, x))
 /*
  * /proc/interrupts printing:
  */
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 0b21cb1ea11f..1db05247b47f 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -19,6 +19,9 @@
 #include <asm/io_apic.h>
 #include <asm/idle.h>
 
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+EXPORT_PER_CPU_SYMBOL(irq_stat);
+
 /*
  * Probabilistic stack overflow check:
  *
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 7228979f1e7f..23b6d9e6e4f5 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -61,11 +61,7 @@ static int endflag __initdata;
 
 static inline unsigned int get_nmi_count(int cpu)
 {
-#ifdef CONFIG_X86_64
-	return cpu_pda(cpu)->__nmi_count;
-#else
-	return nmi_count(cpu);
-#endif
+	return per_cpu(irq_stat, cpu).__nmi_count;
 }
 
 static inline int mce_in_progress(void)
@@ -82,12 +78,8 @@ static inline int mce_in_progress(void)
  */
 static inline unsigned int get_timer_irqs(int cpu)
 {
-#ifdef CONFIG_X86_64
-	return read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
-#else
 	return per_cpu(irq_stat, cpu).apic_timer_irqs +
 		per_cpu(irq_stat, cpu).irq0_irqs;
-#endif
 }
 
 #ifdef CONFIG_SMP
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 3bfd6dd0b47c..9ff3b0999cfb 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -50,11 +50,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
  */
 static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 {
-#ifdef CONFIG_X86_32
-	__get_cpu_var(irq_stat).irq_resched_count++;
-#else
-	add_pda(irq_resched_count, 1);
-#endif
+	inc_irq_stat(irq_resched_count);
 
 	return IRQ_HANDLED;
 }
@@ -435,11 +431,7 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 {
 	irq_enter();
 	generic_smp_call_function_interrupt();
-#ifdef CONFIG_X86_32
-	__get_cpu_var(irq_stat).irq_call_count++;
-#else
-	add_pda(irq_call_count, 1);
-#endif
+	inc_irq_stat(irq_call_count);
 	irq_exit();
 
 	return IRQ_HANDLED;
@@ -449,11 +441,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 {
 	irq_enter();
 	generic_smp_call_function_single_interrupt();
-#ifdef CONFIG_X86_32
-	__get_cpu_var(irq_stat).irq_call_count++;
-#else
-	add_pda(irq_call_count, 1);
-#endif
+	inc_irq_stat(irq_call_count);
 	irq_exit();
 
 	return IRQ_HANDLED;