author     Linus Torvalds <torvalds@linux-foundation.org>   2010-10-21 17:11:46 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-10-21 17:11:46 -0400
commit     4a60cfa9457749f7987fd4f3c956dbba5a281129
tree       85f3633276282cde0a3ac558d988704eaa3e68af /arch
parent     62bea97f54d806218a992b18d1f425cfb5060175
parent     27afdf2008da0b8878a73e32e4eb12381b84e224
Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (96 commits)
apic, x86: Use BIOS settings for IBS and MCE threshold interrupt LVT offsets
apic, x86: Check if EILVT APIC registers are available (AMD only)
x86: ioapic: Call free_irte only if interrupt remapping enabled
arm: Use ARCH_IRQ_INIT_FLAGS
genirq, ARM: Fix boot on ARM platforms
genirq: Fix CONFIG_GENIRQ_NO_DEPRECATED=y build
x86: Switch sparse_irq allocations to GFP_KERNEL
genirq: Switch sparse_irq allocator to GFP_KERNEL
genirq: Make sparse_lock a mutex
x86: lguest: Use new irq allocator
genirq: Remove the now unused sparse irq leftovers
genirq: Sanitize dynamic irq handling
genirq: Remove arch_init_chip_data()
x86: xen: Sanitise sparse_irq handling
x86: Use sane enumeration
x86: uv: Clean up the direct access to irq_desc
x86: Make io_apic.c local functions static
genirq: Remove irq_2_iommu
x86: Speed up the irq_remapped check in hot paths
intr_remap: Simplify the code further
...
Fix up trivial conflicts in arch/x86/Kconfig
Diffstat (limited to 'arch')
47 files changed, 801 insertions, 926 deletions
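
Most of the per-architecture churn that follows is the same mechanical conversion: struct irq_chip implementations drop the old .typename field in favour of .name, and the callbacks that took a bare irq number (.mask, .unmask, .enable, .disable) move to the struct irq_data based .irq_* variants. A minimal before/after sketch for orientation; the example_* identifiers are invented for illustration and do not appear in this merge:

    /* Old style: callbacks are keyed by the raw irq number. */
    static struct irq_chip example_chip_old = {
            .typename = "EXAMPLE",
            .mask     = example_mask,      /* void example_mask(unsigned int irq) */
            .unmask   = example_unmask,    /* void example_unmask(unsigned int irq) */
    };

    /* New style: callbacks receive struct irq_data and reach their per-irq
     * state through data->chip_data instead of indexing irq_desc[]. */
    static void example_irq_mask(struct irq_data *data)
    {
            struct example_priv *priv = data->chip_data;

            example_hw_mask(priv, data->irq);   /* hypothetical hardware helper */
    }

    static struct irq_chip example_chip_new = {
            .name       = "EXAMPLE",
            .irq_mask   = example_irq_mask,
            .irq_unmask = example_irq_unmask,
    };
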
diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h
index 90831f6f5f5c..5586b7c8ef6f 100644
--- a/arch/arm/include/asm/hw_irq.h
+++ b/arch/arm/include/asm/hw_irq.h
@@ -24,4 +24,6 @@ void set_irq_flags(unsigned int irq, unsigned int flags);
 #define IRQF_PROBE (1 << 1)
 #define IRQF_NOAUTOEN (1 << 2)
 
+#define ARCH_IRQ_INIT_FLAGS (IRQ_NOREQUEST | IRQ_NOPROBE)
+
 #endif
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index c0d5c3b3a760..36ad3be4692a 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -154,14 +154,6 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
 
 void __init init_IRQ(void)
 {
-    struct irq_desc *desc;
-    int irq;
-
-    for (irq = 0; irq < nr_irqs; irq++) {
-        desc = irq_to_desc_alloc_node(irq, 0);
-        desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
-    }
-
     init_arch_irq();
 }
 
@@ -169,7 +161,7 @@ void __init init_IRQ(void)
 int __init arch_probe_nr_irqs(void)
 {
     nr_irqs = arch_nr_irqs ? arch_nr_irqs : NR_IRQS;
-    return 0;
+    return nr_irqs;
 }
 #endif
 
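
Two things carry the removed ARM loop: the new ARCH_IRQ_INIT_FLAGS define above lets the genirq core mark every descriptor IRQ_NOREQUEST | IRQ_NOPROBE at init time, and arch_probe_nr_irqs() now reports how many interrupt descriptors the core should preallocate instead of returning a bare status code (an assumption drawn from this hunk and the matching sh change below). A hedged sketch of an architecture hook under that contract; example_nr_irqs is a made-up platform value:

    int __init arch_probe_nr_irqs(void)
    {
            nr_irqs = example_nr_irqs ? example_nr_irqs : NR_IRQS;

            /* Non-zero return: ask the genirq core to preallocate this many
             * irq descriptors at boot (sketch of the assumed contract). */
            return nr_irqs;
    }
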
diff --git a/arch/arm/mach-bcmring/irq.c b/arch/arm/mach-bcmring/irq.c
index dc1c4939b0ce..e3152631eb37 100644
--- a/arch/arm/mach-bcmring/irq.c
+++ b/arch/arm/mach-bcmring/irq.c
@@ -67,21 +67,21 @@ static void bcmring_unmask_irq2(unsigned int irq)
 }
 
 static struct irq_chip bcmring_irq0_chip = {
-    .typename = "ARM-INTC0",
+    .name = "ARM-INTC0",
     .ack = bcmring_mask_irq0,
     .mask = bcmring_mask_irq0, /* mask a specific interrupt, blocking its delivery. */
     .unmask = bcmring_unmask_irq0, /* unmaks an interrupt */
 };
 
 static struct irq_chip bcmring_irq1_chip = {
-    .typename = "ARM-INTC1",
+    .name = "ARM-INTC1",
     .ack = bcmring_mask_irq1,
     .mask = bcmring_mask_irq1,
     .unmask = bcmring_unmask_irq1,
 };
 
 static struct irq_chip bcmring_irq2_chip = {
-    .typename = "ARM-SINTC",
+    .name = "ARM-SINTC",
     .ack = bcmring_mask_irq2,
     .mask = bcmring_mask_irq2,
     .unmask = bcmring_unmask_irq2,
diff --git a/arch/arm/mach-iop13xx/msi.c b/arch/arm/mach-iop13xx/msi.c
index f34b0ed80630..7149fcc16c8a 100644
--- a/arch/arm/mach-iop13xx/msi.c
+++ b/arch/arm/mach-iop13xx/msi.c
@@ -164,10 +164,10 @@ static void iop13xx_msi_nop(unsigned int irq)
 static struct irq_chip iop13xx_msi_chip = {
     .name = "PCI-MSI",
     .ack = iop13xx_msi_nop,
-    .enable = unmask_msi_irq,
-    .disable = mask_msi_irq,
-    .mask = mask_msi_irq,
-    .unmask = unmask_msi_irq,
+    .irq_enable = unmask_msi_irq,
+    .irq_disable = mask_msi_irq,
+    .irq_mask = mask_msi_irq,
+    .irq_unmask = unmask_msi_irq,
 };
 
 int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 4a746ea838ff..00b19a416eab 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -104,8 +104,8 @@ static int ia64_msi_retrigger_irq(unsigned int irq)
  */
 static struct irq_chip ia64_msi_chip = {
     .name = "PCI-MSI",
-    .mask = mask_msi_irq,
-    .unmask = unmask_msi_irq,
+    .irq_mask = mask_msi_irq,
+    .irq_unmask = unmask_msi_irq,
     .ack = ia64_ack_msi_irq,
 #ifdef CONFIG_SMP
     .set_affinity = ia64_set_msi_irq_affinity,
@@ -160,8 +160,8 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 
 static struct irq_chip dmar_msi_type = {
     .name = "DMAR_MSI",
-    .unmask = dmar_msi_unmask,
-    .mask = dmar_msi_mask,
+    .irq_unmask = dmar_msi_unmask,
+    .irq_mask = dmar_msi_mask,
     .ack = ia64_ack_msi_irq,
 #ifdef CONFIG_SMP
     .set_affinity = dmar_msi_set_affinity,
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index 0c72dd463831..a5e500f02853 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -228,8 +228,8 @@ static int sn_msi_retrigger_irq(unsigned int irq)
 
 static struct irq_chip sn_msi_chip = {
     .name = "PCI-MSI",
-    .mask = mask_msi_irq,
-    .unmask = unmask_msi_irq,
+    .irq_mask = mask_msi_irq,
+    .irq_unmask = unmask_msi_irq,
     .ack = sn_ack_msi_irq,
 #ifdef CONFIG_SMP
     .set_affinity = sn_set_msi_irq_affinity,
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
index 3c71f776872c..7db26f1f082d 100644
--- a/arch/m32r/kernel/irq.c
+++ b/arch/m32r/kernel/irq.c
@@ -51,7 +51,7 @@ int show_interrupts(struct seq_file *p, void *v)
         for_each_online_cpu(j)
             seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-        seq_printf(p, " %14s", irq_desc[i].chip->typename);
+        seq_printf(p, " %14s", irq_desc[i].chip->name);
         seq_printf(p, " %s", action->name);
 
         for (action=action->next; action; action = action->next)
diff --git a/arch/m32r/platforms/m32104ut/setup.c b/arch/m32r/platforms/m32104ut/setup.c
index 922fdfdadeaa..402a59d7219b 100644
--- a/arch/m32r/platforms/m32104ut/setup.c
+++ b/arch/m32r/platforms/m32104ut/setup.c
@@ -65,7 +65,7 @@ static void shutdown_m32104ut_irq(unsigned int irq)
 
 static struct irq_chip m32104ut_irq_type =
 {
-    .typename = "M32104UT-IRQ",
+    .name = "M32104UT-IRQ",
     .startup = startup_m32104ut_irq,
     .shutdown = shutdown_m32104ut_irq,
     .enable = enable_m32104ut_irq,
diff --git a/arch/m32r/platforms/m32700ut/setup.c b/arch/m32r/platforms/m32700ut/setup.c
index 9c1bc7487c1e..80b1a026795a 100644
--- a/arch/m32r/platforms/m32700ut/setup.c
+++ b/arch/m32r/platforms/m32700ut/setup.c
@@ -71,7 +71,7 @@ static void shutdown_m32700ut_irq(unsigned int irq)
 
 static struct irq_chip m32700ut_irq_type =
 {
-    .typename = "M32700UT-IRQ",
+    .name = "M32700UT-IRQ",
     .startup = startup_m32700ut_irq,
     .shutdown = shutdown_m32700ut_irq,
     .enable = enable_m32700ut_irq,
@@ -148,7 +148,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq)
 
 static struct irq_chip m32700ut_pld_irq_type =
 {
-    .typename = "M32700UT-PLD-IRQ",
+    .name = "M32700UT-PLD-IRQ",
     .startup = startup_m32700ut_pld_irq,
     .shutdown = shutdown_m32700ut_pld_irq,
     .enable = enable_m32700ut_pld_irq,
@@ -217,7 +217,7 @@ static void shutdown_m32700ut_lanpld_irq(unsigned int irq)
 
 static struct irq_chip m32700ut_lanpld_irq_type =
 {
-    .typename = "M32700UT-PLD-LAN-IRQ",
+    .name = "M32700UT-PLD-LAN-IRQ",
     .startup = startup_m32700ut_lanpld_irq,
     .shutdown = shutdown_m32700ut_lanpld_irq,
     .enable = enable_m32700ut_lanpld_irq,
@@ -286,7 +286,7 @@ static void shutdown_m32700ut_lcdpld_irq(unsigned int irq)
 
 static struct irq_chip m32700ut_lcdpld_irq_type =
 {
-    .typename = "M32700UT-PLD-LCD-IRQ",
+    .name = "M32700UT-PLD-LCD-IRQ",
     .startup = startup_m32700ut_lcdpld_irq,
     .shutdown = shutdown_m32700ut_lcdpld_irq,
     .enable = enable_m32700ut_lcdpld_irq,
diff --git a/arch/m32r/platforms/mappi/setup.c b/arch/m32r/platforms/mappi/setup.c
index fb4b17799b66..ea00c84d6b1b 100644
--- a/arch/m32r/platforms/mappi/setup.c
+++ b/arch/m32r/platforms/mappi/setup.c
@@ -65,7 +65,7 @@ static void shutdown_mappi_irq(unsigned int irq)
 
 static struct irq_chip mappi_irq_type =
 {
-    .typename = "MAPPI-IRQ",
+    .name = "MAPPI-IRQ",
     .startup = startup_mappi_irq,
     .shutdown = shutdown_mappi_irq,
     .enable = enable_mappi_irq,
diff --git a/arch/m32r/platforms/mappi2/setup.c b/arch/m32r/platforms/mappi2/setup.c
index 6a65eda0a056..c049376d0270 100644
--- a/arch/m32r/platforms/mappi2/setup.c
+++ b/arch/m32r/platforms/mappi2/setup.c
@@ -72,7 +72,7 @@ static void shutdown_mappi2_irq(unsigned int irq)
 
 static struct irq_chip mappi2_irq_type =
 {
-    .typename = "MAPPI2-IRQ",
+    .name = "MAPPI2-IRQ",
     .startup = startup_mappi2_irq,
     .shutdown = shutdown_mappi2_irq,
     .enable = enable_mappi2_irq,
diff --git a/arch/m32r/platforms/mappi3/setup.c b/arch/m32r/platforms/mappi3/setup.c
index 9c337aeac94b..882de25c6e8c 100644
--- a/arch/m32r/platforms/mappi3/setup.c
+++ b/arch/m32r/platforms/mappi3/setup.c
@@ -72,7 +72,7 @@ static void shutdown_mappi3_irq(unsigned int irq)
 
 static struct irq_chip mappi3_irq_type =
 {
-    .typename = "MAPPI3-IRQ",
+    .name = "MAPPI3-IRQ",
     .startup = startup_mappi3_irq,
     .shutdown = shutdown_mappi3_irq,
     .enable = enable_mappi3_irq,
diff --git a/arch/m32r/platforms/oaks32r/setup.c b/arch/m32r/platforms/oaks32r/setup.c
index ed865741c38d..d11d93bf74f5 100644
--- a/arch/m32r/platforms/oaks32r/setup.c
+++ b/arch/m32r/platforms/oaks32r/setup.c
@@ -63,7 +63,7 @@ static void shutdown_oaks32r_irq(unsigned int irq)
 
 static struct irq_chip oaks32r_irq_type =
 {
-    .typename = "OAKS32R-IRQ",
+    .name = "OAKS32R-IRQ",
     .startup = startup_oaks32r_irq,
     .shutdown = shutdown_oaks32r_irq,
     .enable = enable_oaks32r_irq,
diff --git a/arch/m32r/platforms/opsput/setup.c b/arch/m32r/platforms/opsput/setup.c
index 80d680657019..5f3402a2fbaf 100644
--- a/arch/m32r/platforms/opsput/setup.c
+++ b/arch/m32r/platforms/opsput/setup.c
@@ -72,7 +72,7 @@ static void shutdown_opsput_irq(unsigned int irq)
 
 static struct irq_chip opsput_irq_type =
 {
-    .typename = "OPSPUT-IRQ",
+    .name = "OPSPUT-IRQ",
     .startup = startup_opsput_irq,
     .shutdown = shutdown_opsput_irq,
     .enable = enable_opsput_irq,
@@ -149,7 +149,7 @@ static void shutdown_opsput_pld_irq(unsigned int irq)
 
 static struct irq_chip opsput_pld_irq_type =
 {
-    .typename = "OPSPUT-PLD-IRQ",
+    .name = "OPSPUT-PLD-IRQ",
     .startup = startup_opsput_pld_irq,
     .shutdown = shutdown_opsput_pld_irq,
     .enable = enable_opsput_pld_irq,
@@ -218,7 +218,7 @@ static void shutdown_opsput_lanpld_irq(unsigned int irq)
 
 static struct irq_chip opsput_lanpld_irq_type =
 {
-    .typename = "OPSPUT-PLD-LAN-IRQ",
+    .name = "OPSPUT-PLD-LAN-IRQ",
     .startup = startup_opsput_lanpld_irq,
     .shutdown = shutdown_opsput_lanpld_irq,
     .enable = enable_opsput_lanpld_irq,
diff --git a/arch/m32r/platforms/usrv/setup.c b/arch/m32r/platforms/usrv/setup.c
index 757302660af8..1beac7a51ed4 100644
--- a/arch/m32r/platforms/usrv/setup.c
+++ b/arch/m32r/platforms/usrv/setup.c
@@ -63,7 +63,7 @@ static void shutdown_mappi_irq(unsigned int irq)
 
 static struct irq_chip mappi_irq_type =
 {
-    .typename = "M32700-IRQ",
+    .name = "M32700-IRQ",
     .startup = startup_mappi_irq,
     .shutdown = shutdown_mappi_irq,
     .enable = enable_mappi_irq,
@@ -136,7 +136,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq)
 
 static struct irq_chip m32700ut_pld_irq_type =
 {
-    .typename = "USRV-PLD-IRQ",
+    .name = "USRV-PLD-IRQ",
     .startup = startup_m32700ut_pld_irq,
     .shutdown = shutdown_m32700ut_pld_irq,
     .enable = enable_m32700ut_pld_irq,
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 97085530aa63..e3e379c6caa7 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -310,9 +310,9 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
 }
 
 static struct irq_chip msic_irq_chip = {
-    .mask = mask_msi_irq,
-    .unmask = unmask_msi_irq,
-    .shutdown = unmask_msi_irq,
+    .irq_mask = mask_msi_irq,
+    .irq_unmask = unmask_msi_irq,
+    .irq_shutdown = mask_msi_irq,
     .name = "AXON-MSI",
 };
 
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 93834b0d8272..67e2c4bdac8f 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -243,7 +243,7 @@ static unsigned int xics_startup(unsigned int virq)
      * at that level, so we do it here by hand.
      */
     if (irq_to_desc(virq)->msi_desc)
-        unmask_msi_irq(virq);
+        unmask_msi_irq(irq_get_irq_data(virq));
 
     /* unmask it */
     xics_unmask_irq(virq);
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 87991d3abbab..bdbd896c89d8 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -51,8 +51,8 @@ static void fsl_msi_end_irq(unsigned int virq)
 }
 
 static struct irq_chip fsl_msi_chip = {
-    .mask = mask_msi_irq,
-    .unmask = unmask_msi_irq,
+    .irq_mask = mask_msi_irq,
+    .irq_unmask = unmask_msi_irq,
     .ack = fsl_msi_end_irq,
     .name = "FSL-MSI",
 };
diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
index 3b6a9a43718f..320ad5a9a25d 100644
--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c
+++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c
@@ -39,24 +39,24 @@
 static struct mpic *msi_mpic;
 
 
-static void mpic_pasemi_msi_mask_irq(unsigned int irq)
+static void mpic_pasemi_msi_mask_irq(struct irq_data *data)
 {
-    pr_debug("mpic_pasemi_msi_mask_irq %d\n", irq);
-    mask_msi_irq(irq);
-    mpic_mask_irq(irq);
+    pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq);
+    mask_msi_irq(data);
+    mpic_mask_irq(data->irq);
 }
 
-static void mpic_pasemi_msi_unmask_irq(unsigned int irq)
+static void mpic_pasemi_msi_unmask_irq(struct irq_data *data)
 {
-    pr_debug("mpic_pasemi_msi_unmask_irq %d\n", irq);
-    mpic_unmask_irq(irq);
-    unmask_msi_irq(irq);
+    pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq);
+    mpic_unmask_irq(data->irq);
+    unmask_msi_irq(data);
 }
 
 static struct irq_chip mpic_pasemi_msi_chip = {
-    .shutdown = mpic_pasemi_msi_mask_irq,
-    .mask = mpic_pasemi_msi_mask_irq,
-    .unmask = mpic_pasemi_msi_unmask_irq,
+    .irq_shutdown = mpic_pasemi_msi_mask_irq,
+    .irq_mask = mpic_pasemi_msi_mask_irq,
+    .irq_unmask = mpic_pasemi_msi_unmask_irq,
     .eoi = mpic_end_irq,
     .set_type = mpic_set_irq_type,
     .set_affinity = mpic_set_affinity,
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index bcbfe79c704b..a2b028b4a202 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -23,22 +23,22 @@
 /* A bit ugly, can we get this from the pci_dev somehow? */
 static struct mpic *msi_mpic;
 
-static void mpic_u3msi_mask_irq(unsigned int irq)
+static void mpic_u3msi_mask_irq(struct irq_data *data)
 {
-    mask_msi_irq(irq);
-    mpic_mask_irq(irq);
+    mask_msi_irq(data);
+    mpic_mask_irq(data->irq);
 }
 
-static void mpic_u3msi_unmask_irq(unsigned int irq)
+static void mpic_u3msi_unmask_irq(struct irq_data *data)
 {
-    mpic_unmask_irq(irq);
-    unmask_msi_irq(irq);
+    mpic_unmask_irq(data->irq);
+    unmask_msi_irq(data);
 }
 
 static struct irq_chip mpic_u3msi_chip = {
-    .shutdown = mpic_u3msi_mask_irq,
-    .mask = mpic_u3msi_mask_irq,
-    .unmask = mpic_u3msi_unmask_irq,
+    .irq_shutdown = mpic_u3msi_mask_irq,
+    .irq_mask = mpic_u3msi_mask_irq,
+    .irq_unmask = mpic_u3msi_unmask_irq,
     .eoi = mpic_end_irq,
     .set_type = mpic_set_irq_type,
     .set_affinity = mpic_set_affinity,
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 257de1f0692b..ae5bac39b896 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -290,7 +290,7 @@ void __init init_IRQ(void)
 int __init arch_probe_nr_irqs(void)
 {
     nr_irqs = sh_mv.mv_nr_irqs;
-    return 0;
+    return NR_IRQS_LEGACY;
 }
 #endif
 
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c
index 548b8ca9c210..b210416ace7b 100644
--- a/arch/sparc/kernel/pci_msi.c
+++ b/arch/sparc/kernel/pci_msi.c
@@ -114,10 +114,10 @@ static void free_msi(struct pci_pbm_info *pbm, int msi_num)
 
 static struct irq_chip msi_irq = {
     .name = "PCI-MSI",
-    .mask = mask_msi_irq,
-    .unmask = unmask_msi_irq,
-    .enable = unmask_msi_irq,
-    .disable = mask_msi_irq,
+    .irq_mask = mask_msi_irq,
+    .irq_unmask = unmask_msi_irq,
+    .irq_enable = unmask_msi_irq,
+    .irq_disable = mask_msi_irq,
     /* XXX affinity XXX */
 };
 
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 596c60086930..9a27d563fc30 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -208,7 +208,7 @@ static void tile_irq_chip_eoi(unsigned int irq)
 }
 
 static struct irq_chip tile_irq_chip = {
-    .typename = "tile_irq_chip",
+    .name = "tile_irq_chip",
     .ack = tile_irq_chip_ack,
     .eoi = tile_irq_chip_eoi,
     .mask = tile_irq_chip_mask,
@@ -288,7 +288,7 @@ int show_interrupts(struct seq_file *p, void *v)
         for_each_online_cpu(j)
             seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-        seq_printf(p, " %14s", irq_desc[i].chip->typename);
+        seq_printf(p, " %14s", irq_desc[i].chip->name);
         seq_printf(p, " %s", action->name);
 
         for (action = action->next; action; action = action->next)
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index a3f0b04d7101..a746e3037a5b 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -46,7 +46,7 @@ int show_interrupts(struct seq_file *p, void *v)
         for_each_online_cpu(j)
             seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-        seq_printf(p, " %14s", irq_desc[i].chip->typename);
+        seq_printf(p, " %14s", irq_desc[i].chip->name);
         seq_printf(p, " %s", action->name);
 
         for (action=action->next; action; action = action->next)
@@ -369,7 +369,7 @@ static void dummy(unsigned int irq)
 
 /* This is used for everything else than the timer. */
 static struct irq_chip normal_irq_type = {
-    .typename = "SIGIO",
+    .name = "SIGIO",
     .release = free_irq_by_irq_and_dev,
     .disable = dummy,
     .enable = dummy,
@@ -378,7 +378,7 @@ static struct irq_chip normal_irq_type = {
 };
 
 static struct irq_chip SIGVTALRM_irq_type = {
-    .typename = "SIGVTALRM",
+    .name = "SIGVTALRM",
     .release = free_irq_by_irq_and_dev,
     .shutdown = dummy, /* never called */
     .disable = dummy,
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 8c9e609a175b..7ab9db88ab6a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -63,6 +63,10 @@ config X86
     select HAVE_USER_RETURN_NOTIFIER
     select HAVE_ARCH_JUMP_LABEL
     select HAVE_TEXT_POKE_SMP
+    select HAVE_GENERIC_HARDIRQS
+    select HAVE_SPARSE_IRQ
+    select GENERIC_IRQ_PROBE
+    select GENERIC_PENDING_IRQ if SMP
 
 config INSTRUCTION_DECODER
     def_bool (KPROBES || PERF_EVENTS)
@@ -204,20 +208,6 @@ config HAVE_INTEL_TXT
     def_bool y
     depends on EXPERIMENTAL && DMAR && ACPI
 
-# Use the generic interrupt handling code in kernel/irq/:
-config GENERIC_HARDIRQS
-    def_bool y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
-    def_bool y
-
-config GENERIC_IRQ_PROBE
-    def_bool y
-
-config GENERIC_PENDING_IRQ
-    def_bool y
-    depends on GENERIC_HARDIRQS && SMP
-
 config USE_GENERIC_SMP_HELPERS
     def_bool y
     depends on SMP
@@ -300,23 +290,6 @@ config X86_X2APIC
 
       If you don't know what to do here, say N.
 
-config SPARSE_IRQ
-    bool "Support sparse irq numbering"
-    depends on PCI_MSI || HT_IRQ
-    ---help---
-      This enables support for sparse irqs. This is useful for distro
-      kernels that want to define a high CONFIG_NR_CPUS value but still
-      want to have low kernel memory footprint on smaller machines.
-
-      ( Sparse IRQs can also be beneficial on NUMA boxes, as they spread
-        out the irq_desc[] array in a more NUMA-friendly way. )
-
-      If you don't know what to do here, say N.
-
-config NUMA_IRQ_DESC
-    def_bool y
-    depends on SPARSE_IRQ && NUMA
-
 config X86_MPPARSE
     bool "Enable MPS table" if ACPI
     default y
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 1fa03e04ae44..286de34b0ed6 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -252,9 +252,7 @@ static inline int apic_is_clustered_box(void)
 }
 #endif
 
-extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask);
-extern u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask);
-
+extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
 
 #else /* !CONFIG_X86_LOCAL_APIC */
 static inline void lapic_shutdown(void) { }
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 7fe3b3060f08..a859ca461fb0 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -131,6 +131,7 @@
 #define APIC_EILVTn(n) (0x500 + 0x10 * n)
 #define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */
 #define APIC_EILVT_NR_AMD_10H 4
+#define APIC_EILVT_NR_MAX APIC_EILVT_NR_AMD_10H
 #define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF)
 #define APIC_EILVT_MSG_FIX 0x0
 #define APIC_EILVT_MSG_SMI 0x2
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 1d5c08a1bdfd..2c392d663dce 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -74,10 +74,12 @@ extern void hpet_disable(void);
 extern unsigned int hpet_readl(unsigned int a);
 extern void force_hpet_resume(void);
 
-extern void hpet_msi_unmask(unsigned int irq);
-extern void hpet_msi_mask(unsigned int irq);
-extern void hpet_msi_write(unsigned int irq, struct msi_msg *msg);
-extern void hpet_msi_read(unsigned int irq, struct msi_msg *msg);
+struct irq_data;
+extern void hpet_msi_unmask(struct irq_data *data);
+extern void hpet_msi_mask(struct irq_data *data);
+struct hpet_dev;
+extern void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg);
+extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg);
 
 #ifdef CONFIG_PCI_MSI
 extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id);
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 3a54a1ca1a02..0274ec5a7e62 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -78,6 +78,13 @@ static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
     irq_attr->polarity = polarity;
 }
 
+struct irq_2_iommu {
+    struct intel_iommu *iommu;
+    u16 irte_index;
+    u16 sub_handle;
+    u8 irte_mask;
+};
+
 /*
  * This is performance-critical, we want to do it O(1)
  *
@@ -89,15 +96,17 @@ struct irq_cfg {
     cpumask_var_t old_domain;
     u8 vector;
     u8 move_in_progress : 1;
+#ifdef CONFIG_INTR_REMAP
+    struct irq_2_iommu irq_2_iommu;
+#endif
 };
 
-extern struct irq_cfg *irq_cfg(unsigned int);
 extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *);
 extern void send_cleanup_vector(struct irq_cfg *);
 
-struct irq_desc;
-extern unsigned int set_desc_affinity(struct irq_desc *, const struct cpumask *,
+struct irq_data;
+int __ioapic_set_affinity(struct irq_data *, const struct cpumask *,
                unsigned int *dest_id);
 extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr);
 extern void setup_ioapic_dest(void);
 
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 1655147646aa..a20365953bf8 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -55,6 +55,8 @@ extern struct irq_chip i8259A_chip;
 struct legacy_pic {
     int nr_legacy_irqs;
     struct irq_chip *chip;
+    void (*mask)(unsigned int irq);
+    void (*unmask)(unsigned int irq);
     void (*mask_all)(void);
     void (*restore_mask)(void);
     void (*init)(int auto_eoi);
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 9cb2edb87c2f..c8be4566c3d2 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -170,12 +170,6 @@ extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
 
 extern void probe_nr_irqs_gsi(void);
 
-extern int setup_ioapic_entry(int apic, int irq,
-                  struct IO_APIC_route_entry *entry,
-                  unsigned int destination, int trigger,
-                  int polarity, int vector, int pin);
-extern void ioapic_write_entry(int apic, int pin,
-                   struct IO_APIC_route_entry e);
 extern void setup_ioapic_ids_from_mpc(void);
 
 struct mp_ioapic_gsi{
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index 8d841505344e..1c23360fb2d8 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -24,10 +24,18 @@ static inline void prepare_irte(struct irte *irte, int vector,
     irte->dest_id = IRTE_DEST(dest);
     irte->redir_hint = 1;
 }
+static inline bool irq_remapped(struct irq_cfg *cfg)
+{
+    return cfg->irq_2_iommu.iommu != NULL;
+}
 #else
 static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
 {
 }
+static inline bool irq_remapped(struct irq_cfg *cfg)
+{
+    return false;
+}
 #endif
 
 #endif /* _ASM_X86_IRQ_REMAPPING_H */
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 6fe2b5cb4f3c..92543c73cf8e 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -231,34 +231,6 @@ static void apbt_restart_clocksource(struct clocksource *cs)
     apbt_start_counter(phy_cs_timer_id);
 }
 
-/* Setup IRQ routing via IOAPIC */
-#ifdef CONFIG_SMP
-static void apbt_setup_irq(struct apbt_dev *adev)
-{
-    struct irq_chip *chip;
-    struct irq_desc *desc;
-
-    /* timer0 irq has been setup early */
-    if (adev->irq == 0)
-        return;
-    desc = irq_to_desc(adev->irq);
-    chip = get_irq_chip(adev->irq);
-    disable_irq(adev->irq);
-    desc->status |= IRQ_MOVE_PCNTXT;
-    irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
-    /* APB timer irqs are set up as mp_irqs, timer is edge triggerred */
-    set_irq_chip_and_handler_name(adev->irq, chip, handle_edge_irq, "edge");
-    enable_irq(adev->irq);
-    if (system_state == SYSTEM_BOOTING)
-        if (request_irq(adev->irq, apbt_interrupt_handler,
-                IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
-                adev->name, adev)) {
-            printk(KERN_ERR "Failed request IRQ for APBT%d\n",
-                   adev->num);
-        }
-}
-#endif
-
 static void apbt_enable_int(int n)
 {
     unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
@@ -334,6 +306,27 @@ static int __init apbt_clockevent_register(void)
 }
 
 #ifdef CONFIG_SMP
+
+static void apbt_setup_irq(struct apbt_dev *adev)
+{
+    /* timer0 irq has been setup early */
+    if (adev->irq == 0)
+        return;
+
+    if (system_state == SYSTEM_BOOTING) {
+        irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
+        /* APB timer irqs are set up as mp_irqs, timer is edge type */
+        __set_irq_handler(adev->irq, handle_edge_irq, 0, "edge");
+        if (request_irq(adev->irq, apbt_interrupt_handler,
+                IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
+                adev->name, adev)) {
+            printk(KERN_ERR "Failed request IRQ for APBT%d\n",
+                   adev->num);
+        }
+    } else
+        enable_irq(adev->irq);
+}
+
 /* Should be called with per cpu */
 void apbt_setup_secondary_clock(void)
 {
@@ -389,10 +382,11 @@ static int apbt_cpuhp_notify(struct notifier_block *n,
 
     switch (action & 0xf) {
     case CPU_DEAD:
+        disable_irq(adev->irq);
         apbt_disable_int(cpu);
-        if (system_state == SYSTEM_RUNNING)
+        if (system_state == SYSTEM_RUNNING) {
             pr_debug("skipping APBT CPU %lu offline\n", cpu);
-        else if (adev) {
+        } else if (adev) {
             pr_debug("APBT clockevent for cpu %lu offline\n", cpu);
             free_irq(adev->irq, adev);
         }
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 8cf86fb3b4e3..850657d1b0ed 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -52,6 +52,7 @@
 #include <asm/mce.h>
 #include <asm/kvm_para.h>
 #include <asm/tsc.h>
+#include <asm/atomic.h>
 
 unsigned int num_processors;
 
@@ -370,38 +371,87 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
 }
 
 /*
- * Setup extended LVT, AMD specific (K8, family 10h)
+ * Setup extended LVT, AMD specific
  *
- * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
- * MCE interrupts are supported. Thus MCE offset must be set to 0.
+ * Software should use the LVT offsets the BIOS provides. The offsets
+ * are determined by the subsystems using it like those for MCE
+ * threshold or IBS. On K8 only offset 0 (APIC500) and MCE interrupts
+ * are supported. Beginning with family 10h at least 4 offsets are
+ * available.
  *
- * If mask=1, the LVT entry does not generate interrupts while mask=0
- * enables the vector. See also the BKDGs.
+ * Since the offsets must be consistent for all cores, we keep track
+ * of the LVT offsets in software and reserve the offset for the same
+ * vector also to be used on other cores. An offset is freed by
+ * setting the entry to APIC_EILVT_MASKED.
+ *
+ * If the BIOS is right, there should be no conflicts. Otherwise a
+ * "[Firmware Bug]: ..." error message is generated. However, if
+ * software does not properly determines the offsets, it is not
+ * necessarily a BIOS bug.
  */
 
-#define APIC_EILVT_LVTOFF_MCE 0
-#define APIC_EILVT_LVTOFF_IBS 1
+static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX];
 
-static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
+static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new)
 {
-    unsigned long reg = (lvt_off << 4) + APIC_EILVTn(0);
-    unsigned int v = (mask << 16) | (msg_type << 8) | vector;
-
-    apic_write(reg, v);
+    return (old & APIC_EILVT_MASKED)
+        || (new == APIC_EILVT_MASKED)
+        || ((new & ~APIC_EILVT_MASKED) == old);
 }
 
-u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
+static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
 {
-    setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
-    return APIC_EILVT_LVTOFF_MCE;
+    unsigned int rsvd;  /* 0: uninitialized */
+
+    if (offset >= APIC_EILVT_NR_MAX)
+        return ~0;
+
+    rsvd = atomic_read(&eilvt_offsets[offset]) & ~APIC_EILVT_MASKED;
+    do {
+        if (rsvd &&
+            !eilvt_entry_is_changeable(rsvd, new))
+            /* may not change if vectors are different */
+            return rsvd;
+        rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
+    } while (rsvd != new);
+
+    return new;
 }
 
-u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
+/*
+ * If mask=1, the LVT entry does not generate interrupts while mask=0
+ * enables the vector. See also the BKDGs.
+ */
+
+int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
 {
-    setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
-    return APIC_EILVT_LVTOFF_IBS;
+    unsigned long reg = APIC_EILVTn(offset);
+    unsigned int new, old, reserved;
+
+    new = (mask << 16) | (msg_type << 8) | vector;
+    old = apic_read(reg);
+    reserved = reserve_eilvt_offset(offset, new);
+
+    if (reserved != new) {
+        pr_err(FW_BUG "cpu %d, try to setup vector 0x%x, but "
+               "vector 0x%x was already reserved by another core, "
+               "APIC%lX=0x%x\n",
+               smp_processor_id(), new, reserved, reg, old);
+        return -EINVAL;
+    }
+
+    if (!eilvt_entry_is_changeable(old, new)) {
+        pr_err(FW_BUG "cpu %d, try to setup vector 0x%x but "
+               "register already in use, APIC%lX=0x%x\n",
+               smp_processor_id(), new, reg, old);
+        return -EBUSY;
+    }
+
+    apic_write(reg, new);
+
+    return 0;
 }
-EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs);
+EXPORT_SYMBOL_GPL(setup_APIC_eilvt);
 
 /*
  * Program the next event, relative to now
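
The new reserve_eilvt_offset() above claims an extended-LVT offset with an atomic_cmpxchg() loop, so every core either agrees on the vector stored at a given offset or the caller gets a firmware-bug error. A stripped-down userspace illustration of that reservation pattern, using C11 atomics and invented names rather than kernel code:

    #include <stdatomic.h>

    static _Atomic unsigned int slots[4];   /* 0 means "unclaimed" */

    /* Returns the value the slot ends up holding; the caller succeeded
     * only if the return value equals the value it asked for. */
    static unsigned int reserve_slot(int idx, unsigned int want)
    {
            unsigned int cur = atomic_load(&slots[idx]);

            do {
                    if (cur != 0 && cur != want)
                            return cur;     /* taken with a different value */
            } while (!atomic_compare_exchange_weak(&slots[idx], &cur, want));

            return want;
    }
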
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 9508811e8448..8ae808d110f4 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -131,13 +131,9 @@ struct irq_pin_list { | |||
131 | struct irq_pin_list *next; | 131 | struct irq_pin_list *next; |
132 | }; | 132 | }; |
133 | 133 | ||
134 | static struct irq_pin_list *get_one_free_irq_2_pin(int node) | 134 | static struct irq_pin_list *alloc_irq_pin_list(int node) |
135 | { | 135 | { |
136 | struct irq_pin_list *pin; | 136 | return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node); |
137 | |||
138 | pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node); | ||
139 | |||
140 | return pin; | ||
141 | } | 137 | } |
142 | 138 | ||
143 | /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ | 139 | /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ |
@@ -150,10 +146,7 @@ static struct irq_cfg irq_cfgx[NR_IRQS]; | |||
150 | int __init arch_early_irq_init(void) | 146 | int __init arch_early_irq_init(void) |
151 | { | 147 | { |
152 | struct irq_cfg *cfg; | 148 | struct irq_cfg *cfg; |
153 | struct irq_desc *desc; | 149 | int count, node, i; |
154 | int count; | ||
155 | int node; | ||
156 | int i; | ||
157 | 150 | ||
158 | if (!legacy_pic->nr_legacy_irqs) { | 151 | if (!legacy_pic->nr_legacy_irqs) { |
159 | nr_irqs_gsi = 0; | 152 | nr_irqs_gsi = 0; |
@@ -164,11 +157,13 @@ int __init arch_early_irq_init(void) | |||
164 | count = ARRAY_SIZE(irq_cfgx); | 157 | count = ARRAY_SIZE(irq_cfgx); |
165 | node = cpu_to_node(0); | 158 | node = cpu_to_node(0); |
166 | 159 | ||
160 | /* Make sure the legacy interrupts are marked in the bitmap */ | ||
161 | irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs); | ||
162 | |||
167 | for (i = 0; i < count; i++) { | 163 | for (i = 0; i < count; i++) { |
168 | desc = irq_to_desc(i); | 164 | set_irq_chip_data(i, &cfg[i]); |
169 | desc->chip_data = &cfg[i]; | 165 | zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node); |
170 | zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node); | 166 | zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node); |
171 | zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node); | ||
172 | /* | 167 | /* |
173 | * For legacy IRQ's, start with assigning irq0 to irq15 to | 168 | * For legacy IRQ's, start with assigning irq0 to irq15 to |
174 | * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0. | 169 | * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0. |
@@ -183,170 +178,88 @@ int __init arch_early_irq_init(void) | |||
183 | } | 178 | } |
184 | 179 | ||
185 | #ifdef CONFIG_SPARSE_IRQ | 180 | #ifdef CONFIG_SPARSE_IRQ |
186 | struct irq_cfg *irq_cfg(unsigned int irq) | 181 | static struct irq_cfg *irq_cfg(unsigned int irq) |
187 | { | 182 | { |
188 | struct irq_cfg *cfg = NULL; | 183 | return get_irq_chip_data(irq); |
189 | struct irq_desc *desc; | ||
190 | |||
191 | desc = irq_to_desc(irq); | ||
192 | if (desc) | ||
193 | cfg = desc->chip_data; | ||
194 | |||
195 | return cfg; | ||
196 | } | 184 | } |
197 | 185 | ||
198 | static struct irq_cfg *get_one_free_irq_cfg(int node) | 186 | static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) |
199 | { | 187 | { |
200 | struct irq_cfg *cfg; | 188 | struct irq_cfg *cfg; |
201 | 189 | ||
202 | cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); | 190 | cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node); |
203 | if (cfg) { | 191 | if (!cfg) |
204 | if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) { | 192 | return NULL; |
205 | kfree(cfg); | 193 | if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node)) |
206 | cfg = NULL; | 194 | goto out_cfg; |
207 | } else if (!zalloc_cpumask_var_node(&cfg->old_domain, | 195 | if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node)) |
208 | GFP_ATOMIC, node)) { | 196 | goto out_domain; |
209 | free_cpumask_var(cfg->domain); | ||
210 | kfree(cfg); | ||
211 | cfg = NULL; | ||
212 | } | ||
213 | } | ||
214 | |||
215 | return cfg; | 197 | return cfg; |
198 | out_domain: | ||
199 | free_cpumask_var(cfg->domain); | ||
200 | out_cfg: | ||
201 | kfree(cfg); | ||
202 | return NULL; | ||
216 | } | 203 | } |
217 | 204 | ||
218 | int arch_init_chip_data(struct irq_desc *desc, int node) | 205 | static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) |
219 | { | ||
220 | struct irq_cfg *cfg; | ||
221 | |||
222 | cfg = desc->chip_data; | ||
223 | if (!cfg) { | ||
224 | desc->chip_data = get_one_free_irq_cfg(node); | ||
225 | if (!desc->chip_data) { | ||
226 | printk(KERN_ERR "can not alloc irq_cfg\n"); | ||
227 | BUG_ON(1); | ||
228 | } | ||
229 | } | ||
230 | |||
231 | return 0; | ||
232 | } | ||
233 | |||
234 | /* for move_irq_desc */ | ||
235 | static void | ||
236 | init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int node) | ||
237 | { | 206 | { |
238 | struct irq_pin_list *old_entry, *head, *tail, *entry; | 207 | if (!cfg) |
239 | |||
240 | cfg->irq_2_pin = NULL; | ||
241 | old_entry = old_cfg->irq_2_pin; | ||
242 | if (!old_entry) | ||
243 | return; | ||
244 | |||
245 | entry = get_one_free_irq_2_pin(node); | ||
246 | if (!entry) | ||
247 | return; | 208 | return; |
209 | set_irq_chip_data(at, NULL); | ||
210 | free_cpumask_var(cfg->domain); | ||
211 | free_cpumask_var(cfg->old_domain); | ||
212 | kfree(cfg); | ||
213 | } | ||
248 | 214 | ||
249 | entry->apic = old_entry->apic; | 215 | #else |
250 | entry->pin = old_entry->pin; | ||
251 | head = entry; | ||
252 | tail = entry; | ||
253 | old_entry = old_entry->next; | ||
254 | while (old_entry) { | ||
255 | entry = get_one_free_irq_2_pin(node); | ||
256 | if (!entry) { | ||
257 | entry = head; | ||
258 | while (entry) { | ||
259 | head = entry->next; | ||
260 | kfree(entry); | ||
261 | entry = head; | ||
262 | } | ||
263 | /* still use the old one */ | ||
264 | return; | ||
265 | } | ||
266 | entry->apic = old_entry->apic; | ||
267 | entry->pin = old_entry->pin; | ||
268 | tail->next = entry; | ||
269 | tail = entry; | ||
270 | old_entry = old_entry->next; | ||
271 | } | ||
272 | 216 | ||
273 | tail->next = NULL; | 217 | struct irq_cfg *irq_cfg(unsigned int irq) |
274 | cfg->irq_2_pin = head; | 218 | { |
219 | return irq < nr_irqs ? irq_cfgx + irq : NULL; | ||
275 | } | 220 | } |
276 | 221 | ||
277 | static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg) | 222 | static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) |
278 | { | 223 | { |
279 | struct irq_pin_list *entry, *next; | 224 | return irq_cfgx + irq; |
280 | 225 | } | |
281 | if (old_cfg->irq_2_pin == cfg->irq_2_pin) | ||
282 | return; | ||
283 | 226 | ||
284 | entry = old_cfg->irq_2_pin; | 227 | static inline void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) { } |
285 | 228 | ||
286 | while (entry) { | 229 | #endif |
287 | next = entry->next; | ||
288 | kfree(entry); | ||
289 | entry = next; | ||
290 | } | ||
291 | old_cfg->irq_2_pin = NULL; | ||
292 | } | ||
293 | 230 | ||
294 | void arch_init_copy_chip_data(struct irq_desc *old_desc, | 231 | static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node) |
295 | struct irq_desc *desc, int node) | ||
296 | { | 232 | { |
233 | int res = irq_alloc_desc_at(at, node); | ||
297 | struct irq_cfg *cfg; | 234 | struct irq_cfg *cfg; |
298 | struct irq_cfg *old_cfg; | ||
299 | |||
300 | cfg = get_one_free_irq_cfg(node); | ||
301 | 235 | ||
302 | if (!cfg) | 236 | if (res < 0) { |
303 | return; | 237 | if (res != -EEXIST) |
304 | 238 | return NULL; | |
305 | desc->chip_data = cfg; | 239 | cfg = get_irq_chip_data(at); |
306 | 240 | if (cfg) | |
307 | old_cfg = old_desc->chip_data; | 241 | return cfg; |
308 | 242 | } | |
309 | cfg->vector = old_cfg->vector; | ||
310 | cfg->move_in_progress = old_cfg->move_in_progress; | ||
311 | cpumask_copy(cfg->domain, old_cfg->domain); | ||
312 | cpumask_copy(cfg->old_domain, old_cfg->old_domain); | ||
313 | |||
314 | init_copy_irq_2_pin(old_cfg, cfg, node); | ||
315 | } | ||
316 | 243 | ||
317 | static void free_irq_cfg(struct irq_cfg *cfg) | 244 | cfg = alloc_irq_cfg(at, node); |
318 | { | 245 | if (cfg) |
319 | free_cpumask_var(cfg->domain); | 246 | set_irq_chip_data(at, cfg); |
320 | free_cpumask_var(cfg->old_domain); | 247 | else |
321 | kfree(cfg); | 248 | irq_free_desc(at); |
249 | return cfg; | ||
322 | } | 250 | } |
323 | 251 | ||
324 | void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) | 252 | static int alloc_irq_from(unsigned int from, int node) |
325 | { | 253 | { |
326 | struct irq_cfg *old_cfg, *cfg; | 254 | return irq_alloc_desc_from(from, node); |
327 | |||
328 | old_cfg = old_desc->chip_data; | ||
329 | cfg = desc->chip_data; | ||
330 | |||
331 | if (old_cfg == cfg) | ||
332 | return; | ||
333 | |||
334 | if (old_cfg) { | ||
335 | free_irq_2_pin(old_cfg, cfg); | ||
336 | free_irq_cfg(old_cfg); | ||
337 | old_desc->chip_data = NULL; | ||
338 | } | ||
339 | } | 255 | } |
340 | /* end for move_irq_desc */ | ||
341 | 256 | ||
342 | #else | 257 | static void free_irq_at(unsigned int at, struct irq_cfg *cfg) |
343 | struct irq_cfg *irq_cfg(unsigned int irq) | ||
344 | { | 258 | { |
345 | return irq < nr_irqs ? irq_cfgx + irq : NULL; | 259 | free_irq_cfg(at, cfg); |
260 | irq_free_desc(at); | ||
346 | } | 261 | } |
347 | 262 | ||
348 | #endif | ||
349 | |||
350 | struct io_apic { | 263 | struct io_apic { |
351 | unsigned int index; | 264 | unsigned int index; |
352 | unsigned int unused[3]; | 265 | unsigned int unused[3]; |
@@ -451,7 +364,7 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) | |||
451 | io_apic_write(apic, 0x10 + 2*pin, eu.w1); | 364 | io_apic_write(apic, 0x10 + 2*pin, eu.w1); |
452 | } | 365 | } |
453 | 366 | ||
454 | void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) | 367 | static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) |
455 | { | 368 | { |
456 | unsigned long flags; | 369 | unsigned long flags; |
457 | raw_spin_lock_irqsave(&ioapic_lock, flags); | 370 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
@@ -481,7 +394,7 @@ static void ioapic_mask_entry(int apic, int pin) | |||
481 | * fast in the common case, and fast for shared ISA-space IRQs. | 394 | * fast in the common case, and fast for shared ISA-space IRQs. |
482 | */ | 395 | */ |
483 | static int | 396 | static int |
484 | add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin) | 397 | __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) |
485 | { | 398 | { |
486 | struct irq_pin_list **last, *entry; | 399 | struct irq_pin_list **last, *entry; |
487 | 400 | ||
@@ -493,7 +406,7 @@ add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin) | |||
493 | last = &entry->next; | 406 | last = &entry->next; |
494 | } | 407 | } |
495 | 408 | ||
496 | entry = get_one_free_irq_2_pin(node); | 409 | entry = alloc_irq_pin_list(node); |
497 | if (!entry) { | 410 | if (!entry) { |
498 | printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n", | 411 | printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n", |
499 | node, apic, pin); | 412 | node, apic, pin); |
@@ -508,7 +421,7 @@ add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin) | |||
508 | 421 | ||
509 | static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) | 422 | static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) |
510 | { | 423 | { |
511 | if (add_pin_to_irq_node_nopanic(cfg, node, apic, pin)) | 424 | if (__add_pin_to_irq_node(cfg, node, apic, pin)) |
512 | panic("IO-APIC: failed to add irq-pin. Can not proceed\n"); | 425 | panic("IO-APIC: failed to add irq-pin. Can not proceed\n"); |
513 | } | 426 | } |
514 | 427 | ||
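The two functions above keep the old split under new names: __add_pin_to_irq_node() appends an (apic, pin) entry to the cfg's singly linked irq_2_pin list and reports failure, while add_pin_to_irq_node() is the wrapper that panics, since a missing pin entry during IO-APIC setup is unrecoverable. The append walks a pointer-to-pointer cursor so one code path covers the empty list, a duplicate hit, and the tail insert. A small self-contained C version of that idiom is below; list and field names are illustrative only.

#include <stdio.h>
#include <stdlib.h>

struct pin_entry {
	struct pin_entry *next;
	int apic, pin;
};

/* Append (apic, pin) unless it is already present; 0 on success. */
static int pin_list_add(struct pin_entry **head, int apic, int pin)
{
	struct pin_entry **last = head, *entry;

	for (entry = *head; entry; entry = entry->next) {
		if (entry->apic == apic && entry->pin == pin)
			return 0;		/* already there, nothing to do */
		last = &entry->next;		/* remember the tail link */
	}

	entry = malloc(sizeof(*entry));
	if (!entry)
		return -1;
	entry->next = NULL;
	entry->apic = apic;
	entry->pin  = pin;
	*last = entry;				/* works for empty and non-empty lists */
	return 0;
}

int main(void)
{
	struct pin_entry *head = NULL, *e;

	pin_list_add(&head, 0, 2);
	pin_list_add(&head, 0, 2);		/* duplicate, ignored */
	pin_list_add(&head, 1, 19);
	for (e = head; e; e = e->next)
		printf("apic %d pin %d\n", e->apic, e->pin);
	return 0;
}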
@@ -571,11 +484,6 @@ static void __unmask_and_level_IO_APIC_irq(struct irq_pin_list *entry) | |||
571 | IO_APIC_REDIR_LEVEL_TRIGGER, NULL); | 484 | IO_APIC_REDIR_LEVEL_TRIGGER, NULL); |
572 | } | 485 | } |
573 | 486 | ||
574 | static void __unmask_IO_APIC_irq(struct irq_cfg *cfg) | ||
575 | { | ||
576 | io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); | ||
577 | } | ||
578 | |||
579 | static void io_apic_sync(struct irq_pin_list *entry) | 487 | static void io_apic_sync(struct irq_pin_list *entry) |
580 | { | 488 | { |
581 | /* | 489 | /* |
@@ -587,44 +495,37 @@ static void io_apic_sync(struct irq_pin_list *entry) | |||
587 | readl(&io_apic->data); | 495 | readl(&io_apic->data); |
588 | } | 496 | } |
589 | 497 | ||
590 | static void __mask_IO_APIC_irq(struct irq_cfg *cfg) | 498 | static void mask_ioapic(struct irq_cfg *cfg) |
591 | { | 499 | { |
500 | unsigned long flags; | ||
501 | |||
502 | raw_spin_lock_irqsave(&ioapic_lock, flags); | ||
592 | io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); | 503 | io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); |
504 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | ||
593 | } | 505 | } |
594 | 506 | ||
595 | static void mask_IO_APIC_irq_desc(struct irq_desc *desc) | 507 | static void mask_ioapic_irq(struct irq_data *data) |
596 | { | 508 | { |
597 | struct irq_cfg *cfg = desc->chip_data; | 509 | mask_ioapic(data->chip_data); |
598 | unsigned long flags; | 510 | } |
599 | |||
600 | BUG_ON(!cfg); | ||
601 | 511 | ||
602 | raw_spin_lock_irqsave(&ioapic_lock, flags); | 512 | static void __unmask_ioapic(struct irq_cfg *cfg) |
603 | __mask_IO_APIC_irq(cfg); | 513 | { |
604 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | 514 | io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); |
605 | } | 515 | } |
606 | 516 | ||
607 | static void unmask_IO_APIC_irq_desc(struct irq_desc *desc) | 517 | static void unmask_ioapic(struct irq_cfg *cfg) |
608 | { | 518 | { |
609 | struct irq_cfg *cfg = desc->chip_data; | ||
610 | unsigned long flags; | 519 | unsigned long flags; |
611 | 520 | ||
612 | raw_spin_lock_irqsave(&ioapic_lock, flags); | 521 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
613 | __unmask_IO_APIC_irq(cfg); | 522 | __unmask_ioapic(cfg); |
614 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | 523 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
615 | } | 524 | } |
616 | 525 | ||
617 | static void mask_IO_APIC_irq(unsigned int irq) | 526 | static void unmask_ioapic_irq(struct irq_data *data) |
618 | { | 527 | { |
619 | struct irq_desc *desc = irq_to_desc(irq); | 528 | unmask_ioapic(data->chip_data); |
620 | |||
621 | mask_IO_APIC_irq_desc(desc); | ||
622 | } | ||
623 | static void unmask_IO_APIC_irq(unsigned int irq) | ||
624 | { | ||
625 | struct irq_desc *desc = irq_to_desc(irq); | ||
626 | |||
627 | unmask_IO_APIC_irq_desc(desc); | ||
628 | } | 529 | } |
629 | 530 | ||
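The mask/unmask rework settles on a two-level convention: __unmask_ioapic() assumes ioapic_lock is already held, mask_ioapic()/unmask_ioapic() take the lock themselves, and the tiny mask_ioapic_irq()/unmask_ioapic_irq() wrappers only adapt the new struct irq_data callback signature by passing data->chip_data through. A user-space pthread sketch of that locked-wrapper-around-__helper convention, with invented names, looks like this.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
static int hw_masked;

/* Caller must hold hw_lock (the kernel "__" prefix convention). */
static void __hw_unmask(void)
{
	hw_masked = 0;
}

/* Public entry point: takes the lock around the low-level helper. */
static void hw_unmask(void)
{
	pthread_mutex_lock(&hw_lock);
	__hw_unmask();
	pthread_mutex_unlock(&hw_lock);
}

/* A startup-style path that already holds the lock for other checks
 * calls the __ variant directly, as startup_ioapic_irq() does later. */
static void hw_startup(void)
{
	pthread_mutex_lock(&hw_lock);
	/* ... check pending state ... */
	__hw_unmask();
	pthread_mutex_unlock(&hw_lock);
}

int main(void)
{
	hw_masked = 1;
	hw_unmask();
	hw_startup();
	printf("masked = %d\n", hw_masked);
	return 0;
}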
630 | static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) | 531 | static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) |
@@ -694,14 +595,14 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void) | |||
694 | struct IO_APIC_route_entry **ioapic_entries; | 595 | struct IO_APIC_route_entry **ioapic_entries; |
695 | 596 | ||
696 | ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics, | 597 | ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics, |
697 | GFP_ATOMIC); | 598 | GFP_KERNEL); |
698 | if (!ioapic_entries) | 599 | if (!ioapic_entries) |
699 | return 0; | 600 | return 0; |
700 | 601 | ||
701 | for (apic = 0; apic < nr_ioapics; apic++) { | 602 | for (apic = 0; apic < nr_ioapics; apic++) { |
702 | ioapic_entries[apic] = | 603 | ioapic_entries[apic] = |
703 | kzalloc(sizeof(struct IO_APIC_route_entry) * | 604 | kzalloc(sizeof(struct IO_APIC_route_entry) * |
704 | nr_ioapic_registers[apic], GFP_ATOMIC); | 605 | nr_ioapic_registers[apic], GFP_KERNEL); |
705 | if (!ioapic_entries[apic]) | 606 | if (!ioapic_entries[apic]) |
706 | goto nomem; | 607 | goto nomem; |
707 | } | 608 | } |
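This hunk only changes allocation flags: alloc_ioapic_entries() runs in process context, so both kzalloc() calls can use GFP_KERNEL, which may sleep, instead of GFP_ATOMIC. The surrounding structure is the familiar "allocate an array of pointers, then each element, unwind via a nomem label on failure" shape; the unwind body itself sits outside the lines shown, so the plain C sketch below only illustrates the general pattern, with placeholder types and sizes.

#include <stdlib.h>

struct route_entry { unsigned int raw[2]; };

/* Allocate nr tables of cnt[i] entries each; NULL plus full cleanup on failure. */
static struct route_entry **alloc_tables(int nr, const int *cnt)
{
	struct route_entry **tables;
	int i;

	tables = calloc(nr, sizeof(*tables));
	if (!tables)
		return NULL;

	for (i = 0; i < nr; i++) {
		tables[i] = calloc(cnt[i], sizeof(struct route_entry));
		if (!tables[i])
			goto nomem;
	}
	return tables;

nomem:
	while (--i >= 0)			/* free only what was allocated */
		free(tables[i]);
	free(tables);
	return NULL;
}

int main(void)
{
	int cnt[2] = { 24, 24 };
	struct route_entry **t = alloc_tables(2, cnt);

	if (t) { free(t[0]); free(t[1]); free(t); }
	return 0;
}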
@@ -1259,7 +1160,6 @@ void __setup_vector_irq(int cpu) | |||
1259 | /* Initialize vector_irq on a new cpu */ | 1160 | /* Initialize vector_irq on a new cpu */ |
1260 | int irq, vector; | 1161 | int irq, vector; |
1261 | struct irq_cfg *cfg; | 1162 | struct irq_cfg *cfg; |
1262 | struct irq_desc *desc; | ||
1263 | 1163 | ||
1264 | /* | 1164 | /* |
1265 | * vector_lock will make sure that we don't run into irq vector | 1165 | * vector_lock will make sure that we don't run into irq vector |
@@ -1268,9 +1168,10 @@ void __setup_vector_irq(int cpu) | |||
1268 | */ | 1168 | */ |
1269 | raw_spin_lock(&vector_lock); | 1169 | raw_spin_lock(&vector_lock); |
1270 | /* Mark the inuse vectors */ | 1170 | /* Mark the inuse vectors */ |
1271 | for_each_irq_desc(irq, desc) { | 1171 | for_each_active_irq(irq) { |
1272 | cfg = desc->chip_data; | 1172 | cfg = get_irq_chip_data(irq); |
1273 | 1173 | if (!cfg) | |
1174 | continue; | ||
1274 | /* | 1175 | /* |
1275 | * If it is a legacy IRQ handled by the legacy PIC, this cpu | 1176 | * If it is a legacy IRQ handled by the legacy PIC, this cpu |
1276 | * will be part of the irq_cfg's domain. | 1177 | * will be part of the irq_cfg's domain. |
@@ -1327,17 +1228,17 @@ static inline int IO_APIC_irq_trigger(int irq) | |||
1327 | } | 1228 | } |
1328 | #endif | 1229 | #endif |
1329 | 1230 | ||
1330 | static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger) | 1231 | static void ioapic_register_intr(unsigned int irq, unsigned long trigger) |
1331 | { | 1232 | { |
1332 | 1233 | ||
1333 | if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || | 1234 | if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || |
1334 | trigger == IOAPIC_LEVEL) | 1235 | trigger == IOAPIC_LEVEL) |
1335 | desc->status |= IRQ_LEVEL; | 1236 | irq_set_status_flags(irq, IRQ_LEVEL); |
1336 | else | 1237 | else |
1337 | desc->status &= ~IRQ_LEVEL; | 1238 | irq_clear_status_flags(irq, IRQ_LEVEL); |
1338 | 1239 | ||
1339 | if (irq_remapped(irq)) { | 1240 | if (irq_remapped(get_irq_chip_data(irq))) { |
1340 | desc->status |= IRQ_MOVE_PCNTXT; | 1241 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
1341 | if (trigger) | 1242 | if (trigger) |
1342 | set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, | 1243 | set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, |
1343 | handle_fasteoi_irq, | 1244 | handle_fasteoi_irq, |
@@ -1358,10 +1259,10 @@ static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long t | |||
1358 | handle_edge_irq, "edge"); | 1259 | handle_edge_irq, "edge"); |
1359 | } | 1260 | } |
1360 | 1261 | ||
1361 | int setup_ioapic_entry(int apic_id, int irq, | 1262 | static int setup_ioapic_entry(int apic_id, int irq, |
1362 | struct IO_APIC_route_entry *entry, | 1263 | struct IO_APIC_route_entry *entry, |
1363 | unsigned int destination, int trigger, | 1264 | unsigned int destination, int trigger, |
1364 | int polarity, int vector, int pin) | 1265 | int polarity, int vector, int pin) |
1365 | { | 1266 | { |
1366 | /* | 1267 | /* |
1367 | * add it to the IO-APIC irq-routing table: | 1268 | * add it to the IO-APIC irq-routing table: |
@@ -1417,18 +1318,14 @@ int setup_ioapic_entry(int apic_id, int irq, | |||
1417 | return 0; | 1318 | return 0; |
1418 | } | 1319 | } |
1419 | 1320 | ||
1420 | static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc, | 1321 | static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq, |
1421 | int trigger, int polarity) | 1322 | struct irq_cfg *cfg, int trigger, int polarity) |
1422 | { | 1323 | { |
1423 | struct irq_cfg *cfg; | ||
1424 | struct IO_APIC_route_entry entry; | 1324 | struct IO_APIC_route_entry entry; |
1425 | unsigned int dest; | 1325 | unsigned int dest; |
1426 | 1326 | ||
1427 | if (!IO_APIC_IRQ(irq)) | 1327 | if (!IO_APIC_IRQ(irq)) |
1428 | return; | 1328 | return; |
1429 | |||
1430 | cfg = desc->chip_data; | ||
1431 | |||
1432 | /* | 1329 | /* |
1433 | * For legacy irqs, cfg->domain starts with cpu 0 for legacy | 1330 | * For legacy irqs, cfg->domain starts with cpu 0 for legacy |
1434 | * controllers like 8259. Now that IO-APIC can handle this irq, update | 1331 | * controllers like 8259. Now that IO-APIC can handle this irq, update |
@@ -1457,9 +1354,9 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq | |||
1457 | return; | 1354 | return; |
1458 | } | 1355 | } |
1459 | 1356 | ||
1460 | ioapic_register_intr(irq, desc, trigger); | 1357 | ioapic_register_intr(irq, trigger); |
1461 | if (irq < legacy_pic->nr_legacy_irqs) | 1358 | if (irq < legacy_pic->nr_legacy_irqs) |
1462 | legacy_pic->chip->mask(irq); | 1359 | legacy_pic->mask(irq); |
1463 | 1360 | ||
1464 | ioapic_write_entry(apic_id, pin, entry); | 1361 | ioapic_write_entry(apic_id, pin, entry); |
1465 | } | 1362 | } |
@@ -1470,11 +1367,9 @@ static struct { | |||
1470 | 1367 | ||
1471 | static void __init setup_IO_APIC_irqs(void) | 1368 | static void __init setup_IO_APIC_irqs(void) |
1472 | { | 1369 | { |
1473 | int apic_id, pin, idx, irq; | 1370 | int apic_id, pin, idx, irq, notcon = 0; |
1474 | int notcon = 0; | ||
1475 | struct irq_desc *desc; | ||
1476 | struct irq_cfg *cfg; | ||
1477 | int node = cpu_to_node(0); | 1371 | int node = cpu_to_node(0); |
1372 | struct irq_cfg *cfg; | ||
1478 | 1373 | ||
1479 | apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); | 1374 | apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); |
1480 | 1375 | ||
@@ -1511,19 +1406,17 @@ static void __init setup_IO_APIC_irqs(void) | |||
1511 | apic->multi_timer_check(apic_id, irq)) | 1406 | apic->multi_timer_check(apic_id, irq)) |
1512 | continue; | 1407 | continue; |
1513 | 1408 | ||
1514 | desc = irq_to_desc_alloc_node(irq, node); | 1409 | cfg = alloc_irq_and_cfg_at(irq, node); |
1515 | if (!desc) { | 1410 | if (!cfg) |
1516 | printk(KERN_INFO "can not get irq_desc for %d\n", irq); | ||
1517 | continue; | 1411 | continue; |
1518 | } | 1412 | |
1519 | cfg = desc->chip_data; | ||
1520 | add_pin_to_irq_node(cfg, node, apic_id, pin); | 1413 | add_pin_to_irq_node(cfg, node, apic_id, pin); |
1521 | /* | 1414 | /* |
1522 | * don't mark it in pin_programmed, so later acpi could | 1415 | * don't mark it in pin_programmed, so later acpi could |
1523 | * set it correctly when irq < 16 | 1416 | * set it correctly when irq < 16 |
1524 | */ | 1417 | */ |
1525 | setup_IO_APIC_irq(apic_id, pin, irq, desc, | 1418 | setup_ioapic_irq(apic_id, pin, irq, cfg, irq_trigger(idx), |
1526 | irq_trigger(idx), irq_polarity(idx)); | 1419 | irq_polarity(idx)); |
1527 | } | 1420 | } |
1528 | 1421 | ||
1529 | if (notcon) | 1422 | if (notcon) |
@@ -1538,9 +1431,7 @@ static void __init setup_IO_APIC_irqs(void) | |||
1538 | */ | 1431 | */ |
1539 | void setup_IO_APIC_irq_extra(u32 gsi) | 1432 | void setup_IO_APIC_irq_extra(u32 gsi) |
1540 | { | 1433 | { |
1541 | int apic_id = 0, pin, idx, irq; | 1434 | int apic_id = 0, pin, idx, irq, node = cpu_to_node(0); |
1542 | int node = cpu_to_node(0); | ||
1543 | struct irq_desc *desc; | ||
1544 | struct irq_cfg *cfg; | 1435 | struct irq_cfg *cfg; |
1545 | 1436 | ||
1546 | /* | 1437 | /* |
@@ -1556,18 +1447,15 @@ void setup_IO_APIC_irq_extra(u32 gsi) | |||
1556 | return; | 1447 | return; |
1557 | 1448 | ||
1558 | irq = pin_2_irq(idx, apic_id, pin); | 1449 | irq = pin_2_irq(idx, apic_id, pin); |
1559 | #ifdef CONFIG_SPARSE_IRQ | 1450 | |
1560 | desc = irq_to_desc(irq); | 1451 | /* Only handle the non legacy irqs on secondary ioapics */ |
1561 | if (desc) | 1452 | if (apic_id == 0 || irq < NR_IRQS_LEGACY) |
1562 | return; | 1453 | return; |
1563 | #endif | 1454 | |
1564 | desc = irq_to_desc_alloc_node(irq, node); | 1455 | cfg = alloc_irq_and_cfg_at(irq, node); |
1565 | if (!desc) { | 1456 | if (!cfg) |
1566 | printk(KERN_INFO "can not get irq_desc for %d\n", irq); | ||
1567 | return; | 1457 | return; |
1568 | } | ||
1569 | 1458 | ||
1570 | cfg = desc->chip_data; | ||
1571 | add_pin_to_irq_node(cfg, node, apic_id, pin); | 1459 | add_pin_to_irq_node(cfg, node, apic_id, pin); |
1572 | 1460 | ||
1573 | if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) { | 1461 | if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) { |
@@ -1577,7 +1465,7 @@ void setup_IO_APIC_irq_extra(u32 gsi) | |||
1577 | } | 1465 | } |
1578 | set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed); | 1466 | set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed); |
1579 | 1467 | ||
1580 | setup_IO_APIC_irq(apic_id, pin, irq, desc, | 1468 | setup_ioapic_irq(apic_id, pin, irq, cfg, |
1581 | irq_trigger(idx), irq_polarity(idx)); | 1469 | irq_trigger(idx), irq_polarity(idx)); |
1582 | } | 1470 | } |
1583 | 1471 | ||
@@ -1628,7 +1516,6 @@ __apicdebuginit(void) print_IO_APIC(void) | |||
1628 | union IO_APIC_reg_03 reg_03; | 1516 | union IO_APIC_reg_03 reg_03; |
1629 | unsigned long flags; | 1517 | unsigned long flags; |
1630 | struct irq_cfg *cfg; | 1518 | struct irq_cfg *cfg; |
1631 | struct irq_desc *desc; | ||
1632 | unsigned int irq; | 1519 | unsigned int irq; |
1633 | 1520 | ||
1634 | printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); | 1521 | printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); |
@@ -1715,10 +1602,10 @@ __apicdebuginit(void) print_IO_APIC(void) | |||
1715 | } | 1602 | } |
1716 | } | 1603 | } |
1717 | printk(KERN_DEBUG "IRQ to pin mappings:\n"); | 1604 | printk(KERN_DEBUG "IRQ to pin mappings:\n"); |
1718 | for_each_irq_desc(irq, desc) { | 1605 | for_each_active_irq(irq) { |
1719 | struct irq_pin_list *entry; | 1606 | struct irq_pin_list *entry; |
1720 | 1607 | ||
1721 | cfg = desc->chip_data; | 1608 | cfg = get_irq_chip_data(irq); |
1722 | if (!cfg) | 1609 | if (!cfg) |
1723 | continue; | 1610 | continue; |
1724 | entry = cfg->irq_2_pin; | 1611 | entry = cfg->irq_2_pin; |
@@ -2225,29 +2112,26 @@ static int __init timer_irq_works(void) | |||
2225 | * an edge even if it isn't on the 8259A... | 2112 | * an edge even if it isn't on the 8259A... |
2226 | */ | 2113 | */ |
2227 | 2114 | ||
2228 | static unsigned int startup_ioapic_irq(unsigned int irq) | 2115 | static unsigned int startup_ioapic_irq(struct irq_data *data) |
2229 | { | 2116 | { |
2230 | int was_pending = 0; | 2117 | int was_pending = 0, irq = data->irq; |
2231 | unsigned long flags; | 2118 | unsigned long flags; |
2232 | struct irq_cfg *cfg; | ||
2233 | 2119 | ||
2234 | raw_spin_lock_irqsave(&ioapic_lock, flags); | 2120 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
2235 | if (irq < legacy_pic->nr_legacy_irqs) { | 2121 | if (irq < legacy_pic->nr_legacy_irqs) { |
2236 | legacy_pic->chip->mask(irq); | 2122 | legacy_pic->mask(irq); |
2237 | if (legacy_pic->irq_pending(irq)) | 2123 | if (legacy_pic->irq_pending(irq)) |
2238 | was_pending = 1; | 2124 | was_pending = 1; |
2239 | } | 2125 | } |
2240 | cfg = irq_cfg(irq); | 2126 | __unmask_ioapic(data->chip_data); |
2241 | __unmask_IO_APIC_irq(cfg); | ||
2242 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | 2127 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
2243 | 2128 | ||
2244 | return was_pending; | 2129 | return was_pending; |
2245 | } | 2130 | } |
2246 | 2131 | ||
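startup_ioapic_irq() now takes a struct irq_data * instead of a bare irq number, so it reads the per-interrupt state straight from data->chip_data (and data->irq where the number is still needed) rather than re-looking it up with irq_cfg(); ioapic_retrigger_irq() just below gets the same treatment, and the conversion runs through the rest of the file. A compact C model of moving a callback from index-based lookup to a context pointer is shown here; the struct names are invented for the illustration.

#include <stdio.h>

struct irq_state { int vector; int masked; };

/* New-style callback: the per-interrupt context arrives as an argument. */
struct irq_data_model {
	unsigned int irq;
	struct irq_state *chip_data;
};

static unsigned int startup(struct irq_data_model *d)
{
	int was_pending = 0;
	/* no global table lookup needed, the state is right here */
	d->chip_data->masked = 0;
	return was_pending;
}

int main(void)
{
	struct irq_state st = { .vector = 49, .masked = 1 };
	struct irq_data_model d = { .irq = 16, .chip_data = &st };

	startup(&d);
	printf("irq %u vector %d masked %d\n", d.irq, st.vector, st.masked);
	return 0;
}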
2247 | static int ioapic_retrigger_irq(unsigned int irq) | 2132 | static int ioapic_retrigger_irq(struct irq_data *data) |
2248 | { | 2133 | { |
2249 | 2134 | struct irq_cfg *cfg = data->chip_data; | |
2250 | struct irq_cfg *cfg = irq_cfg(irq); | ||
2251 | unsigned long flags; | 2135 | unsigned long flags; |
2252 | 2136 | ||
2253 | raw_spin_lock_irqsave(&vector_lock, flags); | 2137 | raw_spin_lock_irqsave(&vector_lock, flags); |
@@ -2298,7 +2182,7 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq | |||
2298 | * With interrupt-remapping, destination information comes | 2182 | * With interrupt-remapping, destination information comes |
2299 | * from interrupt-remapping table entry. | 2183 | * from interrupt-remapping table entry. |
2300 | */ | 2184 | */ |
2301 | if (!irq_remapped(irq)) | 2185 | if (!irq_remapped(cfg)) |
2302 | io_apic_write(apic, 0x11 + pin*2, dest); | 2186 | io_apic_write(apic, 0x11 + pin*2, dest); |
2303 | reg = io_apic_read(apic, 0x10 + pin*2); | 2187 | reg = io_apic_read(apic, 0x10 + pin*2); |
2304 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; | 2188 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; |
@@ -2308,65 +2192,46 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq | |||
2308 | } | 2192 | } |
2309 | 2193 | ||
2310 | /* | 2194 | /* |
2311 | * Either sets desc->affinity to a valid value, and returns | 2195 | * Either sets data->affinity to a valid value, and returns |
2312 | * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and | 2196 | * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and |
2313 | * leaves desc->affinity untouched. | 2197 | * leaves data->affinity untouched. |
2314 | */ | 2198 | */ |
2315 | unsigned int | 2199 | int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, |
2316 | set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask, | 2200 | unsigned int *dest_id) |
2317 | unsigned int *dest_id) | ||
2318 | { | 2201 | { |
2319 | struct irq_cfg *cfg; | 2202 | struct irq_cfg *cfg = data->chip_data; |
2320 | unsigned int irq; | ||
2321 | 2203 | ||
2322 | if (!cpumask_intersects(mask, cpu_online_mask)) | 2204 | if (!cpumask_intersects(mask, cpu_online_mask)) |
2323 | return -1; | 2205 | return -1; |
2324 | 2206 | ||
2325 | irq = desc->irq; | 2207 | if (assign_irq_vector(data->irq, data->chip_data, mask)) |
2326 | cfg = desc->chip_data; | ||
2327 | if (assign_irq_vector(irq, cfg, mask)) | ||
2328 | return -1; | 2208 | return -1; |
2329 | 2209 | ||
2330 | cpumask_copy(desc->affinity, mask); | 2210 | cpumask_copy(data->affinity, mask); |
2331 | 2211 | ||
2332 | *dest_id = apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain); | 2212 | *dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain); |
2333 | return 0; | 2213 | return 0; |
2334 | } | 2214 | } |
2335 | 2215 | ||
2336 | static int | 2216 | static int |
2337 | set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) | 2217 | ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, |
2218 | bool force) | ||
2338 | { | 2219 | { |
2339 | struct irq_cfg *cfg; | 2220 | unsigned int dest, irq = data->irq; |
2340 | unsigned long flags; | 2221 | unsigned long flags; |
2341 | unsigned int dest; | 2222 | int ret; |
2342 | unsigned int irq; | ||
2343 | int ret = -1; | ||
2344 | |||
2345 | irq = desc->irq; | ||
2346 | cfg = desc->chip_data; | ||
2347 | 2223 | ||
2348 | raw_spin_lock_irqsave(&ioapic_lock, flags); | 2224 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
2349 | ret = set_desc_affinity(desc, mask, &dest); | 2225 | ret = __ioapic_set_affinity(data, mask, &dest); |
2350 | if (!ret) { | 2226 | if (!ret) { |
2351 | /* Only the high 8 bits are valid. */ | 2227 | /* Only the high 8 bits are valid. */ |
2352 | dest = SET_APIC_LOGICAL_ID(dest); | 2228 | dest = SET_APIC_LOGICAL_ID(dest); |
2353 | __target_IO_APIC_irq(irq, dest, cfg); | 2229 | __target_IO_APIC_irq(irq, dest, data->chip_data); |
2354 | } | 2230 | } |
2355 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | 2231 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
2356 | |||
2357 | return ret; | 2232 | return ret; |
2358 | } | 2233 | } |
2359 | 2234 | ||
2360 | static int | ||
2361 | set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) | ||
2362 | { | ||
2363 | struct irq_desc *desc; | ||
2364 | |||
2365 | desc = irq_to_desc(irq); | ||
2366 | |||
2367 | return set_ioapic_affinity_irq_desc(desc, mask); | ||
2368 | } | ||
2369 | |||
2370 | #ifdef CONFIG_INTR_REMAP | 2235 | #ifdef CONFIG_INTR_REMAP |
2371 | 2236 | ||
2372 | /* | 2237 | /* |
@@ -2381,24 +2246,21 @@ set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) | |||
2381 | * the interrupt-remapping table entry. | 2246 | * the interrupt-remapping table entry. |
2382 | */ | 2247 | */ |
2383 | static int | 2248 | static int |
2384 | migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) | 2249 | ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, |
2250 | bool force) | ||
2385 | { | 2251 | { |
2386 | struct irq_cfg *cfg; | 2252 | struct irq_cfg *cfg = data->chip_data; |
2253 | unsigned int dest, irq = data->irq; | ||
2387 | struct irte irte; | 2254 | struct irte irte; |
2388 | unsigned int dest; | ||
2389 | unsigned int irq; | ||
2390 | int ret = -1; | ||
2391 | 2255 | ||
2392 | if (!cpumask_intersects(mask, cpu_online_mask)) | 2256 | if (!cpumask_intersects(mask, cpu_online_mask)) |
2393 | return ret; | 2257 | return -EINVAL; |
2394 | 2258 | ||
2395 | irq = desc->irq; | ||
2396 | if (get_irte(irq, &irte)) | 2259 | if (get_irte(irq, &irte)) |
2397 | return ret; | 2260 | return -EBUSY; |
2398 | 2261 | ||
2399 | cfg = desc->chip_data; | ||
2400 | if (assign_irq_vector(irq, cfg, mask)) | 2262 | if (assign_irq_vector(irq, cfg, mask)) |
2401 | return ret; | 2263 | return -EBUSY; |
2402 | 2264 | ||
2403 | dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); | 2265 | dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); |
2404 | 2266 | ||
@@ -2413,29 +2275,14 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) | |||
2413 | if (cfg->move_in_progress) | 2275 | if (cfg->move_in_progress) |
2414 | send_cleanup_vector(cfg); | 2276 | send_cleanup_vector(cfg); |
2415 | 2277 | ||
2416 | cpumask_copy(desc->affinity, mask); | 2278 | cpumask_copy(data->affinity, mask); |
2417 | |||
2418 | return 0; | 2279 | return 0; |
2419 | } | 2280 | } |
2420 | 2281 | ||
2421 | /* | ||
2422 | * Migrates the IRQ destination in the process context. | ||
2423 | */ | ||
2424 | static int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, | ||
2425 | const struct cpumask *mask) | ||
2426 | { | ||
2427 | return migrate_ioapic_irq_desc(desc, mask); | ||
2428 | } | ||
2429 | static int set_ir_ioapic_affinity_irq(unsigned int irq, | ||
2430 | const struct cpumask *mask) | ||
2431 | { | ||
2432 | struct irq_desc *desc = irq_to_desc(irq); | ||
2433 | |||
2434 | return set_ir_ioapic_affinity_irq_desc(desc, mask); | ||
2435 | } | ||
2436 | #else | 2282 | #else |
2437 | static inline int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, | 2283 | static inline int |
2438 | const struct cpumask *mask) | 2284 | ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, |
2285 | bool force) | ||
2439 | { | 2286 | { |
2440 | return 0; | 2287 | return 0; |
2441 | } | 2288 | } |
@@ -2497,10 +2344,8 @@ unlock: | |||
2497 | irq_exit(); | 2344 | irq_exit(); |
2498 | } | 2345 | } |
2499 | 2346 | ||
2500 | static void __irq_complete_move(struct irq_desc **descp, unsigned vector) | 2347 | static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector) |
2501 | { | 2348 | { |
2502 | struct irq_desc *desc = *descp; | ||
2503 | struct irq_cfg *cfg = desc->chip_data; | ||
2504 | unsigned me; | 2349 | unsigned me; |
2505 | 2350 | ||
2506 | if (likely(!cfg->move_in_progress)) | 2351 | if (likely(!cfg->move_in_progress)) |
@@ -2512,31 +2357,28 @@ static void __irq_complete_move(struct irq_desc **descp, unsigned vector) | |||
2512 | send_cleanup_vector(cfg); | 2357 | send_cleanup_vector(cfg); |
2513 | } | 2358 | } |
2514 | 2359 | ||
2515 | static void irq_complete_move(struct irq_desc **descp) | 2360 | static void irq_complete_move(struct irq_cfg *cfg) |
2516 | { | 2361 | { |
2517 | __irq_complete_move(descp, ~get_irq_regs()->orig_ax); | 2362 | __irq_complete_move(cfg, ~get_irq_regs()->orig_ax); |
2518 | } | 2363 | } |
2519 | 2364 | ||
2520 | void irq_force_complete_move(int irq) | 2365 | void irq_force_complete_move(int irq) |
2521 | { | 2366 | { |
2522 | struct irq_desc *desc = irq_to_desc(irq); | 2367 | struct irq_cfg *cfg = get_irq_chip_data(irq); |
2523 | struct irq_cfg *cfg = desc->chip_data; | ||
2524 | 2368 | ||
2525 | if (!cfg) | 2369 | if (!cfg) |
2526 | return; | 2370 | return; |
2527 | 2371 | ||
2528 | __irq_complete_move(&desc, cfg->vector); | 2372 | __irq_complete_move(cfg, cfg->vector); |
2529 | } | 2373 | } |
2530 | #else | 2374 | #else |
2531 | static inline void irq_complete_move(struct irq_desc **descp) {} | 2375 | static inline void irq_complete_move(struct irq_cfg *cfg) { } |
2532 | #endif | 2376 | #endif |
2533 | 2377 | ||
2534 | static void ack_apic_edge(unsigned int irq) | 2378 | static void ack_apic_edge(struct irq_data *data) |
2535 | { | 2379 | { |
2536 | struct irq_desc *desc = irq_to_desc(irq); | 2380 | irq_complete_move(data->chip_data); |
2537 | 2381 | move_native_irq(data->irq); | |
2538 | irq_complete_move(&desc); | ||
2539 | move_native_irq(irq); | ||
2540 | ack_APIC_irq(); | 2382 | ack_APIC_irq(); |
2541 | } | 2383 | } |
2542 | 2384 | ||
@@ -2558,10 +2400,12 @@ atomic_t irq_mis_count; | |||
2558 | * Otherwise, we simulate the EOI message manually by changing the trigger | 2400 | * Otherwise, we simulate the EOI message manually by changing the trigger |
2559 | * mode to edge and then back to level, with RTE being masked during this. | 2401 | * mode to edge and then back to level, with RTE being masked during this. |
2560 | */ | 2402 | */ |
2561 | static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) | 2403 | static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) |
2562 | { | 2404 | { |
2563 | struct irq_pin_list *entry; | 2405 | struct irq_pin_list *entry; |
2406 | unsigned long flags; | ||
2564 | 2407 | ||
2408 | raw_spin_lock_irqsave(&ioapic_lock, flags); | ||
2565 | for_each_irq_pin(entry, cfg->irq_2_pin) { | 2409 | for_each_irq_pin(entry, cfg->irq_2_pin) { |
2566 | if (mp_ioapics[entry->apic].apicver >= 0x20) { | 2410 | if (mp_ioapics[entry->apic].apicver >= 0x20) { |
2567 | /* | 2411 | /* |
@@ -2570,7 +2414,7 @@ static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) | |||
2570 | * intr-remapping table entry. Hence for the io-apic | 2414 | * intr-remapping table entry. Hence for the io-apic |
2571 | * EOI we use the pin number. | 2415 | * EOI we use the pin number. |
2572 | */ | 2416 | */ |
2573 | if (irq_remapped(irq)) | 2417 | if (irq_remapped(cfg)) |
2574 | io_apic_eoi(entry->apic, entry->pin); | 2418 | io_apic_eoi(entry->apic, entry->pin); |
2575 | else | 2419 | else |
2576 | io_apic_eoi(entry->apic, cfg->vector); | 2420 | io_apic_eoi(entry->apic, cfg->vector); |
@@ -2579,36 +2423,22 @@ static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) | |||
2579 | __unmask_and_level_IO_APIC_irq(entry); | 2423 | __unmask_and_level_IO_APIC_irq(entry); |
2580 | } | 2424 | } |
2581 | } | 2425 | } |
2582 | } | ||
2583 | |||
2584 | static void eoi_ioapic_irq(struct irq_desc *desc) | ||
2585 | { | ||
2586 | struct irq_cfg *cfg; | ||
2587 | unsigned long flags; | ||
2588 | unsigned int irq; | ||
2589 | |||
2590 | irq = desc->irq; | ||
2591 | cfg = desc->chip_data; | ||
2592 | |||
2593 | raw_spin_lock_irqsave(&ioapic_lock, flags); | ||
2594 | __eoi_ioapic_irq(irq, cfg); | ||
2595 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | 2426 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
2596 | } | 2427 | } |
2597 | 2428 | ||
2598 | static void ack_apic_level(unsigned int irq) | 2429 | static void ack_apic_level(struct irq_data *data) |
2599 | { | 2430 | { |
2431 | struct irq_cfg *cfg = data->chip_data; | ||
2432 | int i, do_unmask_irq = 0, irq = data->irq; | ||
2600 | struct irq_desc *desc = irq_to_desc(irq); | 2433 | struct irq_desc *desc = irq_to_desc(irq); |
2601 | unsigned long v; | 2434 | unsigned long v; |
2602 | int i; | ||
2603 | struct irq_cfg *cfg; | ||
2604 | int do_unmask_irq = 0; | ||
2605 | 2435 | ||
2606 | irq_complete_move(&desc); | 2436 | irq_complete_move(cfg); |
2607 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 2437 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
2608 | /* If we are moving the irq we need to mask it */ | 2438 | /* If we are moving the irq we need to mask it */ |
2609 | if (unlikely(desc->status & IRQ_MOVE_PENDING)) { | 2439 | if (unlikely(desc->status & IRQ_MOVE_PENDING)) { |
2610 | do_unmask_irq = 1; | 2440 | do_unmask_irq = 1; |
2611 | mask_IO_APIC_irq_desc(desc); | 2441 | mask_ioapic(cfg); |
2612 | } | 2442 | } |
2613 | #endif | 2443 | #endif |
2614 | 2444 | ||
@@ -2644,7 +2474,6 @@ static void ack_apic_level(unsigned int irq) | |||
2644 | * we use the above logic (mask+edge followed by unmask+level) from | 2474 | * we use the above logic (mask+edge followed by unmask+level) from |
2645 | * Manfred Spraul to clear the remote IRR. | 2475 | * Manfred Spraul to clear the remote IRR. |
2646 | */ | 2476 | */ |
2647 | cfg = desc->chip_data; | ||
2648 | i = cfg->vector; | 2477 | i = cfg->vector; |
2649 | v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); | 2478 | v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); |
2650 | 2479 | ||
@@ -2664,7 +2493,7 @@ static void ack_apic_level(unsigned int irq) | |||
2664 | if (!(v & (1 << (i & 0x1f)))) { | 2493 | if (!(v & (1 << (i & 0x1f)))) { |
2665 | atomic_inc(&irq_mis_count); | 2494 | atomic_inc(&irq_mis_count); |
2666 | 2495 | ||
2667 | eoi_ioapic_irq(desc); | 2496 | eoi_ioapic_irq(irq, cfg); |
2668 | } | 2497 | } |
2669 | 2498 | ||
2670 | /* Now we can move and re-enable the irq */ | 2499 |
@@ -2695,61 +2524,57 @@ static void ack_apic_level(unsigned int irq) | |||
2695 | * accurate and is causing problems then it is a hardware bug | 2524 | * accurate and is causing problems then it is a hardware bug |
2696 | * and you can go talk to the chipset vendor about it. | 2525 | * and you can go talk to the chipset vendor about it. |
2697 | */ | 2526 | */ |
2698 | cfg = desc->chip_data; | ||
2699 | if (!io_apic_level_ack_pending(cfg)) | 2527 | if (!io_apic_level_ack_pending(cfg)) |
2700 | move_masked_irq(irq); | 2528 | move_masked_irq(irq); |
2701 | unmask_IO_APIC_irq_desc(desc); | 2529 | unmask_ioapic(cfg); |
2702 | } | 2530 | } |
2703 | } | 2531 | } |
2704 | 2532 | ||
2705 | #ifdef CONFIG_INTR_REMAP | 2533 | #ifdef CONFIG_INTR_REMAP |
2706 | static void ir_ack_apic_edge(unsigned int irq) | 2534 | static void ir_ack_apic_edge(struct irq_data *data) |
2707 | { | 2535 | { |
2708 | ack_APIC_irq(); | 2536 | ack_APIC_irq(); |
2709 | } | 2537 | } |
2710 | 2538 | ||
2711 | static void ir_ack_apic_level(unsigned int irq) | 2539 | static void ir_ack_apic_level(struct irq_data *data) |
2712 | { | 2540 | { |
2713 | struct irq_desc *desc = irq_to_desc(irq); | ||
2714 | |||
2715 | ack_APIC_irq(); | 2541 | ack_APIC_irq(); |
2716 | eoi_ioapic_irq(desc); | 2542 | eoi_ioapic_irq(data->irq, data->chip_data); |
2717 | } | 2543 | } |
2718 | #endif /* CONFIG_INTR_REMAP */ | 2544 | #endif /* CONFIG_INTR_REMAP */ |
2719 | 2545 | ||
2720 | static struct irq_chip ioapic_chip __read_mostly = { | 2546 | static struct irq_chip ioapic_chip __read_mostly = { |
2721 | .name = "IO-APIC", | 2547 | .name = "IO-APIC", |
2722 | .startup = startup_ioapic_irq, | 2548 | .irq_startup = startup_ioapic_irq, |
2723 | .mask = mask_IO_APIC_irq, | 2549 | .irq_mask = mask_ioapic_irq, |
2724 | .unmask = unmask_IO_APIC_irq, | 2550 | .irq_unmask = unmask_ioapic_irq, |
2725 | .ack = ack_apic_edge, | 2551 | .irq_ack = ack_apic_edge, |
2726 | .eoi = ack_apic_level, | 2552 | .irq_eoi = ack_apic_level, |
2727 | #ifdef CONFIG_SMP | 2553 | #ifdef CONFIG_SMP |
2728 | .set_affinity = set_ioapic_affinity_irq, | 2554 | .irq_set_affinity = ioapic_set_affinity, |
2729 | #endif | 2555 | #endif |
2730 | .retrigger = ioapic_retrigger_irq, | 2556 | .irq_retrigger = ioapic_retrigger_irq, |
2731 | }; | 2557 | }; |
2732 | 2558 | ||
2733 | static struct irq_chip ir_ioapic_chip __read_mostly = { | 2559 | static struct irq_chip ir_ioapic_chip __read_mostly = { |
2734 | .name = "IR-IO-APIC", | 2560 | .name = "IR-IO-APIC", |
2735 | .startup = startup_ioapic_irq, | 2561 | .irq_startup = startup_ioapic_irq, |
2736 | .mask = mask_IO_APIC_irq, | 2562 | .irq_mask = mask_ioapic_irq, |
2737 | .unmask = unmask_IO_APIC_irq, | 2563 | .irq_unmask = unmask_ioapic_irq, |
2738 | #ifdef CONFIG_INTR_REMAP | 2564 | #ifdef CONFIG_INTR_REMAP |
2739 | .ack = ir_ack_apic_edge, | 2565 | .irq_ack = ir_ack_apic_edge, |
2740 | .eoi = ir_ack_apic_level, | 2566 | .irq_eoi = ir_ack_apic_level, |
2741 | #ifdef CONFIG_SMP | 2567 | #ifdef CONFIG_SMP |
2742 | .set_affinity = set_ir_ioapic_affinity_irq, | 2568 | .irq_set_affinity = ir_ioapic_set_affinity, |
2743 | #endif | 2569 | #endif |
2744 | #endif | 2570 | #endif |
2745 | .retrigger = ioapic_retrigger_irq, | 2571 | .irq_retrigger = ioapic_retrigger_irq, |
2746 | }; | 2572 | }; |
2747 | 2573 | ||
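The irq_chip initializers above are the visible surface of the conversion: every method gains an irq_ prefix and the new struct irq_data * argument (.mask becomes .irq_mask, .set_affinity becomes .irq_set_affinity, and so on), while the CONFIG_INTR_REMAP/CONFIG_SMP structure of ir_ioapic_chip is preserved. Designated initializers keep such ops tables readable and order-independent; a standalone C illustration with invented ops names is below.

#include <stdio.h>

struct ctx { const char *name; };

struct chip_ops {
	const char *name;
	void (*irq_mask)(struct ctx *);
	void (*irq_unmask)(struct ctx *);
};

static void my_mask(struct ctx *c)   { printf("%s: mask\n", c->name); }
static void my_unmask(struct ctx *c) { printf("%s: unmask\n", c->name); }

/* Designated initializers: unnamed members stay NULL, ordering is free. */
static const struct chip_ops demo_chip = {
	.name       = "demo",
	.irq_mask   = my_mask,
	.irq_unmask = my_unmask,
};

int main(void)
{
	struct ctx c = { .name = demo_chip.name };

	demo_chip.irq_mask(&c);
	demo_chip.irq_unmask(&c);
	return 0;
}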
2748 | static inline void init_IO_APIC_traps(void) | 2574 | static inline void init_IO_APIC_traps(void) |
2749 | { | 2575 | { |
2750 | int irq; | ||
2751 | struct irq_desc *desc; | ||
2752 | struct irq_cfg *cfg; | 2576 | struct irq_cfg *cfg; |
2577 | unsigned int irq; | ||
2753 | 2578 | ||
2754 | /* | 2579 | /* |
2755 | * NOTE! The local APIC isn't very good at handling | 2580 | * NOTE! The local APIC isn't very good at handling |
@@ -2762,8 +2587,8 @@ static inline void init_IO_APIC_traps(void) | |||
2762 | * Also, we've got to be careful not to trash gate | 2587 | * Also, we've got to be careful not to trash gate |
2763 | * 0x80, because int 0x80 is hm, kind of importantish. ;) | 2588 | * 0x80, because int 0x80 is hm, kind of importantish. ;) |
2764 | */ | 2589 | */ |
2765 | for_each_irq_desc(irq, desc) { | 2590 | for_each_active_irq(irq) { |
2766 | cfg = desc->chip_data; | 2591 | cfg = get_irq_chip_data(irq); |
2767 | if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { | 2592 | if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { |
2768 | /* | 2593 | /* |
2769 | * Hmm.. We don't have an entry for this, | 2594 | * Hmm.. We don't have an entry for this, |
@@ -2774,7 +2599,7 @@ static inline void init_IO_APIC_traps(void) | |||
2774 | legacy_pic->make_irq(irq); | 2599 | legacy_pic->make_irq(irq); |
2775 | else | 2600 | else |
2776 | /* Strange. Oh, well.. */ | 2601 | /* Strange. Oh, well.. */ |
2777 | desc->chip = &no_irq_chip; | 2602 | set_irq_chip(irq, &no_irq_chip); |
2778 | } | 2603 | } |
2779 | } | 2604 | } |
2780 | } | 2605 | } |
@@ -2783,7 +2608,7 @@ static inline void init_IO_APIC_traps(void) | |||
2783 | * The local APIC irq-chip implementation: | 2608 | * The local APIC irq-chip implementation: |
2784 | */ | 2609 | */ |
2785 | 2610 | ||
2786 | static void mask_lapic_irq(unsigned int irq) | 2611 | static void mask_lapic_irq(struct irq_data *data) |
2787 | { | 2612 | { |
2788 | unsigned long v; | 2613 | unsigned long v; |
2789 | 2614 | ||
@@ -2791,7 +2616,7 @@ static void mask_lapic_irq(unsigned int irq) | |||
2791 | apic_write(APIC_LVT0, v | APIC_LVT_MASKED); | 2616 | apic_write(APIC_LVT0, v | APIC_LVT_MASKED); |
2792 | } | 2617 | } |
2793 | 2618 | ||
2794 | static void unmask_lapic_irq(unsigned int irq) | 2619 | static void unmask_lapic_irq(struct irq_data *data) |
2795 | { | 2620 | { |
2796 | unsigned long v; | 2621 | unsigned long v; |
2797 | 2622 | ||
@@ -2799,21 +2624,21 @@ static void unmask_lapic_irq(unsigned int irq) | |||
2799 | apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); | 2624 | apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); |
2800 | } | 2625 | } |
2801 | 2626 | ||
2802 | static void ack_lapic_irq(unsigned int irq) | 2627 | static void ack_lapic_irq(struct irq_data *data) |
2803 | { | 2628 | { |
2804 | ack_APIC_irq(); | 2629 | ack_APIC_irq(); |
2805 | } | 2630 | } |
2806 | 2631 | ||
2807 | static struct irq_chip lapic_chip __read_mostly = { | 2632 | static struct irq_chip lapic_chip __read_mostly = { |
2808 | .name = "local-APIC", | 2633 | .name = "local-APIC", |
2809 | .mask = mask_lapic_irq, | 2634 | .irq_mask = mask_lapic_irq, |
2810 | .unmask = unmask_lapic_irq, | 2635 | .irq_unmask = unmask_lapic_irq, |
2811 | .ack = ack_lapic_irq, | 2636 | .irq_ack = ack_lapic_irq, |
2812 | }; | 2637 | }; |
2813 | 2638 | ||
2814 | static void lapic_register_intr(int irq, struct irq_desc *desc) | 2639 | static void lapic_register_intr(int irq) |
2815 | { | 2640 | { |
2816 | desc->status &= ~IRQ_LEVEL; | 2641 | irq_clear_status_flags(irq, IRQ_LEVEL); |
2817 | set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, | 2642 | set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, |
2818 | "edge"); | 2643 | "edge"); |
2819 | } | 2644 | } |
@@ -2916,8 +2741,7 @@ int timer_through_8259 __initdata; | |||
2916 | */ | 2741 | */ |
2917 | static inline void __init check_timer(void) | 2742 | static inline void __init check_timer(void) |
2918 | { | 2743 | { |
2919 | struct irq_desc *desc = irq_to_desc(0); | 2744 | struct irq_cfg *cfg = get_irq_chip_data(0); |
2920 | struct irq_cfg *cfg = desc->chip_data; | ||
2921 | int node = cpu_to_node(0); | 2745 | int node = cpu_to_node(0); |
2922 | int apic1, pin1, apic2, pin2; | 2746 | int apic1, pin1, apic2, pin2; |
2923 | unsigned long flags; | 2747 | unsigned long flags; |
@@ -2928,7 +2752,7 @@ static inline void __init check_timer(void) | |||
2928 | /* | 2752 | /* |
2929 | * get/set the timer IRQ vector: | 2753 | * get/set the timer IRQ vector: |
2930 | */ | 2754 | */ |
2931 | legacy_pic->chip->mask(0); | 2755 | legacy_pic->mask(0); |
2932 | assign_irq_vector(0, cfg, apic->target_cpus()); | 2756 | assign_irq_vector(0, cfg, apic->target_cpus()); |
2933 | 2757 | ||
2934 | /* | 2758 | /* |
@@ -2987,7 +2811,7 @@ static inline void __init check_timer(void) | |||
2987 | add_pin_to_irq_node(cfg, node, apic1, pin1); | 2811 | add_pin_to_irq_node(cfg, node, apic1, pin1); |
2988 | setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); | 2812 | setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); |
2989 | } else { | 2813 | } else { |
2990 | /* for edge trigger, setup_IO_APIC_irq already | 2814 | /* for edge trigger, setup_ioapic_irq already |
2991 | * leave it unmasked. | 2815 | * leave it unmasked. |
2992 | * so only need to unmask if it is level-trigger | 2816 | * so only need to unmask if it is level-trigger |
2993 | * do we really have level trigger timer? | 2817 | * do we really have level trigger timer? |
@@ -2995,12 +2819,12 @@ static inline void __init check_timer(void) | |||
2995 | int idx; | 2819 | int idx; |
2996 | idx = find_irq_entry(apic1, pin1, mp_INT); | 2820 | idx = find_irq_entry(apic1, pin1, mp_INT); |
2997 | if (idx != -1 && irq_trigger(idx)) | 2821 | if (idx != -1 && irq_trigger(idx)) |
2998 | unmask_IO_APIC_irq_desc(desc); | 2822 | unmask_ioapic(cfg); |
2999 | } | 2823 | } |
3000 | if (timer_irq_works()) { | 2824 | if (timer_irq_works()) { |
3001 | if (nmi_watchdog == NMI_IO_APIC) { | 2825 | if (nmi_watchdog == NMI_IO_APIC) { |
3002 | setup_nmi(); | 2826 | setup_nmi(); |
3003 | legacy_pic->chip->unmask(0); | 2827 | legacy_pic->unmask(0); |
3004 | } | 2828 | } |
3005 | if (disable_timer_pin_1 > 0) | 2829 | if (disable_timer_pin_1 > 0) |
3006 | clear_IO_APIC_pin(0, pin1); | 2830 | clear_IO_APIC_pin(0, pin1); |
@@ -3023,14 +2847,14 @@ static inline void __init check_timer(void) | |||
3023 | */ | 2847 | */ |
3024 | replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); | 2848 | replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); |
3025 | setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); | 2849 | setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); |
3026 | legacy_pic->chip->unmask(0); | 2850 | legacy_pic->unmask(0); |
3027 | if (timer_irq_works()) { | 2851 | if (timer_irq_works()) { |
3028 | apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); | 2852 | apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); |
3029 | timer_through_8259 = 1; | 2853 | timer_through_8259 = 1; |
3030 | if (nmi_watchdog == NMI_IO_APIC) { | 2854 | if (nmi_watchdog == NMI_IO_APIC) { |
3031 | legacy_pic->chip->mask(0); | 2855 | legacy_pic->mask(0); |
3032 | setup_nmi(); | 2856 | setup_nmi(); |
3033 | legacy_pic->chip->unmask(0); | 2857 | legacy_pic->unmask(0); |
3034 | } | 2858 | } |
3035 | goto out; | 2859 | goto out; |
3036 | } | 2860 | } |
@@ -3038,7 +2862,7 @@ static inline void __init check_timer(void) | |||
3038 | * Cleanup, just in case ... | 2862 | * Cleanup, just in case ... |
3039 | */ | 2863 | */ |
3040 | local_irq_disable(); | 2864 | local_irq_disable(); |
3041 | legacy_pic->chip->mask(0); | 2865 | legacy_pic->mask(0); |
3042 | clear_IO_APIC_pin(apic2, pin2); | 2866 | clear_IO_APIC_pin(apic2, pin2); |
3043 | apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); | 2867 | apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); |
3044 | } | 2868 | } |
@@ -3055,16 +2879,16 @@ static inline void __init check_timer(void) | |||
3055 | apic_printk(APIC_QUIET, KERN_INFO | 2879 | apic_printk(APIC_QUIET, KERN_INFO |
3056 | "...trying to set up timer as Virtual Wire IRQ...\n"); | 2880 | "...trying to set up timer as Virtual Wire IRQ...\n"); |
3057 | 2881 | ||
3058 | lapic_register_intr(0, desc); | 2882 | lapic_register_intr(0); |
3059 | apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ | 2883 | apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ |
3060 | legacy_pic->chip->unmask(0); | 2884 | legacy_pic->unmask(0); |
3061 | 2885 | ||
3062 | if (timer_irq_works()) { | 2886 | if (timer_irq_works()) { |
3063 | apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); | 2887 | apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); |
3064 | goto out; | 2888 | goto out; |
3065 | } | 2889 | } |
3066 | local_irq_disable(); | 2890 | local_irq_disable(); |
3067 | legacy_pic->chip->mask(0); | 2891 | legacy_pic->mask(0); |
3068 | apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); | 2892 | apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); |
3069 | apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); | 2893 | apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); |
3070 | 2894 | ||
@@ -3230,44 +3054,37 @@ device_initcall(ioapic_init_sysfs); | |||
3230 | /* | 3054 | /* |
3231 | * Dynamic irq allocate and deallocation | 3055 | * Dynamic irq allocate and deallocation |
3232 | */ | 3056 | */ |
3233 | unsigned int create_irq_nr(unsigned int irq_want, int node) | 3057 | unsigned int create_irq_nr(unsigned int from, int node) |
3234 | { | 3058 | { |
3235 | /* Allocate an unused irq */ | 3059 | struct irq_cfg *cfg; |
3236 | unsigned int irq; | ||
3237 | unsigned int new; | ||
3238 | unsigned long flags; | 3060 | unsigned long flags; |
3239 | struct irq_cfg *cfg_new = NULL; | 3061 | unsigned int ret = 0; |
3240 | struct irq_desc *desc_new = NULL; | 3062 | int irq; |
3241 | |||
3242 | irq = 0; | ||
3243 | if (irq_want < nr_irqs_gsi) | ||
3244 | irq_want = nr_irqs_gsi; | ||
3245 | |||
3246 | raw_spin_lock_irqsave(&vector_lock, flags); | ||
3247 | for (new = irq_want; new < nr_irqs; new++) { | ||
3248 | desc_new = irq_to_desc_alloc_node(new, node); | ||
3249 | if (!desc_new) { | ||
3250 | printk(KERN_INFO "can not get irq_desc for %d\n", new); | ||
3251 | continue; | ||
3252 | } | ||
3253 | cfg_new = desc_new->chip_data; | ||
3254 | |||
3255 | if (cfg_new->vector != 0) | ||
3256 | continue; | ||
3257 | 3063 | ||
3258 | desc_new = move_irq_desc(desc_new, node); | 3064 | if (from < nr_irqs_gsi) |
3259 | cfg_new = desc_new->chip_data; | 3065 | from = nr_irqs_gsi; |
3260 | 3066 | ||
3261 | if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0) | 3067 | irq = alloc_irq_from(from, node); |
3262 | irq = new; | 3068 | if (irq < 0) |
3263 | break; | 3069 | return 0; |
3070 | cfg = alloc_irq_cfg(irq, node); | ||
3071 | if (!cfg) { | ||
3072 | free_irq_at(irq, NULL); | ||
3073 | return 0; | ||
3264 | } | 3074 | } |
3265 | raw_spin_unlock_irqrestore(&vector_lock, flags); | ||
3266 | 3075 | ||
3267 | if (irq > 0) | 3076 | raw_spin_lock_irqsave(&vector_lock, flags); |
3268 | dynamic_irq_init_keep_chip_data(irq); | 3077 | if (!__assign_irq_vector(irq, cfg, apic->target_cpus())) |
3078 | ret = irq; | ||
3079 | raw_spin_unlock_irqrestore(&vector_lock, flags); | ||
3269 | 3080 | ||
3270 | return irq; | 3081 | if (ret) { |
3082 | set_irq_chip_data(irq, cfg); | ||
3083 | irq_clear_status_flags(irq, IRQ_NOREQUEST); | ||
3084 | } else { | ||
3085 | free_irq_at(irq, cfg); | ||
3086 | } | ||
3087 | return ret; | ||
3271 | } | 3088 | } |
3272 | 3089 | ||
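create_irq_nr() is now a straight-line sequence: clamp the starting point to nr_irqs_gsi, grab a free descriptor with alloc_irq_from(), allocate its cfg, and only then take vector_lock for the one step that needs it, __assign_irq_vector(); on success the cfg is installed as chip data and IRQ_NOREQUEST is cleared, on any failure free_irq_at() unwinds both allocations. Doing the sleeping allocations before the raw spinlock is what keeps the GFP_KERNEL switch seen earlier safe. A user-space pthread sketch of that "allocate outside the lock, commit inside it" ordering follows, with invented names.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *table[16];

static int create_slot(unsigned int from)
{
	void *cfg;
	unsigned int i;
	int ret = -1;

	cfg = malloc(64);			/* may "sleep": done before locking */
	if (!cfg)
		return -1;

	pthread_mutex_lock(&table_lock);
	for (i = from; i < 16; i++) {		/* commit step only, short and non-blocking */
		if (!table[i]) {
			table[i] = cfg;
			ret = (int)i;
			break;
		}
	}
	pthread_mutex_unlock(&table_lock);

	if (ret < 0)
		free(cfg);			/* unwind on failure */
	return ret;
}

int main(void)
{
	printf("got slot %d\n", create_slot(3));
	printf("got slot %d\n", create_slot(3));
	return 0;
}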
3273 | int create_irq(void) | 3090 | int create_irq(void) |
@@ -3287,14 +3104,17 @@ int create_irq(void) | |||
3287 | 3104 | ||
3288 | void destroy_irq(unsigned int irq) | 3105 | void destroy_irq(unsigned int irq) |
3289 | { | 3106 | { |
3107 | struct irq_cfg *cfg = get_irq_chip_data(irq); | ||
3290 | unsigned long flags; | 3108 | unsigned long flags; |
3291 | 3109 | ||
3292 | dynamic_irq_cleanup_keep_chip_data(irq); | 3110 | irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); |
3293 | 3111 | ||
3294 | free_irte(irq); | 3112 | if (intr_remapping_enabled) |
3113 | free_irte(irq); | ||
3295 | raw_spin_lock_irqsave(&vector_lock, flags); | 3114 | raw_spin_lock_irqsave(&vector_lock, flags); |
3296 | __clear_irq_vector(irq, get_irq_chip_data(irq)); | 3115 | __clear_irq_vector(irq, cfg); |
3297 | raw_spin_unlock_irqrestore(&vector_lock, flags); | 3116 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
3117 | free_irq_at(irq, cfg); | ||
3298 | } | 3118 | } |
3299 | 3119 | ||
3300 | /* | 3120 | /* |
@@ -3318,7 +3138,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, | |||
3318 | 3138 | ||
3319 | dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); | 3139 | dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); |
3320 | 3140 | ||
3321 | if (irq_remapped(irq)) { | 3141 | if (irq_remapped(get_irq_chip_data(irq))) { |
3322 | struct irte irte; | 3142 | struct irte irte; |
3323 | int ir_index; | 3143 | int ir_index; |
3324 | u16 sub_handle; | 3144 | u16 sub_handle; |
@@ -3371,26 +3191,24 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, | |||
3371 | } | 3191 | } |
3372 | 3192 | ||
3373 | #ifdef CONFIG_SMP | 3193 | #ifdef CONFIG_SMP |
3374 | static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) | 3194 | static int |
3195 | msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) | ||
3375 | { | 3196 | { |
3376 | struct irq_desc *desc = irq_to_desc(irq); | 3197 | struct irq_cfg *cfg = data->chip_data; |
3377 | struct irq_cfg *cfg; | ||
3378 | struct msi_msg msg; | 3198 | struct msi_msg msg; |
3379 | unsigned int dest; | 3199 | unsigned int dest; |
3380 | 3200 | ||
3381 | if (set_desc_affinity(desc, mask, &dest)) | 3201 | if (__ioapic_set_affinity(data, mask, &dest)) |
3382 | return -1; | 3202 | return -1; |
3383 | 3203 | ||
3384 | cfg = desc->chip_data; | 3204 | __get_cached_msi_msg(data->msi_desc, &msg); |
3385 | |||
3386 | get_cached_msi_msg_desc(desc, &msg); | ||
3387 | 3205 | ||
3388 | msg.data &= ~MSI_DATA_VECTOR_MASK; | 3206 | msg.data &= ~MSI_DATA_VECTOR_MASK; |
3389 | msg.data |= MSI_DATA_VECTOR(cfg->vector); | 3207 | msg.data |= MSI_DATA_VECTOR(cfg->vector); |
3390 | msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; | 3208 | msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; |
3391 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | 3209 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); |
3392 | 3210 | ||
3393 | write_msi_msg_desc(desc, &msg); | 3211 | __write_msi_msg(data->msi_desc, &msg); |
3394 | 3212 | ||
3395 | return 0; | 3213 | return 0; |
3396 | } | 3214 | } |
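msi_set_affinity() follows the same recipe as the IO-APIC path: let __ioapic_set_affinity() pick the vector and destination, then patch only the vector bits of msg.data and the destination-id bits of msg.address_lo in the cached MSI message before writing it back with __write_msi_msg() on the msi_desc carried in irq_data. The clear-then-or update of a packed field looks like this in isolation; the mask and example values below are placeholders, not the real MSI layout.

#include <stdio.h>

#define FIELD_MASK	0xffu		/* placeholder, not MSI_DATA_VECTOR_MASK */
#define FIELD(x)	((x) & FIELD_MASK)

/* Replace one field of a packed word without disturbing the rest. */
static unsigned int update_field(unsigned int word, unsigned int value)
{
	word &= ~FIELD_MASK;		/* clear the old field */
	word |= FIELD(value);		/* insert the new one */
	return word;
}

int main(void)
{
	unsigned int msg_data = 0xabcd0031;

	msg_data = update_field(msg_data, 0x59);
	printf("msg.data = 0x%08x\n", msg_data);	/* 0xabcd0059 */
	return 0;
}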
@@ -3400,17 +3218,17 @@ static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) | |||
3400 | * done in the process context using interrupt-remapping hardware. | 3218 | * done in the process context using interrupt-remapping hardware. |
3401 | */ | 3219 | */ |
3402 | static int | 3220 | static int |
3403 | ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) | 3221 | ir_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, |
3222 | bool force) | ||
3404 | { | 3223 | { |
3405 | struct irq_desc *desc = irq_to_desc(irq); | 3224 | struct irq_cfg *cfg = data->chip_data; |
3406 | struct irq_cfg *cfg = desc->chip_data; | 3225 | unsigned int dest, irq = data->irq; |
3407 | unsigned int dest; | ||
3408 | struct irte irte; | 3226 | struct irte irte; |
3409 | 3227 | ||
3410 | if (get_irte(irq, &irte)) | 3228 | if (get_irte(irq, &irte)) |
3411 | return -1; | 3229 | return -1; |
3412 | 3230 | ||
3413 | if (set_desc_affinity(desc, mask, &dest)) | 3231 | if (__ioapic_set_affinity(data, mask, &dest)) |
3414 | return -1; | 3232 | return -1; |
3415 | 3233 | ||
3416 | irte.vector = cfg->vector; | 3234 | irte.vector = cfg->vector; |
@@ -3440,27 +3258,27 @@ ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) | |||
3440 | * which implement the MSI or MSI-X Capability Structure. | 3258 | * which implement the MSI or MSI-X Capability Structure. |
3441 | */ | 3259 | */ |
3442 | static struct irq_chip msi_chip = { | 3260 | static struct irq_chip msi_chip = { |
3443 | .name = "PCI-MSI", | 3261 | .name = "PCI-MSI", |
3444 | .unmask = unmask_msi_irq, | 3262 | .irq_unmask = unmask_msi_irq, |
3445 | .mask = mask_msi_irq, | 3263 | .irq_mask = mask_msi_irq, |
3446 | .ack = ack_apic_edge, | 3264 | .irq_ack = ack_apic_edge, |
3447 | #ifdef CONFIG_SMP | 3265 | #ifdef CONFIG_SMP |
3448 | .set_affinity = set_msi_irq_affinity, | 3266 | .irq_set_affinity = msi_set_affinity, |
3449 | #endif | 3267 | #endif |
3450 | .retrigger = ioapic_retrigger_irq, | 3268 | .irq_retrigger = ioapic_retrigger_irq, |
3451 | }; | 3269 | }; |
3452 | 3270 | ||
3453 | static struct irq_chip msi_ir_chip = { | 3271 | static struct irq_chip msi_ir_chip = { |
3454 | .name = "IR-PCI-MSI", | 3272 | .name = "IR-PCI-MSI", |
3455 | .unmask = unmask_msi_irq, | 3273 | .irq_unmask = unmask_msi_irq, |
3456 | .mask = mask_msi_irq, | 3274 | .irq_mask = mask_msi_irq, |
3457 | #ifdef CONFIG_INTR_REMAP | 3275 | #ifdef CONFIG_INTR_REMAP |
3458 | .ack = ir_ack_apic_edge, | 3276 | .irq_ack = ir_ack_apic_edge, |
3459 | #ifdef CONFIG_SMP | 3277 | #ifdef CONFIG_SMP |
3460 | .set_affinity = ir_set_msi_irq_affinity, | 3278 | .irq_set_affinity = ir_msi_set_affinity, |
3461 | #endif | 3279 | #endif |
3462 | #endif | 3280 | #endif |
3463 | .retrigger = ioapic_retrigger_irq, | 3281 | .irq_retrigger = ioapic_retrigger_irq, |
3464 | }; | 3282 | }; |
3465 | 3283 | ||
3466 | /* | 3284 | /* |
@@ -3492,8 +3310,8 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec) | |||
3492 | 3310 | ||
3493 | static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) | 3311 | static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) |
3494 | { | 3312 | { |
3495 | int ret; | ||
3496 | struct msi_msg msg; | 3313 | struct msi_msg msg; |
3314 | int ret; | ||
3497 | 3315 | ||
3498 | ret = msi_compose_msg(dev, irq, &msg, -1); | 3316 | ret = msi_compose_msg(dev, irq, &msg, -1); |
3499 | if (ret < 0) | 3317 | if (ret < 0) |
@@ -3502,12 +3320,8 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) | |||
3502 | set_irq_msi(irq, msidesc); | 3320 | set_irq_msi(irq, msidesc); |
3503 | write_msi_msg(irq, &msg); | 3321 | write_msi_msg(irq, &msg); |
3504 | 3322 | ||
3505 | if (irq_remapped(irq)) { | 3323 | if (irq_remapped(get_irq_chip_data(irq))) { |
3506 | struct irq_desc *desc = irq_to_desc(irq); | 3324 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
3507 | /* | ||
3508 | * irq migration in process context | ||
3509 | */ | ||
3510 | desc->status |= IRQ_MOVE_PCNTXT; | ||
3511 | set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); | 3325 | set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); |
3512 | } else | 3326 | } else |
3513 | set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); | 3327 | set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); |
@@ -3519,13 +3333,10 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) | |||
3519 | 3333 | ||
3520 | int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | 3334 | int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) |
3521 | { | 3335 | { |
3522 | unsigned int irq; | 3336 | int node, ret, sub_handle, index = 0; |
3523 | int ret, sub_handle; | 3337 | unsigned int irq, irq_want; |
3524 | struct msi_desc *msidesc; | 3338 | struct msi_desc *msidesc; |
3525 | unsigned int irq_want; | ||
3526 | struct intel_iommu *iommu = NULL; | 3339 | struct intel_iommu *iommu = NULL; |
3527 | int index = 0; | ||
3528 | int node; | ||
3529 | 3340 | ||
3530 | /* x86 doesn't support multiple MSI yet */ | 3341 | /* x86 doesn't support multiple MSI yet */ |
3531 | if (type == PCI_CAP_ID_MSI && nvec > 1) | 3342 | if (type == PCI_CAP_ID_MSI && nvec > 1) |
@@ -3585,18 +3396,17 @@ void arch_teardown_msi_irq(unsigned int irq) | |||
3585 | 3396 | ||
3586 | #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP) | 3397 | #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP) |
3587 | #ifdef CONFIG_SMP | 3398 | #ifdef CONFIG_SMP |
3588 | static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | 3399 | static int |
3400 | dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, | ||
3401 | bool force) | ||
3589 | { | 3402 | { |
3590 | struct irq_desc *desc = irq_to_desc(irq); | 3403 | struct irq_cfg *cfg = data->chip_data; |
3591 | struct irq_cfg *cfg; | 3404 | unsigned int dest, irq = data->irq; |
3592 | struct msi_msg msg; | 3405 | struct msi_msg msg; |
3593 | unsigned int dest; | ||
3594 | 3406 | ||
3595 | if (set_desc_affinity(desc, mask, &dest)) | 3407 | if (__ioapic_set_affinity(data, mask, &dest)) |
3596 | return -1; | 3408 | return -1; |
3597 | 3409 | ||
3598 | cfg = desc->chip_data; | ||
3599 | |||
3600 | dmar_msi_read(irq, &msg); | 3410 | dmar_msi_read(irq, &msg); |
3601 | 3411 | ||
3602 | msg.data &= ~MSI_DATA_VECTOR_MASK; | 3412 | msg.data &= ~MSI_DATA_VECTOR_MASK; |
@@ -3612,14 +3422,14 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | |||
3612 | #endif /* CONFIG_SMP */ | 3422 | #endif /* CONFIG_SMP */ |
3613 | 3423 | ||
3614 | static struct irq_chip dmar_msi_type = { | 3424 | static struct irq_chip dmar_msi_type = { |
3615 | .name = "DMAR_MSI", | 3425 | .name = "DMAR_MSI", |
3616 | .unmask = dmar_msi_unmask, | 3426 | .irq_unmask = dmar_msi_unmask, |
3617 | .mask = dmar_msi_mask, | 3427 | .irq_mask = dmar_msi_mask, |
3618 | .ack = ack_apic_edge, | 3428 | .irq_ack = ack_apic_edge, |
3619 | #ifdef CONFIG_SMP | 3429 | #ifdef CONFIG_SMP |
3620 | .set_affinity = dmar_msi_set_affinity, | 3430 | .irq_set_affinity = dmar_msi_set_affinity, |
3621 | #endif | 3431 | #endif |
3622 | .retrigger = ioapic_retrigger_irq, | 3432 | .irq_retrigger = ioapic_retrigger_irq, |
3623 | }; | 3433 | }; |
3624 | 3434 | ||
3625 | int arch_setup_dmar_msi(unsigned int irq) | 3435 | int arch_setup_dmar_msi(unsigned int irq) |
@@ -3640,26 +3450,24 @@ int arch_setup_dmar_msi(unsigned int irq) | |||
3640 | #ifdef CONFIG_HPET_TIMER | 3450 | #ifdef CONFIG_HPET_TIMER |
3641 | 3451 | ||
3642 | #ifdef CONFIG_SMP | 3452 | #ifdef CONFIG_SMP |
3643 | static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | 3453 | static int hpet_msi_set_affinity(struct irq_data *data, |
3454 | const struct cpumask *mask, bool force) | ||
3644 | { | 3455 | { |
3645 | struct irq_desc *desc = irq_to_desc(irq); | 3456 | struct irq_cfg *cfg = data->chip_data; |
3646 | struct irq_cfg *cfg; | ||
3647 | struct msi_msg msg; | 3457 | struct msi_msg msg; |
3648 | unsigned int dest; | 3458 | unsigned int dest; |
3649 | 3459 | ||
3650 | if (set_desc_affinity(desc, mask, &dest)) | 3460 | if (__ioapic_set_affinity(data, mask, &dest)) |
3651 | return -1; | 3461 | return -1; |
3652 | 3462 | ||
3653 | cfg = desc->chip_data; | 3463 | hpet_msi_read(data->handler_data, &msg); |
3654 | |||
3655 | hpet_msi_read(irq, &msg); | ||
3656 | 3464 | ||
3657 | msg.data &= ~MSI_DATA_VECTOR_MASK; | 3465 | msg.data &= ~MSI_DATA_VECTOR_MASK; |
3658 | msg.data |= MSI_DATA_VECTOR(cfg->vector); | 3466 | msg.data |= MSI_DATA_VECTOR(cfg->vector); |
3659 | msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; | 3467 | msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; |
3660 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | 3468 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); |
3661 | 3469 | ||
3662 | hpet_msi_write(irq, &msg); | 3470 | hpet_msi_write(data->handler_data, &msg); |
3663 | 3471 | ||
3664 | return 0; | 3472 | return 0; |
3665 | } | 3473 | } |
@@ -3667,34 +3475,33 @@ static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | |||
3667 | #endif /* CONFIG_SMP */ | 3475 | #endif /* CONFIG_SMP */ |
3668 | 3476 | ||
3669 | static struct irq_chip ir_hpet_msi_type = { | 3477 | static struct irq_chip ir_hpet_msi_type = { |
3670 | .name = "IR-HPET_MSI", | 3478 | .name = "IR-HPET_MSI", |
3671 | .unmask = hpet_msi_unmask, | 3479 | .irq_unmask = hpet_msi_unmask, |
3672 | .mask = hpet_msi_mask, | 3480 | .irq_mask = hpet_msi_mask, |
3673 | #ifdef CONFIG_INTR_REMAP | 3481 | #ifdef CONFIG_INTR_REMAP |
3674 | .ack = ir_ack_apic_edge, | 3482 | .irq_ack = ir_ack_apic_edge, |
3675 | #ifdef CONFIG_SMP | 3483 | #ifdef CONFIG_SMP |
3676 | .set_affinity = ir_set_msi_irq_affinity, | 3484 | .irq_set_affinity = ir_msi_set_affinity, |
3677 | #endif | 3485 | #endif |
3678 | #endif | 3486 | #endif |
3679 | .retrigger = ioapic_retrigger_irq, | 3487 | .irq_retrigger = ioapic_retrigger_irq, |
3680 | }; | 3488 | }; |
3681 | 3489 | ||
3682 | static struct irq_chip hpet_msi_type = { | 3490 | static struct irq_chip hpet_msi_type = { |
3683 | .name = "HPET_MSI", | 3491 | .name = "HPET_MSI", |
3684 | .unmask = hpet_msi_unmask, | 3492 | .irq_unmask = hpet_msi_unmask, |
3685 | .mask = hpet_msi_mask, | 3493 | .irq_mask = hpet_msi_mask, |
3686 | .ack = ack_apic_edge, | 3494 | .irq_ack = ack_apic_edge, |
3687 | #ifdef CONFIG_SMP | 3495 | #ifdef CONFIG_SMP |
3688 | .set_affinity = hpet_msi_set_affinity, | 3496 | .irq_set_affinity = hpet_msi_set_affinity, |
3689 | #endif | 3497 | #endif |
3690 | .retrigger = ioapic_retrigger_irq, | 3498 | .irq_retrigger = ioapic_retrigger_irq, |
3691 | }; | 3499 | }; |
3692 | 3500 | ||
3693 | int arch_setup_hpet_msi(unsigned int irq, unsigned int id) | 3501 | int arch_setup_hpet_msi(unsigned int irq, unsigned int id) |
3694 | { | 3502 | { |
3695 | int ret; | ||
3696 | struct msi_msg msg; | 3503 | struct msi_msg msg; |
3697 | struct irq_desc *desc = irq_to_desc(irq); | 3504 | int ret; |
3698 | 3505 | ||
3699 | if (intr_remapping_enabled) { | 3506 | if (intr_remapping_enabled) { |
3700 | struct intel_iommu *iommu = map_hpet_to_ir(id); | 3507 | struct intel_iommu *iommu = map_hpet_to_ir(id); |
@@ -3712,9 +3519,9 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id) | |||
3712 | if (ret < 0) | 3519 | if (ret < 0) |
3713 | return ret; | 3520 | return ret; |
3714 | 3521 | ||
3715 | hpet_msi_write(irq, &msg); | 3522 | hpet_msi_write(get_irq_data(irq), &msg); |
3716 | desc->status |= IRQ_MOVE_PCNTXT; | 3523 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
3717 | if (irq_remapped(irq)) | 3524 | if (irq_remapped(get_irq_chip_data(irq))) |
3718 | set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type, | 3525 | set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type, |
3719 | handle_edge_irq, "edge"); | 3526 | handle_edge_irq, "edge"); |
3720 | else | 3527 | else |
@@ -3747,33 +3554,30 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) | |||
3747 | write_ht_irq_msg(irq, &msg); | 3554 | write_ht_irq_msg(irq, &msg); |
3748 | } | 3555 | } |
3749 | 3556 | ||
3750 | static int set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) | 3557 | static int |
3558 | ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) | ||
3751 | { | 3559 | { |
3752 | struct irq_desc *desc = irq_to_desc(irq); | 3560 | struct irq_cfg *cfg = data->chip_data; |
3753 | struct irq_cfg *cfg; | ||
3754 | unsigned int dest; | 3561 | unsigned int dest; |
3755 | 3562 | ||
3756 | if (set_desc_affinity(desc, mask, &dest)) | 3563 | if (__ioapic_set_affinity(data, mask, &dest)) |
3757 | return -1; | 3564 | return -1; |
3758 | 3565 | ||
3759 | cfg = desc->chip_data; | 3566 | target_ht_irq(data->irq, dest, cfg->vector); |
3760 | |||
3761 | target_ht_irq(irq, dest, cfg->vector); | ||
3762 | |||
3763 | return 0; | 3567 | return 0; |
3764 | } | 3568 | } |
3765 | 3569 | ||
3766 | #endif | 3570 | #endif |
3767 | 3571 | ||
3768 | static struct irq_chip ht_irq_chip = { | 3572 | static struct irq_chip ht_irq_chip = { |
3769 | .name = "PCI-HT", | 3573 | .name = "PCI-HT", |
3770 | .mask = mask_ht_irq, | 3574 | .irq_mask = mask_ht_irq, |
3771 | .unmask = unmask_ht_irq, | 3575 | .irq_unmask = unmask_ht_irq, |
3772 | .ack = ack_apic_edge, | 3576 | .irq_ack = ack_apic_edge, |
3773 | #ifdef CONFIG_SMP | 3577 | #ifdef CONFIG_SMP |
3774 | .set_affinity = set_ht_irq_affinity, | 3578 | .irq_set_affinity = ht_set_affinity, |
3775 | #endif | 3579 | #endif |
3776 | .retrigger = ioapic_retrigger_irq, | 3580 | .irq_retrigger = ioapic_retrigger_irq, |
3777 | }; | 3581 | }; |
3778 | 3582 | ||
3779 | int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | 3583 | int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) |
@@ -3864,14 +3668,13 @@ int __init arch_probe_nr_irqs(void) | |||
3864 | if (nr < nr_irqs) | 3668 | if (nr < nr_irqs) |
3865 | nr_irqs = nr; | 3669 | nr_irqs = nr; |
3866 | 3670 | ||
3867 | return 0; | 3671 | return NR_IRQS_LEGACY; |
3868 | } | 3672 | } |
3869 | #endif | 3673 | #endif |
3870 | 3674 | ||
3871 | static int __io_apic_set_pci_routing(struct device *dev, int irq, | 3675 | static int __io_apic_set_pci_routing(struct device *dev, int irq, |
3872 | struct io_apic_irq_attr *irq_attr) | 3676 | struct io_apic_irq_attr *irq_attr) |
3873 | { | 3677 | { |
3874 | struct irq_desc *desc; | ||
3875 | struct irq_cfg *cfg; | 3678 | struct irq_cfg *cfg; |
3876 | int node; | 3679 | int node; |
3877 | int ioapic, pin; | 3680 | int ioapic, pin; |
@@ -3889,11 +3692,9 @@ static int __io_apic_set_pci_routing(struct device *dev, int irq, | |||
3889 | else | 3692 | else |
3890 | node = cpu_to_node(0); | 3693 | node = cpu_to_node(0); |
3891 | 3694 | ||
3892 | desc = irq_to_desc_alloc_node(irq, node); | 3695 | cfg = alloc_irq_and_cfg_at(irq, node); |
3893 | if (!desc) { | 3696 | if (!cfg) |
3894 | printk(KERN_INFO "can not get irq_desc %d\n", irq); | ||
3895 | return 0; | 3697 | return 0; |
3896 | } | ||
3897 | 3698 | ||
3898 | pin = irq_attr->ioapic_pin; | 3699 | pin = irq_attr->ioapic_pin; |
3899 | trigger = irq_attr->trigger; | 3700 | trigger = irq_attr->trigger; |
@@ -3903,15 +3704,14 @@ static int __io_apic_set_pci_routing(struct device *dev, int irq, | |||
3903 | * IRQs < 16 are already in the irq_2_pin[] map | 3704 | * IRQs < 16 are already in the irq_2_pin[] map |
3904 | */ | 3705 | */ |
3905 | if (irq >= legacy_pic->nr_legacy_irqs) { | 3706 | if (irq >= legacy_pic->nr_legacy_irqs) { |
3906 | cfg = desc->chip_data; | 3707 | if (__add_pin_to_irq_node(cfg, node, ioapic, pin)) { |
3907 | if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) { | ||
3908 | printk(KERN_INFO "can not add pin %d for irq %d\n", | 3708 | printk(KERN_INFO "can not add pin %d for irq %d\n", |
3909 | pin, irq); | 3709 | pin, irq); |
3910 | return 0; | 3710 | return 0; |
3911 | } | 3711 | } |
3912 | } | 3712 | } |
3913 | 3713 | ||
3914 | setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity); | 3714 | setup_ioapic_irq(ioapic, pin, irq, cfg, trigger, polarity); |
3915 | 3715 | ||
3916 | return 0; | 3716 | return 0; |
3917 | } | 3717 | } |
@@ -4104,14 +3904,14 @@ void __init setup_ioapic_dest(void) | |||
4104 | */ | 3904 | */ |
4105 | if (desc->status & | 3905 | if (desc->status & |
4106 | (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) | 3906 | (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) |
4107 | mask = desc->affinity; | 3907 | mask = desc->irq_data.affinity; |
4108 | else | 3908 | else |
4109 | mask = apic->target_cpus(); | 3909 | mask = apic->target_cpus(); |
4110 | 3910 | ||
4111 | if (intr_remapping_enabled) | 3911 | if (intr_remapping_enabled) |
4112 | set_ir_ioapic_affinity_irq_desc(desc, mask); | 3912 | ir_ioapic_set_affinity(&desc->irq_data, mask, false); |
4113 | else | 3913 | else |
4114 | set_ioapic_affinity_irq_desc(desc, mask); | 3914 | ioapic_set_affinity(&desc->irq_data, mask, false); |
4115 | } | 3915 | } |
4116 | 3916 | ||
4117 | } | 3917 | } |
@@ -4295,19 +4095,18 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) | |||
4295 | void __init pre_init_apic_IRQ0(void) | 4095 | void __init pre_init_apic_IRQ0(void) |
4296 | { | 4096 | { |
4297 | struct irq_cfg *cfg; | 4097 | struct irq_cfg *cfg; |
4298 | struct irq_desc *desc; | ||
4299 | 4098 | ||
4300 | printk(KERN_INFO "Early APIC setup for system timer0\n"); | 4099 | printk(KERN_INFO "Early APIC setup for system timer0\n"); |
4301 | #ifndef CONFIG_SMP | 4100 | #ifndef CONFIG_SMP |
4302 | phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); | 4101 | phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); |
4303 | #endif | 4102 | #endif |
4304 | desc = irq_to_desc_alloc_node(0, 0); | 4103 | /* Make sure the irq descriptor is set up */ |
4104 | cfg = alloc_irq_and_cfg_at(0, 0); | ||
4305 | 4105 | ||
4306 | setup_local_APIC(); | 4106 | setup_local_APIC(); |
4307 | 4107 | ||
4308 | cfg = irq_cfg(0); | ||
4309 | add_pin_to_irq_node(cfg, 0, 0, 0); | 4108 | add_pin_to_irq_node(cfg, 0, 0, 0); |
4310 | set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); | 4109 | set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); |
4311 | 4110 | ||
4312 | setup_IO_APIC_irq(0, 0, 0, desc, 0, 0); | 4111 | setup_ioapic_irq(0, 0, 0, cfg, 0, 0); |
4313 | } | 4112 | } |
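The conversion applied throughout io_apic.c above follows one pattern: chip callbacks that used to take a bare irq number now take a struct irq_data and pull their per-interrupt state from it (->irq, ->chip_data, ->handler_data). As an aid to reading the two-column hunks, this is the converted dmar_msi_set_affinity() reassembled as plain code; the trailing write and return are not visible in the hunk above and are assumed unchanged from the old version.

/* Sketch: new-style affinity callback as it would appear in io_apic.c. */
static int
dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
		      bool force)
{
	struct irq_cfg *cfg = data->chip_data;	/* was: desc->chip_data  */
	unsigned int dest, irq = data->irq;	/* was: the irq argument */
	struct msi_msg msg;

	/* was: set_desc_affinity(desc, mask, &dest) */
	if (__ioapic_set_affinity(data, mask, &dest))
		return -1;

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	dmar_msi_write(irq, &msg);	/* assumed unchanged; not shown above */
	return 0;
}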
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c index a43f71cb30f8..c90041ccb742 100644 --- a/arch/x86/kernel/apic/nmi.c +++ b/arch/x86/kernel/apic/nmi.c | |||
@@ -178,7 +178,7 @@ int __init check_nmi_watchdog(void) | |||
178 | error: | 178 | error: |
179 | if (nmi_watchdog == NMI_IO_APIC) { | 179 | if (nmi_watchdog == NMI_IO_APIC) { |
180 | if (!timer_through_8259) | 180 | if (!timer_through_8259) |
181 | legacy_pic->chip->mask(0); | 181 | legacy_pic->mask(0); |
182 | on_each_cpu(__acpi_nmi_disable, NULL, 1); | 182 | on_each_cpu(__acpi_nmi_disable, NULL, 1); |
183 | } | 183 | } |
184 | 184 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 39aaee5c1ab2..80c482382d5c 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
@@ -131,7 +131,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
131 | u32 low = 0, high = 0, address = 0; | 131 | u32 low = 0, high = 0, address = 0; |
132 | unsigned int bank, block; | 132 | unsigned int bank, block; |
133 | struct thresh_restart tr; | 133 | struct thresh_restart tr; |
134 | u8 lvt_off; | 134 | int lvt_off = -1; |
135 | u8 offset; | ||
135 | 136 | ||
136 | for (bank = 0; bank < NR_BANKS; ++bank) { | 137 | for (bank = 0; bank < NR_BANKS; ++bank) { |
137 | for (block = 0; block < NR_BLOCKS; ++block) { | 138 | for (block = 0; block < NR_BLOCKS; ++block) { |
@@ -162,8 +163,28 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
162 | if (shared_bank[bank] && c->cpu_core_id) | 163 | if (shared_bank[bank] && c->cpu_core_id) |
163 | break; | 164 | break; |
164 | #endif | 165 | #endif |
165 | lvt_off = setup_APIC_eilvt_mce(THRESHOLD_APIC_VECTOR, | 166 | offset = (high & MASK_LVTOFF_HI) >> 20; |
166 | APIC_EILVT_MSG_FIX, 0); | 167 | if (lvt_off < 0) { |
168 | if (setup_APIC_eilvt(offset, | ||
169 | THRESHOLD_APIC_VECTOR, | ||
170 | APIC_EILVT_MSG_FIX, 0)) { | ||
171 | pr_err(FW_BUG "cpu %d, failed to " | ||
172 | "setup threshold interrupt " | ||
173 | "for bank %d, block %d " | ||
174 | "(MSR%08X=0x%x%08x)", | ||
175 | smp_processor_id(), bank, block, | ||
176 | address, high, low); | ||
177 | continue; | ||
178 | } | ||
179 | lvt_off = offset; | ||
180 | } else if (lvt_off != offset) { | ||
181 | pr_err(FW_BUG "cpu %d, invalid threshold " | ||
182 | "interrupt offset %d for bank %d," | ||
183 | "block %d (MSR%08X=0x%x%08x)", | ||
184 | smp_processor_id(), lvt_off, bank, | ||
185 | block, address, high, low); | ||
186 | continue; | ||
187 | } | ||
167 | 188 | ||
168 | high &= ~MASK_LVTOFF_HI; | 189 | high &= ~MASK_LVTOFF_HI; |
169 | high |= lvt_off << 20; | 190 | high |= lvt_off << 20; |
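The mce_amd.c change above keys the threshold interrupt setup off the LVT offset that firmware stores in the high word of each bank/block MSR, and refuses banks whose offsets disagree. A minimal sketch of the field arithmetic it performs, assuming the offset field occupies bits 23:20 (MASK_LVTOFF_HI is taken to be 0x00F00000 here, consistent with the >> 20 shift used above):

#include <linux/types.h>

/* Sketch only; the mask value is an assumption, not quoted from mce_amd.c. */
#define EXAMPLE_MASK_LVTOFF_HI	0x00F00000

static inline u8 example_get_thr_lvt_offset(u32 high)
{
	/* extract bits 23:20 of the MSR high word */
	return (high & EXAMPLE_MASK_LVTOFF_HI) >> 20;
}

static inline u32 example_put_thr_lvt_offset(u32 high, u8 offset)
{
	/* write the validated offset back before re-enabling the block */
	high &= ~EXAMPLE_MASK_LVTOFF_HI;
	high |= (u32)offset << 20;
	return high;
}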
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 7494999141b3..efaf906daf93 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
@@ -440,9 +440,9 @@ static int hpet_legacy_next_event(unsigned long delta, | |||
440 | static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev); | 440 | static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev); |
441 | static struct hpet_dev *hpet_devs; | 441 | static struct hpet_dev *hpet_devs; |
442 | 442 | ||
443 | void hpet_msi_unmask(unsigned int irq) | 443 | void hpet_msi_unmask(struct irq_data *data) |
444 | { | 444 | { |
445 | struct hpet_dev *hdev = get_irq_data(irq); | 445 | struct hpet_dev *hdev = data->handler_data; |
446 | unsigned int cfg; | 446 | unsigned int cfg; |
447 | 447 | ||
448 | /* unmask it */ | 448 | /* unmask it */ |
@@ -451,10 +451,10 @@ void hpet_msi_unmask(unsigned int irq) | |||
451 | hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); | 451 | hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); |
452 | } | 452 | } |
453 | 453 | ||
454 | void hpet_msi_mask(unsigned int irq) | 454 | void hpet_msi_mask(struct irq_data *data) |
455 | { | 455 | { |
456 | struct hpet_dev *hdev = data->handler_data; | ||
456 | unsigned int cfg; | 457 | unsigned int cfg; |
457 | struct hpet_dev *hdev = get_irq_data(irq); | ||
458 | 458 | ||
459 | /* mask it */ | 459 | /* mask it */ |
460 | cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); | 460 | cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); |
@@ -462,18 +462,14 @@ void hpet_msi_mask(unsigned int irq) | |||
462 | hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); | 462 | hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); |
463 | } | 463 | } |
464 | 464 | ||
465 | void hpet_msi_write(unsigned int irq, struct msi_msg *msg) | 465 | void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg) |
466 | { | 466 | { |
467 | struct hpet_dev *hdev = get_irq_data(irq); | ||
468 | |||
469 | hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num)); | 467 | hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num)); |
470 | hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4); | 468 | hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4); |
471 | } | 469 | } |
472 | 470 | ||
473 | void hpet_msi_read(unsigned int irq, struct msi_msg *msg) | 471 | void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg) |
474 | { | 472 | { |
475 | struct hpet_dev *hdev = get_irq_data(irq); | ||
476 | |||
477 | msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num)); | 473 | msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num)); |
478 | msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4); | 474 | msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4); |
479 | msg->address_hi = 0; | 475 | msg->address_hi = 0; |
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index cafa7c80ac95..20757cb2efa3 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c | |||
@@ -29,24 +29,10 @@ | |||
29 | * plus some generic x86 specific things if generic specifics makes | 29 | * plus some generic x86 specific things if generic specifics makes |
30 | * any sense at all. | 30 | * any sense at all. |
31 | */ | 31 | */ |
32 | static void init_8259A(int auto_eoi); | ||
32 | 33 | ||
33 | static int i8259A_auto_eoi; | 34 | static int i8259A_auto_eoi; |
34 | DEFINE_RAW_SPINLOCK(i8259A_lock); | 35 | DEFINE_RAW_SPINLOCK(i8259A_lock); |
35 | static void mask_and_ack_8259A(unsigned int); | ||
36 | static void mask_8259A(void); | ||
37 | static void unmask_8259A(void); | ||
38 | static void disable_8259A_irq(unsigned int irq); | ||
39 | static void enable_8259A_irq(unsigned int irq); | ||
40 | static void init_8259A(int auto_eoi); | ||
41 | static int i8259A_irq_pending(unsigned int irq); | ||
42 | |||
43 | struct irq_chip i8259A_chip = { | ||
44 | .name = "XT-PIC", | ||
45 | .mask = disable_8259A_irq, | ||
46 | .disable = disable_8259A_irq, | ||
47 | .unmask = enable_8259A_irq, | ||
48 | .mask_ack = mask_and_ack_8259A, | ||
49 | }; | ||
50 | 36 | ||
51 | /* | 37 | /* |
52 | * 8259A PIC functions to handle ISA devices: | 38 | * 8259A PIC functions to handle ISA devices: |
@@ -68,7 +54,7 @@ unsigned int cached_irq_mask = 0xffff; | |||
68 | */ | 54 | */ |
69 | unsigned long io_apic_irqs; | 55 | unsigned long io_apic_irqs; |
70 | 56 | ||
71 | static void disable_8259A_irq(unsigned int irq) | 57 | static void mask_8259A_irq(unsigned int irq) |
72 | { | 58 | { |
73 | unsigned int mask = 1 << irq; | 59 | unsigned int mask = 1 << irq; |
74 | unsigned long flags; | 60 | unsigned long flags; |
@@ -82,7 +68,12 @@ static void disable_8259A_irq(unsigned int irq) | |||
82 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); | 68 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); |
83 | } | 69 | } |
84 | 70 | ||
85 | static void enable_8259A_irq(unsigned int irq) | 71 | static void disable_8259A_irq(struct irq_data *data) |
72 | { | ||
73 | mask_8259A_irq(data->irq); | ||
74 | } | ||
75 | |||
76 | static void unmask_8259A_irq(unsigned int irq) | ||
86 | { | 77 | { |
87 | unsigned int mask = ~(1 << irq); | 78 | unsigned int mask = ~(1 << irq); |
88 | unsigned long flags; | 79 | unsigned long flags; |
@@ -96,6 +87,11 @@ static void enable_8259A_irq(unsigned int irq) | |||
96 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); | 87 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); |
97 | } | 88 | } |
98 | 89 | ||
90 | static void enable_8259A_irq(struct irq_data *data) | ||
91 | { | ||
92 | unmask_8259A_irq(data->irq); | ||
93 | } | ||
94 | |||
99 | static int i8259A_irq_pending(unsigned int irq) | 95 | static int i8259A_irq_pending(unsigned int irq) |
100 | { | 96 | { |
101 | unsigned int mask = 1<<irq; | 97 | unsigned int mask = 1<<irq; |
@@ -117,7 +113,7 @@ static void make_8259A_irq(unsigned int irq) | |||
117 | disable_irq_nosync(irq); | 113 | disable_irq_nosync(irq); |
118 | io_apic_irqs &= ~(1<<irq); | 114 | io_apic_irqs &= ~(1<<irq); |
119 | set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, | 115 | set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, |
120 | "XT"); | 116 | i8259A_chip.name); |
121 | enable_irq(irq); | 117 | enable_irq(irq); |
122 | } | 118 | } |
123 | 119 | ||
@@ -150,8 +146,9 @@ static inline int i8259A_irq_real(unsigned int irq) | |||
150 | * first, _then_ send the EOI, and the order of EOI | 146 | * first, _then_ send the EOI, and the order of EOI |
151 | * to the two 8259s is important! | 147 | * to the two 8259s is important! |
152 | */ | 148 | */ |
153 | static void mask_and_ack_8259A(unsigned int irq) | 149 | static void mask_and_ack_8259A(struct irq_data *data) |
154 | { | 150 | { |
151 | unsigned int irq = data->irq; | ||
155 | unsigned int irqmask = 1 << irq; | 152 | unsigned int irqmask = 1 << irq; |
156 | unsigned long flags; | 153 | unsigned long flags; |
157 | 154 | ||
@@ -223,6 +220,14 @@ spurious_8259A_irq: | |||
223 | } | 220 | } |
224 | } | 221 | } |
225 | 222 | ||
223 | struct irq_chip i8259A_chip = { | ||
224 | .name = "XT-PIC", | ||
225 | .irq_mask = disable_8259A_irq, | ||
226 | .irq_disable = disable_8259A_irq, | ||
227 | .irq_unmask = enable_8259A_irq, | ||
228 | .irq_mask_ack = mask_and_ack_8259A, | ||
229 | }; | ||
230 | |||
226 | static char irq_trigger[2]; | 231 | static char irq_trigger[2]; |
227 | /** | 232 | /** |
228 | * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ | 233 | * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ |
@@ -342,9 +347,9 @@ static void init_8259A(int auto_eoi) | |||
342 | * In AEOI mode we just have to mask the interrupt | 347 | * In AEOI mode we just have to mask the interrupt |
343 | * when acking. | 348 | * when acking. |
344 | */ | 349 | */ |
345 | i8259A_chip.mask_ack = disable_8259A_irq; | 350 | i8259A_chip.irq_mask_ack = disable_8259A_irq; |
346 | else | 351 | else |
347 | i8259A_chip.mask_ack = mask_and_ack_8259A; | 352 | i8259A_chip.irq_mask_ack = mask_and_ack_8259A; |
348 | 353 | ||
349 | udelay(100); /* wait for 8259A to initialize */ | 354 | udelay(100); /* wait for 8259A to initialize */ |
350 | 355 | ||
@@ -363,14 +368,6 @@ static void init_8259A(int auto_eoi) | |||
363 | static void legacy_pic_noop(void) { }; | 368 | static void legacy_pic_noop(void) { }; |
364 | static void legacy_pic_uint_noop(unsigned int unused) { }; | 369 | static void legacy_pic_uint_noop(unsigned int unused) { }; |
365 | static void legacy_pic_int_noop(int unused) { }; | 370 | static void legacy_pic_int_noop(int unused) { }; |
366 | |||
367 | static struct irq_chip dummy_pic_chip = { | ||
368 | .name = "dummy pic", | ||
369 | .mask = legacy_pic_uint_noop, | ||
370 | .unmask = legacy_pic_uint_noop, | ||
371 | .disable = legacy_pic_uint_noop, | ||
372 | .mask_ack = legacy_pic_uint_noop, | ||
373 | }; | ||
374 | static int legacy_pic_irq_pending_noop(unsigned int irq) | 371 | static int legacy_pic_irq_pending_noop(unsigned int irq) |
375 | { | 372 | { |
376 | return 0; | 373 | return 0; |
@@ -378,7 +375,9 @@ static int legacy_pic_irq_pending_noop(unsigned int irq) | |||
378 | 375 | ||
379 | struct legacy_pic null_legacy_pic = { | 376 | struct legacy_pic null_legacy_pic = { |
380 | .nr_legacy_irqs = 0, | 377 | .nr_legacy_irqs = 0, |
381 | .chip = &dummy_pic_chip, | 378 | .chip = &dummy_irq_chip, |
379 | .mask = legacy_pic_uint_noop, | ||
380 | .unmask = legacy_pic_uint_noop, | ||
382 | .mask_all = legacy_pic_noop, | 381 | .mask_all = legacy_pic_noop, |
383 | .restore_mask = legacy_pic_noop, | 382 | .restore_mask = legacy_pic_noop, |
384 | .init = legacy_pic_int_noop, | 383 | .init = legacy_pic_int_noop, |
@@ -389,7 +388,9 @@ struct legacy_pic null_legacy_pic = { | |||
389 | struct legacy_pic default_legacy_pic = { | 388 | struct legacy_pic default_legacy_pic = { |
390 | .nr_legacy_irqs = NR_IRQS_LEGACY, | 389 | .nr_legacy_irqs = NR_IRQS_LEGACY, |
391 | .chip = &i8259A_chip, | 390 | .chip = &i8259A_chip, |
392 | .mask_all = mask_8259A, | 391 | .mask = mask_8259A_irq, |
392 | .unmask = unmask_8259A_irq, | ||
393 | .mask_all = mask_8259A, | ||
393 | .restore_mask = unmask_8259A, | 394 | .restore_mask = unmask_8259A, |
394 | .init = init_8259A, | 395 | .init = init_8259A, |
395 | .irq_pending = i8259A_irq_pending, | 396 | .irq_pending = i8259A_irq_pending, |
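After the i8259.c rework above, callers no longer reach through legacy_pic->chip to mask or unmask a legacy interrupt: struct legacy_pic now carries its own mask/unmask hooks (see the nmi.c hunk earlier and the smpboot.c hunk below). A sketch of the resulting shape, reconstructed from the initializers above (remaining members omitted), plus a hypothetical caller:

#include <linux/irq.h>

/* Sketch: struct legacy_pic as implied by the initializers above. */
struct legacy_pic {
	int nr_legacy_irqs;
	struct irq_chip *chip;
	void (*mask)(unsigned int irq);
	void (*unmask)(unsigned int irq);
	void (*mask_all)(void);
	void (*restore_mask)(void);
	void (*init)(int auto_eoi);
	int (*irq_pending)(unsigned int irq);
	/* ... */
};

extern struct legacy_pic *legacy_pic;

/* Hypothetical wrapper showing the new call sites (cf. smpboot.c below). */
static void example_route_nmi_through_lvt0(void)
{
	legacy_pic->mask(0);		/* was: legacy_pic->chip->mask(0)   */
	enable_NMI_through_LVT0();
	legacy_pic->unmask(0);		/* was: legacy_pic->chip->unmask(0) */
}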
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 44edb03fc9ec..83ec0175f986 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -159,7 +159,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
159 | seq_printf(p, "%*d: ", prec, i); | 159 | seq_printf(p, "%*d: ", prec, i); |
160 | for_each_online_cpu(j) | 160 | for_each_online_cpu(j) |
161 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | 161 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); |
162 | seq_printf(p, " %8s", desc->chip->name); | 162 | seq_printf(p, " %8s", desc->irq_data.chip->name); |
163 | seq_printf(p, "-%-8s", desc->name); | 163 | seq_printf(p, "-%-8s", desc->name); |
164 | 164 | ||
165 | if (action) { | 165 | if (action) { |
@@ -282,6 +282,7 @@ void fixup_irqs(void) | |||
282 | unsigned int irq, vector; | 282 | unsigned int irq, vector; |
283 | static int warned; | 283 | static int warned; |
284 | struct irq_desc *desc; | 284 | struct irq_desc *desc; |
285 | struct irq_data *data; | ||
285 | 286 | ||
286 | for_each_irq_desc(irq, desc) { | 287 | for_each_irq_desc(irq, desc) { |
287 | int break_affinity = 0; | 288 | int break_affinity = 0; |
@@ -296,7 +297,8 @@ void fixup_irqs(void) | |||
296 | /* interrupt's are disabled at this point */ | 297 | /* interrupt's are disabled at this point */ |
297 | raw_spin_lock(&desc->lock); | 298 | raw_spin_lock(&desc->lock); |
298 | 299 | ||
299 | affinity = desc->affinity; | 300 | data = &desc->irq_data; |
301 | affinity = data->affinity; | ||
300 | if (!irq_has_action(irq) || | 302 | if (!irq_has_action(irq) || |
301 | cpumask_equal(affinity, cpu_online_mask)) { | 303 | cpumask_equal(affinity, cpu_online_mask)) { |
302 | raw_spin_unlock(&desc->lock); | 304 | raw_spin_unlock(&desc->lock); |
@@ -315,16 +317,16 @@ void fixup_irqs(void) | |||
315 | affinity = cpu_all_mask; | 317 | affinity = cpu_all_mask; |
316 | } | 318 | } |
317 | 319 | ||
318 | if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->mask) | 320 | if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_mask) |
319 | desc->chip->mask(irq); | 321 | data->chip->irq_mask(data); |
320 | 322 | ||
321 | if (desc->chip->set_affinity) | 323 | if (data->chip->irq_set_affinity) |
322 | desc->chip->set_affinity(irq, affinity); | 324 | data->chip->irq_set_affinity(data, affinity, true); |
323 | else if (!(warned++)) | 325 | else if (!(warned++)) |
324 | set_affinity = 0; | 326 | set_affinity = 0; |
325 | 327 | ||
326 | if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask) | 328 | if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_unmask) |
327 | desc->chip->unmask(irq); | 329 | data->chip->irq_unmask(data); |
328 | 330 | ||
329 | raw_spin_unlock(&desc->lock); | 331 | raw_spin_unlock(&desc->lock); |
330 | 332 | ||
@@ -355,10 +357,10 @@ void fixup_irqs(void) | |||
355 | if (irr & (1 << (vector % 32))) { | 357 | if (irr & (1 << (vector % 32))) { |
356 | irq = __get_cpu_var(vector_irq)[vector]; | 358 | irq = __get_cpu_var(vector_irq)[vector]; |
357 | 359 | ||
358 | desc = irq_to_desc(irq); | 360 | data = irq_get_irq_data(irq); |
359 | raw_spin_lock(&desc->lock); | 361 | raw_spin_lock(&desc->lock); |
360 | if (desc->chip->retrigger) | 362 | if (data->chip->irq_retrigger) |
361 | desc->chip->retrigger(irq); | 363 | data->chip->irq_retrigger(data); |
362 | raw_spin_unlock(&desc->lock); | 364 | raw_spin_unlock(&desc->lock); |
363 | } | 365 | } |
364 | } | 366 | } |
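The fixup_irqs() hunk above is the caller side of the same conversion: generic code fetches struct irq_data once and invokes the chip's irq_* methods on it, instead of indexing desc->chip with an irq number. A stripped-down sketch of that calling convention (descriptor locking and the IRQ_MOVE_PCNTXT checks are omitted here):

#include <linux/irq.h>
#include <linux/cpumask.h>

/* Sketch: mask, retarget and unmask one interrupt via its irq_data. */
static void example_retarget_irq(unsigned int irq, const struct cpumask *affinity)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct irq_chip *chip = data->chip;

	if (chip->irq_mask)
		chip->irq_mask(data);

	if (chip->irq_set_affinity)
		chip->irq_set_affinity(data, affinity, true);

	if (chip->irq_unmask)
		chip->irq_unmask(data);
}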
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 713969b9266b..c752e973958d 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c | |||
@@ -100,6 +100,8 @@ int vector_used_by_percpu_irq(unsigned int vector) | |||
100 | 100 | ||
101 | void __init init_ISA_irqs(void) | 101 | void __init init_ISA_irqs(void) |
102 | { | 102 | { |
103 | struct irq_chip *chip = legacy_pic->chip; | ||
104 | const char *name = chip->name; | ||
103 | int i; | 105 | int i; |
104 | 106 | ||
105 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) | 107 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) |
@@ -107,19 +109,8 @@ void __init init_ISA_irqs(void) | |||
107 | #endif | 109 | #endif |
108 | legacy_pic->init(0); | 110 | legacy_pic->init(0); |
109 | 111 | ||
110 | /* | 112 | for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) |
111 | * 16 old-style INTA-cycle interrupts: | 113 | set_irq_chip_and_handler_name(i, chip, handle_level_irq, name); |
112 | */ | ||
113 | for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) { | ||
114 | struct irq_desc *desc = irq_to_desc(i); | ||
115 | |||
116 | desc->status = IRQ_DISABLED; | ||
117 | desc->action = NULL; | ||
118 | desc->depth = 1; | ||
119 | |||
120 | set_irq_chip_and_handler_name(i, &i8259A_chip, | ||
121 | handle_level_irq, "XT"); | ||
122 | } | ||
123 | } | 114 | } |
124 | 115 | ||
125 | void __init init_IRQ(void) | 116 | void __init init_IRQ(void) |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 2ced73ba048c..dfb50890b5b7 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -323,9 +323,9 @@ notrace static void __cpuinit start_secondary(void *unused) | |||
323 | check_tsc_sync_target(); | 323 | check_tsc_sync_target(); |
324 | 324 | ||
325 | if (nmi_watchdog == NMI_IO_APIC) { | 325 | if (nmi_watchdog == NMI_IO_APIC) { |
326 | legacy_pic->chip->mask(0); | 326 | legacy_pic->mask(0); |
327 | enable_NMI_through_LVT0(); | 327 | enable_NMI_through_LVT0(); |
328 | legacy_pic->chip->unmask(0); | 328 | legacy_pic->unmask(0); |
329 | } | 329 | } |
330 | 330 | ||
331 | /* This must be done before setting cpu_online_mask */ | 331 | /* This must be done before setting cpu_online_mask */ |
diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c index 1132129db792..7b24460917d5 100644 --- a/arch/x86/kernel/uv_irq.c +++ b/arch/x86/kernel/uv_irq.c | |||
@@ -28,34 +28,21 @@ struct uv_irq_2_mmr_pnode{ | |||
28 | static spinlock_t uv_irq_lock; | 28 | static spinlock_t uv_irq_lock; |
29 | static struct rb_root uv_irq_root; | 29 | static struct rb_root uv_irq_root; |
30 | 30 | ||
31 | static int uv_set_irq_affinity(unsigned int, const struct cpumask *); | 31 | static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool); |
32 | 32 | ||
33 | static void uv_noop(unsigned int irq) | 33 | static void uv_noop(struct irq_data *data) { } |
34 | { | ||
35 | } | ||
36 | |||
37 | static unsigned int uv_noop_ret(unsigned int irq) | ||
38 | { | ||
39 | return 0; | ||
40 | } | ||
41 | 34 | ||
42 | static void uv_ack_apic(unsigned int irq) | 35 | static void uv_ack_apic(struct irq_data *data) |
43 | { | 36 | { |
44 | ack_APIC_irq(); | 37 | ack_APIC_irq(); |
45 | } | 38 | } |
46 | 39 | ||
47 | static struct irq_chip uv_irq_chip = { | 40 | static struct irq_chip uv_irq_chip = { |
48 | .name = "UV-CORE", | 41 | .name = "UV-CORE", |
49 | .startup = uv_noop_ret, | 42 | .irq_mask = uv_noop, |
50 | .shutdown = uv_noop, | 43 | .irq_unmask = uv_noop, |
51 | .enable = uv_noop, | 44 | .irq_eoi = uv_ack_apic, |
52 | .disable = uv_noop, | 45 | .irq_set_affinity = uv_set_irq_affinity, |
53 | .ack = uv_noop, | ||
54 | .mask = uv_noop, | ||
55 | .unmask = uv_noop, | ||
56 | .eoi = uv_ack_apic, | ||
57 | .end = uv_noop, | ||
58 | .set_affinity = uv_set_irq_affinity, | ||
59 | }; | 46 | }; |
60 | 47 | ||
61 | /* | 48 | /* |
@@ -144,26 +131,22 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | |||
144 | unsigned long mmr_offset, int limit) | 131 | unsigned long mmr_offset, int limit) |
145 | { | 132 | { |
146 | const struct cpumask *eligible_cpu = cpumask_of(cpu); | 133 | const struct cpumask *eligible_cpu = cpumask_of(cpu); |
147 | struct irq_desc *desc = irq_to_desc(irq); | 134 | struct irq_cfg *cfg = get_irq_chip_data(irq); |
148 | struct irq_cfg *cfg; | ||
149 | int mmr_pnode; | ||
150 | unsigned long mmr_value; | 135 | unsigned long mmr_value; |
151 | struct uv_IO_APIC_route_entry *entry; | 136 | struct uv_IO_APIC_route_entry *entry; |
152 | int err; | 137 | int mmr_pnode, err; |
153 | 138 | ||
154 | BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != | 139 | BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != |
155 | sizeof(unsigned long)); | 140 | sizeof(unsigned long)); |
156 | 141 | ||
157 | cfg = irq_cfg(irq); | ||
158 | |||
159 | err = assign_irq_vector(irq, cfg, eligible_cpu); | 142 | err = assign_irq_vector(irq, cfg, eligible_cpu); |
160 | if (err != 0) | 143 | if (err != 0) |
161 | return err; | 144 | return err; |
162 | 145 | ||
163 | if (limit == UV_AFFINITY_CPU) | 146 | if (limit == UV_AFFINITY_CPU) |
164 | desc->status |= IRQ_NO_BALANCING; | 147 | irq_set_status_flags(irq, IRQ_NO_BALANCING); |
165 | else | 148 | else |
166 | desc->status |= IRQ_MOVE_PCNTXT; | 149 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
167 | 150 | ||
168 | set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, | 151 | set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, |
169 | irq_name); | 152 | irq_name); |
@@ -206,17 +189,17 @@ static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset) | |||
206 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | 189 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); |
207 | } | 190 | } |
208 | 191 | ||
209 | static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask) | 192 | static int |
193 | uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask, | ||
194 | bool force) | ||
210 | { | 195 | { |
211 | struct irq_desc *desc = irq_to_desc(irq); | 196 | struct irq_cfg *cfg = data->chip_data; |
212 | struct irq_cfg *cfg = desc->chip_data; | ||
213 | unsigned int dest; | 197 | unsigned int dest; |
214 | unsigned long mmr_value; | 198 | unsigned long mmr_value, mmr_offset; |
215 | struct uv_IO_APIC_route_entry *entry; | 199 | struct uv_IO_APIC_route_entry *entry; |
216 | unsigned long mmr_offset; | ||
217 | int mmr_pnode; | 200 | int mmr_pnode; |
218 | 201 | ||
219 | if (set_desc_affinity(desc, mask, &dest)) | 202 | if (__ioapic_set_affinity(data, mask, &dest)) |
220 | return -1; | 203 | return -1; |
221 | 204 | ||
222 | mmr_value = 0; | 205 | mmr_value = 0; |
@@ -231,7 +214,7 @@ static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask) | |||
231 | entry->dest = dest; | 214 | entry->dest = dest; |
232 | 215 | ||
233 | /* Get previously stored MMR and pnode of hub sourcing interrupts */ | 216 | /* Get previously stored MMR and pnode of hub sourcing interrupts */ |
234 | if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode)) | 217 | if (uv_irq_2_mmr_info(data->irq, &mmr_offset, &mmr_pnode)) |
235 | return -1; | 218 | return -1; |
236 | 219 | ||
237 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | 220 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); |
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c index e680ea52db9b..3371bd053b89 100644 --- a/arch/x86/kernel/visws_quirks.c +++ b/arch/x86/kernel/visws_quirks.c | |||
@@ -66,10 +66,7 @@ static void __init visws_time_init(void) | |||
66 | } | 66 | } |
67 | 67 | ||
68 | /* Replaces the default init_ISA_irqs in the generic setup */ | 68 | /* Replaces the default init_ISA_irqs in the generic setup */ |
69 | static void __init visws_pre_intr_init(void) | 69 | static void __init visws_pre_intr_init(void); |
70 | { | ||
71 | init_VISWS_APIC_irqs(); | ||
72 | } | ||
73 | 70 | ||
74 | /* Quirk for machine specific memory setup. */ | 71 | /* Quirk for machine specific memory setup. */ |
75 | 72 | ||
@@ -429,67 +426,34 @@ static int is_co_apic(unsigned int irq) | |||
429 | /* | 426 | /* |
430 | * This is the SGI Cobalt (IO-)APIC: | 427 | * This is the SGI Cobalt (IO-)APIC: |
431 | */ | 428 | */ |
432 | 429 | static void enable_cobalt_irq(struct irq_data *data) | |
433 | static void enable_cobalt_irq(unsigned int irq) | ||
434 | { | 430 | { |
435 | co_apic_set(is_co_apic(irq), irq); | 431 | co_apic_set(is_co_apic(data->irq), data->irq); |
436 | } | 432 | } |
437 | 433 | ||
438 | static void disable_cobalt_irq(unsigned int irq) | 434 | static void disable_cobalt_irq(struct irq_data *data) |
439 | { | 435 | { |
440 | int entry = is_co_apic(irq); | 436 | int entry = is_co_apic(data->irq); |
441 | 437 | ||
442 | co_apic_write(CO_APIC_LO(entry), CO_APIC_MASK); | 438 | co_apic_write(CO_APIC_LO(entry), CO_APIC_MASK); |
443 | co_apic_read(CO_APIC_LO(entry)); | 439 | co_apic_read(CO_APIC_LO(entry)); |
444 | } | 440 | } |
445 | 441 | ||
446 | /* | 442 | static void ack_cobalt_irq(struct irq_data *data) |
447 | * "irq" really just serves to identify the device. Here is where we | ||
448 | * map this to the Cobalt APIC entry where it's physically wired. | ||
449 | * This is called via request_irq -> setup_irq -> irq_desc->startup() | ||
450 | */ | ||
451 | static unsigned int startup_cobalt_irq(unsigned int irq) | ||
452 | { | 443 | { |
453 | unsigned long flags; | 444 | unsigned long flags; |
454 | struct irq_desc *desc = irq_to_desc(irq); | ||
455 | 445 | ||
456 | spin_lock_irqsave(&cobalt_lock, flags); | 446 | spin_lock_irqsave(&cobalt_lock, flags); |
457 | if ((desc->status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING))) | 447 | disable_cobalt_irq(data); |
458 | desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING); | ||
459 | enable_cobalt_irq(irq); | ||
460 | spin_unlock_irqrestore(&cobalt_lock, flags); | ||
461 | return 0; | ||
462 | } | ||
463 | |||
464 | static void ack_cobalt_irq(unsigned int irq) | ||
465 | { | ||
466 | unsigned long flags; | ||
467 | |||
468 | spin_lock_irqsave(&cobalt_lock, flags); | ||
469 | disable_cobalt_irq(irq); | ||
470 | apic_write(APIC_EOI, APIC_EIO_ACK); | 448 | apic_write(APIC_EOI, APIC_EIO_ACK); |
471 | spin_unlock_irqrestore(&cobalt_lock, flags); | 449 | spin_unlock_irqrestore(&cobalt_lock, flags); |
472 | } | 450 | } |
473 | 451 | ||
474 | static void end_cobalt_irq(unsigned int irq) | ||
475 | { | ||
476 | unsigned long flags; | ||
477 | struct irq_desc *desc = irq_to_desc(irq); | ||
478 | |||
479 | spin_lock_irqsave(&cobalt_lock, flags); | ||
480 | if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS))) | ||
481 | enable_cobalt_irq(irq); | ||
482 | spin_unlock_irqrestore(&cobalt_lock, flags); | ||
483 | } | ||
484 | |||
485 | static struct irq_chip cobalt_irq_type = { | 452 | static struct irq_chip cobalt_irq_type = { |
486 | .name = "Cobalt-APIC", | 453 | .name = "Cobalt-APIC", |
487 | .startup = startup_cobalt_irq, | 454 | .irq_enable = enable_cobalt_irq, |
488 | .shutdown = disable_cobalt_irq, | 455 | .irq_disable = disable_cobalt_irq, |
489 | .enable = enable_cobalt_irq, | 456 | .irq_ack = ack_cobalt_irq, |
490 | .disable = disable_cobalt_irq, | ||
491 | .ack = ack_cobalt_irq, | ||
492 | .end = end_cobalt_irq, | ||
493 | }; | 457 | }; |
494 | 458 | ||
495 | 459 | ||
@@ -503,35 +467,34 @@ static struct irq_chip cobalt_irq_type = { | |||
503 | * interrupt controller type, and through a special virtual interrupt- | 467 | * interrupt controller type, and through a special virtual interrupt- |
504 | * controller. Device drivers only see the virtual interrupt sources. | 468 | * controller. Device drivers only see the virtual interrupt sources. |
505 | */ | 469 | */ |
506 | static unsigned int startup_piix4_master_irq(unsigned int irq) | 470 | static unsigned int startup_piix4_master_irq(struct irq_data *data) |
507 | { | 471 | { |
508 | legacy_pic->init(0); | 472 | legacy_pic->init(0); |
509 | 473 | enable_cobalt_irq(data); | |
510 | return startup_cobalt_irq(irq); | ||
511 | } | 474 | } |
512 | 475 | ||
513 | static void end_piix4_master_irq(unsigned int irq) | 476 | static void end_piix4_master_irq(struct irq_data *data) |
514 | { | 477 | { |
515 | unsigned long flags; | 478 | unsigned long flags; |
516 | 479 | ||
517 | spin_lock_irqsave(&cobalt_lock, flags); | 480 | spin_lock_irqsave(&cobalt_lock, flags); |
518 | enable_cobalt_irq(irq); | 481 | enable_cobalt_irq(data); |
519 | spin_unlock_irqrestore(&cobalt_lock, flags); | 482 | spin_unlock_irqrestore(&cobalt_lock, flags); |
520 | } | 483 | } |
521 | 484 | ||
522 | static struct irq_chip piix4_master_irq_type = { | 485 | static struct irq_chip piix4_master_irq_type = { |
523 | .name = "PIIX4-master", | 486 | .name = "PIIX4-master", |
524 | .startup = startup_piix4_master_irq, | 487 | .irq_startup = startup_piix4_master_irq, |
525 | .ack = ack_cobalt_irq, | 488 | .irq_ack = ack_cobalt_irq, |
526 | .end = end_piix4_master_irq, | ||
527 | }; | 489 | }; |
528 | 490 | ||
491 | static void pii4_mask(struct irq_data *data) { } | ||
529 | 492 | ||
530 | static struct irq_chip piix4_virtual_irq_type = { | 493 | static struct irq_chip piix4_virtual_irq_type = { |
531 | .name = "PIIX4-virtual", | 494 | .name = "PIIX4-virtual", |
495 | .mask = pii4_mask, | ||
532 | }; | 496 | }; |
533 | 497 | ||
534 | |||
535 | /* | 498 | /* |
536 | * PIIX4-8259 master/virtual functions to handle interrupt requests | 499 | * PIIX4-8259 master/virtual functions to handle interrupt requests |
537 | * from legacy devices: floppy, parallel, serial, rtc. | 500 | * from legacy devices: floppy, parallel, serial, rtc. |
@@ -549,9 +512,8 @@ static struct irq_chip piix4_virtual_irq_type = { | |||
549 | */ | 512 | */ |
550 | static irqreturn_t piix4_master_intr(int irq, void *dev_id) | 513 | static irqreturn_t piix4_master_intr(int irq, void *dev_id) |
551 | { | 514 | { |
552 | int realirq; | ||
553 | struct irq_desc *desc; | ||
554 | unsigned long flags; | 515 | unsigned long flags; |
516 | int realirq; | ||
555 | 517 | ||
556 | raw_spin_lock_irqsave(&i8259A_lock, flags); | 518 | raw_spin_lock_irqsave(&i8259A_lock, flags); |
557 | 519 | ||
@@ -592,18 +554,10 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id) | |||
592 | 554 | ||
593 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); | 555 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); |
594 | 556 | ||
595 | desc = irq_to_desc(realirq); | ||
596 | |||
597 | /* | 557 | /* |
598 | * handle this 'virtual interrupt' as a Cobalt one now. | 558 | * handle this 'virtual interrupt' as a Cobalt one now. |
599 | */ | 559 | */ |
600 | kstat_incr_irqs_this_cpu(realirq, desc); | 560 | generic_handle_irq(realirq); |
601 | |||
602 | if (likely(desc->action != NULL)) | ||
603 | handle_IRQ_event(realirq, desc->action); | ||
604 | |||
605 | if (!(desc->status & IRQ_DISABLED)) | ||
606 | legacy_pic->chip->unmask(realirq); | ||
607 | 561 | ||
608 | return IRQ_HANDLED; | 562 | return IRQ_HANDLED; |
609 | 563 | ||
@@ -624,41 +578,35 @@ static struct irqaction cascade_action = { | |||
624 | 578 | ||
625 | static inline void set_piix4_virtual_irq_type(void) | 579 | static inline void set_piix4_virtual_irq_type(void) |
626 | { | 580 | { |
627 | piix4_virtual_irq_type.shutdown = i8259A_chip.mask; | ||
628 | piix4_virtual_irq_type.enable = i8259A_chip.unmask; | 581 | piix4_virtual_irq_type.enable = i8259A_chip.unmask; |
629 | piix4_virtual_irq_type.disable = i8259A_chip.mask; | 582 | piix4_virtual_irq_type.disable = i8259A_chip.mask; |
583 | piix4_virtual_irq_type.unmask = i8259A_chip.unmask; | ||
630 | } | 584 | } |
631 | 585 | ||
632 | void init_VISWS_APIC_irqs(void) | 586 | static void __init visws_pre_intr_init(void) |
633 | { | 587 | { |
634 | int i; | 588 | int i; |
635 | 589 | ||
636 | for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) { | 590 | set_piix4_virtual_irq_type(); |
637 | struct irq_desc *desc = irq_to_desc(i); | ||
638 | |||
639 | desc->status = IRQ_DISABLED; | ||
640 | desc->action = 0; | ||
641 | desc->depth = 1; | ||
642 | 591 | ||
643 | if (i == 0) { | 592 | for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) { |
644 | desc->chip = &cobalt_irq_type; | 593 | struct irq_chip *chip = NULL; |
645 | } | 594 | |
646 | else if (i == CO_IRQ_IDE0) { | 595 | if (i == 0) |
647 | desc->chip = &cobalt_irq_type; | 596 | chip = &cobalt_irq_type; |
648 | } | 597 | else if (i == CO_IRQ_IDE0) |
649 | else if (i == CO_IRQ_IDE1) { | 598 | chip = &cobalt_irq_type; |
650 | desc->chip = &cobalt_irq_type; | 599 | else if (i == CO_IRQ_IDE1) |
651 | } | 600 | chip = &cobalt_irq_type;
652 | else if (i == CO_IRQ_8259) { | 601 | else if (i == CO_IRQ_8259) |
653 | desc->chip = &piix4_master_irq_type; | 602 | chip = &piix4_master_irq_type; |
654 | } | 603 | else if (i < CO_IRQ_APIC0) |
655 | else if (i < CO_IRQ_APIC0) { | 604 | chip = &piix4_virtual_irq_type; |
656 | set_piix4_virtual_irq_type(); | 605 | else if (IS_CO_APIC(i)) |
657 | desc->chip = &piix4_virtual_irq_type; | 606 | chip = &cobalt_irq_type; |
658 | } | 607 | |
659 | else if (IS_CO_APIC(i)) { | 608 | if (chip) |
660 | desc->chip = &cobalt_irq_type; | 609 | set_irq_chip(i, chip); |
661 | } | ||
662 | } | 610 | } |
663 | 611 | ||
664 | setup_irq(CO_IRQ_8259, &master_action); | 612 | setup_irq(CO_IRQ_8259, &master_action); |
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 9d5f55848455..73b1e1a1f489 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -791,22 +791,22 @@ static void lguest_flush_tlb_kernel(void) | |||
791 | * simple as setting a bit. We don't actually "ack" interrupts as such, we | 791 | * simple as setting a bit. We don't actually "ack" interrupts as such, we |
792 | * just mask and unmask them. I wonder if we should be cleverer? | 792 | * just mask and unmask them. I wonder if we should be cleverer? |
793 | */ | 793 | */ |
794 | static void disable_lguest_irq(unsigned int irq) | 794 | static void disable_lguest_irq(struct irq_data *data) |
795 | { | 795 | { |
796 | set_bit(irq, lguest_data.blocked_interrupts); | 796 | set_bit(data->irq, lguest_data.blocked_interrupts); |
797 | } | 797 | } |
798 | 798 | ||
799 | static void enable_lguest_irq(unsigned int irq) | 799 | static void enable_lguest_irq(struct irq_data *data) |
800 | { | 800 | { |
801 | clear_bit(irq, lguest_data.blocked_interrupts); | 801 | clear_bit(data->irq, lguest_data.blocked_interrupts); |
802 | } | 802 | } |
803 | 803 | ||
804 | /* This structure describes the lguest IRQ controller. */ | 804 | /* This structure describes the lguest IRQ controller. */ |
805 | static struct irq_chip lguest_irq_controller = { | 805 | static struct irq_chip lguest_irq_controller = { |
806 | .name = "lguest", | 806 | .name = "lguest", |
807 | .mask = disable_lguest_irq, | 807 | .irq_mask = disable_lguest_irq, |
808 | .mask_ack = disable_lguest_irq, | 808 | .irq_mask_ack = disable_lguest_irq, |
809 | .unmask = enable_lguest_irq, | 809 | .irq_unmask = enable_lguest_irq, |
810 | }; | 810 | }; |
811 | 811 | ||
812 | /* | 812 | /* |
@@ -838,12 +838,12 @@ static void __init lguest_init_IRQ(void) | |||
838 | * rather than set them in lguest_init_IRQ we are called here every time an | 838 | * rather than set them in lguest_init_IRQ we are called here every time an |
839 | * lguest device needs an interrupt. | 839 | * lguest device needs an interrupt. |
840 | * | 840 | * |
841 | * FIXME: irq_to_desc_alloc_node() can fail due to lack of memory, we should | 841 | * FIXME: irq_alloc_desc_at() can fail due to lack of memory, we should |
842 | * pass that up! | 842 | * pass that up! |
843 | */ | 843 | */ |
844 | void lguest_setup_irq(unsigned int irq) | 844 | void lguest_setup_irq(unsigned int irq) |
845 | { | 845 | { |
846 | irq_to_desc_alloc_node(irq, 0); | 846 | irq_alloc_desc_at(irq, 0); |
847 | set_irq_chip_and_handler_name(irq, &lguest_irq_controller, | 847 | set_irq_chip_and_handler_name(irq, &lguest_irq_controller, |
848 | handle_level_irq, "level"); | 848 | handle_level_irq, "level"); |
849 | } | 849 | } |
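The lguest FIXME above still applies: irq_alloc_desc_at() can fail and the result is ignored. A hedged sketch of how the failure could be propagated, as it would sit in boot.c next to lguest_setup_irq(); the helper name is hypothetical, and treating -EEXIST (descriptor already allocated) as success is an assumption of this sketch:

static int lguest_setup_irq_checked(unsigned int irq)
{
	int err = irq_alloc_desc_at(irq, 0);

	if (err < 0 && err != -EEXIST)
		return err;	/* allocation really failed: pass it up */

	set_irq_chip_and_handler_name(irq, &lguest_irq_controller,
				      handle_level_irq, "level");
	return 0;
}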
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index b67a6b5aa8d4..42fb46f83883 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c | |||
@@ -64,15 +64,22 @@ static u64 ibs_op_ctl; | |||
64 | * IBS cpuid feature detection | 64 | * IBS cpuid feature detection |
65 | */ | 65 | */ |
66 | 66 | ||
67 | #define IBS_CPUID_FEATURES 0x8000001b | 67 | #define IBS_CPUID_FEATURES 0x8000001b |
68 | 68 | ||
69 | /* | 69 | /* |
70 | * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but | 70 | * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but |
71 | * bit 0 is used to indicate the existence of IBS. | 71 | * bit 0 is used to indicate the existence of IBS. |
72 | */ | 72 | */ |
73 | #define IBS_CAPS_AVAIL (1LL<<0) | 73 | #define IBS_CAPS_AVAIL (1U<<0) |
74 | #define IBS_CAPS_RDWROPCNT (1LL<<3) | 74 | #define IBS_CAPS_RDWROPCNT (1U<<3) |
75 | #define IBS_CAPS_OPCNT (1LL<<4) | 75 | #define IBS_CAPS_OPCNT (1U<<4) |
76 | |||
77 | /* | ||
78 | * IBS APIC setup | ||
79 | */ | ||
80 | #define IBSCTL 0x1cc | ||
81 | #define IBSCTL_LVT_OFFSET_VALID (1ULL<<8) | ||
82 | #define IBSCTL_LVT_OFFSET_MASK 0x0F | ||
76 | 83 | ||
77 | /* | 84 | /* |
78 | * IBS randomization macros | 85 | * IBS randomization macros |
@@ -266,6 +273,74 @@ static void op_amd_stop_ibs(void) | |||
266 | wrmsrl(MSR_AMD64_IBSOPCTL, 0); | 273 | wrmsrl(MSR_AMD64_IBSOPCTL, 0); |
267 | } | 274 | } |
268 | 275 | ||
276 | static inline int eilvt_is_available(int offset) | ||
277 | { | ||
278 | /* check if we may assign a vector */ | ||
279 | return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1); | ||
280 | } | ||
281 | |||
282 | static inline int ibs_eilvt_valid(void) | ||
283 | { | ||
284 | u64 val; | ||
285 | int offset; | ||
286 | |||
287 | rdmsrl(MSR_AMD64_IBSCTL, val); | ||
288 | offset = val & IBSCTL_LVT_OFFSET_MASK; | ||
289 | | ||
290 | if (!(val & IBSCTL_LVT_OFFSET_VALID)) { | ||
291 | pr_err(FW_BUG "cpu %d, invalid IBS " | ||
292 | "interrupt offset %d (MSR%08X=0x%016llx)", | ||
293 | smp_processor_id(), offset, | ||
294 | MSR_AMD64_IBSCTL, val); | ||
295 | return 0; | ||
296 | } | ||
297 | | ||
298 | if (eilvt_is_available(offset)) | ||
299 | return !0; | ||
300 | |||
301 | pr_err(FW_BUG "cpu %d, IBS interrupt offset %d " | ||
302 | "not available (MSR%08X=0x%016llx)", | ||
303 | smp_processor_id(), offset, | ||
304 | MSR_AMD64_IBSCTL, val); | ||
305 | |||
306 | return 0; | ||
307 | } | ||
308 | |||
309 | static inline int get_ibs_offset(void) | ||
310 | { | ||
311 | u64 val; | ||
312 | |||
313 | rdmsrl(MSR_AMD64_IBSCTL, val); | ||
314 | if (!(val & IBSCTL_LVT_OFFSET_VALID)) | ||
315 | return -EINVAL; | ||
316 | |||
317 | return val & IBSCTL_LVT_OFFSET_MASK; | ||
318 | } | ||
319 | |||
320 | static void setup_APIC_ibs(void) | ||
321 | { | ||
322 | int offset; | ||
323 | |||
324 | offset = get_ibs_offset(); | ||
325 | if (offset < 0) | ||
326 | goto failed; | ||
327 | |||
328 | if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0)) | ||
329 | return; | ||
330 | failed: | ||
331 | pr_warn("oprofile: IBS APIC setup failed on cpu #%d\n", | ||
332 | smp_processor_id()); | ||
333 | } | ||
334 | |||
335 | static void clear_APIC_ibs(void) | ||
336 | { | ||
337 | int offset; | ||
338 | |||
339 | offset = get_ibs_offset(); | ||
340 | if (offset >= 0) | ||
341 | setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); | ||
342 | } | ||
343 | |||
269 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | 344 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX |
270 | 345 | ||
271 | static void op_mux_switch_ctrl(struct op_x86_model_spec const *model, | 346 | static void op_mux_switch_ctrl(struct op_x86_model_spec const *model, |
@@ -376,13 +451,13 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, | |||
376 | } | 451 | } |
377 | 452 | ||
378 | if (ibs_caps) | 453 | if (ibs_caps) |
379 | setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0); | 454 | setup_APIC_ibs(); |
380 | } | 455 | } |
381 | 456 | ||
382 | static void op_amd_cpu_shutdown(void) | 457 | static void op_amd_cpu_shutdown(void) |
383 | { | 458 | { |
384 | if (ibs_caps) | 459 | if (ibs_caps) |
385 | setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1); | 460 | clear_APIC_ibs(); |
386 | } | 461 | } |
387 | 462 | ||
388 | static int op_amd_check_ctrs(struct pt_regs * const regs, | 463 | static int op_amd_check_ctrs(struct pt_regs * const regs, |
@@ -445,16 +520,11 @@ static void op_amd_stop(struct op_msrs const * const msrs) | |||
445 | op_amd_stop_ibs(); | 520 | op_amd_stop_ibs(); |
446 | } | 521 | } |
447 | 522 | ||
448 | static int __init_ibs_nmi(void) | 523 | static int setup_ibs_ctl(int ibs_eilvt_off) |
449 | { | 524 | { |
450 | #define IBSCTL_LVTOFFSETVAL (1 << 8) | ||
451 | #define IBSCTL 0x1cc | ||
452 | struct pci_dev *cpu_cfg; | 525 | struct pci_dev *cpu_cfg; |
453 | int nodes; | 526 | int nodes; |
454 | u32 value = 0; | 527 | u32 value = 0; |
455 | u8 ibs_eilvt_off; | ||
456 | |||
457 | ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1); | ||
458 | 528 | ||
459 | nodes = 0; | 529 | nodes = 0; |
460 | cpu_cfg = NULL; | 530 | cpu_cfg = NULL; |
@@ -466,21 +536,60 @@ static int __init_ibs_nmi(void) | |||
466 | break; | 536 | break; |
467 | ++nodes; | 537 | ++nodes; |
468 | pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off | 538 | pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off |
469 | | IBSCTL_LVTOFFSETVAL); | 539 | | IBSCTL_LVT_OFFSET_VALID); |
470 | pci_read_config_dword(cpu_cfg, IBSCTL, &value); | 540 | pci_read_config_dword(cpu_cfg, IBSCTL, &value); |
471 | if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) { | 541 | if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) { |
472 | pci_dev_put(cpu_cfg); | 542 | pci_dev_put(cpu_cfg); |
473 | printk(KERN_DEBUG "Failed to setup IBS LVT offset, " | 543 | printk(KERN_DEBUG "Failed to setup IBS LVT offset, " |
474 | "IBSCTL = 0x%08x", value); | 544 | "IBSCTL = 0x%08x\n", value); |
475 | return 1; | 545 | return -EINVAL; |
476 | } | 546 | } |
477 | } while (1); | 547 | } while (1); |
478 | 548 | ||
479 | if (!nodes) { | 549 | if (!nodes) { |
480 | printk(KERN_DEBUG "No CPU node configured for IBS"); | 550 | printk(KERN_DEBUG "No CPU node configured for IBS\n"); |
481 | return 1; | 551 | return -ENODEV; |
552 | } | ||
553 | |||
554 | return 0; | ||
555 | } | ||
556 | |||
557 | static int force_ibs_eilvt_setup(void) | ||
558 | { | ||
559 | int i; | ||
560 | int ret; | ||
561 | |||
562 | /* find the next free available EILVT entry */ | ||
563 | for (i = 1; i < 4; i++) { | ||
564 | if (!eilvt_is_available(i)) | ||
565 | continue; | ||
566 | ret = setup_ibs_ctl(i); | ||
567 | if (ret) | ||
568 | return ret; | ||
569 | return 0; | ||
482 | } | 570 | } |
483 | 571 | ||
572 | printk(KERN_DEBUG "No EILVT entry available\n"); | ||
573 | |||
574 | return -EBUSY; | ||
575 | } | ||
576 | |||
577 | static int __init_ibs_nmi(void) | ||
578 | { | ||
579 | int ret; | ||
580 | |||
581 | if (ibs_eilvt_valid()) | ||
582 | return 0; | ||
583 | |||
584 | ret = force_ibs_eilvt_setup(); | ||
585 | if (ret) | ||
586 | return ret; | ||
587 | |||
588 | if (!ibs_eilvt_valid()) | ||
589 | return -EFAULT; | ||
590 | |||
591 | pr_err(FW_BUG "workaround enabled for IBS LVT offset\n"); | ||
592 | |||
484 | return 0; | 593 | return 0; |
485 | } | 594 | } |
486 | 595 | ||
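The op_model_amd.c rework above splits IBS interrupt setup into two steps: read the LVT offset from the IBSCTL MSR, then program the matching local APIC EILVT entry. A compact sketch of the IBSCTL decoding those helpers share, using only the IBSCTL_LVT_OFFSET_* defines added above:

#include <linux/types.h>
#include <linux/errno.h>

/* Sketch: bit 8 of IBSCTL flags a valid LVT offset, bits 3:0 hold the offset. */
static int example_ibs_lvt_offset(u64 ibsctl)
{
	if (!(ibsctl & IBSCTL_LVT_OFFSET_VALID))
		return -EINVAL;

	return ibsctl & IBSCTL_LVT_OFFSET_MASK;
}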
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c index c64a5d387de5..87508886cbbd 100644 --- a/arch/xtensa/kernel/irq.c +++ b/arch/xtensa/kernel/irq.c | |||
@@ -92,7 +92,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
92 | for_each_online_cpu(j) | 92 | for_each_online_cpu(j) |
93 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | 93 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); |
94 | #endif | 94 | #endif |
95 | seq_printf(p, " %14s", irq_desc[i].chip->typename); | 95 | seq_printf(p, " %14s", irq_desc[i].chip->name); |
96 | seq_printf(p, " %s", action->name); | 96 | seq_printf(p, " %s", action->name); |
97 | 97 | ||
98 | for (action=action->next; action; action = action->next) | 98 | for (action=action->next; action; action = action->next) |