path: root/arch/x86/kernel/apic
author		Linus Torvalds <torvalds@linux-foundation.org>	2014-12-19 17:02:02 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-19 17:02:02 -0500
commit		e589c9e13aeb0c5539bf1314b3a78442ea8fc0c2 (patch)
tree		87aaebb0ef7c4cc7ee6cffc152a9264b24659b7e /arch/x86/kernel/apic
parent		4bb9374e0bd40d8fe97860ea0d61a0330b7c3925 (diff)
parent		719b530cdc70b45f22fed2185db8b34568b4c3f8 (diff)
Merge branch 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 apic updates from Thomas Gleixner:
 "After stopping the full x86/apic branch, I took some time to go
  through the first block of patches again, which are mostly cleanups
  and preparatory work for the irqdomain conversion and ioapic hotplug
  support. Unfortunately one of the real problematic commits was right
  at the beginning, so I rebased this portion of the pending patches
  without the offenders.

  It would be great to get this into 3.19; that makes reworking the
  problematic parts simpler. The usual tip testing did not unearth any
  issues and it is fully bisectable now. I'm pretty confident that
  this won't affect the calmness of the xmas season.

  Changes:

   - Split the convoluted io_apic.c code into domain specific parts
     (vector, ioapic, msi, htirq)

   - Introduce proper helper functions to retrieve irq specific data
     instead of open coded dereferencing of pointers

   - Preparatory work for ioapic hotplug and irqdomain conversion

   - Removal of the non functional pci-ioapic driver

   - Removal of unused irq entry stubs

   - Make native_smp_prepare_cpus() preemptible to avoid GFP_ATOMIC
     allocations for everything which is called from there

   - Small cleanups and fixes"

* 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (36 commits)
  iommu/amd: Use helpers to access irq_cfg data structure associated with IRQ
  iommu/vt-d: Use helpers to access irq_cfg data structure associated with IRQ
  x86: irq_remapping: Use helpers to access irq_cfg data structure associated with IRQ
  x86, irq: Use helpers to access irq_cfg data structure associated with IRQ
  x86, irq: Make MSI and HT_IRQ indepenent of X86_IO_APIC
  x86, irq: Move IRQ initialization routines from io_apic.c into vector.c
  x86, irq: Move IOAPIC related declarations from hw_irq.h into io_apic.h
  x86, irq: Move HT IRQ related code from io_apic.c into htirq.c
  x86, irq: Move PCI MSI related code from io_apic.c into msi.c
  x86, irq: Replace printk(KERN_LVL) with pr_lvl() utilities
  x86, irq: Make UP version of irq_complete_move() an inline stub
  x86, irq: Move local APIC related code from io_apic.c into vector.c
  x86, irq: Introduce helpers to access struct irq_cfg
  x86, irq: Protect __clear_irq_vector() with vector_lock
  x86, irq: Rename local APIC related functions in io_apic.c as apic_xxx()
  x86, irq: Refine hw_irq.h to prepare for irqdomain support
  x86, irq: Convert irq_2_pin list to generic list
  x86, irq: Kill useless parameter 'irq_attr' of IO_APIC_get_PCI_irq_vector()
  x86, irq, acpi: Get rid of special handling of GSI for ACPI SCI
  x86, irq: Introduce helper to check whether an IOAPIC has been registered
  ...
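A recurring theme in this series is replacing open-coded data->chip_data dereferences with small accessors. A rough sketch of that pattern (the real definitions land in the new vector.c and asm/hw_irq.h, which are outside this diff; only the call-site conversions are visible below):

static inline struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irq_get_chip_data(irq);		/* cfg looked up by irq number */
}

static inline struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
{
	return irq_data->chip_data;		/* cfg already carried by the irq_data */
}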
Diffstat (limited to 'arch/x86/kernel/apic')
-rw-r--r--	arch/x86/kernel/apic/Makefile	|    4
-rw-r--r--	arch/x86/kernel/apic/apic.c	|   22
-rw-r--r--	arch/x86/kernel/apic/htirq.c	|  107
-rw-r--r--	arch/x86/kernel/apic/io_apic.c	| 1356
-rw-r--r--	arch/x86/kernel/apic/msi.c	|  286
-rw-r--r--	arch/x86/kernel/apic/vector.c	|  719
6 files changed, 1363 insertions, 1131 deletions
diff --git a/arch/x86/kernel/apic/Makefile b/arch/x86/kernel/apic/Makefile
index dcb5b15401ce..8bb12ddc5db8 100644
--- a/arch/x86/kernel/apic/Makefile
+++ b/arch/x86/kernel/apic/Makefile
@@ -2,10 +2,12 @@
 # Makefile for local APIC drivers and for the IO-APIC code
 #
 
-obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o apic_noop.o ipi.o
+obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o apic_noop.o ipi.o vector.o
 obj-y				+= hw_nmi.o
 
 obj-$(CONFIG_X86_IO_APIC)	+= io_apic.o
+obj-$(CONFIG_PCI_MSI)		+= msi.o
+obj-$(CONFIG_HT_IRQ)		+= htirq.o
 obj-$(CONFIG_SMP)		+= ipi.o
 
 ifeq ($(CONFIG_X86_64),y)
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index ba6cc041edb1..29b5b18afa27 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -196,7 +196,7 @@ static int disable_apic_timer __initdata;
 int local_apic_timer_c2_ok;
 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
 
-int first_system_vector = 0xfe;
+int first_system_vector = FIRST_SYSTEM_VECTOR;
 
 /*
  * Debug level, exported for io_apic.c
@@ -1930,7 +1930,7 @@ int __init APIC_init_uniprocessor(void)
 /*
  * This interrupt should _never_ happen with our APIC/SMP architecture
  */
-static inline void __smp_spurious_interrupt(void)
+static inline void __smp_spurious_interrupt(u8 vector)
 {
 	u32 v;
 
@@ -1939,30 +1939,32 @@ static inline void __smp_spurious_interrupt(void)
 	 * if it is a vectored one. Just in case...
 	 * Spurious interrupts should not be ACKed.
 	 */
-	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
-	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
+	v = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1));
+	if (v & (1 << (vector & 0x1f)))
 		ack_APIC_irq();
 
 	inc_irq_stat(irq_spurious_count);
 
 	/* see sw-dev-man vol 3, chapter 7.4.13.5 */
-	pr_info("spurious APIC interrupt on CPU#%d, "
-		"should never happen.\n", smp_processor_id());
+	pr_info("spurious APIC interrupt through vector %02x on CPU#%d, "
+		"should never happen.\n", vector, smp_processor_id());
 }
 
 __visible void smp_spurious_interrupt(struct pt_regs *regs)
 {
 	entering_irq();
-	__smp_spurious_interrupt();
+	__smp_spurious_interrupt(~regs->orig_ax);
 	exiting_irq();
 }
 
 __visible void smp_trace_spurious_interrupt(struct pt_regs *regs)
 {
+	u8 vector = ~regs->orig_ax;
+
 	entering_irq();
-	trace_spurious_apic_entry(SPURIOUS_APIC_VECTOR);
-	__smp_spurious_interrupt();
-	trace_spurious_apic_exit(SPURIOUS_APIC_VECTOR);
+	trace_spurious_apic_entry(vector);
+	__smp_spurious_interrupt(vector);
+	trace_spurious_apic_exit(vector);
 	exiting_irq();
 }
 
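The hunk above works because the low-level interrupt entry stubs store the one's complement of the hardware vector in orig_ax; complementing it again recovers the vector, and the ISR read then picks the matching 32-bit bank of the 256-bit in-service register. A sketch of that arithmetic (illustration only, not part of the patch):

	u8  vector = ~regs->orig_ax;				/* entry stub stored ~vector */
	u32 reg    = APIC_ISR + ((vector & ~0x1f) >> 1);	/* (vector/32)*0x10, e.g. 0xff -> +0x70 */
	u32 mask   = 1u << (vector & 0x1f);			/* bit inside that 32-bit bank */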
diff --git a/arch/x86/kernel/apic/htirq.c b/arch/x86/kernel/apic/htirq.c
new file mode 100644
index 000000000000..816f36e979ad
--- /dev/null
+++ b/arch/x86/kernel/apic/htirq.c
@@ -0,0 +1,107 @@
+/*
+ * Support Hypertransport IRQ
+ *
+ * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
+ *	Moved from arch/x86/kernel/apic/io_apic.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/htirq.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/hypertransport.h>
+
+/*
+ * Hypertransport interrupt support
+ */
+static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
+{
+	struct ht_irq_msg msg;
+
+	fetch_ht_irq_msg(irq, &msg);
+
+	msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
+	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
+
+	msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
+	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
+
+	write_ht_irq_msg(irq, &msg);
+}
+
+static int
+ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
+{
+	struct irq_cfg *cfg = irqd_cfg(data);
+	unsigned int dest;
+	int ret;
+
+	ret = apic_set_affinity(data, mask, &dest);
+	if (ret)
+		return ret;
+
+	target_ht_irq(data->irq, dest, cfg->vector);
+	return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+static struct irq_chip ht_irq_chip = {
+	.name			= "PCI-HT",
+	.irq_mask		= mask_ht_irq,
+	.irq_unmask		= unmask_ht_irq,
+	.irq_ack		= apic_ack_edge,
+	.irq_set_affinity	= ht_set_affinity,
+	.irq_retrigger		= apic_retrigger_irq,
+	.flags			= IRQCHIP_SKIP_SET_WAKE,
+};
+
+int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
+{
+	struct irq_cfg *cfg;
+	struct ht_irq_msg msg;
+	unsigned dest;
+	int err;
+
+	if (disable_apic)
+		return -ENXIO;
+
+	cfg = irq_cfg(irq);
+	err = assign_irq_vector(irq, cfg, apic->target_cpus());
+	if (err)
+		return err;
+
+	err = apic->cpu_mask_to_apicid_and(cfg->domain,
+					   apic->target_cpus(), &dest);
+	if (err)
+		return err;
+
+	msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
+
+	msg.address_lo =
+		HT_IRQ_LOW_BASE |
+		HT_IRQ_LOW_DEST_ID(dest) |
+		HT_IRQ_LOW_VECTOR(cfg->vector) |
+		((apic->irq_dest_mode == 0) ?
+			HT_IRQ_LOW_DM_PHYSICAL :
+			HT_IRQ_LOW_DM_LOGICAL) |
+		HT_IRQ_LOW_RQEOI_EDGE |
+		((apic->irq_delivery_mode != dest_LowestPrio) ?
+			HT_IRQ_LOW_MT_FIXED :
+			HT_IRQ_LOW_MT_ARBITRATED) |
+		HT_IRQ_LOW_IRQ_MASKED;
+
+	write_ht_irq_msg(irq, &msg);
+
+	irq_set_chip_and_handler_name(irq, &ht_irq_chip,
+				      handle_edge_irq, "edge");
+
+	dev_dbg(&dev->dev, "irq %d for HT\n", irq);
+
+	return 0;
+}
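The new file keeps only the HyperTransport-specific pieces; the shared edge-IRQ plumbing it references (apic_ack_edge, apic_set_affinity, apic_retrigger_irq) now comes from the common vector code. A minimal sketch of the resulting irq_chip shape, with the device-specific hooks as hypothetical placeholders:

static struct irq_chip my_edge_chip = {
	.name			= "MY-EDGE",		/* hypothetical driver */
	.irq_mask		= my_mask,		/* device specific (placeholder) */
	.irq_unmask		= my_unmask,		/* device specific (placeholder) */
	.irq_ack		= apic_ack_edge,	/* shared: ack at the local APIC */
	.irq_set_affinity	= my_set_affinity,	/* wraps apic_set_affinity() */
	.irq_retrigger		= apic_retrigger_irq,	/* shared: resend via IPI */
	.flags			= IRQCHIP_SKIP_SET_WAKE,
};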
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index a6745e756729..3f5f60406ab1 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -32,15 +32,11 @@
 #include <linux/module.h>
 #include <linux/syscore_ops.h>
 #include <linux/irqdomain.h>
-#include <linux/msi.h>
-#include <linux/htirq.h>
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/jiffies.h>	/* time_after() */
 #include <linux/slab.h>
 #include <linux/bootmem.h>
-#include <linux/dmar.h>
-#include <linux/hpet.h>
 
 #include <asm/idle.h>
 #include <asm/io.h>
@@ -52,17 +48,12 @@
 #include <asm/dma.h>
 #include <asm/timer.h>
 #include <asm/i8259.h>
-#include <asm/msidef.h>
-#include <asm/hypertransport.h>
 #include <asm/setup.h>
 #include <asm/irq_remapping.h>
-#include <asm/hpet.h>
 #include <asm/hw_irq.h>
 
 #include <asm/apic.h>
 
-#define __apicdebuginit(type) static type __init
-
 #define	for_each_ioapic(idx)	\
 	for ((idx) = 0; (idx) < nr_ioapics; (idx)++)
 #define	for_each_ioapic_reverse(idx)	\
@@ -74,7 +65,7 @@
 		for_each_pin((idx), (pin))
 
 #define for_each_irq_pin(entry, head) \
-	for (entry = head; entry; entry = entry->next)
+	list_for_each_entry(entry, &head, list)
 
 /*
  * Is the SiS APIC rmw bug present ?
@@ -83,7 +74,6 @@
 int sis_apic_bug = -1;
 
 static DEFINE_RAW_SPINLOCK(ioapic_lock);
-static DEFINE_RAW_SPINLOCK(vector_lock);
 static DEFINE_MUTEX(ioapic_mutex);
 static unsigned int ioapic_dynirq_base;
 static int ioapic_initialized;
@@ -112,6 +102,7 @@ static struct ioapic {
 	struct ioapic_domain_cfg irqdomain_cfg;
 	struct irq_domain *irqdomain;
 	struct mp_pin_info *pin_info;
+	struct resource *iomem_res;
 } ioapics[MAX_IO_APICS];
 
 #define mpc_ioapic_ver(ioapic_idx)	ioapics[ioapic_idx].mp_config.apicver
@@ -205,8 +196,6 @@ static int __init parse_noapic(char *str)
 }
 early_param("noapic", parse_noapic);
 
-static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node);
-
 /* Will be called in mpparse/acpi/sfi codes for saving IRQ info */
 void mp_save_irq(struct mpc_intsrc *m)
 {
@@ -228,8 +217,8 @@ void mp_save_irq(struct mpc_intsrc *m)
 }
 
 struct irq_pin_list {
+	struct list_head list;
 	int apic, pin;
-	struct irq_pin_list *next;
 };
 
 static struct irq_pin_list *alloc_irq_pin_list(int node)
@@ -237,7 +226,26 @@ static struct irq_pin_list *alloc_irq_pin_list(int node)
 	return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node);
 }
 
-int __init arch_early_irq_init(void)
+static void alloc_ioapic_saved_registers(int idx)
+{
+	size_t size;
+
+	if (ioapics[idx].saved_registers)
+		return;
+
+	size = sizeof(struct IO_APIC_route_entry) * ioapics[idx].nr_registers;
+	ioapics[idx].saved_registers = kzalloc(size, GFP_KERNEL);
+	if (!ioapics[idx].saved_registers)
+		pr_err("IOAPIC %d: suspend/resume impossible!\n", idx);
+}
+
+static void free_ioapic_saved_registers(int idx)
+{
+	kfree(ioapics[idx].saved_registers);
+	ioapics[idx].saved_registers = NULL;
+}
+
+int __init arch_early_ioapic_init(void)
 {
 	struct irq_cfg *cfg;
 	int i, node = cpu_to_node(0);
@@ -245,13 +253,8 @@ int __init arch_early_irq_init(void)
 	if (!nr_legacy_irqs())
 		io_apic_irqs = ~0UL;
 
-	for_each_ioapic(i) {
-		ioapics[i].saved_registers =
-			kzalloc(sizeof(struct IO_APIC_route_entry) *
-				ioapics[i].nr_registers, GFP_KERNEL);
-		if (!ioapics[i].saved_registers)
-			pr_err("IOAPIC %d: suspend/resume impossible!\n", i);
-	}
+	for_each_ioapic(i)
+		alloc_ioapic_saved_registers(i);
 
 	/*
 	 * For legacy IRQ's, start with assigning irq0 to irq15 to
@@ -266,61 +269,6 @@ int __init arch_early_irq_init(void)
 	return 0;
 }
 
-static inline struct irq_cfg *irq_cfg(unsigned int irq)
-{
-	return irq_get_chip_data(irq);
-}
-
-static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
-{
-	struct irq_cfg *cfg;
-
-	cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
-	if (!cfg)
-		return NULL;
-	if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
-		goto out_cfg;
-	if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
-		goto out_domain;
-	return cfg;
-out_domain:
-	free_cpumask_var(cfg->domain);
-out_cfg:
-	kfree(cfg);
-	return NULL;
-}
-
-static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
-{
-	if (!cfg)
-		return;
-	irq_set_chip_data(at, NULL);
-	free_cpumask_var(cfg->domain);
-	free_cpumask_var(cfg->old_domain);
-	kfree(cfg);
-}
-
-static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
-{
-	int res = irq_alloc_desc_at(at, node);
-	struct irq_cfg *cfg;
-
-	if (res < 0) {
-		if (res != -EEXIST)
-			return NULL;
-		cfg = irq_cfg(at);
-		if (cfg)
-			return cfg;
-	}
-
-	cfg = alloc_irq_cfg(at, node);
-	if (cfg)
-		irq_set_chip_data(at, cfg);
-	else
-		irq_free_desc(at);
-	return cfg;
-}
-
 struct io_apic {
 	unsigned int index;
 	unsigned int unused[3];
@@ -445,15 +393,12 @@ static void ioapic_mask_entry(int apic, int pin)
  */
 static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
 {
-	struct irq_pin_list **last, *entry;
+	struct irq_pin_list *entry;
 
 	/* don't allow duplicates */
-	last = &cfg->irq_2_pin;
-	for_each_irq_pin(entry, cfg->irq_2_pin) {
+	for_each_irq_pin(entry, cfg->irq_2_pin)
 		if (entry->apic == apic && entry->pin == pin)
 			return 0;
-		last = &entry->next;
-	}
 
 	entry = alloc_irq_pin_list(node);
 	if (!entry) {
@@ -464,22 +409,19 @@ static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pi
 	entry->apic = apic;
 	entry->pin = pin;
 
-	*last = entry;
+	list_add_tail(&entry->list, &cfg->irq_2_pin);
 	return 0;
 }
 
 static void __remove_pin_from_irq(struct irq_cfg *cfg, int apic, int pin)
 {
-	struct irq_pin_list **last, *entry;
+	struct irq_pin_list *tmp, *entry;
 
-	last = &cfg->irq_2_pin;
-	for_each_irq_pin(entry, cfg->irq_2_pin)
+	list_for_each_entry_safe(entry, tmp, &cfg->irq_2_pin, list)
 		if (entry->apic == apic && entry->pin == pin) {
-			*last = entry->next;
+			list_del(&entry->list);
 			kfree(entry);
 			return;
-		} else {
-			last = &entry->next;
 		}
 }
 
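The two hunks above convert the irq_2_pin chain from a hand-rolled singly linked list to the kernel's generic list_head. A sketch of the idiom (the list head itself is initialized with INIT_LIST_HEAD() where the irq_cfg is allocated, outside the hunks shown here):

struct irq_pin_list {
	struct list_head list;	/* embedded linkage replaces the old ->next */
	int apic, pin;
};

/* The _safe variant caches the next node, so the current entry can be
 * unlinked and freed while walking the list. */
static void remove_pin(struct list_head *head, int apic, int pin)
{
	struct irq_pin_list *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list) {
		if (entry->apic == apic && entry->pin == pin) {
			list_del(&entry->list);
			kfree(entry);
			return;
		}
	}
}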
@@ -559,7 +501,7 @@ static void mask_ioapic(struct irq_cfg *cfg)
 
 static void mask_ioapic_irq(struct irq_data *data)
 {
-	mask_ioapic(data->chip_data);
+	mask_ioapic(irqd_cfg(data));
 }
 
 static void __unmask_ioapic(struct irq_cfg *cfg)
@@ -578,7 +520,7 @@ static void unmask_ioapic(struct irq_cfg *cfg)
 
 static void unmask_ioapic_irq(struct irq_data *data)
 {
-	unmask_ioapic(data->chip_data);
+	unmask_ioapic(irqd_cfg(data));
 }
 
 /*
@@ -1164,8 +1106,7 @@ void mp_unmap_irq(int irq)
  * Find a specific PCI IRQ entry.
  * Not an __init, possibly needed by modules
  */
-int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
-			       struct io_apic_irq_attr *irq_attr)
+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
 {
 	int irq, i, best_ioapic = -1, best_idx = -1;
 
@@ -1219,195 +1160,11 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
 	return -1;
 
 out:
-	irq = pin_2_irq(best_idx, best_ioapic, mp_irqs[best_idx].dstirq,
-			IOAPIC_MAP_ALLOC);
-	if (irq > 0)
-		set_io_apic_irq_attr(irq_attr, best_ioapic,
-				     mp_irqs[best_idx].dstirq,
-				     irq_trigger(best_idx),
-				     irq_polarity(best_idx));
-	return irq;
+	return pin_2_irq(best_idx, best_ioapic, mp_irqs[best_idx].dstirq,
+			 IOAPIC_MAP_ALLOC);
 }
 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
 
-void lock_vector_lock(void)
-{
-	/* Used to the online set of cpus does not change
-	 * during assign_irq_vector.
-	 */
-	raw_spin_lock(&vector_lock);
-}
-
-void unlock_vector_lock(void)
-{
-	raw_spin_unlock(&vector_lock);
-}
-
-static int
-__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
-{
-	/*
-	 * NOTE! The local APIC isn't very good at handling
-	 * multiple interrupts at the same interrupt level.
-	 * As the interrupt level is determined by taking the
-	 * vector number and shifting that right by 4, we
-	 * want to spread these out a bit so that they don't
-	 * all fall in the same interrupt level.
-	 *
-	 * Also, we've got to be careful not to trash gate
-	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
-	 */
-	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
-	static int current_offset = VECTOR_OFFSET_START % 16;
-	int cpu, err;
-	cpumask_var_t tmp_mask;
-
-	if (cfg->move_in_progress)
-		return -EBUSY;
-
-	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
-		return -ENOMEM;
-
-	/* Only try and allocate irqs on cpus that are present */
-	err = -ENOSPC;
-	cpumask_clear(cfg->old_domain);
-	cpu = cpumask_first_and(mask, cpu_online_mask);
-	while (cpu < nr_cpu_ids) {
-		int new_cpu, vector, offset;
-
-		apic->vector_allocation_domain(cpu, tmp_mask, mask);
-
-		if (cpumask_subset(tmp_mask, cfg->domain)) {
-			err = 0;
-			if (cpumask_equal(tmp_mask, cfg->domain))
-				break;
-			/*
-			 * New cpumask using the vector is a proper subset of
-			 * the current in use mask. So cleanup the vector
-			 * allocation for the members that are not used anymore.
-			 */
-			cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
-			cfg->move_in_progress =
-			   cpumask_intersects(cfg->old_domain, cpu_online_mask);
-			cpumask_and(cfg->domain, cfg->domain, tmp_mask);
-			break;
-		}
-
-		vector = current_vector;
-		offset = current_offset;
-next:
-		vector += 16;
-		if (vector >= first_system_vector) {
-			offset = (offset + 1) % 16;
-			vector = FIRST_EXTERNAL_VECTOR + offset;
-		}
-
-		if (unlikely(current_vector == vector)) {
-			cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
-			cpumask_andnot(tmp_mask, mask, cfg->old_domain);
-			cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
-			continue;
-		}
-
-		if (test_bit(vector, used_vectors))
-			goto next;
-
-		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
-			if (per_cpu(vector_irq, new_cpu)[vector] > VECTOR_UNDEFINED)
-				goto next;
-		}
-		/* Found one! */
-		current_vector = vector;
-		current_offset = offset;
-		if (cfg->vector) {
-			cpumask_copy(cfg->old_domain, cfg->domain);
-			cfg->move_in_progress =
-			   cpumask_intersects(cfg->old_domain, cpu_online_mask);
-		}
-		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
-			per_cpu(vector_irq, new_cpu)[vector] = irq;
-		cfg->vector = vector;
-		cpumask_copy(cfg->domain, tmp_mask);
-		err = 0;
-		break;
-	}
-	free_cpumask_var(tmp_mask);
-	return err;
-}
-
-int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
-{
-	int err;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&vector_lock, flags);
-	err = __assign_irq_vector(irq, cfg, mask);
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
-	return err;
-}
-
-static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
-{
-	int cpu, vector;
-
-	BUG_ON(!cfg->vector);
-
-	vector = cfg->vector;
-	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
-		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
-
-	cfg->vector = 0;
-	cpumask_clear(cfg->domain);
-
-	if (likely(!cfg->move_in_progress))
-		return;
-	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
-		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-			if (per_cpu(vector_irq, cpu)[vector] != irq)
-				continue;
-			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
-			break;
-		}
-	}
-	cfg->move_in_progress = 0;
-}
-
-void __setup_vector_irq(int cpu)
-{
-	/* Initialize vector_irq on a new cpu */
-	int irq, vector;
-	struct irq_cfg *cfg;
-
-	/*
-	 * vector_lock will make sure that we don't run into irq vector
-	 * assignments that might be happening on another cpu in parallel,
-	 * while we setup our initial vector to irq mappings.
-	 */
-	raw_spin_lock(&vector_lock);
-	/* Mark the inuse vectors */
-	for_each_active_irq(irq) {
-		cfg = irq_cfg(irq);
-		if (!cfg)
-			continue;
-
-		if (!cpumask_test_cpu(cpu, cfg->domain))
-			continue;
-		vector = cfg->vector;
-		per_cpu(vector_irq, cpu)[vector] = irq;
-	}
-	/* Mark the free vectors */
-	for (vector = 0; vector < NR_VECTORS; ++vector) {
-		irq = per_cpu(vector_irq, cpu)[vector];
-		if (irq <= VECTOR_UNDEFINED)
-			continue;
-
-		cfg = irq_cfg(irq);
-		if (!cpumask_test_cpu(cpu, cfg->domain))
-			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
-	}
-	raw_spin_unlock(&vector_lock);
-}
-
 static struct irq_chip ioapic_chip;
 
 #ifdef CONFIG_X86_32
@@ -1496,7 +1253,7 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
 			 &dest)) {
 		pr_warn("Failed to obtain apicid for ioapic %d, pin %d\n",
 			mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
-		__clear_irq_vector(irq, cfg);
+		clear_irq_vector(irq, cfg);
 
 		return;
 	}
@@ -1510,7 +1267,7 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
 	if (x86_io_apic_ops.setup_entry(irq, &entry, dest, cfg->vector, attr)) {
 		pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n",
 			mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
-		__clear_irq_vector(irq, cfg);
+		clear_irq_vector(irq, cfg);
 
 		return;
 	}
@@ -1641,7 +1398,7 @@ void ioapic_zap_locks(void)
 	raw_spin_lock_init(&ioapic_lock);
 }
 
-__apicdebuginit(void) print_IO_APIC(int ioapic_idx)
+static void __init print_IO_APIC(int ioapic_idx)
 {
 	union IO_APIC_reg_00 reg_00;
 	union IO_APIC_reg_01 reg_01;
@@ -1698,7 +1455,7 @@ __apicdebuginit(void) print_IO_APIC(int ioapic_idx)
 	x86_io_apic_ops.print_entries(ioapic_idx, reg_01.bits.entries);
 }
 
-__apicdebuginit(void) print_IO_APICs(void)
+void __init print_IO_APICs(void)
 {
 	int ioapic_idx;
 	struct irq_cfg *cfg;
@@ -1731,8 +1488,7 @@ __apicdebuginit(void) print_IO_APICs(void)
 		cfg = irq_cfg(irq);
 		if (!cfg)
 			continue;
-		entry = cfg->irq_2_pin;
-		if (!entry)
+		if (list_empty(&cfg->irq_2_pin))
 			continue;
 		printk(KERN_DEBUG "IRQ%d ", irq);
 		for_each_irq_pin(entry, cfg->irq_2_pin)
@@ -1743,205 +1499,6 @@ __apicdebuginit(void) print_IO_APICs(void)
 	printk(KERN_INFO ".................................... done.\n");
 }
 
-__apicdebuginit(void) print_APIC_field(int base)
-{
-	int i;
-
-	printk(KERN_DEBUG);
-
-	for (i = 0; i < 8; i++)
-		pr_cont("%08x", apic_read(base + i*0x10));
-
-	pr_cont("\n");
-}
-
-__apicdebuginit(void) print_local_APIC(void *dummy)
-{
-	unsigned int i, v, ver, maxlvt;
-	u64 icr;
-
-	printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
-		smp_processor_id(), hard_smp_processor_id());
-	v = apic_read(APIC_ID);
-	printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
-	v = apic_read(APIC_LVR);
-	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
-	ver = GET_APIC_VERSION(v);
-	maxlvt = lapic_get_maxlvt();
-
-	v = apic_read(APIC_TASKPRI);
-	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
-
-	if (APIC_INTEGRATED(ver)) {	/* !82489DX */
-		if (!APIC_XAPIC(ver)) {
-			v = apic_read(APIC_ARBPRI);
-			printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
-			       v & APIC_ARBPRI_MASK);
-		}
-		v = apic_read(APIC_PROCPRI);
-		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
-	}
-
-	/*
-	 * Remote read supported only in the 82489DX and local APIC for
-	 * Pentium processors.
-	 */
-	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
-		v = apic_read(APIC_RRR);
-		printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
-	}
-
-	v = apic_read(APIC_LDR);
-	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
-	if (!x2apic_enabled()) {
-		v = apic_read(APIC_DFR);
-		printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
-	}
-	v = apic_read(APIC_SPIV);
-	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
-
-	printk(KERN_DEBUG "... APIC ISR field:\n");
-	print_APIC_field(APIC_ISR);
-	printk(KERN_DEBUG "... APIC TMR field:\n");
-	print_APIC_field(APIC_TMR);
-	printk(KERN_DEBUG "... APIC IRR field:\n");
-	print_APIC_field(APIC_IRR);
-
-	if (APIC_INTEGRATED(ver)) {	/* !82489DX */
-		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
-			apic_write(APIC_ESR, 0);
-
-		v = apic_read(APIC_ESR);
-		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
-	}
-
-	icr = apic_icr_read();
-	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
-	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
-
-	v = apic_read(APIC_LVTT);
-	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
-
-	if (maxlvt > 3) {		/* PC is LVT#4. */
-		v = apic_read(APIC_LVTPC);
-		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
-	}
-	v = apic_read(APIC_LVT0);
-	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
-	v = apic_read(APIC_LVT1);
-	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
-
-	if (maxlvt > 2) {		/* ERR is LVT#3. */
-		v = apic_read(APIC_LVTERR);
-		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
-	}
-
-	v = apic_read(APIC_TMICT);
-	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
-	v = apic_read(APIC_TMCCT);
-	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
-	v = apic_read(APIC_TDCR);
-	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
-
-	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
-		v = apic_read(APIC_EFEAT);
-		maxlvt = (v >> 16) & 0xff;
-		printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
-		v = apic_read(APIC_ECTRL);
-		printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
-		for (i = 0; i < maxlvt; i++) {
-			v = apic_read(APIC_EILVTn(i));
-			printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
-		}
-	}
-	pr_cont("\n");
-}
-
-__apicdebuginit(void) print_local_APICs(int maxcpu)
-{
-	int cpu;
-
-	if (!maxcpu)
-		return;
-
-	preempt_disable();
-	for_each_online_cpu(cpu) {
-		if (cpu >= maxcpu)
-			break;
-		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
-	}
-	preempt_enable();
-}
-
-__apicdebuginit(void) print_PIC(void)
-{
-	unsigned int v;
-	unsigned long flags;
-
-	if (!nr_legacy_irqs())
-		return;
-
-	printk(KERN_DEBUG "\nprinting PIC contents\n");
-
-	raw_spin_lock_irqsave(&i8259A_lock, flags);
-
-	v = inb(0xa1) << 8 | inb(0x21);
-	printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
-
-	v = inb(0xa0) << 8 | inb(0x20);
-	printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
-
-	outb(0x0b,0xa0);
-	outb(0x0b,0x20);
-	v = inb(0xa0) << 8 | inb(0x20);
-	outb(0x0a,0xa0);
-	outb(0x0a,0x20);
-
-	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-
-	printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
-
-	v = inb(0x4d1) << 8 | inb(0x4d0);
-	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
-}
-
-static int __initdata show_lapic = 1;
-static __init int setup_show_lapic(char *arg)
-{
-	int num = -1;
-
-	if (strcmp(arg, "all") == 0) {
-		show_lapic = CONFIG_NR_CPUS;
-	} else {
-		get_option(&arg, &num);
-		if (num >= 0)
-			show_lapic = num;
-	}
-
-	return 1;
-}
-__setup("show_lapic=", setup_show_lapic);
-
-__apicdebuginit(int) print_ICs(void)
-{
-	if (apic_verbosity == APIC_QUIET)
-		return 0;
-
-	print_PIC();
-
-	/* don't print out if apic is not there */
-	if (!cpu_has_apic && !apic_from_smp_config())
-		return 0;
-
-	print_local_APICs(show_lapic);
-	print_IO_APICs();
-
-	return 0;
-}
-
-late_initcall(print_ICs);
-
-
 /* Where if anywhere is the i8259 connect in external int mode */
 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
 
@@ -2244,26 +1801,12 @@ static unsigned int startup_ioapic_irq(struct irq_data *data)
 		if (legacy_pic->irq_pending(irq))
 			was_pending = 1;
 	}
-	__unmask_ioapic(data->chip_data);
+	__unmask_ioapic(irqd_cfg(data));
 	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	return was_pending;
 }
 
-static int ioapic_retrigger_irq(struct irq_data *data)
-{
-	struct irq_cfg *cfg = data->chip_data;
-	unsigned long flags;
-	int cpu;
-
-	raw_spin_lock_irqsave(&vector_lock, flags);
-	cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
-	apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
-
-	return 1;
-}
-
 /*
  * Level and edge triggered IO-APIC interrupts need different handling,
  * so we use two separate IRQ descriptors. Edge triggered IRQs can be
@@ -2273,113 +1816,6 @@ static int ioapic_retrigger_irq(struct irq_data *data)
  * races.
  */
 
-#ifdef CONFIG_SMP
-void send_cleanup_vector(struct irq_cfg *cfg)
-{
-	cpumask_var_t cleanup_mask;
-
-	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
-		unsigned int i;
-		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
-			apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
-	} else {
-		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
-		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-		free_cpumask_var(cleanup_mask);
-	}
-	cfg->move_in_progress = 0;
-}
-
-asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
-{
-	unsigned vector, me;
-
-	ack_APIC_irq();
-	irq_enter();
-	exit_idle();
-
-	me = smp_processor_id();
-	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-		int irq;
-		unsigned int irr;
-		struct irq_desc *desc;
-		struct irq_cfg *cfg;
-		irq = __this_cpu_read(vector_irq[vector]);
-
-		if (irq <= VECTOR_UNDEFINED)
-			continue;
-
-		desc = irq_to_desc(irq);
-		if (!desc)
-			continue;
-
-		cfg = irq_cfg(irq);
-		if (!cfg)
-			continue;
-
-		raw_spin_lock(&desc->lock);
-
-		/*
-		 * Check if the irq migration is in progress. If so, we
-		 * haven't received the cleanup request yet for this irq.
-		 */
-		if (cfg->move_in_progress)
-			goto unlock;
-
-		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
-			goto unlock;
-
-		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
-		/*
-		 * Check if the vector that needs to be cleanedup is
-		 * registered at the cpu's IRR. If so, then this is not
-		 * the best time to clean it up. Lets clean it up in the
-		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
-		 * to myself.
-		 */
-		if (irr & (1 << (vector % 32))) {
-			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
-			goto unlock;
-		}
-		__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
-unlock:
-		raw_spin_unlock(&desc->lock);
-	}
-
-	irq_exit();
-}
-
-static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
-{
-	unsigned me;
-
-	if (likely(!cfg->move_in_progress))
-		return;
-
-	me = smp_processor_id();
-
-	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
-		send_cleanup_vector(cfg);
-}
-
-static void irq_complete_move(struct irq_cfg *cfg)
-{
-	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
-}
-
-void irq_force_complete_move(int irq)
-{
-	struct irq_cfg *cfg = irq_cfg(irq);
-
-	if (!cfg)
-		return;
-
-	__irq_complete_move(cfg, cfg->vector);
-}
-#else
-static inline void irq_complete_move(struct irq_cfg *cfg) { }
-#endif
-
 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
 {
 	int apic, pin;
@@ -2400,41 +1836,6 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
 	}
 }
 
-/*
- * Either sets data->affinity to a valid value, and returns
- * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
- * leaves data->affinity untouched.
- */
-int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
-			  unsigned int *dest_id)
-{
-	struct irq_cfg *cfg = data->chip_data;
-	unsigned int irq = data->irq;
-	int err;
-
-	if (!config_enabled(CONFIG_SMP))
-		return -EPERM;
-
-	if (!cpumask_intersects(mask, cpu_online_mask))
-		return -EINVAL;
-
-	err = assign_irq_vector(irq, cfg, mask);
-	if (err)
-		return err;
-
-	err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
-	if (err) {
-		if (assign_irq_vector(irq, cfg, data->affinity))
-			pr_err("Failed to recover vector for irq %d\n", irq);
-		return err;
-	}
-
-	cpumask_copy(data->affinity, mask);
-
-	return 0;
-}
-
-
 int native_ioapic_set_affinity(struct irq_data *data,
 			       const struct cpumask *mask,
 			       bool force)
@@ -2447,24 +1848,17 @@ int native_ioapic_set_affinity(struct irq_data *data,
 		return -EPERM;
 
 	raw_spin_lock_irqsave(&ioapic_lock, flags);
-	ret = __ioapic_set_affinity(data, mask, &dest);
+	ret = apic_set_affinity(data, mask, &dest);
 	if (!ret) {
 		/* Only the high 8 bits are valid. */
 		dest = SET_APIC_LOGICAL_ID(dest);
-		__target_IO_APIC_irq(irq, dest, data->chip_data);
+		__target_IO_APIC_irq(irq, dest, irqd_cfg(data));
 		ret = IRQ_SET_MASK_OK_NOCOPY;
 	}
 	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 	return ret;
 }
 
-static void ack_apic_edge(struct irq_data *data)
-{
-	irq_complete_move(data->chip_data);
-	irq_move_irq(data);
-	ack_APIC_irq();
-}
-
 atomic_t irq_mis_count;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
@@ -2547,9 +1941,9 @@ static inline void ioapic_irqd_unmask(struct irq_data *data,
 }
 #endif
 
-static void ack_apic_level(struct irq_data *data)
+static void ack_ioapic_level(struct irq_data *data)
 {
-	struct irq_cfg *cfg = data->chip_data;
+	struct irq_cfg *cfg = irqd_cfg(data);
 	int i, irq = data->irq;
 	unsigned long v;
 	bool masked;
@@ -2619,10 +2013,10 @@ static struct irq_chip ioapic_chip __read_mostly = {
 	.irq_startup		= startup_ioapic_irq,
 	.irq_mask		= mask_ioapic_irq,
 	.irq_unmask		= unmask_ioapic_irq,
-	.irq_ack		= ack_apic_edge,
-	.irq_eoi		= ack_apic_level,
+	.irq_ack		= apic_ack_edge,
+	.irq_eoi		= ack_ioapic_level,
 	.irq_set_affinity	= native_ioapic_set_affinity,
-	.irq_retrigger		= ioapic_retrigger_irq,
+	.irq_retrigger		= apic_retrigger_irq,
 	.flags			= IRQCHIP_SKIP_SET_WAKE,
 };
 
@@ -2965,6 +2359,16 @@ static int mp_irqdomain_create(int ioapic)
 	return 0;
 }
 
+static void ioapic_destroy_irqdomain(int idx)
+{
+	if (ioapics[idx].irqdomain) {
+		irq_domain_remove(ioapics[idx].irqdomain);
+		ioapics[idx].irqdomain = NULL;
+	}
+	kfree(ioapics[idx].pin_info);
+	ioapics[idx].pin_info = NULL;
+}
+
 void __init setup_IO_APIC(void)
 {
 	int ioapic;
@@ -3044,399 +2448,6 @@ static int __init ioapic_init_ops(void)
 
 device_initcall(ioapic_init_ops);
 
-/*
- * Dynamic irq allocate and deallocation. Should be replaced by irq domains!
- */
-int arch_setup_hwirq(unsigned int irq, int node)
-{
-	struct irq_cfg *cfg;
-	unsigned long flags;
-	int ret;
-
-	cfg = alloc_irq_cfg(irq, node);
-	if (!cfg)
-		return -ENOMEM;
-
-	raw_spin_lock_irqsave(&vector_lock, flags);
-	ret = __assign_irq_vector(irq, cfg, apic->target_cpus());
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
-
-	if (!ret)
-		irq_set_chip_data(irq, cfg);
-	else
-		free_irq_cfg(irq, cfg);
-	return ret;
-}
-
-void arch_teardown_hwirq(unsigned int irq)
-{
-	struct irq_cfg *cfg = irq_cfg(irq);
-	unsigned long flags;
-
-	free_remapped_irq(irq);
-	raw_spin_lock_irqsave(&vector_lock, flags);
-	__clear_irq_vector(irq, cfg);
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
-	free_irq_cfg(irq, cfg);
-}
-
-/*
- * MSI message composition
- */
-void native_compose_msi_msg(struct pci_dev *pdev,
-			    unsigned int irq, unsigned int dest,
-			    struct msi_msg *msg, u8 hpet_id)
-{
-	struct irq_cfg *cfg = irq_cfg(irq);
-
-	msg->address_hi = MSI_ADDR_BASE_HI;
-
-	if (x2apic_enabled())
-		msg->address_hi |= MSI_ADDR_EXT_DEST_ID(dest);
-
-	msg->address_lo =
-		MSI_ADDR_BASE_LO |
-		((apic->irq_dest_mode == 0) ?
-			MSI_ADDR_DEST_MODE_PHYSICAL:
-			MSI_ADDR_DEST_MODE_LOGICAL) |
-		((apic->irq_delivery_mode != dest_LowestPrio) ?
-			MSI_ADDR_REDIRECTION_CPU:
-			MSI_ADDR_REDIRECTION_LOWPRI) |
-		MSI_ADDR_DEST_ID(dest);
-
-	msg->data =
-		MSI_DATA_TRIGGER_EDGE |
-		MSI_DATA_LEVEL_ASSERT |
-		((apic->irq_delivery_mode != dest_LowestPrio) ?
-			MSI_DATA_DELIVERY_FIXED:
-			MSI_DATA_DELIVERY_LOWPRI) |
-		MSI_DATA_VECTOR(cfg->vector);
-}
-
-#ifdef CONFIG_PCI_MSI
-static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
-			   struct msi_msg *msg, u8 hpet_id)
-{
-	struct irq_cfg *cfg;
-	int err;
-	unsigned dest;
-
-	if (disable_apic)
-		return -ENXIO;
-
-	cfg = irq_cfg(irq);
-	err = assign_irq_vector(irq, cfg, apic->target_cpus());
-	if (err)
-		return err;
-
-	err = apic->cpu_mask_to_apicid_and(cfg->domain,
-					   apic->target_cpus(), &dest);
-	if (err)
-		return err;
-
-	x86_msi.compose_msi_msg(pdev, irq, dest, msg, hpet_id);
-
-	return 0;
-}
-
-static int
-msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
-{
-	struct irq_cfg *cfg = data->chip_data;
-	struct msi_msg msg;
-	unsigned int dest;
-	int ret;
-
-	ret = __ioapic_set_affinity(data, mask, &dest);
-	if (ret)
-		return ret;
-
-	__get_cached_msi_msg(data->msi_desc, &msg);
-
-	msg.data &= ~MSI_DATA_VECTOR_MASK;
-	msg.data |= MSI_DATA_VECTOR(cfg->vector);
-	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
-	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
-
-	__pci_write_msi_msg(data->msi_desc, &msg);
-
-	return IRQ_SET_MASK_OK_NOCOPY;
-}
-
-/*
- * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
- * which implement the MSI or MSI-X Capability Structure.
- */
-static struct irq_chip msi_chip = {
-	.name			= "PCI-MSI",
-	.irq_unmask		= pci_msi_unmask_irq,
-	.irq_mask		= pci_msi_mask_irq,
-	.irq_ack		= ack_apic_edge,
-	.irq_set_affinity	= msi_set_affinity,
-	.irq_retrigger		= ioapic_retrigger_irq,
-	.flags			= IRQCHIP_SKIP_SET_WAKE,
-};
-
-int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
-		  unsigned int irq_base, unsigned int irq_offset)
-{
-	struct irq_chip *chip = &msi_chip;
-	struct msi_msg msg;
-	unsigned int irq = irq_base + irq_offset;
-	int ret;
-
-	ret = msi_compose_msg(dev, irq, &msg, -1);
-	if (ret < 0)
-		return ret;
-
-	irq_set_msi_desc_off(irq_base, irq_offset, msidesc);
-
-	/*
-	 * MSI-X message is written per-IRQ, the offset is always 0.
-	 * MSI message denotes a contiguous group of IRQs, written for 0th IRQ.
-	 */
-	if (!irq_offset)
-		pci_write_msi_msg(irq, &msg);
-
-	setup_remapped_irq(irq, irq_cfg(irq), chip);
-
-	irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
-
-	dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);
-
-	return 0;
-}
-
-int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
-{
-	struct msi_desc *msidesc;
-	unsigned int irq;
-	int node, ret;
-
-	/* Multiple MSI vectors only supported with interrupt remapping */
-	if (type == PCI_CAP_ID_MSI && nvec > 1)
-		return 1;
-
-	node = dev_to_node(&dev->dev);
-
-	list_for_each_entry(msidesc, &dev->msi_list, list) {
-		irq = irq_alloc_hwirq(node);
-		if (!irq)
-			return -ENOSPC;
-
-		ret = setup_msi_irq(dev, msidesc, irq, 0);
-		if (ret < 0) {
-			irq_free_hwirq(irq);
-			return ret;
-		}
-
-	}
-	return 0;
-}
-
-void native_teardown_msi_irq(unsigned int irq)
-{
-	irq_free_hwirq(irq);
-}
-
-#ifdef CONFIG_DMAR_TABLE
-static int
-dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
-		      bool force)
-{
-	struct irq_cfg *cfg = data->chip_data;
-	unsigned int dest, irq = data->irq;
-	struct msi_msg msg;
-	int ret;
-
-	ret = __ioapic_set_affinity(data, mask, &dest);
-	if (ret)
-		return ret;
-
-	dmar_msi_read(irq, &msg);
-
-	msg.data &= ~MSI_DATA_VECTOR_MASK;
-	msg.data |= MSI_DATA_VECTOR(cfg->vector);
-	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
-	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
-	msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);
-
-	dmar_msi_write(irq, &msg);
-
-	return IRQ_SET_MASK_OK_NOCOPY;
-}
-
-static struct irq_chip dmar_msi_type = {
-	.name			= "DMAR_MSI",
-	.irq_unmask		= dmar_msi_unmask,
-	.irq_mask		= dmar_msi_mask,
-	.irq_ack		= ack_apic_edge,
-	.irq_set_affinity	= dmar_msi_set_affinity,
-	.irq_retrigger		= ioapic_retrigger_irq,
-	.flags			= IRQCHIP_SKIP_SET_WAKE,
-};
-
-int arch_setup_dmar_msi(unsigned int irq)
-{
-	int ret;
-	struct msi_msg msg;
-
-	ret = msi_compose_msg(NULL, irq, &msg, -1);
-	if (ret < 0)
-		return ret;
-	dmar_msi_write(irq, &msg);
-	irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
-				      "edge");
-	return 0;
-}
-#endif
-
-#ifdef CONFIG_HPET_TIMER
-
-static int hpet_msi_set_affinity(struct irq_data *data,
-				 const struct cpumask *mask, bool force)
-{
-	struct irq_cfg *cfg = data->chip_data;
-	struct msi_msg msg;
-	unsigned int dest;
-	int ret;
-
-	ret = __ioapic_set_affinity(data, mask, &dest);
-	if (ret)
-		return ret;
-
-	hpet_msi_read(data->handler_data, &msg);
-
-	msg.data &= ~MSI_DATA_VECTOR_MASK;
-	msg.data |= MSI_DATA_VECTOR(cfg->vector);
-	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
-	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
-
-	hpet_msi_write(data->handler_data, &msg);
-
-	return IRQ_SET_MASK_OK_NOCOPY;
-}
-
-static struct irq_chip hpet_msi_type = {
-	.name			= "HPET_MSI",
-	.irq_unmask		= hpet_msi_unmask,
-	.irq_mask		= hpet_msi_mask,
-	.irq_ack		= ack_apic_edge,
-	.irq_set_affinity	= hpet_msi_set_affinity,
-	.irq_retrigger		= ioapic_retrigger_irq,
-	.flags			= IRQCHIP_SKIP_SET_WAKE,
-};
-
-int default_setup_hpet_msi(unsigned int irq, unsigned int id)
-{
-	struct irq_chip *chip = &hpet_msi_type;
-	struct msi_msg msg;
-	int ret;
-
-	ret = msi_compose_msg(NULL, irq, &msg, id);
-	if (ret < 0)
-		return ret;
-
-	hpet_msi_write(irq_get_handler_data(irq), &msg);
-	irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
-	setup_remapped_irq(irq, irq_cfg(irq), chip);
-
-	irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
-	return 0;
-}
-#endif
-
-#endif /* CONFIG_PCI_MSI */
-/*
- * Hypertransport interrupt support
- */
-#ifdef CONFIG_HT_IRQ
-
-static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
-{
-	struct ht_irq_msg msg;
-	fetch_ht_irq_msg(irq, &msg);
-
-	msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
-	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
-
-	msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
-	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
-
-	write_ht_irq_msg(irq, &msg);
-}
-
-static int
-ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
-{
-	struct irq_cfg *cfg = data->chip_data;
-	unsigned int dest;
-	int ret;
-
-	ret = __ioapic_set_affinity(data, mask, &dest);
-	if (ret)
-		return ret;
-
-	target_ht_irq(data->irq, dest, cfg->vector);
-	return IRQ_SET_MASK_OK_NOCOPY;
-}
-
-static struct irq_chip ht_irq_chip = {
-	.name			= "PCI-HT",
-	.irq_mask		= mask_ht_irq,
-	.irq_unmask		= unmask_ht_irq,
-	.irq_ack		= ack_apic_edge,
-	.irq_set_affinity	= ht_set_affinity,
-	.irq_retrigger		= ioapic_retrigger_irq,
-	.flags			= IRQCHIP_SKIP_SET_WAKE,
-};
-
-int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
-{
-	struct irq_cfg *cfg;
-	struct ht_irq_msg msg;
-	unsigned dest;
-	int err;
-
-	if (disable_apic)
-		return -ENXIO;
-
-	cfg = irq_cfg(irq);
-	err = assign_irq_vector(irq, cfg, apic->target_cpus());
-	if (err)
-		return err;
-
-	err = apic->cpu_mask_to_apicid_and(cfg->domain,
-					   apic->target_cpus(), &dest);
-	if (err)
-		return err;
-
-	msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
-
-	msg.address_lo =
-		HT_IRQ_LOW_BASE |
-		HT_IRQ_LOW_DEST_ID(dest) |
-		HT_IRQ_LOW_VECTOR(cfg->vector) |
-		((apic->irq_dest_mode == 0) ?
-			HT_IRQ_LOW_DM_PHYSICAL :
-			HT_IRQ_LOW_DM_LOGICAL) |
-		HT_IRQ_LOW_RQEOI_EDGE |
-		((apic->irq_delivery_mode != dest_LowestPrio) ?
-			HT_IRQ_LOW_MT_FIXED :
-			HT_IRQ_LOW_MT_ARBITRATED) |
-		HT_IRQ_LOW_IRQ_MASKED;
-
-	write_ht_irq_msg(irq, &msg);
-
-	irq_set_chip_and_handler_name(irq, &ht_irq_chip,
-				      handle_edge_irq, "edge");
-
-	dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
-
-	return 0;
-}
-#endif /* CONFIG_HT_IRQ */
-
 static int
 io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr)
 {
@@ -3451,7 +2462,7 @@ io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr)
 	return ret;
 }
 
-static int __init io_apic_get_redir_entries(int ioapic)
+static int io_apic_get_redir_entries(int ioapic)
 {
 	union IO_APIC_reg_01	reg_01;
 	unsigned long flags;
@@ -3476,28 +2487,8 @@ unsigned int arch_dynirq_lower_bound(unsigned int from)
 	return ioapic_initialized ? ioapic_dynirq_base : gsi_top;
 }
 
-int __init arch_probe_nr_irqs(void)
-{
-	int nr;
-
-	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
-		nr_irqs = NR_VECTORS * nr_cpu_ids;
-
-	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
-#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
-	/*
-	 * for MSI and HT dyn irq
-	 */
-	nr += gsi_top * 16;
-#endif
-	if (nr < nr_irqs)
-		nr_irqs = nr;
-
-	return 0;
-}
-
 #ifdef CONFIG_X86_32
-static int __init io_apic_get_unique_id(int ioapic, int apic_id)
+static int io_apic_get_unique_id(int ioapic, int apic_id)
 {
 	union IO_APIC_reg_00 reg_00;
 	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
@@ -3572,30 +2563,63 @@ static int __init io_apic_get_unique_id(int ioapic, int apic_id)
3572 return apic_id; 2563 return apic_id;
3573} 2564}
3574 2565
3575static u8 __init io_apic_unique_id(u8 id) 2566static u8 io_apic_unique_id(int idx, u8 id)
3576{ 2567{
3577 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && 2568 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
3578 !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) 2569 !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
3579 return io_apic_get_unique_id(nr_ioapics, id); 2570 return io_apic_get_unique_id(idx, id);
3580 else 2571 else
3581 return id; 2572 return id;
3582} 2573}
3583#else 2574#else
3584static u8 __init io_apic_unique_id(u8 id) 2575static u8 io_apic_unique_id(int idx, u8 id)
3585{ 2576{
3586 int i; 2577 union IO_APIC_reg_00 reg_00;
3587 DECLARE_BITMAP(used, 256); 2578 DECLARE_BITMAP(used, 256);
2579 unsigned long flags;
2580 u8 new_id;
2581 int i;
3588 2582
3589 bitmap_zero(used, 256); 2583 bitmap_zero(used, 256);
3590 for_each_ioapic(i) 2584 for_each_ioapic(i)
3591 __set_bit(mpc_ioapic_id(i), used); 2585 __set_bit(mpc_ioapic_id(i), used);
2586
2587 /* Hand out the requested id if available */
3592 if (!test_bit(id, used)) 2588 if (!test_bit(id, used))
3593 return id; 2589 return id;
3594 return find_first_zero_bit(used, 256); 2590
2591 /*
2592 * Read the current id from the ioapic and keep it if
2593 * available.
2594 */
2595 raw_spin_lock_irqsave(&ioapic_lock, flags);
2596 reg_00.raw = io_apic_read(idx, 0);
2597 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2598 new_id = reg_00.bits.ID;
2599 if (!test_bit(new_id, used)) {
2600 apic_printk(APIC_VERBOSE, KERN_INFO
2601 "IOAPIC[%d]: Using reg apic_id %d instead of %d\n",
2602 idx, new_id, id);
2603 return new_id;
2604 }
2605
2606 /*
2607 * Get the next free id and write it to the ioapic.
2608 */
2609 new_id = find_first_zero_bit(used, 256);
2610 reg_00.bits.ID = new_id;
2611 raw_spin_lock_irqsave(&ioapic_lock, flags);
2612 io_apic_write(idx, 0, reg_00.raw);
2613 reg_00.raw = io_apic_read(idx, 0);
2614 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2615 /* Sanity check */
2616 BUG_ON(reg_00.bits.ID != new_id);
2617
2618 return new_id;
3595} 2619}
3596#endif 2620#endif
3597 2621
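The 64-bit variant of io_apic_unique_id() above is a three-step bitmap allocation: hand out the requested ID if it is free, otherwise keep the ID the hardware already reports, and only then fall back to the first free bit. A minimal userspace sketch of that decision order, with a plain bool array standing in for the kernel's DECLARE_BITMAP/find_first_zero_bit helpers:

	#include <stdbool.h>
	#include <stdio.h>

	#define NR_IDS 256

	static bool used[NR_IDS];	/* stand-in for DECLARE_BITMAP(used, 256) */

	/* reg_id: the ID the IOAPIC currently reports in its ID register. */
	static int pick_unique_id(int requested, int reg_id)
	{
		int i;

		if (!used[requested])		/* requested id still free */
			return requested;
		if (!used[reg_id])		/* keep what the hardware has */
			return reg_id;
		for (i = 0; i < NR_IDS; i++)	/* else first free bit wins */
			if (!used[i])
				return i;
		return -1;			/* all 256 ids taken */
	}

	int main(void)
	{
		used[2] = used[3] = true;
		printf("%d\n", pick_unique_id(2, 3));	/* prints 0: both taken */
		return 0;
	}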
3598static int __init io_apic_get_version(int ioapic) 2622static int io_apic_get_version(int ioapic)
3599{ 2623{
3600 union IO_APIC_reg_01 reg_01; 2624 union IO_APIC_reg_01 reg_01;
3601 unsigned long flags; 2625 unsigned long flags;
@@ -3702,6 +2726,7 @@ static struct resource * __init ioapic_setup_resources(void)
3702 snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); 2726 snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
3703 mem += IOAPIC_RESOURCE_NAME_SIZE; 2727 mem += IOAPIC_RESOURCE_NAME_SIZE;
3704 num++; 2728 num++;
2729 ioapics[i].iomem_res = res;
3705 } 2730 }
3706 2731
3707 ioapic_resources = res; 2732 ioapic_resources = res;
@@ -3799,21 +2824,7 @@ int mp_find_ioapic_pin(int ioapic, u32 gsi)
3799 return gsi - gsi_cfg->gsi_base; 2824 return gsi - gsi_cfg->gsi_base;
3800} 2825}
3801 2826
3802static __init int bad_ioapic(unsigned long address) 2827static int bad_ioapic_register(int idx)
3803{
3804 if (nr_ioapics >= MAX_IO_APICS) {
3805 pr_warn("WARNING: Max # of I/O APICs (%d) exceeded (found %d), skipping\n",
3806 MAX_IO_APICS, nr_ioapics);
3807 return 1;
3808 }
3809 if (!address) {
3810 pr_warn("WARNING: Bogus (zero) I/O APIC address found in table, skipping!\n");
3811 return 1;
3812 }
3813 return 0;
3814}
3815
3816static __init int bad_ioapic_register(int idx)
3817{ 2828{
3818 union IO_APIC_reg_00 reg_00; 2829 union IO_APIC_reg_00 reg_00;
3819 union IO_APIC_reg_01 reg_01; 2830 union IO_APIC_reg_01 reg_01;
@@ -3832,32 +2843,61 @@ static __init int bad_ioapic_register(int idx)
3832 return 0; 2843 return 0;
3833} 2844}
3834 2845
3835void __init mp_register_ioapic(int id, u32 address, u32 gsi_base, 2846static int find_free_ioapic_entry(void)
3836 struct ioapic_domain_cfg *cfg)
3837{ 2847{
3838 int idx = 0; 2848 int idx;
3839 int entries; 2849
2850 for (idx = 0; idx < MAX_IO_APICS; idx++)
2851 if (ioapics[idx].nr_registers == 0)
2852 return idx;
2853
2854 return MAX_IO_APICS;
2855}
2856
2857/**
2858 * mp_register_ioapic - Register an IOAPIC device
2859 * @id: hardware IOAPIC ID
2860 * @address: physical address of IOAPIC register area
2861 * @gsi_base: base of GSI associated with the IOAPIC
2862 * @cfg: configuration information for the IOAPIC
2863 */
2864int mp_register_ioapic(int id, u32 address, u32 gsi_base,
2865 struct ioapic_domain_cfg *cfg)
2866{
2867 bool hotplug = !!ioapic_initialized;
3840 struct mp_ioapic_gsi *gsi_cfg; 2868 struct mp_ioapic_gsi *gsi_cfg;
2869 int idx, ioapic, entries;
2870 u32 gsi_end;
3841 2871
3842 if (bad_ioapic(address)) 2872 if (!address) {
3843 return; 2873 pr_warn("Bogus (zero) I/O APIC address found, skipping!\n");
2874 return -EINVAL;
2875 }
2876 for_each_ioapic(ioapic)
2877 if (ioapics[ioapic].mp_config.apicaddr == address) {
2878 pr_warn("address 0x%x conflicts with IOAPIC%d\n",
2879 address, ioapic);
2880 return -EEXIST;
2881 }
3844 2882
3845 idx = nr_ioapics; 2883 idx = find_free_ioapic_entry();
2884 if (idx >= MAX_IO_APICS) {
2885 pr_warn("Max # of I/O APICs (%d) exceeded (found %d), skipping\n",
2886 MAX_IO_APICS, idx);
2887 return -ENOSPC;
2888 }
3846 2889
3847 ioapics[idx].mp_config.type = MP_IOAPIC; 2890 ioapics[idx].mp_config.type = MP_IOAPIC;
3848 ioapics[idx].mp_config.flags = MPC_APIC_USABLE; 2891 ioapics[idx].mp_config.flags = MPC_APIC_USABLE;
3849 ioapics[idx].mp_config.apicaddr = address; 2892 ioapics[idx].mp_config.apicaddr = address;
3850 ioapics[idx].irqdomain = NULL;
3851 ioapics[idx].irqdomain_cfg = *cfg;
3852 2893
3853 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); 2894 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
3854
3855 if (bad_ioapic_register(idx)) { 2895 if (bad_ioapic_register(idx)) {
3856 clear_fixmap(FIX_IO_APIC_BASE_0 + idx); 2896 clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
3857 return; 2897 return -ENODEV;
3858 } 2898 }
3859 2899
3860 ioapics[idx].mp_config.apicid = io_apic_unique_id(id); 2900 ioapics[idx].mp_config.apicid = io_apic_unique_id(idx, id);
3861 ioapics[idx].mp_config.apicver = io_apic_get_version(idx); 2901 ioapics[idx].mp_config.apicver = io_apic_get_version(idx);
3862 2902
3863 /* 2903 /*
@@ -3865,24 +2905,112 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base,
3865 * and to prevent reprogramming of IOAPIC pins (PCI GSIs). 2905 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
3866 */ 2906 */
3867 entries = io_apic_get_redir_entries(idx); 2907 entries = io_apic_get_redir_entries(idx);
2908 gsi_end = gsi_base + entries - 1;
2909 for_each_ioapic(ioapic) {
2910 gsi_cfg = mp_ioapic_gsi_routing(ioapic);
2911 if ((gsi_base >= gsi_cfg->gsi_base &&
2912 gsi_base <= gsi_cfg->gsi_end) ||
2913 (gsi_end >= gsi_cfg->gsi_base &&
2914 gsi_end <= gsi_cfg->gsi_end)) {
2915 pr_warn("GSI range [%u-%u] for new IOAPIC conflicts with GSI[%u-%u]\n",
2916 gsi_base, gsi_end,
2917 gsi_cfg->gsi_base, gsi_cfg->gsi_end);
2918 clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
2919 return -ENOSPC;
2920 }
2921 }
3868 gsi_cfg = mp_ioapic_gsi_routing(idx); 2922 gsi_cfg = mp_ioapic_gsi_routing(idx);
3869 gsi_cfg->gsi_base = gsi_base; 2923 gsi_cfg->gsi_base = gsi_base;
3870 gsi_cfg->gsi_end = gsi_base + entries - 1; 2924 gsi_cfg->gsi_end = gsi_end;
2925
2926 ioapics[idx].irqdomain = NULL;
2927 ioapics[idx].irqdomain_cfg = *cfg;
3871 2928
3872 /* 2929 /*
3873 * The number of IO-APIC IRQ registers (== #pins): 2930 * If mp_register_ioapic() is called during early boot stage when
2931 * walking ACPI/SFI/DT tables, it's too early to create irqdomain,
2932 * we are still using bootmem allocator. So delay it to setup_IO_APIC().
3874 */ 2933 */
3875 ioapics[idx].nr_registers = entries; 2934 if (hotplug) {
2935 if (mp_irqdomain_create(idx)) {
2936 clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
2937 return -ENOMEM;
2938 }
2939 alloc_ioapic_saved_registers(idx);
2940 }
3876 2941
3877 if (gsi_cfg->gsi_end >= gsi_top) 2942 if (gsi_cfg->gsi_end >= gsi_top)
3878 gsi_top = gsi_cfg->gsi_end + 1; 2943 gsi_top = gsi_cfg->gsi_end + 1;
2944 if (nr_ioapics <= idx)
2945 nr_ioapics = idx + 1;
2946
2947 /* Set nr_registers to mark entry present */
2948 ioapics[idx].nr_registers = entries;
3879 2949
3880 pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, GSI %d-%d\n", 2950 pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, GSI %d-%d\n",
3881 idx, mpc_ioapic_id(idx), 2951 idx, mpc_ioapic_id(idx),
3882 mpc_ioapic_ver(idx), mpc_ioapic_addr(idx), 2952 mpc_ioapic_ver(idx), mpc_ioapic_addr(idx),
3883 gsi_cfg->gsi_base, gsi_cfg->gsi_end); 2953 gsi_cfg->gsi_base, gsi_cfg->gsi_end);
3884 2954
3885 nr_ioapics++; 2955 return 0;
2956}
2957
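mp_register_ioapic() rejects a new IOAPIC whose GSI window touches an existing one by testing whether either endpoint of the new [gsi_base, gsi_end] lands inside a registered range. For comparison, the fully general closed-interval overlap predicate is b1 <= e2 && b2 <= e1, which would additionally catch a new window that strictly contains an old one; a small standalone sketch (function name hypothetical):

	#include <stdbool.h>
	#include <stdio.h>

	/* Closed intervals [b, e]; true when they share at least one GSI. */
	static bool gsi_ranges_overlap(unsigned b1, unsigned e1,
				       unsigned b2, unsigned e2)
	{
		return b1 <= e2 && b2 <= e1;
	}

	int main(void)
	{
		/* New IOAPIC at GSI 24-47 vs. an existing one at 0-23: ok. */
		printf("%d\n", gsi_ranges_overlap(24, 47, 0, 23));	/* 0 */
		/* New window 16-39 straddles the existing 0-23: conflict. */
		printf("%d\n", gsi_ranges_overlap(16, 39, 0, 23));	/* 1 */
		return 0;
	}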
2958int mp_unregister_ioapic(u32 gsi_base)
2959{
2960 int ioapic, pin;
2961 int found = 0;
2962 struct mp_pin_info *pin_info;
2963
2964 for_each_ioapic(ioapic)
2965 if (ioapics[ioapic].gsi_config.gsi_base == gsi_base) {
2966 found = 1;
2967 break;
2968 }
2969 if (!found) {
2970 pr_warn("can't find IOAPIC for GSI %d\n", gsi_base);
2971 return -ENODEV;
2972 }
2973
2974 for_each_pin(ioapic, pin) {
2975 pin_info = mp_pin_info(ioapic, pin);
2976 if (pin_info->count) {
2977 pr_warn("pin%d on IOAPIC%d is still in use.\n",
2978 pin, ioapic);
2979 return -EBUSY;
2980 }
2981 }
2982
2983 /* Mark entry not present */
2984 ioapics[ioapic].nr_registers = 0;
2985 ioapic_destroy_irqdomain(ioapic);
2986 free_ioapic_saved_registers(ioapic);
2987 if (ioapics[ioapic].iomem_res)
2988 release_resource(ioapics[ioapic].iomem_res);
2989 clear_fixmap(FIX_IO_APIC_BASE_0 + ioapic);
2990 memset(&ioapics[ioapic], 0, sizeof(ioapics[ioapic]));
2991
2992 return 0;
2993}
2994
2995int mp_ioapic_registered(u32 gsi_base)
2996{
2997 int ioapic;
2998
2999 for_each_ioapic(ioapic)
3000 if (ioapics[ioapic].gsi_config.gsi_base == gsi_base)
3001 return 1;
3002
3003 return 0;
3004}
3005
3006static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
3007 int ioapic, int ioapic_pin,
3008 int trigger, int polarity)
3009{
3010 irq_attr->ioapic = ioapic;
3011 irq_attr->ioapic_pin = ioapic_pin;
3012 irq_attr->trigger = trigger;
3013 irq_attr->polarity = polarity;
3886} 3014}
3887 3015
3888int mp_irqdomain_map(struct irq_domain *domain, unsigned int virq, 3016int mp_irqdomain_map(struct irq_domain *domain, unsigned int virq,
@@ -3931,7 +3059,7 @@ void mp_irqdomain_unmap(struct irq_domain *domain, unsigned int virq)
3931 3059
3932 ioapic_mask_entry(ioapic, pin); 3060 ioapic_mask_entry(ioapic, pin);
3933 __remove_pin_from_irq(cfg, ioapic, pin); 3061 __remove_pin_from_irq(cfg, ioapic, pin);
3934 WARN_ON(cfg->irq_2_pin != NULL); 3062 WARN_ON(!list_empty(&cfg->irq_2_pin));
3935 arch_teardown_hwirq(virq); 3063 arch_teardown_hwirq(virq);
3936} 3064}
3937 3065
@@ -3964,18 +3092,6 @@ int mp_set_gsi_attr(u32 gsi, int trigger, int polarity, int node)
3964 return ret; 3092 return ret;
3965} 3093}
3966 3094
3967bool mp_should_keep_irq(struct device *dev)
3968{
3969 if (dev->power.is_prepared)
3970 return true;
3971#ifdef CONFIG_PM
3972 if (dev->power.runtime_status == RPM_SUSPENDING)
3973 return true;
3974#endif
3975
3976 return false;
3977}
3978
3979/* Enable IOAPIC early just for system timer */ 3095/* Enable IOAPIC early just for system timer */
3980void __init pre_init_apic_IRQ0(void) 3096void __init pre_init_apic_IRQ0(void)
3981{ 3097{
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
new file mode 100644
index 000000000000..d6ba2d660dc5
--- /dev/null
+++ b/arch/x86/kernel/apic/msi.c
@@ -0,0 +1,286 @@
1/*
2 * Support of MSI, HPET and DMAR interrupts.
3 *
4 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
5 * Moved from arch/x86/kernel/apic/io_apic.c.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/mm.h>
12#include <linux/interrupt.h>
13#include <linux/pci.h>
14#include <linux/dmar.h>
15#include <linux/hpet.h>
16#include <linux/msi.h>
17#include <asm/msidef.h>
18#include <asm/hpet.h>
19#include <asm/hw_irq.h>
20#include <asm/apic.h>
21#include <asm/irq_remapping.h>
22
23void native_compose_msi_msg(struct pci_dev *pdev,
24 unsigned int irq, unsigned int dest,
25 struct msi_msg *msg, u8 hpet_id)
26{
27 struct irq_cfg *cfg = irq_cfg(irq);
28
29 msg->address_hi = MSI_ADDR_BASE_HI;
30
31 if (x2apic_enabled())
32 msg->address_hi |= MSI_ADDR_EXT_DEST_ID(dest);
33
34 msg->address_lo =
35 MSI_ADDR_BASE_LO |
36 ((apic->irq_dest_mode == 0) ?
37 MSI_ADDR_DEST_MODE_PHYSICAL :
38 MSI_ADDR_DEST_MODE_LOGICAL) |
39 ((apic->irq_delivery_mode != dest_LowestPrio) ?
40 MSI_ADDR_REDIRECTION_CPU :
41 MSI_ADDR_REDIRECTION_LOWPRI) |
42 MSI_ADDR_DEST_ID(dest);
43
44 msg->data =
45 MSI_DATA_TRIGGER_EDGE |
46 MSI_DATA_LEVEL_ASSERT |
47 ((apic->irq_delivery_mode != dest_LowestPrio) ?
48 MSI_DATA_DELIVERY_FIXED :
49 MSI_DATA_DELIVERY_LOWPRI) |
50 MSI_DATA_VECTOR(cfg->vector);
51}
52
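native_compose_msi_msg() is pure bit packing against the x86 MSI format: address bits 31:20 select the 0xFEE APIC window, bits 19:12 carry the destination APIC ID, and the data word carries the vector in its low byte. A freestanding sketch of just those three fields (constants written out here rather than taken from <asm/msidef.h>; the delivery-mode, destination-mode and redirection bits composed above are omitted):

	#include <stdint.h>
	#include <stdio.h>

	struct msi_msg_sketch { uint32_t address_lo, address_hi, data; };

	static void compose(struct msi_msg_sketch *msg, uint8_t dest,
			    uint8_t vector)
	{
		msg->address_hi = 0;			/* MSI_ADDR_BASE_HI */
		msg->address_lo = 0xfee00000u		/* MSI_ADDR_BASE_LO */
				| ((uint32_t)dest << 12); /* dest ID, bits 19:12 */
		msg->data = vector;			/* vector, bits 7:0 */
	}

	int main(void)
	{
		struct msi_msg_sketch m;

		compose(&m, 1, 0x41);
		printf("addr_lo 0x%08x data 0x%08x\n", m.address_lo, m.data);
		return 0;
	}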
53static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
54 struct msi_msg *msg, u8 hpet_id)
55{
56 struct irq_cfg *cfg;
57 int err;
58 unsigned dest;
59
60 if (disable_apic)
61 return -ENXIO;
62
63 cfg = irq_cfg(irq);
64 err = assign_irq_vector(irq, cfg, apic->target_cpus());
65 if (err)
66 return err;
67
68 err = apic->cpu_mask_to_apicid_and(cfg->domain,
69 apic->target_cpus(), &dest);
70 if (err)
71 return err;
72
73 x86_msi.compose_msi_msg(pdev, irq, dest, msg, hpet_id);
74
75 return 0;
76}
77
78static int
79msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
80{
81 struct irq_cfg *cfg = irqd_cfg(data);
82 struct msi_msg msg;
83 unsigned int dest;
84 int ret;
85
86 ret = apic_set_affinity(data, mask, &dest);
87 if (ret)
88 return ret;
89
90 __get_cached_msi_msg(data->msi_desc, &msg);
91
92 msg.data &= ~MSI_DATA_VECTOR_MASK;
93 msg.data |= MSI_DATA_VECTOR(cfg->vector);
94 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
95 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
96
97 __pci_write_msi_msg(data->msi_desc, &msg);
98
99 return IRQ_SET_MASK_OK_NOCOPY;
100}
101
102/*
103 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
104 * which implement the MSI or MSI-X Capability Structure.
105 */
106static struct irq_chip msi_chip = {
107 .name = "PCI-MSI",
108 .irq_unmask = pci_msi_unmask_irq,
109 .irq_mask = pci_msi_mask_irq,
110 .irq_ack = apic_ack_edge,
111 .irq_set_affinity = msi_set_affinity,
112 .irq_retrigger = apic_retrigger_irq,
113 .flags = IRQCHIP_SKIP_SET_WAKE,
114};
115
116int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
117 unsigned int irq_base, unsigned int irq_offset)
118{
119 struct irq_chip *chip = &msi_chip;
120 struct msi_msg msg;
121 unsigned int irq = irq_base + irq_offset;
122 int ret;
123
124 ret = msi_compose_msg(dev, irq, &msg, -1);
125 if (ret < 0)
126 return ret;
127
128 irq_set_msi_desc_off(irq_base, irq_offset, msidesc);
129
130 /*
131 * MSI-X message is written per-IRQ, the offset is always 0.
132 * MSI message denotes a contiguous group of IRQs, written for 0th IRQ.
133 */
134 if (!irq_offset)
135 pci_write_msi_msg(irq, &msg);
136
137 setup_remapped_irq(irq, irq_cfg(irq), chip);
138
139 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
140
141 dev_dbg(&dev->dev, "irq %d for MSI/MSI-X\n", irq);
142
143 return 0;
144}
145
146int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
147{
148 struct msi_desc *msidesc;
149 unsigned int irq;
150 int node, ret;
151
 152	/* Multiple MSI vectors are only supported with interrupt remapping */
153 if (type == PCI_CAP_ID_MSI && nvec > 1)
154 return 1;
155
156 node = dev_to_node(&dev->dev);
157
158 list_for_each_entry(msidesc, &dev->msi_list, list) {
159 irq = irq_alloc_hwirq(node);
160 if (!irq)
161 return -ENOSPC;
162
163 ret = setup_msi_irq(dev, msidesc, irq, 0);
164 if (ret < 0) {
165 irq_free_hwirq(irq);
166 return ret;
167 }
168
169 }
170 return 0;
171}
172
173void native_teardown_msi_irq(unsigned int irq)
174{
175 irq_free_hwirq(irq);
176}
177
178#ifdef CONFIG_DMAR_TABLE
179static int
180dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
181 bool force)
182{
183 struct irq_cfg *cfg = irqd_cfg(data);
184 unsigned int dest, irq = data->irq;
185 struct msi_msg msg;
186 int ret;
187
188 ret = apic_set_affinity(data, mask, &dest);
189 if (ret)
190 return ret;
191
192 dmar_msi_read(irq, &msg);
193
194 msg.data &= ~MSI_DATA_VECTOR_MASK;
195 msg.data |= MSI_DATA_VECTOR(cfg->vector);
196 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
197 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
198 msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);
199
200 dmar_msi_write(irq, &msg);
201
202 return IRQ_SET_MASK_OK_NOCOPY;
203}
204
205static struct irq_chip dmar_msi_type = {
206 .name = "DMAR_MSI",
207 .irq_unmask = dmar_msi_unmask,
208 .irq_mask = dmar_msi_mask,
209 .irq_ack = apic_ack_edge,
210 .irq_set_affinity = dmar_msi_set_affinity,
211 .irq_retrigger = apic_retrigger_irq,
212 .flags = IRQCHIP_SKIP_SET_WAKE,
213};
214
215int arch_setup_dmar_msi(unsigned int irq)
216{
217 int ret;
218 struct msi_msg msg;
219
220 ret = msi_compose_msg(NULL, irq, &msg, -1);
221 if (ret < 0)
222 return ret;
223 dmar_msi_write(irq, &msg);
224 irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
225 "edge");
226 return 0;
227}
228#endif
229
230/*
231 * MSI message composition
232 */
233#ifdef CONFIG_HPET_TIMER
234
235static int hpet_msi_set_affinity(struct irq_data *data,
236 const struct cpumask *mask, bool force)
237{
238 struct irq_cfg *cfg = irqd_cfg(data);
239 struct msi_msg msg;
240 unsigned int dest;
241 int ret;
242
243 ret = apic_set_affinity(data, mask, &dest);
244 if (ret)
245 return ret;
246
247 hpet_msi_read(data->handler_data, &msg);
248
249 msg.data &= ~MSI_DATA_VECTOR_MASK;
250 msg.data |= MSI_DATA_VECTOR(cfg->vector);
251 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
252 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
253
254 hpet_msi_write(data->handler_data, &msg);
255
256 return IRQ_SET_MASK_OK_NOCOPY;
257}
258
259static struct irq_chip hpet_msi_type = {
260 .name = "HPET_MSI",
261 .irq_unmask = hpet_msi_unmask,
262 .irq_mask = hpet_msi_mask,
263 .irq_ack = apic_ack_edge,
264 .irq_set_affinity = hpet_msi_set_affinity,
265 .irq_retrigger = apic_retrigger_irq,
266 .flags = IRQCHIP_SKIP_SET_WAKE,
267};
268
269int default_setup_hpet_msi(unsigned int irq, unsigned int id)
270{
271 struct irq_chip *chip = &hpet_msi_type;
272 struct msi_msg msg;
273 int ret;
274
275 ret = msi_compose_msg(NULL, irq, &msg, id);
276 if (ret < 0)
277 return ret;
278
279 hpet_msi_write(irq_get_handler_data(irq), &msg);
280 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
281 setup_remapped_irq(irq, irq_cfg(irq), chip);
282
283 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
284 return 0;
285}
286#endif
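All three set_affinity callbacks in msi.c follow the same retarget pattern: assign a new vector, then patch only the vector byte of the data word and the destination-ID field of address_lo before writing the cached message back. A sketch of the patch step; the mask values here are illustrative (the kernel's authoritative definitions live in <asm/msidef.h>), with the destination assumed to be the 8-bit xAPIC ID in bits 19:12:

	#include <stdint.h>
	#include <stdio.h>

	#define DATA_VECTOR_MASK	0x000000ffu	/* illustrative */
	#define ADDR_DEST_ID_MASK	0x000ff000u	/* illustrative */

	static void retarget(uint32_t *data, uint32_t *address_lo,
			     uint8_t new_vector, uint8_t new_dest)
	{
		*data = (*data & ~DATA_VECTOR_MASK) | new_vector;
		*address_lo = (*address_lo & ~ADDR_DEST_ID_MASK)
			    | ((uint32_t)new_dest << 12);
	}

	int main(void)
	{
		uint32_t data = 0x41, addr = 0xfee01000;

		retarget(&data, &addr, 0x51, 3);  /* move to vector 0x51, APIC 3 */
		printf("data 0x%08x addr 0x%08x\n", data, addr);
		return 0;
	}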
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
new file mode 100644
index 000000000000..6cedd7914581
--- /dev/null
+++ b/arch/x86/kernel/apic/vector.c
@@ -0,0 +1,719 @@
1/*
2 * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
3 *
4 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
5 * Moved from arch/x86/kernel/apic/io_apic.c.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/interrupt.h>
12#include <linux/init.h>
13#include <linux/compiler.h>
14#include <linux/irqdomain.h>
15#include <linux/slab.h>
16#include <asm/hw_irq.h>
17#include <asm/apic.h>
18#include <asm/i8259.h>
19#include <asm/desc.h>
20#include <asm/irq_remapping.h>
21
22static DEFINE_RAW_SPINLOCK(vector_lock);
23
24void lock_vector_lock(void)
25{
 26	/* Used so that the online set of cpus does not change
27 * during assign_irq_vector.
28 */
29 raw_spin_lock(&vector_lock);
30}
31
32void unlock_vector_lock(void)
33{
34 raw_spin_unlock(&vector_lock);
35}
36
37struct irq_cfg *irq_cfg(unsigned int irq)
38{
39 return irq_get_chip_data(irq);
40}
41
42struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
43{
44 return irq_data->chip_data;
45}
46
47static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
48{
49 struct irq_cfg *cfg;
50
51 cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
52 if (!cfg)
53 return NULL;
54 if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
55 goto out_cfg;
56 if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
57 goto out_domain;
58#ifdef CONFIG_X86_IO_APIC
59 INIT_LIST_HEAD(&cfg->irq_2_pin);
60#endif
61 return cfg;
62out_domain:
63 free_cpumask_var(cfg->domain);
64out_cfg:
65 kfree(cfg);
66 return NULL;
67}
68
69struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
70{
71 int res = irq_alloc_desc_at(at, node);
72 struct irq_cfg *cfg;
73
74 if (res < 0) {
75 if (res != -EEXIST)
76 return NULL;
77 cfg = irq_cfg(at);
78 if (cfg)
79 return cfg;
80 }
81
82 cfg = alloc_irq_cfg(at, node);
83 if (cfg)
84 irq_set_chip_data(at, cfg);
85 else
86 irq_free_desc(at);
87 return cfg;
88}
89
90static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
91{
92 if (!cfg)
93 return;
94 irq_set_chip_data(at, NULL);
95 free_cpumask_var(cfg->domain);
96 free_cpumask_var(cfg->old_domain);
97 kfree(cfg);
98}
99
100static int
101__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
102{
103 /*
104 * NOTE! The local APIC isn't very good at handling
105 * multiple interrupts at the same interrupt level.
106 * As the interrupt level is determined by taking the
107 * vector number and shifting that right by 4, we
108 * want to spread these out a bit so that they don't
109 * all fall in the same interrupt level.
110 *
111 * Also, we've got to be careful not to trash gate
112 * 0x80, because int 0x80 is hm, kind of importantish. ;)
113 */
114 static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
115 static int current_offset = VECTOR_OFFSET_START % 16;
116 int cpu, err;
117 cpumask_var_t tmp_mask;
118
119 if (cfg->move_in_progress)
120 return -EBUSY;
121
122 if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
123 return -ENOMEM;
124
125 /* Only try and allocate irqs on cpus that are present */
126 err = -ENOSPC;
127 cpumask_clear(cfg->old_domain);
128 cpu = cpumask_first_and(mask, cpu_online_mask);
129 while (cpu < nr_cpu_ids) {
130 int new_cpu, vector, offset;
131
132 apic->vector_allocation_domain(cpu, tmp_mask, mask);
133
134 if (cpumask_subset(tmp_mask, cfg->domain)) {
135 err = 0;
136 if (cpumask_equal(tmp_mask, cfg->domain))
137 break;
138 /*
139 * New cpumask using the vector is a proper subset of
 140		 * the current in-use mask. So clean up the vector
141 * allocation for the members that are not used anymore.
142 */
143 cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
144 cfg->move_in_progress =
145 cpumask_intersects(cfg->old_domain, cpu_online_mask);
146 cpumask_and(cfg->domain, cfg->domain, tmp_mask);
147 break;
148 }
149
150 vector = current_vector;
151 offset = current_offset;
152next:
153 vector += 16;
154 if (vector >= first_system_vector) {
155 offset = (offset + 1) % 16;
156 vector = FIRST_EXTERNAL_VECTOR + offset;
157 }
158
159 if (unlikely(current_vector == vector)) {
160 cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
161 cpumask_andnot(tmp_mask, mask, cfg->old_domain);
162 cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
163 continue;
164 }
165
166 if (test_bit(vector, used_vectors))
167 goto next;
168
169 for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
170 if (per_cpu(vector_irq, new_cpu)[vector] >
171 VECTOR_UNDEFINED)
172 goto next;
173 }
174 /* Found one! */
175 current_vector = vector;
176 current_offset = offset;
177 if (cfg->vector) {
178 cpumask_copy(cfg->old_domain, cfg->domain);
179 cfg->move_in_progress =
180 cpumask_intersects(cfg->old_domain, cpu_online_mask);
181 }
182 for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
183 per_cpu(vector_irq, new_cpu)[vector] = irq;
184 cfg->vector = vector;
185 cpumask_copy(cfg->domain, tmp_mask);
186 err = 0;
187 break;
188 }
189 free_cpumask_var(tmp_mask);
190
191 return err;
192}
193
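The comment at the top of __assign_irq_vector() explains the search stride: the local APIC derives an interrupt's priority class from vector >> 4, so candidates are probed 16 apart and the 4-bit offset only advances when a pass wraps past first_system_vector. A toy enumeration of that probe order, under assumed bounds (FIRST_EXTERNAL_VECTOR = 0x20, first_system_vector stand-in 0xef, VECTOR_OFFSET_START assumed 1):

	#include <stdio.h>

	#define FIRST_EXTERNAL_VECTOR	0x20
	#define FIRST_SYSTEM_VECTOR	0xef	/* assumed stand-in */

	int main(void)
	{
		int vector = FIRST_EXTERNAL_VECTOR + 1;	/* VECTOR_OFFSET_START */
		int offset = 1 % 16;
		int n;

		for (n = 0; n < 16; n++) {
			vector += 16;
			if (vector >= FIRST_SYSTEM_VECTOR) {
				/* wrapped: move to the next 4-bit offset */
				offset = (offset + 1) % 16;
				vector = FIRST_EXTERNAL_VECTOR + offset;
			}
			printf("probe %2d: vector 0x%02x (priority class %d)\n",
			       n, vector, vector >> 4);
		}
		return 0;
	}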
194int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
195{
196 int err;
197 unsigned long flags;
198
199 raw_spin_lock_irqsave(&vector_lock, flags);
200 err = __assign_irq_vector(irq, cfg, mask);
201 raw_spin_unlock_irqrestore(&vector_lock, flags);
202 return err;
203}
204
205void clear_irq_vector(int irq, struct irq_cfg *cfg)
206{
207 int cpu, vector;
208 unsigned long flags;
209
210 raw_spin_lock_irqsave(&vector_lock, flags);
211 BUG_ON(!cfg->vector);
212
213 vector = cfg->vector;
214 for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
215 per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
216
217 cfg->vector = 0;
218 cpumask_clear(cfg->domain);
219
220 if (likely(!cfg->move_in_progress)) {
221 raw_spin_unlock_irqrestore(&vector_lock, flags);
222 return;
223 }
224
225 for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
226 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
227 vector++) {
228 if (per_cpu(vector_irq, cpu)[vector] != irq)
229 continue;
230 per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
231 break;
232 }
233 }
234 cfg->move_in_progress = 0;
235 raw_spin_unlock_irqrestore(&vector_lock, flags);
236}
237
238int __init arch_probe_nr_irqs(void)
239{
240 int nr;
241
242 if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
243 nr_irqs = NR_VECTORS * nr_cpu_ids;
244
245 nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
246#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
247 /*
248 * for MSI and HT dyn irq
249 */
250 if (gsi_top <= NR_IRQS_LEGACY)
251 nr += 8 * nr_cpu_ids;
252 else
253 nr += gsi_top * 16;
254#endif
255 if (nr < nr_irqs)
256 nr_irqs = nr;
257
258 return nr_legacy_irqs();
259}
260
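As a worked example of the estimate above (numbers illustrative): with nr_cpu_ids = 8, gsi_top = 40 and 16 legacy IRQs, nr starts at (40 + 16) + 8 * 8 = 120; since gsi_top exceeds NR_IRQS_LEGACY, the MSI/HT term adds 40 * 16 = 640, giving a cap of 760, and nr_irqs is lowered to that only if it was larger. Note the moved function now returns nr_legacy_irqs() rather than 0, so the generic irq core pre-allocates descriptors only for the legacy IRQs.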
261int __init arch_early_irq_init(void)
262{
263 return arch_early_ioapic_init();
264}
265
266static void __setup_vector_irq(int cpu)
267{
268 /* Initialize vector_irq on a new cpu */
269 int irq, vector;
270 struct irq_cfg *cfg;
271
272 /*
273 * vector_lock will make sure that we don't run into irq vector
274 * assignments that might be happening on another cpu in parallel,
 275	 * while we set up our initial vector-to-irq mappings.
276 */
277 raw_spin_lock(&vector_lock);
 278	/* Mark the in-use vectors */
279 for_each_active_irq(irq) {
280 cfg = irq_cfg(irq);
281 if (!cfg)
282 continue;
283
284 if (!cpumask_test_cpu(cpu, cfg->domain))
285 continue;
286 vector = cfg->vector;
287 per_cpu(vector_irq, cpu)[vector] = irq;
288 }
289 /* Mark the free vectors */
290 for (vector = 0; vector < NR_VECTORS; ++vector) {
291 irq = per_cpu(vector_irq, cpu)[vector];
292 if (irq <= VECTOR_UNDEFINED)
293 continue;
294
295 cfg = irq_cfg(irq);
296 if (!cpumask_test_cpu(cpu, cfg->domain))
297 per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
298 }
299 raw_spin_unlock(&vector_lock);
300}
301
302/*
303 * Setup the vector to irq mappings.
304 */
305void setup_vector_irq(int cpu)
306{
307 int irq;
308
309 /*
310 * On most of the platforms, legacy PIC delivers the interrupts on the
311 * boot cpu. But there are certain platforms where PIC interrupts are
 312	 * delivered to multiple cpus. If the legacy IRQ is handled by the
313 * legacy PIC, for the new cpu that is coming online, setup the static
314 * legacy vector to irq mapping:
315 */
316 for (irq = 0; irq < nr_legacy_irqs(); irq++)
317 per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;
318
319 __setup_vector_irq(cpu);
320}
321
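For the static legacy mapping above, IRQ0_VECTOR was defined as FIRST_EXTERNAL_VECTOR + 0x10 in kernels of this era (assumed here), so ISA IRQs 0-15 land on vectors 0x30-0x3f on every CPU the PIC can deliver to:

	#include <stdio.h>

	#define FIRST_EXTERNAL_VECTOR	0x20
	#define IRQ0_VECTOR		(FIRST_EXTERNAL_VECTOR + 0x10) /* assumed */

	int main(void)
	{
		int irq;

		for (irq = 0; irq < 16; irq++)	/* nr_legacy_irqs() on a PC */
			printf("legacy irq %2d -> vector 0x%02x\n",
			       irq, IRQ0_VECTOR + irq);
		return 0;
	}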
322int apic_retrigger_irq(struct irq_data *data)
323{
324 struct irq_cfg *cfg = irqd_cfg(data);
325 unsigned long flags;
326 int cpu;
327
328 raw_spin_lock_irqsave(&vector_lock, flags);
329 cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
330 apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
331 raw_spin_unlock_irqrestore(&vector_lock, flags);
332
333 return 1;
334}
335
336void apic_ack_edge(struct irq_data *data)
337{
338 irq_complete_move(irqd_cfg(data));
339 irq_move_irq(data);
340 ack_APIC_irq();
341}
342
343/*
344 * Either sets data->affinity to a valid value, and returns
345 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
346 * leaves data->affinity untouched.
347 */
348int apic_set_affinity(struct irq_data *data, const struct cpumask *mask,
349 unsigned int *dest_id)
350{
351 struct irq_cfg *cfg = irqd_cfg(data);
352 unsigned int irq = data->irq;
353 int err;
354
355 if (!config_enabled(CONFIG_SMP))
356 return -EPERM;
357
358 if (!cpumask_intersects(mask, cpu_online_mask))
359 return -EINVAL;
360
361 err = assign_irq_vector(irq, cfg, mask);
362 if (err)
363 return err;
364
365 err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
366 if (err) {
367 if (assign_irq_vector(irq, cfg, data->affinity))
368 pr_err("Failed to recover vector for irq %d\n", irq);
369 return err;
370 }
371
372 cpumask_copy(data->affinity, mask);
373
374 return 0;
375}
376
377#ifdef CONFIG_SMP
378void send_cleanup_vector(struct irq_cfg *cfg)
379{
380 cpumask_var_t cleanup_mask;
381
382 if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
383 unsigned int i;
384
385 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
386 apic->send_IPI_mask(cpumask_of(i),
387 IRQ_MOVE_CLEANUP_VECTOR);
388 } else {
389 cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
390 apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
391 free_cpumask_var(cleanup_mask);
392 }
393 cfg->move_in_progress = 0;
394}
395
396asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
397{
398 unsigned vector, me;
399
400 ack_APIC_irq();
401 irq_enter();
402 exit_idle();
403
404 me = smp_processor_id();
405 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
406 int irq;
407 unsigned int irr;
408 struct irq_desc *desc;
409 struct irq_cfg *cfg;
410
411 irq = __this_cpu_read(vector_irq[vector]);
412
413 if (irq <= VECTOR_UNDEFINED)
414 continue;
415
416 desc = irq_to_desc(irq);
417 if (!desc)
418 continue;
419
420 cfg = irq_cfg(irq);
421 if (!cfg)
422 continue;
423
424 raw_spin_lock(&desc->lock);
425
426 /*
427 * Check if the irq migration is in progress. If so, we
428 * haven't received the cleanup request yet for this irq.
429 */
430 if (cfg->move_in_progress)
431 goto unlock;
432
433 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
434 goto unlock;
435
436 irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
437 /*
 438		 * Check if the vector that needs to be cleaned up is
 439		 * registered in the cpu's IRR. If so, then this is not
 440		 * the best time to clean it up. Let's clean it up in the
441 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
442 * to myself.
443 */
444 if (irr & (1 << (vector % 32))) {
445 apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
446 goto unlock;
447 }
448 __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
449unlock:
450 raw_spin_unlock(&desc->lock);
451 }
452
453 irq_exit();
454}
455
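The IRR test in smp_irq_move_cleanup_interrupt() relies on how the 256-bit IRR is exposed: eight 32-bit registers spaced 0x10 apart in the local APIC register page, so vector v is found at offset APIC_IRR + (v / 32) * 0x10, bit v % 32. A sketch of just the index math:

	#include <stdio.h>

	#define APIC_IRR 0x200	/* MMIO offset of the first IRR register */

	int main(void)
	{
		unsigned int v;

		for (v = 0x20; v <= 0x22; v++)	/* a few sample vectors */
			printf("vector 0x%02x -> reg 0x%03x bit %u\n",
			       v, APIC_IRR + (v / 32) * 0x10, v % 32);
		return 0;
	}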
456static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
457{
458 unsigned me;
459
460 if (likely(!cfg->move_in_progress))
461 return;
462
463 me = smp_processor_id();
464
465 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
466 send_cleanup_vector(cfg);
467}
468
469void irq_complete_move(struct irq_cfg *cfg)
470{
471 __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
472}
473
474void irq_force_complete_move(int irq)
475{
476 struct irq_cfg *cfg = irq_cfg(irq);
477
478 if (!cfg)
479 return;
480
481 __irq_complete_move(cfg, cfg->vector);
482}
483#endif
484
485/*
 486 * Dynamic irq allocation and deallocation. Should be replaced by irq domains!
487 */
488int arch_setup_hwirq(unsigned int irq, int node)
489{
490 struct irq_cfg *cfg;
491 unsigned long flags;
492 int ret;
493
494 cfg = alloc_irq_cfg(irq, node);
495 if (!cfg)
496 return -ENOMEM;
497
498 raw_spin_lock_irqsave(&vector_lock, flags);
499 ret = __assign_irq_vector(irq, cfg, apic->target_cpus());
500 raw_spin_unlock_irqrestore(&vector_lock, flags);
501
502 if (!ret)
503 irq_set_chip_data(irq, cfg);
504 else
505 free_irq_cfg(irq, cfg);
506 return ret;
507}
508
509void arch_teardown_hwirq(unsigned int irq)
510{
511 struct irq_cfg *cfg = irq_cfg(irq);
512
513 free_remapped_irq(irq);
514 clear_irq_vector(irq, cfg);
515 free_irq_cfg(irq, cfg);
516}
517
518static void __init print_APIC_field(int base)
519{
520 int i;
521
522 printk(KERN_DEBUG);
523
524 for (i = 0; i < 8; i++)
525 pr_cont("%08x", apic_read(base + i*0x10));
526
527 pr_cont("\n");
528}
529
530static void __init print_local_APIC(void *dummy)
531{
532 unsigned int i, v, ver, maxlvt;
533 u64 icr;
534
535 pr_debug("printing local APIC contents on CPU#%d/%d:\n",
536 smp_processor_id(), hard_smp_processor_id());
537 v = apic_read(APIC_ID);
538 pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
539 v = apic_read(APIC_LVR);
540 pr_info("... APIC VERSION: %08x\n", v);
541 ver = GET_APIC_VERSION(v);
542 maxlvt = lapic_get_maxlvt();
543
544 v = apic_read(APIC_TASKPRI);
545 pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
546
547 /* !82489DX */
548 if (APIC_INTEGRATED(ver)) {
549 if (!APIC_XAPIC(ver)) {
550 v = apic_read(APIC_ARBPRI);
551 pr_debug("... APIC ARBPRI: %08x (%02x)\n",
552 v, v & APIC_ARBPRI_MASK);
553 }
554 v = apic_read(APIC_PROCPRI);
555 pr_debug("... APIC PROCPRI: %08x\n", v);
556 }
557
558 /*
559 * Remote read supported only in the 82489DX and local APIC for
560 * Pentium processors.
561 */
562 if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
563 v = apic_read(APIC_RRR);
564 pr_debug("... APIC RRR: %08x\n", v);
565 }
566
567 v = apic_read(APIC_LDR);
568 pr_debug("... APIC LDR: %08x\n", v);
569 if (!x2apic_enabled()) {
570 v = apic_read(APIC_DFR);
571 pr_debug("... APIC DFR: %08x\n", v);
572 }
573 v = apic_read(APIC_SPIV);
574 pr_debug("... APIC SPIV: %08x\n", v);
575
576 pr_debug("... APIC ISR field:\n");
577 print_APIC_field(APIC_ISR);
578 pr_debug("... APIC TMR field:\n");
579 print_APIC_field(APIC_TMR);
580 pr_debug("... APIC IRR field:\n");
581 print_APIC_field(APIC_IRR);
582
583 /* !82489DX */
584 if (APIC_INTEGRATED(ver)) {
585 /* Due to the Pentium erratum 3AP. */
586 if (maxlvt > 3)
587 apic_write(APIC_ESR, 0);
588
589 v = apic_read(APIC_ESR);
590 pr_debug("... APIC ESR: %08x\n", v);
591 }
592
593 icr = apic_icr_read();
594 pr_debug("... APIC ICR: %08x\n", (u32)icr);
595 pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));
596
597 v = apic_read(APIC_LVTT);
598 pr_debug("... APIC LVTT: %08x\n", v);
599
600 if (maxlvt > 3) {
601 /* PC is LVT#4. */
602 v = apic_read(APIC_LVTPC);
603 pr_debug("... APIC LVTPC: %08x\n", v);
604 }
605 v = apic_read(APIC_LVT0);
606 pr_debug("... APIC LVT0: %08x\n", v);
607 v = apic_read(APIC_LVT1);
608 pr_debug("... APIC LVT1: %08x\n", v);
609
610 if (maxlvt > 2) {
611 /* ERR is LVT#3. */
612 v = apic_read(APIC_LVTERR);
613 pr_debug("... APIC LVTERR: %08x\n", v);
614 }
615
616 v = apic_read(APIC_TMICT);
617 pr_debug("... APIC TMICT: %08x\n", v);
618 v = apic_read(APIC_TMCCT);
619 pr_debug("... APIC TMCCT: %08x\n", v);
620 v = apic_read(APIC_TDCR);
621 pr_debug("... APIC TDCR: %08x\n", v);
622
623 if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
624 v = apic_read(APIC_EFEAT);
625 maxlvt = (v >> 16) & 0xff;
626 pr_debug("... APIC EFEAT: %08x\n", v);
627 v = apic_read(APIC_ECTRL);
628 pr_debug("... APIC ECTRL: %08x\n", v);
629 for (i = 0; i < maxlvt; i++) {
630 v = apic_read(APIC_EILVTn(i));
631 pr_debug("... APIC EILVT%d: %08x\n", i, v);
632 }
633 }
634 pr_cont("\n");
635}
636
637static void __init print_local_APICs(int maxcpu)
638{
639 int cpu;
640
641 if (!maxcpu)
642 return;
643
644 preempt_disable();
645 for_each_online_cpu(cpu) {
646 if (cpu >= maxcpu)
647 break;
648 smp_call_function_single(cpu, print_local_APIC, NULL, 1);
649 }
650 preempt_enable();
651}
652
653static void __init print_PIC(void)
654{
655 unsigned int v;
656 unsigned long flags;
657
658 if (!nr_legacy_irqs())
659 return;
660
661 pr_debug("\nprinting PIC contents\n");
662
663 raw_spin_lock_irqsave(&i8259A_lock, flags);
664
665 v = inb(0xa1) << 8 | inb(0x21);
666 pr_debug("... PIC IMR: %04x\n", v);
667
668 v = inb(0xa0) << 8 | inb(0x20);
669 pr_debug("... PIC IRR: %04x\n", v);
670
671 outb(0x0b, 0xa0);
672 outb(0x0b, 0x20);
673 v = inb(0xa0) << 8 | inb(0x20);
674 outb(0x0a, 0xa0);
675 outb(0x0a, 0x20);
676
677 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
678
679 pr_debug("... PIC ISR: %04x\n", v);
680
681 v = inb(0x4d1) << 8 | inb(0x4d0);
682 pr_debug("... PIC ELCR: %04x\n", v);
683}
684
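print_PIC() toggles what the 8259A returns on a status read: writing OCW3 0x0b to a command port selects the in-service register for the next read, and 0x0a switches back to the request register, which is why the ISR read above is bracketed by those two outb() pairs. A userspace sketch of the same sequence (x86 Linux only; needs ioperm() and root):

	#include <stdio.h>
	#include <sys/io.h>	/* inb/outb/ioperm on x86 Linux */

	int main(void)
	{
		unsigned int isr;

		if (ioperm(0x20, 2, 1) || ioperm(0xa0, 2, 1))
			return 1;		/* requires root */

		outb(0x0b, 0xa0);		/* OCW3: next read returns ISR */
		outb(0x0b, 0x20);
		isr = inb(0xa0) << 8 | inb(0x20);
		outb(0x0a, 0xa0);		/* OCW3: back to IRR reads */
		outb(0x0a, 0x20);

		printf("PIC ISR: %04x\n", isr);
		return 0;
	}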
685static int show_lapic __initdata = 1;
686static __init int setup_show_lapic(char *arg)
687{
688 int num = -1;
689
690 if (strcmp(arg, "all") == 0) {
691 show_lapic = CONFIG_NR_CPUS;
692 } else {
693 get_option(&arg, &num);
694 if (num >= 0)
695 show_lapic = num;
696 }
697
698 return 1;
699}
700__setup("show_lapic=", setup_show_lapic);
701
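Usage note: show_lapic= is a boot parameter, e.g. show_lapic=2 to dump the first two CPUs or show_lapic=all for every online CPU; since print_ICs() below returns early at APIC_QUIET, it only produces output when combined with apic=verbose or apic=debug.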
702static int __init print_ICs(void)
703{
704 if (apic_verbosity == APIC_QUIET)
705 return 0;
706
707 print_PIC();
708
709 /* don't print out if apic is not there */
710 if (!cpu_has_apic && !apic_from_smp_config())
711 return 0;
712
713 print_local_APICs(show_lapic);
714 print_IO_APICs();
715
716 return 0;
717}
718
719late_initcall(print_ICs);