author	Jiang Liu <jiang.liu@linux.intel.com>	2014-10-27 04:12:00 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2014-12-16 08:08:16 -0500
commit	74afab7af7d9aeba86b3b8e39670cf7d0058f6df (patch)
tree	ef65e6502d8a56eada797a278002cdd9e1307f04
parent	55a0e2b122c26c7496ea85754bceddc05dba402b (diff)
x86, irq: Move local APIC related code from io_apic.c into vector.c
Create arch/x86/kernel/apic/vector.c to host local APIC related code,
prepare for making MSI/HT_IRQ independent of IOAPIC.

Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Prarit Bhargava <prarit@redhat.com>
Cc: Grant Likely <grant.likely@linaro.org>
Link: http://lkml.kernel.org/r/1414397531-28254-10-git-send-email-jiang.liu@linux.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
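For orientation, here is a minimal sketch of the allocate/assign/release lifecycle the relocated interface supports, modeled on the arch_setup_hwirq()/arch_teardown_hwirq() pair this patch moves into vector.c. It is illustrative only, not part of the patch; the example_* names are hypothetical and error handling is abbreviated:

    static int example_setup_hwirq(unsigned int irq, int node)
    {
    	struct irq_cfg *cfg;

    	/* Allocate the irq descriptor plus its vector bookkeeping. */
    	cfg = alloc_irq_and_cfg_at(irq, node);
    	if (!cfg)
    		return -ENOMEM;

    	/* Bind a CPU vector, using the default target cpumask. */
    	return assign_irq_vector(irq, cfg, apic->target_cpus());
    }

    static void example_teardown_hwirq(unsigned int irq)
    {
    	/* Release the vector on all CPUs that still map it. */
    	clear_irq_vector(irq, irq_cfg(irq));
    }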
-rw-r--r--	arch/x86/Kconfig	2
-rw-r--r--	arch/x86/include/asm/hw_irq.h	29
-rw-r--r--	arch/x86/kernel/apic/Makefile	2
-rw-r--r--	arch/x86/kernel/apic/io_apic.c	649
-rw-r--r--	arch/x86/kernel/apic/vector.c	694
-rw-r--r--	arch/x86/kernel/irqinit.c	22
6 files changed, 714 insertions(+), 684 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index bea3a0159496..1e851b1ff61b 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -886,11 +886,11 @@ config X86_UP_IOAPIC
 config X86_LOCAL_APIC
 	def_bool y
 	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC || PCI_MSI
+	select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
 
 config X86_IO_APIC
 	def_bool y
 	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC || PCI_MSI
-	select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
 	select IRQ_DOMAIN
 
 config X86_REROUTE_FOR_BROKEN_BOOT_IRQS
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 454c9e4056e6..ef50db16bb44 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -111,6 +111,8 @@ struct irq_2_irte {
 #endif /* CONFIG_IRQ_REMAP */
 
 #ifdef CONFIG_X86_LOCAL_APIC
+struct irq_data;
+
 struct irq_cfg {
 	cpumask_var_t		domain;
 	cpumask_var_t		old_domain;
@@ -134,28 +136,27 @@ struct irq_cfg {
 
 extern struct irq_cfg *irq_cfg(unsigned int irq);
 extern struct irq_cfg *irqd_cfg(struct irq_data *irq_data);
-extern void setup_vector_irq(int cpu);
+extern struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node);
+extern void lock_vector_lock(void);
+extern void unlock_vector_lock(void);
 extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *);
+extern void clear_irq_vector(int irq, struct irq_cfg *cfg);
+extern void setup_vector_irq(int cpu);
 #ifdef CONFIG_SMP
 extern void send_cleanup_vector(struct irq_cfg *);
 #else
 static inline void send_cleanup_vector(struct irq_cfg *c) { }
 #endif
+extern void irq_complete_move(struct irq_cfg *cfg);
 
-struct irq_data;
-int apic_set_affinity(struct irq_data *, const struct cpumask *,
-		      unsigned int *dest_id);
-#endif /* CONFIG_X86_LOCAL_APIC */
-
-#ifdef CONFIG_X86_IO_APIC
-extern void lock_vector_lock(void);
-extern void unlock_vector_lock(void);
-extern void __setup_vector_irq(int cpu);
-#else
+extern int apic_retrigger_irq(struct irq_data *data);
+extern void apic_ack_edge(struct irq_data *data);
+extern int apic_set_affinity(struct irq_data *data, const struct cpumask *mask,
+			     unsigned int *dest_id);
+#else /* CONFIG_X86_LOCAL_APIC */
 static inline void lock_vector_lock(void) {}
 static inline void unlock_vector_lock(void) {}
-static inline void __setup_vector_irq(int cpu) {}
-#endif
+#endif /* CONFIG_X86_LOCAL_APIC */
 
 /* IOAPIC */
 #ifdef CONFIG_X86_IO_APIC
@@ -181,11 +182,13 @@ extern void enable_IO_APIC(void);
 extern void disable_IO_APIC(void);
 extern void setup_ioapic_dest(void);
 extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin);
+extern void print_IO_APICs(void);
 
 extern unsigned long io_apic_irqs;
 #define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1 << (x)) & io_apic_irqs))
 #else /* CONFIG_X86_IO_APIC */
 #define IO_APIC_IRQ(x)		0
+static inline void print_IO_APICs(void) {}
 #endif /* CONFIG_X86_IO_APIC */
 
 /* Statistics */
diff --git a/arch/x86/kernel/apic/Makefile b/arch/x86/kernel/apic/Makefile
index dcb5b15401ce..84299e5d10dc 100644
--- a/arch/x86/kernel/apic/Makefile
+++ b/arch/x86/kernel/apic/Makefile
@@ -2,7 +2,7 @@
 # Makefile for local APIC drivers and for the IO-APIC code
 #
 
-obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o apic_noop.o ipi.o
+obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o apic_noop.o ipi.o vector.o
 obj-y				+= hw_nmi.o
 
 obj-$(CONFIG_X86_IO_APIC)	+= io_apic.o
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 783468efa4a1..677044df2d7a 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -61,8 +61,6 @@
 
 #include <asm/apic.h>
 
-#define __apicdebuginit(type)	static type __init
-
 #define	for_each_ioapic(idx)	\
 	for ((idx) = 0; (idx) < nr_ioapics; (idx)++)
 #define	for_each_ioapic_reverse(idx)	\
@@ -83,7 +81,6 @@
 int sis_apic_bug = -1;
 
 static DEFINE_RAW_SPINLOCK(ioapic_lock);
-static DEFINE_RAW_SPINLOCK(vector_lock);
 static DEFINE_MUTEX(ioapic_mutex);
 static unsigned int ioapic_dynirq_base;
 static int ioapic_initialized;
@@ -206,8 +203,6 @@ static int __init parse_noapic(char *str)
 }
 early_param("noapic", parse_noapic);
 
-static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node);
-
 /* Will be called in mpparse/acpi/sfi codes for saving IRQ info */
 void mp_save_irq(struct mpc_intsrc *m)
 {
@@ -281,67 +276,6 @@ int __init arch_early_irq_init(void)
 	return 0;
 }
 
-struct irq_cfg *irq_cfg(unsigned int irq)
-{
-	return irq_get_chip_data(irq);
-}
-
-struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
-{
-	return irq_data->chip_data;
-}
-
-static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
-{
-	struct irq_cfg *cfg;
-
-	cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
-	if (!cfg)
-		return NULL;
-	if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
-		goto out_cfg;
-	if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
-		goto out_domain;
-	INIT_LIST_HEAD(&cfg->irq_2_pin);
-	return cfg;
-out_domain:
-	free_cpumask_var(cfg->domain);
-out_cfg:
-	kfree(cfg);
-	return NULL;
-}
-
-static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
-{
-	if (!cfg)
-		return;
-	irq_set_chip_data(at, NULL);
-	free_cpumask_var(cfg->domain);
-	free_cpumask_var(cfg->old_domain);
-	kfree(cfg);
-}
-
-static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
-{
-	int res = irq_alloc_desc_at(at, node);
-	struct irq_cfg *cfg;
-
-	if (res < 0) {
-		if (res != -EEXIST)
-			return NULL;
-		cfg = irq_cfg(at);
-		if (cfg)
-			return cfg;
-	}
-
-	cfg = alloc_irq_cfg(at, node);
-	if (cfg)
-		irq_set_chip_data(at, cfg);
-	else
-		irq_free_desc(at);
-	return cfg;
-}
-
 struct io_apic {
 	unsigned int index;
 	unsigned int unused[3];
@@ -1238,190 +1172,6 @@ out:
 }
 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
 
-void lock_vector_lock(void)
-{
-	/* Used to the online set of cpus does not change
-	 * during assign_irq_vector.
-	 */
-	raw_spin_lock(&vector_lock);
-}
-
-void unlock_vector_lock(void)
-{
-	raw_spin_unlock(&vector_lock);
-}
-
-static int
-__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
-{
-	/*
-	 * NOTE! The local APIC isn't very good at handling
-	 * multiple interrupts at the same interrupt level.
-	 * As the interrupt level is determined by taking the
-	 * vector number and shifting that right by 4, we
-	 * want to spread these out a bit so that they don't
-	 * all fall in the same interrupt level.
-	 *
-	 * Also, we've got to be careful not to trash gate
-	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
-	 */
-	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
-	static int current_offset = VECTOR_OFFSET_START % 16;
-	int cpu, err;
-	cpumask_var_t tmp_mask;
-
-	if (cfg->move_in_progress)
-		return -EBUSY;
-
-	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
-		return -ENOMEM;
-
-	/* Only try and allocate irqs on cpus that are present */
-	err = -ENOSPC;
-	cpumask_clear(cfg->old_domain);
-	cpu = cpumask_first_and(mask, cpu_online_mask);
-	while (cpu < nr_cpu_ids) {
-		int new_cpu, vector, offset;
-
-		apic->vector_allocation_domain(cpu, tmp_mask, mask);
-
-		if (cpumask_subset(tmp_mask, cfg->domain)) {
-			err = 0;
-			if (cpumask_equal(tmp_mask, cfg->domain))
-				break;
-			/*
-			 * New cpumask using the vector is a proper subset of
-			 * the current in use mask. So cleanup the vector
-			 * allocation for the members that are not used anymore.
-			 */
-			cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
-			cfg->move_in_progress =
-			   cpumask_intersects(cfg->old_domain, cpu_online_mask);
-			cpumask_and(cfg->domain, cfg->domain, tmp_mask);
-			break;
-		}
-
-		vector = current_vector;
-		offset = current_offset;
-next:
-		vector += 16;
-		if (vector >= first_system_vector) {
-			offset = (offset + 1) % 16;
-			vector = FIRST_EXTERNAL_VECTOR + offset;
-		}
-
-		if (unlikely(current_vector == vector)) {
-			cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
-			cpumask_andnot(tmp_mask, mask, cfg->old_domain);
-			cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
-			continue;
-		}
-
-		if (test_bit(vector, used_vectors))
-			goto next;
-
-		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
-			if (per_cpu(vector_irq, new_cpu)[vector] > VECTOR_UNDEFINED)
-				goto next;
-		}
-		/* Found one! */
-		current_vector = vector;
-		current_offset = offset;
-		if (cfg->vector) {
-			cpumask_copy(cfg->old_domain, cfg->domain);
-			cfg->move_in_progress =
-			   cpumask_intersects(cfg->old_domain, cpu_online_mask);
-		}
-		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
-			per_cpu(vector_irq, new_cpu)[vector] = irq;
-		cfg->vector = vector;
-		cpumask_copy(cfg->domain, tmp_mask);
-		err = 0;
-		break;
-	}
-	free_cpumask_var(tmp_mask);
-	return err;
-}
-
-int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
-{
-	int err;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&vector_lock, flags);
-	err = __assign_irq_vector(irq, cfg, mask);
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
-	return err;
-}
-
-static void clear_irq_vector(int irq, struct irq_cfg *cfg)
-{
-	int cpu, vector;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&vector_lock, flags);
-	BUG_ON(!cfg->vector);
-
-	vector = cfg->vector;
-	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
-		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
-
-	cfg->vector = 0;
-	cpumask_clear(cfg->domain);
-
-	if (likely(!cfg->move_in_progress)) {
-		raw_spin_unlock_irqrestore(&vector_lock, flags);
-		return;
-	}
-
-	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
-		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-			if (per_cpu(vector_irq, cpu)[vector] != irq)
-				continue;
-			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
-			break;
-		}
-	}
-	cfg->move_in_progress = 0;
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
-}
-
-void __setup_vector_irq(int cpu)
-{
-	/* Initialize vector_irq on a new cpu */
-	int irq, vector;
-	struct irq_cfg *cfg;
-
-	/*
-	 * vector_lock will make sure that we don't run into irq vector
-	 * assignments that might be happening on another cpu in parallel,
-	 * while we setup our initial vector to irq mappings.
-	 */
-	raw_spin_lock(&vector_lock);
-	/* Mark the inuse vectors */
-	for_each_active_irq(irq) {
-		cfg = irq_cfg(irq);
-		if (!cfg)
-			continue;
-
-		if (!cpumask_test_cpu(cpu, cfg->domain))
-			continue;
-		vector = cfg->vector;
-		per_cpu(vector_irq, cpu)[vector] = irq;
-	}
-	/* Mark the free vectors */
-	for (vector = 0; vector < NR_VECTORS; ++vector) {
-		irq = per_cpu(vector_irq, cpu)[vector];
-		if (irq <= VECTOR_UNDEFINED)
-			continue;
-
-		cfg = irq_cfg(irq);
-		if (!cpumask_test_cpu(cpu, cfg->domain))
-			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
-	}
-	raw_spin_unlock(&vector_lock);
-}
-
 static struct irq_chip ioapic_chip;
 
 #ifdef CONFIG_X86_32
@@ -1655,7 +1405,7 @@ void ioapic_zap_locks(void)
 	raw_spin_lock_init(&ioapic_lock);
 }
 
-__apicdebuginit(void) print_IO_APIC(int ioapic_idx)
+static void __init print_IO_APIC(int ioapic_idx)
 {
 	union IO_APIC_reg_00 reg_00;
 	union IO_APIC_reg_01 reg_01;
@@ -1712,7 +1462,7 @@ __apicdebuginit(void) print_IO_APIC(int ioapic_idx)
 	x86_io_apic_ops.print_entries(ioapic_idx, reg_01.bits.entries);
 }
 
-__apicdebuginit(void) print_IO_APICs(void)
+void __init print_IO_APICs(void)
 {
 	int ioapic_idx;
 	struct irq_cfg *cfg;
@@ -1756,205 +1506,6 @@ __apicdebuginit(void) print_IO_APICs(void)
 	printk(KERN_INFO ".................................... done.\n");
 }
 
-__apicdebuginit(void) print_APIC_field(int base)
-{
-	int i;
-
-	printk(KERN_DEBUG);
-
-	for (i = 0; i < 8; i++)
-		pr_cont("%08x", apic_read(base + i*0x10));
-
-	pr_cont("\n");
-}
-
-__apicdebuginit(void) print_local_APIC(void *dummy)
-{
-	unsigned int i, v, ver, maxlvt;
-	u64 icr;
-
-	printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
-		smp_processor_id(), hard_smp_processor_id());
-	v = apic_read(APIC_ID);
-	printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
-	v = apic_read(APIC_LVR);
-	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
-	ver = GET_APIC_VERSION(v);
-	maxlvt = lapic_get_maxlvt();
-
-	v = apic_read(APIC_TASKPRI);
-	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
-
-	if (APIC_INTEGRATED(ver)) {			/* !82489DX */
-		if (!APIC_XAPIC(ver)) {
-			v = apic_read(APIC_ARBPRI);
-			printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
-			       v & APIC_ARBPRI_MASK);
-		}
-		v = apic_read(APIC_PROCPRI);
-		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
-	}
-
-	/*
-	 * Remote read supported only in the 82489DX and local APIC for
-	 * Pentium processors.
-	 */
-	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
-		v = apic_read(APIC_RRR);
-		printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
-	}
-
-	v = apic_read(APIC_LDR);
-	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
-	if (!x2apic_enabled()) {
-		v = apic_read(APIC_DFR);
-		printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
-	}
-	v = apic_read(APIC_SPIV);
-	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
-
-	printk(KERN_DEBUG "... APIC ISR field:\n");
-	print_APIC_field(APIC_ISR);
-	printk(KERN_DEBUG "... APIC TMR field:\n");
-	print_APIC_field(APIC_TMR);
-	printk(KERN_DEBUG "... APIC IRR field:\n");
-	print_APIC_field(APIC_IRR);
-
-	if (APIC_INTEGRATED(ver)) {		/* !82489DX */
-		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
-			apic_write(APIC_ESR, 0);
-
-		v = apic_read(APIC_ESR);
-		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
-	}
-
-	icr = apic_icr_read();
-	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
-	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
-
-	v = apic_read(APIC_LVTT);
-	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
-
-	if (maxlvt > 3) {			/* PC is LVT#4. */
-		v = apic_read(APIC_LVTPC);
-		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
-	}
-	v = apic_read(APIC_LVT0);
-	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
-	v = apic_read(APIC_LVT1);
-	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
-
-	if (maxlvt > 2) {			/* ERR is LVT#3. */
-		v = apic_read(APIC_LVTERR);
-		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
-	}
-
-	v = apic_read(APIC_TMICT);
-	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
-	v = apic_read(APIC_TMCCT);
-	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
-	v = apic_read(APIC_TDCR);
-	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
-
-	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
-		v = apic_read(APIC_EFEAT);
-		maxlvt = (v >> 16) & 0xff;
-		printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
-		v = apic_read(APIC_ECTRL);
-		printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
-		for (i = 0; i < maxlvt; i++) {
-			v = apic_read(APIC_EILVTn(i));
-			printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
-		}
-	}
-	pr_cont("\n");
-}
-
-__apicdebuginit(void) print_local_APICs(int maxcpu)
-{
-	int cpu;
-
-	if (!maxcpu)
-		return;
-
-	preempt_disable();
-	for_each_online_cpu(cpu) {
-		if (cpu >= maxcpu)
-			break;
-		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
-	}
-	preempt_enable();
-}
-
-__apicdebuginit(void) print_PIC(void)
-{
-	unsigned int v;
-	unsigned long flags;
-
-	if (!nr_legacy_irqs())
-		return;
-
-	printk(KERN_DEBUG "\nprinting PIC contents\n");
-
-	raw_spin_lock_irqsave(&i8259A_lock, flags);
-
-	v = inb(0xa1) << 8 | inb(0x21);
-	printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
-
-	v = inb(0xa0) << 8 | inb(0x20);
-	printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
-
-	outb(0x0b,0xa0);
-	outb(0x0b,0x20);
-	v = inb(0xa0) << 8 | inb(0x20);
-	outb(0x0a,0xa0);
-	outb(0x0a,0x20);
-
-	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-
-	printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
-
-	v = inb(0x4d1) << 8 | inb(0x4d0);
-	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
-}
-
-static int __initdata show_lapic = 1;
-static __init int setup_show_lapic(char *arg)
-{
-	int num = -1;
-
-	if (strcmp(arg, "all") == 0) {
-		show_lapic = CONFIG_NR_CPUS;
-	} else {
-		get_option(&arg, &num);
-		if (num >= 0)
-			show_lapic = num;
-	}
-
-	return 1;
-}
-__setup("show_lapic=", setup_show_lapic);
-
-__apicdebuginit(int) print_ICs(void)
-{
-	if (apic_verbosity == APIC_QUIET)
-		return 0;
-
-	print_PIC();
-
-	/* don't print out if apic is not there */
-	if (!cpu_has_apic && !apic_from_smp_config())
-		return 0;
-
-	print_local_APICs(show_lapic);
-	print_IO_APICs();
-
-	return 0;
-}
-
-late_initcall(print_ICs);
-
-
 /* Where if anywhere is the i8259 connect in external int mode */
 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
 
@@ -2263,20 +1814,6 @@ static unsigned int startup_ioapic_irq(struct irq_data *data)
 	return was_pending;
 }
 
-static int apic_retrigger_irq(struct irq_data *data)
-{
-	struct irq_cfg *cfg = data->chip_data;
-	unsigned long flags;
-	int cpu;
-
-	raw_spin_lock_irqsave(&vector_lock, flags);
-	cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
-	apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
-
-	return 1;
-}
-
 /*
  * Level and edge triggered IO-APIC interrupts need different handling,
  * so we use two separate IRQ descriptors. Edge triggered IRQs can be
@@ -2286,113 +1823,6 @@ static int apic_retrigger_irq(struct irq_data *data)
  * races.
  */
 
-#ifdef CONFIG_SMP
-void send_cleanup_vector(struct irq_cfg *cfg)
-{
-	cpumask_var_t cleanup_mask;
-
-	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
-		unsigned int i;
-		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
-			apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
-	} else {
-		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
-		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-		free_cpumask_var(cleanup_mask);
-	}
-	cfg->move_in_progress = 0;
-}
-
-asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
-{
-	unsigned vector, me;
-
-	ack_APIC_irq();
-	irq_enter();
-	exit_idle();
-
-	me = smp_processor_id();
-	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-		int irq;
-		unsigned int irr;
-		struct irq_desc *desc;
-		struct irq_cfg *cfg;
-		irq = __this_cpu_read(vector_irq[vector]);
-
-		if (irq <= VECTOR_UNDEFINED)
-			continue;
-
-		desc = irq_to_desc(irq);
-		if (!desc)
-			continue;
-
-		cfg = irq_cfg(irq);
-		if (!cfg)
-			continue;
-
-		raw_spin_lock(&desc->lock);
-
-		/*
-		 * Check if the irq migration is in progress. If so, we
-		 * haven't received the cleanup request yet for this irq.
-		 */
-		if (cfg->move_in_progress)
-			goto unlock;
-
-		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
-			goto unlock;
-
-		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
-		/*
-		 * Check if the vector that needs to be cleanedup is
-		 * registered at the cpu's IRR. If so, then this is not
-		 * the best time to clean it up. Lets clean it up in the
-		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
-		 * to myself.
-		 */
-		if (irr & (1 << (vector % 32))) {
-			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
-			goto unlock;
-		}
-		__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
-unlock:
-		raw_spin_unlock(&desc->lock);
-	}
-
-	irq_exit();
-}
-
-static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
-{
-	unsigned me;
-
-	if (likely(!cfg->move_in_progress))
-		return;
-
-	me = smp_processor_id();
-
-	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
-		send_cleanup_vector(cfg);
-}
-
-static void irq_complete_move(struct irq_cfg *cfg)
-{
-	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
-}
-
-void irq_force_complete_move(int irq)
-{
-	struct irq_cfg *cfg = irq_cfg(irq);
-
-	if (!cfg)
-		return;
-
-	__irq_complete_move(cfg, cfg->vector);
-}
-#else
-static inline void irq_complete_move(struct irq_cfg *cfg) { }
-#endif
-
 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
 {
 	int apic, pin;
@@ -2413,41 +1843,6 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
 	}
 }
 
-/*
- * Either sets data->affinity to a valid value, and returns
- * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
- * leaves data->affinity untouched.
- */
-int apic_set_affinity(struct irq_data *data, const struct cpumask *mask,
-		      unsigned int *dest_id)
-{
-	struct irq_cfg *cfg = data->chip_data;
-	unsigned int irq = data->irq;
-	int err;
-
-	if (!config_enabled(CONFIG_SMP))
-		return -EPERM;
-
-	if (!cpumask_intersects(mask, cpu_online_mask))
-		return -EINVAL;
-
-	err = assign_irq_vector(irq, cfg, mask);
-	if (err)
-		return err;
-
-	err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
-	if (err) {
-		if (assign_irq_vector(irq, cfg, data->affinity))
-			pr_err("Failed to recover vector for irq %d\n", irq);
-		return err;
-	}
-
-	cpumask_copy(data->affinity, mask);
-
-	return 0;
-}
-
-
 int native_ioapic_set_affinity(struct irq_data *data,
 			       const struct cpumask *mask,
 			       bool force)
@@ -2471,13 +1866,6 @@ int native_ioapic_set_affinity(struct irq_data *data,
 	return ret;
 }
 
-static void apic_ack_edge(struct irq_data *data)
-{
-	irq_complete_move(data->chip_data);
-	irq_move_irq(data);
-	ack_APIC_irq();
-}
-
 atomic_t irq_mis_count;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
@@ -3068,39 +2456,6 @@ static int __init ioapic_init_ops(void)
 device_initcall(ioapic_init_ops);
 
 /*
- * Dynamic irq allocate and deallocation. Should be replaced by irq domains!
- */
-int arch_setup_hwirq(unsigned int irq, int node)
-{
-	struct irq_cfg *cfg;
-	unsigned long flags;
-	int ret;
-
-	cfg = alloc_irq_cfg(irq, node);
-	if (!cfg)
-		return -ENOMEM;
-
-	raw_spin_lock_irqsave(&vector_lock, flags);
-	ret = __assign_irq_vector(irq, cfg, apic->target_cpus());
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
-
-	if (!ret)
-		irq_set_chip_data(irq, cfg);
-	else
-		free_irq_cfg(irq, cfg);
-	return ret;
-}
-
-void arch_teardown_hwirq(unsigned int irq)
-{
-	struct irq_cfg *cfg = irq_cfg(irq);
-
-	free_remapped_irq(irq);
-	clear_irq_vector(irq, cfg);
-	free_irq_cfg(irq, cfg);
-}
-
-/*
  * MSI message composition
  */
 void native_compose_msi_msg(struct pci_dev *pdev,
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
new file mode 100644
index 000000000000..9ba9bd477051
--- /dev/null
+++ b/arch/x86/kernel/apic/vector.c
@@ -0,0 +1,694 @@
+/*
+ * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
+ *
+ * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
+ *	Moved from arch/x86/kernel/apic/io_apic.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/irqdomain.h>
+#include <linux/slab.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/i8259.h>
+#include <asm/desc.h>
+#include <asm/irq_remapping.h>
+
+static DEFINE_RAW_SPINLOCK(vector_lock);
+
+void lock_vector_lock(void)
+{
+	/* Used to the online set of cpus does not change
+	 * during assign_irq_vector.
+	 */
+	raw_spin_lock(&vector_lock);
+}
+
+void unlock_vector_lock(void)
+{
+	raw_spin_unlock(&vector_lock);
+}
+
+struct irq_cfg *irq_cfg(unsigned int irq)
+{
+	return irq_get_chip_data(irq);
+}
+
+struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
+{
+	return irq_data->chip_data;
+}
+
+static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
+{
+	struct irq_cfg *cfg;
+
+	cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
+	if (!cfg)
+		return NULL;
+	if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
+		goto out_cfg;
+	if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
+		goto out_domain;
+#ifdef CONFIG_X86_IO_APIC
+	INIT_LIST_HEAD(&cfg->irq_2_pin);
+#endif
+	return cfg;
+out_domain:
+	free_cpumask_var(cfg->domain);
+out_cfg:
+	kfree(cfg);
+	return NULL;
+}
+
+struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
+{
+	int res = irq_alloc_desc_at(at, node);
+	struct irq_cfg *cfg;
+
+	if (res < 0) {
+		if (res != -EEXIST)
+			return NULL;
+		cfg = irq_cfg(at);
+		if (cfg)
+			return cfg;
+	}
+
+	cfg = alloc_irq_cfg(at, node);
+	if (cfg)
+		irq_set_chip_data(at, cfg);
+	else
+		irq_free_desc(at);
+	return cfg;
+}
+
+static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
+{
+	if (!cfg)
+		return;
+	irq_set_chip_data(at, NULL);
+	free_cpumask_var(cfg->domain);
+	free_cpumask_var(cfg->old_domain);
+	kfree(cfg);
+}
+
+static int
+__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
+{
+	/*
+	 * NOTE! The local APIC isn't very good at handling
+	 * multiple interrupts at the same interrupt level.
+	 * As the interrupt level is determined by taking the
+	 * vector number and shifting that right by 4, we
+	 * want to spread these out a bit so that they don't
+	 * all fall in the same interrupt level.
+	 *
+	 * Also, we've got to be careful not to trash gate
+	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
+	 */
+	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
+	static int current_offset = VECTOR_OFFSET_START % 16;
+	int cpu, err;
+	cpumask_var_t tmp_mask;
+
+	if (cfg->move_in_progress)
+		return -EBUSY;
+
+	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
+		return -ENOMEM;
+
+	/* Only try and allocate irqs on cpus that are present */
+	err = -ENOSPC;
+	cpumask_clear(cfg->old_domain);
+	cpu = cpumask_first_and(mask, cpu_online_mask);
+	while (cpu < nr_cpu_ids) {
+		int new_cpu, vector, offset;
+
+		apic->vector_allocation_domain(cpu, tmp_mask, mask);
+
+		if (cpumask_subset(tmp_mask, cfg->domain)) {
+			err = 0;
+			if (cpumask_equal(tmp_mask, cfg->domain))
+				break;
+			/*
+			 * New cpumask using the vector is a proper subset of
+			 * the current in use mask. So cleanup the vector
+			 * allocation for the members that are not used anymore.
+			 */
+			cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
+			cfg->move_in_progress =
+			   cpumask_intersects(cfg->old_domain, cpu_online_mask);
+			cpumask_and(cfg->domain, cfg->domain, tmp_mask);
+			break;
+		}
+
+		vector = current_vector;
+		offset = current_offset;
+next:
+		vector += 16;
+		if (vector >= first_system_vector) {
+			offset = (offset + 1) % 16;
+			vector = FIRST_EXTERNAL_VECTOR + offset;
+		}
+
+		if (unlikely(current_vector == vector)) {
+			cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
+			cpumask_andnot(tmp_mask, mask, cfg->old_domain);
+			cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
+			continue;
+		}
+
+		if (test_bit(vector, used_vectors))
+			goto next;
+
+		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
+			if (per_cpu(vector_irq, new_cpu)[vector] >
+			    VECTOR_UNDEFINED)
+				goto next;
+		}
+		/* Found one! */
+		current_vector = vector;
+		current_offset = offset;
+		if (cfg->vector) {
+			cpumask_copy(cfg->old_domain, cfg->domain);
+			cfg->move_in_progress =
+			   cpumask_intersects(cfg->old_domain, cpu_online_mask);
+		}
+		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
+			per_cpu(vector_irq, new_cpu)[vector] = irq;
+		cfg->vector = vector;
+		cpumask_copy(cfg->domain, tmp_mask);
+		err = 0;
+		break;
+	}
+	free_cpumask_var(tmp_mask);
+
+	return err;
+}
+
+int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
+{
+	int err;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&vector_lock, flags);
+	err = __assign_irq_vector(irq, cfg, mask);
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
+	return err;
+}
+
+void clear_irq_vector(int irq, struct irq_cfg *cfg)
+{
+	int cpu, vector;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&vector_lock, flags);
+	BUG_ON(!cfg->vector);
+
+	vector = cfg->vector;
+	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
+		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+
+	cfg->vector = 0;
+	cpumask_clear(cfg->domain);
+
+	if (likely(!cfg->move_in_progress)) {
+		raw_spin_unlock_irqrestore(&vector_lock, flags);
+		return;
+	}
+
+	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
+		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
+		     vector++) {
+			if (per_cpu(vector_irq, cpu)[vector] != irq)
+				continue;
+			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+			break;
+		}
+	}
+	cfg->move_in_progress = 0;
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
+}
+
+static void __setup_vector_irq(int cpu)
+{
+	/* Initialize vector_irq on a new cpu */
+	int irq, vector;
+	struct irq_cfg *cfg;
+
+	/*
+	 * vector_lock will make sure that we don't run into irq vector
+	 * assignments that might be happening on another cpu in parallel,
+	 * while we setup our initial vector to irq mappings.
+	 */
+	raw_spin_lock(&vector_lock);
+	/* Mark the inuse vectors */
+	for_each_active_irq(irq) {
+		cfg = irq_cfg(irq);
+		if (!cfg)
+			continue;
+
+		if (!cpumask_test_cpu(cpu, cfg->domain))
+			continue;
+		vector = cfg->vector;
+		per_cpu(vector_irq, cpu)[vector] = irq;
+	}
+	/* Mark the free vectors */
+	for (vector = 0; vector < NR_VECTORS; ++vector) {
+		irq = per_cpu(vector_irq, cpu)[vector];
+		if (irq <= VECTOR_UNDEFINED)
+			continue;
+
+		cfg = irq_cfg(irq);
+		if (!cpumask_test_cpu(cpu, cfg->domain))
+			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+	}
+	raw_spin_unlock(&vector_lock);
+}
+
+/*
+ * Setup the vector to irq mappings.
+ */
+void setup_vector_irq(int cpu)
+{
+	int irq;
+
+	/*
+	 * On most of the platforms, legacy PIC delivers the interrupts on the
+	 * boot cpu. But there are certain platforms where PIC interrupts are
+	 * delivered to multiple cpu's. If the legacy IRQ is handled by the
+	 * legacy PIC, for the new cpu that is coming online, setup the static
+	 * legacy vector to irq mapping:
+	 */
+	for (irq = 0; irq < nr_legacy_irqs(); irq++)
+		per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;
+
+	__setup_vector_irq(cpu);
+}
+
+int apic_retrigger_irq(struct irq_data *data)
+{
+	struct irq_cfg *cfg = data->chip_data;
+	unsigned long flags;
+	int cpu;
+
+	raw_spin_lock_irqsave(&vector_lock, flags);
+	cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
+	apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
+
+	return 1;
+}
+
+void apic_ack_edge(struct irq_data *data)
+{
+	irq_complete_move(data->chip_data);
+	irq_move_irq(data);
+	ack_APIC_irq();
+}
+
+/*
+ * Either sets data->affinity to a valid value, and returns
+ * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
+ * leaves data->affinity untouched.
+ */
+int apic_set_affinity(struct irq_data *data, const struct cpumask *mask,
+		      unsigned int *dest_id)
+{
+	struct irq_cfg *cfg = data->chip_data;
+	unsigned int irq = data->irq;
+	int err;
+
+	if (!config_enabled(CONFIG_SMP))
+		return -EPERM;
+
+	if (!cpumask_intersects(mask, cpu_online_mask))
+		return -EINVAL;
+
+	err = assign_irq_vector(irq, cfg, mask);
+	if (err)
+		return err;
+
+	err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
+	if (err) {
+		if (assign_irq_vector(irq, cfg, data->affinity))
+			pr_err("Failed to recover vector for irq %d\n", irq);
+		return err;
+	}
+
+	cpumask_copy(data->affinity, mask);
+
+	return 0;
+}
+
+#ifdef CONFIG_SMP
+void send_cleanup_vector(struct irq_cfg *cfg)
+{
+	cpumask_var_t cleanup_mask;
+
+	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
+		unsigned int i;
+
+		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
+			apic->send_IPI_mask(cpumask_of(i),
+					    IRQ_MOVE_CLEANUP_VECTOR);
+	} else {
+		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
+		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+		free_cpumask_var(cleanup_mask);
+	}
+	cfg->move_in_progress = 0;
+}
+
+asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
+{
+	unsigned vector, me;
+
+	ack_APIC_irq();
+	irq_enter();
+	exit_idle();
+
+	me = smp_processor_id();
+	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
+		int irq;
+		unsigned int irr;
+		struct irq_desc *desc;
+		struct irq_cfg *cfg;
+
+		irq = __this_cpu_read(vector_irq[vector]);
+
+		if (irq <= VECTOR_UNDEFINED)
+			continue;
+
+		desc = irq_to_desc(irq);
+		if (!desc)
+			continue;
+
+		cfg = irq_cfg(irq);
+		if (!cfg)
+			continue;
+
+		raw_spin_lock(&desc->lock);
+
+		/*
+		 * Check if the irq migration is in progress. If so, we
+		 * haven't received the cleanup request yet for this irq.
+		 */
+		if (cfg->move_in_progress)
+			goto unlock;
+
+		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
+			goto unlock;
+
+		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
+		/*
+		 * Check if the vector that needs to be cleanedup is
+		 * registered at the cpu's IRR. If so, then this is not
+		 * the best time to clean it up. Lets clean it up in the
+		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
+		 * to myself.
+		 */
+		if (irr & (1 << (vector % 32))) {
+			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
+			goto unlock;
+		}
+		__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
+unlock:
+		raw_spin_unlock(&desc->lock);
+	}
+
+	irq_exit();
+}
+
+static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
+{
+	unsigned me;
+
+	if (likely(!cfg->move_in_progress))
+		return;
+
+	me = smp_processor_id();
+
+	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
+		send_cleanup_vector(cfg);
+}
+
+void irq_complete_move(struct irq_cfg *cfg)
+{
+	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
+}
+
+void irq_force_complete_move(int irq)
+{
+	struct irq_cfg *cfg = irq_cfg(irq);
+
+	if (!cfg)
+		return;
+
+	__irq_complete_move(cfg, cfg->vector);
+}
+#else
+void irq_complete_move(struct irq_cfg *cfg) { }
+#endif
+
+/*
+ * Dynamic irq allocate and deallocation. Should be replaced by irq domains!
+ */
+int arch_setup_hwirq(unsigned int irq, int node)
+{
+	struct irq_cfg *cfg;
+	unsigned long flags;
+	int ret;
+
+	cfg = alloc_irq_cfg(irq, node);
+	if (!cfg)
+		return -ENOMEM;
+
+	raw_spin_lock_irqsave(&vector_lock, flags);
+	ret = __assign_irq_vector(irq, cfg, apic->target_cpus());
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
+
+	if (!ret)
+		irq_set_chip_data(irq, cfg);
+	else
+		free_irq_cfg(irq, cfg);
+	return ret;
+}
+
+void arch_teardown_hwirq(unsigned int irq)
+{
+	struct irq_cfg *cfg = irq_cfg(irq);
+
+	free_remapped_irq(irq);
+	clear_irq_vector(irq, cfg);
+	free_irq_cfg(irq, cfg);
+}
+
+static void __init print_APIC_field(int base)
+{
+	int i;
+
+	printk(KERN_DEBUG);
+
+	for (i = 0; i < 8; i++)
+		pr_cont("%08x", apic_read(base + i*0x10));
+
+	pr_cont("\n");
+}
+
+static void __init print_local_APIC(void *dummy)
+{
+	unsigned int i, v, ver, maxlvt;
+	u64 icr;
+
+	printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
+	       smp_processor_id(), hard_smp_processor_id());
+	v = apic_read(APIC_ID);
+	printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
+	v = apic_read(APIC_LVR);
+	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
+	ver = GET_APIC_VERSION(v);
+	maxlvt = lapic_get_maxlvt();
+
+	v = apic_read(APIC_TASKPRI);
+	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n",
+	       v, v & APIC_TPRI_MASK);
+
+	/* !82489DX */
+	if (APIC_INTEGRATED(ver)) {
+		if (!APIC_XAPIC(ver)) {
+			v = apic_read(APIC_ARBPRI);
+			printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
+			       v & APIC_ARBPRI_MASK);
+		}
+		v = apic_read(APIC_PROCPRI);
+		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
+	}
+
+	/*
+	 * Remote read supported only in the 82489DX and local APIC for
+	 * Pentium processors.
+	 */
+	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
+		v = apic_read(APIC_RRR);
+		printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
+	}
+
+	v = apic_read(APIC_LDR);
+	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
+	if (!x2apic_enabled()) {
+		v = apic_read(APIC_DFR);
+		printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
+	}
+	v = apic_read(APIC_SPIV);
+	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
+
+	printk(KERN_DEBUG "... APIC ISR field:\n");
+	print_APIC_field(APIC_ISR);
+	printk(KERN_DEBUG "... APIC TMR field:\n");
+	print_APIC_field(APIC_TMR);
+	printk(KERN_DEBUG "... APIC IRR field:\n");
+	print_APIC_field(APIC_IRR);
+
+	/* !82489DX */
+	if (APIC_INTEGRATED(ver)) {
+		/* Due to the Pentium erratum 3AP. */
+		if (maxlvt > 3)
+			apic_write(APIC_ESR, 0);
+
+		v = apic_read(APIC_ESR);
+		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
+	}
+
+	icr = apic_icr_read();
+	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
+	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
+
+	v = apic_read(APIC_LVTT);
+	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
+
+	if (maxlvt > 3) {
+		/* PC is LVT#4. */
+		v = apic_read(APIC_LVTPC);
+		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
+	}
+	v = apic_read(APIC_LVT0);
+	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
+	v = apic_read(APIC_LVT1);
+	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
+
+	if (maxlvt > 2) {
+		/* ERR is LVT#3. */
+		v = apic_read(APIC_LVTERR);
+		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
+	}
+
+	v = apic_read(APIC_TMICT);
+	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
+	v = apic_read(APIC_TMCCT);
+	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
+	v = apic_read(APIC_TDCR);
+	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
+
+	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
+		v = apic_read(APIC_EFEAT);
+		maxlvt = (v >> 16) & 0xff;
+		printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
+		v = apic_read(APIC_ECTRL);
+		printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
+		for (i = 0; i < maxlvt; i++) {
+			v = apic_read(APIC_EILVTn(i));
+			printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
+		}
+	}
+	pr_cont("\n");
+}
+
+static void __init print_local_APICs(int maxcpu)
+{
+	int cpu;
+
+	if (!maxcpu)
+		return;
+
+	preempt_disable();
+	for_each_online_cpu(cpu) {
+		if (cpu >= maxcpu)
+			break;
+		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
+	}
+	preempt_enable();
+}
+
+static void __init print_PIC(void)
+{
+	unsigned int v;
+	unsigned long flags;
+
+	if (!nr_legacy_irqs())
+		return;
+
+	printk(KERN_DEBUG "\nprinting PIC contents\n");
+
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
+
+	v = inb(0xa1) << 8 | inb(0x21);
+	printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
+
+	v = inb(0xa0) << 8 | inb(0x20);
+	printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
+
+	outb(0x0b, 0xa0);
+	outb(0x0b, 0x20);
+	v = inb(0xa0) << 8 | inb(0x20);
+	outb(0x0a, 0xa0);
+	outb(0x0a, 0x20);
+
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+
+	printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
+
+	v = inb(0x4d1) << 8 | inb(0x4d0);
+	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
+}
+
+static int show_lapic __initdata = 1;
+static __init int setup_show_lapic(char *arg)
+{
+	int num = -1;
+
+	if (strcmp(arg, "all") == 0) {
+		show_lapic = CONFIG_NR_CPUS;
+	} else {
+		get_option(&arg, &num);
+		if (num >= 0)
+			show_lapic = num;
+	}
+
+	return 1;
+}
+__setup("show_lapic=", setup_show_lapic);
+
+static int __init print_ICs(void)
+{
+	if (apic_verbosity == APIC_QUIET)
+		return 0;
+
+	print_PIC();
+
+	/* don't print out if apic is not there */
+	if (!cpu_has_apic && !apic_from_smp_config())
+		return 0;
+
+	print_local_APICs(show_lapic);
+	print_IO_APICs();
+
+	return 0;
+}
+
+late_initcall(print_ICs);
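
The NOTE at the top of __assign_irq_vector() above is easiest to see with numbers: the local APIC derives an interrupt's priority level from vector >> 4, so the allocator advances candidates in steps of 16 to land successive allocations in different priority levels, rotating the low nibble (offset) once it runs past first_system_vector. Below is a standalone sketch of that candidate walk; it hard-codes FIRST_EXTERNAL_VECTOR = 0x20, VECTOR_OFFSET_START = 1 and a first_system_vector of 0xef purely for illustration (the kernel takes these from asm/irq_vectors.h and a runtime variable):

    #include <stdio.h>

    /* Assumed constants, for illustration only. */
    #define FIRST_EXTERNAL_VECTOR	0x20
    #define VECTOR_OFFSET_START	1
    #define FIRST_SYSTEM_VECTOR	0xef

    int main(void)
    {
    	int vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
    	int offset = VECTOR_OFFSET_START % 16;
    	int i;

    	for (i = 0; i < 8; i++) {
    		/* Same stepping as the "next:" loop in __assign_irq_vector(). */
    		vector += 16;
    		if (vector >= FIRST_SYSTEM_VECTOR) {
    			offset = (offset + 1) % 16;
    			vector = FIRST_EXTERNAL_VECTOR + offset;
    		}
    		/* Priority level = vector >> 4; it changes on every step. */
    		printf("candidate 0x%02x -> level %d\n", vector, vector >> 4);
    	}
    	return 0;
    }

Note the walk passes through 0x31, 0x41, ... 0x81 rather than 0x80, which is the point of starting at FIRST_EXTERNAL_VECTOR + 1: the int 0x80 syscall gate is never handed out.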
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index fa893087fb51..70e181ea1eac 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -99,28 +99,6 @@ void __init init_IRQ(void)
 	x86_init.irqs.intr_init();
 }
 
-/*
- * Setup the vector to irq mappings.
- */
-void setup_vector_irq(int cpu)
-{
-#ifndef CONFIG_X86_IO_APIC
-	int irq;
-
-	/*
-	 * On most of the platforms, legacy PIC delivers the interrupts on the
-	 * boot cpu. But there are certain platforms where PIC interrupts are
-	 * delivered to multiple cpu's. If the legacy IRQ is handled by the
-	 * legacy PIC, for the new cpu that is coming online, setup the static
-	 * legacy vector to irq mapping:
-	 */
-	for (irq = 0; irq < nr_legacy_irqs(); irq++)
-		per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;
-#endif
-
-	__setup_vector_irq(cpu);
-}
-
 static void __init smp_intr_init(void)
 {
 #ifdef CONFIG_SMP
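
As a closing illustration of what the per-cpu vector_irq table populated by setup_vector_irq() is for: each CPU uses it at interrupt entry to translate the hardware vector number back into a Linux irq number. The sketch below is conceptual only; the real lookup lives in do_IRQ() in arch/x86/kernel/irq.c, and example_vector_to_irq() is a hypothetical name:

    /*
     * Conceptual sketch, not part of the patch: consuming the per-cpu
     * vector_irq table. VECTOR_UNDEFINED marks slots with no irq bound
     * on this cpu, as in __setup_vector_irq() above.
     */
    static int example_vector_to_irq(unsigned int vector)
    {
    	int irq = __this_cpu_read(vector_irq[vector]);

    	if (irq <= VECTOR_UNDEFINED)
    		return -1;	/* spurious: no irq behind this vector */

    	return irq;
    }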