author:    Jiang Liu <jiang.liu@linux.intel.com>  2014-10-27 04:12:00 -0400
committer: Thomas Gleixner <tglx@linutronix.de>   2014-12-16 08:08:16 -0500
commit:    74afab7af7d9aeba86b3b8e39670cf7d0058f6df (patch)
tree:      ef65e6502d8a56eada797a278002cdd9e1307f04 /arch/x86/kernel/apic/io_apic.c
parent:    55a0e2b122c26c7496ea85754bceddc05dba402b (diff)
x86, irq: Move local APIC related code from io_apic.c into vector.c
Create arch/x86/kernel/apic/vector.c to host local APIC related code,
preparing for making MSI/HT_IRQ independent of IOAPIC.

Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Prarit Bhargava <prarit@redhat.com>
Cc: Grant Likely <grant.likely@linaro.org>
Link: http://lkml.kernel.org/r/1414397531-28254-10-git-send-email-jiang.liu@linux.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
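For orientation, the sketch below shows roughly the shape the new arch/x86/kernel/apic/vector.c takes after this move. It is reconstructed from the deletions in this diff, not taken from the new file itself: the include list is a guess, while the function bodies and their order follow the hunks removed from io_apic.c below.

/*
 * arch/x86/kernel/apic/vector.c -- a sketch only. The include list is
 * an assumption; the function bodies are exactly the ones deleted from
 * io_apic.c in the hunks below, carried over unchanged.
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/i8259.h>

/* The vector allocator's lock moves here along with all of its users. */
static DEFINE_RAW_SPINLOCK(vector_lock);

void lock_vector_lock(void)
{
	/* Held so the online cpu set cannot change during assign_irq_vector() */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irq_get_chip_data(irq);
}

/*
 * ...followed, in source order, by the rest of the deleted functions:
 * irqd_cfg(), alloc_irq_cfg(), free_irq_cfg(), alloc_irq_and_cfg_at(),
 * __assign_irq_vector(), assign_irq_vector(), clear_irq_vector(),
 * __setup_vector_irq(), apic_retrigger_irq(), send_cleanup_vector(),
 * smp_irq_move_cleanup_interrupt(), __irq_complete_move(),
 * irq_complete_move(), irq_force_complete_move(), apic_set_affinity(),
 * apic_ack_edge(), arch_setup_hwirq(), arch_teardown_hwirq(), and the
 * __apicdebuginit() debug helpers from print_APIC_field() through
 * print_ICs() with its late_initcall().
 */

The only two insertions in io_apic.c itself are the new print_IO_APIC()/print_IO_APICs() signatures: with the __apicdebuginit() macro gone from this file, print_IO_APIC() becomes a plain static __init function, and print_IO_APICs() loses its static linkage, presumably so that print_ICs(), which moves away with the rest of the debug code, can still call it.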
Diffstat (limited to 'arch/x86/kernel/apic/io_apic.c')
-rw-r--r--  arch/x86/kernel/apic/io_apic.c  649
1 file changed, 2 insertions(+), 647 deletions(-)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 783468efa4a1..677044df2d7a 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -61,8 +61,6 @@
 
 #include <asm/apic.h>
 
-#define __apicdebuginit(type) static type __init
-
 #define for_each_ioapic(idx) \
 	for ((idx) = 0; (idx) < nr_ioapics; (idx)++)
 #define for_each_ioapic_reverse(idx) \
@@ -83,7 +81,6 @@
 int sis_apic_bug = -1;
 
 static DEFINE_RAW_SPINLOCK(ioapic_lock);
-static DEFINE_RAW_SPINLOCK(vector_lock);
 static DEFINE_MUTEX(ioapic_mutex);
 static unsigned int ioapic_dynirq_base;
 static int ioapic_initialized;
@@ -206,8 +203,6 @@ static int __init parse_noapic(char *str)
 }
 early_param("noapic", parse_noapic);
 
-static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node);
-
 /* Will be called in mpparse/acpi/sfi codes for saving IRQ info */
 void mp_save_irq(struct mpc_intsrc *m)
 {
@@ -281,67 +276,6 @@ int __init arch_early_irq_init(void)
 	return 0;
 }
 
-struct irq_cfg *irq_cfg(unsigned int irq)
-{
-	return irq_get_chip_data(irq);
-}
-
-struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
-{
-	return irq_data->chip_data;
-}
-
-static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
-{
-	struct irq_cfg *cfg;
-
-	cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
-	if (!cfg)
-		return NULL;
-	if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
-		goto out_cfg;
-	if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
-		goto out_domain;
-	INIT_LIST_HEAD(&cfg->irq_2_pin);
-	return cfg;
-out_domain:
-	free_cpumask_var(cfg->domain);
-out_cfg:
-	kfree(cfg);
-	return NULL;
-}
-
-static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
-{
-	if (!cfg)
-		return;
-	irq_set_chip_data(at, NULL);
-	free_cpumask_var(cfg->domain);
-	free_cpumask_var(cfg->old_domain);
-	kfree(cfg);
-}
-
-static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
-{
-	int res = irq_alloc_desc_at(at, node);
-	struct irq_cfg *cfg;
-
-	if (res < 0) {
-		if (res != -EEXIST)
-			return NULL;
-		cfg = irq_cfg(at);
-		if (cfg)
-			return cfg;
-	}
-
-	cfg = alloc_irq_cfg(at, node);
-	if (cfg)
-		irq_set_chip_data(at, cfg);
-	else
-		irq_free_desc(at);
-	return cfg;
-}
-
 struct io_apic {
 	unsigned int index;
 	unsigned int unused[3];
@@ -1238,190 +1172,6 @@ out:
 }
 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
 
-void lock_vector_lock(void)
-{
-	/* Used to the online set of cpus does not change
-	 * during assign_irq_vector.
-	 */
-	raw_spin_lock(&vector_lock);
-}
-
-void unlock_vector_lock(void)
-{
-	raw_spin_unlock(&vector_lock);
-}
-
-static int
-__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
-{
-	/*
-	 * NOTE! The local APIC isn't very good at handling
-	 * multiple interrupts at the same interrupt level.
-	 * As the interrupt level is determined by taking the
-	 * vector number and shifting that right by 4, we
-	 * want to spread these out a bit so that they don't
-	 * all fall in the same interrupt level.
-	 *
-	 * Also, we've got to be careful not to trash gate
-	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
-	 */
-	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
-	static int current_offset = VECTOR_OFFSET_START % 16;
-	int cpu, err;
-	cpumask_var_t tmp_mask;
-
-	if (cfg->move_in_progress)
-		return -EBUSY;
-
-	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
-		return -ENOMEM;
-
-	/* Only try and allocate irqs on cpus that are present */
-	err = -ENOSPC;
-	cpumask_clear(cfg->old_domain);
-	cpu = cpumask_first_and(mask, cpu_online_mask);
-	while (cpu < nr_cpu_ids) {
-		int new_cpu, vector, offset;
-
-		apic->vector_allocation_domain(cpu, tmp_mask, mask);
-
-		if (cpumask_subset(tmp_mask, cfg->domain)) {
-			err = 0;
-			if (cpumask_equal(tmp_mask, cfg->domain))
-				break;
-			/*
-			 * New cpumask using the vector is a proper subset of
-			 * the current in use mask. So cleanup the vector
-			 * allocation for the members that are not used anymore.
-			 */
-			cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
-			cfg->move_in_progress =
-				cpumask_intersects(cfg->old_domain, cpu_online_mask);
-			cpumask_and(cfg->domain, cfg->domain, tmp_mask);
-			break;
-		}
-
-		vector = current_vector;
-		offset = current_offset;
-next:
-		vector += 16;
-		if (vector >= first_system_vector) {
-			offset = (offset + 1) % 16;
-			vector = FIRST_EXTERNAL_VECTOR + offset;
-		}
-
-		if (unlikely(current_vector == vector)) {
-			cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
-			cpumask_andnot(tmp_mask, mask, cfg->old_domain);
-			cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
-			continue;
-		}
-
-		if (test_bit(vector, used_vectors))
-			goto next;
-
-		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
-			if (per_cpu(vector_irq, new_cpu)[vector] > VECTOR_UNDEFINED)
-				goto next;
-		}
-		/* Found one! */
-		current_vector = vector;
-		current_offset = offset;
-		if (cfg->vector) {
-			cpumask_copy(cfg->old_domain, cfg->domain);
-			cfg->move_in_progress =
-				cpumask_intersects(cfg->old_domain, cpu_online_mask);
-		}
-		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
-			per_cpu(vector_irq, new_cpu)[vector] = irq;
-		cfg->vector = vector;
-		cpumask_copy(cfg->domain, tmp_mask);
-		err = 0;
-		break;
-	}
-	free_cpumask_var(tmp_mask);
-	return err;
-}
-
-int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
-{
-	int err;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&vector_lock, flags);
-	err = __assign_irq_vector(irq, cfg, mask);
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
-	return err;
-}
-
-static void clear_irq_vector(int irq, struct irq_cfg *cfg)
-{
-	int cpu, vector;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&vector_lock, flags);
-	BUG_ON(!cfg->vector);
-
-	vector = cfg->vector;
-	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
-		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
-
-	cfg->vector = 0;
-	cpumask_clear(cfg->domain);
-
-	if (likely(!cfg->move_in_progress)) {
-		raw_spin_unlock_irqrestore(&vector_lock, flags);
-		return;
-	}
-
-	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
-		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-			if (per_cpu(vector_irq, cpu)[vector] != irq)
-				continue;
-			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
-			break;
-		}
-	}
-	cfg->move_in_progress = 0;
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
-}
-
-void __setup_vector_irq(int cpu)
-{
-	/* Initialize vector_irq on a new cpu */
-	int irq, vector;
-	struct irq_cfg *cfg;
-
-	/*
-	 * vector_lock will make sure that we don't run into irq vector
-	 * assignments that might be happening on another cpu in parallel,
-	 * while we setup our initial vector to irq mappings.
-	 */
-	raw_spin_lock(&vector_lock);
-	/* Mark the inuse vectors */
-	for_each_active_irq(irq) {
-		cfg = irq_cfg(irq);
-		if (!cfg)
-			continue;
-
-		if (!cpumask_test_cpu(cpu, cfg->domain))
-			continue;
-		vector = cfg->vector;
-		per_cpu(vector_irq, cpu)[vector] = irq;
-	}
-	/* Mark the free vectors */
-	for (vector = 0; vector < NR_VECTORS; ++vector) {
-		irq = per_cpu(vector_irq, cpu)[vector];
-		if (irq <= VECTOR_UNDEFINED)
-			continue;
-
-		cfg = irq_cfg(irq);
-		if (!cpumask_test_cpu(cpu, cfg->domain))
-			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
-	}
-	raw_spin_unlock(&vector_lock);
-}
-
 static struct irq_chip ioapic_chip;
 
 #ifdef CONFIG_X86_32
@@ -1655,7 +1405,7 @@ void ioapic_zap_locks(void)
 	raw_spin_lock_init(&ioapic_lock);
 }
 
-__apicdebuginit(void) print_IO_APIC(int ioapic_idx)
+static void __init print_IO_APIC(int ioapic_idx)
 {
 	union IO_APIC_reg_00 reg_00;
 	union IO_APIC_reg_01 reg_01;
@@ -1712,7 +1462,7 @@ __apicdebuginit(void) print_IO_APIC(int ioapic_idx)
 	x86_io_apic_ops.print_entries(ioapic_idx, reg_01.bits.entries);
 }
 
-__apicdebuginit(void) print_IO_APICs(void)
+void __init print_IO_APICs(void)
 {
 	int ioapic_idx;
 	struct irq_cfg *cfg;
@@ -1756,205 +1506,6 @@ __apicdebuginit(void) print_IO_APICs(void)
 	printk(KERN_INFO ".................................... done.\n");
 }
 
-__apicdebuginit(void) print_APIC_field(int base)
-{
-	int i;
-
-	printk(KERN_DEBUG);
-
-	for (i = 0; i < 8; i++)
-		pr_cont("%08x", apic_read(base + i*0x10));
-
-	pr_cont("\n");
-}
-
-__apicdebuginit(void) print_local_APIC(void *dummy)
-{
-	unsigned int i, v, ver, maxlvt;
-	u64 icr;
-
-	printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
-		smp_processor_id(), hard_smp_processor_id());
-	v = apic_read(APIC_ID);
-	printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
-	v = apic_read(APIC_LVR);
-	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
-	ver = GET_APIC_VERSION(v);
-	maxlvt = lapic_get_maxlvt();
-
-	v = apic_read(APIC_TASKPRI);
-	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
-
-	if (APIC_INTEGRATED(ver)) {	/* !82489DX */
-		if (!APIC_XAPIC(ver)) {
-			v = apic_read(APIC_ARBPRI);
-			printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
-			       v & APIC_ARBPRI_MASK);
-		}
-		v = apic_read(APIC_PROCPRI);
-		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
-	}
-
-	/*
-	 * Remote read supported only in the 82489DX and local APIC for
-	 * Pentium processors.
-	 */
-	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
-		v = apic_read(APIC_RRR);
-		printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
-	}
-
-	v = apic_read(APIC_LDR);
-	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
-	if (!x2apic_enabled()) {
-		v = apic_read(APIC_DFR);
-		printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
-	}
-	v = apic_read(APIC_SPIV);
-	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
-
-	printk(KERN_DEBUG "... APIC ISR field:\n");
-	print_APIC_field(APIC_ISR);
-	printk(KERN_DEBUG "... APIC TMR field:\n");
-	print_APIC_field(APIC_TMR);
-	printk(KERN_DEBUG "... APIC IRR field:\n");
-	print_APIC_field(APIC_IRR);
-
-	if (APIC_INTEGRATED(ver)) {	/* !82489DX */
-		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
-			apic_write(APIC_ESR, 0);
-
-		v = apic_read(APIC_ESR);
-		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
-	}
-
-	icr = apic_icr_read();
-	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
-	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
-
-	v = apic_read(APIC_LVTT);
-	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
-
-	if (maxlvt > 3) {	/* PC is LVT#4. */
-		v = apic_read(APIC_LVTPC);
-		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
-	}
-	v = apic_read(APIC_LVT0);
-	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
-	v = apic_read(APIC_LVT1);
-	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
-
-	if (maxlvt > 2) {	/* ERR is LVT#3. */
-		v = apic_read(APIC_LVTERR);
-		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
-	}
-
-	v = apic_read(APIC_TMICT);
-	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
-	v = apic_read(APIC_TMCCT);
-	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
-	v = apic_read(APIC_TDCR);
-	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
-
-	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
-		v = apic_read(APIC_EFEAT);
-		maxlvt = (v >> 16) & 0xff;
-		printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
-		v = apic_read(APIC_ECTRL);
-		printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
-		for (i = 0; i < maxlvt; i++) {
-			v = apic_read(APIC_EILVTn(i));
-			printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
-		}
-	}
-	pr_cont("\n");
-}
-
-__apicdebuginit(void) print_local_APICs(int maxcpu)
-{
-	int cpu;
-
-	if (!maxcpu)
-		return;
-
-	preempt_disable();
-	for_each_online_cpu(cpu) {
-		if (cpu >= maxcpu)
-			break;
-		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
-	}
-	preempt_enable();
-}
-
-__apicdebuginit(void) print_PIC(void)
-{
-	unsigned int v;
-	unsigned long flags;
-
-	if (!nr_legacy_irqs())
-		return;
-
-	printk(KERN_DEBUG "\nprinting PIC contents\n");
-
-	raw_spin_lock_irqsave(&i8259A_lock, flags);
-
-	v = inb(0xa1) << 8 | inb(0x21);
-	printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
-
-	v = inb(0xa0) << 8 | inb(0x20);
-	printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
-
-	outb(0x0b,0xa0);
-	outb(0x0b,0x20);
-	v = inb(0xa0) << 8 | inb(0x20);
-	outb(0x0a,0xa0);
-	outb(0x0a,0x20);
-
-	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-
-	printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
-
-	v = inb(0x4d1) << 8 | inb(0x4d0);
-	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
-}
-
-static int __initdata show_lapic = 1;
-static __init int setup_show_lapic(char *arg)
-{
-	int num = -1;
-
-	if (strcmp(arg, "all") == 0) {
-		show_lapic = CONFIG_NR_CPUS;
-	} else {
-		get_option(&arg, &num);
-		if (num >= 0)
-			show_lapic = num;
-	}
-
-	return 1;
-}
-__setup("show_lapic=", setup_show_lapic);
-
-__apicdebuginit(int) print_ICs(void)
-{
-	if (apic_verbosity == APIC_QUIET)
-		return 0;
-
-	print_PIC();
-
-	/* don't print out if apic is not there */
-	if (!cpu_has_apic && !apic_from_smp_config())
-		return 0;
-
-	print_local_APICs(show_lapic);
-	print_IO_APICs();
-
-	return 0;
-}
-
-late_initcall(print_ICs);
-
-
 /* Where if anywhere is the i8259 connect in external int mode */
 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
 
@@ -2263,20 +1814,6 @@ static unsigned int startup_ioapic_irq(struct irq_data *data)
 	return was_pending;
 }
 
-static int apic_retrigger_irq(struct irq_data *data)
-{
-	struct irq_cfg *cfg = data->chip_data;
-	unsigned long flags;
-	int cpu;
-
-	raw_spin_lock_irqsave(&vector_lock, flags);
-	cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
-	apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
-
-	return 1;
-}
-
 /*
  * Level and edge triggered IO-APIC interrupts need different handling,
  * so we use two separate IRQ descriptors. Edge triggered IRQs can be
@@ -2286,113 +1823,6 @@ static int apic_retrigger_irq(struct irq_data *data)
  * races.
  */
 
-#ifdef CONFIG_SMP
-void send_cleanup_vector(struct irq_cfg *cfg)
-{
-	cpumask_var_t cleanup_mask;
-
-	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
-		unsigned int i;
-		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
-			apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
-	} else {
-		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
-		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-		free_cpumask_var(cleanup_mask);
-	}
-	cfg->move_in_progress = 0;
-}
-
-asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
-{
-	unsigned vector, me;
-
-	ack_APIC_irq();
-	irq_enter();
-	exit_idle();
-
-	me = smp_processor_id();
-	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-		int irq;
-		unsigned int irr;
-		struct irq_desc *desc;
-		struct irq_cfg *cfg;
-		irq = __this_cpu_read(vector_irq[vector]);
-
-		if (irq <= VECTOR_UNDEFINED)
-			continue;
-
-		desc = irq_to_desc(irq);
-		if (!desc)
-			continue;
-
-		cfg = irq_cfg(irq);
-		if (!cfg)
-			continue;
-
-		raw_spin_lock(&desc->lock);
-
-		/*
-		 * Check if the irq migration is in progress. If so, we
-		 * haven't received the cleanup request yet for this irq.
-		 */
-		if (cfg->move_in_progress)
-			goto unlock;
-
-		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
-			goto unlock;
-
-		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
-		/*
-		 * Check if the vector that needs to be cleanedup is
-		 * registered at the cpu's IRR. If so, then this is not
-		 * the best time to clean it up. Lets clean it up in the
-		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
-		 * to myself.
-		 */
-		if (irr & (1 << (vector % 32))) {
-			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
-			goto unlock;
-		}
-		__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
-unlock:
-		raw_spin_unlock(&desc->lock);
-	}
-
-	irq_exit();
-}
-
-static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
-{
-	unsigned me;
-
-	if (likely(!cfg->move_in_progress))
-		return;
-
-	me = smp_processor_id();
-
-	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
-		send_cleanup_vector(cfg);
-}
-
-static void irq_complete_move(struct irq_cfg *cfg)
-{
-	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
-}
-
-void irq_force_complete_move(int irq)
-{
-	struct irq_cfg *cfg = irq_cfg(irq);
-
-	if (!cfg)
-		return;
-
-	__irq_complete_move(cfg, cfg->vector);
-}
-#else
-static inline void irq_complete_move(struct irq_cfg *cfg) { }
-#endif
-
 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
 {
 	int apic, pin;
@@ -2413,41 +1843,6 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
 	}
 }
 
-/*
- * Either sets data->affinity to a valid value, and returns
- * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
- * leaves data->affinity untouched.
- */
-int apic_set_affinity(struct irq_data *data, const struct cpumask *mask,
-		      unsigned int *dest_id)
-{
-	struct irq_cfg *cfg = data->chip_data;
-	unsigned int irq = data->irq;
-	int err;
-
-	if (!config_enabled(CONFIG_SMP))
-		return -EPERM;
-
-	if (!cpumask_intersects(mask, cpu_online_mask))
-		return -EINVAL;
-
-	err = assign_irq_vector(irq, cfg, mask);
-	if (err)
-		return err;
-
-	err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
-	if (err) {
-		if (assign_irq_vector(irq, cfg, data->affinity))
-			pr_err("Failed to recover vector for irq %d\n", irq);
-		return err;
-	}
-
-	cpumask_copy(data->affinity, mask);
-
-	return 0;
-}
-
-
 int native_ioapic_set_affinity(struct irq_data *data,
 			       const struct cpumask *mask,
 			       bool force)
@@ -2471,13 +1866,6 @@ int native_ioapic_set_affinity(struct irq_data *data,
 	return ret;
 }
 
-static void apic_ack_edge(struct irq_data *data)
-{
-	irq_complete_move(data->chip_data);
-	irq_move_irq(data);
-	ack_APIC_irq();
-}
-
 atomic_t irq_mis_count;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
@@ -3068,39 +2456,6 @@ static int __init ioapic_init_ops(void)
 device_initcall(ioapic_init_ops);
 
 /*
- * Dynamic irq allocate and deallocation. Should be replaced by irq domains!
- */
-int arch_setup_hwirq(unsigned int irq, int node)
-{
-	struct irq_cfg *cfg;
-	unsigned long flags;
-	int ret;
-
-	cfg = alloc_irq_cfg(irq, node);
-	if (!cfg)
-		return -ENOMEM;
-
-	raw_spin_lock_irqsave(&vector_lock, flags);
-	ret = __assign_irq_vector(irq, cfg, apic->target_cpus());
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
-
-	if (!ret)
-		irq_set_chip_data(irq, cfg);
-	else
-		free_irq_cfg(irq, cfg);
-	return ret;
-}
-
-void arch_teardown_hwirq(unsigned int irq)
-{
-	struct irq_cfg *cfg = irq_cfg(irq);
-
-	free_remapped_irq(irq);
-	clear_irq_vector(irq, cfg);
-	free_irq_cfg(irq, cfg);
-}
-
-/*
  * MSI message composition
  */
 void native_compose_msi_msg(struct pci_dev *pdev,