diff options
author | H. Peter Anvin <hpa@zytor.com> | 2010-02-22 19:25:18 -0500 |
---|---|---|
committer | H. Peter Anvin <hpa@zytor.com> | 2010-02-22 19:25:18 -0500 |
commit | 54b56170e4517e9606b245c3f805fc96baa059f0 (patch) | |
tree | 9f8750ef972b4a7fdce530889dcbaf2a8b5b0d05 /arch/x86/kernel/apic | |
parent | 1f91233c26fd5f7d6525fd29b95e4b50ca7a3e88 (diff) | |
parent | d02e30c31c57683a66ed68a1bcff900ca78f6d56 (diff) |
Merge remote branch 'origin/x86/apic' into x86/mrst
Conflicts:
arch/x86/kernel/apic/io_apic.c
Diffstat (limited to 'arch/x86/kernel/apic')
-rw-r--r-- | arch/x86/kernel/apic/io_apic.c | 229 | ||||
-rw-r--r-- | arch/x86/kernel/apic/nmi.c | 6 |
2 files changed, 137 insertions, 98 deletions
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 3592a72f3f0a..b34854358ee6 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -73,8 +73,8 @@ | |||
73 | */ | 73 | */ |
74 | int sis_apic_bug = -1; | 74 | int sis_apic_bug = -1; |
75 | 75 | ||
76 | static DEFINE_SPINLOCK(ioapic_lock); | 76 | static DEFINE_RAW_SPINLOCK(ioapic_lock); |
77 | static DEFINE_SPINLOCK(vector_lock); | 77 | static DEFINE_RAW_SPINLOCK(vector_lock); |
78 | 78 | ||
79 | /* | 79 | /* |
80 | * # of IRQ routing registers | 80 | * # of IRQ routing registers |
@@ -167,8 +167,14 @@ int __init arch_early_irq_init(void) | |||
167 | desc->chip_data = &cfg[i]; | 167 | desc->chip_data = &cfg[i]; |
168 | zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node); | 168 | zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node); |
169 | zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node); | 169 | zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node); |
170 | if (i < legacy_pic->nr_legacy_irqs) | 170 | /* |
171 | cpumask_setall(cfg[i].domain); | 171 | * For legacy IRQs, start with assigning irq0 to irq15 to |
172 | * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0. | ||
173 | */ | ||
174 | if (i < legacy_pic->nr_legacy_irqs) { | ||
175 | cfg[i].vector = IRQ0_VECTOR + i; | ||
176 | cpumask_set_cpu(0, cfg[i].domain); | ||
177 | } | ||
172 | } | 178 | } |
173 | 179 | ||
174 | return 0; | 180 | return 0; |
@@ -388,7 +394,7 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg) | |||
388 | struct irq_pin_list *entry; | 394 | struct irq_pin_list *entry; |
389 | unsigned long flags; | 395 | unsigned long flags; |
390 | 396 | ||
391 | spin_lock_irqsave(&ioapic_lock, flags); | 397 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
392 | for_each_irq_pin(entry, cfg->irq_2_pin) { | 398 | for_each_irq_pin(entry, cfg->irq_2_pin) { |
393 | unsigned int reg; | 399 | unsigned int reg; |
394 | int pin; | 400 | int pin; |
@@ -397,11 +403,11 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg) | |||
397 | reg = io_apic_read(entry->apic, 0x10 + pin*2); | 403 | reg = io_apic_read(entry->apic, 0x10 + pin*2); |
398 | /* Is the remote IRR bit set? */ | 404 | /* Is the remote IRR bit set? */ |
399 | if (reg & IO_APIC_REDIR_REMOTE_IRR) { | 405 | if (reg & IO_APIC_REDIR_REMOTE_IRR) { |
400 | spin_unlock_irqrestore(&ioapic_lock, flags); | 406 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
401 | return true; | 407 | return true; |
402 | } | 408 | } |
403 | } | 409 | } |
404 | spin_unlock_irqrestore(&ioapic_lock, flags); | 410 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
405 | 411 | ||
406 | return false; | 412 | return false; |
407 | } | 413 | } |
@@ -415,10 +421,10 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin) | |||
415 | { | 421 | { |
416 | union entry_union eu; | 422 | union entry_union eu; |
417 | unsigned long flags; | 423 | unsigned long flags; |
418 | spin_lock_irqsave(&ioapic_lock, flags); | 424 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
419 | eu.w1 = io_apic_read(apic, 0x10 + 2 * pin); | 425 | eu.w1 = io_apic_read(apic, 0x10 + 2 * pin); |
420 | eu.w2 = io_apic_read(apic, 0x11 + 2 * pin); | 426 | eu.w2 = io_apic_read(apic, 0x11 + 2 * pin); |
421 | spin_unlock_irqrestore(&ioapic_lock, flags); | 427 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
422 | return eu.entry; | 428 | return eu.entry; |
423 | } | 429 | } |
424 | 430 | ||
@@ -441,9 +447,9 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) | |||
441 | void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) | 447 | void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) |
442 | { | 448 | { |
443 | unsigned long flags; | 449 | unsigned long flags; |
444 | spin_lock_irqsave(&ioapic_lock, flags); | 450 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
445 | __ioapic_write_entry(apic, pin, e); | 451 | __ioapic_write_entry(apic, pin, e); |
446 | spin_unlock_irqrestore(&ioapic_lock, flags); | 452 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
447 | } | 453 | } |
448 | 454 | ||
449 | /* | 455 | /* |
@@ -456,10 +462,10 @@ static void ioapic_mask_entry(int apic, int pin) | |||
456 | unsigned long flags; | 462 | unsigned long flags; |
457 | union entry_union eu = { .entry.mask = 1 }; | 463 | union entry_union eu = { .entry.mask = 1 }; |
458 | 464 | ||
459 | spin_lock_irqsave(&ioapic_lock, flags); | 465 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
460 | io_apic_write(apic, 0x10 + 2*pin, eu.w1); | 466 | io_apic_write(apic, 0x10 + 2*pin, eu.w1); |
461 | io_apic_write(apic, 0x11 + 2*pin, eu.w2); | 467 | io_apic_write(apic, 0x11 + 2*pin, eu.w2); |
462 | spin_unlock_irqrestore(&ioapic_lock, flags); | 468 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
463 | } | 469 | } |
464 | 470 | ||
465 | /* | 471 | /* |
@@ -586,9 +592,9 @@ static void mask_IO_APIC_irq_desc(struct irq_desc *desc) | |||
586 | 592 | ||
587 | BUG_ON(!cfg); | 593 | BUG_ON(!cfg); |
588 | 594 | ||
589 | spin_lock_irqsave(&ioapic_lock, flags); | 595 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
590 | __mask_IO_APIC_irq(cfg); | 596 | __mask_IO_APIC_irq(cfg); |
591 | spin_unlock_irqrestore(&ioapic_lock, flags); | 597 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
592 | } | 598 | } |
593 | 599 | ||
594 | static void unmask_IO_APIC_irq_desc(struct irq_desc *desc) | 600 | static void unmask_IO_APIC_irq_desc(struct irq_desc *desc) |
@@ -596,9 +602,9 @@ static void unmask_IO_APIC_irq_desc(struct irq_desc *desc) | |||
596 | struct irq_cfg *cfg = desc->chip_data; | 602 | struct irq_cfg *cfg = desc->chip_data; |
597 | unsigned long flags; | 603 | unsigned long flags; |
598 | 604 | ||
599 | spin_lock_irqsave(&ioapic_lock, flags); | 605 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
600 | __unmask_IO_APIC_irq(cfg); | 606 | __unmask_IO_APIC_irq(cfg); |
601 | spin_unlock_irqrestore(&ioapic_lock, flags); | 607 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
602 | } | 608 | } |
603 | 609 | ||
604 | static void mask_IO_APIC_irq(unsigned int irq) | 610 | static void mask_IO_APIC_irq(unsigned int irq) |
@@ -1122,12 +1128,12 @@ void lock_vector_lock(void) | |||
1122 | /* Used so that the online set of cpus does not change | 1128 | /* Used so that the online set of cpus does not change |
1123 | * during assign_irq_vector. | 1129 | * during assign_irq_vector. |
1124 | */ | 1130 | */ |
1125 | spin_lock(&vector_lock); | 1131 | raw_spin_lock(&vector_lock); |
1126 | } | 1132 | } |
1127 | 1133 | ||
1128 | void unlock_vector_lock(void) | 1134 | void unlock_vector_lock(void) |
1129 | { | 1135 | { |
1130 | spin_unlock(&vector_lock); | 1136 | raw_spin_unlock(&vector_lock); |
1131 | } | 1137 | } |
1132 | 1138 | ||
1133 | static int | 1139 | static int |
@@ -1144,7 +1150,8 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) | |||
1144 | * Also, we've got to be careful not to trash gate | 1150 | * Also, we've got to be careful not to trash gate |
1145 | * 0x80, because int 0x80 is hm, kind of importantish. ;) | 1151 | * 0x80, because int 0x80 is hm, kind of importantish. ;) |
1146 | */ | 1152 | */ |
1147 | static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0; | 1153 | static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START; |
1154 | static int current_offset = VECTOR_OFFSET_START % 8; | ||
1148 | unsigned int old_vector; | 1155 | unsigned int old_vector; |
1149 | int cpu, err; | 1156 | int cpu, err; |
1150 | cpumask_var_t tmp_mask; | 1157 | cpumask_var_t tmp_mask; |
@@ -1180,7 +1187,7 @@ next: | |||
1180 | if (vector >= first_system_vector) { | 1187 | if (vector >= first_system_vector) { |
1181 | /* If out of vectors on large boxen, must share them. */ | 1188 | /* If out of vectors on large boxen, must share them. */ |
1182 | offset = (offset + 1) % 8; | 1189 | offset = (offset + 1) % 8; |
1183 | vector = FIRST_DEVICE_VECTOR + offset; | 1190 | vector = FIRST_EXTERNAL_VECTOR + offset; |
1184 | } | 1191 | } |
1185 | if (unlikely(current_vector == vector)) | 1192 | if (unlikely(current_vector == vector)) |
1186 | continue; | 1193 | continue; |
@@ -1214,9 +1221,9 @@ int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) | |||
1214 | int err; | 1221 | int err; |
1215 | unsigned long flags; | 1222 | unsigned long flags; |
1216 | 1223 | ||
1217 | spin_lock_irqsave(&vector_lock, flags); | 1224 | raw_spin_lock_irqsave(&vector_lock, flags); |
1218 | err = __assign_irq_vector(irq, cfg, mask); | 1225 | err = __assign_irq_vector(irq, cfg, mask); |
1219 | spin_unlock_irqrestore(&vector_lock, flags); | 1226 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
1220 | return err; | 1227 | return err; |
1221 | } | 1228 | } |
1222 | 1229 | ||
@@ -1250,11 +1257,16 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg) | |||
1250 | void __setup_vector_irq(int cpu) | 1257 | void __setup_vector_irq(int cpu) |
1251 | { | 1258 | { |
1252 | /* Initialize vector_irq on a new cpu */ | 1259 | /* Initialize vector_irq on a new cpu */ |
1253 | /* This function must be called with vector_lock held */ | ||
1254 | int irq, vector; | 1260 | int irq, vector; |
1255 | struct irq_cfg *cfg; | 1261 | struct irq_cfg *cfg; |
1256 | struct irq_desc *desc; | 1262 | struct irq_desc *desc; |
1257 | 1263 | ||
1264 | /* | ||
1265 | * vector_lock will make sure that we don't run into irq vector | ||
1266 | * assignments that might be happening on another cpu in parallel, | ||
1267 | * while we setup our initial vector to irq mappings. | ||
1268 | */ | ||
1269 | raw_spin_lock(&vector_lock); | ||
1258 | /* Mark the inuse vectors */ | 1270 | /* Mark the inuse vectors */ |
1259 | for_each_irq_desc(irq, desc) { | 1271 | for_each_irq_desc(irq, desc) { |
1260 | cfg = desc->chip_data; | 1272 | cfg = desc->chip_data; |
@@ -1273,6 +1285,7 @@ void __setup_vector_irq(int cpu) | |||
1273 | if (!cpumask_test_cpu(cpu, cfg->domain)) | 1285 | if (!cpumask_test_cpu(cpu, cfg->domain)) |
1274 | per_cpu(vector_irq, cpu)[vector] = -1; | 1286 | per_cpu(vector_irq, cpu)[vector] = -1; |
1275 | } | 1287 | } |
1288 | raw_spin_unlock(&vector_lock); | ||
1276 | } | 1289 | } |
1277 | 1290 | ||
1278 | static struct irq_chip ioapic_chip; | 1291 | static struct irq_chip ioapic_chip; |
@@ -1422,6 +1435,14 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq | |||
1422 | 1435 | ||
1423 | cfg = desc->chip_data; | 1436 | cfg = desc->chip_data; |
1424 | 1437 | ||
1438 | /* | ||
1439 | * For legacy irqs, cfg->domain starts with cpu 0 for legacy | ||
1440 | * controllers like 8259. Now that IO-APIC can handle this irq, update | ||
1441 | * the cfg->domain. | ||
1442 | */ | ||
1443 | if (irq < nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain)) | ||
1444 | apic->vector_allocation_domain(0, cfg->domain); | ||
1445 | |||
1425 | if (assign_irq_vector(irq, cfg, apic->target_cpus())) | 1446 | if (assign_irq_vector(irq, cfg, apic->target_cpus())) |
1426 | return; | 1447 | return; |
1427 | 1448 | ||
@@ -1521,6 +1542,56 @@ static void __init setup_IO_APIC_irqs(void) | |||
1521 | } | 1542 | } |
1522 | 1543 | ||
1523 | /* | 1544 | /* |
1545 | * for the GSIs that are not in the first IO-APIC | ||
1546 | * but cannot be registered via acpi_register_gsi(), | ||
1547 | * like the special SCI on the IBM x3330 | ||
1548 | */ | ||
1549 | void setup_IO_APIC_irq_extra(u32 gsi) | ||
1550 | { | ||
1551 | int apic_id = 0, pin, idx, irq; | ||
1552 | int node = cpu_to_node(boot_cpu_id); | ||
1553 | struct irq_desc *desc; | ||
1554 | struct irq_cfg *cfg; | ||
1555 | |||
1556 | /* | ||
1557 | * Convert 'gsi' to 'ioapic.pin'. | ||
1558 | */ | ||
1559 | apic_id = mp_find_ioapic(gsi); | ||
1560 | if (apic_id < 0) | ||
1561 | return; | ||
1562 | |||
1563 | pin = mp_find_ioapic_pin(apic_id, gsi); | ||
1564 | idx = find_irq_entry(apic_id, pin, mp_INT); | ||
1565 | if (idx == -1) | ||
1566 | return; | ||
1567 | |||
1568 | irq = pin_2_irq(idx, apic_id, pin); | ||
1569 | #ifdef CONFIG_SPARSE_IRQ | ||
1570 | desc = irq_to_desc(irq); | ||
1571 | if (desc) | ||
1572 | return; | ||
1573 | #endif | ||
1574 | desc = irq_to_desc_alloc_node(irq, node); | ||
1575 | if (!desc) { | ||
1576 | printk(KERN_INFO "can not get irq_desc for %d\n", irq); | ||
1577 | return; | ||
1578 | } | ||
1579 | |||
1580 | cfg = desc->chip_data; | ||
1581 | add_pin_to_irq_node(cfg, node, apic_id, pin); | ||
1582 | |||
1583 | if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) { | ||
1584 | pr_debug("Pin %d-%d already programmed\n", | ||
1585 | mp_ioapics[apic_id].apicid, pin); | ||
1586 | return; | ||
1587 | } | ||
1588 | set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed); | ||
1589 | |||
1590 | setup_IO_APIC_irq(apic_id, pin, irq, desc, | ||
1591 | irq_trigger(idx), irq_polarity(idx)); | ||
1592 | } | ||
1593 | |||
1594 | /* | ||
1524 | * Set up the timer pin, possibly with the 8259A-master behind. | 1595 | * Set up the timer pin, possibly with the 8259A-master behind. |
1525 | */ | 1596 | */ |
1526 | static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin, | 1597 | static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin, |
@@ -1583,14 +1654,14 @@ __apicdebuginit(void) print_IO_APIC(void) | |||
1583 | 1654 | ||
1584 | for (apic = 0; apic < nr_ioapics; apic++) { | 1655 | for (apic = 0; apic < nr_ioapics; apic++) { |
1585 | 1656 | ||
1586 | spin_lock_irqsave(&ioapic_lock, flags); | 1657 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
1587 | reg_00.raw = io_apic_read(apic, 0); | 1658 | reg_00.raw = io_apic_read(apic, 0); |
1588 | reg_01.raw = io_apic_read(apic, 1); | 1659 | reg_01.raw = io_apic_read(apic, 1); |
1589 | if (reg_01.bits.version >= 0x10) | 1660 | if (reg_01.bits.version >= 0x10) |
1590 | reg_02.raw = io_apic_read(apic, 2); | 1661 | reg_02.raw = io_apic_read(apic, 2); |
1591 | if (reg_01.bits.version >= 0x20) | 1662 | if (reg_01.bits.version >= 0x20) |
1592 | reg_03.raw = io_apic_read(apic, 3); | 1663 | reg_03.raw = io_apic_read(apic, 3); |
1593 | spin_unlock_irqrestore(&ioapic_lock, flags); | 1664 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
1594 | 1665 | ||
1595 | printk("\n"); | 1666 | printk("\n"); |
1596 | printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid); | 1667 | printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid); |
@@ -1812,7 +1883,7 @@ __apicdebuginit(void) print_PIC(void) | |||
1812 | 1883 | ||
1813 | printk(KERN_DEBUG "\nprinting PIC contents\n"); | 1884 | printk(KERN_DEBUG "\nprinting PIC contents\n"); |
1814 | 1885 | ||
1815 | spin_lock_irqsave(&i8259A_lock, flags); | 1886 | raw_spin_lock_irqsave(&i8259A_lock, flags); |
1816 | 1887 | ||
1817 | v = inb(0xa1) << 8 | inb(0x21); | 1888 | v = inb(0xa1) << 8 | inb(0x21); |
1818 | printk(KERN_DEBUG "... PIC IMR: %04x\n", v); | 1889 | printk(KERN_DEBUG "... PIC IMR: %04x\n", v); |
@@ -1826,7 +1897,7 @@ __apicdebuginit(void) print_PIC(void) | |||
1826 | outb(0x0a,0xa0); | 1897 | outb(0x0a,0xa0); |
1827 | outb(0x0a,0x20); | 1898 | outb(0x0a,0x20); |
1828 | 1899 | ||
1829 | spin_unlock_irqrestore(&i8259A_lock, flags); | 1900 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); |
1830 | 1901 | ||
1831 | printk(KERN_DEBUG "... PIC ISR: %04x\n", v); | 1902 | printk(KERN_DEBUG "... PIC ISR: %04x\n", v); |
1832 | 1903 | ||
@@ -1885,9 +1956,9 @@ void __init enable_IO_APIC(void) | |||
1885 | * The number of IO-APIC IRQ registers (== #pins): | 1956 | * The number of IO-APIC IRQ registers (== #pins): |
1886 | */ | 1957 | */ |
1887 | for (apic = 0; apic < nr_ioapics; apic++) { | 1958 | for (apic = 0; apic < nr_ioapics; apic++) { |
1888 | spin_lock_irqsave(&ioapic_lock, flags); | 1959 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
1889 | reg_01.raw = io_apic_read(apic, 1); | 1960 | reg_01.raw = io_apic_read(apic, 1); |
1890 | spin_unlock_irqrestore(&ioapic_lock, flags); | 1961 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
1891 | nr_ioapic_registers[apic] = reg_01.bits.entries+1; | 1962 | nr_ioapic_registers[apic] = reg_01.bits.entries+1; |
1892 | } | 1963 | } |
1893 | 1964 | ||
@@ -2027,9 +2098,9 @@ void __init setup_ioapic_ids_from_mpc(void) | |||
2027 | for (apic_id = 0; apic_id < nr_ioapics; apic_id++) { | 2098 | for (apic_id = 0; apic_id < nr_ioapics; apic_id++) { |
2028 | 2099 | ||
2029 | /* Read the register 0 value */ | 2100 | /* Read the register 0 value */ |
2030 | spin_lock_irqsave(&ioapic_lock, flags); | 2101 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
2031 | reg_00.raw = io_apic_read(apic_id, 0); | 2102 | reg_00.raw = io_apic_read(apic_id, 0); |
2032 | spin_unlock_irqrestore(&ioapic_lock, flags); | 2103 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
2033 | 2104 | ||
2034 | old_id = mp_ioapics[apic_id].apicid; | 2105 | old_id = mp_ioapics[apic_id].apicid; |
2035 | 2106 | ||
@@ -2088,16 +2159,16 @@ void __init setup_ioapic_ids_from_mpc(void) | |||
2088 | mp_ioapics[apic_id].apicid); | 2159 | mp_ioapics[apic_id].apicid); |
2089 | 2160 | ||
2090 | reg_00.bits.ID = mp_ioapics[apic_id].apicid; | 2161 | reg_00.bits.ID = mp_ioapics[apic_id].apicid; |
2091 | spin_lock_irqsave(&ioapic_lock, flags); | 2162 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
2092 | io_apic_write(apic_id, 0, reg_00.raw); | 2163 | io_apic_write(apic_id, 0, reg_00.raw); |
2093 | spin_unlock_irqrestore(&ioapic_lock, flags); | 2164 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
2094 | 2165 | ||
2095 | /* | 2166 | /* |
2096 | * Sanity check | 2167 | * Sanity check |
2097 | */ | 2168 | */ |
2098 | spin_lock_irqsave(&ioapic_lock, flags); | 2169 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
2099 | reg_00.raw = io_apic_read(apic_id, 0); | 2170 | reg_00.raw = io_apic_read(apic_id, 0); |
2100 | spin_unlock_irqrestore(&ioapic_lock, flags); | 2171 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
2101 | if (reg_00.bits.ID != mp_ioapics[apic_id].apicid) | 2172 | if (reg_00.bits.ID != mp_ioapics[apic_id].apicid) |
2102 | printk("could not set ID!\n"); | 2173 | printk("could not set ID!\n"); |
2103 | else | 2174 | else |
@@ -2180,7 +2251,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq) | |||
2180 | unsigned long flags; | 2251 | unsigned long flags; |
2181 | struct irq_cfg *cfg; | 2252 | struct irq_cfg *cfg; |
2182 | 2253 | ||
2183 | spin_lock_irqsave(&ioapic_lock, flags); | 2254 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
2184 | if (irq < legacy_pic->nr_legacy_irqs) { | 2255 | if (irq < legacy_pic->nr_legacy_irqs) { |
2185 | legacy_pic->chip->mask(irq); | 2256 | legacy_pic->chip->mask(irq); |
2186 | if (legacy_pic->irq_pending(irq)) | 2257 | if (legacy_pic->irq_pending(irq)) |
@@ -2188,7 +2259,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq) | |||
2188 | } | 2259 | } |
2189 | cfg = irq_cfg(irq); | 2260 | cfg = irq_cfg(irq); |
2190 | __unmask_IO_APIC_irq(cfg); | 2261 | __unmask_IO_APIC_irq(cfg); |
2191 | spin_unlock_irqrestore(&ioapic_lock, flags); | 2262 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
2192 | 2263 | ||
2193 | return was_pending; | 2264 | return was_pending; |
2194 | } | 2265 | } |
@@ -2199,9 +2270,9 @@ static int ioapic_retrigger_irq(unsigned int irq) | |||
2199 | struct irq_cfg *cfg = irq_cfg(irq); | 2270 | struct irq_cfg *cfg = irq_cfg(irq); |
2200 | unsigned long flags; | 2271 | unsigned long flags; |
2201 | 2272 | ||
2202 | spin_lock_irqsave(&vector_lock, flags); | 2273 | raw_spin_lock_irqsave(&vector_lock, flags); |
2203 | apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector); | 2274 | apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector); |
2204 | spin_unlock_irqrestore(&vector_lock, flags); | 2275 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
2205 | 2276 | ||
2206 | return 1; | 2277 | return 1; |
2207 | } | 2278 | } |
@@ -2294,14 +2365,14 @@ set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) | |||
2294 | irq = desc->irq; | 2365 | irq = desc->irq; |
2295 | cfg = desc->chip_data; | 2366 | cfg = desc->chip_data; |
2296 | 2367 | ||
2297 | spin_lock_irqsave(&ioapic_lock, flags); | 2368 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
2298 | ret = set_desc_affinity(desc, mask, &dest); | 2369 | ret = set_desc_affinity(desc, mask, &dest); |
2299 | if (!ret) { | 2370 | if (!ret) { |
2300 | /* Only the high 8 bits are valid. */ | 2371 | /* Only the high 8 bits are valid. */ |
2301 | dest = SET_APIC_LOGICAL_ID(dest); | 2372 | dest = SET_APIC_LOGICAL_ID(dest); |
2302 | __target_IO_APIC_irq(irq, dest, cfg); | 2373 | __target_IO_APIC_irq(irq, dest, cfg); |
2303 | } | 2374 | } |
2304 | spin_unlock_irqrestore(&ioapic_lock, flags); | 2375 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
2305 | 2376 | ||
2306 | return ret; | 2377 | return ret; |
2307 | } | 2378 | } |
@@ -2536,9 +2607,9 @@ static void eoi_ioapic_irq(struct irq_desc *desc) | |||
2536 | irq = desc->irq; | 2607 | irq = desc->irq; |
2537 | cfg = desc->chip_data; | 2608 | cfg = desc->chip_data; |
2538 | 2609 | ||
2539 | spin_lock_irqsave(&ioapic_lock, flags); | 2610 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
2540 | __eoi_ioapic_irq(irq, cfg); | 2611 | __eoi_ioapic_irq(irq, cfg); |
2541 | spin_unlock_irqrestore(&ioapic_lock, flags); | 2612 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
2542 | } | 2613 | } |
2543 | 2614 | ||
2544 | static void ack_apic_level(unsigned int irq) | 2615 | static void ack_apic_level(unsigned int irq) |
@@ -3120,13 +3191,13 @@ static int ioapic_resume(struct sys_device *dev) | |||
3120 | data = container_of(dev, struct sysfs_ioapic_data, dev); | 3191 | data = container_of(dev, struct sysfs_ioapic_data, dev); |
3121 | entry = data->entry; | 3192 | entry = data->entry; |
3122 | 3193 | ||
3123 | spin_lock_irqsave(&ioapic_lock, flags); | 3194 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
3124 | reg_00.raw = io_apic_read(dev->id, 0); | 3195 | reg_00.raw = io_apic_read(dev->id, 0); |
3125 | if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) { | 3196 | if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) { |
3126 | reg_00.bits.ID = mp_ioapics[dev->id].apicid; | 3197 | reg_00.bits.ID = mp_ioapics[dev->id].apicid; |
3127 | io_apic_write(dev->id, 0, reg_00.raw); | 3198 | io_apic_write(dev->id, 0, reg_00.raw); |
3128 | } | 3199 | } |
3129 | spin_unlock_irqrestore(&ioapic_lock, flags); | 3200 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
3130 | for (i = 0; i < nr_ioapic_registers[dev->id]; i++) | 3201 | for (i = 0; i < nr_ioapic_registers[dev->id]; i++) |
3131 | ioapic_write_entry(dev->id, i, entry[i]); | 3202 | ioapic_write_entry(dev->id, i, entry[i]); |
3132 | 3203 | ||
@@ -3189,7 +3260,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node) | |||
3189 | if (irq_want < nr_irqs_gsi) | 3260 | if (irq_want < nr_irqs_gsi) |
3190 | irq_want = nr_irqs_gsi; | 3261 | irq_want = nr_irqs_gsi; |
3191 | 3262 | ||
3192 | spin_lock_irqsave(&vector_lock, flags); | 3263 | raw_spin_lock_irqsave(&vector_lock, flags); |
3193 | for (new = irq_want; new < nr_irqs; new++) { | 3264 | for (new = irq_want; new < nr_irqs; new++) { |
3194 | desc_new = irq_to_desc_alloc_node(new, node); | 3265 | desc_new = irq_to_desc_alloc_node(new, node); |
3195 | if (!desc_new) { | 3266 | if (!desc_new) { |
@@ -3208,14 +3279,11 @@ unsigned int create_irq_nr(unsigned int irq_want, int node) | |||
3208 | irq = new; | 3279 | irq = new; |
3209 | break; | 3280 | break; |
3210 | } | 3281 | } |
3211 | spin_unlock_irqrestore(&vector_lock, flags); | 3282 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
3283 | |||
3284 | if (irq > 0) | ||
3285 | dynamic_irq_init_keep_chip_data(irq); | ||
3212 | 3286 | ||
3213 | if (irq > 0) { | ||
3214 | dynamic_irq_init(irq); | ||
3215 | /* restore it, in case dynamic_irq_init clear it */ | ||
3216 | if (desc_new) | ||
3217 | desc_new->chip_data = cfg_new; | ||
3218 | } | ||
3219 | return irq; | 3287 | return irq; |
3220 | } | 3288 | } |
3221 | 3289 | ||
@@ -3237,20 +3305,13 @@ int create_irq(void) | |||
3237 | void destroy_irq(unsigned int irq) | 3305 | void destroy_irq(unsigned int irq) |
3238 | { | 3306 | { |
3239 | unsigned long flags; | 3307 | unsigned long flags; |
3240 | struct irq_cfg *cfg; | ||
3241 | struct irq_desc *desc; | ||
3242 | 3308 | ||
3243 | /* store it, in case dynamic_irq_cleanup clear it */ | 3309 | dynamic_irq_cleanup_keep_chip_data(irq); |
3244 | desc = irq_to_desc(irq); | ||
3245 | cfg = desc->chip_data; | ||
3246 | dynamic_irq_cleanup(irq); | ||
3247 | /* connect back irq_cfg */ | ||
3248 | desc->chip_data = cfg; | ||
3249 | 3310 | ||
3250 | free_irte(irq); | 3311 | free_irte(irq); |
3251 | spin_lock_irqsave(&vector_lock, flags); | 3312 | raw_spin_lock_irqsave(&vector_lock, flags); |
3252 | __clear_irq_vector(irq, cfg); | 3313 | __clear_irq_vector(irq, get_irq_chip_data(irq)); |
3253 | spin_unlock_irqrestore(&vector_lock, flags); | 3314 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
3254 | } | 3315 | } |
3255 | 3316 | ||
3256 | /* | 3317 | /* |
@@ -3787,9 +3848,9 @@ int __init io_apic_get_redir_entries (int ioapic) | |||
3787 | union IO_APIC_reg_01 reg_01; | 3848 | union IO_APIC_reg_01 reg_01; |
3788 | unsigned long flags; | 3849 | unsigned long flags; |
3789 | 3850 | ||
3790 | spin_lock_irqsave(&ioapic_lock, flags); | 3851 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
3791 | reg_01.raw = io_apic_read(ioapic, 1); | 3852 | reg_01.raw = io_apic_read(ioapic, 1); |
3792 | spin_unlock_irqrestore(&ioapic_lock, flags); | 3853 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
3793 | 3854 | ||
3794 | return reg_01.bits.entries; | 3855 | return reg_01.bits.entries; |
3795 | } | 3856 | } |
@@ -3816,28 +3877,6 @@ void __init probe_nr_irqs_gsi(void) | |||
3816 | printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); | 3877 | printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); |
3817 | } | 3878 | } |
3818 | 3879 | ||
3819 | #ifdef CONFIG_SPARSE_IRQ | ||
3820 | int __init arch_probe_nr_irqs(void) | ||
3821 | { | ||
3822 | int nr; | ||
3823 | |||
3824 | if (nr_irqs > (NR_VECTORS * nr_cpu_ids)) | ||
3825 | nr_irqs = NR_VECTORS * nr_cpu_ids; | ||
3826 | |||
3827 | nr = nr_irqs_gsi + 8 * nr_cpu_ids; | ||
3828 | #if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ) | ||
3829 | /* | ||
3830 | * for MSI and HT dyn irq | ||
3831 | */ | ||
3832 | nr += nr_irqs_gsi * 16; | ||
3833 | #endif | ||
3834 | if (nr < nr_irqs) | ||
3835 | nr_irqs = nr; | ||
3836 | |||
3837 | return 0; | ||
3838 | } | ||
3839 | #endif | ||
3840 | |||
3841 | static int __io_apic_set_pci_routing(struct device *dev, int irq, | 3880 | static int __io_apic_set_pci_routing(struct device *dev, int irq, |
3842 | struct io_apic_irq_attr *irq_attr) | 3881 | struct io_apic_irq_attr *irq_attr) |
3843 | { | 3882 | { |
@@ -3951,9 +3990,9 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id) | |||
3951 | if (physids_empty(apic_id_map)) | 3990 | if (physids_empty(apic_id_map)) |
3952 | apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map); | 3991 | apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map); |
3953 | 3992 | ||
3954 | spin_lock_irqsave(&ioapic_lock, flags); | 3993 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
3955 | reg_00.raw = io_apic_read(ioapic, 0); | 3994 | reg_00.raw = io_apic_read(ioapic, 0); |
3956 | spin_unlock_irqrestore(&ioapic_lock, flags); | 3995 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
3957 | 3996 | ||
3958 | if (apic_id >= get_physical_broadcast()) { | 3997 | if (apic_id >= get_physical_broadcast()) { |
3959 | printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " | 3998 | printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " |
@@ -3987,10 +4026,10 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id) | |||
3987 | if (reg_00.bits.ID != apic_id) { | 4026 | if (reg_00.bits.ID != apic_id) { |
3988 | reg_00.bits.ID = apic_id; | 4027 | reg_00.bits.ID = apic_id; |
3989 | 4028 | ||
3990 | spin_lock_irqsave(&ioapic_lock, flags); | 4029 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
3991 | io_apic_write(ioapic, 0, reg_00.raw); | 4030 | io_apic_write(ioapic, 0, reg_00.raw); |
3992 | reg_00.raw = io_apic_read(ioapic, 0); | 4031 | reg_00.raw = io_apic_read(ioapic, 0); |
3993 | spin_unlock_irqrestore(&ioapic_lock, flags); | 4032 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
3994 | 4033 | ||
3995 | /* Sanity check */ | 4034 | /* Sanity check */ |
3996 | if (reg_00.bits.ID != apic_id) { | 4035 | if (reg_00.bits.ID != apic_id) { |
@@ -4011,9 +4050,9 @@ int __init io_apic_get_version(int ioapic) | |||
4011 | union IO_APIC_reg_01 reg_01; | 4050 | union IO_APIC_reg_01 reg_01; |
4012 | unsigned long flags; | 4051 | unsigned long flags; |
4013 | 4052 | ||
4014 | spin_lock_irqsave(&ioapic_lock, flags); | 4053 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
4015 | reg_01.raw = io_apic_read(ioapic, 1); | 4054 | reg_01.raw = io_apic_read(ioapic, 1); |
4016 | spin_unlock_irqrestore(&ioapic_lock, flags); | 4055 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
4017 | 4056 | ||
4018 | return reg_01.bits.version; | 4057 | return reg_01.bits.version; |
4019 | } | 4058 | } |
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c index 3817739acee9..f72b5f0f388e 100644 --- a/arch/x86/kernel/apic/nmi.c +++ b/arch/x86/kernel/apic/nmi.c | |||
@@ -416,13 +416,13 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason) | |||
416 | 416 | ||
417 | /* We can be called before check_nmi_watchdog, hence NULL check. */ | 417 | /* We can be called before check_nmi_watchdog, hence NULL check. */ |
418 | if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { | 418 | if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { |
419 | static DEFINE_SPINLOCK(lock); /* Serialise the printks */ | 419 | static DEFINE_RAW_SPINLOCK(lock); /* Serialise the printks */ |
420 | 420 | ||
421 | spin_lock(&lock); | 421 | raw_spin_lock(&lock); |
422 | printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); | 422 | printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); |
423 | show_regs(regs); | 423 | show_regs(regs); |
424 | dump_stack(); | 424 | dump_stack(); |
425 | spin_unlock(&lock); | 425 | raw_spin_unlock(&lock); |
426 | cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); | 426 | cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); |
427 | 427 | ||
428 | rc = 1; | 428 | rc = 1; |