author     Eric W. Biederman <ebiederm@xmission.com>    2007-02-23 06:40:58 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-02-26 13:34:08 -0500
commit     610142927b5bc149da92b03c7ab08b8b5f205b74 (patch)
tree       600c115072fc5f28c07dddfbe52f0dcf376d8504 /arch/x86_64/kernel/io_apic.c
parent     bc5e81a1519abc69472bb67deace7bb1ac09d65a (diff)
[PATCH] x86_64 irq: Safely cleanup an irq after moving it.
The problem: after moving an interrupt, when is it safe to tear down the data structures for receiving the interrupt at the old location?

With a normal pci device it is possible to issue a read to the device to flush all posted writes. This does not work for the oldest ioapics, because they sit on a 3-wire apic bus which is a completely different data path. For some more modern ioapics, when everything is using front side bus delivery, you can flush interrupts by simply issuing a read to the ioapic. For other modern ioapics, empirical testing has shown that this does not work.

So it appears the only reliable way to know that the last of the irqs sent by an ioapic before it was reprogrammed have been received is to receive the first irq from that ioapic after it was reprogrammed.

Once we know the last irq message has been received from an ioapic into a local apic, we then need to know that that irq message has been processed through the local apics.

Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
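In outline, the handshake the diff below implements has two stages: __assign_irq_vector() no longer clears the old per-cpu vector_irq entries when an irq is moved; it only records the old domain and sets move_in_progress. When the first interrupt then arrives through the new vector on a cpu in the new domain, irq_complete_move() knows the ioapic has switched over and sends an IRQ_MOVE_CLEANUP_VECTOR IPI to the cpus of the old domain; since that IPI trails any irq already queued in the old local apics, smp_irq_move_cleanup_interrupt() runs only after those have been handled and can safely free the old vectors. What follows is a condensed, commented restatement of those two functions for readability, not the patch itself: locking, ack_APIC_irq()/exit_idle()/irq_enter()/irq_exit() bookkeeping, and the CONFIG_SMP guards are omitted (see the diff for the real code).

/*
 * Stage 1 -- called from the irq ack path.  Seeing the first irq
 * arrive through the new vector on a cpu in the new domain proves
 * the ioapic has been reprogrammed, so the old vectors may now be
 * reclaimed.
 */
static void irq_complete_move(unsigned int irq)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	unsigned vector;

	if (likely(!cfg->move_in_progress))
		return;

	vector = ~get_irq_regs()->orig_rax;	/* vector this irq arrived on */
	if (vector == cfg->vector && cpu_isset(smp_processor_id(), cfg->domain)) {
		cpumask_t cleanup_mask;

		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
		/* The IPI is delivered through the old local apics behind
		 * any irq already queued there, so stage 2 runs after them. */
		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		cfg->move_in_progress = 0;
	}
}

/*
 * Stage 2 -- handler for IRQ_MOVE_CLEANUP_VECTOR on each cpu of the
 * old domain: release every per-cpu vector the irq no longer uses.
 */
asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me = smp_processor_id();

	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irq = __get_cpu_var(vector_irq)[vector];
		struct irq_cfg *cfg;

		if (irq >= NR_IRQS)			/* vector unused (-1) */
			continue;
		cfg = irq_cfg + irq;
		if (!cfg->move_cleanup_count)		/* no move pending */
			continue;
		if (vector == cfg->vector && cpu_isset(me, cfg->domain))
			continue;			/* irq's current home */

		__get_cpu_var(vector_irq)[vector] = -1;	/* old vector freed */
		cfg->move_cleanup_count--;
	}
}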
Diffstat (limited to 'arch/x86_64/kernel/io_apic.c')
-rw-r--r--  arch/x86_64/kernel/io_apic.c | 78
 1 file changed, 73 insertions(+), 5 deletions(-)
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 8dede0bd2267..48593f6b708f 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -36,6 +36,7 @@
 #include <acpi/acpi_bus.h>
 #endif
 
+#include <asm/idle.h>
 #include <asm/io.h>
 #include <asm/smp.h>
 #include <asm/desc.h>
@@ -49,7 +50,10 @@
 
 struct irq_cfg {
 	cpumask_t domain;
+	cpumask_t old_domain;
+	unsigned move_cleanup_count;
 	u8 vector;
+	u8 move_in_progress : 1;
 };
 
 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
@@ -652,7 +656,6 @@ static int __assign_irq_vector(int irq, cpumask_t mask)
 	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
 	 */
 	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
-	cpumask_t old_mask = CPU_MASK_NONE;
 	unsigned int old_vector;
 	int cpu;
 	struct irq_cfg *cfg;
@@ -663,18 +666,20 @@ static int __assign_irq_vector(int irq, cpumask_t mask)
 	/* Only try and allocate irqs on cpus that are present */
 	cpus_and(mask, mask, cpu_online_map);
 
+	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
+		return -EBUSY;
+
 	old_vector = cfg->vector;
 	if (old_vector) {
 		cpumask_t tmp;
 		cpus_and(tmp, cfg->domain, mask);
 		if (!cpus_empty(tmp))
 			return 0;
-		cpus_and(old_mask, cfg->domain, cpu_online_map);
 	}
 
 	for_each_cpu_mask(cpu, mask) {
 		cpumask_t domain, new_mask;
-		int new_cpu, old_cpu;
+		int new_cpu;
 		int vector, offset;
 
 		domain = vector_allocation_domain(cpu);
@@ -699,8 +704,10 @@ next:
 		/* Found one! */
 		current_vector = vector;
 		current_offset = offset;
-		for_each_cpu_mask(old_cpu, old_mask)
-			per_cpu(vector_irq, old_cpu)[old_vector] = -1;
+		if (old_vector) {
+			cfg->move_in_progress = 1;
+			cfg->old_domain = cfg->domain;
+		}
 		for_each_cpu_mask(new_cpu, new_mask)
 			per_cpu(vector_irq, new_cpu)[vector] = irq;
 		cfg->vector = vector;
@@ -1360,8 +1367,68 @@ static int ioapic_retrigger_irq(unsigned int irq)
  * races.
  */
 
+#ifdef CONFIG_SMP
+asmlinkage void smp_irq_move_cleanup_interrupt(void)
+{
+	unsigned vector, me;
+	ack_APIC_irq();
+	exit_idle();
+	irq_enter();
+
+	me = smp_processor_id();
+	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
+		unsigned int irq;
+		struct irq_desc *desc;
+		struct irq_cfg *cfg;
+		irq = __get_cpu_var(vector_irq)[vector];
+		if (irq >= NR_IRQS)
+			continue;
+
+		desc = irq_desc + irq;
+		cfg = irq_cfg + irq;
+		spin_lock(&desc->lock);
+		if (!cfg->move_cleanup_count)
+			goto unlock;
+
+		if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
+			goto unlock;
+
+		__get_cpu_var(vector_irq)[vector] = -1;
+		cfg->move_cleanup_count--;
+unlock:
+		spin_unlock(&desc->lock);
+	}
+
+	irq_exit();
+}
+
+static void irq_complete_move(unsigned int irq)
+{
+	struct irq_cfg *cfg = irq_cfg + irq;
+	unsigned vector, me;
+
+	if (likely(!cfg->move_in_progress))
+		return;
+
+	vector = ~get_irq_regs()->orig_rax;
+	me = smp_processor_id();
+	if ((vector == cfg->vector) &&
+	    cpu_isset(smp_processor_id(), cfg->domain)) {
+		cpumask_t cleanup_mask;
+
+		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
+		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
+		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+		cfg->move_in_progress = 0;
+	}
+}
+#else
+static inline void irq_complete_move(unsigned int irq) {}
+#endif
+
 static void ack_apic_edge(unsigned int irq)
 {
+	irq_complete_move(irq);
 	move_native_irq(irq);
 	ack_APIC_irq();
 }
@@ -1370,6 +1437,7 @@ static void ack_apic_level(unsigned int irq)
 {
 	int do_unmask_irq = 0;
 
+	irq_complete_move(irq);
 #if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
 	/* If we are moving the irq we need to mask it */
 	if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {