aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/apic
diff options
context:
space:
mode:
authorDimitri Sivanich <sivanich@sgi.com>2009-09-30 12:02:59 -0400
committerIngo Molnar <mingo@elte.hu>2009-10-14 03:17:01 -0400
commit6c2c502910247d2820cb630e7b28fb6bdecdbf45 (patch)
treed2bc695c68e57d853bcc66195628a98e34bb01ef /arch/x86/kernel/apic
parent2626eb2b2fd958dc0f683126aa84e93b939699a1 (diff)
x86: SGI UV: Fix irq affinity for hub based interrupts
This patch fixes handling of uv hub irq affinity. IRQs with ALL or NODE affinity can be routed to cpus other than their originally assigned cpu. Those with CPU affinity cannot be rerouted. Signed-off-by: Dimitri Sivanich <sivanich@sgi.com> LKML-Reference: <20090930160259.GA7822@sgi.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/apic')
-rw-r--r--arch/x86/kernel/apic/io_apic.c49
1 file changed, 45 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 8c718c93d079..bb52e7f6e953 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -3731,9 +3731,10 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
  * on the specified blade to allow the sending of MSIs to the specified CPU.
  */
 int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
-		       unsigned long mmr_offset)
+		       unsigned long mmr_offset, int restrict)
 {
 	const struct cpumask *eligible_cpu = cpumask_of(cpu);
+	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
 	int mmr_pnode;
 	unsigned long mmr_value;
@@ -3749,6 +3750,11 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 	if (err != 0)
 		return err;
 
+	if (restrict == UV_AFFINITY_CPU)
+		desc->status |= IRQ_NO_BALANCING;
+	else
+		desc->status |= IRQ_MOVE_PCNTXT;
+
 	spin_lock_irqsave(&vector_lock, flags);
 	set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
 				      irq_name);
@@ -3777,11 +3783,10 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
  * Disable the specified MMR located on the specified blade so that MSIs are
  * longer allowed to be sent.
  */
-void arch_disable_uv_irq(int mmr_blade, unsigned long mmr_offset)
+void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
 {
 	unsigned long mmr_value;
 	struct uv_IO_APIC_route_entry *entry;
-	int mmr_pnode;
 
 	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
 
@@ -3789,9 +3794,45 @@ void arch_disable_uv_irq(int mmr_blade, unsigned long mmr_offset)
 	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
 	entry->mask = 1;
 
-	mmr_pnode = uv_blade_to_pnode(mmr_blade);
 	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
 }
+
+int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_cfg *cfg = desc->chip_data;
+	unsigned int dest;
+	unsigned long mmr_value;
+	struct uv_IO_APIC_route_entry *entry;
+	unsigned long mmr_offset;
+	unsigned mmr_pnode;
+
+	dest = set_desc_affinity(desc, mask);
+	if (dest == BAD_APICID)
+		return -1;
+
+	mmr_value = 0;
+	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
+
+	entry->vector = cfg->vector;
+	entry->delivery_mode = apic->irq_delivery_mode;
+	entry->dest_mode = apic->irq_dest_mode;
+	entry->polarity = 0;
+	entry->trigger = 0;
+	entry->mask = 0;
+	entry->dest = dest;
+
+	/* Get previously stored MMR and pnode of hub sourcing interrupts */
+	if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode))
+		return -1;
+
+	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
+
+	if (cfg->move_in_progress)
+		send_cleanup_vector(cfg);
+
+	return 0;
+}
 #endif /* CONFIG_X86_64 */
 
 int __init io_apic_get_redir_entries (int ioapic)