aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/apic/io_apic.c
diff options
context:
space:
mode:
authorDimitri Sivanich <sivanich@sgi.com>2009-10-13 16:32:36 -0400
committerIngo Molnar <mingo@elte.hu>2009-10-14 03:17:09 -0400
commit9338ad6ffb70eca97f335d93c54943828c8b209e (patch)
tree40b259ecd99f08e42d5e51ba8b346cf8e10417bc /arch/x86/kernel/apic/io_apic.c
parent6c2c502910247d2820cb630e7b28fb6bdecdbf45 (diff)
x86, apic: Move SGI UV functionality out of generic IO-APIC code
Move UV specific functionality out of the generic IO-APIC code. Signed-off-by: Dimitri Sivanich <sivanich@sgi.com> LKML-Reference: <20091013203236.GD20543@sgi.com> [ Cleaned up the code some more in their new places. ] Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/apic/io_apic.c')
-rw-r--r--arch/x86/kernel/apic/io_apic.c140
1 files changed, 5 insertions, 135 deletions
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index bb52e7f6e953..ce16b65cfdcc 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -60,8 +60,6 @@
60#include <asm/irq_remapping.h> 60#include <asm/irq_remapping.h>
61#include <asm/hpet.h> 61#include <asm/hpet.h>
62#include <asm/hw_irq.h> 62#include <asm/hw_irq.h>
63#include <asm/uv/uv_hub.h>
64#include <asm/uv/uv_irq.h>
65 63
66#include <asm/apic.h> 64#include <asm/apic.h>
67 65
@@ -140,20 +138,6 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int node)
140 return pin; 138 return pin;
141} 139}
142 140
143/*
144 * This is performance-critical, we want to do it O(1)
145 *
146 * Most irqs are mapped 1:1 with pins.
147 */
148struct irq_cfg {
149 struct irq_pin_list *irq_2_pin;
150 cpumask_var_t domain;
151 cpumask_var_t old_domain;
152 unsigned move_cleanup_count;
153 u8 vector;
154 u8 move_in_progress : 1;
155};
156
157/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ 141/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
158#ifdef CONFIG_SPARSE_IRQ 142#ifdef CONFIG_SPARSE_IRQ
159static struct irq_cfg irq_cfgx[] = { 143static struct irq_cfg irq_cfgx[] = {
@@ -209,7 +193,7 @@ int __init arch_early_irq_init(void)
209} 193}
210 194
211#ifdef CONFIG_SPARSE_IRQ 195#ifdef CONFIG_SPARSE_IRQ
212static struct irq_cfg *irq_cfg(unsigned int irq) 196struct irq_cfg *irq_cfg(unsigned int irq)
213{ 197{
214 struct irq_cfg *cfg = NULL; 198 struct irq_cfg *cfg = NULL;
215 struct irq_desc *desc; 199 struct irq_desc *desc;
@@ -361,7 +345,7 @@ void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
361/* end for move_irq_desc */ 345/* end for move_irq_desc */
362 346
363#else 347#else
364static struct irq_cfg *irq_cfg(unsigned int irq) 348struct irq_cfg *irq_cfg(unsigned int irq)
365{ 349{
366 return irq < nr_irqs ? irq_cfgx + irq : NULL; 350 return irq < nr_irqs ? irq_cfgx + irq : NULL;
367} 351}
@@ -1237,8 +1221,7 @@ next:
1237 return err; 1221 return err;
1238} 1222}
1239 1223
1240static int 1224int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1241assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1242{ 1225{
1243 int err; 1226 int err;
1244 unsigned long flags; 1227 unsigned long flags;
@@ -2245,7 +2228,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
2245 */ 2228 */
2246 2229
2247#ifdef CONFIG_SMP 2230#ifdef CONFIG_SMP
2248static void send_cleanup_vector(struct irq_cfg *cfg) 2231void send_cleanup_vector(struct irq_cfg *cfg)
2249{ 2232{
2250 cpumask_var_t cleanup_mask; 2233 cpumask_var_t cleanup_mask;
2251 2234
@@ -2289,15 +2272,12 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
2289 } 2272 }
2290} 2273}
2291 2274
2292static int
2293assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
2294
2295/* 2275/*
2296 * Either sets desc->affinity to a valid value, and returns 2276 * Either sets desc->affinity to a valid value, and returns
2297 * ->cpu_mask_to_apicid of that, or returns BAD_APICID and 2277 * ->cpu_mask_to_apicid of that, or returns BAD_APICID and
2298 * leaves desc->affinity untouched. 2278 * leaves desc->affinity untouched.
2299 */ 2279 */
2300static unsigned int 2280unsigned int
2301set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask) 2281set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
2302{ 2282{
2303 struct irq_cfg *cfg; 2283 struct irq_cfg *cfg;
@@ -3725,116 +3705,6 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3725} 3705}
3726#endif /* CONFIG_HT_IRQ */ 3706#endif /* CONFIG_HT_IRQ */
3727 3707
3728#ifdef CONFIG_X86_UV
3729/*
3730 * Re-target the irq to the specified CPU and enable the specified MMR located
3731 * on the specified blade to allow the sending of MSIs to the specified CPU.
3732 */
3733int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
3734 unsigned long mmr_offset, int restrict)
3735{
3736 const struct cpumask *eligible_cpu = cpumask_of(cpu);
3737 struct irq_desc *desc = irq_to_desc(irq);
3738 struct irq_cfg *cfg;
3739 int mmr_pnode;
3740 unsigned long mmr_value;
3741 struct uv_IO_APIC_route_entry *entry;
3742 unsigned long flags;
3743 int err;
3744
3745 BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
3746
3747 cfg = irq_cfg(irq);
3748
3749 err = assign_irq_vector(irq, cfg, eligible_cpu);
3750 if (err != 0)
3751 return err;
3752
3753 if (restrict == UV_AFFINITY_CPU)
3754 desc->status |= IRQ_NO_BALANCING;
3755 else
3756 desc->status |= IRQ_MOVE_PCNTXT;
3757
3758 spin_lock_irqsave(&vector_lock, flags);
3759 set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
3760 irq_name);
3761 spin_unlock_irqrestore(&vector_lock, flags);
3762
3763 mmr_value = 0;
3764 entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
3765 entry->vector = cfg->vector;
3766 entry->delivery_mode = apic->irq_delivery_mode;
3767 entry->dest_mode = apic->irq_dest_mode;
3768 entry->polarity = 0;
3769 entry->trigger = 0;
3770 entry->mask = 0;
3771 entry->dest = apic->cpu_mask_to_apicid(eligible_cpu);
3772
3773 mmr_pnode = uv_blade_to_pnode(mmr_blade);
3774 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
3775
3776 if (cfg->move_in_progress)
3777 send_cleanup_vector(cfg);
3778
3779 return irq;
3780}
3781
3782/*
3783 * Disable the specified MMR located on the specified blade so that MSIs are
3784 * no longer allowed to be sent.
3785 */
3786void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
3787{
3788 unsigned long mmr_value;
3789 struct uv_IO_APIC_route_entry *entry;
3790
3791 BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
3792
3793 mmr_value = 0;
3794 entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
3795 entry->mask = 1;
3796
3797 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
3798}
3799
3800int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
3801{
3802 struct irq_desc *desc = irq_to_desc(irq);
3803 struct irq_cfg *cfg = desc->chip_data;
3804 unsigned int dest;
3805 unsigned long mmr_value;
3806 struct uv_IO_APIC_route_entry *entry;
3807 unsigned long mmr_offset;
3808 unsigned mmr_pnode;
3809
3810 dest = set_desc_affinity(desc, mask);
3811 if (dest == BAD_APICID)
3812 return -1;
3813
3814 mmr_value = 0;
3815 entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
3816
3817 entry->vector = cfg->vector;
3818 entry->delivery_mode = apic->irq_delivery_mode;
3819 entry->dest_mode = apic->irq_dest_mode;
3820 entry->polarity = 0;
3821 entry->trigger = 0;
3822 entry->mask = 0;
3823 entry->dest = dest;
3824
3825 /* Get previously stored MMR and pnode of hub sourcing interrupts */
3826 if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode))
3827 return -1;
3828
3829 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
3830
3831 if (cfg->move_in_progress)
3832 send_cleanup_vector(cfg);
3833
3834 return 0;
3835}
3836#endif /* CONFIG_X86_UV */
3837
3838int __init io_apic_get_redir_entries (int ioapic) 3708int __init io_apic_get_redir_entries (int ioapic)
3839{ 3709{
3840 union IO_APIC_reg_01 reg_01; 3710 union IO_APIC_reg_01 reg_01;