Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/include/asm/hw_irq.h    |  29
-rw-r--r--	arch/x86/include/asm/uv/uv_irq.h |   7
-rw-r--r--	arch/x86/kernel/apic/io_apic.c   | 140
-rw-r--r--	arch/x86/kernel/uv_irq.c         | 123
4 files changed, 145 insertions(+), 154 deletions(-)
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index ba180d93b08c..56f0877c9329 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -79,14 +79,31 @@ static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
 					int ioapic, int ioapic_pin,
 					int trigger, int polarity)
 {
 	irq_attr->ioapic	= ioapic;
 	irq_attr->ioapic_pin	= ioapic_pin;
 	irq_attr->trigger	= trigger;
 	irq_attr->polarity	= polarity;
 }
 
-extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin,
-				      struct io_apic_irq_attr *irq_attr);
+/*
+ * This is performance-critical, we want to do it O(1)
+ *
+ * Most irqs are mapped 1:1 with pins.
+ */
+struct irq_cfg {
+	struct irq_pin_list	*irq_2_pin;
+	cpumask_var_t		domain;
+	cpumask_var_t		old_domain;
+	unsigned		move_cleanup_count;
+	u8			vector;
+	u8			move_in_progress : 1;
+};
+
+extern struct irq_cfg *irq_cfg(unsigned int);
+extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *);
+extern void send_cleanup_vector(struct irq_cfg *);
+extern unsigned int set_desc_affinity(struct irq_desc *, const struct cpumask *);
+extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr);
 extern void setup_ioapic_dest(void);
 
 extern void enable_IO_APIC(void);
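The four declarations added above are the crux of the patch: irq_cfg(), assign_irq_vector(), send_cleanup_vector() and set_desc_affinity() were previously static to io_apic.c, and exporting them is what lets the UV code move into uv_irq.c. As a sketch (not part of the patch), the call pattern an outside user follows with these helpers looks like this; the function name and the hardware-programming step are hypothetical:

/* Sketch only: bind an irq to a single CPU via the newly-exported helpers. */
static int example_bind_irq_to_cpu(unsigned int irq, int cpu)
{
	const struct cpumask *eligible_cpu = cpumask_of(cpu);
	struct irq_cfg *cfg = irq_cfg(irq);	/* O(1) lookup of per-irq state */
	int err;

	err = assign_irq_vector(irq, cfg, eligible_cpu);	/* picks cfg->vector */
	if (err != 0)
		return err;

	/* ... program the interrupt source with cfg->vector here ... */

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);	/* retire the old vector on the old CPUs */
	return 0;
}

This mirrors arch_enable_uv_irq() as it lands in the uv_irq.c hunk further down.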
diff --git a/arch/x86/include/asm/uv/uv_irq.h b/arch/x86/include/asm/uv/uv_irq.h
index 5397e1290952..d6b17c760622 100644
--- a/arch/x86/include/asm/uv/uv_irq.h
+++ b/arch/x86/include/asm/uv/uv_irq.h
@@ -31,13 +31,6 @@ enum {
 	UV_AFFINITY_CPU
 };
 
-extern struct irq_chip uv_irq_chip;
-
-extern int
-arch_enable_uv_irq(char *, unsigned int, int, int, unsigned long, int);
-extern void arch_disable_uv_irq(int, unsigned long);
-extern int uv_set_irq_affinity(unsigned int, const struct cpumask *);
-
 extern int uv_irq_2_mmr_info(int, unsigned long *, int *);
 extern int uv_setup_irq(char *, int, int, unsigned long, int);
 extern void uv_teardown_irq(unsigned int);
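After this removal the header carries only the three-function public interface above. A hedged usage sketch of that interface, with parameter roles inferred from arch_enable_uv_irq() in the uv_irq.c hunk below; the name, blade number and MMR offset are made-up placeholders, not values from the patch:

/* Sketch only: allocate and later tear down a UV irq. All values hypothetical. */
static int example_irq;

static int example_init(void)
{
	/* assumed order: uv_setup_irq(irq_name, cpu, mmr_blade, mmr_offset, restrict) */
	example_irq = uv_setup_irq("example", 0, 0,
				   0x0 /* hypothetical MMR offset */,
				   UV_AFFINITY_CPU);
	return example_irq < 0 ? example_irq : 0;
}

static void example_exit(void)
{
	uv_teardown_irq(example_irq);
}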
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index bb52e7f6e953..ce16b65cfdcc 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -60,8 +60,6 @@
 #include <asm/irq_remapping.h>
 #include <asm/hpet.h>
 #include <asm/hw_irq.h>
-#include <asm/uv/uv_hub.h>
-#include <asm/uv/uv_irq.h>
 
 #include <asm/apic.h>
 
@@ -140,20 +138,6 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int node)
 	return pin;
 }
 
-/*
- * This is performance-critical, we want to do it O(1)
- *
- * Most irqs are mapped 1:1 with pins.
- */
-struct irq_cfg {
-	struct irq_pin_list	*irq_2_pin;
-	cpumask_var_t		domain;
-	cpumask_var_t		old_domain;
-	unsigned		move_cleanup_count;
-	u8			vector;
-	u8			move_in_progress : 1;
-};
-
 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
 #ifdef CONFIG_SPARSE_IRQ
 static struct irq_cfg irq_cfgx[] = {
@@ -209,7 +193,7 @@ int __init arch_early_irq_init(void)
 }
 
 #ifdef CONFIG_SPARSE_IRQ
-static struct irq_cfg *irq_cfg(unsigned int irq)
+struct irq_cfg *irq_cfg(unsigned int irq)
 {
 	struct irq_cfg *cfg = NULL;
 	struct irq_desc *desc;
@@ -361,7 +345,7 @@ void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
 /* end for move_irq_desc */
 
 #else
-static struct irq_cfg *irq_cfg(unsigned int irq)
+struct irq_cfg *irq_cfg(unsigned int irq)
 {
 	return irq < nr_irqs ? irq_cfgx + irq : NULL;
 }
@@ -1237,8 +1221,7 @@ next:
 	return err;
 }
 
-static int
-assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
+int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 {
 	int err;
 	unsigned long flags;
@@ -2245,7 +2228,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
  */
 
 #ifdef CONFIG_SMP
-static void send_cleanup_vector(struct irq_cfg *cfg)
+void send_cleanup_vector(struct irq_cfg *cfg)
 {
 	cpumask_var_t cleanup_mask;
 
@@ -2289,15 +2272,12 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
 	}
 }
 
-static int
-assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
-
 /*
  * Either sets desc->affinity to a valid value, and returns
  * ->cpu_mask_to_apicid of that, or returns BAD_APICID and
  * leaves desc->affinity untouched.
  */
-static unsigned int
+unsigned int
 set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg;
@@ -3725,116 +3705,6 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 }
 #endif /* CONFIG_HT_IRQ */
 
-#ifdef CONFIG_X86_UV
-/*
- * Re-target the irq to the specified CPU and enable the specified MMR located
- * on the specified blade to allow the sending of MSIs to the specified CPU.
- */
-int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
-		       unsigned long mmr_offset, int restrict)
-{
-	const struct cpumask *eligible_cpu = cpumask_of(cpu);
-	struct irq_desc *desc = irq_to_desc(irq);
-	struct irq_cfg *cfg;
-	int mmr_pnode;
-	unsigned long mmr_value;
-	struct uv_IO_APIC_route_entry *entry;
-	unsigned long flags;
-	int err;
-
-	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
-
-	cfg = irq_cfg(irq);
-
-	err = assign_irq_vector(irq, cfg, eligible_cpu);
-	if (err != 0)
-		return err;
-
-	if (restrict == UV_AFFINITY_CPU)
-		desc->status |= IRQ_NO_BALANCING;
-	else
-		desc->status |= IRQ_MOVE_PCNTXT;
-
-	spin_lock_irqsave(&vector_lock, flags);
-	set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
-				      irq_name);
-	spin_unlock_irqrestore(&vector_lock, flags);
-
-	mmr_value = 0;
-	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
-	entry->vector		= cfg->vector;
-	entry->delivery_mode	= apic->irq_delivery_mode;
-	entry->dest_mode	= apic->irq_dest_mode;
-	entry->polarity		= 0;
-	entry->trigger		= 0;
-	entry->mask		= 0;
-	entry->dest		= apic->cpu_mask_to_apicid(eligible_cpu);
-
-	mmr_pnode = uv_blade_to_pnode(mmr_blade);
-	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
-
-	if (cfg->move_in_progress)
-		send_cleanup_vector(cfg);
-
-	return irq;
-}
-
-/*
- * Disable the specified MMR located on the specified blade so that MSIs are
- * no longer allowed to be sent.
- */
-void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
-{
-	unsigned long mmr_value;
-	struct uv_IO_APIC_route_entry *entry;
-
-	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
-
-	mmr_value = 0;
-	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
-	entry->mask = 1;
-
-	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
-}
-
-int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	struct irq_cfg *cfg = desc->chip_data;
-	unsigned int dest;
-	unsigned long mmr_value;
-	struct uv_IO_APIC_route_entry *entry;
-	unsigned long mmr_offset;
-	unsigned mmr_pnode;
-
-	dest = set_desc_affinity(desc, mask);
-	if (dest == BAD_APICID)
-		return -1;
-
-	mmr_value = 0;
-	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
-
-	entry->vector		= cfg->vector;
-	entry->delivery_mode	= apic->irq_delivery_mode;
-	entry->dest_mode	= apic->irq_dest_mode;
-	entry->polarity		= 0;
-	entry->trigger		= 0;
-	entry->mask		= 0;
-	entry->dest		= dest;
-
-	/* Get previously stored MMR and pnode of hub sourcing interrupts */
-	if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode))
-		return -1;
-
-	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
-
-	if (cfg->move_in_progress)
-		send_cleanup_vector(cfg);
-
-	return 0;
-}
-#endif /* CONFIG_X86_64 */
-
 int __init io_apic_get_redir_entries (int ioapic)
 {
 	union IO_APIC_reg_01 reg_01;
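Before the uv_irq.c side of the move, it is worth spelling out the ->set_affinity contract that set_desc_affinity() supports, since uv_set_irq_affinity() below is a direct instance of it: resolve the new mask to a destination APIC ID (or bail out on BAD_APICID), reprogram the interrupt source, then flush any in-flight vector move. A condensed sketch; example_program_route() is a hypothetical stand-in for the hardware write (the UV code writes an MMR instead):

/* Sketch only: the generic shape of a ->set_affinity method on these helpers. */
static int example_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg = desc->chip_data;
	unsigned int dest;

	dest = set_desc_affinity(desc, mask);	/* BAD_APICID if the mask is invalid */
	if (dest == BAD_APICID)
		return -1;

	example_program_route(cfg->vector, dest);	/* hypothetical hardware write */

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);
	return 0;
}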
diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c
index 9a83775ab0f3..61d805df4c91 100644
--- a/arch/x86/kernel/uv_irq.c
+++ b/arch/x86/kernel/uv_irq.c
@@ -18,13 +18,16 @@
 
 /* MMR offset and pnode of hub sourcing interrupts for a given irq */
 struct uv_irq_2_mmr_pnode{
 	struct rb_node		list;
 	unsigned long		offset;
 	int			pnode;
 	int			irq;
 };
-static spinlock_t uv_irq_lock;
-static struct rb_root uv_irq_root;
+
+static spinlock_t		uv_irq_lock;
+static struct rb_root		uv_irq_root;
+
+static int uv_set_irq_affinity(unsigned int, const struct cpumask *);
 
 static void uv_noop(unsigned int irq)
 {
@@ -132,6 +135,114 @@ int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
 }
 
 /*
+ * Re-target the irq to the specified CPU and enable the specified MMR located
+ * on the specified blade to allow the sending of MSIs to the specified CPU.
+ */
+static int
+arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
+		   unsigned long mmr_offset, int restrict)
+{
+	const struct cpumask *eligible_cpu = cpumask_of(cpu);
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_cfg *cfg;
+	int mmr_pnode;
+	unsigned long mmr_value;
+	struct uv_IO_APIC_route_entry *entry;
+	int err;
+
+	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
+			sizeof(unsigned long));
+
+	cfg = irq_cfg(irq);
+
+	err = assign_irq_vector(irq, cfg, eligible_cpu);
+	if (err != 0)
+		return err;
+
+	if (restrict == UV_AFFINITY_CPU)
+		desc->status |= IRQ_NO_BALANCING;
+	else
+		desc->status |= IRQ_MOVE_PCNTXT;
+
+	set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
+				      irq_name);
+
+	mmr_value = 0;
+	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
+	entry->vector		= cfg->vector;
+	entry->delivery_mode	= apic->irq_delivery_mode;
+	entry->dest_mode	= apic->irq_dest_mode;
+	entry->polarity		= 0;
+	entry->trigger		= 0;
+	entry->mask		= 0;
+	entry->dest		= apic->cpu_mask_to_apicid(eligible_cpu);
+
+	mmr_pnode = uv_blade_to_pnode(mmr_blade);
+	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
+
+	if (cfg->move_in_progress)
+		send_cleanup_vector(cfg);
+
+	return irq;
+}
+
+/*
+ * Disable the specified MMR located on the specified blade so that MSIs are
+ * no longer allowed to be sent.
+ */
+static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
+{
+	unsigned long mmr_value;
+	struct uv_IO_APIC_route_entry *entry;
+
+	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
+			sizeof(unsigned long));
+
+	mmr_value = 0;
+	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
+	entry->mask = 1;
+
+	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
+}
+
+static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_cfg *cfg = desc->chip_data;
+	unsigned int dest;
+	unsigned long mmr_value;
+	struct uv_IO_APIC_route_entry *entry;
+	unsigned long mmr_offset;
+	unsigned mmr_pnode;
+
+	dest = set_desc_affinity(desc, mask);
+	if (dest == BAD_APICID)
+		return -1;
+
+	mmr_value = 0;
+	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
+
+	entry->vector		= cfg->vector;
+	entry->delivery_mode	= apic->irq_delivery_mode;
+	entry->dest_mode	= apic->irq_dest_mode;
+	entry->polarity		= 0;
+	entry->trigger		= 0;
+	entry->mask		= 0;
+	entry->dest		= dest;
+
+	/* Get previously stored MMR and pnode of hub sourcing interrupts */
+	if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode))
+		return -1;
+
+	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
+
+	if (cfg->move_in_progress)
+		send_cleanup_vector(cfg);
+
+	return 0;
+}
+
+/*
  * Set up a mapping of an available irq and vector, and enable the specified
  * MMR that defines the MSI that is to be sent to the specified CPU when an
  * interrupt is raised.