diff options
37 files changed, 1607 insertions, 1412 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 2995788bcb1d..ba397bde7948 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -883,11 +883,11 @@ config X86_UP_IOAPIC | |||
883 | config X86_LOCAL_APIC | 883 | config X86_LOCAL_APIC |
884 | def_bool y | 884 | def_bool y |
885 | depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC || PCI_MSI | 885 | depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC || PCI_MSI |
886 | select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ | ||
886 | 887 | ||
887 | config X86_IO_APIC | 888 | config X86_IO_APIC |
888 | def_bool y | 889 | def_bool X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC |
889 | depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC || PCI_MSI | 890 | depends on X86_LOCAL_APIC |
890 | select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ | ||
891 | select IRQ_DOMAIN | 891 | select IRQ_DOMAIN |
892 | 892 | ||
893 | config X86_REROUTE_FOR_BROKEN_BOOT_IRQS | 893 | config X86_REROUTE_FOR_BROKEN_BOOT_IRQS |
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 4615906d83df..9662290e0b20 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h | |||
@@ -94,30 +94,7 @@ extern void trace_call_function_single_interrupt(void); | |||
94 | #define trace_kvm_posted_intr_ipi kvm_posted_intr_ipi | 94 | #define trace_kvm_posted_intr_ipi kvm_posted_intr_ipi |
95 | #endif /* CONFIG_TRACING */ | 95 | #endif /* CONFIG_TRACING */ |
96 | 96 | ||
97 | /* IOAPIC */ | 97 | #ifdef CONFIG_IRQ_REMAP |
98 | #define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs)) | ||
99 | extern unsigned long io_apic_irqs; | ||
100 | |||
101 | extern void setup_IO_APIC(void); | ||
102 | extern void disable_IO_APIC(void); | ||
103 | |||
104 | struct io_apic_irq_attr { | ||
105 | int ioapic; | ||
106 | int ioapic_pin; | ||
107 | int trigger; | ||
108 | int polarity; | ||
109 | }; | ||
110 | |||
111 | static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr, | ||
112 | int ioapic, int ioapic_pin, | ||
113 | int trigger, int polarity) | ||
114 | { | ||
115 | irq_attr->ioapic = ioapic; | ||
116 | irq_attr->ioapic_pin = ioapic_pin; | ||
117 | irq_attr->trigger = trigger; | ||
118 | irq_attr->polarity = polarity; | ||
119 | } | ||
120 | |||
121 | /* Intel specific interrupt remapping information */ | 98 | /* Intel specific interrupt remapping information */ |
122 | struct irq_2_iommu { | 99 | struct irq_2_iommu { |
123 | struct intel_iommu *iommu; | 100 | struct intel_iommu *iommu; |
@@ -131,14 +108,12 @@ struct irq_2_irte { | |||
131 | u16 devid; /* Device ID for IRTE table */ | 108 | u16 devid; /* Device ID for IRTE table */ |
132 | u16 index; /* Index into IRTE table*/ | 109 | u16 index; /* Index into IRTE table*/ |
133 | }; | 110 | }; |
111 | #endif /* CONFIG_IRQ_REMAP */ | ||
112 | |||
113 | #ifdef CONFIG_X86_LOCAL_APIC | ||
114 | struct irq_data; | ||
134 | 115 | ||
135 | /* | ||
136 | * This is performance-critical, we want to do it O(1) | ||
137 | * | ||
138 | * Most irqs are mapped 1:1 with pins. | ||
139 | */ | ||
140 | struct irq_cfg { | 116 | struct irq_cfg { |
141 | struct irq_pin_list *irq_2_pin; | ||
142 | cpumask_var_t domain; | 117 | cpumask_var_t domain; |
143 | cpumask_var_t old_domain; | 118 | cpumask_var_t old_domain; |
144 | u8 vector; | 119 | u8 vector; |
@@ -150,18 +125,39 @@ struct irq_cfg { | |||
150 | struct irq_2_irte irq_2_irte; | 125 | struct irq_2_irte irq_2_irte; |
151 | }; | 126 | }; |
152 | #endif | 127 | #endif |
128 | union { | ||
129 | #ifdef CONFIG_X86_IO_APIC | ||
130 | struct { | ||
131 | struct list_head irq_2_pin; | ||
132 | }; | ||
133 | #endif | ||
134 | }; | ||
153 | }; | 135 | }; |
154 | 136 | ||
137 | extern struct irq_cfg *irq_cfg(unsigned int irq); | ||
138 | extern struct irq_cfg *irqd_cfg(struct irq_data *irq_data); | ||
139 | extern struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node); | ||
140 | extern void lock_vector_lock(void); | ||
141 | extern void unlock_vector_lock(void); | ||
155 | extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *); | 142 | extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *); |
143 | extern void clear_irq_vector(int irq, struct irq_cfg *cfg); | ||
144 | extern void setup_vector_irq(int cpu); | ||
145 | #ifdef CONFIG_SMP | ||
156 | extern void send_cleanup_vector(struct irq_cfg *); | 146 | extern void send_cleanup_vector(struct irq_cfg *); |
147 | extern void irq_complete_move(struct irq_cfg *cfg); | ||
148 | #else | ||
149 | static inline void send_cleanup_vector(struct irq_cfg *c) { } | ||
150 | static inline void irq_complete_move(struct irq_cfg *c) { } | ||
151 | #endif | ||
157 | 152 | ||
158 | struct irq_data; | 153 | extern int apic_retrigger_irq(struct irq_data *data); |
159 | int __ioapic_set_affinity(struct irq_data *, const struct cpumask *, | 154 | extern void apic_ack_edge(struct irq_data *data); |
160 | unsigned int *dest_id); | 155 | extern int apic_set_affinity(struct irq_data *data, const struct cpumask *mask, |
161 | extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr); | 156 | unsigned int *dest_id); |
162 | extern void setup_ioapic_dest(void); | 157 | #else /* CONFIG_X86_LOCAL_APIC */ |
163 | 158 | static inline void lock_vector_lock(void) {} | |
164 | extern void enable_IO_APIC(void); | 159 | static inline void unlock_vector_lock(void) {} |
160 | #endif /* CONFIG_X86_LOCAL_APIC */ | ||
165 | 161 | ||
166 | /* Statistics */ | 162 | /* Statistics */ |
167 | extern atomic_t irq_err_count; | 163 | extern atomic_t irq_err_count; |
@@ -185,7 +181,8 @@ extern __visible void smp_call_function_single_interrupt(struct pt_regs *); | |||
185 | extern __visible void smp_invalidate_interrupt(struct pt_regs *); | 181 | extern __visible void smp_invalidate_interrupt(struct pt_regs *); |
186 | #endif | 182 | #endif |
187 | 183 | ||
188 | extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void); | 184 | extern void (*__initconst interrupt[FIRST_SYSTEM_VECTOR |
185 | - FIRST_EXTERNAL_VECTOR])(void); | ||
189 | #ifdef CONFIG_TRACING | 186 | #ifdef CONFIG_TRACING |
190 | #define trace_interrupt interrupt | 187 | #define trace_interrupt interrupt |
191 | #endif | 188 | #endif |
@@ -195,17 +192,6 @@ extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void); | |||
195 | 192 | ||
196 | typedef int vector_irq_t[NR_VECTORS]; | 193 | typedef int vector_irq_t[NR_VECTORS]; |
197 | DECLARE_PER_CPU(vector_irq_t, vector_irq); | 194 | DECLARE_PER_CPU(vector_irq_t, vector_irq); |
198 | extern void setup_vector_irq(int cpu); | ||
199 | |||
200 | #ifdef CONFIG_X86_IO_APIC | ||
201 | extern void lock_vector_lock(void); | ||
202 | extern void unlock_vector_lock(void); | ||
203 | extern void __setup_vector_irq(int cpu); | ||
204 | #else | ||
205 | static inline void lock_vector_lock(void) {} | ||
206 | static inline void unlock_vector_lock(void) {} | ||
207 | static inline void __setup_vector_irq(int cpu) {} | ||
208 | #endif | ||
209 | 195 | ||
210 | #endif /* !ASSEMBLY_ */ | 196 | #endif /* !ASSEMBLY_ */ |
211 | 197 | ||
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 1733ab49ac5e..bf006cce9418 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h | |||
@@ -132,6 +132,10 @@ extern int noioapicquirk; | |||
132 | /* -1 if "noapic" boot option passed */ | 132 | /* -1 if "noapic" boot option passed */ |
133 | extern int noioapicreroute; | 133 | extern int noioapicreroute; |
134 | 134 | ||
135 | extern unsigned long io_apic_irqs; | ||
136 | |||
137 | #define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1 << (x)) & io_apic_irqs)) | ||
138 | |||
135 | /* | 139 | /* |
136 | * If we use the IO-APIC for IRQ routing, disable automatic | 140 | * If we use the IO-APIC for IRQ routing, disable automatic |
137 | * assignment of PCI IRQ's. | 141 | * assignment of PCI IRQ's. |
@@ -139,18 +143,15 @@ extern int noioapicreroute; | |||
139 | #define io_apic_assign_pci_irqs \ | 143 | #define io_apic_assign_pci_irqs \ |
140 | (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) | 144 | (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) |
141 | 145 | ||
142 | struct io_apic_irq_attr; | ||
143 | struct irq_cfg; | 146 | struct irq_cfg; |
144 | extern void ioapic_insert_resources(void); | 147 | extern void ioapic_insert_resources(void); |
148 | extern int arch_early_ioapic_init(void); | ||
145 | 149 | ||
146 | extern int native_setup_ioapic_entry(int, struct IO_APIC_route_entry *, | 150 | extern int native_setup_ioapic_entry(int, struct IO_APIC_route_entry *, |
147 | unsigned int, int, | 151 | unsigned int, int, |
148 | struct io_apic_irq_attr *); | 152 | struct io_apic_irq_attr *); |
149 | extern void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg); | 153 | extern void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg); |
150 | 154 | ||
151 | extern void native_compose_msi_msg(struct pci_dev *pdev, | ||
152 | unsigned int irq, unsigned int dest, | ||
153 | struct msi_msg *msg, u8 hpet_id); | ||
154 | extern void native_eoi_ioapic_pin(int apic, int pin, int vector); | 155 | extern void native_eoi_ioapic_pin(int apic, int pin, int vector); |
155 | 156 | ||
156 | extern int save_ioapic_entries(void); | 157 | extern int save_ioapic_entries(void); |
@@ -160,6 +161,13 @@ extern int restore_ioapic_entries(void); | |||
160 | extern void setup_ioapic_ids_from_mpc(void); | 161 | extern void setup_ioapic_ids_from_mpc(void); |
161 | extern void setup_ioapic_ids_from_mpc_nocheck(void); | 162 | extern void setup_ioapic_ids_from_mpc_nocheck(void); |
162 | 163 | ||
164 | struct io_apic_irq_attr { | ||
165 | int ioapic; | ||
166 | int ioapic_pin; | ||
167 | int trigger; | ||
168 | int polarity; | ||
169 | }; | ||
170 | |||
163 | enum ioapic_domain_type { | 171 | enum ioapic_domain_type { |
164 | IOAPIC_DOMAIN_INVALID, | 172 | IOAPIC_DOMAIN_INVALID, |
165 | IOAPIC_DOMAIN_LEGACY, | 173 | IOAPIC_DOMAIN_LEGACY, |
@@ -188,8 +196,10 @@ extern int mp_find_ioapic_pin(int ioapic, u32 gsi); | |||
188 | extern u32 mp_pin_to_gsi(int ioapic, int pin); | 196 | extern u32 mp_pin_to_gsi(int ioapic, int pin); |
189 | extern int mp_map_gsi_to_irq(u32 gsi, unsigned int flags); | 197 | extern int mp_map_gsi_to_irq(u32 gsi, unsigned int flags); |
190 | extern void mp_unmap_irq(int irq); | 198 | extern void mp_unmap_irq(int irq); |
191 | extern void __init mp_register_ioapic(int id, u32 address, u32 gsi_base, | 199 | extern int mp_register_ioapic(int id, u32 address, u32 gsi_base, |
192 | struct ioapic_domain_cfg *cfg); | 200 | struct ioapic_domain_cfg *cfg); |
201 | extern int mp_unregister_ioapic(u32 gsi_base); | ||
202 | extern int mp_ioapic_registered(u32 gsi_base); | ||
193 | extern int mp_irqdomain_map(struct irq_domain *domain, unsigned int virq, | 203 | extern int mp_irqdomain_map(struct irq_domain *domain, unsigned int virq, |
194 | irq_hw_number_t hwirq); | 204 | irq_hw_number_t hwirq); |
195 | extern void mp_irqdomain_unmap(struct irq_domain *domain, unsigned int virq); | 205 | extern void mp_irqdomain_unmap(struct irq_domain *domain, unsigned int virq); |
@@ -227,19 +237,25 @@ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned | |||
227 | 237 | ||
228 | extern void io_apic_eoi(unsigned int apic, unsigned int vector); | 238 | extern void io_apic_eoi(unsigned int apic, unsigned int vector); |
229 | 239 | ||
230 | extern bool mp_should_keep_irq(struct device *dev); | 240 | extern void setup_IO_APIC(void); |
231 | 241 | extern void enable_IO_APIC(void); | |
242 | extern void disable_IO_APIC(void); | ||
243 | extern void setup_ioapic_dest(void); | ||
244 | extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin); | ||
245 | extern void print_IO_APICs(void); | ||
232 | #else /* !CONFIG_X86_IO_APIC */ | 246 | #else /* !CONFIG_X86_IO_APIC */ |
233 | 247 | ||
248 | #define IO_APIC_IRQ(x) 0 | ||
234 | #define io_apic_assign_pci_irqs 0 | 249 | #define io_apic_assign_pci_irqs 0 |
235 | #define setup_ioapic_ids_from_mpc x86_init_noop | 250 | #define setup_ioapic_ids_from_mpc x86_init_noop |
236 | static inline void ioapic_insert_resources(void) { } | 251 | static inline void ioapic_insert_resources(void) { } |
252 | static inline int arch_early_ioapic_init(void) { return 0; } | ||
253 | static inline void print_IO_APICs(void) {} | ||
237 | #define gsi_top (NR_IRQS_LEGACY) | 254 | #define gsi_top (NR_IRQS_LEGACY) |
238 | static inline int mp_find_ioapic(u32 gsi) { return 0; } | 255 | static inline int mp_find_ioapic(u32 gsi) { return 0; } |
239 | static inline u32 mp_pin_to_gsi(int ioapic, int pin) { return UINT_MAX; } | 256 | static inline u32 mp_pin_to_gsi(int ioapic, int pin) { return UINT_MAX; } |
240 | static inline int mp_map_gsi_to_irq(u32 gsi, unsigned int flags) { return gsi; } | 257 | static inline int mp_map_gsi_to_irq(u32 gsi, unsigned int flags) { return gsi; } |
241 | static inline void mp_unmap_irq(int irq) { } | 258 | static inline void mp_unmap_irq(int irq) { } |
242 | static inline bool mp_should_keep_irq(struct device *dev) { return 1; } | ||
243 | 259 | ||
244 | static inline int save_ioapic_entries(void) | 260 | static inline int save_ioapic_entries(void) |
245 | { | 261 | { |
@@ -262,7 +278,6 @@ static inline void disable_ioapic_support(void) { } | |||
262 | #define native_io_apic_print_entries NULL | 278 | #define native_io_apic_print_entries NULL |
263 | #define native_ioapic_set_affinity NULL | 279 | #define native_ioapic_set_affinity NULL |
264 | #define native_setup_ioapic_entry NULL | 280 | #define native_setup_ioapic_entry NULL |
265 | #define native_compose_msi_msg NULL | ||
266 | #define native_eoi_ioapic_pin NULL | 281 | #define native_eoi_ioapic_pin NULL |
267 | #endif | 282 | #endif |
268 | 283 | ||
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 5702d7e3111d..666c89ec4bd7 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h | |||
@@ -126,6 +126,12 @@ | |||
126 | 126 | ||
127 | #define NR_VECTORS 256 | 127 | #define NR_VECTORS 256 |
128 | 128 | ||
129 | #ifdef CONFIG_X86_LOCAL_APIC | ||
130 | #define FIRST_SYSTEM_VECTOR LOCAL_TIMER_VECTOR | ||
131 | #else | ||
132 | #define FIRST_SYSTEM_VECTOR NR_VECTORS | ||
133 | #endif | ||
134 | |||
129 | #define FPU_IRQ 13 | 135 | #define FPU_IRQ 13 |
130 | 136 | ||
131 | #define FIRST_VM86_IRQ 3 | 137 | #define FIRST_VM86_IRQ 3 |
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index 0892ea0e683f..4e370a5d8117 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h | |||
@@ -96,12 +96,15 @@ extern void pci_iommu_alloc(void); | |||
96 | #ifdef CONFIG_PCI_MSI | 96 | #ifdef CONFIG_PCI_MSI |
97 | /* implemented in arch/x86/kernel/apic/io_apic. */ | 97 | /* implemented in arch/x86/kernel/apic/io_apic. */ |
98 | struct msi_desc; | 98 | struct msi_desc; |
99 | void native_compose_msi_msg(struct pci_dev *pdev, unsigned int irq, | ||
100 | unsigned int dest, struct msi_msg *msg, u8 hpet_id); | ||
99 | int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); | 101 | int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); |
100 | void native_teardown_msi_irq(unsigned int irq); | 102 | void native_teardown_msi_irq(unsigned int irq); |
101 | void native_restore_msi_irqs(struct pci_dev *dev); | 103 | void native_restore_msi_irqs(struct pci_dev *dev); |
102 | int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, | 104 | int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, |
103 | unsigned int irq_base, unsigned int irq_offset); | 105 | unsigned int irq_base, unsigned int irq_offset); |
104 | #else | 106 | #else |
107 | #define native_compose_msi_msg NULL | ||
105 | #define native_setup_msi_irqs NULL | 108 | #define native_setup_msi_irqs NULL |
106 | #define native_teardown_msi_irq NULL | 109 | #define native_teardown_msi_irq NULL |
107 | #endif | 110 | #endif |
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h index fa1195dae425..164e3f8d3c3d 100644 --- a/arch/x86/include/asm/pci_x86.h +++ b/arch/x86/include/asm/pci_x86.h | |||
@@ -93,6 +93,8 @@ extern raw_spinlock_t pci_config_lock; | |||
93 | extern int (*pcibios_enable_irq)(struct pci_dev *dev); | 93 | extern int (*pcibios_enable_irq)(struct pci_dev *dev); |
94 | extern void (*pcibios_disable_irq)(struct pci_dev *dev); | 94 | extern void (*pcibios_disable_irq)(struct pci_dev *dev); |
95 | 95 | ||
96 | extern bool mp_should_keep_irq(struct device *dev); | ||
97 | |||
96 | struct pci_raw_ops { | 98 | struct pci_raw_ops { |
97 | int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, | 99 | int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, |
98 | int reg, int len, u32 *val); | 100 | int reg, int len, u32 *val); |
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index a142e77693e1..4433a4be8171 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -76,6 +76,19 @@ int acpi_fix_pin2_polarity __initdata; | |||
76 | static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; | 76 | static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; |
77 | #endif | 77 | #endif |
78 | 78 | ||
79 | /* | ||
80 | * Locks related to IOAPIC hotplug | ||
81 | * Hotplug side: | ||
82 | * ->device_hotplug_lock | ||
83 | * ->acpi_ioapic_lock | ||
84 | * ->ioapic_lock | ||
85 | * Interrupt mapping side: | ||
86 | * ->acpi_ioapic_lock | ||
87 | * ->ioapic_mutex | ||
88 | * ->ioapic_lock | ||
89 | */ | ||
90 | static DEFINE_MUTEX(acpi_ioapic_lock); | ||
91 | |||
79 | /* -------------------------------------------------------------------------- | 92 | /* -------------------------------------------------------------------------- |
80 | Boot-time Configuration | 93 | Boot-time Configuration |
81 | -------------------------------------------------------------------------- */ | 94 | -------------------------------------------------------------------------- */ |
@@ -395,10 +408,6 @@ static int mp_register_gsi(struct device *dev, u32 gsi, int trigger, | |||
395 | if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC) | 408 | if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC) |
396 | return gsi; | 409 | return gsi; |
397 | 410 | ||
398 | /* Don't set up the ACPI SCI because it's already set up */ | ||
399 | if (acpi_gbl_FADT.sci_interrupt == gsi) | ||
400 | return mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC); | ||
401 | |||
402 | trigger = trigger == ACPI_EDGE_SENSITIVE ? 0 : 1; | 411 | trigger = trigger == ACPI_EDGE_SENSITIVE ? 0 : 1; |
403 | polarity = polarity == ACPI_ACTIVE_HIGH ? 0 : 1; | 412 | polarity = polarity == ACPI_ACTIVE_HIGH ? 0 : 1; |
404 | node = dev ? dev_to_node(dev) : NUMA_NO_NODE; | 413 | node = dev ? dev_to_node(dev) : NUMA_NO_NODE; |
@@ -411,7 +420,8 @@ static int mp_register_gsi(struct device *dev, u32 gsi, int trigger, | |||
411 | if (irq < 0) | 420 | if (irq < 0) |
412 | return irq; | 421 | return irq; |
413 | 422 | ||
414 | if (enable_update_mptable) | 423 | /* Don't set up the ACPI SCI because it's already set up */ |
424 | if (enable_update_mptable && acpi_gbl_FADT.sci_interrupt != gsi) | ||
415 | mp_config_acpi_gsi(dev, gsi, trigger, polarity); | 425 | mp_config_acpi_gsi(dev, gsi, trigger, polarity); |
416 | 426 | ||
417 | return irq; | 427 | return irq; |
@@ -424,9 +434,6 @@ static void mp_unregister_gsi(u32 gsi) | |||
424 | if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC) | 434 | if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC) |
425 | return; | 435 | return; |
426 | 436 | ||
427 | if (acpi_gbl_FADT.sci_interrupt == gsi) | ||
428 | return; | ||
429 | |||
430 | irq = mp_map_gsi_to_irq(gsi, 0); | 437 | irq = mp_map_gsi_to_irq(gsi, 0); |
431 | if (irq > 0) | 438 | if (irq > 0) |
432 | mp_unmap_irq(irq); | 439 | mp_unmap_irq(irq); |
@@ -609,8 +616,10 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp) | |||
609 | if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) { | 616 | if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) { |
610 | *irqp = gsi; | 617 | *irqp = gsi; |
611 | } else { | 618 | } else { |
619 | mutex_lock(&acpi_ioapic_lock); | ||
612 | irq = mp_map_gsi_to_irq(gsi, | 620 | irq = mp_map_gsi_to_irq(gsi, |
613 | IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK); | 621 | IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK); |
622 | mutex_unlock(&acpi_ioapic_lock); | ||
614 | if (irq < 0) | 623 | if (irq < 0) |
615 | return -1; | 624 | return -1; |
616 | *irqp = irq; | 625 | *irqp = irq; |
@@ -650,7 +659,9 @@ static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi, | |||
650 | int irq = gsi; | 659 | int irq = gsi; |
651 | 660 | ||
652 | #ifdef CONFIG_X86_IO_APIC | 661 | #ifdef CONFIG_X86_IO_APIC |
662 | mutex_lock(&acpi_ioapic_lock); | ||
653 | irq = mp_register_gsi(dev, gsi, trigger, polarity); | 663 | irq = mp_register_gsi(dev, gsi, trigger, polarity); |
664 | mutex_unlock(&acpi_ioapic_lock); | ||
654 | #endif | 665 | #endif |
655 | 666 | ||
656 | return irq; | 667 | return irq; |
@@ -659,7 +670,9 @@ static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi, | |||
659 | static void acpi_unregister_gsi_ioapic(u32 gsi) | 670 | static void acpi_unregister_gsi_ioapic(u32 gsi) |
660 | { | 671 | { |
661 | #ifdef CONFIG_X86_IO_APIC | 672 | #ifdef CONFIG_X86_IO_APIC |
673 | mutex_lock(&acpi_ioapic_lock); | ||
662 | mp_unregister_gsi(gsi); | 674 | mp_unregister_gsi(gsi); |
675 | mutex_unlock(&acpi_ioapic_lock); | ||
663 | #endif | 676 | #endif |
664 | } | 677 | } |
665 | 678 | ||
@@ -690,6 +703,7 @@ void acpi_unregister_gsi(u32 gsi) | |||
690 | } | 703 | } |
691 | EXPORT_SYMBOL_GPL(acpi_unregister_gsi); | 704 | EXPORT_SYMBOL_GPL(acpi_unregister_gsi); |
692 | 705 | ||
706 | #ifdef CONFIG_X86_LOCAL_APIC | ||
693 | static void __init acpi_set_irq_model_ioapic(void) | 707 | static void __init acpi_set_irq_model_ioapic(void) |
694 | { | 708 | { |
695 | acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC; | 709 | acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC; |
@@ -697,6 +711,7 @@ static void __init acpi_set_irq_model_ioapic(void) | |||
697 | __acpi_unregister_gsi = acpi_unregister_gsi_ioapic; | 711 | __acpi_unregister_gsi = acpi_unregister_gsi_ioapic; |
698 | acpi_ioapic = 1; | 712 | acpi_ioapic = 1; |
699 | } | 713 | } |
714 | #endif | ||
700 | 715 | ||
701 | /* | 716 | /* |
702 | * ACPI based hotplug support for CPU | 717 | * ACPI based hotplug support for CPU |
@@ -759,20 +774,74 @@ EXPORT_SYMBOL(acpi_unmap_lsapic); | |||
759 | 774 | ||
760 | int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) | 775 | int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) |
761 | { | 776 | { |
762 | /* TBD */ | 777 | int ret = -ENOSYS; |
763 | return -EINVAL; | 778 | #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC |
764 | } | 779 | int ioapic_id; |
780 | u64 addr; | ||
781 | struct ioapic_domain_cfg cfg = { | ||
782 | .type = IOAPIC_DOMAIN_DYNAMIC, | ||
783 | .ops = &acpi_irqdomain_ops, | ||
784 | }; | ||
785 | |||
786 | ioapic_id = acpi_get_ioapic_id(handle, gsi_base, &addr); | ||
787 | if (ioapic_id < 0) { | ||
788 | unsigned long long uid; | ||
789 | acpi_status status; | ||
765 | 790 | ||
791 | status = acpi_evaluate_integer(handle, METHOD_NAME__UID, | ||
792 | NULL, &uid); | ||
793 | if (ACPI_FAILURE(status)) { | ||
794 | acpi_handle_warn(handle, "failed to get IOAPIC ID.\n"); | ||
795 | return -EINVAL; | ||
796 | } | ||
797 | ioapic_id = (int)uid; | ||
798 | } | ||
799 | |||
800 | mutex_lock(&acpi_ioapic_lock); | ||
801 | ret = mp_register_ioapic(ioapic_id, phys_addr, gsi_base, &cfg); | ||
802 | mutex_unlock(&acpi_ioapic_lock); | ||
803 | #endif | ||
804 | |||
805 | return ret; | ||
806 | } | ||
766 | EXPORT_SYMBOL(acpi_register_ioapic); | 807 | EXPORT_SYMBOL(acpi_register_ioapic); |
767 | 808 | ||
768 | int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base) | 809 | int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base) |
769 | { | 810 | { |
770 | /* TBD */ | 811 | int ret = -ENOSYS; |
771 | return -EINVAL; | ||
772 | } | ||
773 | 812 | ||
813 | #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC | ||
814 | mutex_lock(&acpi_ioapic_lock); | ||
815 | ret = mp_unregister_ioapic(gsi_base); | ||
816 | mutex_unlock(&acpi_ioapic_lock); | ||
817 | #endif | ||
818 | |||
819 | return ret; | ||
820 | } | ||
774 | EXPORT_SYMBOL(acpi_unregister_ioapic); | 821 | EXPORT_SYMBOL(acpi_unregister_ioapic); |
775 | 822 | ||
823 | /** | ||
824 | * acpi_ioapic_registered - Check whether IOAPIC assoicatied with @gsi_base | ||
825 | * has been registered | ||
826 | * @handle: ACPI handle of the IOAPIC deivce | ||
827 | * @gsi_base: GSI base associated with the IOAPIC | ||
828 | * | ||
829 | * Assume caller holds some type of lock to serialize acpi_ioapic_registered() | ||
830 | * with acpi_register_ioapic()/acpi_unregister_ioapic(). | ||
831 | */ | ||
832 | int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base) | ||
833 | { | ||
834 | int ret = 0; | ||
835 | |||
836 | #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC | ||
837 | mutex_lock(&acpi_ioapic_lock); | ||
838 | ret = mp_ioapic_registered(gsi_base); | ||
839 | mutex_unlock(&acpi_ioapic_lock); | ||
840 | #endif | ||
841 | |||
842 | return ret; | ||
843 | } | ||
844 | |||
776 | static int __init acpi_parse_sbf(struct acpi_table_header *table) | 845 | static int __init acpi_parse_sbf(struct acpi_table_header *table) |
777 | { | 846 | { |
778 | struct acpi_table_boot *sb; | 847 | struct acpi_table_boot *sb; |
@@ -1185,7 +1254,9 @@ static void __init acpi_process_madt(void) | |||
1185 | /* | 1254 | /* |
1186 | * Parse MADT IO-APIC entries | 1255 | * Parse MADT IO-APIC entries |
1187 | */ | 1256 | */ |
1257 | mutex_lock(&acpi_ioapic_lock); | ||
1188 | error = acpi_parse_madt_ioapic_entries(); | 1258 | error = acpi_parse_madt_ioapic_entries(); |
1259 | mutex_unlock(&acpi_ioapic_lock); | ||
1189 | if (!error) { | 1260 | if (!error) { |
1190 | acpi_set_irq_model_ioapic(); | 1261 | acpi_set_irq_model_ioapic(); |
1191 | 1262 | ||
diff --git a/arch/x86/kernel/apic/Makefile b/arch/x86/kernel/apic/Makefile index dcb5b15401ce..8bb12ddc5db8 100644 --- a/arch/x86/kernel/apic/Makefile +++ b/arch/x86/kernel/apic/Makefile | |||
@@ -2,10 +2,12 @@ | |||
2 | # Makefile for local APIC drivers and for the IO-APIC code | 2 | # Makefile for local APIC drivers and for the IO-APIC code |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_X86_LOCAL_APIC) += apic.o apic_noop.o ipi.o | 5 | obj-$(CONFIG_X86_LOCAL_APIC) += apic.o apic_noop.o ipi.o vector.o |
6 | obj-y += hw_nmi.o | 6 | obj-y += hw_nmi.o |
7 | 7 | ||
8 | obj-$(CONFIG_X86_IO_APIC) += io_apic.o | 8 | obj-$(CONFIG_X86_IO_APIC) += io_apic.o |
9 | obj-$(CONFIG_PCI_MSI) += msi.o | ||
10 | obj-$(CONFIG_HT_IRQ) += htirq.o | ||
9 | obj-$(CONFIG_SMP) += ipi.o | 11 | obj-$(CONFIG_SMP) += ipi.o |
10 | 12 | ||
11 | ifeq ($(CONFIG_X86_64),y) | 13 | ifeq ($(CONFIG_X86_64),y) |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index ba6cc041edb1..29b5b18afa27 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -196,7 +196,7 @@ static int disable_apic_timer __initdata; | |||
196 | int local_apic_timer_c2_ok; | 196 | int local_apic_timer_c2_ok; |
197 | EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); | 197 | EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); |
198 | 198 | ||
199 | int first_system_vector = 0xfe; | 199 | int first_system_vector = FIRST_SYSTEM_VECTOR; |
200 | 200 | ||
201 | /* | 201 | /* |
202 | * Debug level, exported for io_apic.c | 202 | * Debug level, exported for io_apic.c |
@@ -1930,7 +1930,7 @@ int __init APIC_init_uniprocessor(void) | |||
1930 | /* | 1930 | /* |
1931 | * This interrupt should _never_ happen with our APIC/SMP architecture | 1931 | * This interrupt should _never_ happen with our APIC/SMP architecture |
1932 | */ | 1932 | */ |
1933 | static inline void __smp_spurious_interrupt(void) | 1933 | static inline void __smp_spurious_interrupt(u8 vector) |
1934 | { | 1934 | { |
1935 | u32 v; | 1935 | u32 v; |
1936 | 1936 | ||
@@ -1939,30 +1939,32 @@ static inline void __smp_spurious_interrupt(void) | |||
1939 | * if it is a vectored one. Just in case... | 1939 | * if it is a vectored one. Just in case... |
1940 | * Spurious interrupts should not be ACKed. | 1940 | * Spurious interrupts should not be ACKed. |
1941 | */ | 1941 | */ |
1942 | v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1)); | 1942 | v = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1)); |
1943 | if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f))) | 1943 | if (v & (1 << (vector & 0x1f))) |
1944 | ack_APIC_irq(); | 1944 | ack_APIC_irq(); |
1945 | 1945 | ||
1946 | inc_irq_stat(irq_spurious_count); | 1946 | inc_irq_stat(irq_spurious_count); |
1947 | 1947 | ||
1948 | /* see sw-dev-man vol 3, chapter 7.4.13.5 */ | 1948 | /* see sw-dev-man vol 3, chapter 7.4.13.5 */ |
1949 | pr_info("spurious APIC interrupt on CPU#%d, " | 1949 | pr_info("spurious APIC interrupt through vector %02x on CPU#%d, " |
1950 | "should never happen.\n", smp_processor_id()); | 1950 | "should never happen.\n", vector, smp_processor_id()); |
1951 | } | 1951 | } |
1952 | 1952 | ||
1953 | __visible void smp_spurious_interrupt(struct pt_regs *regs) | 1953 | __visible void smp_spurious_interrupt(struct pt_regs *regs) |
1954 | { | 1954 | { |
1955 | entering_irq(); | 1955 | entering_irq(); |
1956 | __smp_spurious_interrupt(); | 1956 | __smp_spurious_interrupt(~regs->orig_ax); |
1957 | exiting_irq(); | 1957 | exiting_irq(); |
1958 | } | 1958 | } |
1959 | 1959 | ||
1960 | __visible void smp_trace_spurious_interrupt(struct pt_regs *regs) | 1960 | __visible void smp_trace_spurious_interrupt(struct pt_regs *regs) |
1961 | { | 1961 | { |
1962 | u8 vector = ~regs->orig_ax; | ||
1963 | |||
1962 | entering_irq(); | 1964 | entering_irq(); |
1963 | trace_spurious_apic_entry(SPURIOUS_APIC_VECTOR); | 1965 | trace_spurious_apic_entry(vector); |
1964 | __smp_spurious_interrupt(); | 1966 | __smp_spurious_interrupt(vector); |
1965 | trace_spurious_apic_exit(SPURIOUS_APIC_VECTOR); | 1967 | trace_spurious_apic_exit(vector); |
1966 | exiting_irq(); | 1968 | exiting_irq(); |
1967 | } | 1969 | } |
1968 | 1970 | ||
diff --git a/arch/x86/kernel/apic/htirq.c b/arch/x86/kernel/apic/htirq.c new file mode 100644 index 000000000000..816f36e979ad --- /dev/null +++ b/arch/x86/kernel/apic/htirq.c | |||
@@ -0,0 +1,107 @@ | |||
1 | /* | ||
2 | * Support Hypertransport IRQ | ||
3 | * | ||
4 | * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo | ||
5 | * Moved from arch/x86/kernel/apic/io_apic.c. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #include <linux/mm.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/device.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/htirq.h> | ||
17 | #include <asm/hw_irq.h> | ||
18 | #include <asm/apic.h> | ||
19 | #include <asm/hypertransport.h> | ||
20 | |||
21 | /* | ||
22 | * Hypertransport interrupt support | ||
23 | */ | ||
24 | static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) | ||
25 | { | ||
26 | struct ht_irq_msg msg; | ||
27 | |||
28 | fetch_ht_irq_msg(irq, &msg); | ||
29 | |||
30 | msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK); | ||
31 | msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK); | ||
32 | |||
33 | msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest); | ||
34 | msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest); | ||
35 | |||
36 | write_ht_irq_msg(irq, &msg); | ||
37 | } | ||
38 | |||
39 | static int | ||
40 | ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) | ||
41 | { | ||
42 | struct irq_cfg *cfg = irqd_cfg(data); | ||
43 | unsigned int dest; | ||
44 | int ret; | ||
45 | |||
46 | ret = apic_set_affinity(data, mask, &dest); | ||
47 | if (ret) | ||
48 | return ret; | ||
49 | |||
50 | target_ht_irq(data->irq, dest, cfg->vector); | ||
51 | return IRQ_SET_MASK_OK_NOCOPY; | ||
52 | } | ||
53 | |||
54 | static struct irq_chip ht_irq_chip = { | ||
55 | .name = "PCI-HT", | ||
56 | .irq_mask = mask_ht_irq, | ||
57 | .irq_unmask = unmask_ht_irq, | ||
58 | .irq_ack = apic_ack_edge, | ||
59 | .irq_set_affinity = ht_set_affinity, | ||
60 | .irq_retrigger = apic_retrigger_irq, | ||
61 | .flags = IRQCHIP_SKIP_SET_WAKE, | ||
62 | }; | ||
63 | |||
64 | int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | ||
65 | { | ||
66 | struct irq_cfg *cfg; | ||
67 | struct ht_irq_msg msg; | ||
68 | unsigned dest; | ||
69 | int err; | ||
70 | |||
71 | if (disable_apic) | ||
72 | return -ENXIO; | ||
73 | |||
74 | cfg = irq_cfg(irq); | ||
75 | err = assign_irq_vector(irq, cfg, apic->target_cpus()); | ||
76 | if (err) | ||
77 | return err; | ||
78 | |||
79 | err = apic->cpu_mask_to_apicid_and(cfg->domain, | ||
80 | apic->target_cpus(), &dest); | ||
81 | if (err) | ||
82 | return err; | ||
83 | |||
84 | msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); | ||
85 | |||
86 | msg.address_lo = | ||
87 | HT_IRQ_LOW_BASE | | ||
88 | HT_IRQ_LOW_DEST_ID(dest) | | ||
89 | HT_IRQ_LOW_VECTOR(cfg->vector) | | ||
90 | ((apic->irq_dest_mode == 0) ? | ||
91 | HT_IRQ_LOW_DM_PHYSICAL : | ||
92 | HT_IRQ_LOW_DM_LOGICAL) | | ||
93 | HT_IRQ_LOW_RQEOI_EDGE | | ||
94 | ((apic->irq_delivery_mode != dest_LowestPrio) ? | ||
95 | HT_IRQ_LOW_MT_FIXED : | ||
96 | HT_IRQ_LOW_MT_ARBITRATED) | | ||
97 | HT_IRQ_LOW_IRQ_MASKED; | ||
98 | |||
99 | write_ht_irq_msg(irq, &msg); | ||
100 | |||
101 | irq_set_chip_and_handler_name(irq, &ht_irq_chip, | ||
102 | handle_edge_irq, "edge"); | ||
103 | |||
104 | dev_dbg(&dev->dev, "irq %d for HT\n", irq); | ||
105 | |||
106 | return 0; | ||
107 | } | ||
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index a6745e756729..3f5f60406ab1 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -32,15 +32,11 @@ | |||
32 | #include <linux/module.h> | 32 | #include <linux/module.h> |
33 | #include <linux/syscore_ops.h> | 33 | #include <linux/syscore_ops.h> |
34 | #include <linux/irqdomain.h> | 34 | #include <linux/irqdomain.h> |
35 | #include <linux/msi.h> | ||
36 | #include <linux/htirq.h> | ||
37 | #include <linux/freezer.h> | 35 | #include <linux/freezer.h> |
38 | #include <linux/kthread.h> | 36 | #include <linux/kthread.h> |
39 | #include <linux/jiffies.h> /* time_after() */ | 37 | #include <linux/jiffies.h> /* time_after() */ |
40 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
41 | #include <linux/bootmem.h> | 39 | #include <linux/bootmem.h> |
42 | #include <linux/dmar.h> | ||
43 | #include <linux/hpet.h> | ||
44 | 40 | ||
45 | #include <asm/idle.h> | 41 | #include <asm/idle.h> |
46 | #include <asm/io.h> | 42 | #include <asm/io.h> |
@@ -52,17 +48,12 @@ | |||
52 | #include <asm/dma.h> | 48 | #include <asm/dma.h> |
53 | #include <asm/timer.h> | 49 | #include <asm/timer.h> |
54 | #include <asm/i8259.h> | 50 | #include <asm/i8259.h> |
55 | #include <asm/msidef.h> | ||
56 | #include <asm/hypertransport.h> | ||
57 | #include <asm/setup.h> | 51 | #include <asm/setup.h> |
58 | #include <asm/irq_remapping.h> | 52 | #include <asm/irq_remapping.h> |
59 | #include <asm/hpet.h> | ||
60 | #include <asm/hw_irq.h> | 53 | #include <asm/hw_irq.h> |
61 | 54 | ||
62 | #include <asm/apic.h> | 55 | #include <asm/apic.h> |
63 | 56 | ||
64 | #define __apicdebuginit(type) static type __init | ||
65 | |||
66 | #define for_each_ioapic(idx) \ | 57 | #define for_each_ioapic(idx) \ |
67 | for ((idx) = 0; (idx) < nr_ioapics; (idx)++) | 58 | for ((idx) = 0; (idx) < nr_ioapics; (idx)++) |
68 | #define for_each_ioapic_reverse(idx) \ | 59 | #define for_each_ioapic_reverse(idx) \ |
@@ -74,7 +65,7 @@ | |||
74 | for_each_pin((idx), (pin)) | 65 | for_each_pin((idx), (pin)) |
75 | 66 | ||
76 | #define for_each_irq_pin(entry, head) \ | 67 | #define for_each_irq_pin(entry, head) \ |
77 | for (entry = head; entry; entry = entry->next) | 68 | list_for_each_entry(entry, &head, list) |
78 | 69 | ||
79 | /* | 70 | /* |
80 | * Is the SiS APIC rmw bug present ? | 71 | * Is the SiS APIC rmw bug present ? |
@@ -83,7 +74,6 @@ | |||
83 | int sis_apic_bug = -1; | 74 | int sis_apic_bug = -1; |
84 | 75 | ||
85 | static DEFINE_RAW_SPINLOCK(ioapic_lock); | 76 | static DEFINE_RAW_SPINLOCK(ioapic_lock); |
86 | static DEFINE_RAW_SPINLOCK(vector_lock); | ||
87 | static DEFINE_MUTEX(ioapic_mutex); | 77 | static DEFINE_MUTEX(ioapic_mutex); |
88 | static unsigned int ioapic_dynirq_base; | 78 | static unsigned int ioapic_dynirq_base; |
89 | static int ioapic_initialized; | 79 | static int ioapic_initialized; |
@@ -112,6 +102,7 @@ static struct ioapic { | |||
112 | struct ioapic_domain_cfg irqdomain_cfg; | 102 | struct ioapic_domain_cfg irqdomain_cfg; |
113 | struct irq_domain *irqdomain; | 103 | struct irq_domain *irqdomain; |
114 | struct mp_pin_info *pin_info; | 104 | struct mp_pin_info *pin_info; |
105 | struct resource *iomem_res; | ||
115 | } ioapics[MAX_IO_APICS]; | 106 | } ioapics[MAX_IO_APICS]; |
116 | 107 | ||
117 | #define mpc_ioapic_ver(ioapic_idx) ioapics[ioapic_idx].mp_config.apicver | 108 | #define mpc_ioapic_ver(ioapic_idx) ioapics[ioapic_idx].mp_config.apicver |
@@ -205,8 +196,6 @@ static int __init parse_noapic(char *str) | |||
205 | } | 196 | } |
206 | early_param("noapic", parse_noapic); | 197 | early_param("noapic", parse_noapic); |
207 | 198 | ||
208 | static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node); | ||
209 | |||
210 | /* Will be called in mpparse/acpi/sfi codes for saving IRQ info */ | 199 | /* Will be called in mpparse/acpi/sfi codes for saving IRQ info */ |
211 | void mp_save_irq(struct mpc_intsrc *m) | 200 | void mp_save_irq(struct mpc_intsrc *m) |
212 | { | 201 | { |
@@ -228,8 +217,8 @@ void mp_save_irq(struct mpc_intsrc *m) | |||
228 | } | 217 | } |
229 | 218 | ||
230 | struct irq_pin_list { | 219 | struct irq_pin_list { |
220 | struct list_head list; | ||
231 | int apic, pin; | 221 | int apic, pin; |
232 | struct irq_pin_list *next; | ||
233 | }; | 222 | }; |
234 | 223 | ||
235 | static struct irq_pin_list *alloc_irq_pin_list(int node) | 224 | static struct irq_pin_list *alloc_irq_pin_list(int node) |
@@ -237,7 +226,26 @@ static struct irq_pin_list *alloc_irq_pin_list(int node) | |||
237 | return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node); | 226 | return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node); |
238 | } | 227 | } |
239 | 228 | ||
240 | int __init arch_early_irq_init(void) | 229 | static void alloc_ioapic_saved_registers(int idx) |
230 | { | ||
231 | size_t size; | ||
232 | |||
233 | if (ioapics[idx].saved_registers) | ||
234 | return; | ||
235 | |||
236 | size = sizeof(struct IO_APIC_route_entry) * ioapics[idx].nr_registers; | ||
237 | ioapics[idx].saved_registers = kzalloc(size, GFP_KERNEL); | ||
238 | if (!ioapics[idx].saved_registers) | ||
239 | pr_err("IOAPIC %d: suspend/resume impossible!\n", idx); | ||
240 | } | ||
241 | |||
242 | static void free_ioapic_saved_registers(int idx) | ||
243 | { | ||
244 | kfree(ioapics[idx].saved_registers); | ||
245 | ioapics[idx].saved_registers = NULL; | ||
246 | } | ||
247 | |||
248 | int __init arch_early_ioapic_init(void) | ||
241 | { | 249 | { |
242 | struct irq_cfg *cfg; | 250 | struct irq_cfg *cfg; |
243 | int i, node = cpu_to_node(0); | 251 | int i, node = cpu_to_node(0); |
@@ -245,13 +253,8 @@ int __init arch_early_irq_init(void) | |||
245 | if (!nr_legacy_irqs()) | 253 | if (!nr_legacy_irqs()) |
246 | io_apic_irqs = ~0UL; | 254 | io_apic_irqs = ~0UL; |
247 | 255 | ||
248 | for_each_ioapic(i) { | 256 | for_each_ioapic(i) |
249 | ioapics[i].saved_registers = | 257 | alloc_ioapic_saved_registers(i); |
250 | kzalloc(sizeof(struct IO_APIC_route_entry) * | ||
251 | ioapics[i].nr_registers, GFP_KERNEL); | ||
252 | if (!ioapics[i].saved_registers) | ||
253 | pr_err("IOAPIC %d: suspend/resume impossible!\n", i); | ||
254 | } | ||
255 | 258 | ||
256 | /* | 259 | /* |
257 | * For legacy IRQ's, start with assigning irq0 to irq15 to | 260 | * For legacy IRQ's, start with assigning irq0 to irq15 to |
@@ -266,61 +269,6 @@ int __init arch_early_irq_init(void) | |||
266 | return 0; | 269 | return 0; |
267 | } | 270 | } |
268 | 271 | ||
269 | static inline struct irq_cfg *irq_cfg(unsigned int irq) | ||
270 | { | ||
271 | return irq_get_chip_data(irq); | ||
272 | } | ||
273 | |||
274 | static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) | ||
275 | { | ||
276 | struct irq_cfg *cfg; | ||
277 | |||
278 | cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node); | ||
279 | if (!cfg) | ||
280 | return NULL; | ||
281 | if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node)) | ||
282 | goto out_cfg; | ||
283 | if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node)) | ||
284 | goto out_domain; | ||
285 | return cfg; | ||
286 | out_domain: | ||
287 | free_cpumask_var(cfg->domain); | ||
288 | out_cfg: | ||
289 | kfree(cfg); | ||
290 | return NULL; | ||
291 | } | ||
292 | |||
293 | static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) | ||
294 | { | ||
295 | if (!cfg) | ||
296 | return; | ||
297 | irq_set_chip_data(at, NULL); | ||
298 | free_cpumask_var(cfg->domain); | ||
299 | free_cpumask_var(cfg->old_domain); | ||
300 | kfree(cfg); | ||
301 | } | ||
302 | |||
303 | static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node) | ||
304 | { | ||
305 | int res = irq_alloc_desc_at(at, node); | ||
306 | struct irq_cfg *cfg; | ||
307 | |||
308 | if (res < 0) { | ||
309 | if (res != -EEXIST) | ||
310 | return NULL; | ||
311 | cfg = irq_cfg(at); | ||
312 | if (cfg) | ||
313 | return cfg; | ||
314 | } | ||
315 | |||
316 | cfg = alloc_irq_cfg(at, node); | ||
317 | if (cfg) | ||
318 | irq_set_chip_data(at, cfg); | ||
319 | else | ||
320 | irq_free_desc(at); | ||
321 | return cfg; | ||
322 | } | ||
323 | |||
324 | struct io_apic { | 272 | struct io_apic { |
325 | unsigned int index; | 273 | unsigned int index; |
326 | unsigned int unused[3]; | 274 | unsigned int unused[3]; |
@@ -445,15 +393,12 @@ static void ioapic_mask_entry(int apic, int pin) | |||
445 | */ | 393 | */ |
446 | static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) | 394 | static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) |
447 | { | 395 | { |
448 | struct irq_pin_list **last, *entry; | 396 | struct irq_pin_list *entry; |
449 | 397 | ||
450 | /* don't allow duplicates */ | 398 | /* don't allow duplicates */ |
451 | last = &cfg->irq_2_pin; | 399 | for_each_irq_pin(entry, cfg->irq_2_pin) |
452 | for_each_irq_pin(entry, cfg->irq_2_pin) { | ||
453 | if (entry->apic == apic && entry->pin == pin) | 400 | if (entry->apic == apic && entry->pin == pin) |
454 | return 0; | 401 | return 0; |
455 | last = &entry->next; | ||
456 | } | ||
457 | 402 | ||
458 | entry = alloc_irq_pin_list(node); | 403 | entry = alloc_irq_pin_list(node); |
459 | if (!entry) { | 404 | if (!entry) { |
@@ -464,22 +409,19 @@ static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pi | |||
464 | entry->apic = apic; | 409 | entry->apic = apic; |
465 | entry->pin = pin; | 410 | entry->pin = pin; |
466 | 411 | ||
467 | *last = entry; | 412 | list_add_tail(&entry->list, &cfg->irq_2_pin); |
468 | return 0; | 413 | return 0; |
469 | } | 414 | } |
470 | 415 | ||
471 | static void __remove_pin_from_irq(struct irq_cfg *cfg, int apic, int pin) | 416 | static void __remove_pin_from_irq(struct irq_cfg *cfg, int apic, int pin) |
472 | { | 417 | { |
473 | struct irq_pin_list **last, *entry; | 418 | struct irq_pin_list *tmp, *entry; |
474 | 419 | ||
475 | last = &cfg->irq_2_pin; | 420 | list_for_each_entry_safe(entry, tmp, &cfg->irq_2_pin, list) |
476 | for_each_irq_pin(entry, cfg->irq_2_pin) | ||
477 | if (entry->apic == apic && entry->pin == pin) { | 421 | if (entry->apic == apic && entry->pin == pin) { |
478 | *last = entry->next; | 422 | list_del(&entry->list); |
479 | kfree(entry); | 423 | kfree(entry); |
480 | return; | 424 | return; |
481 | } else { | ||
482 | last = &entry->next; | ||
483 | } | 425 | } |
484 | } | 426 | } |
485 | 427 | ||
@@ -559,7 +501,7 @@ static void mask_ioapic(struct irq_cfg *cfg) | |||
559 | 501 | ||
560 | static void mask_ioapic_irq(struct irq_data *data) | 502 | static void mask_ioapic_irq(struct irq_data *data) |
561 | { | 503 | { |
562 | mask_ioapic(data->chip_data); | 504 | mask_ioapic(irqd_cfg(data)); |
563 | } | 505 | } |
564 | 506 | ||
565 | static void __unmask_ioapic(struct irq_cfg *cfg) | 507 | static void __unmask_ioapic(struct irq_cfg *cfg) |
@@ -578,7 +520,7 @@ static void unmask_ioapic(struct irq_cfg *cfg) | |||
578 | 520 | ||
579 | static void unmask_ioapic_irq(struct irq_data *data) | 521 | static void unmask_ioapic_irq(struct irq_data *data) |
580 | { | 522 | { |
581 | unmask_ioapic(data->chip_data); | 523 | unmask_ioapic(irqd_cfg(data)); |
582 | } | 524 | } |
583 | 525 | ||
584 | /* | 526 | /* |
@@ -1164,8 +1106,7 @@ void mp_unmap_irq(int irq) | |||
1164 | * Find a specific PCI IRQ entry. | 1106 | * Find a specific PCI IRQ entry. |
1165 | * Not an __init, possibly needed by modules | 1107 | * Not an __init, possibly needed by modules |
1166 | */ | 1108 | */ |
1167 | int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin, | 1109 | int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin) |
1168 | struct io_apic_irq_attr *irq_attr) | ||
1169 | { | 1110 | { |
1170 | int irq, i, best_ioapic = -1, best_idx = -1; | 1111 | int irq, i, best_ioapic = -1, best_idx = -1; |
1171 | 1112 | ||
@@ -1219,195 +1160,11 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin, | |||
1219 | return -1; | 1160 | return -1; |
1220 | 1161 | ||
1221 | out: | 1162 | out: |
1222 | irq = pin_2_irq(best_idx, best_ioapic, mp_irqs[best_idx].dstirq, | 1163 | return pin_2_irq(best_idx, best_ioapic, mp_irqs[best_idx].dstirq, |
1223 | IOAPIC_MAP_ALLOC); | 1164 | IOAPIC_MAP_ALLOC); |
1224 | if (irq > 0) | ||
1225 | set_io_apic_irq_attr(irq_attr, best_ioapic, | ||
1226 | mp_irqs[best_idx].dstirq, | ||
1227 | irq_trigger(best_idx), | ||
1228 | irq_polarity(best_idx)); | ||
1229 | return irq; | ||
1230 | } | 1165 | } |
1231 | EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); | 1166 | EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); |
1232 | 1167 | ||
1233 | void lock_vector_lock(void) | ||
1234 | { | ||
1235 | /* Used to the online set of cpus does not change | ||
1236 | * during assign_irq_vector. | ||
1237 | */ | ||
1238 | raw_spin_lock(&vector_lock); | ||
1239 | } | ||
1240 | |||
1241 | void unlock_vector_lock(void) | ||
1242 | { | ||
1243 | raw_spin_unlock(&vector_lock); | ||
1244 | } | ||
1245 | |||
1246 | static int | ||
1247 | __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) | ||
1248 | { | ||
1249 | /* | ||
1250 | * NOTE! The local APIC isn't very good at handling | ||
1251 | * multiple interrupts at the same interrupt level. | ||
1252 | * As the interrupt level is determined by taking the | ||
1253 | * vector number and shifting that right by 4, we | ||
1254 | * want to spread these out a bit so that they don't | ||
1255 | * all fall in the same interrupt level. | ||
1256 | * | ||
1257 | * Also, we've got to be careful not to trash gate | ||
1258 | * 0x80, because int 0x80 is hm, kind of importantish. ;) | ||
1259 | */ | ||
1260 | static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START; | ||
1261 | static int current_offset = VECTOR_OFFSET_START % 16; | ||
1262 | int cpu, err; | ||
1263 | cpumask_var_t tmp_mask; | ||
1264 | |||
1265 | if (cfg->move_in_progress) | ||
1266 | return -EBUSY; | ||
1267 | |||
1268 | if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC)) | ||
1269 | return -ENOMEM; | ||
1270 | |||
1271 | /* Only try and allocate irqs on cpus that are present */ | ||
1272 | err = -ENOSPC; | ||
1273 | cpumask_clear(cfg->old_domain); | ||
1274 | cpu = cpumask_first_and(mask, cpu_online_mask); | ||
1275 | while (cpu < nr_cpu_ids) { | ||
1276 | int new_cpu, vector, offset; | ||
1277 | |||
1278 | apic->vector_allocation_domain(cpu, tmp_mask, mask); | ||
1279 | |||
1280 | if (cpumask_subset(tmp_mask, cfg->domain)) { | ||
1281 | err = 0; | ||
1282 | if (cpumask_equal(tmp_mask, cfg->domain)) | ||
1283 | break; | ||
1284 | /* | ||
1285 | * New cpumask using the vector is a proper subset of | ||
1286 | * the current in use mask. So cleanup the vector | ||
1287 | * allocation for the members that are not used anymore. | ||
1288 | */ | ||
1289 | cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask); | ||
1290 | cfg->move_in_progress = | ||
1291 | cpumask_intersects(cfg->old_domain, cpu_online_mask); | ||
1292 | cpumask_and(cfg->domain, cfg->domain, tmp_mask); | ||
1293 | break; | ||
1294 | } | ||
1295 | |||
1296 | vector = current_vector; | ||
1297 | offset = current_offset; | ||
1298 | next: | ||
1299 | vector += 16; | ||
1300 | if (vector >= first_system_vector) { | ||
1301 | offset = (offset + 1) % 16; | ||
1302 | vector = FIRST_EXTERNAL_VECTOR + offset; | ||
1303 | } | ||
1304 | |||
1305 | if (unlikely(current_vector == vector)) { | ||
1306 | cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask); | ||
1307 | cpumask_andnot(tmp_mask, mask, cfg->old_domain); | ||
1308 | cpu = cpumask_first_and(tmp_mask, cpu_online_mask); | ||
1309 | continue; | ||
1310 | } | ||
1311 | |||
1312 | if (test_bit(vector, used_vectors)) | ||
1313 | goto next; | ||
1314 | |||
1315 | for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) { | ||
1316 | if (per_cpu(vector_irq, new_cpu)[vector] > VECTOR_UNDEFINED) | ||
1317 | goto next; | ||
1318 | } | ||
1319 | /* Found one! */ | ||
1320 | current_vector = vector; | ||
1321 | current_offset = offset; | ||
1322 | if (cfg->vector) { | ||
1323 | cpumask_copy(cfg->old_domain, cfg->domain); | ||
1324 | cfg->move_in_progress = | ||
1325 | cpumask_intersects(cfg->old_domain, cpu_online_mask); | ||
1326 | } | ||
1327 | for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) | ||
1328 | per_cpu(vector_irq, new_cpu)[vector] = irq; | ||
1329 | cfg->vector = vector; | ||
1330 | cpumask_copy(cfg->domain, tmp_mask); | ||
1331 | err = 0; | ||
1332 | break; | ||
1333 | } | ||
1334 | free_cpumask_var(tmp_mask); | ||
1335 | return err; | ||
1336 | } | ||
1337 | |||
1338 | int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) | ||
1339 | { | ||
1340 | int err; | ||
1341 | unsigned long flags; | ||
1342 | |||
1343 | raw_spin_lock_irqsave(&vector_lock, flags); | ||
1344 | err = __assign_irq_vector(irq, cfg, mask); | ||
1345 | raw_spin_unlock_irqrestore(&vector_lock, flags); | ||
1346 | return err; | ||
1347 | } | ||
1348 | |||
1349 | static void __clear_irq_vector(int irq, struct irq_cfg *cfg) | ||
1350 | { | ||
1351 | int cpu, vector; | ||
1352 | |||
1353 | BUG_ON(!cfg->vector); | ||
1354 | |||
1355 | vector = cfg->vector; | ||
1356 | for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) | ||
1357 | per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; | ||
1358 | |||
1359 | cfg->vector = 0; | ||
1360 | cpumask_clear(cfg->domain); | ||
1361 | |||
1362 | if (likely(!cfg->move_in_progress)) | ||
1363 | return; | ||
1364 | for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { | ||
1365 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { | ||
1366 | if (per_cpu(vector_irq, cpu)[vector] != irq) | ||
1367 | continue; | ||
1368 | per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; | ||
1369 | break; | ||
1370 | } | ||
1371 | } | ||
1372 | cfg->move_in_progress = 0; | ||
1373 | } | ||
1374 | |||
1375 | void __setup_vector_irq(int cpu) | ||
1376 | { | ||
1377 | /* Initialize vector_irq on a new cpu */ | ||
1378 | int irq, vector; | ||
1379 | struct irq_cfg *cfg; | ||
1380 | |||
1381 | /* | ||
1382 | * vector_lock will make sure that we don't run into irq vector | ||
1383 | * assignments that might be happening on another cpu in parallel, | ||
1384 | * while we setup our initial vector to irq mappings. | ||
1385 | */ | ||
1386 | raw_spin_lock(&vector_lock); | ||
1387 | /* Mark the inuse vectors */ | ||
1388 | for_each_active_irq(irq) { | ||
1389 | cfg = irq_cfg(irq); | ||
1390 | if (!cfg) | ||
1391 | continue; | ||
1392 | |||
1393 | if (!cpumask_test_cpu(cpu, cfg->domain)) | ||
1394 | continue; | ||
1395 | vector = cfg->vector; | ||
1396 | per_cpu(vector_irq, cpu)[vector] = irq; | ||
1397 | } | ||
1398 | /* Mark the free vectors */ | ||
1399 | for (vector = 0; vector < NR_VECTORS; ++vector) { | ||
1400 | irq = per_cpu(vector_irq, cpu)[vector]; | ||
1401 | if (irq <= VECTOR_UNDEFINED) | ||
1402 | continue; | ||
1403 | |||
1404 | cfg = irq_cfg(irq); | ||
1405 | if (!cpumask_test_cpu(cpu, cfg->domain)) | ||
1406 | per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; | ||
1407 | } | ||
1408 | raw_spin_unlock(&vector_lock); | ||
1409 | } | ||
1410 | |||
1411 | static struct irq_chip ioapic_chip; | 1168 | static struct irq_chip ioapic_chip; |
1412 | 1169 | ||
1413 | #ifdef CONFIG_X86_32 | 1170 | #ifdef CONFIG_X86_32 |
@@ -1496,7 +1253,7 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg, | |||
1496 | &dest)) { | 1253 | &dest)) { |
1497 | pr_warn("Failed to obtain apicid for ioapic %d, pin %d\n", | 1254 | pr_warn("Failed to obtain apicid for ioapic %d, pin %d\n", |
1498 | mpc_ioapic_id(attr->ioapic), attr->ioapic_pin); | 1255 | mpc_ioapic_id(attr->ioapic), attr->ioapic_pin); |
1499 | __clear_irq_vector(irq, cfg); | 1256 | clear_irq_vector(irq, cfg); |
1500 | 1257 | ||
1501 | return; | 1258 | return; |
1502 | } | 1259 | } |
@@ -1510,7 +1267,7 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg, | |||
1510 | if (x86_io_apic_ops.setup_entry(irq, &entry, dest, cfg->vector, attr)) { | 1267 | if (x86_io_apic_ops.setup_entry(irq, &entry, dest, cfg->vector, attr)) { |
1511 | pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n", | 1268 | pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n", |
1512 | mpc_ioapic_id(attr->ioapic), attr->ioapic_pin); | 1269 | mpc_ioapic_id(attr->ioapic), attr->ioapic_pin); |
1513 | __clear_irq_vector(irq, cfg); | 1270 | clear_irq_vector(irq, cfg); |
1514 | 1271 | ||
1515 | return; | 1272 | return; |
1516 | } | 1273 | } |
@@ -1641,7 +1398,7 @@ void ioapic_zap_locks(void) | |||
1641 | raw_spin_lock_init(&ioapic_lock); | 1398 | raw_spin_lock_init(&ioapic_lock); |
1642 | } | 1399 | } |
1643 | 1400 | ||
1644 | __apicdebuginit(void) print_IO_APIC(int ioapic_idx) | 1401 | static void __init print_IO_APIC(int ioapic_idx) |
1645 | { | 1402 | { |
1646 | union IO_APIC_reg_00 reg_00; | 1403 | union IO_APIC_reg_00 reg_00; |
1647 | union IO_APIC_reg_01 reg_01; | 1404 | union IO_APIC_reg_01 reg_01; |
@@ -1698,7 +1455,7 @@ __apicdebuginit(void) print_IO_APIC(int ioapic_idx) | |||
1698 | x86_io_apic_ops.print_entries(ioapic_idx, reg_01.bits.entries); | 1455 | x86_io_apic_ops.print_entries(ioapic_idx, reg_01.bits.entries); |
1699 | } | 1456 | } |
1700 | 1457 | ||
1701 | __apicdebuginit(void) print_IO_APICs(void) | 1458 | void __init print_IO_APICs(void) |
1702 | { | 1459 | { |
1703 | int ioapic_idx; | 1460 | int ioapic_idx; |
1704 | struct irq_cfg *cfg; | 1461 | struct irq_cfg *cfg; |
@@ -1731,8 +1488,7 @@ __apicdebuginit(void) print_IO_APICs(void) | |||
1731 | cfg = irq_cfg(irq); | 1488 | cfg = irq_cfg(irq); |
1732 | if (!cfg) | 1489 | if (!cfg) |
1733 | continue; | 1490 | continue; |
1734 | entry = cfg->irq_2_pin; | 1491 | if (list_empty(&cfg->irq_2_pin)) |
1735 | if (!entry) | ||
1736 | continue; | 1492 | continue; |
1737 | printk(KERN_DEBUG "IRQ%d ", irq); | 1493 | printk(KERN_DEBUG "IRQ%d ", irq); |
1738 | for_each_irq_pin(entry, cfg->irq_2_pin) | 1494 | for_each_irq_pin(entry, cfg->irq_2_pin) |
@@ -1743,205 +1499,6 @@ __apicdebuginit(void) print_IO_APICs(void) | |||
1743 | printk(KERN_INFO ".................................... done.\n"); | 1499 | printk(KERN_INFO ".................................... done.\n"); |
1744 | } | 1500 | } |
1745 | 1501 | ||
1746 | __apicdebuginit(void) print_APIC_field(int base) | ||
1747 | { | ||
1748 | int i; | ||
1749 | |||
1750 | printk(KERN_DEBUG); | ||
1751 | |||
1752 | for (i = 0; i < 8; i++) | ||
1753 | pr_cont("%08x", apic_read(base + i*0x10)); | ||
1754 | |||
1755 | pr_cont("\n"); | ||
1756 | } | ||
1757 | |||
1758 | __apicdebuginit(void) print_local_APIC(void *dummy) | ||
1759 | { | ||
1760 | unsigned int i, v, ver, maxlvt; | ||
1761 | u64 icr; | ||
1762 | |||
1763 | printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n", | ||
1764 | smp_processor_id(), hard_smp_processor_id()); | ||
1765 | v = apic_read(APIC_ID); | ||
1766 | printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id()); | ||
1767 | v = apic_read(APIC_LVR); | ||
1768 | printk(KERN_INFO "... APIC VERSION: %08x\n", v); | ||
1769 | ver = GET_APIC_VERSION(v); | ||
1770 | maxlvt = lapic_get_maxlvt(); | ||
1771 | |||
1772 | v = apic_read(APIC_TASKPRI); | ||
1773 | printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK); | ||
1774 | |||
1775 | if (APIC_INTEGRATED(ver)) { /* !82489DX */ | ||
1776 | if (!APIC_XAPIC(ver)) { | ||
1777 | v = apic_read(APIC_ARBPRI); | ||
1778 | printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v, | ||
1779 | v & APIC_ARBPRI_MASK); | ||
1780 | } | ||
1781 | v = apic_read(APIC_PROCPRI); | ||
1782 | printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v); | ||
1783 | } | ||
1784 | |||
1785 | /* | ||
1786 | * Remote read supported only in the 82489DX and local APIC for | ||
1787 | * Pentium processors. | ||
1788 | */ | ||
1789 | if (!APIC_INTEGRATED(ver) || maxlvt == 3) { | ||
1790 | v = apic_read(APIC_RRR); | ||
1791 | printk(KERN_DEBUG "... APIC RRR: %08x\n", v); | ||
1792 | } | ||
1793 | |||
1794 | v = apic_read(APIC_LDR); | ||
1795 | printk(KERN_DEBUG "... APIC LDR: %08x\n", v); | ||
1796 | if (!x2apic_enabled()) { | ||
1797 | v = apic_read(APIC_DFR); | ||
1798 | printk(KERN_DEBUG "... APIC DFR: %08x\n", v); | ||
1799 | } | ||
1800 | v = apic_read(APIC_SPIV); | ||
1801 | printk(KERN_DEBUG "... APIC SPIV: %08x\n", v); | ||
1802 | |||
1803 | printk(KERN_DEBUG "... APIC ISR field:\n"); | ||
1804 | print_APIC_field(APIC_ISR); | ||
1805 | printk(KERN_DEBUG "... APIC TMR field:\n"); | ||
1806 | print_APIC_field(APIC_TMR); | ||
1807 | printk(KERN_DEBUG "... APIC IRR field:\n"); | ||
1808 | print_APIC_field(APIC_IRR); | ||
1809 | |||
1810 | if (APIC_INTEGRATED(ver)) { /* !82489DX */ | ||
1811 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ | ||
1812 | apic_write(APIC_ESR, 0); | ||
1813 | |||
1814 | v = apic_read(APIC_ESR); | ||
1815 | printk(KERN_DEBUG "... APIC ESR: %08x\n", v); | ||
1816 | } | ||
1817 | |||
1818 | icr = apic_icr_read(); | ||
1819 | printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr); | ||
1820 | printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32)); | ||
1821 | |||
1822 | v = apic_read(APIC_LVTT); | ||
1823 | printk(KERN_DEBUG "... APIC LVTT: %08x\n", v); | ||
1824 | |||
1825 | if (maxlvt > 3) { /* PC is LVT#4. */ | ||
1826 | v = apic_read(APIC_LVTPC); | ||
1827 | printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v); | ||
1828 | } | ||
1829 | v = apic_read(APIC_LVT0); | ||
1830 | printk(KERN_DEBUG "... APIC LVT0: %08x\n", v); | ||
1831 | v = apic_read(APIC_LVT1); | ||
1832 | printk(KERN_DEBUG "... APIC LVT1: %08x\n", v); | ||
1833 | |||
1834 | if (maxlvt > 2) { /* ERR is LVT#3. */ | ||
1835 | v = apic_read(APIC_LVTERR); | ||
1836 | printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v); | ||
1837 | } | ||
1838 | |||
1839 | v = apic_read(APIC_TMICT); | ||
1840 | printk(KERN_DEBUG "... APIC TMICT: %08x\n", v); | ||
1841 | v = apic_read(APIC_TMCCT); | ||
1842 | printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v); | ||
1843 | v = apic_read(APIC_TDCR); | ||
1844 | printk(KERN_DEBUG "... APIC TDCR: %08x\n", v); | ||
1845 | |||
1846 | if (boot_cpu_has(X86_FEATURE_EXTAPIC)) { | ||
1847 | v = apic_read(APIC_EFEAT); | ||
1848 | maxlvt = (v >> 16) & 0xff; | ||
1849 | printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v); | ||
1850 | v = apic_read(APIC_ECTRL); | ||
1851 | printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v); | ||
1852 | for (i = 0; i < maxlvt; i++) { | ||
1853 | v = apic_read(APIC_EILVTn(i)); | ||
1854 | printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v); | ||
1855 | } | ||
1856 | } | ||
1857 | pr_cont("\n"); | ||
1858 | } | ||
1859 | |||
1860 | __apicdebuginit(void) print_local_APICs(int maxcpu) | ||
1861 | { | ||
1862 | int cpu; | ||
1863 | |||
1864 | if (!maxcpu) | ||
1865 | return; | ||
1866 | |||
1867 | preempt_disable(); | ||
1868 | for_each_online_cpu(cpu) { | ||
1869 | if (cpu >= maxcpu) | ||
1870 | break; | ||
1871 | smp_call_function_single(cpu, print_local_APIC, NULL, 1); | ||
1872 | } | ||
1873 | preempt_enable(); | ||
1874 | } | ||
1875 | |||
1876 | __apicdebuginit(void) print_PIC(void) | ||
1877 | { | ||
1878 | unsigned int v; | ||
1879 | unsigned long flags; | ||
1880 | |||
1881 | if (!nr_legacy_irqs()) | ||
1882 | return; | ||
1883 | |||
1884 | printk(KERN_DEBUG "\nprinting PIC contents\n"); | ||
1885 | |||
1886 | raw_spin_lock_irqsave(&i8259A_lock, flags); | ||
1887 | |||
1888 | v = inb(0xa1) << 8 | inb(0x21); | ||
1889 | printk(KERN_DEBUG "... PIC IMR: %04x\n", v); | ||
1890 | |||
1891 | v = inb(0xa0) << 8 | inb(0x20); | ||
1892 | printk(KERN_DEBUG "... PIC IRR: %04x\n", v); | ||
1893 | |||
1894 | outb(0x0b,0xa0); | ||
1895 | outb(0x0b,0x20); | ||
1896 | v = inb(0xa0) << 8 | inb(0x20); | ||
1897 | outb(0x0a,0xa0); | ||
1898 | outb(0x0a,0x20); | ||
1899 | |||
1900 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); | ||
1901 | |||
1902 | printk(KERN_DEBUG "... PIC ISR: %04x\n", v); | ||
1903 | |||
1904 | v = inb(0x4d1) << 8 | inb(0x4d0); | ||
1905 | printk(KERN_DEBUG "... PIC ELCR: %04x\n", v); | ||
1906 | } | ||
1907 | |||
1908 | static int __initdata show_lapic = 1; | ||
1909 | static __init int setup_show_lapic(char *arg) | ||
1910 | { | ||
1911 | int num = -1; | ||
1912 | |||
1913 | if (strcmp(arg, "all") == 0) { | ||
1914 | show_lapic = CONFIG_NR_CPUS; | ||
1915 | } else { | ||
1916 | get_option(&arg, &num); | ||
1917 | if (num >= 0) | ||
1918 | show_lapic = num; | ||
1919 | } | ||
1920 | |||
1921 | return 1; | ||
1922 | } | ||
1923 | __setup("show_lapic=", setup_show_lapic); | ||
1924 | |||
1925 | __apicdebuginit(int) print_ICs(void) | ||
1926 | { | ||
1927 | if (apic_verbosity == APIC_QUIET) | ||
1928 | return 0; | ||
1929 | |||
1930 | print_PIC(); | ||
1931 | |||
1932 | /* don't print out if apic is not there */ | ||
1933 | if (!cpu_has_apic && !apic_from_smp_config()) | ||
1934 | return 0; | ||
1935 | |||
1936 | print_local_APICs(show_lapic); | ||
1937 | print_IO_APICs(); | ||
1938 | |||
1939 | return 0; | ||
1940 | } | ||
1941 | |||
1942 | late_initcall(print_ICs); | ||
1943 | |||
1944 | |||
1945 | /* Where if anywhere is the i8259 connect in external int mode */ | 1502 | /* Where if anywhere is the i8259 connect in external int mode */ |
1946 | static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; | 1503 | static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; |
1947 | 1504 | ||
@@ -2244,26 +1801,12 @@ static unsigned int startup_ioapic_irq(struct irq_data *data) | |||
2244 | if (legacy_pic->irq_pending(irq)) | 1801 | if (legacy_pic->irq_pending(irq)) |
2245 | was_pending = 1; | 1802 | was_pending = 1; |
2246 | } | 1803 | } |
2247 | __unmask_ioapic(data->chip_data); | 1804 | __unmask_ioapic(irqd_cfg(data)); |
2248 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | 1805 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
2249 | 1806 | ||
2250 | return was_pending; | 1807 | return was_pending; |
2251 | } | 1808 | } |
2252 | 1809 | ||
2253 | static int ioapic_retrigger_irq(struct irq_data *data) | ||
2254 | { | ||
2255 | struct irq_cfg *cfg = data->chip_data; | ||
2256 | unsigned long flags; | ||
2257 | int cpu; | ||
2258 | |||
2259 | raw_spin_lock_irqsave(&vector_lock, flags); | ||
2260 | cpu = cpumask_first_and(cfg->domain, cpu_online_mask); | ||
2261 | apic->send_IPI_mask(cpumask_of(cpu), cfg->vector); | ||
2262 | raw_spin_unlock_irqrestore(&vector_lock, flags); | ||
2263 | |||
2264 | return 1; | ||
2265 | } | ||
2266 | |||
2267 | /* | 1810 | /* |
2268 | * Level and edge triggered IO-APIC interrupts need different handling, | 1811 | * Level and edge triggered IO-APIC interrupts need different handling, |
2269 | * so we use two separate IRQ descriptors. Edge triggered IRQs can be | 1812 | * so we use two separate IRQ descriptors. Edge triggered IRQs can be |
@@ -2273,113 +1816,6 @@ static int ioapic_retrigger_irq(struct irq_data *data) | |||
2273 | * races. | 1816 | * races. |
2274 | */ | 1817 | */ |
2275 | 1818 | ||
2276 | #ifdef CONFIG_SMP | ||
2277 | void send_cleanup_vector(struct irq_cfg *cfg) | ||
2278 | { | ||
2279 | cpumask_var_t cleanup_mask; | ||
2280 | |||
2281 | if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { | ||
2282 | unsigned int i; | ||
2283 | for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) | ||
2284 | apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); | ||
2285 | } else { | ||
2286 | cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); | ||
2287 | apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); | ||
2288 | free_cpumask_var(cleanup_mask); | ||
2289 | } | ||
2290 | cfg->move_in_progress = 0; | ||
2291 | } | ||
2292 | |||
2293 | asmlinkage __visible void smp_irq_move_cleanup_interrupt(void) | ||
2294 | { | ||
2295 | unsigned vector, me; | ||
2296 | |||
2297 | ack_APIC_irq(); | ||
2298 | irq_enter(); | ||
2299 | exit_idle(); | ||
2300 | |||
2301 | me = smp_processor_id(); | ||
2302 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { | ||
2303 | int irq; | ||
2304 | unsigned int irr; | ||
2305 | struct irq_desc *desc; | ||
2306 | struct irq_cfg *cfg; | ||
2307 | irq = __this_cpu_read(vector_irq[vector]); | ||
2308 | |||
2309 | if (irq <= VECTOR_UNDEFINED) | ||
2310 | continue; | ||
2311 | |||
2312 | desc = irq_to_desc(irq); | ||
2313 | if (!desc) | ||
2314 | continue; | ||
2315 | |||
2316 | cfg = irq_cfg(irq); | ||
2317 | if (!cfg) | ||
2318 | continue; | ||
2319 | |||
2320 | raw_spin_lock(&desc->lock); | ||
2321 | |||
2322 | /* | ||
2323 | * Check if the irq migration is in progress. If so, we | ||
2324 | * haven't received the cleanup request yet for this irq. | ||
2325 | */ | ||
2326 | if (cfg->move_in_progress) | ||
2327 | goto unlock; | ||
2328 | |||
2329 | if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) | ||
2330 | goto unlock; | ||
2331 | |||
2332 | irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); | ||
2333 | /* | ||
2334 | * Check if the vector that needs to be cleanedup is | ||
2335 | * registered at the cpu's IRR. If so, then this is not | ||
2336 | * the best time to clean it up. Lets clean it up in the | ||
2337 | * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR | ||
2338 | * to myself. | ||
2339 | */ | ||
2340 | if (irr & (1 << (vector % 32))) { | ||
2341 | apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR); | ||
2342 | goto unlock; | ||
2343 | } | ||
2344 | __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED); | ||
2345 | unlock: | ||
2346 | raw_spin_unlock(&desc->lock); | ||
2347 | } | ||
2348 | |||
2349 | irq_exit(); | ||
2350 | } | ||
2351 | |||
2352 | static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector) | ||
2353 | { | ||
2354 | unsigned me; | ||
2355 | |||
2356 | if (likely(!cfg->move_in_progress)) | ||
2357 | return; | ||
2358 | |||
2359 | me = smp_processor_id(); | ||
2360 | |||
2361 | if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) | ||
2362 | send_cleanup_vector(cfg); | ||
2363 | } | ||
2364 | |||
2365 | static void irq_complete_move(struct irq_cfg *cfg) | ||
2366 | { | ||
2367 | __irq_complete_move(cfg, ~get_irq_regs()->orig_ax); | ||
2368 | } | ||
2369 | |||
2370 | void irq_force_complete_move(int irq) | ||
2371 | { | ||
2372 | struct irq_cfg *cfg = irq_cfg(irq); | ||
2373 | |||
2374 | if (!cfg) | ||
2375 | return; | ||
2376 | |||
2377 | __irq_complete_move(cfg, cfg->vector); | ||
2378 | } | ||
2379 | #else | ||
2380 | static inline void irq_complete_move(struct irq_cfg *cfg) { } | ||
2381 | #endif | ||
2382 | |||
2383 | static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) | 1819 | static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) |
2384 | { | 1820 | { |
2385 | int apic, pin; | 1821 | int apic, pin; |
@@ -2400,41 +1836,6 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq | |||
2400 | } | 1836 | } |
2401 | } | 1837 | } |
2402 | 1838 | ||
2403 | /* | ||
2404 | * Either sets data->affinity to a valid value, and returns | ||
2405 | * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and | ||
2406 | * leaves data->affinity untouched. | ||
2407 | */ | ||
2408 | int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, | ||
2409 | unsigned int *dest_id) | ||
2410 | { | ||
2411 | struct irq_cfg *cfg = data->chip_data; | ||
2412 | unsigned int irq = data->irq; | ||
2413 | int err; | ||
2414 | |||
2415 | if (!config_enabled(CONFIG_SMP)) | ||
2416 | return -EPERM; | ||
2417 | |||
2418 | if (!cpumask_intersects(mask, cpu_online_mask)) | ||
2419 | return -EINVAL; | ||
2420 | |||
2421 | err = assign_irq_vector(irq, cfg, mask); | ||
2422 | if (err) | ||
2423 | return err; | ||
2424 | |||
2425 | err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id); | ||
2426 | if (err) { | ||
2427 | if (assign_irq_vector(irq, cfg, data->affinity)) | ||
2428 | pr_err("Failed to recover vector for irq %d\n", irq); | ||
2429 | return err; | ||
2430 | } | ||
2431 | |||
2432 | cpumask_copy(data->affinity, mask); | ||
2433 | |||
2434 | return 0; | ||
2435 | } | ||
2436 | |||
2437 | |||
2438 | int native_ioapic_set_affinity(struct irq_data *data, | 1839 | int native_ioapic_set_affinity(struct irq_data *data, |
2439 | const struct cpumask *mask, | 1840 | const struct cpumask *mask, |
2440 | bool force) | 1841 | bool force) |
@@ -2447,24 +1848,17 @@ int native_ioapic_set_affinity(struct irq_data *data, | |||
2447 | return -EPERM; | 1848 | return -EPERM; |
2448 | 1849 | ||
2449 | raw_spin_lock_irqsave(&ioapic_lock, flags); | 1850 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
2450 | ret = __ioapic_set_affinity(data, mask, &dest); | 1851 | ret = apic_set_affinity(data, mask, &dest); |
2451 | if (!ret) { | 1852 | if (!ret) { |
2452 | /* Only the high 8 bits are valid. */ | 1853 | /* Only the high 8 bits are valid. */ |
2453 | dest = SET_APIC_LOGICAL_ID(dest); | 1854 | dest = SET_APIC_LOGICAL_ID(dest); |
2454 | __target_IO_APIC_irq(irq, dest, data->chip_data); | 1855 | __target_IO_APIC_irq(irq, dest, irqd_cfg(data)); |
2455 | ret = IRQ_SET_MASK_OK_NOCOPY; | 1856 | ret = IRQ_SET_MASK_OK_NOCOPY; |
2456 | } | 1857 | } |
2457 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | 1858 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
2458 | return ret; | 1859 | return ret; |
2459 | } | 1860 | } |
2460 | 1861 | ||
2461 | static void ack_apic_edge(struct irq_data *data) | ||
2462 | { | ||
2463 | irq_complete_move(data->chip_data); | ||
2464 | irq_move_irq(data); | ||
2465 | ack_APIC_irq(); | ||
2466 | } | ||
2467 | |||
2468 | atomic_t irq_mis_count; | 1862 | atomic_t irq_mis_count; |
2469 | 1863 | ||
2470 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 1864 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
@@ -2547,9 +1941,9 @@ static inline void ioapic_irqd_unmask(struct irq_data *data, | |||
2547 | } | 1941 | } |
2548 | #endif | 1942 | #endif |
2549 | 1943 | ||
2550 | static void ack_apic_level(struct irq_data *data) | 1944 | static void ack_ioapic_level(struct irq_data *data) |
2551 | { | 1945 | { |
2552 | struct irq_cfg *cfg = data->chip_data; | 1946 | struct irq_cfg *cfg = irqd_cfg(data); |
2553 | int i, irq = data->irq; | 1947 | int i, irq = data->irq; |
2554 | unsigned long v; | 1948 | unsigned long v; |
2555 | bool masked; | 1949 | bool masked; |
@@ -2619,10 +2013,10 @@ static struct irq_chip ioapic_chip __read_mostly = { | |||
2619 | .irq_startup = startup_ioapic_irq, | 2013 | .irq_startup = startup_ioapic_irq, |
2620 | .irq_mask = mask_ioapic_irq, | 2014 | .irq_mask = mask_ioapic_irq, |
2621 | .irq_unmask = unmask_ioapic_irq, | 2015 | .irq_unmask = unmask_ioapic_irq, |
2622 | .irq_ack = ack_apic_edge, | 2016 | .irq_ack = apic_ack_edge, |
2623 | .irq_eoi = ack_apic_level, | 2017 | .irq_eoi = ack_ioapic_level, |
2624 | .irq_set_affinity = native_ioapic_set_affinity, | 2018 | .irq_set_affinity = native_ioapic_set_affinity, |
2625 | .irq_retrigger = ioapic_retrigger_irq, | 2019 | .irq_retrigger = apic_retrigger_irq, |
2626 | .flags = IRQCHIP_SKIP_SET_WAKE, | 2020 | .flags = IRQCHIP_SKIP_SET_WAKE, |
2627 | }; | 2021 | }; |
2628 | 2022 | ||
@@ -2965,6 +2359,16 @@ static int mp_irqdomain_create(int ioapic) | |||
2965 | return 0; | 2359 | return 0; |
2966 | } | 2360 | } |
2967 | 2361 | ||
2362 | static void ioapic_destroy_irqdomain(int idx) | ||
2363 | { | ||
2364 | if (ioapics[idx].irqdomain) { | ||
2365 | irq_domain_remove(ioapics[idx].irqdomain); | ||
2366 | ioapics[idx].irqdomain = NULL; | ||
2367 | } | ||
2368 | kfree(ioapics[idx].pin_info); | ||
2369 | ioapics[idx].pin_info = NULL; | ||
2370 | } | ||
2371 | |||
2968 | void __init setup_IO_APIC(void) | 2372 | void __init setup_IO_APIC(void) |
2969 | { | 2373 | { |
2970 | int ioapic; | 2374 | int ioapic; |
@@ -3044,399 +2448,6 @@ static int __init ioapic_init_ops(void) | |||
3044 | 2448 | ||
3045 | device_initcall(ioapic_init_ops); | 2449 | device_initcall(ioapic_init_ops); |
3046 | 2450 | ||
3047 | /* | ||
3048 | * Dynamic irq allocate and deallocation. Should be replaced by irq domains! | ||
3049 | */ | ||
3050 | int arch_setup_hwirq(unsigned int irq, int node) | ||
3051 | { | ||
3052 | struct irq_cfg *cfg; | ||
3053 | unsigned long flags; | ||
3054 | int ret; | ||
3055 | |||
3056 | cfg = alloc_irq_cfg(irq, node); | ||
3057 | if (!cfg) | ||
3058 | return -ENOMEM; | ||
3059 | |||
3060 | raw_spin_lock_irqsave(&vector_lock, flags); | ||
3061 | ret = __assign_irq_vector(irq, cfg, apic->target_cpus()); | ||
3062 | raw_spin_unlock_irqrestore(&vector_lock, flags); | ||
3063 | |||
3064 | if (!ret) | ||
3065 | irq_set_chip_data(irq, cfg); | ||
3066 | else | ||
3067 | free_irq_cfg(irq, cfg); | ||
3068 | return ret; | ||
3069 | } | ||
3070 | |||
3071 | void arch_teardown_hwirq(unsigned int irq) | ||
3072 | { | ||
3073 | struct irq_cfg *cfg = irq_cfg(irq); | ||
3074 | unsigned long flags; | ||
3075 | |||
3076 | free_remapped_irq(irq); | ||
3077 | raw_spin_lock_irqsave(&vector_lock, flags); | ||
3078 | __clear_irq_vector(irq, cfg); | ||
3079 | raw_spin_unlock_irqrestore(&vector_lock, flags); | ||
3080 | free_irq_cfg(irq, cfg); | ||
3081 | } | ||
3082 | |||
3083 | /* | ||
3084 | * MSI message composition | ||
3085 | */ | ||
3086 | void native_compose_msi_msg(struct pci_dev *pdev, | ||
3087 | unsigned int irq, unsigned int dest, | ||
3088 | struct msi_msg *msg, u8 hpet_id) | ||
3089 | { | ||
3090 | struct irq_cfg *cfg = irq_cfg(irq); | ||
3091 | |||
3092 | msg->address_hi = MSI_ADDR_BASE_HI; | ||
3093 | |||
3094 | if (x2apic_enabled()) | ||
3095 | msg->address_hi |= MSI_ADDR_EXT_DEST_ID(dest); | ||
3096 | |||
3097 | msg->address_lo = | ||
3098 | MSI_ADDR_BASE_LO | | ||
3099 | ((apic->irq_dest_mode == 0) ? | ||
3100 | MSI_ADDR_DEST_MODE_PHYSICAL: | ||
3101 | MSI_ADDR_DEST_MODE_LOGICAL) | | ||
3102 | ((apic->irq_delivery_mode != dest_LowestPrio) ? | ||
3103 | MSI_ADDR_REDIRECTION_CPU: | ||
3104 | MSI_ADDR_REDIRECTION_LOWPRI) | | ||
3105 | MSI_ADDR_DEST_ID(dest); | ||
3106 | |||
3107 | msg->data = | ||
3108 | MSI_DATA_TRIGGER_EDGE | | ||
3109 | MSI_DATA_LEVEL_ASSERT | | ||
3110 | ((apic->irq_delivery_mode != dest_LowestPrio) ? | ||
3111 | MSI_DATA_DELIVERY_FIXED: | ||
3112 | MSI_DATA_DELIVERY_LOWPRI) | | ||
3113 | MSI_DATA_VECTOR(cfg->vector); | ||
3114 | } | ||
3115 | |||
3116 | #ifdef CONFIG_PCI_MSI | ||
3117 | static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, | ||
3118 | struct msi_msg *msg, u8 hpet_id) | ||
3119 | { | ||
3120 | struct irq_cfg *cfg; | ||
3121 | int err; | ||
3122 | unsigned dest; | ||
3123 | |||
3124 | if (disable_apic) | ||
3125 | return -ENXIO; | ||
3126 | |||
3127 | cfg = irq_cfg(irq); | ||
3128 | err = assign_irq_vector(irq, cfg, apic->target_cpus()); | ||
3129 | if (err) | ||
3130 | return err; | ||
3131 | |||
3132 | err = apic->cpu_mask_to_apicid_and(cfg->domain, | ||
3133 | apic->target_cpus(), &dest); | ||
3134 | if (err) | ||
3135 | return err; | ||
3136 | |||
3137 | x86_msi.compose_msi_msg(pdev, irq, dest, msg, hpet_id); | ||
3138 | |||
3139 | return 0; | ||
3140 | } | ||
3141 | |||
3142 | static int | ||
3143 | msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) | ||
3144 | { | ||
3145 | struct irq_cfg *cfg = data->chip_data; | ||
3146 | struct msi_msg msg; | ||
3147 | unsigned int dest; | ||
3148 | int ret; | ||
3149 | |||
3150 | ret = __ioapic_set_affinity(data, mask, &dest); | ||
3151 | if (ret) | ||
3152 | return ret; | ||
3153 | |||
3154 | __get_cached_msi_msg(data->msi_desc, &msg); | ||
3155 | |||
3156 | msg.data &= ~MSI_DATA_VECTOR_MASK; | ||
3157 | msg.data |= MSI_DATA_VECTOR(cfg->vector); | ||
3158 | msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; | ||
3159 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | ||
3160 | |||
3161 | __pci_write_msi_msg(data->msi_desc, &msg); | ||
3162 | |||
3163 | return IRQ_SET_MASK_OK_NOCOPY; | ||
3164 | } | ||
3165 | |||
3166 | /* | ||
3167 | * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices, | ||
3168 | * which implement the MSI or MSI-X Capability Structure. | ||
3169 | */ | ||
3170 | static struct irq_chip msi_chip = { | ||
3171 | .name = "PCI-MSI", | ||
3172 | .irq_unmask = pci_msi_unmask_irq, | ||
3173 | .irq_mask = pci_msi_mask_irq, | ||
3174 | .irq_ack = ack_apic_edge, | ||
3175 | .irq_set_affinity = msi_set_affinity, | ||
3176 | .irq_retrigger = ioapic_retrigger_irq, | ||
3177 | .flags = IRQCHIP_SKIP_SET_WAKE, | ||
3178 | }; | ||
3179 | |||
3180 | int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, | ||
3181 | unsigned int irq_base, unsigned int irq_offset) | ||
3182 | { | ||
3183 | struct irq_chip *chip = &msi_chip; | ||
3184 | struct msi_msg msg; | ||
3185 | unsigned int irq = irq_base + irq_offset; | ||
3186 | int ret; | ||
3187 | |||
3188 | ret = msi_compose_msg(dev, irq, &msg, -1); | ||
3189 | if (ret < 0) | ||
3190 | return ret; | ||
3191 | |||
3192 | irq_set_msi_desc_off(irq_base, irq_offset, msidesc); | ||
3193 | |||
3194 | /* | ||
3195 | * MSI-X message is written per-IRQ, the offset is always 0. | ||
3196 | * MSI message denotes a contiguous group of IRQs, written for 0th IRQ. | ||
3197 | */ | ||
3198 | if (!irq_offset) | ||
3199 | pci_write_msi_msg(irq, &msg); | ||
3200 | |||
3201 | setup_remapped_irq(irq, irq_cfg(irq), chip); | ||
3202 | |||
3203 | irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); | ||
3204 | |||
3205 | dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq); | ||
3206 | |||
3207 | return 0; | ||
3208 | } | ||
3209 | |||
3210 | int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | ||
3211 | { | ||
3212 | struct msi_desc *msidesc; | ||
3213 | unsigned int irq; | ||
3214 | int node, ret; | ||
3215 | |||
3216 | /* Multiple MSI vectors only supported with interrupt remapping */ | ||
3217 | if (type == PCI_CAP_ID_MSI && nvec > 1) | ||
3218 | return 1; | ||
3219 | |||
3220 | node = dev_to_node(&dev->dev); | ||
3221 | |||
3222 | list_for_each_entry(msidesc, &dev->msi_list, list) { | ||
3223 | irq = irq_alloc_hwirq(node); | ||
3224 | if (!irq) | ||
3225 | return -ENOSPC; | ||
3226 | |||
3227 | ret = setup_msi_irq(dev, msidesc, irq, 0); | ||
3228 | if (ret < 0) { | ||
3229 | irq_free_hwirq(irq); | ||
3230 | return ret; | ||
3231 | } | ||
3232 | |||
3233 | } | ||
3234 | return 0; | ||
3235 | } | ||
3236 | |||
3237 | void native_teardown_msi_irq(unsigned int irq) | ||
3238 | { | ||
3239 | irq_free_hwirq(irq); | ||
3240 | } | ||
3241 | |||
3242 | #ifdef CONFIG_DMAR_TABLE | ||
3243 | static int | ||
3244 | dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, | ||
3245 | bool force) | ||
3246 | { | ||
3247 | struct irq_cfg *cfg = data->chip_data; | ||
3248 | unsigned int dest, irq = data->irq; | ||
3249 | struct msi_msg msg; | ||
3250 | int ret; | ||
3251 | |||
3252 | ret = __ioapic_set_affinity(data, mask, &dest); | ||
3253 | if (ret) | ||
3254 | return ret; | ||
3255 | |||
3256 | dmar_msi_read(irq, &msg); | ||
3257 | |||
3258 | msg.data &= ~MSI_DATA_VECTOR_MASK; | ||
3259 | msg.data |= MSI_DATA_VECTOR(cfg->vector); | ||
3260 | msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; | ||
3261 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | ||
3262 | msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest); | ||
3263 | |||
3264 | dmar_msi_write(irq, &msg); | ||
3265 | |||
3266 | return IRQ_SET_MASK_OK_NOCOPY; | ||
3267 | } | ||
3268 | |||
3269 | static struct irq_chip dmar_msi_type = { | ||
3270 | .name = "DMAR_MSI", | ||
3271 | .irq_unmask = dmar_msi_unmask, | ||
3272 | .irq_mask = dmar_msi_mask, | ||
3273 | .irq_ack = ack_apic_edge, | ||
3274 | .irq_set_affinity = dmar_msi_set_affinity, | ||
3275 | .irq_retrigger = ioapic_retrigger_irq, | ||
3276 | .flags = IRQCHIP_SKIP_SET_WAKE, | ||
3277 | }; | ||
3278 | |||
3279 | int arch_setup_dmar_msi(unsigned int irq) | ||
3280 | { | ||
3281 | int ret; | ||
3282 | struct msi_msg msg; | ||
3283 | |||
3284 | ret = msi_compose_msg(NULL, irq, &msg, -1); | ||
3285 | if (ret < 0) | ||
3286 | return ret; | ||
3287 | dmar_msi_write(irq, &msg); | ||
3288 | irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, | ||
3289 | "edge"); | ||
3290 | return 0; | ||
3291 | } | ||
3292 | #endif | ||
3293 | |||
3294 | #ifdef CONFIG_HPET_TIMER | ||
3295 | |||
3296 | static int hpet_msi_set_affinity(struct irq_data *data, | ||
3297 | const struct cpumask *mask, bool force) | ||
3298 | { | ||
3299 | struct irq_cfg *cfg = data->chip_data; | ||
3300 | struct msi_msg msg; | ||
3301 | unsigned int dest; | ||
3302 | int ret; | ||
3303 | |||
3304 | ret = __ioapic_set_affinity(data, mask, &dest); | ||
3305 | if (ret) | ||
3306 | return ret; | ||
3307 | |||
3308 | hpet_msi_read(data->handler_data, &msg); | ||
3309 | |||
3310 | msg.data &= ~MSI_DATA_VECTOR_MASK; | ||
3311 | msg.data |= MSI_DATA_VECTOR(cfg->vector); | ||
3312 | msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; | ||
3313 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | ||
3314 | |||
3315 | hpet_msi_write(data->handler_data, &msg); | ||
3316 | |||
3317 | return IRQ_SET_MASK_OK_NOCOPY; | ||
3318 | } | ||
3319 | |||
3320 | static struct irq_chip hpet_msi_type = { | ||
3321 | .name = "HPET_MSI", | ||
3322 | .irq_unmask = hpet_msi_unmask, | ||
3323 | .irq_mask = hpet_msi_mask, | ||
3324 | .irq_ack = ack_apic_edge, | ||
3325 | .irq_set_affinity = hpet_msi_set_affinity, | ||
3326 | .irq_retrigger = ioapic_retrigger_irq, | ||
3327 | .flags = IRQCHIP_SKIP_SET_WAKE, | ||
3328 | }; | ||
3329 | |||
3330 | int default_setup_hpet_msi(unsigned int irq, unsigned int id) | ||
3331 | { | ||
3332 | struct irq_chip *chip = &hpet_msi_type; | ||
3333 | struct msi_msg msg; | ||
3334 | int ret; | ||
3335 | |||
3336 | ret = msi_compose_msg(NULL, irq, &msg, id); | ||
3337 | if (ret < 0) | ||
3338 | return ret; | ||
3339 | |||
3340 | hpet_msi_write(irq_get_handler_data(irq), &msg); | ||
3341 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); | ||
3342 | setup_remapped_irq(irq, irq_cfg(irq), chip); | ||
3343 | |||
3344 | irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); | ||
3345 | return 0; | ||
3346 | } | ||
3347 | #endif | ||
3348 | |||
3349 | #endif /* CONFIG_PCI_MSI */ | ||
3350 | /* | ||
3351 | * Hypertransport interrupt support | ||
3352 | */ | ||
3353 | #ifdef CONFIG_HT_IRQ | ||
3354 | |||
3355 | static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) | ||
3356 | { | ||
3357 | struct ht_irq_msg msg; | ||
3358 | fetch_ht_irq_msg(irq, &msg); | ||
3359 | |||
3360 | msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK); | ||
3361 | msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK); | ||
3362 | |||
3363 | msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest); | ||
3364 | msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest); | ||
3365 | |||
3366 | write_ht_irq_msg(irq, &msg); | ||
3367 | } | ||
3368 | |||
3369 | static int | ||
3370 | ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) | ||
3371 | { | ||
3372 | struct irq_cfg *cfg = data->chip_data; | ||
3373 | unsigned int dest; | ||
3374 | int ret; | ||
3375 | |||
3376 | ret = __ioapic_set_affinity(data, mask, &dest); | ||
3377 | if (ret) | ||
3378 | return ret; | ||
3379 | |||
3380 | target_ht_irq(data->irq, dest, cfg->vector); | ||
3381 | return IRQ_SET_MASK_OK_NOCOPY; | ||
3382 | } | ||
3383 | |||
3384 | static struct irq_chip ht_irq_chip = { | ||
3385 | .name = "PCI-HT", | ||
3386 | .irq_mask = mask_ht_irq, | ||
3387 | .irq_unmask = unmask_ht_irq, | ||
3388 | .irq_ack = ack_apic_edge, | ||
3389 | .irq_set_affinity = ht_set_affinity, | ||
3390 | .irq_retrigger = ioapic_retrigger_irq, | ||
3391 | .flags = IRQCHIP_SKIP_SET_WAKE, | ||
3392 | }; | ||
3393 | |||
3394 | int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | ||
3395 | { | ||
3396 | struct irq_cfg *cfg; | ||
3397 | struct ht_irq_msg msg; | ||
3398 | unsigned dest; | ||
3399 | int err; | ||
3400 | |||
3401 | if (disable_apic) | ||
3402 | return -ENXIO; | ||
3403 | |||
3404 | cfg = irq_cfg(irq); | ||
3405 | err = assign_irq_vector(irq, cfg, apic->target_cpus()); | ||
3406 | if (err) | ||
3407 | return err; | ||
3408 | |||
3409 | err = apic->cpu_mask_to_apicid_and(cfg->domain, | ||
3410 | apic->target_cpus(), &dest); | ||
3411 | if (err) | ||
3412 | return err; | ||
3413 | |||
3414 | msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); | ||
3415 | |||
3416 | msg.address_lo = | ||
3417 | HT_IRQ_LOW_BASE | | ||
3418 | HT_IRQ_LOW_DEST_ID(dest) | | ||
3419 | HT_IRQ_LOW_VECTOR(cfg->vector) | | ||
3420 | ((apic->irq_dest_mode == 0) ? | ||
3421 | HT_IRQ_LOW_DM_PHYSICAL : | ||
3422 | HT_IRQ_LOW_DM_LOGICAL) | | ||
3423 | HT_IRQ_LOW_RQEOI_EDGE | | ||
3424 | ((apic->irq_delivery_mode != dest_LowestPrio) ? | ||
3425 | HT_IRQ_LOW_MT_FIXED : | ||
3426 | HT_IRQ_LOW_MT_ARBITRATED) | | ||
3427 | HT_IRQ_LOW_IRQ_MASKED; | ||
3428 | |||
3429 | write_ht_irq_msg(irq, &msg); | ||
3430 | |||
3431 | irq_set_chip_and_handler_name(irq, &ht_irq_chip, | ||
3432 | handle_edge_irq, "edge"); | ||
3433 | |||
3434 | dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq); | ||
3435 | |||
3436 | return 0; | ||
3437 | } | ||
3438 | #endif /* CONFIG_HT_IRQ */ | ||
3439 | |||
3440 | static int | 2451 | static int |
3441 | io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr) | 2452 | io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr) |
3442 | { | 2453 | { |
@@ -3451,7 +2462,7 @@ io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr) | |||
3451 | return ret; | 2462 | return ret; |
3452 | } | 2463 | } |
3453 | 2464 | ||
3454 | static int __init io_apic_get_redir_entries(int ioapic) | 2465 | static int io_apic_get_redir_entries(int ioapic) |
3455 | { | 2466 | { |
3456 | union IO_APIC_reg_01 reg_01; | 2467 | union IO_APIC_reg_01 reg_01; |
3457 | unsigned long flags; | 2468 | unsigned long flags; |
@@ -3476,28 +2487,8 @@ unsigned int arch_dynirq_lower_bound(unsigned int from) | |||
3476 | return ioapic_initialized ? ioapic_dynirq_base : gsi_top; | 2487 | return ioapic_initialized ? ioapic_dynirq_base : gsi_top; |
3477 | } | 2488 | } |
3478 | 2489 | ||
3479 | int __init arch_probe_nr_irqs(void) | ||
3480 | { | ||
3481 | int nr; | ||
3482 | |||
3483 | if (nr_irqs > (NR_VECTORS * nr_cpu_ids)) | ||
3484 | nr_irqs = NR_VECTORS * nr_cpu_ids; | ||
3485 | |||
3486 | nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids; | ||
3487 | #if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ) | ||
3488 | /* | ||
3489 | * for MSI and HT dyn irq | ||
3490 | */ | ||
3491 | nr += gsi_top * 16; | ||
3492 | #endif | ||
3493 | if (nr < nr_irqs) | ||
3494 | nr_irqs = nr; | ||
3495 | |||
3496 | return 0; | ||
3497 | } | ||
3498 | |||
3499 | #ifdef CONFIG_X86_32 | 2490 | #ifdef CONFIG_X86_32 |
3500 | static int __init io_apic_get_unique_id(int ioapic, int apic_id) | 2491 | static int io_apic_get_unique_id(int ioapic, int apic_id) |
3501 | { | 2492 | { |
3502 | union IO_APIC_reg_00 reg_00; | 2493 | union IO_APIC_reg_00 reg_00; |
3503 | static physid_mask_t apic_id_map = PHYSID_MASK_NONE; | 2494 | static physid_mask_t apic_id_map = PHYSID_MASK_NONE; |
@@ -3572,30 +2563,63 @@ static int __init io_apic_get_unique_id(int ioapic, int apic_id) | |||
3572 | return apic_id; | 2563 | return apic_id; |
3573 | } | 2564 | } |
3574 | 2565 | ||
3575 | static u8 __init io_apic_unique_id(u8 id) | 2566 | static u8 io_apic_unique_id(int idx, u8 id) |
3576 | { | 2567 | { |
3577 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && | 2568 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && |
3578 | !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) | 2569 | !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) |
3579 | return io_apic_get_unique_id(nr_ioapics, id); | 2570 | return io_apic_get_unique_id(idx, id); |
3580 | else | 2571 | else |
3581 | return id; | 2572 | return id; |
3582 | } | 2573 | } |
3583 | #else | 2574 | #else |
3584 | static u8 __init io_apic_unique_id(u8 id) | 2575 | static u8 io_apic_unique_id(int idx, u8 id) |
3585 | { | 2576 | { |
3586 | int i; | 2577 | union IO_APIC_reg_00 reg_00; |
3587 | DECLARE_BITMAP(used, 256); | 2578 | DECLARE_BITMAP(used, 256); |
2579 | unsigned long flags; | ||
2580 | u8 new_id; | ||
2581 | int i; | ||
3588 | 2582 | ||
3589 | bitmap_zero(used, 256); | 2583 | bitmap_zero(used, 256); |
3590 | for_each_ioapic(i) | 2584 | for_each_ioapic(i) |
3591 | __set_bit(mpc_ioapic_id(i), used); | 2585 | __set_bit(mpc_ioapic_id(i), used); |
2586 | |||
2587 | /* Hand out the requested id if available */ | ||
3592 | if (!test_bit(id, used)) | 2588 | if (!test_bit(id, used)) |
3593 | return id; | 2589 | return id; |
3594 | return find_first_zero_bit(used, 256); | 2590 | |
2591 | /* | ||
2592 | * Read the current id from the ioapic and keep it if | ||
2593 | * available. | ||
2594 | */ | ||
2595 | raw_spin_lock_irqsave(&ioapic_lock, flags); | ||
2596 | reg_00.raw = io_apic_read(idx, 0); | ||
2597 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | ||
2598 | new_id = reg_00.bits.ID; | ||
2599 | if (!test_bit(new_id, used)) { | ||
2600 | apic_printk(APIC_VERBOSE, KERN_INFO | ||
2601 | "IOAPIC[%d]: Using reg apic_id %d instead of %d\n", | ||
2602 | idx, new_id, id); | ||
2603 | return new_id; | ||
2604 | } | ||
2605 | |||
2606 | /* | ||
2607 | * Get the next free id and write it to the ioapic. | ||
2608 | */ | ||
2609 | new_id = find_first_zero_bit(used, 256); | ||
2610 | reg_00.bits.ID = new_id; | ||
2611 | raw_spin_lock_irqsave(&ioapic_lock, flags); | ||
2612 | io_apic_write(idx, 0, reg_00.raw); | ||
2613 | reg_00.raw = io_apic_read(idx, 0); | ||
2614 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | ||
2615 | /* Sanity check */ | ||
2616 | BUG_ON(reg_00.bits.ID != new_id); | ||
2617 | |||
2618 | return new_id; | ||
3595 | } | 2619 | } |
3596 | #endif | 2620 | #endif |
3597 | 2621 | ||
3598 | static int __init io_apic_get_version(int ioapic) | 2622 | static int io_apic_get_version(int ioapic) |
3599 | { | 2623 | { |
3600 | union IO_APIC_reg_01 reg_01; | 2624 | union IO_APIC_reg_01 reg_01; |
3601 | unsigned long flags; | 2625 | unsigned long flags; |
@@ -3702,6 +2726,7 @@ static struct resource * __init ioapic_setup_resources(void) | |||
3702 | snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); | 2726 | snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); |
3703 | mem += IOAPIC_RESOURCE_NAME_SIZE; | 2727 | mem += IOAPIC_RESOURCE_NAME_SIZE; |
3704 | num++; | 2728 | num++; |
2729 | ioapics[i].iomem_res = res; | ||
3705 | } | 2730 | } |
3706 | 2731 | ||
3707 | ioapic_resources = res; | 2732 | ioapic_resources = res; |
@@ -3799,21 +2824,7 @@ int mp_find_ioapic_pin(int ioapic, u32 gsi) | |||
3799 | return gsi - gsi_cfg->gsi_base; | 2824 | return gsi - gsi_cfg->gsi_base; |
3800 | } | 2825 | } |
3801 | 2826 | ||
3802 | static __init int bad_ioapic(unsigned long address) | 2827 | static int bad_ioapic_register(int idx) |
3803 | { | ||
3804 | if (nr_ioapics >= MAX_IO_APICS) { | ||
3805 | pr_warn("WARNING: Max # of I/O APICs (%d) exceeded (found %d), skipping\n", | ||
3806 | MAX_IO_APICS, nr_ioapics); | ||
3807 | return 1; | ||
3808 | } | ||
3809 | if (!address) { | ||
3810 | pr_warn("WARNING: Bogus (zero) I/O APIC address found in table, skipping!\n"); | ||
3811 | return 1; | ||
3812 | } | ||
3813 | return 0; | ||
3814 | } | ||
3815 | |||
3816 | static __init int bad_ioapic_register(int idx) | ||
3817 | { | 2828 | { |
3818 | union IO_APIC_reg_00 reg_00; | 2829 | union IO_APIC_reg_00 reg_00; |
3819 | union IO_APIC_reg_01 reg_01; | 2830 | union IO_APIC_reg_01 reg_01; |
@@ -3832,32 +2843,61 @@ static __init int bad_ioapic_register(int idx) | |||
3832 | return 0; | 2843 | return 0; |
3833 | } | 2844 | } |
3834 | 2845 | ||
3835 | void __init mp_register_ioapic(int id, u32 address, u32 gsi_base, | 2846 | static int find_free_ioapic_entry(void) |
3836 | struct ioapic_domain_cfg *cfg) | ||
3837 | { | 2847 | { |
3838 | int idx = 0; | 2848 | int idx; |
3839 | int entries; | 2849 | |
2850 | for (idx = 0; idx < MAX_IO_APICS; idx++) | ||
2851 | if (ioapics[idx].nr_registers == 0) | ||
2852 | return idx; | ||
2853 | |||
2854 | return MAX_IO_APICS; | ||
2855 | } | ||
2856 | |||
2857 | /** | ||
2858 | * mp_register_ioapic - Register an IOAPIC device | ||
2859 | * @id: hardware IOAPIC ID | ||
2860 | * @address: physical address of IOAPIC register area | ||
2861 | * @gsi_base: base of GSI associated with the IOAPIC | ||
2862 | * @cfg: configuration information for the IOAPIC | ||
2863 | */ | ||
2864 | int mp_register_ioapic(int id, u32 address, u32 gsi_base, | ||
2865 | struct ioapic_domain_cfg *cfg) | ||
2866 | { | ||
2867 | bool hotplug = !!ioapic_initialized; | ||
3840 | struct mp_ioapic_gsi *gsi_cfg; | 2868 | struct mp_ioapic_gsi *gsi_cfg; |
2869 | int idx, ioapic, entries; | ||
2870 | u32 gsi_end; | ||
3841 | 2871 | ||
3842 | if (bad_ioapic(address)) | 2872 | if (!address) { |
3843 | return; | 2873 | pr_warn("Bogus (zero) I/O APIC address found, skipping!\n"); |
2874 | return -EINVAL; | ||
2875 | } | ||
2876 | for_each_ioapic(ioapic) | ||
2877 | if (ioapics[ioapic].mp_config.apicaddr == address) { | ||
2878 | pr_warn("address 0x%x conflicts with IOAPIC%d\n", | ||
2879 | address, ioapic); | ||
2880 | return -EEXIST; | ||
2881 | } | ||
3844 | 2882 | ||
3845 | idx = nr_ioapics; | 2883 | idx = find_free_ioapic_entry(); |
2884 | if (idx >= MAX_IO_APICS) { | ||
2885 | pr_warn("Max # of I/O APICs (%d) exceeded (found %d), skipping\n", | ||
2886 | MAX_IO_APICS, idx); | ||
2887 | return -ENOSPC; | ||
2888 | } | ||
3846 | 2889 | ||
3847 | ioapics[idx].mp_config.type = MP_IOAPIC; | 2890 | ioapics[idx].mp_config.type = MP_IOAPIC; |
3848 | ioapics[idx].mp_config.flags = MPC_APIC_USABLE; | 2891 | ioapics[idx].mp_config.flags = MPC_APIC_USABLE; |
3849 | ioapics[idx].mp_config.apicaddr = address; | 2892 | ioapics[idx].mp_config.apicaddr = address; |
3850 | ioapics[idx].irqdomain = NULL; | ||
3851 | ioapics[idx].irqdomain_cfg = *cfg; | ||
3852 | 2893 | ||
3853 | set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); | 2894 | set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); |
3854 | |||
3855 | if (bad_ioapic_register(idx)) { | 2895 | if (bad_ioapic_register(idx)) { |
3856 | clear_fixmap(FIX_IO_APIC_BASE_0 + idx); | 2896 | clear_fixmap(FIX_IO_APIC_BASE_0 + idx); |
3857 | return; | 2897 | return -ENODEV; |
3858 | } | 2898 | } |
3859 | 2899 | ||
3860 | ioapics[idx].mp_config.apicid = io_apic_unique_id(id); | 2900 | ioapics[idx].mp_config.apicid = io_apic_unique_id(idx, id); |
3861 | ioapics[idx].mp_config.apicver = io_apic_get_version(idx); | 2901 | ioapics[idx].mp_config.apicver = io_apic_get_version(idx); |
3862 | 2902 | ||
3863 | /* | 2903 | /* |
@@ -3865,24 +2905,112 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base, | |||
3865 | * and to prevent reprogramming of IOAPIC pins (PCI GSIs). | 2905 | * and to prevent reprogramming of IOAPIC pins (PCI GSIs). |
3866 | */ | 2906 | */ |
3867 | entries = io_apic_get_redir_entries(idx); | 2907 | entries = io_apic_get_redir_entries(idx); |
2908 | gsi_end = gsi_base + entries - 1; | ||
2909 | for_each_ioapic(ioapic) { | ||
2910 | gsi_cfg = mp_ioapic_gsi_routing(ioapic); | ||
2911 | if ((gsi_base >= gsi_cfg->gsi_base && | ||
2912 | gsi_base <= gsi_cfg->gsi_end) || | ||
2913 | (gsi_end >= gsi_cfg->gsi_base && | ||
2914 | gsi_end <= gsi_cfg->gsi_end)) { | ||
2915 | pr_warn("GSI range [%u-%u] for new IOAPIC conflicts with GSI[%u-%u]\n", | ||
2916 | gsi_base, gsi_end, | ||
2917 | gsi_cfg->gsi_base, gsi_cfg->gsi_end); | ||
2918 | clear_fixmap(FIX_IO_APIC_BASE_0 + idx); | ||
2919 | return -ENOSPC; | ||
2920 | } | ||
2921 | } | ||
3868 | gsi_cfg = mp_ioapic_gsi_routing(idx); | 2922 | gsi_cfg = mp_ioapic_gsi_routing(idx); |
3869 | gsi_cfg->gsi_base = gsi_base; | 2923 | gsi_cfg->gsi_base = gsi_base; |
3870 | gsi_cfg->gsi_end = gsi_base + entries - 1; | 2924 | gsi_cfg->gsi_end = gsi_end; |
2925 | |||
2926 | ioapics[idx].irqdomain = NULL; | ||
2927 | ioapics[idx].irqdomain_cfg = *cfg; | ||
3871 | 2928 | ||
3872 | /* | 2929 | /* |
3873 | * The number of IO-APIC IRQ registers (== #pins): | 2930 | * If mp_register_ioapic() is called during early boot stage when |
2931 | * walking ACPI/SFI/DT tables, it's too early to create irqdomain, | ||
2932 | * we are still using bootmem allocator. So delay it to setup_IO_APIC(). | ||
3874 | */ | 2933 | */ |
3875 | ioapics[idx].nr_registers = entries; | 2934 | if (hotplug) { |
2935 | if (mp_irqdomain_create(idx)) { | ||
2936 | clear_fixmap(FIX_IO_APIC_BASE_0 + idx); | ||
2937 | return -ENOMEM; | ||
2938 | } | ||
2939 | alloc_ioapic_saved_registers(idx); | ||
2940 | } | ||
3876 | 2941 | ||
3877 | if (gsi_cfg->gsi_end >= gsi_top) | 2942 | if (gsi_cfg->gsi_end >= gsi_top) |
3878 | gsi_top = gsi_cfg->gsi_end + 1; | 2943 | gsi_top = gsi_cfg->gsi_end + 1; |
2944 | if (nr_ioapics <= idx) | ||
2945 | nr_ioapics = idx + 1; | ||
2946 | |||
2947 | /* Set nr_registers to mark entry present */ | ||
2948 | ioapics[idx].nr_registers = entries; | ||
3879 | 2949 | ||
3880 | pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, GSI %d-%d\n", | 2950 | pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, GSI %d-%d\n", |
3881 | idx, mpc_ioapic_id(idx), | 2951 | idx, mpc_ioapic_id(idx), |
3882 | mpc_ioapic_ver(idx), mpc_ioapic_addr(idx), | 2952 | mpc_ioapic_ver(idx), mpc_ioapic_addr(idx), |
3883 | gsi_cfg->gsi_base, gsi_cfg->gsi_end); | 2953 | gsi_cfg->gsi_base, gsi_cfg->gsi_end); |
3884 | 2954 | ||
3885 | nr_ioapics++; | 2955 | return 0; |
2956 | } | ||
2957 | |||
2958 | int mp_unregister_ioapic(u32 gsi_base) | ||
2959 | { | ||
2960 | int ioapic, pin; | ||
2961 | int found = 0; | ||
2962 | struct mp_pin_info *pin_info; | ||
2963 | |||
2964 | for_each_ioapic(ioapic) | ||
2965 | if (ioapics[ioapic].gsi_config.gsi_base == gsi_base) { | ||
2966 | found = 1; | ||
2967 | break; | ||
2968 | } | ||
2969 | if (!found) { | ||
2970 | pr_warn("can't find IOAPIC for GSI %d\n", gsi_base); | ||
2971 | return -ENODEV; | ||
2972 | } | ||
2973 | |||
2974 | for_each_pin(ioapic, pin) { | ||
2975 | pin_info = mp_pin_info(ioapic, pin); | ||
2976 | if (pin_info->count) { | ||
2977 | pr_warn("pin%d on IOAPIC%d is still in use.\n", | ||
2978 | pin, ioapic); | ||
2979 | return -EBUSY; | ||
2980 | } | ||
2981 | } | ||
2982 | |||
2983 | /* Mark entry not present */ | ||
2984 | ioapics[ioapic].nr_registers = 0; | ||
2985 | ioapic_destroy_irqdomain(ioapic); | ||
2986 | free_ioapic_saved_registers(ioapic); | ||
2987 | if (ioapics[ioapic].iomem_res) | ||
2988 | release_resource(ioapics[ioapic].iomem_res); | ||
2989 | clear_fixmap(FIX_IO_APIC_BASE_0 + ioapic); | ||
2990 | memset(&ioapics[ioapic], 0, sizeof(ioapics[ioapic])); | ||
2991 | |||
2992 | return 0; | ||
2993 | } | ||
2994 | |||
2995 | int mp_ioapic_registered(u32 gsi_base) | ||
2996 | { | ||
2997 | int ioapic; | ||
2998 | |||
2999 | for_each_ioapic(ioapic) | ||
3000 | if (ioapics[ioapic].gsi_config.gsi_base == gsi_base) | ||
3001 | return 1; | ||
3002 | |||
3003 | return 0; | ||
3004 | } | ||
3005 | |||
3006 | static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr, | ||
3007 | int ioapic, int ioapic_pin, | ||
3008 | int trigger, int polarity) | ||
3009 | { | ||
3010 | irq_attr->ioapic = ioapic; | ||
3011 | irq_attr->ioapic_pin = ioapic_pin; | ||
3012 | irq_attr->trigger = trigger; | ||
3013 | irq_attr->polarity = polarity; | ||
3886 | } | 3014 | } |
3887 | 3015 | ||
3888 | int mp_irqdomain_map(struct irq_domain *domain, unsigned int virq, | 3016 | int mp_irqdomain_map(struct irq_domain *domain, unsigned int virq, |
@@ -3931,7 +3059,7 @@ void mp_irqdomain_unmap(struct irq_domain *domain, unsigned int virq) | |||
3931 | 3059 | ||
3932 | ioapic_mask_entry(ioapic, pin); | 3060 | ioapic_mask_entry(ioapic, pin); |
3933 | __remove_pin_from_irq(cfg, ioapic, pin); | 3061 | __remove_pin_from_irq(cfg, ioapic, pin); |
3934 | WARN_ON(cfg->irq_2_pin != NULL); | 3062 | WARN_ON(!list_empty(&cfg->irq_2_pin)); |
3935 | arch_teardown_hwirq(virq); | 3063 | arch_teardown_hwirq(virq); |
3936 | } | 3064 | } |
3937 | 3065 | ||
@@ -3964,18 +3092,6 @@ int mp_set_gsi_attr(u32 gsi, int trigger, int polarity, int node) | |||
3964 | return ret; | 3092 | return ret; |
3965 | } | 3093 | } |
3966 | 3094 | ||
3967 | bool mp_should_keep_irq(struct device *dev) | ||
3968 | { | ||
3969 | if (dev->power.is_prepared) | ||
3970 | return true; | ||
3971 | #ifdef CONFIG_PM | ||
3972 | if (dev->power.runtime_status == RPM_SUSPENDING) | ||
3973 | return true; | ||
3974 | #endif | ||
3975 | |||
3976 | return false; | ||
3977 | } | ||
3978 | |||
3979 | /* Enable IOAPIC early just for system timer */ | 3095 | /* Enable IOAPIC early just for system timer */ |
3980 | void __init pre_init_apic_IRQ0(void) | 3096 | void __init pre_init_apic_IRQ0(void) |
3981 | { | 3097 | { |
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c new file mode 100644 index 000000000000..d6ba2d660dc5 --- /dev/null +++ b/arch/x86/kernel/apic/msi.c | |||
@@ -0,0 +1,286 @@ | |||
1 | /* | ||
2 | * Support of MSI, HPET and DMAR interrupts. | ||
3 | * | ||
4 | * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo | ||
5 | * Moved from arch/x86/kernel/apic/io_apic.c. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #include <linux/mm.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/pci.h> | ||
14 | #include <linux/dmar.h> | ||
15 | #include <linux/hpet.h> | ||
16 | #include <linux/msi.h> | ||
17 | #include <asm/msidef.h> | ||
18 | #include <asm/hpet.h> | ||
19 | #include <asm/hw_irq.h> | ||
20 | #include <asm/apic.h> | ||
21 | #include <asm/irq_remapping.h> | ||
22 | |||
23 | void native_compose_msi_msg(struct pci_dev *pdev, | ||
24 | unsigned int irq, unsigned int dest, | ||
25 | struct msi_msg *msg, u8 hpet_id) | ||
26 | { | ||
27 | struct irq_cfg *cfg = irq_cfg(irq); | ||
28 | |||
29 | msg->address_hi = MSI_ADDR_BASE_HI; | ||
30 | |||
31 | if (x2apic_enabled()) | ||
32 | msg->address_hi |= MSI_ADDR_EXT_DEST_ID(dest); | ||
33 | |||
34 | msg->address_lo = | ||
35 | MSI_ADDR_BASE_LO | | ||
36 | ((apic->irq_dest_mode == 0) ? | ||
37 | MSI_ADDR_DEST_MODE_PHYSICAL : | ||
38 | MSI_ADDR_DEST_MODE_LOGICAL) | | ||
39 | ((apic->irq_delivery_mode != dest_LowestPrio) ? | ||
40 | MSI_ADDR_REDIRECTION_CPU : | ||
41 | MSI_ADDR_REDIRECTION_LOWPRI) | | ||
42 | MSI_ADDR_DEST_ID(dest); | ||
43 | |||
44 | msg->data = | ||
45 | MSI_DATA_TRIGGER_EDGE | | ||
46 | MSI_DATA_LEVEL_ASSERT | | ||
47 | ((apic->irq_delivery_mode != dest_LowestPrio) ? | ||
48 | MSI_DATA_DELIVERY_FIXED : | ||
49 | MSI_DATA_DELIVERY_LOWPRI) | | ||
50 | MSI_DATA_VECTOR(cfg->vector); | ||
51 | } | ||
52 | |||
53 | static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, | ||
54 | struct msi_msg *msg, u8 hpet_id) | ||
55 | { | ||
56 | struct irq_cfg *cfg; | ||
57 | int err; | ||
58 | unsigned dest; | ||
59 | |||
60 | if (disable_apic) | ||
61 | return -ENXIO; | ||
62 | |||
63 | cfg = irq_cfg(irq); | ||
64 | err = assign_irq_vector(irq, cfg, apic->target_cpus()); | ||
65 | if (err) | ||
66 | return err; | ||
67 | |||
68 | err = apic->cpu_mask_to_apicid_and(cfg->domain, | ||
69 | apic->target_cpus(), &dest); | ||
70 | if (err) | ||
71 | return err; | ||
72 | |||
73 | x86_msi.compose_msi_msg(pdev, irq, dest, msg, hpet_id); | ||
74 | |||
75 | return 0; | ||
76 | } | ||
77 | |||
78 | static int | ||
79 | msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) | ||
80 | { | ||
81 | struct irq_cfg *cfg = irqd_cfg(data); | ||
82 | struct msi_msg msg; | ||
83 | unsigned int dest; | ||
84 | int ret; | ||
85 | |||
86 | ret = apic_set_affinity(data, mask, &dest); | ||
87 | if (ret) | ||
88 | return ret; | ||
89 | |||
90 | __get_cached_msi_msg(data->msi_desc, &msg); | ||
91 | |||
92 | msg.data &= ~MSI_DATA_VECTOR_MASK; | ||
93 | msg.data |= MSI_DATA_VECTOR(cfg->vector); | ||
94 | msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; | ||
95 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | ||
96 | |||
97 | __pci_write_msi_msg(data->msi_desc, &msg); | ||
98 | |||
99 | return IRQ_SET_MASK_OK_NOCOPY; | ||
100 | } | ||
101 | |||
102 | /* | ||
103 | * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices, | ||
104 | * which implement the MSI or MSI-X Capability Structure. | ||
105 | */ | ||
106 | static struct irq_chip msi_chip = { | ||
107 | .name = "PCI-MSI", | ||
108 | .irq_unmask = pci_msi_unmask_irq, | ||
109 | .irq_mask = pci_msi_mask_irq, | ||
110 | .irq_ack = apic_ack_edge, | ||
111 | .irq_set_affinity = msi_set_affinity, | ||
112 | .irq_retrigger = apic_retrigger_irq, | ||
113 | .flags = IRQCHIP_SKIP_SET_WAKE, | ||
114 | }; | ||
115 | |||
116 | int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, | ||
117 | unsigned int irq_base, unsigned int irq_offset) | ||
118 | { | ||
119 | struct irq_chip *chip = &msi_chip; | ||
120 | struct msi_msg msg; | ||
121 | unsigned int irq = irq_base + irq_offset; | ||
122 | int ret; | ||
123 | |||
124 | ret = msi_compose_msg(dev, irq, &msg, -1); | ||
125 | if (ret < 0) | ||
126 | return ret; | ||
127 | |||
128 | irq_set_msi_desc_off(irq_base, irq_offset, msidesc); | ||
129 | |||
130 | /* | ||
131 | * MSI-X message is written per-IRQ, the offset is always 0. | ||
132 | * MSI message denotes a contiguous group of IRQs, written for 0th IRQ. | ||
133 | */ | ||
134 | if (!irq_offset) | ||
135 | pci_write_msi_msg(irq, &msg); | ||
136 | |||
137 | setup_remapped_irq(irq, irq_cfg(irq), chip); | ||
138 | |||
139 | irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); | ||
140 | |||
141 | dev_dbg(&dev->dev, "irq %d for MSI/MSI-X\n", irq); | ||
142 | |||
143 | return 0; | ||
144 | } | ||
145 | |||
146 | int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | ||
147 | { | ||
148 | struct msi_desc *msidesc; | ||
149 | unsigned int irq; | ||
150 | int node, ret; | ||
151 | |||
152 | /* Multiple MSI vectors only supported with interrupt remapping */ | ||
153 | if (type == PCI_CAP_ID_MSI && nvec > 1) | ||
154 | return 1; | ||
155 | |||
156 | node = dev_to_node(&dev->dev); | ||
157 | |||
158 | list_for_each_entry(msidesc, &dev->msi_list, list) { | ||
159 | irq = irq_alloc_hwirq(node); | ||
160 | if (!irq) | ||
161 | return -ENOSPC; | ||
162 | |||
163 | ret = setup_msi_irq(dev, msidesc, irq, 0); | ||
164 | if (ret < 0) { | ||
165 | irq_free_hwirq(irq); | ||
166 | return ret; | ||
167 | } | ||
168 | |||
169 | } | ||
170 | return 0; | ||
171 | } | ||
172 | |||
173 | void native_teardown_msi_irq(unsigned int irq) | ||
174 | { | ||
175 | irq_free_hwirq(irq); | ||
176 | } | ||
177 | |||
178 | #ifdef CONFIG_DMAR_TABLE | ||
179 | static int | ||
180 | dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, | ||
181 | bool force) | ||
182 | { | ||
183 | struct irq_cfg *cfg = irqd_cfg(data); | ||
184 | unsigned int dest, irq = data->irq; | ||
185 | struct msi_msg msg; | ||
186 | int ret; | ||
187 | |||
188 | ret = apic_set_affinity(data, mask, &dest); | ||
189 | if (ret) | ||
190 | return ret; | ||
191 | |||
192 | dmar_msi_read(irq, &msg); | ||
193 | |||
194 | msg.data &= ~MSI_DATA_VECTOR_MASK; | ||
195 | msg.data |= MSI_DATA_VECTOR(cfg->vector); | ||
196 | msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; | ||
197 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | ||
198 | msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest); | ||
199 | |||
200 | dmar_msi_write(irq, &msg); | ||
201 | |||
202 | return IRQ_SET_MASK_OK_NOCOPY; | ||
203 | } | ||
204 | |||
205 | static struct irq_chip dmar_msi_type = { | ||
206 | .name = "DMAR_MSI", | ||
207 | .irq_unmask = dmar_msi_unmask, | ||
208 | .irq_mask = dmar_msi_mask, | ||
209 | .irq_ack = apic_ack_edge, | ||
210 | .irq_set_affinity = dmar_msi_set_affinity, | ||
211 | .irq_retrigger = apic_retrigger_irq, | ||
212 | .flags = IRQCHIP_SKIP_SET_WAKE, | ||
213 | }; | ||
214 | |||
215 | int arch_setup_dmar_msi(unsigned int irq) | ||
216 | { | ||
217 | int ret; | ||
218 | struct msi_msg msg; | ||
219 | |||
220 | ret = msi_compose_msg(NULL, irq, &msg, -1); | ||
221 | if (ret < 0) | ||
222 | return ret; | ||
223 | dmar_msi_write(irq, &msg); | ||
224 | irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, | ||
225 | "edge"); | ||
226 | return 0; | ||
227 | } | ||
228 | #endif | ||
229 | |||
230 | /* | ||
231 | * MSI message composition | ||
232 | */ | ||
233 | #ifdef CONFIG_HPET_TIMER | ||
234 | |||
235 | static int hpet_msi_set_affinity(struct irq_data *data, | ||
236 | const struct cpumask *mask, bool force) | ||
237 | { | ||
238 | struct irq_cfg *cfg = irqd_cfg(data); | ||
239 | struct msi_msg msg; | ||
240 | unsigned int dest; | ||
241 | int ret; | ||
242 | |||
243 | ret = apic_set_affinity(data, mask, &dest); | ||
244 | if (ret) | ||
245 | return ret; | ||
246 | |||
247 | hpet_msi_read(data->handler_data, &msg); | ||
248 | |||
249 | msg.data &= ~MSI_DATA_VECTOR_MASK; | ||
250 | msg.data |= MSI_DATA_VECTOR(cfg->vector); | ||
251 | msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; | ||
252 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | ||
253 | |||
254 | hpet_msi_write(data->handler_data, &msg); | ||
255 | |||
256 | return IRQ_SET_MASK_OK_NOCOPY; | ||
257 | } | ||
258 | |||
259 | static struct irq_chip hpet_msi_type = { | ||
260 | .name = "HPET_MSI", | ||
261 | .irq_unmask = hpet_msi_unmask, | ||
262 | .irq_mask = hpet_msi_mask, | ||
263 | .irq_ack = apic_ack_edge, | ||
264 | .irq_set_affinity = hpet_msi_set_affinity, | ||
265 | .irq_retrigger = apic_retrigger_irq, | ||
266 | .flags = IRQCHIP_SKIP_SET_WAKE, | ||
267 | }; | ||
268 | |||
269 | int default_setup_hpet_msi(unsigned int irq, unsigned int id) | ||
270 | { | ||
271 | struct irq_chip *chip = &hpet_msi_type; | ||
272 | struct msi_msg msg; | ||
273 | int ret; | ||
274 | |||
275 | ret = msi_compose_msg(NULL, irq, &msg, id); | ||
276 | if (ret < 0) | ||
277 | return ret; | ||
278 | |||
279 | hpet_msi_write(irq_get_handler_data(irq), &msg); | ||
280 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); | ||
281 | setup_remapped_irq(irq, irq_cfg(irq), chip); | ||
282 | |||
283 | irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); | ||
284 | return 0; | ||
285 | } | ||
286 | #endif | ||
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c new file mode 100644 index 000000000000..6cedd7914581 --- /dev/null +++ b/arch/x86/kernel/apic/vector.c | |||
@@ -0,0 +1,719 @@ | |||
1 | /* | ||
2 | * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc. | ||
3 | * | ||
4 | * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo | ||
5 | * Moved from arch/x86/kernel/apic/io_apic.c. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/compiler.h> | ||
14 | #include <linux/irqdomain.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <asm/hw_irq.h> | ||
17 | #include <asm/apic.h> | ||
18 | #include <asm/i8259.h> | ||
19 | #include <asm/desc.h> | ||
20 | #include <asm/irq_remapping.h> | ||
21 | |||
22 | static DEFINE_RAW_SPINLOCK(vector_lock); | ||
23 | |||
24 | void lock_vector_lock(void) | ||
25 | { | ||
26 | /* Used to the online set of cpus does not change | ||
27 | * during assign_irq_vector. | ||
28 | */ | ||
29 | raw_spin_lock(&vector_lock); | ||
30 | } | ||
31 | |||
32 | void unlock_vector_lock(void) | ||
33 | { | ||
34 | raw_spin_unlock(&vector_lock); | ||
35 | } | ||
36 | |||
37 | struct irq_cfg *irq_cfg(unsigned int irq) | ||
38 | { | ||
39 | return irq_get_chip_data(irq); | ||
40 | } | ||
41 | |||
42 | struct irq_cfg *irqd_cfg(struct irq_data *irq_data) | ||
43 | { | ||
44 | return irq_data->chip_data; | ||
45 | } | ||
46 | |||
47 | static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) | ||
48 | { | ||
49 | struct irq_cfg *cfg; | ||
50 | |||
51 | cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node); | ||
52 | if (!cfg) | ||
53 | return NULL; | ||
54 | if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node)) | ||
55 | goto out_cfg; | ||
56 | if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node)) | ||
57 | goto out_domain; | ||
58 | #ifdef CONFIG_X86_IO_APIC | ||
59 | INIT_LIST_HEAD(&cfg->irq_2_pin); | ||
60 | #endif | ||
61 | return cfg; | ||
62 | out_domain: | ||
63 | free_cpumask_var(cfg->domain); | ||
64 | out_cfg: | ||
65 | kfree(cfg); | ||
66 | return NULL; | ||
67 | } | ||
68 | |||
69 | struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node) | ||
70 | { | ||
71 | int res = irq_alloc_desc_at(at, node); | ||
72 | struct irq_cfg *cfg; | ||
73 | |||
74 | if (res < 0) { | ||
75 | if (res != -EEXIST) | ||
76 | return NULL; | ||
77 | cfg = irq_cfg(at); | ||
78 | if (cfg) | ||
79 | return cfg; | ||
80 | } | ||
81 | |||
82 | cfg = alloc_irq_cfg(at, node); | ||
83 | if (cfg) | ||
84 | irq_set_chip_data(at, cfg); | ||
85 | else | ||
86 | irq_free_desc(at); | ||
87 | return cfg; | ||
88 | } | ||
89 | |||
90 | static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) | ||
91 | { | ||
92 | if (!cfg) | ||
93 | return; | ||
94 | irq_set_chip_data(at, NULL); | ||
95 | free_cpumask_var(cfg->domain); | ||
96 | free_cpumask_var(cfg->old_domain); | ||
97 | kfree(cfg); | ||
98 | } | ||
99 | |||
100 | static int | ||
101 | __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) | ||
102 | { | ||
103 | /* | ||
104 | * NOTE! The local APIC isn't very good at handling | ||
105 | * multiple interrupts at the same interrupt level. | ||
106 | * As the interrupt level is determined by taking the | ||
107 | * vector number and shifting that right by 4, we | ||
108 | * want to spread these out a bit so that they don't | ||
109 | * all fall in the same interrupt level. | ||
110 | * | ||
111 | * Also, we've got to be careful not to trash gate | ||
112 | * 0x80, because int 0x80 is hm, kind of importantish. ;) | ||
113 | */ | ||
114 | static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START; | ||
115 | static int current_offset = VECTOR_OFFSET_START % 16; | ||
116 | int cpu, err; | ||
117 | cpumask_var_t tmp_mask; | ||
118 | |||
119 | if (cfg->move_in_progress) | ||
120 | return -EBUSY; | ||
121 | |||
122 | if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC)) | ||
123 | return -ENOMEM; | ||
124 | |||
125 | /* Only try and allocate irqs on cpus that are present */ | ||
126 | err = -ENOSPC; | ||
127 | cpumask_clear(cfg->old_domain); | ||
128 | cpu = cpumask_first_and(mask, cpu_online_mask); | ||
129 | while (cpu < nr_cpu_ids) { | ||
130 | int new_cpu, vector, offset; | ||
131 | |||
132 | apic->vector_allocation_domain(cpu, tmp_mask, mask); | ||
133 | |||
134 | if (cpumask_subset(tmp_mask, cfg->domain)) { | ||
135 | err = 0; | ||
136 | if (cpumask_equal(tmp_mask, cfg->domain)) | ||
137 | break; | ||
138 | /* | ||
139 | * New cpumask using the vector is a proper subset of | ||
140 | * the current in use mask. So cleanup the vector | ||
141 | * allocation for the members that are not used anymore. | ||
142 | */ | ||
143 | cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask); | ||
144 | cfg->move_in_progress = | ||
145 | cpumask_intersects(cfg->old_domain, cpu_online_mask); | ||
146 | cpumask_and(cfg->domain, cfg->domain, tmp_mask); | ||
147 | break; | ||
148 | } | ||
149 | |||
150 | vector = current_vector; | ||
151 | offset = current_offset; | ||
152 | next: | ||
153 | vector += 16; | ||
154 | if (vector >= first_system_vector) { | ||
155 | offset = (offset + 1) % 16; | ||
156 | vector = FIRST_EXTERNAL_VECTOR + offset; | ||
157 | } | ||
158 | |||
159 | if (unlikely(current_vector == vector)) { | ||
160 | cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask); | ||
161 | cpumask_andnot(tmp_mask, mask, cfg->old_domain); | ||
162 | cpu = cpumask_first_and(tmp_mask, cpu_online_mask); | ||
163 | continue; | ||
164 | } | ||
165 | |||
166 | if (test_bit(vector, used_vectors)) | ||
167 | goto next; | ||
168 | |||
169 | for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) { | ||
170 | if (per_cpu(vector_irq, new_cpu)[vector] > | ||
171 | VECTOR_UNDEFINED) | ||
172 | goto next; | ||
173 | } | ||
174 | /* Found one! */ | ||
175 | current_vector = vector; | ||
176 | current_offset = offset; | ||
177 | if (cfg->vector) { | ||
178 | cpumask_copy(cfg->old_domain, cfg->domain); | ||
179 | cfg->move_in_progress = | ||
180 | cpumask_intersects(cfg->old_domain, cpu_online_mask); | ||
181 | } | ||
182 | for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) | ||
183 | per_cpu(vector_irq, new_cpu)[vector] = irq; | ||
184 | cfg->vector = vector; | ||
185 | cpumask_copy(cfg->domain, tmp_mask); | ||
186 | err = 0; | ||
187 | break; | ||
188 | } | ||
189 | free_cpumask_var(tmp_mask); | ||
190 | |||
191 | return err; | ||
192 | } | ||
193 | |||
194 | int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) | ||
195 | { | ||
196 | int err; | ||
197 | unsigned long flags; | ||
198 | |||
199 | raw_spin_lock_irqsave(&vector_lock, flags); | ||
200 | err = __assign_irq_vector(irq, cfg, mask); | ||
201 | raw_spin_unlock_irqrestore(&vector_lock, flags); | ||
202 | return err; | ||
203 | } | ||
204 | |||
205 | void clear_irq_vector(int irq, struct irq_cfg *cfg) | ||
206 | { | ||
207 | int cpu, vector; | ||
208 | unsigned long flags; | ||
209 | |||
210 | raw_spin_lock_irqsave(&vector_lock, flags); | ||
211 | BUG_ON(!cfg->vector); | ||
212 | |||
213 | vector = cfg->vector; | ||
214 | for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) | ||
215 | per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; | ||
216 | |||
217 | cfg->vector = 0; | ||
218 | cpumask_clear(cfg->domain); | ||
219 | |||
220 | if (likely(!cfg->move_in_progress)) { | ||
221 | raw_spin_unlock_irqrestore(&vector_lock, flags); | ||
222 | return; | ||
223 | } | ||
224 | |||
225 | for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { | ||
226 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; | ||
227 | vector++) { | ||
228 | if (per_cpu(vector_irq, cpu)[vector] != irq) | ||
229 | continue; | ||
230 | per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; | ||
231 | break; | ||
232 | } | ||
233 | } | ||
234 | cfg->move_in_progress = 0; | ||
235 | raw_spin_unlock_irqrestore(&vector_lock, flags); | ||
236 | } | ||
237 | |||
238 | int __init arch_probe_nr_irqs(void) | ||
239 | { | ||
240 | int nr; | ||
241 | |||
242 | if (nr_irqs > (NR_VECTORS * nr_cpu_ids)) | ||
243 | nr_irqs = NR_VECTORS * nr_cpu_ids; | ||
244 | |||
245 | nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids; | ||
246 | #if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ) | ||
247 | /* | ||
248 | * for MSI and HT dyn irq | ||
249 | */ | ||
250 | if (gsi_top <= NR_IRQS_LEGACY) | ||
251 | nr += 8 * nr_cpu_ids; | ||
252 | else | ||
253 | nr += gsi_top * 16; | ||
254 | #endif | ||
255 | if (nr < nr_irqs) | ||
256 | nr_irqs = nr; | ||
257 | |||
258 | return nr_legacy_irqs(); | ||
259 | } | ||
260 | |||
261 | int __init arch_early_irq_init(void) | ||
262 | { | ||
263 | return arch_early_ioapic_init(); | ||
264 | } | ||
265 | |||
266 | static void __setup_vector_irq(int cpu) | ||
267 | { | ||
268 | /* Initialize vector_irq on a new cpu */ | ||
269 | int irq, vector; | ||
270 | struct irq_cfg *cfg; | ||
271 | |||
272 | /* | ||
273 | * vector_lock will make sure that we don't run into irq vector | ||
274 | * assignments that might be happening on another cpu in parallel, | ||
275 | * while we setup our initial vector to irq mappings. | ||
276 | */ | ||
277 | raw_spin_lock(&vector_lock); | ||
278 | /* Mark the inuse vectors */ | ||
279 | for_each_active_irq(irq) { | ||
280 | cfg = irq_cfg(irq); | ||
281 | if (!cfg) | ||
282 | continue; | ||
283 | |||
284 | if (!cpumask_test_cpu(cpu, cfg->domain)) | ||
285 | continue; | ||
286 | vector = cfg->vector; | ||
287 | per_cpu(vector_irq, cpu)[vector] = irq; | ||
288 | } | ||
289 | /* Mark the free vectors */ | ||
290 | for (vector = 0; vector < NR_VECTORS; ++vector) { | ||
291 | irq = per_cpu(vector_irq, cpu)[vector]; | ||
292 | if (irq <= VECTOR_UNDEFINED) | ||
293 | continue; | ||
294 | |||
295 | cfg = irq_cfg(irq); | ||
296 | if (!cpumask_test_cpu(cpu, cfg->domain)) | ||
297 | per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; | ||
298 | } | ||
299 | raw_spin_unlock(&vector_lock); | ||
300 | } | ||
301 | |||
302 | /* | ||
303 | * Setup the vector to irq mappings. | ||
304 | */ | ||
305 | void setup_vector_irq(int cpu) | ||
306 | { | ||
307 | int irq; | ||
308 | |||
309 | /* | ||
310 | * On most of the platforms, legacy PIC delivers the interrupts on the | ||
311 | * boot cpu. But there are certain platforms where PIC interrupts are | ||
312 | * delivered to multiple cpu's. If the legacy IRQ is handled by the | ||
313 | * legacy PIC, for the new cpu that is coming online, setup the static | ||
314 | * legacy vector to irq mapping: | ||
315 | */ | ||
316 | for (irq = 0; irq < nr_legacy_irqs(); irq++) | ||
317 | per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq; | ||
318 | |||
319 | __setup_vector_irq(cpu); | ||
320 | } | ||
321 | |||
322 | int apic_retrigger_irq(struct irq_data *data) | ||
323 | { | ||
324 | struct irq_cfg *cfg = irqd_cfg(data); | ||
325 | unsigned long flags; | ||
326 | int cpu; | ||
327 | |||
328 | raw_spin_lock_irqsave(&vector_lock, flags); | ||
329 | cpu = cpumask_first_and(cfg->domain, cpu_online_mask); | ||
330 | apic->send_IPI_mask(cpumask_of(cpu), cfg->vector); | ||
331 | raw_spin_unlock_irqrestore(&vector_lock, flags); | ||
332 | |||
333 | return 1; | ||
334 | } | ||
335 | |||
336 | void apic_ack_edge(struct irq_data *data) | ||
337 | { | ||
338 | irq_complete_move(irqd_cfg(data)); | ||
339 | irq_move_irq(data); | ||
340 | ack_APIC_irq(); | ||
341 | } | ||
342 | |||
343 | /* | ||
344 | * Either sets data->affinity to a valid value, and returns | ||
345 | * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and | ||
346 | * leaves data->affinity untouched. | ||
347 | */ | ||
348 | int apic_set_affinity(struct irq_data *data, const struct cpumask *mask, | ||
349 | unsigned int *dest_id) | ||
350 | { | ||
351 | struct irq_cfg *cfg = irqd_cfg(data); | ||
352 | unsigned int irq = data->irq; | ||
353 | int err; | ||
354 | |||
355 | if (!config_enabled(CONFIG_SMP)) | ||
356 | return -EPERM; | ||
357 | |||
358 | if (!cpumask_intersects(mask, cpu_online_mask)) | ||
359 | return -EINVAL; | ||
360 | |||
361 | err = assign_irq_vector(irq, cfg, mask); | ||
362 | if (err) | ||
363 | return err; | ||
364 | |||
365 | err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id); | ||
366 | if (err) { | ||
367 | if (assign_irq_vector(irq, cfg, data->affinity)) | ||
368 | pr_err("Failed to recover vector for irq %d\n", irq); | ||
369 | return err; | ||
370 | } | ||
371 | |||
372 | cpumask_copy(data->affinity, mask); | ||
373 | |||
374 | return 0; | ||
375 | } | ||
376 | |||
377 | #ifdef CONFIG_SMP | ||
378 | void send_cleanup_vector(struct irq_cfg *cfg) | ||
379 | { | ||
380 | cpumask_var_t cleanup_mask; | ||
381 | |||
382 | if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { | ||
383 | unsigned int i; | ||
384 | |||
385 | for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) | ||
386 | apic->send_IPI_mask(cpumask_of(i), | ||
387 | IRQ_MOVE_CLEANUP_VECTOR); | ||
388 | } else { | ||
389 | cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); | ||
390 | apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); | ||
391 | free_cpumask_var(cleanup_mask); | ||
392 | } | ||
393 | cfg->move_in_progress = 0; | ||
394 | } | ||
395 | |||
396 | asmlinkage __visible void smp_irq_move_cleanup_interrupt(void) | ||
397 | { | ||
398 | unsigned vector, me; | ||
399 | |||
400 | ack_APIC_irq(); | ||
401 | irq_enter(); | ||
402 | exit_idle(); | ||
403 | |||
404 | me = smp_processor_id(); | ||
405 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { | ||
406 | int irq; | ||
407 | unsigned int irr; | ||
408 | struct irq_desc *desc; | ||
409 | struct irq_cfg *cfg; | ||
410 | |||
411 | irq = __this_cpu_read(vector_irq[vector]); | ||
412 | |||
413 | if (irq <= VECTOR_UNDEFINED) | ||
414 | continue; | ||
415 | |||
416 | desc = irq_to_desc(irq); | ||
417 | if (!desc) | ||
418 | continue; | ||
419 | |||
420 | cfg = irq_cfg(irq); | ||
421 | if (!cfg) | ||
422 | continue; | ||
423 | |||
424 | raw_spin_lock(&desc->lock); | ||
425 | |||
426 | /* | ||
427 | * Check if the irq migration is in progress. If so, we | ||
428 | * haven't received the cleanup request yet for this irq. | ||
429 | */ | ||
430 | if (cfg->move_in_progress) | ||
431 | goto unlock; | ||
432 | |||
433 | if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) | ||
434 | goto unlock; | ||
435 | |||
436 | irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); | ||
437 | /* | ||
438 | * Check if the vector that needs to be cleanedup is | ||
439 | * registered at the cpu's IRR. If so, then this is not | ||
440 | * the best time to clean it up. Lets clean it up in the | ||
441 | * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR | ||
442 | * to myself. | ||
443 | */ | ||
444 | if (irr & (1 << (vector % 32))) { | ||
445 | apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR); | ||
446 | goto unlock; | ||
447 | } | ||
448 | __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED); | ||
449 | unlock: | ||
450 | raw_spin_unlock(&desc->lock); | ||
451 | } | ||
452 | |||
453 | irq_exit(); | ||
454 | } | ||
455 | |||
456 | static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector) | ||
457 | { | ||
458 | unsigned me; | ||
459 | |||
460 | if (likely(!cfg->move_in_progress)) | ||
461 | return; | ||
462 | |||
463 | me = smp_processor_id(); | ||
464 | |||
465 | if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) | ||
466 | send_cleanup_vector(cfg); | ||
467 | } | ||
468 | |||
469 | void irq_complete_move(struct irq_cfg *cfg) | ||
470 | { | ||
471 | __irq_complete_move(cfg, ~get_irq_regs()->orig_ax); | ||
472 | } | ||
473 | |||
474 | void irq_force_complete_move(int irq) | ||
475 | { | ||
476 | struct irq_cfg *cfg = irq_cfg(irq); | ||
477 | |||
478 | if (!cfg) | ||
479 | return; | ||
480 | |||
481 | __irq_complete_move(cfg, cfg->vector); | ||
482 | } | ||
483 | #endif | ||
484 | |||
485 | /* | ||
486 | * Dynamic irq allocate and deallocation. Should be replaced by irq domains! | ||
487 | */ | ||
488 | int arch_setup_hwirq(unsigned int irq, int node) | ||
489 | { | ||
490 | struct irq_cfg *cfg; | ||
491 | unsigned long flags; | ||
492 | int ret; | ||
493 | |||
494 | cfg = alloc_irq_cfg(irq, node); | ||
495 | if (!cfg) | ||
496 | return -ENOMEM; | ||
497 | |||
498 | raw_spin_lock_irqsave(&vector_lock, flags); | ||
499 | ret = __assign_irq_vector(irq, cfg, apic->target_cpus()); | ||
500 | raw_spin_unlock_irqrestore(&vector_lock, flags); | ||
501 | |||
502 | if (!ret) | ||
503 | irq_set_chip_data(irq, cfg); | ||
504 | else | ||
505 | free_irq_cfg(irq, cfg); | ||
506 | return ret; | ||
507 | } | ||
508 | |||
/*
 * Tear down a dynamically allocated hardware irq: release any
 * interrupt-remapping state first, then the assigned vector, then the
 * irq_cfg itself.
 *
 * NOTE(review): cfg is used without a NULL check, unlike
 * irq_force_complete_move() above — presumably clear_irq_vector() and
 * free_irq_cfg() tolerate NULL; confirm against their definitions.
 */
void arch_teardown_hwirq(unsigned int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);

	free_remapped_irq(irq);
	clear_irq_vector(irq, cfg);
	free_irq_cfg(irq, cfg);
}
517 | |||
518 | static void __init print_APIC_field(int base) | ||
519 | { | ||
520 | int i; | ||
521 | |||
522 | printk(KERN_DEBUG); | ||
523 | |||
524 | for (i = 0; i < 8; i++) | ||
525 | pr_cont("%08x", apic_read(base + i*0x10)); | ||
526 | |||
527 | pr_cont("\n"); | ||
528 | } | ||
529 | |||
/*
 * Dump the local APIC registers of the current CPU at debug level.
 * Invoked on each CPU in turn via smp_call_function_single() from
 * print_local_APICs(); @dummy is unused.
 *
 * The register accesses are ordered deliberately (version gates which
 * registers exist; ESR must be written before being read on some
 * parts) — do not reorder.
 */
static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	/* Version and max LVT count gate which registers are printed below. */
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* ARBPRI is not present on xAPIC and later. */
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	/* DFR is not accessible in x2APIC mode. */
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	/* The 256-bit ISR/TMR/IRR fields, eight 32-bit words each. */
	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	/* ICR is 64 bits; print low and high halves separately. */
	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	/* APIC timer: initial count, current count, divide configuration. */
	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	/* AMD extended APIC: extra LVT entries, count taken from EFEAT. */
	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}
636 | |||
637 | static void __init print_local_APICs(int maxcpu) | ||
638 | { | ||
639 | int cpu; | ||
640 | |||
641 | if (!maxcpu) | ||
642 | return; | ||
643 | |||
644 | preempt_disable(); | ||
645 | for_each_online_cpu(cpu) { | ||
646 | if (cpu >= maxcpu) | ||
647 | break; | ||
648 | smp_call_function_single(cpu, print_local_APIC, NULL, 1); | ||
649 | } | ||
650 | preempt_enable(); | ||
651 | } | ||
652 | |||
/*
 * Dump the legacy 8259A PIC state at debug level: interrupt mask,
 * request and in-service registers, plus the ELCR edge/level registers.
 * In each 16-bit value the slave PIC occupies the high byte.
 */
static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	/* Nothing to dump on platforms without a legacy PIC. */
	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC IRR: %04x\n", v);

	/* OCW3 0x0b: select ISR for the next read from the command port. */
	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	/* OCW3 0x0a: switch the read-back register to IRR again. */
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC ISR: %04x\n", v);

	/* ELCR (ports 0x4d0/0x4d1): one trigger-mode bit per IRQ line. */
	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}
684 | |||
685 | static int show_lapic __initdata = 1; | ||
686 | static __init int setup_show_lapic(char *arg) | ||
687 | { | ||
688 | int num = -1; | ||
689 | |||
690 | if (strcmp(arg, "all") == 0) { | ||
691 | show_lapic = CONFIG_NR_CPUS; | ||
692 | } else { | ||
693 | get_option(&arg, &num); | ||
694 | if (num >= 0) | ||
695 | show_lapic = num; | ||
696 | } | ||
697 | |||
698 | return 1; | ||
699 | } | ||
700 | __setup("show_lapic=", setup_show_lapic); | ||
701 | |||
702 | static int __init print_ICs(void) | ||
703 | { | ||
704 | if (apic_verbosity == APIC_QUIET) | ||
705 | return 0; | ||
706 | |||
707 | print_PIC(); | ||
708 | |||
709 | /* don't print out if apic is not there */ | ||
710 | if (!cpu_has_apic && !apic_from_smp_config()) | ||
711 | return 0; | ||
712 | |||
713 | print_local_APICs(show_lapic); | ||
714 | print_IO_APICs(); | ||
715 | |||
716 | return 0; | ||
717 | } | ||
718 | |||
719 | late_initcall(print_ICs); | ||
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index f5ab56d14287..aceb2f90c716 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <asm/nmi.h> | 28 | #include <asm/nmi.h> |
29 | #include <asm/hw_irq.h> | 29 | #include <asm/hw_irq.h> |
30 | #include <asm/apic.h> | 30 | #include <asm/apic.h> |
31 | #include <asm/io_apic.h> | ||
31 | #include <asm/hpet.h> | 32 | #include <asm/hpet.h> |
32 | #include <linux/kdebug.h> | 33 | #include <linux/kdebug.h> |
33 | #include <asm/cpu.h> | 34 | #include <asm/cpu.h> |
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 1cf7c97ff175..000d4199b03e 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -732,10 +732,10 @@ ENTRY(interrupt) | |||
732 | ENTRY(irq_entries_start) | 732 | ENTRY(irq_entries_start) |
733 | RING0_INT_FRAME | 733 | RING0_INT_FRAME |
734 | vector=FIRST_EXTERNAL_VECTOR | 734 | vector=FIRST_EXTERNAL_VECTOR |
735 | .rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7 | 735 | .rept (FIRST_SYSTEM_VECTOR-FIRST_EXTERNAL_VECTOR+6)/7 |
736 | .balign 32 | 736 | .balign 32 |
737 | .rept 7 | 737 | .rept 7 |
738 | .if vector < NR_VECTORS | 738 | .if vector < FIRST_SYSTEM_VECTOR |
739 | .if vector <> FIRST_EXTERNAL_VECTOR | 739 | .if vector <> FIRST_EXTERNAL_VECTOR |
740 | CFI_ADJUST_CFA_OFFSET -4 | 740 | CFI_ADJUST_CFA_OFFSET -4 |
741 | .endif | 741 | .endif |
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 90878aa38dbd..9ebaf63ba182 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -740,10 +740,10 @@ ENTRY(interrupt) | |||
740 | ENTRY(irq_entries_start) | 740 | ENTRY(irq_entries_start) |
741 | INTR_FRAME | 741 | INTR_FRAME |
742 | vector=FIRST_EXTERNAL_VECTOR | 742 | vector=FIRST_EXTERNAL_VECTOR |
743 | .rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7 | 743 | .rept (FIRST_SYSTEM_VECTOR-FIRST_EXTERNAL_VECTOR+6)/7 |
744 | .balign 32 | 744 | .balign 32 |
745 | .rept 7 | 745 | .rept 7 |
746 | .if vector < NR_VECTORS | 746 | .if vector < FIRST_SYSTEM_VECTOR |
747 | .if vector <> FIRST_EXTERNAL_VECTOR | 747 | .if vector <> FIRST_EXTERNAL_VECTOR |
748 | CFI_ADJUST_CFA_OFFSET -8 | 748 | CFI_ADJUST_CFA_OFFSET -8 |
749 | .endif | 749 | .endif |
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 4de73ee78361..70e181ea1eac 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c | |||
@@ -99,32 +99,9 @@ void __init init_IRQ(void) | |||
99 | x86_init.irqs.intr_init(); | 99 | x86_init.irqs.intr_init(); |
100 | } | 100 | } |
101 | 101 | ||
102 | /* | ||
103 | * Setup the vector to irq mappings. | ||
104 | */ | ||
105 | void setup_vector_irq(int cpu) | ||
106 | { | ||
107 | #ifndef CONFIG_X86_IO_APIC | ||
108 | int irq; | ||
109 | |||
110 | /* | ||
111 | * On most of the platforms, legacy PIC delivers the interrupts on the | ||
112 | * boot cpu. But there are certain platforms where PIC interrupts are | ||
113 | * delivered to multiple cpu's. If the legacy IRQ is handled by the | ||
114 | * legacy PIC, for the new cpu that is coming online, setup the static | ||
115 | * legacy vector to irq mapping: | ||
116 | */ | ||
117 | for (irq = 0; irq < nr_legacy_irqs(); irq++) | ||
118 | per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq; | ||
119 | #endif | ||
120 | |||
121 | __setup_vector_irq(cpu); | ||
122 | } | ||
123 | |||
124 | static void __init smp_intr_init(void) | 102 | static void __init smp_intr_init(void) |
125 | { | 103 | { |
126 | #ifdef CONFIG_SMP | 104 | #ifdef CONFIG_SMP |
127 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) | ||
128 | /* | 105 | /* |
129 | * The reschedule interrupt is a CPU-to-CPU reschedule-helper | 106 | * The reschedule interrupt is a CPU-to-CPU reschedule-helper |
130 | * IPI, driven by wakeup. | 107 | * IPI, driven by wakeup. |
@@ -144,7 +121,6 @@ static void __init smp_intr_init(void) | |||
144 | 121 | ||
145 | /* IPI used for rebooting/stopping */ | 122 | /* IPI used for rebooting/stopping */ |
146 | alloc_intr_gate(REBOOT_VECTOR, reboot_interrupt); | 123 | alloc_intr_gate(REBOOT_VECTOR, reboot_interrupt); |
147 | #endif | ||
148 | #endif /* CONFIG_SMP */ | 124 | #endif /* CONFIG_SMP */ |
149 | } | 125 | } |
150 | 126 | ||
@@ -159,7 +135,7 @@ static void __init apic_intr_init(void) | |||
159 | alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt); | 135 | alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt); |
160 | #endif | 136 | #endif |
161 | 137 | ||
162 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) | 138 | #ifdef CONFIG_X86_LOCAL_APIC |
163 | /* self generated IPI for local APIC timer */ | 139 | /* self generated IPI for local APIC timer */ |
164 | alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); | 140 | alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); |
165 | 141 | ||
@@ -197,10 +173,17 @@ void __init native_init_IRQ(void) | |||
197 | * 'special' SMP interrupts) | 173 | * 'special' SMP interrupts) |
198 | */ | 174 | */ |
199 | i = FIRST_EXTERNAL_VECTOR; | 175 | i = FIRST_EXTERNAL_VECTOR; |
200 | for_each_clear_bit_from(i, used_vectors, NR_VECTORS) { | 176 | #ifndef CONFIG_X86_LOCAL_APIC |
177 | #define first_system_vector NR_VECTORS | ||
178 | #endif | ||
179 | for_each_clear_bit_from(i, used_vectors, first_system_vector) { | ||
201 | /* IA32_SYSCALL_VECTOR could be used in trap_init already. */ | 180 | /* IA32_SYSCALL_VECTOR could be used in trap_init already. */ |
202 | set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]); | 181 | set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]); |
203 | } | 182 | } |
183 | #ifdef CONFIG_X86_LOCAL_APIC | ||
184 | for_each_clear_bit_from(i, used_vectors, NR_VECTORS) | ||
185 | set_intr_gate(i, spurious_interrupt); | ||
186 | #endif | ||
204 | 187 | ||
205 | if (!acpi_ioapic && !of_ioapic && nr_legacy_irqs()) | 188 | if (!acpi_ioapic && !of_ioapic && nr_legacy_irqs()) |
206 | setup_irq(2, &irq2); | 189 | setup_irq(2, &irq2); |
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index 72e8e310258d..469b23d6acc2 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <asm/tlbflush.h> | 20 | #include <asm/tlbflush.h> |
21 | #include <asm/mmu_context.h> | 21 | #include <asm/mmu_context.h> |
22 | #include <asm/apic.h> | 22 | #include <asm/apic.h> |
23 | #include <asm/io_apic.h> | ||
23 | #include <asm/cpufeature.h> | 24 | #include <asm/cpufeature.h> |
24 | #include <asm/desc.h> | 25 | #include <asm/desc.h> |
25 | #include <asm/cacheflush.h> | 26 | #include <asm/cacheflush.h> |
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 485981059a40..415480d3ea84 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <asm/pgtable.h> | 22 | #include <asm/pgtable.h> |
23 | #include <asm/tlbflush.h> | 23 | #include <asm/tlbflush.h> |
24 | #include <asm/mmu_context.h> | 24 | #include <asm/mmu_context.h> |
25 | #include <asm/io_apic.h> | ||
25 | #include <asm/debugreg.h> | 26 | #include <asm/debugreg.h> |
26 | #include <asm/kexec-bzimage64.h> | 27 | #include <asm/kexec-bzimage64.h> |
27 | 28 | ||
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 17962e667a91..bae6c609888e 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <acpi/reboot.h> | 12 | #include <acpi/reboot.h> |
13 | #include <asm/io.h> | 13 | #include <asm/io.h> |
14 | #include <asm/apic.h> | 14 | #include <asm/apic.h> |
15 | #include <asm/io_apic.h> | ||
15 | #include <asm/desc.h> | 16 | #include <asm/desc.h> |
16 | #include <asm/hpet.h> | 17 | #include <asm/hpet.h> |
17 | #include <asm/pgtable.h> | 18 | #include <asm/pgtable.h> |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 7a8f5845e8eb..6d7022c683e3 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -1084,7 +1084,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1084 | { | 1084 | { |
1085 | unsigned int i; | 1085 | unsigned int i; |
1086 | 1086 | ||
1087 | preempt_disable(); | ||
1088 | smp_cpu_index_default(); | 1087 | smp_cpu_index_default(); |
1089 | 1088 | ||
1090 | /* | 1089 | /* |
@@ -1102,22 +1101,19 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1102 | } | 1101 | } |
1103 | set_cpu_sibling_map(0); | 1102 | set_cpu_sibling_map(0); |
1104 | 1103 | ||
1105 | |||
1106 | if (smp_sanity_check(max_cpus) < 0) { | 1104 | if (smp_sanity_check(max_cpus) < 0) { |
1107 | pr_info("SMP disabled\n"); | 1105 | pr_info("SMP disabled\n"); |
1108 | disable_smp(); | 1106 | disable_smp(); |
1109 | goto out; | 1107 | return; |
1110 | } | 1108 | } |
1111 | 1109 | ||
1112 | default_setup_apic_routing(); | 1110 | default_setup_apic_routing(); |
1113 | 1111 | ||
1114 | preempt_disable(); | ||
1115 | if (read_apic_id() != boot_cpu_physical_apicid) { | 1112 | if (read_apic_id() != boot_cpu_physical_apicid) { |
1116 | panic("Boot APIC ID in local APIC unexpected (%d vs %d)", | 1113 | panic("Boot APIC ID in local APIC unexpected (%d vs %d)", |
1117 | read_apic_id(), boot_cpu_physical_apicid); | 1114 | read_apic_id(), boot_cpu_physical_apicid); |
1118 | /* Or can we switch back to PIC here? */ | 1115 | /* Or can we switch back to PIC here? */ |
1119 | } | 1116 | } |
1120 | preempt_enable(); | ||
1121 | 1117 | ||
1122 | connect_bsp_APIC(); | 1118 | connect_bsp_APIC(); |
1123 | 1119 | ||
@@ -1151,8 +1147,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1151 | uv_system_init(); | 1147 | uv_system_init(); |
1152 | 1148 | ||
1153 | set_mtrr_aps_delayed_init(); | 1149 | set_mtrr_aps_delayed_init(); |
1154 | out: | ||
1155 | preempt_enable(); | ||
1156 | } | 1150 | } |
1157 | 1151 | ||
1158 | void arch_enable_nonboot_cpus_begin(void) | 1152 | void arch_enable_nonboot_cpus_begin(void) |
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index aae94132bc24..c1c1544b8485 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -841,7 +841,7 @@ static void __init lguest_init_IRQ(void) | |||
841 | { | 841 | { |
842 | unsigned int i; | 842 | unsigned int i; |
843 | 843 | ||
844 | for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) { | 844 | for (i = FIRST_EXTERNAL_VECTOR; i < FIRST_SYSTEM_VECTOR; i++) { |
845 | /* Some systems map "vectors" to interrupts weirdly. Not us! */ | 845 | /* Some systems map "vectors" to interrupts weirdly. Not us! */ |
846 | __this_cpu_write(vector_irq[i], i - FIRST_EXTERNAL_VECTOR); | 846 | __this_cpu_write(vector_irq[i], i - FIRST_EXTERNAL_VECTOR); |
847 | if (i != SYSCALL_VECTOR) | 847 | if (i != SYSCALL_VECTOR) |
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c index b9958c364075..44b9271580b5 100644 --- a/arch/x86/pci/intel_mid_pci.c +++ b/arch/x86/pci/intel_mid_pci.c | |||
@@ -210,6 +210,9 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev) | |||
210 | { | 210 | { |
211 | int polarity; | 211 | int polarity; |
212 | 212 | ||
213 | if (dev->irq_managed && dev->irq > 0) | ||
214 | return 0; | ||
215 | |||
213 | if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) | 216 | if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) |
214 | polarity = 0; /* active high */ | 217 | polarity = 0; /* active high */ |
215 | else | 218 | else |
@@ -224,13 +227,18 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev) | |||
224 | if (mp_map_gsi_to_irq(dev->irq, IOAPIC_MAP_ALLOC) < 0) | 227 | if (mp_map_gsi_to_irq(dev->irq, IOAPIC_MAP_ALLOC) < 0) |
225 | return -EBUSY; | 228 | return -EBUSY; |
226 | 229 | ||
230 | dev->irq_managed = 1; | ||
231 | |||
227 | return 0; | 232 | return 0; |
228 | } | 233 | } |
229 | 234 | ||
230 | static void intel_mid_pci_irq_disable(struct pci_dev *dev) | 235 | static void intel_mid_pci_irq_disable(struct pci_dev *dev) |
231 | { | 236 | { |
232 | if (!mp_should_keep_irq(&dev->dev) && dev->irq > 0) | 237 | if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed && |
238 | dev->irq > 0) { | ||
233 | mp_unmap_irq(dev->irq); | 239 | mp_unmap_irq(dev->irq); |
240 | dev->irq_managed = 0; | ||
241 | } | ||
234 | } | 242 | } |
235 | 243 | ||
236 | struct pci_ops intel_mid_pci_ops = { | 244 | struct pci_ops intel_mid_pci_ops = { |
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index eb500c2592ad..5dc6ca5e1741 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c | |||
@@ -1200,11 +1200,12 @@ static int pirq_enable_irq(struct pci_dev *dev) | |||
1200 | #ifdef CONFIG_X86_IO_APIC | 1200 | #ifdef CONFIG_X86_IO_APIC |
1201 | struct pci_dev *temp_dev; | 1201 | struct pci_dev *temp_dev; |
1202 | int irq; | 1202 | int irq; |
1203 | struct io_apic_irq_attr irq_attr; | 1203 | |
1204 | if (dev->irq_managed && dev->irq > 0) | ||
1205 | return 0; | ||
1204 | 1206 | ||
1205 | irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, | 1207 | irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, |
1206 | PCI_SLOT(dev->devfn), | 1208 | PCI_SLOT(dev->devfn), pin - 1); |
1207 | pin - 1, &irq_attr); | ||
1208 | /* | 1209 | /* |
1209 | * Busses behind bridges are typically not listed in the MP-table. | 1210 | * Busses behind bridges are typically not listed in the MP-table. |
1210 | * In this case we have to look up the IRQ based on the parent bus, | 1211 | * In this case we have to look up the IRQ based on the parent bus, |
@@ -1218,7 +1219,7 @@ static int pirq_enable_irq(struct pci_dev *dev) | |||
1218 | pin = pci_swizzle_interrupt_pin(dev, pin); | 1219 | pin = pci_swizzle_interrupt_pin(dev, pin); |
1219 | irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, | 1220 | irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, |
1220 | PCI_SLOT(bridge->devfn), | 1221 | PCI_SLOT(bridge->devfn), |
1221 | pin - 1, &irq_attr); | 1222 | pin - 1); |
1222 | if (irq >= 0) | 1223 | if (irq >= 0) |
1223 | dev_warn(&dev->dev, "using bridge %s " | 1224 | dev_warn(&dev->dev, "using bridge %s " |
1224 | "INT %c to get IRQ %d\n", | 1225 | "INT %c to get IRQ %d\n", |
@@ -1228,6 +1229,7 @@ static int pirq_enable_irq(struct pci_dev *dev) | |||
1228 | } | 1229 | } |
1229 | dev = temp_dev; | 1230 | dev = temp_dev; |
1230 | if (irq >= 0) { | 1231 | if (irq >= 0) { |
1232 | dev->irq_managed = 1; | ||
1231 | dev->irq = irq; | 1233 | dev->irq = irq; |
1232 | dev_info(&dev->dev, "PCI->APIC IRQ transform: " | 1234 | dev_info(&dev->dev, "PCI->APIC IRQ transform: " |
1233 | "INT %c -> IRQ %d\n", 'A' + pin - 1, irq); | 1235 | "INT %c -> IRQ %d\n", 'A' + pin - 1, irq); |
@@ -1254,11 +1256,24 @@ static int pirq_enable_irq(struct pci_dev *dev) | |||
1254 | return 0; | 1256 | return 0; |
1255 | } | 1257 | } |
1256 | 1258 | ||
1259 | bool mp_should_keep_irq(struct device *dev) | ||
1260 | { | ||
1261 | if (dev->power.is_prepared) | ||
1262 | return true; | ||
1263 | #ifdef CONFIG_PM | ||
1264 | if (dev->power.runtime_status == RPM_SUSPENDING) | ||
1265 | return true; | ||
1266 | #endif | ||
1267 | |||
1268 | return false; | ||
1269 | } | ||
1270 | |||
1257 | static void pirq_disable_irq(struct pci_dev *dev) | 1271 | static void pirq_disable_irq(struct pci_dev *dev) |
1258 | { | 1272 | { |
1259 | if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) && | 1273 | if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) && |
1260 | dev->irq) { | 1274 | dev->irq_managed && dev->irq) { |
1261 | mp_unmap_irq(dev->irq); | 1275 | mp_unmap_irq(dev->irq); |
1262 | dev->irq = 0; | 1276 | dev->irq = 0; |
1277 | dev->irq_managed = 0; | ||
1263 | } | 1278 | } |
1264 | } | 1279 | } |
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c index b233681af4de..0ce673645432 100644 --- a/arch/x86/platform/uv/uv_irq.c +++ b/arch/x86/platform/uv/uv_irq.c | |||
@@ -131,7 +131,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | |||
131 | unsigned long mmr_offset, int limit) | 131 | unsigned long mmr_offset, int limit) |
132 | { | 132 | { |
133 | const struct cpumask *eligible_cpu = cpumask_of(cpu); | 133 | const struct cpumask *eligible_cpu = cpumask_of(cpu); |
134 | struct irq_cfg *cfg = irq_get_chip_data(irq); | 134 | struct irq_cfg *cfg = irq_cfg(irq); |
135 | unsigned long mmr_value; | 135 | unsigned long mmr_value; |
136 | struct uv_IO_APIC_route_entry *entry; | 136 | struct uv_IO_APIC_route_entry *entry; |
137 | int mmr_pnode, err; | 137 | int mmr_pnode, err; |
@@ -198,13 +198,13 @@ static int | |||
198 | uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask, | 198 | uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask, |
199 | bool force) | 199 | bool force) |
200 | { | 200 | { |
201 | struct irq_cfg *cfg = data->chip_data; | 201 | struct irq_cfg *cfg = irqd_cfg(data); |
202 | unsigned int dest; | 202 | unsigned int dest; |
203 | unsigned long mmr_value, mmr_offset; | 203 | unsigned long mmr_value, mmr_offset; |
204 | struct uv_IO_APIC_route_entry *entry; | 204 | struct uv_IO_APIC_route_entry *entry; |
205 | int mmr_pnode; | 205 | int mmr_pnode; |
206 | 206 | ||
207 | if (__ioapic_set_affinity(data, mask, &dest)) | 207 | if (apic_set_affinity(data, mask, &dest)) |
208 | return -1; | 208 | return -1; |
209 | 209 | ||
210 | mmr_value = 0; | 210 | mmr_value = 0; |
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index 7cc4e33179f9..5277a0ee5704 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c | |||
@@ -413,6 +413,9 @@ int acpi_pci_irq_enable(struct pci_dev *dev) | |||
413 | return 0; | 413 | return 0; |
414 | } | 414 | } |
415 | 415 | ||
416 | if (dev->irq_managed && dev->irq > 0) | ||
417 | return 0; | ||
418 | |||
416 | entry = acpi_pci_irq_lookup(dev, pin); | 419 | entry = acpi_pci_irq_lookup(dev, pin); |
417 | if (!entry) { | 420 | if (!entry) { |
418 | /* | 421 | /* |
@@ -456,6 +459,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev) | |||
456 | return rc; | 459 | return rc; |
457 | } | 460 | } |
458 | dev->irq = rc; | 461 | dev->irq = rc; |
462 | dev->irq_managed = 1; | ||
459 | 463 | ||
460 | if (link) | 464 | if (link) |
461 | snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link); | 465 | snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link); |
@@ -478,7 +482,7 @@ void acpi_pci_irq_disable(struct pci_dev *dev) | |||
478 | u8 pin; | 482 | u8 pin; |
479 | 483 | ||
480 | pin = dev->pin; | 484 | pin = dev->pin; |
481 | if (!pin) | 485 | if (!pin || !dev->irq_managed || dev->irq <= 0) |
482 | return; | 486 | return; |
483 | 487 | ||
484 | /* Keep IOAPIC pin configuration when suspending */ | 488 | /* Keep IOAPIC pin configuration when suspending */ |
@@ -506,6 +510,9 @@ void acpi_pci_irq_disable(struct pci_dev *dev) | |||
506 | */ | 510 | */ |
507 | 511 | ||
508 | dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin)); | 512 | dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin)); |
509 | if (gsi >= 0 && dev->irq > 0) | 513 | if (gsi >= 0) { |
510 | acpi_unregister_gsi(gsi); | 514 | acpi_unregister_gsi(gsi); |
515 | dev->irq = 0; | ||
516 | dev->irq_managed = 0; | ||
517 | } | ||
511 | } | 518 | } |
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index ef58f46c8442..342942f90a10 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c | |||
@@ -125,13 +125,12 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id) | |||
125 | } | 125 | } |
126 | 126 | ||
127 | header = (struct acpi_subtable_header *)obj->buffer.pointer; | 127 | header = (struct acpi_subtable_header *)obj->buffer.pointer; |
128 | if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) { | 128 | if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) |
129 | map_lapic_id(header, acpi_id, &apic_id); | 129 | map_lapic_id(header, acpi_id, &apic_id); |
130 | } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) { | 130 | else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) |
131 | map_lsapic_id(header, type, acpi_id, &apic_id); | 131 | map_lsapic_id(header, type, acpi_id, &apic_id); |
132 | } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) { | 132 | else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) |
133 | map_x2apic_id(header, type, acpi_id, &apic_id); | 133 | map_x2apic_id(header, type, acpi_id, &apic_id); |
134 | } | ||
135 | 134 | ||
136 | exit: | 135 | exit: |
137 | kfree(buffer.pointer); | 136 | kfree(buffer.pointer); |
@@ -164,7 +163,7 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id) | |||
164 | * For example, | 163 | * For example, |
165 | * | 164 | * |
166 | * Scope (_PR) | 165 | * Scope (_PR) |
167 | * { | 166 | * { |
168 | * Processor (CPU0, 0x00, 0x00000410, 0x06) {} | 167 | * Processor (CPU0, 0x00, 0x00000410, 0x06) {} |
169 | * Processor (CPU1, 0x01, 0x00000410, 0x06) {} | 168 | * Processor (CPU1, 0x01, 0x00000410, 0x06) {} |
170 | * Processor (CPU2, 0x02, 0x00000410, 0x06) {} | 169 | * Processor (CPU2, 0x02, 0x00000410, 0x06) {} |
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 2ba8f02ced36..782a0d15c25f 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c | |||
@@ -200,7 +200,7 @@ bool acpi_dev_resource_address_space(struct acpi_resource *ares, | |||
200 | 200 | ||
201 | status = acpi_resource_to_address64(ares, &addr); | 201 | status = acpi_resource_to_address64(ares, &addr); |
202 | if (ACPI_FAILURE(status)) | 202 | if (ACPI_FAILURE(status)) |
203 | return true; | 203 | return false; |
204 | 204 | ||
205 | res->start = addr.minimum; | 205 | res->start = addr.minimum; |
206 | res->end = addr.maximum; | 206 | res->end = addr.maximum; |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index b205f76d7129..98024856df07 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
@@ -4071,7 +4071,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry, | |||
4071 | int devid; | 4071 | int devid; |
4072 | int ret; | 4072 | int ret; |
4073 | 4073 | ||
4074 | cfg = irq_get_chip_data(irq); | 4074 | cfg = irq_cfg(irq); |
4075 | if (!cfg) | 4075 | if (!cfg) |
4076 | return -EINVAL; | 4076 | return -EINVAL; |
4077 | 4077 | ||
@@ -4134,7 +4134,7 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask, | |||
4134 | if (!config_enabled(CONFIG_SMP)) | 4134 | if (!config_enabled(CONFIG_SMP)) |
4135 | return -1; | 4135 | return -1; |
4136 | 4136 | ||
4137 | cfg = data->chip_data; | 4137 | cfg = irqd_cfg(data); |
4138 | irq = data->irq; | 4138 | irq = data->irq; |
4139 | irte_info = &cfg->irq_2_irte; | 4139 | irte_info = &cfg->irq_2_irte; |
4140 | 4140 | ||
@@ -4172,7 +4172,7 @@ static int free_irq(int irq) | |||
4172 | struct irq_2_irte *irte_info; | 4172 | struct irq_2_irte *irte_info; |
4173 | struct irq_cfg *cfg; | 4173 | struct irq_cfg *cfg; |
4174 | 4174 | ||
4175 | cfg = irq_get_chip_data(irq); | 4175 | cfg = irq_cfg(irq); |
4176 | if (!cfg) | 4176 | if (!cfg) |
4177 | return -EINVAL; | 4177 | return -EINVAL; |
4178 | 4178 | ||
@@ -4191,7 +4191,7 @@ static void compose_msi_msg(struct pci_dev *pdev, | |||
4191 | struct irq_cfg *cfg; | 4191 | struct irq_cfg *cfg; |
4192 | union irte irte; | 4192 | union irte irte; |
4193 | 4193 | ||
4194 | cfg = irq_get_chip_data(irq); | 4194 | cfg = irq_cfg(irq); |
4195 | if (!cfg) | 4195 | if (!cfg) |
4196 | return; | 4196 | return; |
4197 | 4197 | ||
@@ -4220,7 +4220,7 @@ static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec) | |||
4220 | if (!pdev) | 4220 | if (!pdev) |
4221 | return -EINVAL; | 4221 | return -EINVAL; |
4222 | 4222 | ||
4223 | cfg = irq_get_chip_data(irq); | 4223 | cfg = irq_cfg(irq); |
4224 | if (!cfg) | 4224 | if (!cfg) |
4225 | return -EINVAL; | 4225 | return -EINVAL; |
4226 | 4226 | ||
@@ -4240,7 +4240,7 @@ static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq, | |||
4240 | if (!pdev) | 4240 | if (!pdev) |
4241 | return -EINVAL; | 4241 | return -EINVAL; |
4242 | 4242 | ||
4243 | cfg = irq_get_chip_data(irq); | 4243 | cfg = irq_cfg(irq); |
4244 | if (!cfg) | 4244 | if (!cfg) |
4245 | return -EINVAL; | 4245 | return -EINVAL; |
4246 | 4246 | ||
@@ -4263,7 +4263,7 @@ static int alloc_hpet_msi(unsigned int irq, unsigned int id) | |||
4263 | struct irq_cfg *cfg; | 4263 | struct irq_cfg *cfg; |
4264 | int index, devid; | 4264 | int index, devid; |
4265 | 4265 | ||
4266 | cfg = irq_get_chip_data(irq); | 4266 | cfg = irq_cfg(irq); |
4267 | if (!cfg) | 4267 | if (!cfg) |
4268 | return -EINVAL; | 4268 | return -EINVAL; |
4269 | 4269 | ||
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 27541d440849..a55b207b9425 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c | |||
@@ -54,7 +54,7 @@ static int __init parse_ioapics_under_ir(void); | |||
54 | 54 | ||
55 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) | 55 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) |
56 | { | 56 | { |
57 | struct irq_cfg *cfg = irq_get_chip_data(irq); | 57 | struct irq_cfg *cfg = irq_cfg(irq); |
58 | return cfg ? &cfg->irq_2_iommu : NULL; | 58 | return cfg ? &cfg->irq_2_iommu : NULL; |
59 | } | 59 | } |
60 | 60 | ||
@@ -85,7 +85,7 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | |||
85 | { | 85 | { |
86 | struct ir_table *table = iommu->ir_table; | 86 | struct ir_table *table = iommu->ir_table; |
87 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); | 87 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
88 | struct irq_cfg *cfg = irq_get_chip_data(irq); | 88 | struct irq_cfg *cfg = irq_cfg(irq); |
89 | unsigned int mask = 0; | 89 | unsigned int mask = 0; |
90 | unsigned long flags; | 90 | unsigned long flags; |
91 | int index; | 91 | int index; |
@@ -153,7 +153,7 @@ static int map_irq_to_irte_handle(int irq, u16 *sub_handle) | |||
153 | static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) | 153 | static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) |
154 | { | 154 | { |
155 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); | 155 | struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); |
156 | struct irq_cfg *cfg = irq_get_chip_data(irq); | 156 | struct irq_cfg *cfg = irq_cfg(irq); |
157 | unsigned long flags; | 157 | unsigned long flags; |
158 | 158 | ||
159 | if (!irq_iommu) | 159 | if (!irq_iommu) |
@@ -1050,7 +1050,7 @@ static int | |||
1050 | intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, | 1050 | intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, |
1051 | bool force) | 1051 | bool force) |
1052 | { | 1052 | { |
1053 | struct irq_cfg *cfg = data->chip_data; | 1053 | struct irq_cfg *cfg = irqd_cfg(data); |
1054 | unsigned int dest, irq = data->irq; | 1054 | unsigned int dest, irq = data->irq; |
1055 | struct irte irte; | 1055 | struct irte irte; |
1056 | int err; | 1056 | int err; |
@@ -1105,7 +1105,7 @@ static void intel_compose_msi_msg(struct pci_dev *pdev, | |||
1105 | u16 sub_handle = 0; | 1105 | u16 sub_handle = 0; |
1106 | int ir_index; | 1106 | int ir_index; |
1107 | 1107 | ||
1108 | cfg = irq_get_chip_data(irq); | 1108 | cfg = irq_cfg(irq); |
1109 | 1109 | ||
1110 | ir_index = map_irq_to_irte_handle(irq, &sub_handle); | 1110 | ir_index = map_irq_to_irte_handle(irq, &sub_handle); |
1111 | BUG_ON(ir_index == -1); | 1111 | BUG_ON(ir_index == -1); |
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c index 2c3f5ad01098..89c4846683be 100644 --- a/drivers/iommu/irq_remapping.c +++ b/drivers/iommu/irq_remapping.c | |||
@@ -298,7 +298,7 @@ static int set_remapped_irq_affinity(struct irq_data *data, | |||
298 | 298 | ||
299 | void free_remapped_irq(int irq) | 299 | void free_remapped_irq(int irq) |
300 | { | 300 | { |
301 | struct irq_cfg *cfg = irq_get_chip_data(irq); | 301 | struct irq_cfg *cfg = irq_cfg(irq); |
302 | 302 | ||
303 | if (!remap_ops || !remap_ops->free_irq) | 303 | if (!remap_ops || !remap_ops->free_irq) |
304 | return; | 304 | return; |
@@ -311,7 +311,7 @@ void compose_remapped_msi_msg(struct pci_dev *pdev, | |||
311 | unsigned int irq, unsigned int dest, | 311 | unsigned int irq, unsigned int dest, |
312 | struct msi_msg *msg, u8 hpet_id) | 312 | struct msi_msg *msg, u8 hpet_id) |
313 | { | 313 | { |
314 | struct irq_cfg *cfg = irq_get_chip_data(irq); | 314 | struct irq_cfg *cfg = irq_cfg(irq); |
315 | 315 | ||
316 | if (!irq_remapped(cfg)) | 316 | if (!irq_remapped(cfg)) |
317 | native_compose_msi_msg(pdev, irq, dest, msg, hpet_id); | 317 | native_compose_msi_msg(pdev, irq, dest, msg, hpet_id); |
@@ -364,7 +364,7 @@ static void ir_ack_apic_edge(struct irq_data *data) | |||
364 | static void ir_ack_apic_level(struct irq_data *data) | 364 | static void ir_ack_apic_level(struct irq_data *data) |
365 | { | 365 | { |
366 | ack_APIC_irq(); | 366 | ack_APIC_irq(); |
367 | eoi_ioapic_irq(data->irq, data->chip_data); | 367 | eoi_ioapic_irq(data->irq, irqd_cfg(data)); |
368 | } | 368 | } |
369 | 369 | ||
370 | static void ir_print_prefix(struct irq_data *data, struct seq_file *p) | 370 | static void ir_print_prefix(struct irq_data *data, struct seq_file *p) |
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index cced84233ac0..7a8f1c5e65af 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig | |||
@@ -67,7 +67,7 @@ config XEN_PCIDEV_FRONTEND | |||
67 | config HT_IRQ | 67 | config HT_IRQ |
68 | bool "Interrupts on hypertransport devices" | 68 | bool "Interrupts on hypertransport devices" |
69 | default y | 69 | default y |
70 | depends on PCI && X86_LOCAL_APIC && X86_IO_APIC | 70 | depends on PCI && X86_LOCAL_APIC |
71 | help | 71 | help |
72 | This allows native hypertransport devices to use interrupts. | 72 | This allows native hypertransport devices to use interrupts. |
73 | 73 | ||
@@ -110,13 +110,6 @@ config PCI_PASID | |||
110 | 110 | ||
111 | If unsure, say N. | 111 | If unsure, say N. |
112 | 112 | ||
113 | config PCI_IOAPIC | ||
114 | bool "PCI IO-APIC hotplug support" if X86 | ||
115 | depends on PCI | ||
116 | depends on ACPI | ||
117 | depends on X86_IO_APIC | ||
118 | default !X86 | ||
119 | |||
120 | config PCI_LABEL | 113 | config PCI_LABEL |
121 | def_bool y if (DMI || ACPI) | 114 | def_bool y if (DMI || ACPI) |
122 | select NLS | 115 | select NLS |
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index e04fe2d9df3b..73e4af400a5a 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile | |||
@@ -13,8 +13,6 @@ obj-$(CONFIG_PCI_QUIRKS) += quirks.o | |||
13 | # Build PCI Express stuff if needed | 13 | # Build PCI Express stuff if needed |
14 | obj-$(CONFIG_PCIEPORTBUS) += pcie/ | 14 | obj-$(CONFIG_PCIEPORTBUS) += pcie/ |
15 | 15 | ||
16 | obj-$(CONFIG_PCI_IOAPIC) += ioapic.o | ||
17 | |||
18 | # Build the PCI Hotplug drivers if we were asked to | 16 | # Build the PCI Hotplug drivers if we were asked to |
19 | obj-$(CONFIG_HOTPLUG_PCI) += hotplug/ | 17 | obj-$(CONFIG_HOTPLUG_PCI) += hotplug/ |
20 | ifdef CONFIG_HOTPLUG_PCI | 18 | ifdef CONFIG_HOTPLUG_PCI |
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c index 3efaf4c38528..96c5c729cdbc 100644 --- a/drivers/pci/hotplug/ibmphp_core.c +++ b/drivers/pci/hotplug/ibmphp_core.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/wait.h> | 36 | #include <linux/wait.h> |
37 | #include "../pci.h" | 37 | #include "../pci.h" |
38 | #include <asm/pci_x86.h> /* for struct irq_routing_table */ | 38 | #include <asm/pci_x86.h> /* for struct irq_routing_table */ |
39 | #include <asm/io_apic.h> | ||
39 | #include "ibmphp.h" | 40 | #include "ibmphp.h" |
40 | 41 | ||
41 | #define attn_on(sl) ibmphp_hpc_writeslot (sl, HPC_SLOT_ATTNON) | 42 | #define attn_on(sl) ibmphp_hpc_writeslot (sl, HPC_SLOT_ATTNON) |
@@ -155,13 +156,10 @@ int ibmphp_init_devno(struct slot **cur_slot) | |||
155 | for (loop = 0; loop < len; loop++) { | 156 | for (loop = 0; loop < len; loop++) { |
156 | if ((*cur_slot)->number == rtable->slots[loop].slot && | 157 | if ((*cur_slot)->number == rtable->slots[loop].slot && |
157 | (*cur_slot)->bus == rtable->slots[loop].bus) { | 158 | (*cur_slot)->bus == rtable->slots[loop].bus) { |
158 | struct io_apic_irq_attr irq_attr; | ||
159 | |||
160 | (*cur_slot)->device = PCI_SLOT(rtable->slots[loop].devfn); | 159 | (*cur_slot)->device = PCI_SLOT(rtable->slots[loop].devfn); |
161 | for (i = 0; i < 4; i++) | 160 | for (i = 0; i < 4; i++) |
162 | (*cur_slot)->irq[i] = IO_APIC_get_PCI_irq_vector((int) (*cur_slot)->bus, | 161 | (*cur_slot)->irq[i] = IO_APIC_get_PCI_irq_vector((int) (*cur_slot)->bus, |
163 | (int) (*cur_slot)->device, i, | 162 | (int) (*cur_slot)->device, i); |
164 | &irq_attr); | ||
165 | 163 | ||
166 | debug("(*cur_slot)->irq[0] = %x\n", | 164 | debug("(*cur_slot)->irq[0] = %x\n", |
167 | (*cur_slot)->irq[0]); | 165 | (*cur_slot)->irq[0]); |
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c deleted file mode 100644 index f6219d36227f..000000000000 --- a/drivers/pci/ioapic.c +++ /dev/null | |||
@@ -1,121 +0,0 @@ | |||
1 | /* | ||
2 | * IOAPIC/IOxAPIC/IOSAPIC driver | ||
3 | * | ||
4 | * Copyright (C) 2009 Fujitsu Limited. | ||
5 | * (c) Copyright 2009 Hewlett-Packard Development Company, L.P. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | /* | ||
13 | * This driver manages PCI I/O APICs added by hotplug after boot. We try to | ||
14 | * claim all I/O APIC PCI devices, but those present at boot were registered | ||
15 | * when we parsed the ACPI MADT, so we'll fail when we try to re-register | ||
16 | * them. | ||
17 | */ | ||
18 | |||
19 | #include <linux/pci.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/acpi.h> | ||
22 | #include <linux/slab.h> | ||
23 | |||
24 | struct ioapic { | ||
25 | acpi_handle handle; | ||
26 | u32 gsi_base; | ||
27 | }; | ||
28 | |||
29 | static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent) | ||
30 | { | ||
31 | acpi_handle handle; | ||
32 | acpi_status status; | ||
33 | unsigned long long gsb; | ||
34 | struct ioapic *ioapic; | ||
35 | int ret; | ||
36 | char *type; | ||
37 | struct resource *res; | ||
38 | |||
39 | handle = ACPI_HANDLE(&dev->dev); | ||
40 | if (!handle) | ||
41 | return -EINVAL; | ||
42 | |||
43 | status = acpi_evaluate_integer(handle, "_GSB", NULL, &gsb); | ||
44 | if (ACPI_FAILURE(status)) | ||
45 | return -EINVAL; | ||
46 | |||
47 | /* | ||
48 | * The previous code in acpiphp evaluated _MAT if _GSB failed, but | ||
49 | * ACPI spec 4.0 sec 6.2.2 requires _GSB for hot-pluggable I/O APICs. | ||
50 | */ | ||
51 | |||
52 | ioapic = kzalloc(sizeof(*ioapic), GFP_KERNEL); | ||
53 | if (!ioapic) | ||
54 | return -ENOMEM; | ||
55 | |||
56 | ioapic->handle = handle; | ||
57 | ioapic->gsi_base = (u32) gsb; | ||
58 | |||
59 | if (dev->class == PCI_CLASS_SYSTEM_PIC_IOAPIC) | ||
60 | type = "IOAPIC"; | ||
61 | else | ||
62 | type = "IOxAPIC"; | ||
63 | |||
64 | ret = pci_enable_device(dev); | ||
65 | if (ret < 0) | ||
66 | goto exit_free; | ||
67 | |||
68 | pci_set_master(dev); | ||
69 | |||
70 | if (pci_request_region(dev, 0, type)) | ||
71 | goto exit_disable; | ||
72 | |||
73 | res = &dev->resource[0]; | ||
74 | if (acpi_register_ioapic(ioapic->handle, res->start, ioapic->gsi_base)) | ||
75 | goto exit_release; | ||
76 | |||
77 | pci_set_drvdata(dev, ioapic); | ||
78 | dev_info(&dev->dev, "%s at %pR, GSI %u\n", type, res, ioapic->gsi_base); | ||
79 | return 0; | ||
80 | |||
81 | exit_release: | ||
82 | pci_release_region(dev, 0); | ||
83 | exit_disable: | ||
84 | pci_disable_device(dev); | ||
85 | exit_free: | ||
86 | kfree(ioapic); | ||
87 | return -ENODEV; | ||
88 | } | ||
89 | |||
90 | static void ioapic_remove(struct pci_dev *dev) | ||
91 | { | ||
92 | struct ioapic *ioapic = pci_get_drvdata(dev); | ||
93 | |||
94 | acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base); | ||
95 | pci_release_region(dev, 0); | ||
96 | pci_disable_device(dev); | ||
97 | kfree(ioapic); | ||
98 | } | ||
99 | |||
100 | |||
101 | static const struct pci_device_id ioapic_devices[] = { | ||
102 | { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOAPIC, ~0) }, | ||
103 | { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOXAPIC, ~0) }, | ||
104 | { } | ||
105 | }; | ||
106 | MODULE_DEVICE_TABLE(pci, ioapic_devices); | ||
107 | |||
108 | static struct pci_driver ioapic_driver = { | ||
109 | .name = "ioapic", | ||
110 | .id_table = ioapic_devices, | ||
111 | .probe = ioapic_probe, | ||
112 | .remove = ioapic_remove, | ||
113 | }; | ||
114 | |||
115 | static int __init ioapic_init(void) | ||
116 | { | ||
117 | return pci_register_driver(&ioapic_driver); | ||
118 | } | ||
119 | module_init(ioapic_init); | ||
120 | |||
121 | MODULE_LICENSE("GPL"); | ||
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 6bff83b1f298..856d381b1d5b 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
@@ -153,6 +153,7 @@ int acpi_unmap_lsapic(int cpu); | |||
153 | 153 | ||
154 | int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base); | 154 | int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base); |
155 | int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base); | 155 | int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base); |
156 | int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base); | ||
156 | void acpi_irq_stats_init(void); | 157 | void acpi_irq_stats_init(void); |
157 | extern u32 acpi_irq_handled; | 158 | extern u32 acpi_irq_handled; |
158 | extern u32 acpi_irq_not_handled; | 159 | extern u32 acpi_irq_not_handled; |
diff --git a/include/linux/pci.h b/include/linux/pci.h index 44a27696ab6c..360a966a97a5 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -349,6 +349,7 @@ struct pci_dev { | |||
349 | unsigned int __aer_firmware_first:1; | 349 | unsigned int __aer_firmware_first:1; |
350 | unsigned int broken_intx_masking:1; | 350 | unsigned int broken_intx_masking:1; |
351 | unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */ | 351 | unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */ |
352 | unsigned int irq_managed:1; | ||
352 | pci_dev_flags_t dev_flags; | 353 | pci_dev_flags_t dev_flags; |
353 | atomic_t enable_cnt; /* pci_enable_device has been called */ | 354 | atomic_t enable_cnt; /* pci_enable_device has been called */ |
354 | 355 | ||