Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/Makefile                                          |   2
-rw-r--r--  arch/x86/kernel/acpi/boot.c                                       |   2
-rw-r--r--  arch/x86/kernel/apic_32.c                                         |  18
-rw-r--r--  arch/x86/kernel/entry_32.S                                        |   2
-rw-r--r--  arch/x86/kernel/genx2apic_uv_x.c                                  | 141
-rw-r--r--  arch/x86/kernel/i8259.c (renamed from arch/x86/kernel/i8259_32.c) | 136
-rw-r--r--  arch/x86/kernel/i8259_64.c                                        | 512
-rw-r--r--  arch/x86/kernel/io_apic_32.c                                      |   4
-rw-r--r--  arch/x86/kernel/io_apic_64.c                                      |   6
-rw-r--r--  arch/x86/kernel/irq_32.c                                          | 216
-rw-r--r--  arch/x86/kernel/irqinit_32.c                                      | 114
-rw-r--r--  arch/x86/kernel/irqinit_64.c                                      | 217
-rw-r--r--  arch/x86/kernel/vmiclock_32.c                                     |   3
-rw-r--r--  arch/x86/mach-visws/visws_apic.c                                  |   3
14 files changed, 602 insertions(+), 774 deletions(-)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 5e618c3b4720..2a53ad2cb450 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -18,7 +18,7 @@ CFLAGS_tsc_64.o := $(nostackp)
 obj-y			:= process_$(BITS).o signal_$(BITS).o entry_$(BITS).o
 obj-y			+= traps_$(BITS).o irq_$(BITS).o
 obj-y			+= time_$(BITS).o ioport.o ldt.o
-obj-y			+= setup_$(BITS).o i8259_$(BITS).o setup.o
+obj-y			+= setup_$(BITS).o i8259.o irqinit_$(BITS).o setup.o
 obj-$(CONFIG_X86_32)	+= sys_i386_32.o i386_ksyms_32.o
 obj-$(CONFIG_X86_64)	+= sys_x86_64.o x8664_ksyms_64.o
 obj-$(CONFIG_X86_64)	+= syscall_64.o vsyscall_64.o setup64.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 33c5216fd3e1..ff1a7b49a460 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -514,8 +514,6 @@ int acpi_register_gsi(u32 gsi, int triggering, int polarity)
 	 * Make sure all (legacy) PCI IRQs are set as level-triggered.
 	 */
 	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
-		extern void eisa_set_level_irq(unsigned int irq);
-
 		if (triggering == ACPI_LEVEL_SENSITIVE)
 			eisa_set_level_irq(gsi);
 	}
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index 4b99b1bdeb6c..d5767cb19d56 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -71,6 +71,10 @@ int local_apic_timer_disabled;
 int local_apic_timer_c2_ok;
 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
 
+int first_system_vector = 0xfe;
+
+char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE};
+
 /*
  * Debug level, exported for io_apic.c
  */
@@ -1351,13 +1355,13 @@ void __init smp_intr_init(void)
 	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
 	 * IPI, driven by wakeup.
 	 */
-	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
+	alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
 
 	/* IPI for invalidation */
-	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
+	alloc_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
 
 	/* IPI for generic function call */
-	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
 }
 #endif
 
@@ -1370,15 +1374,15 @@ void __init apic_intr_init(void)
 	smp_intr_init();
 #endif
 	/* self generated IPI for local APIC timer */
-	set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
+	alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
 
 	/* IPI vectors for APIC spurious and error interrupts */
-	set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
-	set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
+	alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
+	alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
 
 	/* thermal monitor LVT interrupt */
 #ifdef CONFIG_X86_MCE_P4THERMAL
-	set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
+	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
 #endif
 }
 
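The alloc_intr_gate() calls above replace set_intr_gate() so that system vectors are tracked at runtime instead of being fixed at compile time. The helper itself lies outside this diff; a minimal sketch of its assumed behavior, in terms of the new first_system_vector and system_vectors globals (treat the exact definition as an assumption):

	/* sketch only: assumed helper, not part of this diff */
	static inline void alloc_system_vector(int vector)
	{
		if (system_vectors[vector] == SYS_VECTOR_FREE) {
			system_vectors[vector] = SYS_VECTOR_ALLOCED;
			/* remember the lowest vector handed out */
			if (first_system_vector > vector)
				first_system_vector = vector;
		} else
			BUG();
	}

	static inline void alloc_intr_gate(unsigned int n, void *addr)
	{
		alloc_system_vector(n);	/* bookkeeping first */
		set_intr_gate(n, addr);	/* then install the gate as before */
	}

With this, the device-vector allocator (see the io_apic changes below) can scan up to the lowest vector actually claimed by a system interrupt rather than up to a fixed FIRST_SYSTEM_VECTOR.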
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index c778e4fa55a2..159a1c76d2bd 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -51,7 +51,7 @@
 #include <asm/percpu.h>
 #include <asm/dwarf2.h>
 #include <asm/processor-flags.h>
-#include "irq_vectors.h"
+#include <asm/irq_vectors.h>
 
 /*
  * We use macros for low-level operations which need to be overridden
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index ebf13908a743..45e84acca8a9 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -5,7 +5,7 @@
 *
 * SGI UV APIC functions (note: not an Intel compatible APIC)
 *
- * Copyright (C) 2007 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
 */
 
 #include <linux/threads.h>
@@ -55,37 +55,37 @@ static cpumask_t uv_vector_allocation_domain(int cpu)
 int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
 {
 	unsigned long val;
-	int nasid;
+	int pnode;
 
-	nasid = uv_apicid_to_nasid(phys_apicid);
+	pnode = uv_apicid_to_pnode(phys_apicid);
 	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
 	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
 	    (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
 	    APIC_DM_INIT;
-	uv_write_global_mmr64(nasid, UVH_IPI_INT, val);
+	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 	mdelay(10);
 
 	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
 	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
 	    (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
 	    APIC_DM_STARTUP;
-	uv_write_global_mmr64(nasid, UVH_IPI_INT, val);
+	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 	return 0;
 }
 
 static void uv_send_IPI_one(int cpu, int vector)
 {
 	unsigned long val, apicid, lapicid;
-	int nasid;
+	int pnode;
 
 	apicid = per_cpu(x86_cpu_to_apicid, cpu); /* ZZZ - cache node-local ? */
 	lapicid = apicid & 0x3f;	/* ZZZ macro needed */
-	nasid = uv_apicid_to_nasid(apicid);
+	pnode = uv_apicid_to_pnode(apicid);
 	val =
 	    (1UL << UVH_IPI_INT_SEND_SHFT) | (lapicid <<
 					      UVH_IPI_INT_APIC_ID_SHFT) |
 	    (vector << UVH_IPI_INT_VECTOR_SHFT);
-	uv_write_global_mmr64(nasid, UVH_IPI_INT, val);
+	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 }
 
 static void uv_send_IPI_mask(cpumask_t mask, int vector)
@@ -159,39 +159,81 @@ struct genapic apic_x2apic_uv_x = {
 	.phys_pkg_id = phys_pkg_id,	/* Fixme ZZZ */
 };
 
-static __cpuinit void set_x2apic_extra_bits(int nasid)
+static __cpuinit void set_x2apic_extra_bits(int pnode)
 {
-	__get_cpu_var(x2apic_extra_bits) = ((nasid >> 1) << 6);
+	__get_cpu_var(x2apic_extra_bits) = (pnode << 6);
 }
 
 /*
  * Called on boot cpu.
  */
+static __init int boot_pnode_to_blade(int pnode)
+{
+	int blade;
+
+	for (blade = 0; blade < uv_num_possible_blades(); blade++)
+		if (pnode == uv_blade_info[blade].pnode)
+			return blade;
+	BUG();
+}
+
+struct redir_addr {
+	unsigned long redirect;
+	unsigned long alias;
+};
+
+#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
+
+static __initdata struct redir_addr redir_addrs[] = {
+	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG},
+	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG},
+	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG},
+};
+
+static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
+{
+	union uvh_si_alias0_overlay_config_u alias;
+	union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
+		alias.v = uv_read_local_mmr(redir_addrs[i].alias);
+		if (alias.s.base == 0) {
+			*size = (1UL << alias.s.m_alias);
+			redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
+			*base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
+			return;
+		}
+	}
+	BUG();
+}
+
 static __init void uv_system_init(void)
 {
 	union uvh_si_addr_map_config_u m_n_config;
-	int bytes, nid, cpu, lcpu, nasid, last_nasid, blade;
-	unsigned long mmr_base;
+	union uvh_node_id_u node_id;
+	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
+	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
+	unsigned long mmr_base, present;
 
 	m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
+	m_val = m_n_config.s.m_skt;
+	n_val = m_n_config.s.n_skt;
 	mmr_base =
 	    uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
 	    ~UV_MMR_ENABLE;
 	printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);
 
-	last_nasid = -1;
-	for_each_possible_cpu(cpu) {
-		nid = cpu_to_node(cpu);
-		nasid = uv_apicid_to_nasid(per_cpu(x86_cpu_to_apicid, cpu));
-		if (nasid != last_nasid)
-			uv_possible_blades++;
-		last_nasid = nasid;
-	}
+	for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
+		uv_possible_blades +=
+		  hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8));
 	printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
 
 	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
 	uv_blade_info = alloc_bootmem_pages(bytes);
 
+	get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
+
 	bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
 	uv_node_to_blade = alloc_bootmem_pages(bytes);
 	memset(uv_node_to_blade, 255, bytes);
@@ -200,43 +242,56 @@ static __init void uv_system_init(void)
 	uv_cpu_to_blade = alloc_bootmem_pages(bytes);
 	memset(uv_cpu_to_blade, 255, bytes);
 
-	last_nasid = -1;
-	blade = -1;
-	lcpu = -1;
-	for_each_possible_cpu(cpu) {
-		nid = cpu_to_node(cpu);
-		nasid = uv_apicid_to_nasid(per_cpu(x86_cpu_to_apicid, cpu));
-		if (nasid != last_nasid) {
-			blade++;
-			lcpu = -1;
-			uv_blade_info[blade].nr_posible_cpus = 0;
-			uv_blade_info[blade].nr_online_cpus = 0;
+	blade = 0;
+	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
+		present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
+		for (j = 0; j < 64; j++) {
+			if (!test_bit(j, &present))
+				continue;
+			uv_blade_info[blade].pnode = (i * 64 + j);
+			uv_blade_info[blade].nr_possible_cpus = 0;
+			uv_blade_info[blade].nr_online_cpus = 0;
+			blade++;
 		}
-		last_nasid = nasid;
-		lcpu++;
+	}
 
-		uv_cpu_hub_info(cpu)->m_val = m_n_config.s.m_skt;
-		uv_cpu_hub_info(cpu)->n_val = m_n_config.s.n_skt;
+	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
+	gnode_upper = (((unsigned long)node_id.s.node_id) &
+		       ~((1 << n_val) - 1)) << m_val;
+
+	for_each_present_cpu(cpu) {
+		nid = cpu_to_node(cpu);
+		pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu));
+		blade = boot_pnode_to_blade(pnode);
+		lcpu = uv_blade_info[blade].nr_possible_cpus;
+		uv_blade_info[blade].nr_possible_cpus++;
+
+		uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
+		uv_cpu_hub_info(cpu)->lowmem_remap_top =
+					lowmem_redir_base + lowmem_redir_size;
+		uv_cpu_hub_info(cpu)->m_val = m_val;
+		uv_cpu_hub_info(cpu)->n_val = m_val;
 		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
 		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
-		uv_cpu_hub_info(cpu)->local_nasid = nasid;
-		uv_cpu_hub_info(cpu)->gnode_upper =
-		    nasid & ~((1 << uv_hub_info->n_val) - 1);
+		uv_cpu_hub_info(cpu)->pnode = pnode;
+		uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1;
+		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
+		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
 		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
 		uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */
-		uv_blade_info[blade].nasid = nasid;
-		uv_blade_info[blade].nr_posible_cpus++;
 		uv_node_to_blade[nid] = blade;
 		uv_cpu_to_blade[cpu] = blade;
 
-		printk(KERN_DEBUG "UV cpu %d, apicid 0x%x, nasid %d, nid %d\n",
-		    cpu, per_cpu(x86_cpu_to_apicid, cpu), nasid, nid);
-		printk(KERN_DEBUG "UV lcpu %d, blade %d\n", lcpu, blade);
+		printk(KERN_DEBUG "UV cpu %d, apicid 0x%x, pnode %d, nid %d, "
+			"lcpu %d, blade %d\n",
+			cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
+			lcpu, blade);
 	}
 }
 
 /*
  * Called on each cpu to initialize the per_cpu UV data area.
+ *	ZZZ hotplug not supported yet
 */
@@ -246,5 +301,5 @@ void __cpuinit uv_cpu_init(void)
 	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;
 
 	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
-		set_x2apic_extra_bits(uv_hub_info->local_nasid);
+		set_x2apic_extra_bits(uv_hub_info->pnode);
 }
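The boot-time blade count above now comes from population counts over the UVH_NODE_PRESENT_TABLE bitmask words instead of walking every possible cpu and comparing nasids. A stand-alone sketch of that counting logic (the table contents and the software popcount here are illustrative assumptions, not part of the patch):

	/* stand-alone illustration; the table values are made up */
	static unsigned long node_present[2] = { 0x105UL, 0UL };	/* pnodes 0, 2, 8 present */

	static int hweight64_sw(unsigned long w)
	{
		int n = 0;

		while (w) {		/* count set bits: one per present pnode */
			n += w & 1;
			w >>= 1;
		}
		return n;
	}

	static int count_blades(void)
	{
		int i, blades = 0;

		for (i = 0; i < 2; i++)
			blades += hweight64_sw(node_present[i]);
		return blades;		/* 3 for the table above */
	}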
diff --git a/arch/x86/kernel/i8259_32.c b/arch/x86/kernel/i8259.c
index fe631967d625..dc92b49d9204 100644
--- a/arch/x86/kernel/i8259_32.c
+++ b/arch/x86/kernel/i8259.c
@@ -1,8 +1,10 @@
+#include <linux/linkage.h>
 #include <linux/errno.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
+#include <linux/timex.h>
 #include <linux/slab.h>
 #include <linux/random.h>
 #include <linux/init.h>
@@ -10,10 +12,12 @@
 #include <linux/sysdev.h>
 #include <linux/bitops.h>
 
+#include <asm/acpi.h>
 #include <asm/atomic.h>
 #include <asm/system.h>
 #include <asm/io.h>
 #include <asm/timer.h>
+#include <asm/hw_irq.h>
 #include <asm/pgtable.h>
 #include <asm/delay.h>
 #include <asm/desc.h>
@@ -32,7 +36,7 @@ static int i8259A_auto_eoi;
 DEFINE_SPINLOCK(i8259A_lock);
 static void mask_and_ack_8259A(unsigned int);
 
-static struct irq_chip i8259A_chip = {
+struct irq_chip i8259A_chip = {
 	.name		= "XT-PIC",
 	.mask		= disable_8259A_irq,
 	.disable	= disable_8259A_irq,
@@ -125,14 +129,14 @@ static inline int i8259A_irq_real(unsigned int irq)
 	int irqmask = 1<<irq;
 
 	if (irq < 8) {
-		outb(0x0B,PIC_MASTER_CMD);	/* ISR register */
+		outb(0x0B, PIC_MASTER_CMD);	/* ISR register */
 		value = inb(PIC_MASTER_CMD) & irqmask;
-		outb(0x0A,PIC_MASTER_CMD);	/* back to the IRR register */
+		outb(0x0A, PIC_MASTER_CMD);	/* back to the IRR register */
 		return value;
 	}
-	outb(0x0B,PIC_SLAVE_CMD);	/* ISR register */
+	outb(0x0B, PIC_SLAVE_CMD);	/* ISR register */
 	value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
-	outb(0x0A,PIC_SLAVE_CMD);	/* back to the IRR register */
+	outb(0x0A, PIC_SLAVE_CMD);	/* back to the IRR register */
 	return value;
 }
 
@@ -171,12 +175,14 @@ handle_real_irq:
 	if (irq & 8) {
 		inb(PIC_SLAVE_IMR);	/* DUMMY - (do we need this?) */
 		outb(cached_slave_mask, PIC_SLAVE_IMR);
-		outb(0x60+(irq&7),PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
-		outb(0x60+PIC_CASCADE_IR,PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
+		/* 'Specific EOI' to slave */
+		outb(0x60+(irq&7), PIC_SLAVE_CMD);
+		/* 'Specific EOI' to master-IRQ2 */
+		outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD);
 	} else {
 		inb(PIC_MASTER_IMR);	/* DUMMY - (do we need this?) */
 		outb(cached_master_mask, PIC_MASTER_IMR);
-		outb(0x60+irq,PIC_MASTER_CMD);	/* 'Specific EOI to master */
+		outb(0x60+irq, PIC_MASTER_CMD);	/* 'Specific EOI to master */
 	}
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 	return;
@@ -199,7 +205,8 @@ spurious_8259A_irq:
 	 * lets ACK and report it. [once per IRQ]
 	 */
 	if (!(spurious_irq_mask & irqmask)) {
-		printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
+		printk(KERN_DEBUG
+		       "spurious 8259A interrupt: IRQ%d.\n", irq);
 		spurious_irq_mask |= irqmask;
 	}
 	atomic_inc(&irq_err_count);
@@ -290,17 +297,28 @@ void init_8259A(int auto_eoi)
 	 * outb_pic - this has to work on a wide range of PC hardware.
 	 */
 	outb_pic(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
-	outb_pic(0x20 + 0, PIC_MASTER_IMR);	/* ICW2: 8259A-1 IR0-7 mapped to 0x20-0x27 */
-	outb_pic(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);	/* 8259A-1 (the master) has a slave on IR2 */
+
+	/* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 on x86-64,
+	   to 0x20-0x27 on i386 */
+	outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR);
+
+	/* 8259A-1 (the master) has a slave on IR2 */
+	outb_pic(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);
+
 	if (auto_eoi)	/* master does Auto EOI */
 		outb_pic(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
 	else		/* master expects normal EOI */
 		outb_pic(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
 
 	outb_pic(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */
-	outb_pic(0x20 + 8, PIC_SLAVE_IMR);	/* ICW2: 8259A-2 IR0-7 mapped to 0x28-0x2f */
-	outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR);	/* 8259A-2 is a slave on master's IR2 */
-	outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
+
+	/* ICW2: 8259A-2 IR0-7 mapped to IRQ8_VECTOR */
+	outb_pic(IRQ8_VECTOR, PIC_SLAVE_IMR);
+	/* 8259A-2 is a slave on master's IR2 */
+	outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR);
+	/* (slave's support for AEOI in flat mode is to be investigated) */
+	outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
+
 	if (auto_eoi)
 		/*
 		 * In AEOI mode we just have to mask the interrupt
@@ -317,93 +335,3 @@ void init_8259A(int auto_eoi)
 
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 }
-
-/*
- * Note that on a 486, we don't want to do a SIGFPE on an irq13
- * as the irq is unreliable, and exception 16 works correctly
- * (ie as explained in the intel literature). On a 386, you
- * can't use exception 16 due to bad IBM design, so we have to
- * rely on the less exact irq13.
- *
- * Careful.. Not only is IRQ13 unreliable, but it is also
- * leads to races. IBM designers who came up with it should
- * be shot.
- */
-
-
-static irqreturn_t math_error_irq(int cpl, void *dev_id)
-{
-	extern void math_error(void __user *);
-	outb(0,0xF0);
-	if (ignore_fpu_irq || !boot_cpu_data.hard_math)
-		return IRQ_NONE;
-	math_error((void __user *)get_irq_regs()->ip);
-	return IRQ_HANDLED;
-}
-
-/*
- * New motherboards sometimes make IRQ 13 be a PCI interrupt,
- * so allow interrupt sharing.
- */
-static struct irqaction fpu_irq = {
-	.handler = math_error_irq,
-	.mask = CPU_MASK_NONE,
-	.name = "fpu",
-};
-
-void __init init_ISA_irqs (void)
-{
-	int i;
-
-#ifdef CONFIG_X86_LOCAL_APIC
-	init_bsp_APIC();
-#endif
-	init_8259A(0);
-
-	/*
-	 * 16 old-style INTA-cycle interrupts:
-	 */
-	for (i = 0; i < 16; i++) {
-		set_irq_chip_and_handler_name(i, &i8259A_chip,
-					      handle_level_irq, "XT");
-	}
-}
-
-/* Overridden in paravirt.c */
-void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
-
-void __init native_init_IRQ(void)
-{
-	int i;
-
-	/* all the set up before the call gates are initialised */
-	pre_intr_init_hook();
-
-	/*
-	 * Cover the whole vector space, no vector can escape
-	 * us. (some of these will be overridden and become
-	 * 'special' SMP interrupts)
-	 */
-	for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
-		int vector = FIRST_EXTERNAL_VECTOR + i;
-		if (i >= NR_IRQS)
-			break;
-		/* SYSCALL_VECTOR was reserved in trap_init. */
-		if (!test_bit(vector, used_vectors))
-			set_intr_gate(vector, interrupt[i]);
-	}
-
-	/* setup after call gates are initialised (usually add in
-	 * the architecture specific gates)
-	 */
-	intr_init_hook();
-
-	/*
-	 * External FPU? Set up irq13 if so, for
-	 * original braindamaged IBM FERR coupling.
-	 */
-	if (boot_cpu_data.hard_math && !cpu_has_fpu)
-		setup_irq(FPU_IRQ, &fpu_irq);
-
-	irq_ctx_init(smp_processor_id());
-}
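With i8259_32.c and i8259_64.c folded into a single i8259.c, init_8259A() programs ICW2 from IRQ0_VECTOR/IRQ8_VECTOR rather than the hard-coded 0x20/0x28, so the same code remaps both PICs correctly on either build. The layout implied by the ICW2 comment above (the exact macro definitions live in <asm/irq_vectors.h>, outside this diff; this is a sketch of the assumed values):

	/* assumed vector layout, per the ICW2 comment in the hunk above */
	#define FIRST_EXTERNAL_VECTOR	0x20
	#ifdef CONFIG_X86_64
	# define IRQ0_VECTOR		(FIRST_EXTERNAL_VECTOR + 0x10)	/* 0x30..0x37 */
	#else
	# define IRQ0_VECTOR		FIRST_EXTERNAL_VECTOR		/* 0x20..0x27 */
	#endif
	#define IRQ8_VECTOR		(IRQ0_VECTOR + 8)

The init_ISA_irqs()/native_init_IRQ() block removed at the end of this file is the 32-bit-specific setup; per the diffstat and the Makefile hunk it moves to the new irqinit_32.c.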
diff --git a/arch/x86/kernel/i8259_64.c b/arch/x86/kernel/i8259_64.c
deleted file mode 100644
index fa57a1568508..000000000000
--- a/arch/x86/kernel/i8259_64.c
+++ /dev/null
@@ -1,512 +0,0 @@
-#include <linux/linkage.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/timex.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/init.h>
-#include <linux/kernel_stat.h>
-#include <linux/sysdev.h>
-#include <linux/bitops.h>
-
-#include <asm/acpi.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/hw_irq.h>
-#include <asm/pgtable.h>
-#include <asm/delay.h>
-#include <asm/desc.h>
-#include <asm/apic.h>
-#include <asm/i8259.h>
-
-/*
- * Common place to define all x86 IRQ vectors
- *
- * This builds up the IRQ handler stubs using some ugly macros in irq.h
- *
- * These macros create the low-level assembly IRQ routines that save
- * register context and call do_IRQ(). do_IRQ() then does all the
- * operations that are needed to keep the AT (or SMP IOAPIC)
- * interrupt-controller happy.
- */
-
-#define BI(x,y) \
-	BUILD_IRQ(x##y)
-
-#define BUILD_16_IRQS(x) \
-	BI(x,0) BI(x,1) BI(x,2) BI(x,3) \
-	BI(x,4) BI(x,5) BI(x,6) BI(x,7) \
-	BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
-	BI(x,c) BI(x,d) BI(x,e) BI(x,f)
-
-/*
- * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
- * (these are usually mapped to vectors 0x30-0x3f)
- */
-
-/*
- * The IO-APIC gives us many more interrupt sources. Most of these
- * are unused but an SMP system is supposed to have enough memory ...
- * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
- * across the spectrum, so we really want to be prepared to get all
- * of these. Plus, more powerful systems might have more than 64
- * IO-APIC registers.
- *
- * (these are usually mapped into the 0x30-0xff vector range)
- */
-BUILD_16_IRQS(0x2) BUILD_16_IRQS(0x3)
-BUILD_16_IRQS(0x4) BUILD_16_IRQS(0x5) BUILD_16_IRQS(0x6) BUILD_16_IRQS(0x7)
-BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb)
-BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd) BUILD_16_IRQS(0xe) BUILD_16_IRQS(0xf)
-
-#undef BUILD_16_IRQS
-#undef BI
-
-
-#define IRQ(x,y) \
-	IRQ##x##y##_interrupt
-
-#define IRQLIST_16(x) \
-	IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \
-	IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \
-	IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
-	IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
-
-/* for the irq vectors */
-static void (*__initdata interrupt[NR_VECTORS - FIRST_EXTERNAL_VECTOR])(void) = {
-					  IRQLIST_16(0x2), IRQLIST_16(0x3),
-	IRQLIST_16(0x4), IRQLIST_16(0x5), IRQLIST_16(0x6), IRQLIST_16(0x7),
-	IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb),
-	IRQLIST_16(0xc), IRQLIST_16(0xd), IRQLIST_16(0xe), IRQLIST_16(0xf)
-};
-
-#undef IRQ
-#undef IRQLIST_16
-
-/*
- * This is the 'legacy' 8259A Programmable Interrupt Controller,
- * present in the majority of PC/AT boxes.
- * plus some generic x86 specific things if generic specifics makes
- * any sense at all.
- * this file should become arch/i386/kernel/irq.c when the old irq.c
- * moves to arch independent land
- */
-
-static int i8259A_auto_eoi;
-DEFINE_SPINLOCK(i8259A_lock);
-static void mask_and_ack_8259A(unsigned int);
-
-static struct irq_chip i8259A_chip = {
-	.name		= "XT-PIC",
-	.mask		= disable_8259A_irq,
-	.disable	= disable_8259A_irq,
-	.unmask		= enable_8259A_irq,
-	.mask_ack	= mask_and_ack_8259A,
-};
-
-/*
- * 8259A PIC functions to handle ISA devices:
- */
-
-/*
- * This contains the irq mask for both 8259A irq controllers,
- */
-unsigned int cached_irq_mask = 0xffff;
-
-/*
- * Not all IRQs can be routed through the IO-APIC, eg. on certain (older)
- * boards the timer interrupt is not really connected to any IO-APIC pin,
- * it's fed to the master 8259A's IR0 line only.
- *
- * Any '1' bit in this mask means the IRQ is routed through the IO-APIC.
- * this 'mixed mode' IRQ handling costs nothing because it's only used
- * at IRQ setup time.
- */
-unsigned long io_apic_irqs;
-
-void disable_8259A_irq(unsigned int irq)
-{
-	unsigned int mask = 1 << irq;
-	unsigned long flags;
-
-	spin_lock_irqsave(&i8259A_lock, flags);
-	cached_irq_mask |= mask;
-	if (irq & 8)
-		outb(cached_slave_mask, PIC_SLAVE_IMR);
-	else
-		outb(cached_master_mask, PIC_MASTER_IMR);
-	spin_unlock_irqrestore(&i8259A_lock, flags);
-}
-
-void enable_8259A_irq(unsigned int irq)
-{
-	unsigned int mask = ~(1 << irq);
-	unsigned long flags;
-
-	spin_lock_irqsave(&i8259A_lock, flags);
-	cached_irq_mask &= mask;
-	if (irq & 8)
-		outb(cached_slave_mask, PIC_SLAVE_IMR);
-	else
-		outb(cached_master_mask, PIC_MASTER_IMR);
-	spin_unlock_irqrestore(&i8259A_lock, flags);
-}
-
-int i8259A_irq_pending(unsigned int irq)
-{
-	unsigned int mask = 1<<irq;
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&i8259A_lock, flags);
-	if (irq < 8)
-		ret = inb(PIC_MASTER_CMD) & mask;
-	else
-		ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
-	spin_unlock_irqrestore(&i8259A_lock, flags);
-
-	return ret;
-}
-
-void make_8259A_irq(unsigned int irq)
-{
-	disable_irq_nosync(irq);
-	io_apic_irqs &= ~(1<<irq);
-	set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
-				      "XT");
-	enable_irq(irq);
-}
-
-/*
- * This function assumes to be called rarely. Switching between
- * 8259A registers is slow.
- * This has to be protected by the irq controller spinlock
- * before being called.
- */
-static inline int i8259A_irq_real(unsigned int irq)
-{
-	int value;
-	int irqmask = 1<<irq;
-
-	if (irq < 8) {
-		outb(0x0B,PIC_MASTER_CMD);	/* ISR register */
-		value = inb(PIC_MASTER_CMD) & irqmask;
-		outb(0x0A,PIC_MASTER_CMD);	/* back to the IRR register */
-		return value;
-	}
-	outb(0x0B,PIC_SLAVE_CMD);	/* ISR register */
-	value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
-	outb(0x0A,PIC_SLAVE_CMD);	/* back to the IRR register */
-	return value;
-}
-
-/*
- * Careful! The 8259A is a fragile beast, it pretty
- * much _has_ to be done exactly like this (mask it
- * first, _then_ send the EOI, and the order of EOI
- * to the two 8259s is important!
- */
-static void mask_and_ack_8259A(unsigned int irq)
-{
-	unsigned int irqmask = 1 << irq;
-	unsigned long flags;
-
-	spin_lock_irqsave(&i8259A_lock, flags);
-	/*
-	 * Lightweight spurious IRQ detection. We do not want
-	 * to overdo spurious IRQ handling - it's usually a sign
-	 * of hardware problems, so we only do the checks we can
-	 * do without slowing down good hardware unnecessarily.
-	 *
-	 * Note that IRQ7 and IRQ15 (the two spurious IRQs
-	 * usually resulting from the 8259A-1|2 PICs) occur
-	 * even if the IRQ is masked in the 8259A. Thus we
-	 * can check spurious 8259A IRQs without doing the
-	 * quite slow i8259A_irq_real() call for every IRQ.
-	 * This does not cover 100% of spurious interrupts,
-	 * but should be enough to warn the user that there
-	 * is something bad going on ...
-	 */
-	if (cached_irq_mask & irqmask)
-		goto spurious_8259A_irq;
-	cached_irq_mask |= irqmask;
-
-handle_real_irq:
-	if (irq & 8) {
-		inb(PIC_SLAVE_IMR);	/* DUMMY - (do we need this?) */
-		outb(cached_slave_mask, PIC_SLAVE_IMR);
-		/* 'Specific EOI' to slave */
-		outb(0x60+(irq&7),PIC_SLAVE_CMD);
-		/* 'Specific EOI' to master-IRQ2 */
-		outb(0x60+PIC_CASCADE_IR,PIC_MASTER_CMD);
-	} else {
-		inb(PIC_MASTER_IMR);	/* DUMMY - (do we need this?) */
-		outb(cached_master_mask, PIC_MASTER_IMR);
-		/* 'Specific EOI' to master */
-		outb(0x60+irq,PIC_MASTER_CMD);
-	}
-	spin_unlock_irqrestore(&i8259A_lock, flags);
-	return;
-
-spurious_8259A_irq:
-	/*
-	 * this is the slow path - should happen rarely.
-	 */
-	if (i8259A_irq_real(irq))
-		/*
-		 * oops, the IRQ _is_ in service according to the
-		 * 8259A - not spurious, go handle it.
-		 */
-		goto handle_real_irq;
-
-	{
-		static int spurious_irq_mask;
-		/*
-		 * At this point we can be sure the IRQ is spurious,
-		 * lets ACK and report it. [once per IRQ]
-		 */
-		if (!(spurious_irq_mask & irqmask)) {
-			printk(KERN_DEBUG
-			       "spurious 8259A interrupt: IRQ%d.\n", irq);
-			spurious_irq_mask |= irqmask;
-		}
-		atomic_inc(&irq_err_count);
-		/*
-		 * Theoretically we do not have to handle this IRQ,
-		 * but in Linux this does not cause problems and is
-		 * simpler for us.
-		 */
-		goto handle_real_irq;
-	}
-}
-
-static char irq_trigger[2];
-/**
- * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ
- */
-static void restore_ELCR(char *trigger)
-{
-	outb(trigger[0], 0x4d0);
-	outb(trigger[1], 0x4d1);
-}
-
-static void save_ELCR(char *trigger)
-{
-	/* IRQ 0,1,2,8,13 are marked as reserved */
-	trigger[0] = inb(0x4d0) & 0xF8;
-	trigger[1] = inb(0x4d1) & 0xDE;
-}
-
-static int i8259A_resume(struct sys_device *dev)
-{
-	init_8259A(i8259A_auto_eoi);
-	restore_ELCR(irq_trigger);
-	return 0;
-}
-
-static int i8259A_suspend(struct sys_device *dev, pm_message_t state)
-{
-	save_ELCR(irq_trigger);
-	return 0;
-}
-
-static int i8259A_shutdown(struct sys_device *dev)
-{
-	/* Put the i8259A into a quiescent state that
-	 * the kernel initialization code can get it
-	 * out of.
-	 */
-	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
-	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-1 */
-	return 0;
-}
-
-static struct sysdev_class i8259_sysdev_class = {
-	.name = "i8259",
-	.suspend = i8259A_suspend,
-	.resume = i8259A_resume,
-	.shutdown = i8259A_shutdown,
-};
-
-static struct sys_device device_i8259A = {
-	.id	= 0,
-	.cls	= &i8259_sysdev_class,
-};
-
-static int __init i8259A_init_sysfs(void)
-{
-	int error = sysdev_class_register(&i8259_sysdev_class);
-	if (!error)
-		error = sysdev_register(&device_i8259A);
-	return error;
-}
-
-device_initcall(i8259A_init_sysfs);
-
-void init_8259A(int auto_eoi)
-{
-	unsigned long flags;
-
-	i8259A_auto_eoi = auto_eoi;
-
-	spin_lock_irqsave(&i8259A_lock, flags);
-
-	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
-	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
-
-	/*
-	 * outb_pic - this has to work on a wide range of PC hardware.
-	 */
-	outb_pic(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
-	/* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 */
-	outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR);
-	/* 8259A-1 (the master) has a slave on IR2 */
-	outb_pic(0x04, PIC_MASTER_IMR);
-	if (auto_eoi)	/* master does Auto EOI */
-		outb_pic(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
-	else		/* master expects normal EOI */
-		outb_pic(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
-
-	outb_pic(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */
-	/* ICW2: 8259A-2 IR0-7 mapped to 0x38-0x3f */
-	outb_pic(IRQ8_VECTOR, PIC_SLAVE_IMR);
-	/* 8259A-2 is a slave on master's IR2 */
-	outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR);
-	/* (slave's support for AEOI in flat mode is to be investigated) */
-	outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
-
-	if (auto_eoi)
-		/*
-		 * In AEOI mode we just have to mask the interrupt
-		 * when acking.
-		 */
-		i8259A_chip.mask_ack = disable_8259A_irq;
-	else
-		i8259A_chip.mask_ack = mask_and_ack_8259A;
-
-	udelay(100);		/* wait for 8259A to initialize */
-
-	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
-	outb(cached_slave_mask, PIC_SLAVE_IMR);	  /* restore slave IRQ mask */
-
-	spin_unlock_irqrestore(&i8259A_lock, flags);
-}
-
-
-
-
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-
-static struct irqaction irq2 = {
-	.handler = no_action,
-	.mask = CPU_MASK_NONE,
-	.name = "cascade",
-};
-DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
-	[0 ... IRQ0_VECTOR - 1] = -1,
-	[IRQ0_VECTOR] = 0,
-	[IRQ1_VECTOR] = 1,
-	[IRQ2_VECTOR] = 2,
-	[IRQ3_VECTOR] = 3,
-	[IRQ4_VECTOR] = 4,
-	[IRQ5_VECTOR] = 5,
-	[IRQ6_VECTOR] = 6,
-	[IRQ7_VECTOR] = 7,
-	[IRQ8_VECTOR] = 8,
-	[IRQ9_VECTOR] = 9,
-	[IRQ10_VECTOR] = 10,
-	[IRQ11_VECTOR] = 11,
-	[IRQ12_VECTOR] = 12,
-	[IRQ13_VECTOR] = 13,
-	[IRQ14_VECTOR] = 14,
-	[IRQ15_VECTOR] = 15,
-	[IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
-};
-
-void __init init_ISA_irqs (void)
-{
-	int i;
-
-	init_bsp_APIC();
-	init_8259A(0);
-
-	for (i = 0; i < NR_IRQS; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 1;
-
-		if (i < 16) {
-			/*
-			 * 16 old-style INTA-cycle interrupts:
-			 */
-			set_irq_chip_and_handler_name(i, &i8259A_chip,
-						      handle_level_irq, "XT");
-		} else {
-			/*
-			 * 'high' PCI IRQs filled in on demand
-			 */
-			irq_desc[i].chip = &no_irq_chip;
-		}
-	}
-}
-
-void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
-
-void __init native_init_IRQ(void)
-{
-	int i;
-
-	init_ISA_irqs();
-	/*
-	 * Cover the whole vector space, no vector can escape
-	 * us. (some of these will be overridden and become
-	 * 'special' SMP interrupts)
-	 */
-	for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
-		int vector = FIRST_EXTERNAL_VECTOR + i;
-		if (vector != IA32_SYSCALL_VECTOR)
-			set_intr_gate(vector, interrupt[i]);
-	}
-
-#ifdef CONFIG_SMP
-	/*
-	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
-	 * IPI, driven by wakeup.
-	 */
-	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
-
-	/* IPIs for invalidation */
-	set_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
-	set_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
-	set_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
-	set_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
-	set_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
-	set_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
-	set_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
-	set_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
-
-	/* IPI for generic function call */
-	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
-
-	/* Low priority IPI to cleanup after moving an irq */
-	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
-#endif
-	set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
-	set_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
-
-	/* self generated IPI for local APIC timer */
-	set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
-
-	/* IPI vectors for APIC spurious and error interrupts */
-	set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
-	set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
-
-	if (!acpi_ioapic)
-		setup_irq(2, &irq2);
-}
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index 4dc8600d9d20..0774b231a28b 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -1176,7 +1176,7 @@ static int __assign_irq_vector(int irq)
 	offset = current_offset;
 next:
 	vector += 8;
-	if (vector >= FIRST_SYSTEM_VECTOR) {
+	if (vector >= first_system_vector) {
 		offset = (offset + 1) % 8;
 		vector = FIRST_DEVICE_VECTOR + offset;
 	}
@@ -2261,7 +2261,7 @@ void __init setup_IO_APIC(void)
 	int i;
 
 	/* Reserve all the system vectors. */
-	for (i = FIRST_SYSTEM_VECTOR; i < NR_VECTORS; i++)
+	for (i = first_system_vector; i < NR_VECTORS; i++)
 		set_bit(i, used_vectors);
 
 	enable_IO_APIC();
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index ef1a8dfcc529..f1e1ae3e5c7d 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -82,6 +82,10 @@ struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
 
 static int assign_irq_vector(int irq, cpumask_t mask);
 
+int first_system_vector = 0xfe;
+
+char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE};
+
 #define __apicdebuginit __init
 
 int sis_apic_bug; /* not actually supported, dummy for compile */
@@ -730,7 +734,7 @@ static int __assign_irq_vector(int irq, cpumask_t mask)
 	offset = current_offset;
 next:
 	vector += 8;
-	if (vector >= FIRST_SYSTEM_VECTOR) {
+	if (vector >= first_system_vector) {
 		/* If we run out of vectors on large boxen, must share them. */
 		offset = (offset + 1) % 8;
 		vector = FIRST_DEVICE_VECTOR + offset;
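In both __assign_irq_vector() variants the search for a free device vector now wraps at the runtime first_system_vector (initialized to 0xfe and lowered by each alloc_intr_gate() call) instead of at the compile-time FIRST_SYSTEM_VECTOR. A stand-alone sketch of the stride-8 scan step (FIRST_DEVICE_VECTOR's value is an assumption for illustration):

	/* illustration of the wrap-around step in the allocator above */
	enum { FIRST_DEVICE_VECTOR = 0x31 };		/* assumed value */
	static int first_system_vector = 0xfe;

	static int next_vector(int vector, int *offset)
	{
		vector += 8;				/* step within the same priority class */
		if (vector >= first_system_vector) {
			/* stay below the lowest allocated system vector */
			*offset = (*offset + 1) % 8;
			vector = FIRST_DEVICE_VECTOR + *offset;
		}
		return vector;
	}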
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 147352df28b9..4e3e8ec60276 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -48,6 +48,29 @@ void ack_bad_irq(unsigned int irq) | |||
48 | #endif | 48 | #endif |
49 | } | 49 | } |
50 | 50 | ||
51 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | ||
52 | /* Debugging check for stack overflow: is there less than 1KB free? */ | ||
53 | static int check_stack_overflow(void) | ||
54 | { | ||
55 | long sp; | ||
56 | |||
57 | __asm__ __volatile__("andl %%esp,%0" : | ||
58 | "=r" (sp) : "0" (THREAD_SIZE - 1)); | ||
59 | |||
60 | return sp < (sizeof(struct thread_info) + STACK_WARN); | ||
61 | } | ||
62 | |||
63 | static void print_stack_overflow(void) | ||
64 | { | ||
65 | printk(KERN_WARNING "low stack detected by irq handler\n"); | ||
66 | dump_stack(); | ||
67 | } | ||
68 | |||
69 | #else | ||
70 | static inline int check_stack_overflow(void) { return 0; } | ||
71 | static inline void print_stack_overflow(void) { } | ||
72 | #endif | ||
73 | |||
51 | #ifdef CONFIG_4KSTACKS | 74 | #ifdef CONFIG_4KSTACKS |
52 | /* | 75 | /* |
53 | * per-CPU IRQ handling contexts (thread information and stack) | 76 | * per-CPU IRQ handling contexts (thread information and stack) |
@@ -59,48 +82,29 @@ union irq_ctx { | |||
59 | 82 | ||
60 | static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly; | 83 | static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly; |
61 | static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly; | 84 | static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly; |
62 | #endif | ||
63 | 85 | ||
64 | /* | 86 | static char softirq_stack[NR_CPUS * THREAD_SIZE] |
65 | * do_IRQ handles all normal device IRQ's (the special | 87 | __attribute__((__section__(".bss.page_aligned"))); |
66 | * SMP cross-CPU interrupts have their own specific | ||
67 | * handlers). | ||
68 | */ | ||
69 | unsigned int do_IRQ(struct pt_regs *regs) | ||
70 | { | ||
71 | struct pt_regs *old_regs; | ||
72 | /* high bit used in ret_from_ code */ | ||
73 | int irq = ~regs->orig_ax; | ||
74 | struct irq_desc *desc = irq_desc + irq; | ||
75 | #ifdef CONFIG_4KSTACKS | ||
76 | union irq_ctx *curctx, *irqctx; | ||
77 | u32 *isp; | ||
78 | #endif | ||
79 | 88 | ||
80 | if (unlikely((unsigned)irq >= NR_IRQS)) { | 89 | static char hardirq_stack[NR_CPUS * THREAD_SIZE] |
81 | printk(KERN_EMERG "%s: cannot handle IRQ %d\n", | 90 | __attribute__((__section__(".bss.page_aligned"))); |
82 | __func__, irq); | ||
83 | BUG(); | ||
84 | } | ||
85 | 91 | ||
86 | old_regs = set_irq_regs(regs); | 92 | static void call_on_stack(void *func, void *stack) |
87 | irq_enter(); | 93 | { |
88 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | 94 | asm volatile("xchgl %%ebx,%%esp \n" |
89 | /* Debugging check for stack overflow: is there less than 1KB free? */ | 95 | "call *%%edi \n" |
90 | { | 96 | "movl %%ebx,%%esp \n" |
91 | long sp; | 97 | : "=b" (stack) |
92 | 98 | : "0" (stack), | |
93 | __asm__ __volatile__("andl %%esp,%0" : | 99 | "D"(func) |
94 | "=r" (sp) : "0" (THREAD_SIZE - 1)); | 100 | : "memory", "cc", "edx", "ecx", "eax"); |
95 | if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { | 101 | } |
96 | printk("do_IRQ: stack overflow: %ld\n", | ||
97 | sp - sizeof(struct thread_info)); | ||
98 | dump_stack(); | ||
99 | } | ||
100 | } | ||
101 | #endif | ||
102 | 102 | ||
103 | #ifdef CONFIG_4KSTACKS | 103 | static inline int |
104 | execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) | ||
105 | { | ||
106 | union irq_ctx *curctx, *irqctx; | ||
107 | u32 *isp, arg1, arg2; | ||
104 | 108 | ||
105 | curctx = (union irq_ctx *) current_thread_info(); | 109 | curctx = (union irq_ctx *) current_thread_info(); |
106 | irqctx = hardirq_ctx[smp_processor_id()]; | 110 | irqctx = hardirq_ctx[smp_processor_id()]; |
@@ -111,52 +115,39 @@ unsigned int do_IRQ(struct pt_regs *regs) | |||
111 | * handler) we can't do that and just have to keep using the | 115 | * handler) we can't do that and just have to keep using the |
112 | * current stack (which is the irq stack already after all) | 116 | * current stack (which is the irq stack already after all) |
113 | */ | 117 | */ |
114 | if (curctx != irqctx) { | 118 | if (unlikely(curctx == irqctx)) |
115 | int arg1, arg2, bx; | 119 | return 0; |
116 | 120 | ||
117 | /* build the stack frame on the IRQ stack */ | 121 | /* build the stack frame on the IRQ stack */ |
118 | isp = (u32*) ((char*)irqctx + sizeof(*irqctx)); | 122 | isp = (u32 *) ((char*)irqctx + sizeof(*irqctx)); |
119 | irqctx->tinfo.task = curctx->tinfo.task; | 123 | irqctx->tinfo.task = curctx->tinfo.task; |
120 | irqctx->tinfo.previous_esp = current_stack_pointer; | 124 | irqctx->tinfo.previous_esp = current_stack_pointer; |
121 | 125 | ||
122 | /* | 126 | /* |
123 | * Copy the softirq bits in preempt_count so that the | 127 | * Copy the softirq bits in preempt_count so that the |
124 | * softirq checks work in the hardirq context. | 128 | * softirq checks work in the hardirq context. |
125 | */ | 129 | */ |
126 | irqctx->tinfo.preempt_count = | 130 | irqctx->tinfo.preempt_count = |
127 | (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) | | 131 | (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) | |
128 | (curctx->tinfo.preempt_count & SOFTIRQ_MASK); | 132 | (curctx->tinfo.preempt_count & SOFTIRQ_MASK); |
129 | 133 | ||
130 | asm volatile( | 134 | if (unlikely(overflow)) |
131 | " xchgl %%ebx,%%esp \n" | 135 | call_on_stack(print_stack_overflow, isp); |
132 | " call *%%edi \n" | 136 | |
133 | " movl %%ebx,%%esp \n" | 137 | asm volatile("xchgl %%ebx,%%esp \n" |
134 | : "=a" (arg1), "=d" (arg2), "=b" (bx) | 138 | "call *%%edi \n" |
135 | : "0" (irq), "1" (desc), "2" (isp), | 139 | "movl %%ebx,%%esp \n" |
136 | "D" (desc->handle_irq) | 140 | : "=a" (arg1), "=d" (arg2), "=b" (isp) |
137 | : "memory", "cc", "ecx" | 141 | : "0" (irq), "1" (desc), "2" (isp), |
138 | ); | 142 | "D" (desc->handle_irq) |
139 | } else | 143 | : "memory", "cc", "ecx"); |
140 | #endif | ||
141 | desc->handle_irq(irq, desc); | ||
142 | |||
143 | irq_exit(); | ||
144 | set_irq_regs(old_regs); | ||
145 | return 1; | 144 | return 1; |
146 | } | 145 | } |
147 | 146 | ||
148 | #ifdef CONFIG_4KSTACKS | ||
149 | |||
150 | static char softirq_stack[NR_CPUS * THREAD_SIZE] | ||
151 | __attribute__((__section__(".bss.page_aligned"))); | ||
152 | |||
153 | static char hardirq_stack[NR_CPUS * THREAD_SIZE] | ||
154 | __attribute__((__section__(".bss.page_aligned"))); | ||
155 | |||
156 | /* | 147 | /* |
157 | * allocate per-cpu stacks for hardirq and for softirq processing | 148 | * allocate per-cpu stacks for hardirq and for softirq processing |
158 | */ | 149 | */ |
159 | void irq_ctx_init(int cpu) | 150 | void __cpuinit irq_ctx_init(int cpu) |
160 | { | 151 | { |
161 | union irq_ctx *irqctx; | 152 | union irq_ctx *irqctx; |
162 | 153 | ||
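[editor's note] This hunk and the do_softirq() hunk below both rely on call_on_stack(), which the patch introduces earlier in irq_32.c, outside the visible context. A sketch of its shape, factoring out the xchgl/call/movl sequence the old open-coded asm used:

/* Sketch: run func() with %esp temporarily switched to 'stack'; %ebx
 * carries the original stack pointer across the call and restores it. */
static void call_on_stack(void *func, void *stack)
{
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     "call	*%%edi		\n"
		     "movl	%%ebx,%%esp	\n"
		     : "=b" (stack)
		     : "0" (stack),
		       "D" (func)
		     : "memory", "cc", "edx", "ecx", "eax");
}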
@@ -164,25 +155,25 @@ void irq_ctx_init(int cpu) | |||
164 | return; | 155 | return; |
165 | 156 | ||
166 | irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE]; | 157 | irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE]; |
167 | irqctx->tinfo.task = NULL; | 158 | irqctx->tinfo.task = NULL; |
168 | irqctx->tinfo.exec_domain = NULL; | 159 | irqctx->tinfo.exec_domain = NULL; |
169 | irqctx->tinfo.cpu = cpu; | 160 | irqctx->tinfo.cpu = cpu; |
170 | irqctx->tinfo.preempt_count = HARDIRQ_OFFSET; | 161 | irqctx->tinfo.preempt_count = HARDIRQ_OFFSET; |
171 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); | 162 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); |
172 | 163 | ||
173 | hardirq_ctx[cpu] = irqctx; | 164 | hardirq_ctx[cpu] = irqctx; |
174 | 165 | ||
175 | irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE]; | 166 | irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE]; |
176 | irqctx->tinfo.task = NULL; | 167 | irqctx->tinfo.task = NULL; |
177 | irqctx->tinfo.exec_domain = NULL; | 168 | irqctx->tinfo.exec_domain = NULL; |
178 | irqctx->tinfo.cpu = cpu; | 169 | irqctx->tinfo.cpu = cpu; |
179 | irqctx->tinfo.preempt_count = 0; | 170 | irqctx->tinfo.preempt_count = 0; |
180 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); | 171 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); |
181 | 172 | ||
182 | softirq_ctx[cpu] = irqctx; | 173 | softirq_ctx[cpu] = irqctx; |
183 | 174 | ||
184 | printk("CPU %u irqstacks, hard=%p soft=%p\n", | 175 | printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n", |
185 | cpu,hardirq_ctx[cpu],softirq_ctx[cpu]); | 176 | cpu,hardirq_ctx[cpu],softirq_ctx[cpu]); |
186 | } | 177 | } |
187 | 178 | ||
188 | void irq_ctx_exit(int cpu) | 179 | void irq_ctx_exit(int cpu) |
@@ -211,25 +202,56 @@ asmlinkage void do_softirq(void) | |||
211 | /* build the stack frame on the softirq stack */ | 202 | /* build the stack frame on the softirq stack */ |
212 | isp = (u32*) ((char*)irqctx + sizeof(*irqctx)); | 203 | isp = (u32*) ((char*)irqctx + sizeof(*irqctx)); |
213 | 204 | ||
214 | asm volatile( | 205 | call_on_stack(__do_softirq, isp); |
215 | " xchgl %%ebx,%%esp \n" | ||
216 | " call __do_softirq \n" | ||
217 | " movl %%ebx,%%esp \n" | ||
218 | : "=b"(isp) | ||
219 | : "0"(isp) | ||
220 | : "memory", "cc", "edx", "ecx", "eax" | ||
221 | ); | ||
222 | /* | 206 | /* |
223 | * Shouldn't happen, we returned above if in_interrupt(): | 207 | * Shouldn't happen, we returned above if in_interrupt(): |
224 | */ | 208 | */ |
225 | WARN_ON_ONCE(softirq_count()); | 209 | WARN_ON_ONCE(softirq_count()); |
226 | } | 210 | } |
227 | 211 | ||
228 | local_irq_restore(flags); | 212 | local_irq_restore(flags); |
229 | } | 213 | } |
214 | |||
215 | #else | ||
216 | static inline int | ||
217 | execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; } | ||
230 | #endif | 218 | #endif |
231 | 219 | ||
232 | /* | 220 | /* |
221 | * do_IRQ handles all normal device IRQs (the special | ||
222 | * SMP cross-CPU interrupts have their own specific | ||
223 | * handlers). | ||
224 | */ | ||
225 | unsigned int do_IRQ(struct pt_regs *regs) | ||
226 | { | ||
227 | struct pt_regs *old_regs; | ||
228 | /* high bit used in ret_from_ code */ | ||
229 | int overflow, irq = ~regs->orig_ax; | ||
230 | struct irq_desc *desc = irq_desc + irq; | ||
231 | |||
232 | if (unlikely((unsigned)irq >= NR_IRQS)) { | ||
233 | printk(KERN_EMERG "%s: cannot handle IRQ %d\n", | ||
234 | __func__, irq); | ||
235 | BUG(); | ||
236 | } | ||
237 | |||
238 | old_regs = set_irq_regs(regs); | ||
239 | irq_enter(); | ||
240 | |||
241 | overflow = check_stack_overflow(); | ||
242 | |||
243 | if (!execute_on_irq_stack(overflow, desc, irq)) { | ||
244 | if (unlikely(overflow)) | ||
245 | print_stack_overflow(); | ||
246 | desc->handle_irq(irq, desc); | ||
247 | } | ||
248 | |||
249 | irq_exit(); | ||
250 | set_irq_regs(old_regs); | ||
251 | return 1; | ||
252 | } | ||
253 | |||
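[editor's note] The `irq = ~regs->orig_ax` above reverses the one's-complement encoding the per-vector stubs apply when they push their number (BUILD_IRQ in irqinit_64.c below shows the 64-bit stub; entry_32.S does the equivalent on 32-bit). Pushing ~nr makes the saved orig_ax negative, which is the "high bit" the ret_from_ code tests to tell interrupts from syscalls. The round trip, spelled out:

/* Stub side:   push $~(nr)         ->  regs->orig_ax == ~nr  (negative)
 * do_IRQ side: irq = ~regs->orig_ax == ~(~nr) == nr                    */
long orig_ax = ~0x31;	/* what a stub for entry 0x31 pushes */
int irq = ~orig_ax;	/* recovers 0x31 */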
254 | /* | ||
233 | * Interrupt statistics: | 255 | * Interrupt statistics: |
234 | */ | 256 | */ |
235 | 257 | ||
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c new file mode 100644 index 000000000000..d66914287ee1 --- /dev/null +++ b/arch/x86/kernel/irqinit_32.c | |||
@@ -0,0 +1,114 @@ | |||
1 | #include <linux/errno.h> | ||
2 | #include <linux/signal.h> | ||
3 | #include <linux/sched.h> | ||
4 | #include <linux/ioport.h> | ||
5 | #include <linux/interrupt.h> | ||
6 | #include <linux/slab.h> | ||
7 | #include <linux/random.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/kernel_stat.h> | ||
10 | #include <linux/sysdev.h> | ||
11 | #include <linux/bitops.h> | ||
12 | |||
13 | #include <asm/atomic.h> | ||
14 | #include <asm/system.h> | ||
15 | #include <asm/io.h> | ||
16 | #include <asm/timer.h> | ||
17 | #include <asm/pgtable.h> | ||
18 | #include <asm/delay.h> | ||
19 | #include <asm/desc.h> | ||
20 | #include <asm/apic.h> | ||
21 | #include <asm/arch_hooks.h> | ||
22 | #include <asm/i8259.h> | ||
23 | |||
24 | |||
25 | |||
26 | /* | ||
27 | * Note that on a 486, we don't want to do a SIGFPE on an irq13 | ||
28 | * as the irq is unreliable, and exception 16 works correctly | ||
29 | * (i.e. as explained in the Intel literature). On a 386, you | ||
30 | * can't use exception 16 due to bad IBM design, so we have to | ||
31 | * rely on the less exact irq13. | ||
32 | * | ||
33 | * Careful.. Not only is IRQ13 unreliable, but it also | ||
34 | * leads to races. IBM designers who came up with it should | ||
35 | * be shot. | ||
36 | */ | ||
37 | |||
38 | |||
39 | static irqreturn_t math_error_irq(int cpl, void *dev_id) | ||
40 | { | ||
41 | extern void math_error(void __user *); | ||
42 | outb(0,0xF0); | ||
43 | if (ignore_fpu_irq || !boot_cpu_data.hard_math) | ||
44 | return IRQ_NONE; | ||
45 | math_error((void __user *)get_irq_regs()->ip); | ||
46 | return IRQ_HANDLED; | ||
47 | } | ||
48 | |||
49 | /* | ||
50 | * New motherboards sometimes make IRQ 13 be a PCI interrupt, | ||
51 | * so allow interrupt sharing. | ||
52 | */ | ||
53 | static struct irqaction fpu_irq = { | ||
54 | .handler = math_error_irq, | ||
55 | .mask = CPU_MASK_NONE, | ||
56 | .name = "fpu", | ||
57 | }; | ||
58 | |||
59 | void __init init_ISA_irqs (void) | ||
60 | { | ||
61 | int i; | ||
62 | |||
63 | #ifdef CONFIG_X86_LOCAL_APIC | ||
64 | init_bsp_APIC(); | ||
65 | #endif | ||
66 | init_8259A(0); | ||
67 | |||
68 | /* | ||
69 | * 16 old-style INTA-cycle interrupts: | ||
70 | */ | ||
71 | for (i = 0; i < 16; i++) { | ||
72 | set_irq_chip_and_handler_name(i, &i8259A_chip, | ||
73 | handle_level_irq, "XT"); | ||
74 | } | ||
75 | } | ||
76 | |||
77 | /* Overridden in paravirt.c */ | ||
78 | void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); | ||
79 | |||
80 | void __init native_init_IRQ(void) | ||
81 | { | ||
82 | int i; | ||
83 | |||
84 | /* all the setup before the call gates are initialised */ | ||
85 | pre_intr_init_hook(); | ||
86 | |||
87 | /* | ||
88 | * Cover the whole vector space; no vector can escape | ||
89 | * us. (some of these will be overridden and become | ||
90 | * 'special' SMP interrupts) | ||
91 | */ | ||
92 | for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) { | ||
93 | int vector = FIRST_EXTERNAL_VECTOR + i; | ||
94 | if (i >= NR_IRQS) | ||
95 | break; | ||
96 | /* SYSCALL_VECTOR was reserved in trap_init. */ | ||
97 | if (!test_bit(vector, used_vectors)) | ||
98 | set_intr_gate(vector, interrupt[i]); | ||
99 | } | ||
100 | |||
101 | /* setup after the call gates are initialised (usually adds in | ||
102 | * the architecture-specific gates) | ||
103 | */ | ||
104 | intr_init_hook(); | ||
105 | |||
106 | /* | ||
107 | * External FPU? Set up irq13 if so, for | ||
108 | * original braindamaged IBM FERR coupling. | ||
109 | */ | ||
110 | if (boot_cpu_data.hard_math && !cpu_has_fpu) | ||
111 | setup_irq(FPU_IRQ, &fpu_irq); | ||
112 | |||
113 | irq_ctx_init(smp_processor_id()); | ||
114 | } | ||
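[editor's note] The `weak, alias` attribute a few lines up is what the "Overridden in paravirt.c" comment leans on: a weak alias is only a default, so any strong definition of init_IRQ elsewhere in the kernel wins at link time. The pattern in isolation, with hypothetical names and one definition per translation unit:

/* default.c: native implementation plus a weak default alias */
void native_setup_irqs(void) { /* native flavour */ }
void setup_irqs(void) __attribute__((weak, alias("native_setup_irqs")));

/* override.c: a strong definition silently replaces the weak alias */
void setup_irqs(void) { /* paravirt flavour runs instead */ }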
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c new file mode 100644 index 000000000000..31f49e8f46a7 --- /dev/null +++ b/arch/x86/kernel/irqinit_64.c | |||
@@ -0,0 +1,217 @@ | |||
1 | #include <linux/linkage.h> | ||
2 | #include <linux/errno.h> | ||
3 | #include <linux/signal.h> | ||
4 | #include <linux/sched.h> | ||
5 | #include <linux/ioport.h> | ||
6 | #include <linux/interrupt.h> | ||
7 | #include <linux/timex.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <linux/random.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/kernel_stat.h> | ||
12 | #include <linux/sysdev.h> | ||
13 | #include <linux/bitops.h> | ||
14 | |||
15 | #include <asm/acpi.h> | ||
16 | #include <asm/atomic.h> | ||
17 | #include <asm/system.h> | ||
18 | #include <asm/io.h> | ||
19 | #include <asm/hw_irq.h> | ||
20 | #include <asm/pgtable.h> | ||
21 | #include <asm/delay.h> | ||
22 | #include <asm/desc.h> | ||
23 | #include <asm/apic.h> | ||
24 | #include <asm/i8259.h> | ||
25 | |||
26 | /* | ||
27 | * Common place to define all x86 IRQ vectors | ||
28 | * | ||
29 | * This builds up the IRQ handler stubs using some ugly macros in irq.h | ||
30 | * | ||
31 | * These macros create the low-level assembly IRQ routines that save | ||
32 | * register context and call do_IRQ(). do_IRQ() then does all the | ||
33 | * operations that are needed to keep the AT (or SMP IOAPIC) | ||
34 | * interrupt-controller happy. | ||
35 | */ | ||
36 | |||
37 | #define IRQ_NAME2(nr) nr##_interrupt(void) | ||
38 | #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr) | ||
39 | |||
40 | /* | ||
41 | * SMP has a few special interrupts for IPI messages | ||
42 | */ | ||
43 | |||
44 | #define BUILD_IRQ(nr) \ | ||
45 | asmlinkage void IRQ_NAME(nr); \ | ||
46 | asm("\n.p2align\n" \ | ||
47 | "IRQ" #nr "_interrupt:\n\t" \ | ||
48 | "push $~(" #nr ") ; " \ | ||
49 | "jmp common_interrupt"); | ||
50 | |||
51 | #define BI(x,y) \ | ||
52 | BUILD_IRQ(x##y) | ||
53 | |||
54 | #define BUILD_16_IRQS(x) \ | ||
55 | BI(x,0) BI(x,1) BI(x,2) BI(x,3) \ | ||
56 | BI(x,4) BI(x,5) BI(x,6) BI(x,7) \ | ||
57 | BI(x,8) BI(x,9) BI(x,a) BI(x,b) \ | ||
58 | BI(x,c) BI(x,d) BI(x,e) BI(x,f) | ||
59 | |||
60 | /* | ||
61 | * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts: | ||
62 | * (these are usually mapped to vectors 0x30-0x3f) | ||
63 | */ | ||
64 | |||
65 | /* | ||
66 | * The IO-APIC gives us many more interrupt sources. Most of these | ||
67 | * are unused but an SMP system is supposed to have enough memory ... | ||
68 | * sometimes (mostly wrt. hw bugs) we get corrupted vectors all | ||
69 | * across the spectrum, so we really want to be prepared to get all | ||
70 | * of these. Plus, more powerful systems might have more than 64 | ||
71 | * IO-APIC registers. | ||
72 | * | ||
73 | * (these are usually mapped into the 0x30-0xff vector range) | ||
74 | */ | ||
75 | BUILD_16_IRQS(0x2) BUILD_16_IRQS(0x3) | ||
76 | BUILD_16_IRQS(0x4) BUILD_16_IRQS(0x5) BUILD_16_IRQS(0x6) BUILD_16_IRQS(0x7) | ||
77 | BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb) | ||
78 | BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd) BUILD_16_IRQS(0xe) BUILD_16_IRQS(0xf) | ||
79 | |||
80 | #undef BUILD_16_IRQS | ||
81 | #undef BI | ||
82 | |||
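[editor's note] To see what the macro stack above actually emits, here is BUILD_IRQ(0x31) expanded by hand (0x31 is an arbitrary example): one assembler stub per vector, each pushing the one's complement of its number and jumping to the shared entry path.

asmlinkage void IRQ0x31_interrupt(void);
asm("\n.p2align\n"
    "IRQ0x31_interrupt:\n\t"
    "push $~(0x31) ; "	/* orig_ax = ~0x31; do_IRQ undoes this */
    "jmp common_interrupt");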
83 | |||
84 | #define IRQ(x,y) \ | ||
85 | IRQ##x##y##_interrupt | ||
86 | |||
87 | #define IRQLIST_16(x) \ | ||
88 | IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \ | ||
89 | IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \ | ||
90 | IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \ | ||
91 | IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f) | ||
92 | |||
93 | /* for the irq vectors */ | ||
94 | static void (*__initdata interrupt[NR_VECTORS - FIRST_EXTERNAL_VECTOR])(void) = { | ||
95 | IRQLIST_16(0x2), IRQLIST_16(0x3), | ||
96 | IRQLIST_16(0x4), IRQLIST_16(0x5), IRQLIST_16(0x6), IRQLIST_16(0x7), | ||
97 | IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb), | ||
98 | IRQLIST_16(0xc), IRQLIST_16(0xd), IRQLIST_16(0xe), IRQLIST_16(0xf) | ||
99 | }; | ||
100 | |||
101 | #undef IRQ | ||
102 | #undef IRQLIST_16 | ||
103 | |||
104 | |||
105 | |||
106 | |||
107 | /* | ||
108 | * IRQ2 is cascade interrupt to second interrupt controller | ||
109 | */ | ||
110 | |||
111 | static struct irqaction irq2 = { | ||
112 | .handler = no_action, | ||
113 | .mask = CPU_MASK_NONE, | ||
114 | .name = "cascade", | ||
115 | }; | ||
116 | DEFINE_PER_CPU(vector_irq_t, vector_irq) = { | ||
117 | [0 ... IRQ0_VECTOR - 1] = -1, | ||
118 | [IRQ0_VECTOR] = 0, | ||
119 | [IRQ1_VECTOR] = 1, | ||
120 | [IRQ2_VECTOR] = 2, | ||
121 | [IRQ3_VECTOR] = 3, | ||
122 | [IRQ4_VECTOR] = 4, | ||
123 | [IRQ5_VECTOR] = 5, | ||
124 | [IRQ6_VECTOR] = 6, | ||
125 | [IRQ7_VECTOR] = 7, | ||
126 | [IRQ8_VECTOR] = 8, | ||
127 | [IRQ9_VECTOR] = 9, | ||
128 | [IRQ10_VECTOR] = 10, | ||
129 | [IRQ11_VECTOR] = 11, | ||
130 | [IRQ12_VECTOR] = 12, | ||
131 | [IRQ13_VECTOR] = 13, | ||
132 | [IRQ14_VECTOR] = 14, | ||
133 | [IRQ15_VECTOR] = 15, | ||
134 | [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1 | ||
135 | }; | ||
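[editor's note] This per-CPU table is the inverse of the gates installed below: it maps a hardware vector back to a Linux IRQ number, with -1 marking vectors no IRQ owns. A sketch of how the 64-bit interrupt entry path is assumed to consume it (the dispatch details are an assumption, not shown in this patch):

/* Sketch: translate the stub-pushed vector to an IRQ and dispatch it;
 * -1 means the vector is currently unbound on this CPU (spurious). */
static void dispatch_vector(struct pt_regs *regs)
{
	unsigned vector = ~regs->orig_ax;	/* stubs push ~vector */
	int irq = __get_cpu_var(vector_irq)[vector];

	if (irq >= 0)
		generic_handle_irq(irq);
}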
136 | |||
137 | static void __init init_ISA_irqs (void) | ||
138 | { | ||
139 | int i; | ||
140 | |||
141 | init_bsp_APIC(); | ||
142 | init_8259A(0); | ||
143 | |||
144 | for (i = 0; i < NR_IRQS; i++) { | ||
145 | irq_desc[i].status = IRQ_DISABLED; | ||
146 | irq_desc[i].action = NULL; | ||
147 | irq_desc[i].depth = 1; | ||
148 | |||
149 | if (i < 16) { | ||
150 | /* | ||
151 | * 16 old-style INTA-cycle interrupts: | ||
152 | */ | ||
153 | set_irq_chip_and_handler_name(i, &i8259A_chip, | ||
154 | handle_level_irq, "XT"); | ||
155 | } else { | ||
156 | /* | ||
157 | * 'high' PCI IRQs filled in on demand | ||
158 | */ | ||
159 | irq_desc[i].chip = &no_irq_chip; | ||
160 | } | ||
161 | } | ||
162 | } | ||
163 | |||
164 | void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); | ||
165 | |||
166 | void __init native_init_IRQ(void) | ||
167 | { | ||
168 | int i; | ||
169 | |||
170 | init_ISA_irqs(); | ||
171 | /* | ||
172 | * Cover the whole vector space; no vector can escape | ||
173 | * us. (some of these will be overridden and become | ||
174 | * 'special' SMP interrupts) | ||
175 | */ | ||
176 | for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) { | ||
177 | int vector = FIRST_EXTERNAL_VECTOR + i; | ||
178 | if (vector != IA32_SYSCALL_VECTOR) | ||
179 | set_intr_gate(vector, interrupt[i]); | ||
180 | } | ||
181 | |||
182 | #ifdef CONFIG_SMP | ||
183 | /* | ||
184 | * The reschedule interrupt is a CPU-to-CPU reschedule-helper | ||
185 | * IPI, driven by wakeup. | ||
186 | */ | ||
187 | alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); | ||
188 | |||
189 | /* IPIs for invalidation */ | ||
190 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0); | ||
191 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1); | ||
192 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2); | ||
193 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3); | ||
194 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4); | ||
195 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5); | ||
196 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6); | ||
197 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7); | ||
198 | |||
199 | /* IPI for generic function call */ | ||
200 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); | ||
201 | |||
202 | /* Low priority IPI to cleanup after moving an irq */ | ||
203 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); | ||
204 | #endif | ||
205 | alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); | ||
206 | alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt); | ||
207 | |||
208 | /* self generated IPI for local APIC timer */ | ||
209 | alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); | ||
210 | |||
211 | /* IPI vectors for APIC spurious and error interrupts */ | ||
212 | alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); | ||
213 | alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); | ||
214 | |||
215 | if (!acpi_ioapic) | ||
216 | setup_irq(2, &irq2); | ||
217 | } | ||
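[editor's note] native_init_IRQ() above uses plain set_intr_gate() for fixed vectors but alloc_intr_gate() for the APIC system vectors; the latter is assumed to also record the vector as reserved so the dynamic device-vector allocator never hands it out. A sketch under that assumption (alloc_system_vector is an assumed helper name):

/* Hedged sketch: install the gate and mark the vector as a reserved
 * system vector in one step. */
#define alloc_intr_gate(vector, addr)		\
	do {					\
		alloc_system_vector(vector);	\
		set_intr_gate(vector, addr);	\
	} while (0)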
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c index a2b030780aa9..ba7d19e102b1 100644 --- a/arch/x86/kernel/vmiclock_32.c +++ b/arch/x86/kernel/vmiclock_32.c | |||
@@ -33,8 +33,7 @@ | |||
33 | #include <asm/apic.h> | 33 | #include <asm/apic.h> |
34 | #include <asm/timer.h> | 34 | #include <asm/timer.h> |
35 | #include <asm/i8253.h> | 35 | #include <asm/i8253.h> |
36 | 36 | #include <asm/irq_vectors.h> | |
37 | #include <irq_vectors.h> | ||
38 | 37 | ||
39 | #define VMI_ONESHOT (VMI_ALARM_IS_ONESHOT | VMI_CYCLES_REAL | vmi_get_alarm_wiring()) | 38 | #define VMI_ONESHOT (VMI_ALARM_IS_ONESHOT | VMI_CYCLES_REAL | vmi_get_alarm_wiring()) |
40 | #define VMI_PERIODIC (VMI_ALARM_IS_PERIODIC | VMI_CYCLES_REAL | vmi_get_alarm_wiring()) | 39 | #define VMI_PERIODIC (VMI_ALARM_IS_PERIODIC | VMI_CYCLES_REAL | vmi_get_alarm_wiring()) |
diff --git a/arch/x86/mach-visws/visws_apic.c b/arch/x86/mach-visws/visws_apic.c index cef9cb1d15ac..d8b2cfd85d92 100644 --- a/arch/x86/mach-visws/visws_apic.c +++ b/arch/x86/mach-visws/visws_apic.c | |||
@@ -21,10 +21,9 @@ | |||
21 | #include <asm/io.h> | 21 | #include <asm/io.h> |
22 | #include <asm/apic.h> | 22 | #include <asm/apic.h> |
23 | #include <asm/i8259.h> | 23 | #include <asm/i8259.h> |
24 | #include <asm/irq_vectors.h> | ||
24 | 25 | ||
25 | #include "cobalt.h" | 26 | #include "cobalt.h" |
26 | #include "irq_vectors.h" | ||
27 | |||
28 | 27 | ||
29 | static DEFINE_SPINLOCK(cobalt_lock); | 28 | static DEFINE_SPINLOCK(cobalt_lock); |
30 | 29 | ||