author		Ingo Molnar <mingo@elte.hu>	2008-07-08 03:53:57 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-08 03:53:57 -0400
commit		1b8ba39a3fad9c58532f6dad12c94d6e675be656 (patch)
tree		9ae9b4c4545b4c91f5dbb3a1085c4c721961c4f2 /arch
parent		58cf35228fec541418cc3bd781d6c069d904815e (diff)
parent		cbd6712406a3ea861b49fbfd46e23cbf5f8e073f (diff)
Merge branch 'x86/irq' into x86/devel
Conflicts:

	arch/x86/kernel/i8259.c
	arch/x86/kernel/irqinit_64.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/acpi/boot.c       |    2
-rw-r--r--  arch/x86/kernel/apic_32.c         |   18
-rw-r--r--  arch/x86/kernel/entry_32.S        |    2
-rw-r--r--  arch/x86/kernel/genx2apic_uv_x.c  |  141
-rw-r--r--  arch/x86/kernel/i8259.c           |   22
-rw-r--r--  arch/x86/kernel/io_apic_32.c      |    4
-rw-r--r--  arch/x86/kernel/io_apic_64.c      |    6
-rw-r--r--  arch/x86/kernel/irq_32.c          |  216
-rw-r--r--  arch/x86/kernel/irqinit_64.c      |   44
-rw-r--r--  arch/x86/kernel/vmiclock_32.c     |    3
-rw-r--r--  arch/x86/mach-visws/visws_apic.c  |    3
11 files changed, 275 insertions(+), 186 deletions(-)
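
Note on the recurring set_intr_gate() -> alloc_intr_gate() conversions below: they rely on the vector bookkeeping added in this merge (first_system_vector, system_vectors[]), while the allocator itself lives in <asm/desc.h> and is not part of this diff. A rough sketch of the assumed helper, shown here only for orientation:

/* Sketch only -- the real definition is in <asm/desc.h>, outside this diff. */
static inline void alloc_system_vector(int vector)
{
	if (system_vectors[vector] == SYS_VECTOR_FREE) {
		system_vectors[vector] = SYS_VECTOR_ALLOCED;
		if (first_system_vector > vector)
			first_system_vector = vector;
	} else
		BUG();
}

static inline void alloc_intr_gate(unsigned int n, void *addr)
{
	alloc_system_vector(n);	/* record the vector so device IRQs avoid it */
	set_intr_gate(n, addr);	/* then install the IDT gate as before */
}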
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 33c5216fd3e1..ff1a7b49a460 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -514,8 +514,6 @@ int acpi_register_gsi(u32 gsi, int triggering, int polarity)
 	 * Make sure all (legacy) PCI IRQs are set as level-triggered.
 	 */
 	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
-		extern void eisa_set_level_irq(unsigned int irq);
-
 		if (triggering == ACPI_LEVEL_SENSITIVE)
 			eisa_set_level_irq(gsi);
 	}
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index 45d8da405ad9..ce4538ebb7fe 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -70,6 +70,10 @@ static int local_apic_timer_disabled;
 int local_apic_timer_c2_ok;
 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
 
+int first_system_vector = 0xfe;
+
+char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE};
+
 /*
  * Debug level, exported for io_apic.c
  */
@@ -1351,13 +1355,13 @@ void __init smp_intr_init(void)
 	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
 	 * IPI, driven by wakeup.
 	 */
-	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
+	alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
 
 	/* IPI for invalidation */
-	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
+	alloc_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
 
 	/* IPI for generic function call */
-	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
 }
 #endif
 
@@ -1370,15 +1374,15 @@ void __init apic_intr_init(void)
 	smp_intr_init();
 #endif
 	/* self generated IPI for local APIC timer */
-	set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
+	alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
 
 	/* IPI vectors for APIC spurious and error interrupts */
-	set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
-	set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
+	alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
+	alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
 
 	/* thermal monitor LVT interrupt */
 #ifdef CONFIG_X86_MCE_P4THERMAL
-	set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
+	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
 #endif
 }
 
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index c778e4fa55a2..159a1c76d2bd 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -51,7 +51,7 @@
 #include <asm/percpu.h>
 #include <asm/dwarf2.h>
 #include <asm/processor-flags.h>
-#include "irq_vectors.h"
+#include <asm/irq_vectors.h>
 
 /*
  * We use macros for low-level operations which need to be overridden
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index ebf13908a743..45e84acca8a9 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -5,7 +5,7 @@
  *
  * SGI UV APIC functions (note: not an Intel compatible APIC)
  *
- * Copyright (C) 2007 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
  */
 
 #include <linux/threads.h>
@@ -55,37 +55,37 @@ static cpumask_t uv_vector_allocation_domain(int cpu)
 int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
 {
 	unsigned long val;
-	int nasid;
+	int pnode;
 
-	nasid = uv_apicid_to_nasid(phys_apicid);
+	pnode = uv_apicid_to_pnode(phys_apicid);
 	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
 	      (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
 	      (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
 	      APIC_DM_INIT;
-	uv_write_global_mmr64(nasid, UVH_IPI_INT, val);
+	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 	mdelay(10);
 
 	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
 	      (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
 	      (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
 	      APIC_DM_STARTUP;
-	uv_write_global_mmr64(nasid, UVH_IPI_INT, val);
+	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 	return 0;
 }
 
 static void uv_send_IPI_one(int cpu, int vector)
 {
 	unsigned long val, apicid, lapicid;
-	int nasid;
+	int pnode;
 
 	apicid = per_cpu(x86_cpu_to_apicid, cpu); /* ZZZ - cache node-local ? */
 	lapicid = apicid & 0x3f;	/* ZZZ macro needed */
-	nasid = uv_apicid_to_nasid(apicid);
+	pnode = uv_apicid_to_pnode(apicid);
 	val =
 	    (1UL << UVH_IPI_INT_SEND_SHFT) | (lapicid <<
 					      UVH_IPI_INT_APIC_ID_SHFT) |
 	    (vector << UVH_IPI_INT_VECTOR_SHFT);
-	uv_write_global_mmr64(nasid, UVH_IPI_INT, val);
+	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 }
 
 static void uv_send_IPI_mask(cpumask_t mask, int vector)
@@ -159,39 +159,81 @@ struct genapic apic_x2apic_uv_x = {
 	.phys_pkg_id = phys_pkg_id,	/* Fixme ZZZ */
 };
 
-static __cpuinit void set_x2apic_extra_bits(int nasid)
+static __cpuinit void set_x2apic_extra_bits(int pnode)
 {
-	__get_cpu_var(x2apic_extra_bits) = ((nasid >> 1) << 6);
+	__get_cpu_var(x2apic_extra_bits) = (pnode << 6);
 }
 
 /*
  * Called on boot cpu.
  */
+static __init int boot_pnode_to_blade(int pnode)
+{
+	int blade;
+
+	for (blade = 0; blade < uv_num_possible_blades(); blade++)
+		if (pnode == uv_blade_info[blade].pnode)
+			return blade;
+	BUG();
+}
+
+struct redir_addr {
+	unsigned long redirect;
+	unsigned long alias;
+};
+
+#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
+
+static __initdata struct redir_addr redir_addrs[] = {
+	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG},
+	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG},
+	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG},
+};
+
+static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
+{
+	union uvh_si_alias0_overlay_config_u alias;
+	union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
+		alias.v = uv_read_local_mmr(redir_addrs[i].alias);
+		if (alias.s.base == 0) {
+			*size = (1UL << alias.s.m_alias);
+			redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
+			*base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
+			return;
+		}
+	}
+	BUG();
+}
+
 static __init void uv_system_init(void)
 {
 	union uvh_si_addr_map_config_u m_n_config;
-	int bytes, nid, cpu, lcpu, nasid, last_nasid, blade;
-	unsigned long mmr_base;
+	union uvh_node_id_u node_id;
+	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
+	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
+	unsigned long mmr_base, present;
 
 	m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
+	m_val = m_n_config.s.m_skt;
+	n_val = m_n_config.s.n_skt;
 	mmr_base =
 	    uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
 	    ~UV_MMR_ENABLE;
 	printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);
 
-	last_nasid = -1;
-	for_each_possible_cpu(cpu) {
-		nid = cpu_to_node(cpu);
-		nasid = uv_apicid_to_nasid(per_cpu(x86_cpu_to_apicid, cpu));
-		if (nasid != last_nasid)
-			uv_possible_blades++;
-		last_nasid = nasid;
-	}
+	for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
+		uv_possible_blades +=
+		  hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8));
 	printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
 
 	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
 	uv_blade_info = alloc_bootmem_pages(bytes);
 
+	get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
+
 	bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
 	uv_node_to_blade = alloc_bootmem_pages(bytes);
 	memset(uv_node_to_blade, 255, bytes);
@@ -200,43 +242,56 @@ static __init void uv_system_init(void)
 	uv_cpu_to_blade = alloc_bootmem_pages(bytes);
 	memset(uv_cpu_to_blade, 255, bytes);
 
-	last_nasid = -1;
-	blade = -1;
-	lcpu = -1;
-	for_each_possible_cpu(cpu) {
-		nid = cpu_to_node(cpu);
-		nasid = uv_apicid_to_nasid(per_cpu(x86_cpu_to_apicid, cpu));
-		if (nasid != last_nasid) {
-			blade++;
-			lcpu = -1;
-			uv_blade_info[blade].nr_posible_cpus = 0;
+	blade = 0;
+	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
+		present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
+		for (j = 0; j < 64; j++) {
+			if (!test_bit(j, &present))
+				continue;
+			uv_blade_info[blade].pnode = (i * 64 + j);
+			uv_blade_info[blade].nr_possible_cpus = 0;
 			uv_blade_info[blade].nr_online_cpus = 0;
+			blade++;
 		}
-		last_nasid = nasid;
-		lcpu++;
+	}
 
-		uv_cpu_hub_info(cpu)->m_val = m_n_config.s.m_skt;
-		uv_cpu_hub_info(cpu)->n_val = m_n_config.s.n_skt;
+	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
+	gnode_upper = (((unsigned long)node_id.s.node_id) &
+		       ~((1 << n_val) - 1)) << m_val;
+
+	for_each_present_cpu(cpu) {
+		nid = cpu_to_node(cpu);
+		pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu));
+		blade = boot_pnode_to_blade(pnode);
+		lcpu = uv_blade_info[blade].nr_possible_cpus;
+		uv_blade_info[blade].nr_possible_cpus++;
+
+		uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
+		uv_cpu_hub_info(cpu)->lowmem_remap_top =
+					lowmem_redir_base + lowmem_redir_size;
+		uv_cpu_hub_info(cpu)->m_val = m_val;
+		uv_cpu_hub_info(cpu)->n_val = m_val;
 		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
 		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
-		uv_cpu_hub_info(cpu)->local_nasid = nasid;
-		uv_cpu_hub_info(cpu)->gnode_upper =
-		    nasid & ~((1 << uv_hub_info->n_val) - 1);
+		uv_cpu_hub_info(cpu)->pnode = pnode;
+		uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1;
+		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
+		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
 		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
 		uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */
-		uv_blade_info[blade].nasid = nasid;
-		uv_blade_info[blade].nr_posible_cpus++;
 		uv_node_to_blade[nid] = blade;
 		uv_cpu_to_blade[cpu] = blade;
 
-		printk(KERN_DEBUG "UV cpu %d, apicid 0x%x, nasid %d, nid %d\n",
-		       cpu, per_cpu(x86_cpu_to_apicid, cpu), nasid, nid);
-		printk(KERN_DEBUG "UV lcpu %d, blade %d\n", lcpu, blade);
+		printk(KERN_DEBUG "UV cpu %d, apicid 0x%x, pnode %d, nid %d, "
+		       "lcpu %d, blade %d\n",
+		       cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
+		       lcpu, blade);
 	}
 }
 
 /*
  * Called on each cpu to initialize the per_cpu UV data area.
+ * 	ZZZ hotplug not supported yet
  */
 void __cpuinit uv_cpu_init(void)
 {
@@ -246,5 +301,5 @@ void __cpuinit uv_cpu_init(void)
 	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;
 
 	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
-		set_x2apic_extra_bits(uv_hub_info->local_nasid);
+		set_x2apic_extra_bits(uv_hub_info->pnode);
 }
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 7a0fda8f01b5..dc92b49d9204 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -297,34 +297,28 @@ void init_8259A(int auto_eoi)
 	 * outb_pic - this has to work on a wide range of PC hardware.
 	 */
 	outb_pic(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
-#ifndef CONFIG_X86_64
-	outb_pic(0x20 + 0, PIC_MASTER_IMR);	/* ICW2: 8259A-1 IR0-7 mapped to 0x20-0x27 */
-	outb_pic(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);	/* 8259A-1 (the master) has a slave on IR2 */
-#else /* CONFIG_X86_64 */
-	/* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 */
+
+	/* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 on x86-64,
+	   to 0x20-0x27 on i386 */
 	outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR);
+
 	/* 8259A-1 (the master) has a slave on IR2 */
-	outb_pic(0x04, PIC_MASTER_IMR);
-#endif /* CONFIG_X86_64 */
+	outb_pic(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);
+
 	if (auto_eoi)	/* master does Auto EOI */
 		outb_pic(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
 	else		/* master expects normal EOI */
 		outb_pic(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
 
 	outb_pic(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */
-#ifndef CONFIG_X86_64
-	outb_pic(0x20 + 8, PIC_SLAVE_IMR);	/* ICW2: 8259A-2 IR0-7 mapped to 0x28-0x2f */
-	outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR);	/* 8259A-2 is a slave on master's IR2 */
-	outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
-#else /* CONFIG_X86_64 */
-	/* ICW2: 8259A-2 IR0-7 mapped to 0x38-0x3f */
+
+	/* ICW2: 8259A-2 IR0-7 mapped to IRQ8_VECTOR */
 	outb_pic(IRQ8_VECTOR, PIC_SLAVE_IMR);
 	/* 8259A-2 is a slave on master's IR2 */
 	outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR);
 	/* (slave's support for AEOI in flat mode is to be investigated) */
 	outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
 
-#endif /* CONFIG_X86_64 */
 	if (auto_eoi)
 		/*
 		 * In AEOI mode we just have to mask the interrupt
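
The unified ICW2 programming above works because the shared <asm/irq_vectors.h> now provides IRQ0_VECTOR and IRQ8_VECTOR for both architectures; that header is not part of this diff. Assuming the values named in the comment above (0x30-0x37 on x86-64, 0x20-0x27 on i386), the relevant definitions would look roughly like:

/* Rough sketch of the assumed <asm/irq_vectors.h> values (not in this diff). */
#ifdef CONFIG_X86_64
# define IRQ0_VECTOR	0x30	/* ISA IRQ0-7  -> vectors 0x30-0x37 */
#else
# define IRQ0_VECTOR	0x20	/* ISA IRQ0-7  -> vectors 0x20-0x27 */
#endif
#define IRQ8_VECTOR	(IRQ0_VECTOR + 8)	/* ISA IRQ8-15 */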
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index d4f9df2b022a..dac47d61d2be 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -1174,7 +1174,7 @@ static int __assign_irq_vector(int irq)
 		offset = current_offset;
 next:
 	vector += 8;
-	if (vector >= FIRST_SYSTEM_VECTOR) {
+	if (vector >= first_system_vector) {
 		offset = (offset + 1) % 8;
 		vector = FIRST_DEVICE_VECTOR + offset;
 	}
@@ -2280,7 +2280,7 @@ void __init setup_IO_APIC(void)
 	int i;
 
 	/* Reserve all the system vectors. */
-	for (i = FIRST_SYSTEM_VECTOR; i < NR_VECTORS; i++)
+	for (i = first_system_vector; i < NR_VECTORS; i++)
 		set_bit(i, used_vectors);
 
 	enable_IO_APIC();
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index e5ef60303562..78a3866ab367 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -82,6 +82,10 @@ static struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
 
 static int assign_irq_vector(int irq, cpumask_t mask);
 
+int first_system_vector = 0xfe;
+
+char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE};
+
 #define __apicdebuginit __init
 
 int sis_apic_bug; /* not actually supported, dummy for compile */
@@ -737,7 +741,7 @@ static int __assign_irq_vector(int irq, cpumask_t mask)
 		offset = current_offset;
 next:
 	vector += 8;
-	if (vector >= FIRST_SYSTEM_VECTOR) {
+	if (vector >= first_system_vector) {
 		/* If we run out of vectors on large boxen, must share them. */
 		offset = (offset + 1) % 8;
 		vector = FIRST_DEVICE_VECTOR + offset;
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 468acd04aa2e..47a6f6f12478 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -48,6 +48,29 @@ void ack_bad_irq(unsigned int irq)
 #endif
 }
 
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+/* Debugging check for stack overflow: is there less than 1KB free? */
+static int check_stack_overflow(void)
+{
+	long sp;
+
+	__asm__ __volatile__("andl %%esp,%0" :
+			     "=r" (sp) : "0" (THREAD_SIZE - 1));
+
+	return sp < (sizeof(struct thread_info) + STACK_WARN);
+}
+
+static void print_stack_overflow(void)
+{
+	printk(KERN_WARNING "low stack detected by irq handler\n");
+	dump_stack();
+}
+
+#else
+static inline int check_stack_overflow(void) { return 0; }
+static inline void print_stack_overflow(void) { }
+#endif
+
 #ifdef CONFIG_4KSTACKS
 /*
  * per-CPU IRQ handling contexts (thread information and stack)
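
The check_stack_overflow() helper added above relies on kernel stacks being THREAD_SIZE-aligned: masking %esp with THREAD_SIZE - 1 yields the offset of the stack pointer within the current stack, i.e. how many bytes remain above the thread_info at the stack's bottom. The same test written in plain C, as an illustrative sketch only (not part of the patch):

/* Illustrative sketch of the check above; not part of the patch. */
static int example_stack_low(unsigned long esp)
{
	unsigned long bytes_left = esp & (THREAD_SIZE - 1);

	/* thread_info sits at the stack bottom; STACK_WARN is the 1KB margin */
	return bytes_left < sizeof(struct thread_info) + STACK_WARN;
}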
@@ -59,48 +82,29 @@ union irq_ctx {
 
 static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
 static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
-#endif
 
-/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- */
-unsigned int do_IRQ(struct pt_regs *regs)
-{
-	struct pt_regs *old_regs;
-	/* high bit used in ret_from_ code */
-	int irq = ~regs->orig_ax;
-	struct irq_desc *desc = irq_desc + irq;
-#ifdef CONFIG_4KSTACKS
-	union irq_ctx *curctx, *irqctx;
-	u32 *isp;
-#endif
+static char softirq_stack[NR_CPUS * THREAD_SIZE]
+		__attribute__((__section__(".bss.page_aligned")));
 
-	if (unlikely((unsigned)irq >= NR_IRQS)) {
-		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-					__func__, irq);
-		BUG();
-	}
+static char hardirq_stack[NR_CPUS * THREAD_SIZE]
+		__attribute__((__section__(".bss.page_aligned")));
 
-	old_regs = set_irq_regs(regs);
-	irq_enter();
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
-	/* Debugging check for stack overflow: is there less than 1KB free? */
-	{
-		long sp;
-
-		__asm__ __volatile__("andl %%esp,%0" :
-				     "=r" (sp) : "0" (THREAD_SIZE - 1));
-		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
-			printk("do_IRQ: stack overflow: %ld\n",
-				sp - sizeof(struct thread_info));
-			dump_stack();
-		}
-	}
-#endif
+static void call_on_stack(void *func, void *stack)
+{
+	asm volatile("xchgl %%ebx,%%esp \n"
+		     "call *%%edi \n"
+		     "movl %%ebx,%%esp \n"
+		     : "=b" (stack)
+		     : "0" (stack),
+		       "D"(func)
+		     : "memory", "cc", "edx", "ecx", "eax");
+}
 
-#ifdef CONFIG_4KSTACKS
+static inline int
+execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+{
+	union irq_ctx *curctx, *irqctx;
+	u32 *isp, arg1, arg2;
 
 	curctx = (union irq_ctx *) current_thread_info();
 	irqctx = hardirq_ctx[smp_processor_id()];
@@ -111,52 +115,39 @@ unsigned int do_IRQ(struct pt_regs *regs)
 	 * handler) we can't do that and just have to keep using the
 	 * current stack (which is the irq stack already after all)
 	 */
-	if (curctx != irqctx) {
-		int arg1, arg2, bx;
-
-		/* build the stack frame on the IRQ stack */
-		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
-		irqctx->tinfo.task = curctx->tinfo.task;
-		irqctx->tinfo.previous_esp = current_stack_pointer;
-
-		/*
-		 * Copy the softirq bits in preempt_count so that the
-		 * softirq checks work in the hardirq context.
-		 */
-		irqctx->tinfo.preempt_count =
-			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
-			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
-
-		asm volatile(
-			" xchgl %%ebx,%%esp \n"
-			" call *%%edi \n"
-			" movl %%ebx,%%esp \n"
-			: "=a" (arg1), "=d" (arg2), "=b" (bx)
-			: "0" (irq), "1" (desc), "2" (isp),
-			  "D" (desc->handle_irq)
-			: "memory", "cc", "ecx"
-		);
-	} else
-#endif
-		desc->handle_irq(irq, desc);
-
-	irq_exit();
-	set_irq_regs(old_regs);
+	if (unlikely(curctx == irqctx))
+		return 0;
+
+	/* build the stack frame on the IRQ stack */
+	isp = (u32 *) ((char*)irqctx + sizeof(*irqctx));
+	irqctx->tinfo.task = curctx->tinfo.task;
+	irqctx->tinfo.previous_esp = current_stack_pointer;
+
+	/*
+	 * Copy the softirq bits in preempt_count so that the
+	 * softirq checks work in the hardirq context.
+	 */
+	irqctx->tinfo.preempt_count =
+		(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
+		(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
+
+	if (unlikely(overflow))
+		call_on_stack(print_stack_overflow, isp);
+
+	asm volatile("xchgl %%ebx,%%esp \n"
+		     "call *%%edi \n"
+		     "movl %%ebx,%%esp \n"
+		     : "=a" (arg1), "=d" (arg2), "=b" (isp)
+		     : "0" (irq), "1" (desc), "2" (isp),
+		       "D" (desc->handle_irq)
+		     : "memory", "cc", "ecx");
 	return 1;
 }
 
-#ifdef CONFIG_4KSTACKS
-
-static char softirq_stack[NR_CPUS * THREAD_SIZE]
-		__attribute__((__section__(".bss.page_aligned")));
-
-static char hardirq_stack[NR_CPUS * THREAD_SIZE]
-		__attribute__((__section__(".bss.page_aligned")));
-
 /*
  * allocate per-cpu stacks for hardirq and for softirq processing
  */
-void irq_ctx_init(int cpu)
+void __cpuinit irq_ctx_init(int cpu)
 {
 	union irq_ctx *irqctx;
 
@@ -164,25 +155,25 @@ void irq_ctx_init(int cpu)
 		return;
 
 	irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
 	irqctx->tinfo.task = NULL;
 	irqctx->tinfo.exec_domain = NULL;
 	irqctx->tinfo.cpu = cpu;
 	irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
 	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
 
 	hardirq_ctx[cpu] = irqctx;
 
 	irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
 	irqctx->tinfo.task = NULL;
 	irqctx->tinfo.exec_domain = NULL;
 	irqctx->tinfo.cpu = cpu;
 	irqctx->tinfo.preempt_count = 0;
 	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
 
 	softirq_ctx[cpu] = irqctx;
 
-	printk("CPU %u irqstacks, hard=%p soft=%p\n",
+	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
 	       cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
 }
 
 void irq_ctx_exit(int cpu)
@@ -211,25 +202,56 @@ asmlinkage void do_softirq(void)
 		/* build the stack frame on the softirq stack */
 		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
 
-		asm volatile(
-			" xchgl %%ebx,%%esp \n"
-			" call __do_softirq \n"
-			" movl %%ebx,%%esp \n"
-			: "=b"(isp)
-			: "0"(isp)
-			: "memory", "cc", "edx", "ecx", "eax"
-		);
+		call_on_stack(__do_softirq, isp);
 		/*
 		 * Shouldnt happen, we returned above if in_interrupt():
 		 */
 		WARN_ON_ONCE(softirq_count());
 	}
 
 	local_irq_restore(flags);
 }
+
+#else
+static inline int
+execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
 #endif
 
 /*
+ * do_IRQ handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+unsigned int do_IRQ(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs;
+	/* high bit used in ret_from_ code */
+	int overflow, irq = ~regs->orig_ax;
+	struct irq_desc *desc = irq_desc + irq;
+
+	if (unlikely((unsigned)irq >= NR_IRQS)) {
+		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
+					__func__, irq);
+		BUG();
+	}
+
+	old_regs = set_irq_regs(regs);
+	irq_enter();
+
+	overflow = check_stack_overflow();
+
+	if (!execute_on_irq_stack(overflow, desc, irq)) {
+		if (unlikely(overflow))
+			print_stack_overflow();
+		desc->handle_irq(irq, desc);
+	}
+
+	irq_exit();
+	set_irq_regs(old_regs);
+	return 1;
+}
+
+/*
  * Interrupt statistics:
  */
 
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c
index 64bc0f14285f..31f49e8f46a7 100644
--- a/arch/x86/kernel/irqinit_64.c
+++ b/arch/x86/kernel/irqinit_64.c
@@ -34,6 +34,20 @@
  * interrupt-controller happy.
  */
 
+#define IRQ_NAME2(nr) nr##_interrupt(void)
+#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
+
+/*
+ * SMP has a few special interrupts for IPI messages
+ */
+
+#define BUILD_IRQ(nr) \
+	asmlinkage void IRQ_NAME(nr); \
+	asm("\n.p2align\n" \
+	    "IRQ" #nr "_interrupt:\n\t" \
+	    "push $~(" #nr ") ; " \
+	    "jmp common_interrupt");
+
 #define BI(x,y) \
 	BUILD_IRQ(x##y)
 
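
For orientation, expanding the macros above for one hypothetical vector shows the stub they generate; BUILD_IRQ(0x21), for example, preprocesses to roughly:

/* Illustrative expansion of BUILD_IRQ(0x21); the vector number is made up. */
asmlinkage void IRQ0x21_interrupt(void);
asm("\n.p2align\n"
    "IRQ0x21_interrupt:\n\t"
    "push $~(0x21) ; "
    "jmp common_interrupt");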
@@ -170,33 +184,33 @@ void __init native_init_IRQ(void)
 	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
 	 * IPI, driven by wakeup.
 	 */
-	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
+	alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
 
 	/* IPIs for invalidation */
-	set_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
-	set_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
-	set_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
-	set_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
-	set_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
-	set_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
-	set_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
-	set_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
+	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
+	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
+	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
+	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
+	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
+	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
+	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
+	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
 
 	/* IPI for generic function call */
-	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
 
 	/* Low priority IPI to cleanup after moving an irq */
 	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
 #endif
-	set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
-	set_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
+	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
+	alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
 
 	/* self generated IPI for local APIC timer */
-	set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
+	alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
 
 	/* IPI vectors for APIC spurious and error interrupts */
-	set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
-	set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
+	alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
+	alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
 
 	if (!acpi_ioapic)
 		setup_irq(2, &irq2);
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index a2b030780aa9..ba7d19e102b1 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -33,8 +33,7 @@
 #include <asm/apic.h>
 #include <asm/timer.h>
 #include <asm/i8253.h>
-
-#include <irq_vectors.h>
+#include <asm/irq_vectors.h>
 
 #define VMI_ONESHOT  (VMI_ALARM_IS_ONESHOT  | VMI_CYCLES_REAL | vmi_get_alarm_wiring())
 #define VMI_PERIODIC (VMI_ALARM_IS_PERIODIC | VMI_CYCLES_REAL | vmi_get_alarm_wiring())
diff --git a/arch/x86/mach-visws/visws_apic.c b/arch/x86/mach-visws/visws_apic.c
index cef9cb1d15ac..d8b2cfd85d92 100644
--- a/arch/x86/mach-visws/visws_apic.c
+++ b/arch/x86/mach-visws/visws_apic.c
@@ -21,10 +21,9 @@
 #include <asm/io.h>
 #include <asm/apic.h>
 #include <asm/i8259.h>
+#include <asm/irq_vectors.h>
 
 #include "cobalt.h"
-#include "irq_vectors.h"
-
 
 static DEFINE_SPINLOCK(cobalt_lock);
 