27 files changed, 1253 insertions, 850 deletions
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 33c5216fd3e1..ff1a7b49a460 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -514,8 +514,6 @@ int acpi_register_gsi(u32 gsi, int triggering, int polarity)
514 | * Make sure all (legacy) PCI IRQs are set as level-triggered. | 514 | * Make sure all (legacy) PCI IRQs are set as level-triggered. |
515 | */ | 515 | */ |
516 | if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) { | 516 | if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) { |
517 | extern void eisa_set_level_irq(unsigned int irq); | ||
518 | |||
519 | if (triggering == ACPI_LEVEL_SENSITIVE) | 517 | if (triggering == ACPI_LEVEL_SENSITIVE) |
520 | eisa_set_level_irq(gsi); | 518 | eisa_set_level_irq(gsi); |
521 | } | 519 | } |
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index 45d8da405ad9..ce4538ebb7fe 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -70,6 +70,10 @@ static int local_apic_timer_disabled;
70 | int local_apic_timer_c2_ok; | 70 | int local_apic_timer_c2_ok; |
71 | EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); | 71 | EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); |
72 | 72 | ||
73 | int first_system_vector = 0xfe; | ||
74 | |||
75 | char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE}; | ||
76 | |||
73 | /* | 77 | /* |
74 | * Debug level, exported for io_apic.c | 78 | * Debug level, exported for io_apic.c |
75 | */ | 79 | */ |
@@ -1351,13 +1355,13 @@ void __init smp_intr_init(void)
1351 | * The reschedule interrupt is a CPU-to-CPU reschedule-helper | 1355 | * The reschedule interrupt is a CPU-to-CPU reschedule-helper |
1352 | * IPI, driven by wakeup. | 1356 | * IPI, driven by wakeup. |
1353 | */ | 1357 | */ |
1354 | set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); | 1358 | alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); |
1355 | 1359 | ||
1356 | /* IPI for invalidation */ | 1360 | /* IPI for invalidation */ |
1357 | set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt); | 1361 | alloc_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt); |
1358 | 1362 | ||
1359 | /* IPI for generic function call */ | 1363 | /* IPI for generic function call */ |
1360 | set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); | 1364 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); |
1361 | } | 1365 | } |
1362 | #endif | 1366 | #endif |
1363 | 1367 | ||
@@ -1370,15 +1374,15 @@ void __init apic_intr_init(void)
1370 | smp_intr_init(); | 1374 | smp_intr_init(); |
1371 | #endif | 1375 | #endif |
1372 | /* self generated IPI for local APIC timer */ | 1376 | /* self generated IPI for local APIC timer */ |
1373 | set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); | 1377 | alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); |
1374 | 1378 | ||
1375 | /* IPI vectors for APIC spurious and error interrupts */ | 1379 | /* IPI vectors for APIC spurious and error interrupts */ |
1376 | set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); | 1380 | alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); |
1377 | set_intr_gate(ERROR_APIC_VECTOR, error_interrupt); | 1381 | alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); |
1378 | 1382 | ||
1379 | /* thermal monitor LVT interrupt */ | 1383 | /* thermal monitor LVT interrupt */ |
1380 | #ifdef CONFIG_X86_MCE_P4THERMAL | 1384 | #ifdef CONFIG_X86_MCE_P4THERMAL |
1381 | set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); | 1385 | alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); |
1382 | #endif | 1386 | #endif |
1383 | } | 1387 | } |
1384 | 1388 | ||
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index c778e4fa55a2..159a1c76d2bd 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -51,7 +51,7 @@
51 | #include <asm/percpu.h> | 51 | #include <asm/percpu.h> |
52 | #include <asm/dwarf2.h> | 52 | #include <asm/dwarf2.h> |
53 | #include <asm/processor-flags.h> | 53 | #include <asm/processor-flags.h> |
54 | #include "irq_vectors.h" | 54 | #include <asm/irq_vectors.h> |
55 | 55 | ||
56 | /* | 56 | /* |
57 | * We use macros for low-level operations which need to be overridden | 57 | * We use macros for low-level operations which need to be overridden |
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index ebf13908a743..45e84acca8a9 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -5,7 +5,7 @@
5 | * | 5 | * |
6 | * SGI UV APIC functions (note: not an Intel compatible APIC) | 6 | * SGI UV APIC functions (note: not an Intel compatible APIC) |
7 | * | 7 | * |
8 | * Copyright (C) 2007 Silicon Graphics, Inc. All rights reserved. | 8 | * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/threads.h> | 11 | #include <linux/threads.h> |
@@ -55,37 +55,37 @@ static cpumask_t uv_vector_allocation_domain(int cpu)
55 | int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) | 55 | int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) |
56 | { | 56 | { |
57 | unsigned long val; | 57 | unsigned long val; |
58 | int nasid; | 58 | int pnode; |
59 | 59 | ||
60 | nasid = uv_apicid_to_nasid(phys_apicid); | 60 | pnode = uv_apicid_to_pnode(phys_apicid); |
61 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | | 61 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | |
62 | (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | | 62 | (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | |
63 | (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | | 63 | (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | |
64 | APIC_DM_INIT; | 64 | APIC_DM_INIT; |
65 | uv_write_global_mmr64(nasid, UVH_IPI_INT, val); | 65 | uv_write_global_mmr64(pnode, UVH_IPI_INT, val); |
66 | mdelay(10); | 66 | mdelay(10); |
67 | 67 | ||
68 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | | 68 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | |
69 | (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | | 69 | (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | |
70 | (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | | 70 | (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | |
71 | APIC_DM_STARTUP; | 71 | APIC_DM_STARTUP; |
72 | uv_write_global_mmr64(nasid, UVH_IPI_INT, val); | 72 | uv_write_global_mmr64(pnode, UVH_IPI_INT, val); |
73 | return 0; | 73 | return 0; |
74 | } | 74 | } |
75 | 75 | ||
76 | static void uv_send_IPI_one(int cpu, int vector) | 76 | static void uv_send_IPI_one(int cpu, int vector) |
77 | { | 77 | { |
78 | unsigned long val, apicid, lapicid; | 78 | unsigned long val, apicid, lapicid; |
79 | int nasid; | 79 | int pnode; |
80 | 80 | ||
81 | apicid = per_cpu(x86_cpu_to_apicid, cpu); /* ZZZ - cache node-local ? */ | 81 | apicid = per_cpu(x86_cpu_to_apicid, cpu); /* ZZZ - cache node-local ? */ |
82 | lapicid = apicid & 0x3f; /* ZZZ macro needed */ | 82 | lapicid = apicid & 0x3f; /* ZZZ macro needed */ |
83 | nasid = uv_apicid_to_nasid(apicid); | 83 | pnode = uv_apicid_to_pnode(apicid); |
84 | val = | 84 | val = |
85 | (1UL << UVH_IPI_INT_SEND_SHFT) | (lapicid << | 85 | (1UL << UVH_IPI_INT_SEND_SHFT) | (lapicid << |
86 | UVH_IPI_INT_APIC_ID_SHFT) | | 86 | UVH_IPI_INT_APIC_ID_SHFT) | |
87 | (vector << UVH_IPI_INT_VECTOR_SHFT); | 87 | (vector << UVH_IPI_INT_VECTOR_SHFT); |
88 | uv_write_global_mmr64(nasid, UVH_IPI_INT, val); | 88 | uv_write_global_mmr64(pnode, UVH_IPI_INT, val); |
89 | } | 89 | } |
90 | 90 | ||
91 | static void uv_send_IPI_mask(cpumask_t mask, int vector) | 91 | static void uv_send_IPI_mask(cpumask_t mask, int vector) |
@@ -159,39 +159,81 @@ struct genapic apic_x2apic_uv_x = {
159 | .phys_pkg_id = phys_pkg_id, /* Fixme ZZZ */ | 159 | .phys_pkg_id = phys_pkg_id, /* Fixme ZZZ */ |
160 | }; | 160 | }; |
161 | 161 | ||
162 | static __cpuinit void set_x2apic_extra_bits(int nasid) | 162 | static __cpuinit void set_x2apic_extra_bits(int pnode) |
163 | { | 163 | { |
164 | __get_cpu_var(x2apic_extra_bits) = ((nasid >> 1) << 6); | 164 | __get_cpu_var(x2apic_extra_bits) = (pnode << 6); |
165 | } | 165 | } |
166 | 166 | ||
167 | /* | 167 | /* |
168 | * Called on boot cpu. | 168 | * Called on boot cpu. |
169 | */ | 169 | */ |
170 | static __init int boot_pnode_to_blade(int pnode) | ||
171 | { | ||
172 | int blade; | ||
173 | |||
174 | for (blade = 0; blade < uv_num_possible_blades(); blade++) | ||
175 | if (pnode == uv_blade_info[blade].pnode) | ||
176 | return blade; | ||
177 | BUG(); | ||
178 | } | ||
179 | |||
180 | struct redir_addr { | ||
181 | unsigned long redirect; | ||
182 | unsigned long alias; | ||
183 | }; | ||
184 | |||
185 | #define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT | ||
186 | |||
187 | static __initdata struct redir_addr redir_addrs[] = { | ||
188 | {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG}, | ||
189 | {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG}, | ||
190 | {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG}, | ||
191 | }; | ||
192 | |||
193 | static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size) | ||
194 | { | ||
195 | union uvh_si_alias0_overlay_config_u alias; | ||
196 | union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect; | ||
197 | int i; | ||
198 | |||
199 | for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) { | ||
200 | alias.v = uv_read_local_mmr(redir_addrs[i].alias); | ||
201 | if (alias.s.base == 0) { | ||
202 | *size = (1UL << alias.s.m_alias); | ||
203 | redirect.v = uv_read_local_mmr(redir_addrs[i].redirect); | ||
204 | *base = (unsigned long)redirect.s.dest_base << DEST_SHIFT; | ||
205 | return; | ||
206 | } | ||
207 | } | ||
208 | BUG(); | ||
209 | } | ||
210 | |||
170 | static __init void uv_system_init(void) | 211 | static __init void uv_system_init(void) |
171 | { | 212 | { |
172 | union uvh_si_addr_map_config_u m_n_config; | 213 | union uvh_si_addr_map_config_u m_n_config; |
173 | int bytes, nid, cpu, lcpu, nasid, last_nasid, blade; | 214 | union uvh_node_id_u node_id; |
174 | unsigned long mmr_base; | 215 | unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size; |
216 | int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val; | ||
217 | unsigned long mmr_base, present; | ||
175 | 218 | ||
176 | m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG); | 219 | m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG); |
220 | m_val = m_n_config.s.m_skt; | ||
221 | n_val = m_n_config.s.n_skt; | ||
177 | mmr_base = | 222 | mmr_base = |
178 | uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) & | 223 | uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) & |
179 | ~UV_MMR_ENABLE; | 224 | ~UV_MMR_ENABLE; |
180 | printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base); | 225 | printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base); |
181 | 226 | ||
182 | last_nasid = -1; | 227 | for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) |
183 | for_each_possible_cpu(cpu) { | 228 | uv_possible_blades += |
184 | nid = cpu_to_node(cpu); | 229 | hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8)); |
185 | nasid = uv_apicid_to_nasid(per_cpu(x86_cpu_to_apicid, cpu)); | ||
186 | if (nasid != last_nasid) | ||
187 | uv_possible_blades++; | ||
188 | last_nasid = nasid; | ||
189 | } | ||
190 | printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades()); | 230 | printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades()); |
191 | 231 | ||
192 | bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); | 232 | bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); |
193 | uv_blade_info = alloc_bootmem_pages(bytes); | 233 | uv_blade_info = alloc_bootmem_pages(bytes); |
194 | 234 | ||
235 | get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size); | ||
236 | |||
195 | bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes(); | 237 | bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes(); |
196 | uv_node_to_blade = alloc_bootmem_pages(bytes); | 238 | uv_node_to_blade = alloc_bootmem_pages(bytes); |
197 | memset(uv_node_to_blade, 255, bytes); | 239 | memset(uv_node_to_blade, 255, bytes); |
@@ -200,43 +242,56 @@ static __init void uv_system_init(void)
200 | uv_cpu_to_blade = alloc_bootmem_pages(bytes); | 242 | uv_cpu_to_blade = alloc_bootmem_pages(bytes); |
201 | memset(uv_cpu_to_blade, 255, bytes); | 243 | memset(uv_cpu_to_blade, 255, bytes); |
202 | 244 | ||
203 | last_nasid = -1; | 245 | blade = 0; |
204 | blade = -1; | 246 | for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) { |
205 | lcpu = -1; | 247 | present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8); |
206 | for_each_possible_cpu(cpu) { | 248 | for (j = 0; j < 64; j++) { |
207 | nid = cpu_to_node(cpu); | 249 | if (!test_bit(j, &present)) |
208 | nasid = uv_apicid_to_nasid(per_cpu(x86_cpu_to_apicid, cpu)); | 250 | continue; |
209 | if (nasid != last_nasid) { | 251 | uv_blade_info[blade].pnode = (i * 64 + j); |
210 | blade++; | 252 | uv_blade_info[blade].nr_possible_cpus = 0; |
211 | lcpu = -1; | ||
212 | uv_blade_info[blade].nr_posible_cpus = 0; | ||
213 | uv_blade_info[blade].nr_online_cpus = 0; | 253 | uv_blade_info[blade].nr_online_cpus = 0; |
254 | blade++; | ||
214 | } | 255 | } |
215 | last_nasid = nasid; | 256 | } |
216 | lcpu++; | ||
217 | 257 | ||
218 | uv_cpu_hub_info(cpu)->m_val = m_n_config.s.m_skt; | 258 | node_id.v = uv_read_local_mmr(UVH_NODE_ID); |
219 | uv_cpu_hub_info(cpu)->n_val = m_n_config.s.n_skt; | 259 | gnode_upper = (((unsigned long)node_id.s.node_id) & |
260 | ~((1 << n_val) - 1)) << m_val; | ||
261 | |||
262 | for_each_present_cpu(cpu) { | ||
263 | nid = cpu_to_node(cpu); | ||
264 | pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu)); | ||
265 | blade = boot_pnode_to_blade(pnode); | ||
266 | lcpu = uv_blade_info[blade].nr_possible_cpus; | ||
267 | uv_blade_info[blade].nr_possible_cpus++; | ||
268 | |||
269 | uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base; | ||
270 | uv_cpu_hub_info(cpu)->lowmem_remap_top = | ||
271 | lowmem_redir_base + lowmem_redir_size; | ||
272 | uv_cpu_hub_info(cpu)->m_val = m_val; | ||
273 | uv_cpu_hub_info(cpu)->n_val = m_val; | ||
220 | uv_cpu_hub_info(cpu)->numa_blade_id = blade; | 274 | uv_cpu_hub_info(cpu)->numa_blade_id = blade; |
221 | uv_cpu_hub_info(cpu)->blade_processor_id = lcpu; | 275 | uv_cpu_hub_info(cpu)->blade_processor_id = lcpu; |
222 | uv_cpu_hub_info(cpu)->local_nasid = nasid; | 276 | uv_cpu_hub_info(cpu)->pnode = pnode; |
223 | uv_cpu_hub_info(cpu)->gnode_upper = | 277 | uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1; |
224 | nasid & ~((1 << uv_hub_info->n_val) - 1); | 278 | uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1; |
279 | uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper; | ||
225 | uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base; | 280 | uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base; |
226 | uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */ | 281 | uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */ |
227 | uv_blade_info[blade].nasid = nasid; | ||
228 | uv_blade_info[blade].nr_posible_cpus++; | ||
229 | uv_node_to_blade[nid] = blade; | 282 | uv_node_to_blade[nid] = blade; |
230 | uv_cpu_to_blade[cpu] = blade; | 283 | uv_cpu_to_blade[cpu] = blade; |
231 | 284 | ||
232 | printk(KERN_DEBUG "UV cpu %d, apicid 0x%x, nasid %d, nid %d\n", | 285 | printk(KERN_DEBUG "UV cpu %d, apicid 0x%x, pnode %d, nid %d, " |
233 | cpu, per_cpu(x86_cpu_to_apicid, cpu), nasid, nid); | 286 | "lcpu %d, blade %d\n", |
234 | printk(KERN_DEBUG "UV lcpu %d, blade %d\n", lcpu, blade); | 287 | cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid, |
288 | lcpu, blade); | ||
235 | } | 289 | } |
236 | } | 290 | } |
237 | 291 | ||
238 | /* | 292 | /* |
239 | * Called on each cpu to initialize the per_cpu UV data area. | 293 | * Called on each cpu to initialize the per_cpu UV data area. |
294 | * ZZZ hotplug not supported yet | ||
240 | */ | 295 | */ |
241 | void __cpuinit uv_cpu_init(void) | 296 | void __cpuinit uv_cpu_init(void) |
242 | { | 297 | { |
@@ -246,5 +301,5 @@ void __cpuinit uv_cpu_init(void)
246 | uv_blade_info[uv_numa_blade_id()].nr_online_cpus++; | 301 | uv_blade_info[uv_numa_blade_id()].nr_online_cpus++; |
247 | 302 | ||
248 | if (get_uv_system_type() == UV_NON_UNIQUE_APIC) | 303 | if (get_uv_system_type() == UV_NON_UNIQUE_APIC) |
249 | set_x2apic_extra_bits(uv_hub_info->local_nasid); | 304 | set_x2apic_extra_bits(uv_hub_info->pnode); |
250 | } | 305 | } |
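
In the reworked uv_system_init() above, the blade count is no longer inferred by comparing per-cpu apicids; it is read directly from the hub's node-present bitmaps. A minimal sketch of that counting step (the helper name count_uv_blades is purely illustrative, not part of the patch):

        static __init int count_uv_blades(void)
        {
                int i, blades = 0;

                /* One 64-bit present-mask word per table entry */
                for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
                        blades += hweight64(uv_read_local_mmr(
                                        UVH_NODE_PRESENT_TABLE + i * 8));
                return blades;
        }

Each set bit names a present pnode, so bit j of word i corresponds to pnode i * 64 + j, which is exactly what the blade-initialization loop stores in uv_blade_info[blade].pnode.
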
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 7a0fda8f01b5..dc92b49d9204 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -297,34 +297,28 @@ void init_8259A(int auto_eoi)
297 | * outb_pic - this has to work on a wide range of PC hardware. | 297 | * outb_pic - this has to work on a wide range of PC hardware. |
298 | */ | 298 | */ |
299 | outb_pic(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */ | 299 | outb_pic(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */ |
300 | #ifndef CONFIG_X86_64 | 300 | |
301 | outb_pic(0x20 + 0, PIC_MASTER_IMR); /* ICW2: 8259A-1 IR0-7 mapped to 0x20-0x27 */ | 301 | /* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 on x86-64, |
302 | outb_pic(1U << PIC_CASCADE_IR, PIC_MASTER_IMR); /* 8259A-1 (the master) has a slave on IR2 */ | 302 | to 0x20-0x27 on i386 */ |
303 | #else /* CONFIG_X86_64 */ | ||
304 | /* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 */ | ||
305 | outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR); | 303 | outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR); |
304 | |||
306 | /* 8259A-1 (the master) has a slave on IR2 */ | 305 | /* 8259A-1 (the master) has a slave on IR2 */ |
307 | outb_pic(0x04, PIC_MASTER_IMR); | 306 | outb_pic(1U << PIC_CASCADE_IR, PIC_MASTER_IMR); |
308 | #endif /* CONFIG_X86_64 */ | 307 | |
309 | if (auto_eoi) /* master does Auto EOI */ | 308 | if (auto_eoi) /* master does Auto EOI */ |
310 | outb_pic(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR); | 309 | outb_pic(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR); |
311 | else /* master expects normal EOI */ | 310 | else /* master expects normal EOI */ |
312 | outb_pic(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR); | 311 | outb_pic(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR); |
313 | 312 | ||
314 | outb_pic(0x11, PIC_SLAVE_CMD); /* ICW1: select 8259A-2 init */ | 313 | outb_pic(0x11, PIC_SLAVE_CMD); /* ICW1: select 8259A-2 init */ |
315 | #ifndef CONFIG_X86_64 | 314 | |
316 | outb_pic(0x20 + 8, PIC_SLAVE_IMR); /* ICW2: 8259A-2 IR0-7 mapped to 0x28-0x2f */ | 315 | /* ICW2: 8259A-2 IR0-7 mapped to IRQ8_VECTOR */ |
317 | outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR); /* 8259A-2 is a slave on master's IR2 */ | ||
318 | outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */ | ||
319 | #else /* CONFIG_X86_64 */ | ||
320 | /* ICW2: 8259A-2 IR0-7 mapped to 0x38-0x3f */ | ||
321 | outb_pic(IRQ8_VECTOR, PIC_SLAVE_IMR); | 316 | outb_pic(IRQ8_VECTOR, PIC_SLAVE_IMR); |
322 | /* 8259A-2 is a slave on master's IR2 */ | 317 | /* 8259A-2 is a slave on master's IR2 */ |
323 | outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR); | 318 | outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR); |
324 | /* (slave's support for AEOI in flat mode is to be investigated) */ | 319 | /* (slave's support for AEOI in flat mode is to be investigated) */ |
325 | outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); | 320 | outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); |
326 | 321 | ||
327 | #endif /* CONFIG_X86_64 */ | ||
328 | if (auto_eoi) | 322 | if (auto_eoi) |
329 | /* | 323 | /* |
330 | * In AEOI mode we just have to mask the interrupt | 324 | * In AEOI mode we just have to mask the interrupt |
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index d4f9df2b022a..dac47d61d2be 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -1174,7 +1174,7 @@ static int __assign_irq_vector(int irq)
1174 | offset = current_offset; | 1174 | offset = current_offset; |
1175 | next: | 1175 | next: |
1176 | vector += 8; | 1176 | vector += 8; |
1177 | if (vector >= FIRST_SYSTEM_VECTOR) { | 1177 | if (vector >= first_system_vector) { |
1178 | offset = (offset + 1) % 8; | 1178 | offset = (offset + 1) % 8; |
1179 | vector = FIRST_DEVICE_VECTOR + offset; | 1179 | vector = FIRST_DEVICE_VECTOR + offset; |
1180 | } | 1180 | } |
@@ -2280,7 +2280,7 @@ void __init setup_IO_APIC(void)
2280 | int i; | 2280 | int i; |
2281 | 2281 | ||
2282 | /* Reserve all the system vectors. */ | 2282 | /* Reserve all the system vectors. */ |
2283 | for (i = FIRST_SYSTEM_VECTOR; i < NR_VECTORS; i++) | 2283 | for (i = first_system_vector; i < NR_VECTORS; i++) |
2284 | set_bit(i, used_vectors); | 2284 | set_bit(i, used_vectors); |
2285 | 2285 | ||
2286 | enable_IO_APIC(); | 2286 | enable_IO_APIC(); |
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index e5ef60303562..78a3866ab367 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -82,6 +82,10 @@ static struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
82 | 82 | ||
83 | static int assign_irq_vector(int irq, cpumask_t mask); | 83 | static int assign_irq_vector(int irq, cpumask_t mask); |
84 | 84 | ||
85 | int first_system_vector = 0xfe; | ||
86 | |||
87 | char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE}; | ||
88 | |||
85 | #define __apicdebuginit __init | 89 | #define __apicdebuginit __init |
86 | 90 | ||
87 | int sis_apic_bug; /* not actually supported, dummy for compile */ | 91 | int sis_apic_bug; /* not actually supported, dummy for compile */ |
@@ -737,7 +741,7 @@ static int __assign_irq_vector(int irq, cpumask_t mask)
737 | offset = current_offset; | 741 | offset = current_offset; |
738 | next: | 742 | next: |
739 | vector += 8; | 743 | vector += 8; |
740 | if (vector >= FIRST_SYSTEM_VECTOR) { | 744 | if (vector >= first_system_vector) { |
741 | /* If we run out of vectors on large boxen, must share them. */ | 745 | /* If we run out of vectors on large boxen, must share them. */ |
742 | offset = (offset + 1) % 8; | 746 | offset = (offset + 1) % 8; |
743 | vector = FIRST_DEVICE_VECTOR + offset; | 747 | vector = FIRST_DEVICE_VECTOR + offset; |
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 468acd04aa2e..47a6f6f12478 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -48,6 +48,29 @@ void ack_bad_irq(unsigned int irq)
48 | #endif | 48 | #endif |
49 | } | 49 | } |
50 | 50 | ||
51 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | ||
52 | /* Debugging check for stack overflow: is there less than 1KB free? */ | ||
53 | static int check_stack_overflow(void) | ||
54 | { | ||
55 | long sp; | ||
56 | |||
57 | __asm__ __volatile__("andl %%esp,%0" : | ||
58 | "=r" (sp) : "0" (THREAD_SIZE - 1)); | ||
59 | |||
60 | return sp < (sizeof(struct thread_info) + STACK_WARN); | ||
61 | } | ||
62 | |||
63 | static void print_stack_overflow(void) | ||
64 | { | ||
65 | printk(KERN_WARNING "low stack detected by irq handler\n"); | ||
66 | dump_stack(); | ||
67 | } | ||
68 | |||
69 | #else | ||
70 | static inline int check_stack_overflow(void) { return 0; } | ||
71 | static inline void print_stack_overflow(void) { } | ||
72 | #endif | ||
73 | |||
51 | #ifdef CONFIG_4KSTACKS | 74 | #ifdef CONFIG_4KSTACKS |
52 | /* | 75 | /* |
53 | * per-CPU IRQ handling contexts (thread information and stack) | 76 | * per-CPU IRQ handling contexts (thread information and stack) |
@@ -59,48 +82,29 @@ union irq_ctx {
59 | 82 | ||
60 | static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly; | 83 | static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly; |
61 | static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly; | 84 | static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly; |
62 | #endif | ||
63 | 85 | ||
64 | /* | 86 | static char softirq_stack[NR_CPUS * THREAD_SIZE] |
65 | * do_IRQ handles all normal device IRQ's (the special | 87 | __attribute__((__section__(".bss.page_aligned"))); |
66 | * SMP cross-CPU interrupts have their own specific | ||
67 | * handlers). | ||
68 | */ | ||
69 | unsigned int do_IRQ(struct pt_regs *regs) | ||
70 | { | ||
71 | struct pt_regs *old_regs; | ||
72 | /* high bit used in ret_from_ code */ | ||
73 | int irq = ~regs->orig_ax; | ||
74 | struct irq_desc *desc = irq_desc + irq; | ||
75 | #ifdef CONFIG_4KSTACKS | ||
76 | union irq_ctx *curctx, *irqctx; | ||
77 | u32 *isp; | ||
78 | #endif | ||
79 | 88 | ||
80 | if (unlikely((unsigned)irq >= NR_IRQS)) { | 89 | static char hardirq_stack[NR_CPUS * THREAD_SIZE] |
81 | printk(KERN_EMERG "%s: cannot handle IRQ %d\n", | 90 | __attribute__((__section__(".bss.page_aligned"))); |
82 | __func__, irq); | ||
83 | BUG(); | ||
84 | } | ||
85 | 91 | ||
86 | old_regs = set_irq_regs(regs); | 92 | static void call_on_stack(void *func, void *stack) |
87 | irq_enter(); | 93 | { |
88 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | 94 | asm volatile("xchgl %%ebx,%%esp \n" |
89 | /* Debugging check for stack overflow: is there less than 1KB free? */ | 95 | "call *%%edi \n" |
90 | { | 96 | "movl %%ebx,%%esp \n" |
91 | long sp; | 97 | : "=b" (stack) |
92 | 98 | : "0" (stack), | |
93 | __asm__ __volatile__("andl %%esp,%0" : | 99 | "D"(func) |
94 | "=r" (sp) : "0" (THREAD_SIZE - 1)); | 100 | : "memory", "cc", "edx", "ecx", "eax"); |
95 | if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { | 101 | } |
96 | printk("do_IRQ: stack overflow: %ld\n", | ||
97 | sp - sizeof(struct thread_info)); | ||
98 | dump_stack(); | ||
99 | } | ||
100 | } | ||
101 | #endif | ||
102 | 102 | ||
103 | #ifdef CONFIG_4KSTACKS | 103 | static inline int |
104 | execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) | ||
105 | { | ||
106 | union irq_ctx *curctx, *irqctx; | ||
107 | u32 *isp, arg1, arg2; | ||
104 | 108 | ||
105 | curctx = (union irq_ctx *) current_thread_info(); | 109 | curctx = (union irq_ctx *) current_thread_info(); |
106 | irqctx = hardirq_ctx[smp_processor_id()]; | 110 | irqctx = hardirq_ctx[smp_processor_id()]; |
@@ -111,52 +115,39 @@ unsigned int do_IRQ(struct pt_regs *regs)
111 | * handler) we can't do that and just have to keep using the | 115 | * handler) we can't do that and just have to keep using the |
112 | * current stack (which is the irq stack already after all) | 116 | * current stack (which is the irq stack already after all) |
113 | */ | 117 | */ |
114 | if (curctx != irqctx) { | 118 | if (unlikely(curctx == irqctx)) |
115 | int arg1, arg2, bx; | 119 | return 0; |
116 | 120 | ||
117 | /* build the stack frame on the IRQ stack */ | 121 | /* build the stack frame on the IRQ stack */ |
118 | isp = (u32*) ((char*)irqctx + sizeof(*irqctx)); | 122 | isp = (u32 *) ((char*)irqctx + sizeof(*irqctx)); |
119 | irqctx->tinfo.task = curctx->tinfo.task; | 123 | irqctx->tinfo.task = curctx->tinfo.task; |
120 | irqctx->tinfo.previous_esp = current_stack_pointer; | 124 | irqctx->tinfo.previous_esp = current_stack_pointer; |
121 | 125 | ||
122 | /* | 126 | /* |
123 | * Copy the softirq bits in preempt_count so that the | 127 | * Copy the softirq bits in preempt_count so that the |
124 | * softirq checks work in the hardirq context. | 128 | * softirq checks work in the hardirq context. |
125 | */ | 129 | */ |
126 | irqctx->tinfo.preempt_count = | 130 | irqctx->tinfo.preempt_count = |
127 | (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) | | 131 | (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) | |
128 | (curctx->tinfo.preempt_count & SOFTIRQ_MASK); | 132 | (curctx->tinfo.preempt_count & SOFTIRQ_MASK); |
129 | 133 | ||
130 | asm volatile( | 134 | if (unlikely(overflow)) |
131 | " xchgl %%ebx,%%esp \n" | 135 | call_on_stack(print_stack_overflow, isp); |
132 | " call *%%edi \n" | 136 | |
133 | " movl %%ebx,%%esp \n" | 137 | asm volatile("xchgl %%ebx,%%esp \n" |
134 | : "=a" (arg1), "=d" (arg2), "=b" (bx) | 138 | "call *%%edi \n" |
135 | : "0" (irq), "1" (desc), "2" (isp), | 139 | "movl %%ebx,%%esp \n" |
136 | "D" (desc->handle_irq) | 140 | : "=a" (arg1), "=d" (arg2), "=b" (isp) |
137 | : "memory", "cc", "ecx" | 141 | : "0" (irq), "1" (desc), "2" (isp), |
138 | ); | 142 | "D" (desc->handle_irq) |
139 | } else | 143 | : "memory", "cc", "ecx"); |
140 | #endif | ||
141 | desc->handle_irq(irq, desc); | ||
142 | |||
143 | irq_exit(); | ||
144 | set_irq_regs(old_regs); | ||
145 | return 1; | 144 | return 1; |
146 | } | 145 | } |
147 | 146 | ||
148 | #ifdef CONFIG_4KSTACKS | ||
149 | |||
150 | static char softirq_stack[NR_CPUS * THREAD_SIZE] | ||
151 | __attribute__((__section__(".bss.page_aligned"))); | ||
152 | |||
153 | static char hardirq_stack[NR_CPUS * THREAD_SIZE] | ||
154 | __attribute__((__section__(".bss.page_aligned"))); | ||
155 | |||
156 | /* | 147 | /* |
157 | * allocate per-cpu stacks for hardirq and for softirq processing | 148 | * allocate per-cpu stacks for hardirq and for softirq processing |
158 | */ | 149 | */ |
159 | void irq_ctx_init(int cpu) | 150 | void __cpuinit irq_ctx_init(int cpu) |
160 | { | 151 | { |
161 | union irq_ctx *irqctx; | 152 | union irq_ctx *irqctx; |
162 | 153 | ||
@@ -164,25 +155,25 @@ void irq_ctx_init(int cpu)
164 | return; | 155 | return; |
165 | 156 | ||
166 | irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE]; | 157 | irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE]; |
167 | irqctx->tinfo.task = NULL; | 158 | irqctx->tinfo.task = NULL; |
168 | irqctx->tinfo.exec_domain = NULL; | 159 | irqctx->tinfo.exec_domain = NULL; |
169 | irqctx->tinfo.cpu = cpu; | 160 | irqctx->tinfo.cpu = cpu; |
170 | irqctx->tinfo.preempt_count = HARDIRQ_OFFSET; | 161 | irqctx->tinfo.preempt_count = HARDIRQ_OFFSET; |
171 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); | 162 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); |
172 | 163 | ||
173 | hardirq_ctx[cpu] = irqctx; | 164 | hardirq_ctx[cpu] = irqctx; |
174 | 165 | ||
175 | irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE]; | 166 | irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE]; |
176 | irqctx->tinfo.task = NULL; | 167 | irqctx->tinfo.task = NULL; |
177 | irqctx->tinfo.exec_domain = NULL; | 168 | irqctx->tinfo.exec_domain = NULL; |
178 | irqctx->tinfo.cpu = cpu; | 169 | irqctx->tinfo.cpu = cpu; |
179 | irqctx->tinfo.preempt_count = 0; | 170 | irqctx->tinfo.preempt_count = 0; |
180 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); | 171 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); |
181 | 172 | ||
182 | softirq_ctx[cpu] = irqctx; | 173 | softirq_ctx[cpu] = irqctx; |
183 | 174 | ||
184 | printk("CPU %u irqstacks, hard=%p soft=%p\n", | 175 | printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n", |
185 | cpu,hardirq_ctx[cpu],softirq_ctx[cpu]); | 176 | cpu,hardirq_ctx[cpu],softirq_ctx[cpu]); |
186 | } | 177 | } |
187 | 178 | ||
188 | void irq_ctx_exit(int cpu) | 179 | void irq_ctx_exit(int cpu) |
@@ -211,25 +202,56 @@ asmlinkage void do_softirq(void)
211 | /* build the stack frame on the softirq stack */ | 202 | /* build the stack frame on the softirq stack */ |
212 | isp = (u32*) ((char*)irqctx + sizeof(*irqctx)); | 203 | isp = (u32*) ((char*)irqctx + sizeof(*irqctx)); |
213 | 204 | ||
214 | asm volatile( | 205 | call_on_stack(__do_softirq, isp); |
215 | " xchgl %%ebx,%%esp \n" | ||
216 | " call __do_softirq \n" | ||
217 | " movl %%ebx,%%esp \n" | ||
218 | : "=b"(isp) | ||
219 | : "0"(isp) | ||
220 | : "memory", "cc", "edx", "ecx", "eax" | ||
221 | ); | ||
222 | /* | 206 | /* |
223 | * Shouldnt happen, we returned above if in_interrupt(): | 207 | * Shouldnt happen, we returned above if in_interrupt(): |
224 | */ | 208 | */ |
225 | WARN_ON_ONCE(softirq_count()); | 209 | WARN_ON_ONCE(softirq_count()); |
226 | } | 210 | } |
227 | 211 | ||
228 | local_irq_restore(flags); | 212 | local_irq_restore(flags); |
229 | } | 213 | } |
214 | |||
215 | #else | ||
216 | static inline int | ||
217 | execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; } | ||
230 | #endif | 218 | #endif |
231 | 219 | ||
232 | /* | 220 | /* |
221 | * do_IRQ handles all normal device IRQ's (the special | ||
222 | * SMP cross-CPU interrupts have their own specific | ||
223 | * handlers). | ||
224 | */ | ||
225 | unsigned int do_IRQ(struct pt_regs *regs) | ||
226 | { | ||
227 | struct pt_regs *old_regs; | ||
228 | /* high bit used in ret_from_ code */ | ||
229 | int overflow, irq = ~regs->orig_ax; | ||
230 | struct irq_desc *desc = irq_desc + irq; | ||
231 | |||
232 | if (unlikely((unsigned)irq >= NR_IRQS)) { | ||
233 | printk(KERN_EMERG "%s: cannot handle IRQ %d\n", | ||
234 | __func__, irq); | ||
235 | BUG(); | ||
236 | } | ||
237 | |||
238 | old_regs = set_irq_regs(regs); | ||
239 | irq_enter(); | ||
240 | |||
241 | overflow = check_stack_overflow(); | ||
242 | |||
243 | if (!execute_on_irq_stack(overflow, desc, irq)) { | ||
244 | if (unlikely(overflow)) | ||
245 | print_stack_overflow(); | ||
246 | desc->handle_irq(irq, desc); | ||
247 | } | ||
248 | |||
249 | irq_exit(); | ||
250 | set_irq_regs(old_regs); | ||
251 | return 1; | ||
252 | } | ||
253 | |||
254 | /* | ||
233 | * Interrupt statistics: | 255 | * Interrupt statistics: |
234 | */ | 256 | */ |
235 | 257 | ||
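
The irq_32.c rework above factors the old monolithic do_IRQ() into helpers: check_stack_overflow() and print_stack_overflow() (no-ops unless CONFIG_DEBUG_STACKOVERFLOW), call_on_stack() for the raw stack switch, and execute_on_irq_stack(), which returns 0 when the handler must run on the current stack. Collected from the hunks above, the new control flow is roughly this simplified sketch (not the literal kernel code):

        unsigned int do_IRQ(struct pt_regs *regs)
        {
                struct pt_regs *old_regs = set_irq_regs(regs);
                int irq = ~regs->orig_ax;        /* high bit used in ret_from_ code */
                struct irq_desc *desc = irq_desc + irq;
                int overflow;

                irq_enter();
                overflow = check_stack_overflow();        /* less than 1KB free? */

                /* Run the handler on the per-cpu hardirq stack if we are not
                 * already on it; otherwise fall back to the current stack. */
                if (!execute_on_irq_stack(overflow, desc, irq)) {
                        if (unlikely(overflow))
                                print_stack_overflow();
                        desc->handle_irq(irq, desc);
                }

                irq_exit();
                set_irq_regs(old_regs);
                return 1;
        }
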
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c
index 64bc0f14285f..31f49e8f46a7 100644
--- a/arch/x86/kernel/irqinit_64.c
+++ b/arch/x86/kernel/irqinit_64.c
@@ -34,6 +34,20 @@
34 | * interrupt-controller happy. | 34 | * interrupt-controller happy. |
35 | */ | 35 | */ |
36 | 36 | ||
37 | #define IRQ_NAME2(nr) nr##_interrupt(void) | ||
38 | #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr) | ||
39 | |||
40 | /* | ||
41 | * SMP has a few special interrupts for IPI messages | ||
42 | */ | ||
43 | |||
44 | #define BUILD_IRQ(nr) \ | ||
45 | asmlinkage void IRQ_NAME(nr); \ | ||
46 | asm("\n.p2align\n" \ | ||
47 | "IRQ" #nr "_interrupt:\n\t" \ | ||
48 | "push $~(" #nr ") ; " \ | ||
49 | "jmp common_interrupt"); | ||
50 | |||
37 | #define BI(x,y) \ | 51 | #define BI(x,y) \ |
38 | BUILD_IRQ(x##y) | 52 | BUILD_IRQ(x##y) |
39 | 53 | ||
@@ -170,33 +184,33 @@ void __init native_init_IRQ(void)
170 | * The reschedule interrupt is a CPU-to-CPU reschedule-helper | 184 | * The reschedule interrupt is a CPU-to-CPU reschedule-helper |
171 | * IPI, driven by wakeup. | 185 | * IPI, driven by wakeup. |
172 | */ | 186 | */ |
173 | set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); | 187 | alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); |
174 | 188 | ||
175 | /* IPIs for invalidation */ | 189 | /* IPIs for invalidation */ |
176 | set_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0); | 190 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0); |
177 | set_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1); | 191 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1); |
178 | set_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2); | 192 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2); |
179 | set_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3); | 193 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3); |
180 | set_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4); | 194 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4); |
181 | set_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5); | 195 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5); |
182 | set_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6); | 196 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6); |
183 | set_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7); | 197 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7); |
184 | 198 | ||
185 | /* IPI for generic function call */ | 199 | /* IPI for generic function call */ |
186 | set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); | 200 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); |
187 | 201 | ||
188 | /* Low priority IPI to cleanup after moving an irq */ | 202 | /* Low priority IPI to cleanup after moving an irq */ |
189 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); | 203 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); |
190 | #endif | 204 | #endif |
191 | set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); | 205 | alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); |
192 | set_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt); | 206 | alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt); |
193 | 207 | ||
194 | /* self generated IPI for local APIC timer */ | 208 | /* self generated IPI for local APIC timer */ |
195 | set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); | 209 | alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); |
196 | 210 | ||
197 | /* IPI vectors for APIC spurious and error interrupts */ | 211 | /* IPI vectors for APIC spurious and error interrupts */ |
198 | set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); | 212 | alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); |
199 | set_intr_gate(ERROR_APIC_VECTOR, error_interrupt); | 213 | alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); |
200 | 214 | ||
201 | if (!acpi_ioapic) | 215 | if (!acpi_ioapic) |
202 | setup_irq(2, &irq2); | 216 | setup_irq(2, &irq2); |
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index a2b030780aa9..ba7d19e102b1 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -33,8 +33,7 @@
33 | #include <asm/apic.h> | 33 | #include <asm/apic.h> |
34 | #include <asm/timer.h> | 34 | #include <asm/timer.h> |
35 | #include <asm/i8253.h> | 35 | #include <asm/i8253.h> |
36 | 36 | #include <asm/irq_vectors.h> | |
37 | #include <irq_vectors.h> | ||
38 | 37 | ||
39 | #define VMI_ONESHOT (VMI_ALARM_IS_ONESHOT | VMI_CYCLES_REAL | vmi_get_alarm_wiring()) | 38 | #define VMI_ONESHOT (VMI_ALARM_IS_ONESHOT | VMI_CYCLES_REAL | vmi_get_alarm_wiring()) |
40 | #define VMI_PERIODIC (VMI_ALARM_IS_PERIODIC | VMI_CYCLES_REAL | vmi_get_alarm_wiring()) | 39 | #define VMI_PERIODIC (VMI_ALARM_IS_PERIODIC | VMI_CYCLES_REAL | vmi_get_alarm_wiring()) |
diff --git a/arch/x86/mach-visws/visws_apic.c b/arch/x86/mach-visws/visws_apic.c
index cef9cb1d15ac..d8b2cfd85d92 100644
--- a/arch/x86/mach-visws/visws_apic.c
+++ b/arch/x86/mach-visws/visws_apic.c
@@ -21,10 +21,9 @@
21 | #include <asm/io.h> | 21 | #include <asm/io.h> |
22 | #include <asm/apic.h> | 22 | #include <asm/apic.h> |
23 | #include <asm/i8259.h> | 23 | #include <asm/i8259.h> |
24 | #include <asm/irq_vectors.h> | ||
24 | 25 | ||
25 | #include "cobalt.h" | 26 | #include "cobalt.h" |
26 | #include "irq_vectors.h" | ||
27 | |||
28 | 27 | ||
29 | static DEFINE_SPINLOCK(cobalt_lock); | 28 | static DEFINE_SPINLOCK(cobalt_lock); |
30 | 29 | ||
diff --git a/include/asm-x86/desc.h b/include/asm-x86/desc.h
index 268a012bcd79..b3875d4b4fab 100644
--- a/include/asm-x86/desc.h
+++ b/include/asm-x86/desc.h
@@ -311,6 +311,28 @@ static inline void set_intr_gate(unsigned int n, void *addr)
311 | _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS); | 311 | _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS); |
312 | } | 312 | } |
313 | 313 | ||
314 | #define SYS_VECTOR_FREE 0 | ||
315 | #define SYS_VECTOR_ALLOCED 1 | ||
316 | |||
317 | extern int first_system_vector; | ||
318 | extern char system_vectors[]; | ||
319 | |||
320 | static inline void alloc_system_vector(int vector) | ||
321 | { | ||
322 | if (system_vectors[vector] == SYS_VECTOR_FREE) { | ||
323 | system_vectors[vector] = SYS_VECTOR_ALLOCED; | ||
324 | if (first_system_vector > vector) | ||
325 | first_system_vector = vector; | ||
326 | } else | ||
327 | BUG(); | ||
328 | } | ||
329 | |||
330 | static inline void alloc_intr_gate(unsigned int n, void *addr) | ||
331 | { | ||
332 | alloc_system_vector(n); | ||
333 | set_intr_gate(n, addr); | ||
334 | } | ||
335 | |||
314 | /* | 336 | /* |
315 | * This routine sets up an interrupt gate at directory privilege level 3. | 337 | * This routine sets up an interrupt gate at directory privilege level 3. |
316 | */ | 338 | */ |
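
The desc.h hunk above carries the core mechanism of this series: system vectors are no longer implied by a fixed FIRST_SYSTEM_VECTOR constant but are claimed explicitly. alloc_system_vector() marks the entry in system_vectors[], BUG()s on a double allocation, and lowers first_system_vector when a lower vector is claimed; alloc_intr_gate() then installs the IDT gate. That lets the __assign_irq_vector() loops in io_apic_32.c and io_apic_64.c (earlier in this diff) hand out device vectors right up to the lowest vector actually in use by a system interrupt. An illustrative init-time usage sketch, using the x86-64 vector values from the old hw_irq_64.h:

        /* Claim the high-priority system vectors at init time ... */
        alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);    /* 0xfa */
        alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);  /* 0xef */

        /*
         * ... after which first_system_vector == 0xef, so __assign_irq_vector()
         * can hand out 0x41 .. 0xee (stepping by 8 and wrapping below
         * first_system_vector) to ordinary device interrupts.
         */
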
diff --git a/include/asm-x86/genapic_64.h b/include/asm-x86/genapic_64.h
index 1de931b263ce..0f8504627c41 100644
--- a/include/asm-x86/genapic_64.h
+++ b/include/asm-x86/genapic_64.h
@@ -44,4 +44,6 @@ DECLARE_PER_CPU(int, x2apic_extra_bits);
44 | extern void uv_cpu_init(void); | 44 | extern void uv_cpu_init(void); |
45 | extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip); | 45 | extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip); |
46 | 46 | ||
47 | extern void setup_apic_routing(void); | ||
48 | |||
47 | #endif | 49 | #endif |
diff --git a/include/asm-x86/hw_irq.h b/include/asm-x86/hw_irq.h
index bf025399d939..1428b41dcbb9 100644
--- a/include/asm-x86/hw_irq.h
+++ b/include/asm-x86/hw_irq.h
@@ -1,5 +1,106 @@
1 | #ifndef _ASM_HW_IRQ_H | ||
2 | #define _ASM_HW_IRQ_H | ||
3 | |||
4 | /* | ||
5 | * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar | ||
6 | * | ||
7 | * moved some of the old arch/i386/kernel/irq.h to here. VY | ||
8 | * | ||
9 | * IRQ/IPI changes taken from work by Thomas Radke | ||
10 | * <tomsoft@informatik.tu-chemnitz.de> | ||
11 | * | ||
12 | * hacked by Andi Kleen for x86-64. | ||
13 | * unified by tglx | ||
14 | */ | ||
15 | |||
16 | #include <asm/irq_vectors.h> | ||
17 | |||
18 | #ifndef __ASSEMBLY__ | ||
19 | |||
20 | #include <linux/percpu.h> | ||
21 | #include <linux/profile.h> | ||
22 | #include <linux/smp.h> | ||
23 | |||
24 | #include <asm/atomic.h> | ||
25 | #include <asm/irq.h> | ||
26 | #include <asm/sections.h> | ||
27 | |||
28 | #define platform_legacy_irq(irq) ((irq) < 16) | ||
29 | |||
30 | /* Interrupt handlers registered during init_IRQ */ | ||
31 | extern void apic_timer_interrupt(void); | ||
32 | extern void error_interrupt(void); | ||
33 | extern void spurious_interrupt(void); | ||
34 | extern void thermal_interrupt(void); | ||
35 | extern void reschedule_interrupt(void); | ||
36 | |||
37 | extern void invalidate_interrupt(void); | ||
38 | extern void invalidate_interrupt0(void); | ||
39 | extern void invalidate_interrupt1(void); | ||
40 | extern void invalidate_interrupt2(void); | ||
41 | extern void invalidate_interrupt3(void); | ||
42 | extern void invalidate_interrupt4(void); | ||
43 | extern void invalidate_interrupt5(void); | ||
44 | extern void invalidate_interrupt6(void); | ||
45 | extern void invalidate_interrupt7(void); | ||
46 | |||
47 | extern void irq_move_cleanup_interrupt(void); | ||
48 | extern void threshold_interrupt(void); | ||
49 | |||
50 | extern void call_function_interrupt(void); | ||
51 | |||
52 | /* PIC specific functions */ | ||
53 | extern void disable_8259A_irq(unsigned int irq); | ||
54 | extern void enable_8259A_irq(unsigned int irq); | ||
55 | extern int i8259A_irq_pending(unsigned int irq); | ||
56 | extern void make_8259A_irq(unsigned int irq); | ||
57 | extern void init_8259A(int aeoi); | ||
58 | |||
59 | /* IOAPIC */ | ||
60 | #define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs)) | ||
61 | extern unsigned long io_apic_irqs; | ||
62 | |||
63 | extern void init_VISWS_APIC_irqs(void); | ||
64 | extern void setup_IO_APIC(void); | ||
65 | extern void disable_IO_APIC(void); | ||
66 | extern void print_IO_APIC(void); | ||
67 | extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); | ||
68 | extern void setup_ioapic_dest(void); | ||
69 | |||
70 | #ifdef CONFIG_X86_64 | ||
71 | extern void enable_IO_APIC(void); | ||
72 | #endif | ||
73 | |||
74 | /* IPI functions */ | ||
75 | extern void send_IPI_self(int vector); | ||
76 | extern void send_IPI(int dest, int vector); | ||
77 | |||
78 | /* Statistics */ | ||
79 | extern atomic_t irq_err_count; | ||
80 | extern atomic_t irq_mis_count; | ||
81 | |||
82 | /* EISA */ | ||
83 | extern void eisa_set_level_irq(unsigned int irq); | ||
84 | |||
85 | /* Voyager functions */ | ||
86 | extern asmlinkage void vic_cpi_interrupt(void); | ||
87 | extern asmlinkage void vic_sys_interrupt(void); | ||
88 | extern asmlinkage void vic_cmn_interrupt(void); | ||
89 | extern asmlinkage void qic_timer_interrupt(void); | ||
90 | extern asmlinkage void qic_invalidate_interrupt(void); | ||
91 | extern asmlinkage void qic_reschedule_interrupt(void); | ||
92 | extern asmlinkage void qic_enable_irq_interrupt(void); | ||
93 | extern asmlinkage void qic_call_function_interrupt(void); | ||
94 | |||
1 | #ifdef CONFIG_X86_32 | 95 | #ifdef CONFIG_X86_32 |
2 | # include "hw_irq_32.h" | 96 | extern void (*const interrupt[NR_IRQS])(void); |
3 | #else | 97 | #else |
4 | # include "hw_irq_64.h" | 98 | typedef int vector_irq_t[NR_VECTORS]; |
99 | DECLARE_PER_CPU(vector_irq_t, vector_irq); | ||
100 | extern void __setup_vector_irq(int cpu); | ||
101 | extern spinlock_t vector_lock; | ||
102 | #endif | ||
103 | |||
104 | #endif /* !ASSEMBLY_ */ | ||
105 | |||
5 | #endif | 106 | #endif |
diff --git a/include/asm-x86/hw_irq_32.h b/include/asm-x86/hw_irq_32.h
deleted file mode 100644
index ea88054e03f3..000000000000
--- a/include/asm-x86/hw_irq_32.h
+++ /dev/null
@@ -1,66 +0,0 @@
1 | #ifndef _ASM_HW_IRQ_H | ||
2 | #define _ASM_HW_IRQ_H | ||
3 | |||
4 | /* | ||
5 | * linux/include/asm/hw_irq.h | ||
6 | * | ||
7 | * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar | ||
8 | * | ||
9 | * moved some of the old arch/i386/kernel/irq.h to here. VY | ||
10 | * | ||
11 | * IRQ/IPI changes taken from work by Thomas Radke | ||
12 | * <tomsoft@informatik.tu-chemnitz.de> | ||
13 | */ | ||
14 | |||
15 | #include <linux/profile.h> | ||
16 | #include <asm/atomic.h> | ||
17 | #include <asm/irq.h> | ||
18 | #include <asm/sections.h> | ||
19 | |||
20 | #define NMI_VECTOR 0x02 | ||
21 | |||
22 | /* | ||
23 | * Various low-level irq details needed by irq.c, process.c, | ||
24 | * time.c, io_apic.c and smp.c | ||
25 | * | ||
26 | * Interrupt entry/exit code at both C and assembly level | ||
27 | */ | ||
28 | |||
29 | extern void (*const interrupt[NR_IRQS])(void); | ||
30 | |||
31 | #ifdef CONFIG_SMP | ||
32 | void reschedule_interrupt(void); | ||
33 | void invalidate_interrupt(void); | ||
34 | void call_function_interrupt(void); | ||
35 | #endif | ||
36 | |||
37 | #ifdef CONFIG_X86_LOCAL_APIC | ||
38 | void apic_timer_interrupt(void); | ||
39 | void error_interrupt(void); | ||
40 | void spurious_interrupt(void); | ||
41 | void thermal_interrupt(void); | ||
42 | #define platform_legacy_irq(irq) ((irq) < 16) | ||
43 | #endif | ||
44 | |||
45 | void disable_8259A_irq(unsigned int irq); | ||
46 | void enable_8259A_irq(unsigned int irq); | ||
47 | int i8259A_irq_pending(unsigned int irq); | ||
48 | void make_8259A_irq(unsigned int irq); | ||
49 | void init_8259A(int aeoi); | ||
50 | void send_IPI_self(int vector); | ||
51 | void init_VISWS_APIC_irqs(void); | ||
52 | void setup_IO_APIC(void); | ||
53 | void disable_IO_APIC(void); | ||
54 | void print_IO_APIC(void); | ||
55 | int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); | ||
56 | void send_IPI(int dest, int vector); | ||
57 | void setup_ioapic_dest(void); | ||
58 | |||
59 | extern unsigned long io_apic_irqs; | ||
60 | |||
61 | extern atomic_t irq_err_count; | ||
62 | extern atomic_t irq_mis_count; | ||
63 | |||
64 | #define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs)) | ||
65 | |||
66 | #endif /* _ASM_HW_IRQ_H */ | ||
diff --git a/include/asm-x86/hw_irq_64.h b/include/asm-x86/hw_irq_64.h
deleted file mode 100644
index 0062ef390f67..000000000000
--- a/include/asm-x86/hw_irq_64.h
+++ /dev/null
@@ -1,173 +0,0 @@
1 | #ifndef _ASM_HW_IRQ_H | ||
2 | #define _ASM_HW_IRQ_H | ||
3 | |||
4 | /* | ||
5 | * linux/include/asm/hw_irq.h | ||
6 | * | ||
7 | * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar | ||
8 | * | ||
9 | * moved some of the old arch/i386/kernel/irq.h to here. VY | ||
10 | * | ||
11 | * IRQ/IPI changes taken from work by Thomas Radke | ||
12 | * <tomsoft@informatik.tu-chemnitz.de> | ||
13 | * | ||
14 | * hacked by Andi Kleen for x86-64. | ||
15 | */ | ||
16 | |||
17 | #ifndef __ASSEMBLY__ | ||
18 | #include <asm/atomic.h> | ||
19 | #include <asm/irq.h> | ||
20 | #include <linux/profile.h> | ||
21 | #include <linux/smp.h> | ||
22 | #include <linux/percpu.h> | ||
23 | #endif | ||
24 | |||
25 | #define NMI_VECTOR 0x02 | ||
26 | /* | ||
27 | * IDT vectors usable for external interrupt sources start | ||
28 | * at 0x20: | ||
29 | */ | ||
30 | #define FIRST_EXTERNAL_VECTOR 0x20 | ||
31 | |||
32 | #define IA32_SYSCALL_VECTOR 0x80 | ||
33 | |||
34 | |||
35 | /* Reserve the lowest usable priority level 0x20 - 0x2f for triggering | ||
36 | * cleanup after irq migration. | ||
37 | */ | ||
38 | #define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR | ||
39 | |||
40 | /* | ||
41 | * Vectors 0x30-0x3f are used for ISA interrupts. | ||
42 | */ | ||
43 | #define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10) | ||
44 | #define IRQ1_VECTOR (IRQ0_VECTOR + 1) | ||
45 | #define IRQ2_VECTOR (IRQ0_VECTOR + 2) | ||
46 | #define IRQ3_VECTOR (IRQ0_VECTOR + 3) | ||
47 | #define IRQ4_VECTOR (IRQ0_VECTOR + 4) | ||
48 | #define IRQ5_VECTOR (IRQ0_VECTOR + 5) | ||
49 | #define IRQ6_VECTOR (IRQ0_VECTOR + 6) | ||
50 | #define IRQ7_VECTOR (IRQ0_VECTOR + 7) | ||
51 | #define IRQ8_VECTOR (IRQ0_VECTOR + 8) | ||
52 | #define IRQ9_VECTOR (IRQ0_VECTOR + 9) | ||
53 | #define IRQ10_VECTOR (IRQ0_VECTOR + 10) | ||
54 | #define IRQ11_VECTOR (IRQ0_VECTOR + 11) | ||
55 | #define IRQ12_VECTOR (IRQ0_VECTOR + 12) | ||
56 | #define IRQ13_VECTOR (IRQ0_VECTOR + 13) | ||
57 | #define IRQ14_VECTOR (IRQ0_VECTOR + 14) | ||
58 | #define IRQ15_VECTOR (IRQ0_VECTOR + 15) | ||
59 | |||
60 | /* | ||
61 | * Special IRQ vectors used by the SMP architecture, 0xf0-0xff | ||
62 | * | ||
63 | * some of the following vectors are 'rare', they are merged | ||
64 | * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. | ||
65 | * TLB, reschedule and local APIC vectors are performance-critical. | ||
66 | */ | ||
67 | #define SPURIOUS_APIC_VECTOR 0xff | ||
68 | #define ERROR_APIC_VECTOR 0xfe | ||
69 | #define RESCHEDULE_VECTOR 0xfd | ||
70 | #define CALL_FUNCTION_VECTOR 0xfc | ||
71 | /* fb free - please don't readd KDB here because it's useless | ||
72 | (hint - think what a NMI bit does to a vector) */ | ||
73 | #define THERMAL_APIC_VECTOR 0xfa | ||
74 | #define THRESHOLD_APIC_VECTOR 0xf9 | ||
75 | /* f8 free */ | ||
76 | #define INVALIDATE_TLB_VECTOR_END 0xf7 | ||
77 | #define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */ | ||
78 | |||
79 | #define NUM_INVALIDATE_TLB_VECTORS 8 | ||
80 | |||
81 | /* | ||
82 | * Local APIC timer IRQ vector is on a different priority level, | ||
83 | * to work around the 'lost local interrupt if more than 2 IRQ | ||
84 | * sources per level' errata. | ||
85 | */ | ||
86 | #define LOCAL_TIMER_VECTOR 0xef | ||
87 | |||
88 | /* | ||
89 | * First APIC vector available to drivers: (vectors 0x30-0xee) | ||
90 | * we start at 0x41 to spread out vectors evenly between priority | ||
91 | * levels. (0x80 is the syscall vector) | ||
92 | */ | ||
93 | #define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2) | ||
94 | #define FIRST_SYSTEM_VECTOR 0xef /* duplicated in irq.h */ | ||
95 | |||
96 | |||
97 | #ifndef __ASSEMBLY__ | ||
98 | |||
99 | /* Interrupt handlers registered during init_IRQ */ | ||
100 | void apic_timer_interrupt(void); | ||
101 | void spurious_interrupt(void); | ||
102 | void error_interrupt(void); | ||
103 | void reschedule_interrupt(void); | ||
104 | void call_function_interrupt(void); | ||
105 | void irq_move_cleanup_interrupt(void); | ||
106 | void invalidate_interrupt0(void); | ||
107 | void invalidate_interrupt1(void); | ||
108 | void invalidate_interrupt2(void); | ||
109 | void invalidate_interrupt3(void); | ||
110 | void invalidate_interrupt4(void); | ||
111 | void invalidate_interrupt5(void); | ||
112 | void invalidate_interrupt6(void); | ||
113 | void invalidate_interrupt7(void); | ||
114 | void thermal_interrupt(void); | ||
115 | void threshold_interrupt(void); | ||
116 | void i8254_timer_resume(void); | ||
117 | |||
118 | typedef int vector_irq_t[NR_VECTORS]; | ||
119 | DECLARE_PER_CPU(vector_irq_t, vector_irq); | ||
120 | extern void __setup_vector_irq(int cpu); | ||
121 | extern spinlock_t vector_lock; | ||
122 | |||
123 | /* | ||
124 | * Various low-level irq details needed by irq.c, process.c, | ||
125 | * time.c, io_apic.c and smp.c | ||
126 | * | ||
127 | * Interrupt entry/exit code at both C and assembly level | ||
128 | */ | ||
129 | |||
130 | extern void disable_8259A_irq(unsigned int irq); | ||
131 | extern void enable_8259A_irq(unsigned int irq); | ||
132 | extern int i8259A_irq_pending(unsigned int irq); | ||
133 | extern void make_8259A_irq(unsigned int irq); | ||
134 | extern void init_8259A(int aeoi); | ||
135 | extern void send_IPI_self(int vector); | ||
136 | extern void init_VISWS_APIC_irqs(void); | ||
137 | extern void setup_IO_APIC(void); | ||
138 | extern void enable_IO_APIC(void); | ||
139 | extern void disable_IO_APIC(void); | ||
140 | extern void print_IO_APIC(void); | ||
141 | extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); | ||
142 | extern void send_IPI(int dest, int vector); | ||
143 | extern void setup_ioapic_dest(void); | ||
144 | extern void native_init_IRQ(void); | ||
145 | |||
146 | extern unsigned long io_apic_irqs; | ||
147 | |||
148 | extern atomic_t irq_err_count; | ||
149 | extern atomic_t irq_mis_count; | ||
150 | |||
151 | #define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs)) | ||
152 | |||
153 | #include <asm/ptrace.h> | ||
154 | |||
155 | #define IRQ_NAME2(nr) nr##_interrupt(void) | ||
156 | #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr) | ||
157 | |||
158 | /* | ||
159 | * SMP has a few special interrupts for IPI messages | ||
160 | */ | ||
161 | |||
162 | #define BUILD_IRQ(nr) \ | ||
163 | asmlinkage void IRQ_NAME(nr); \ | ||
164 | asm("\n.p2align\n" \ | ||
165 | "IRQ" #nr "_interrupt:\n\t" \ | ||
166 | "push $~(" #nr ") ; " \ | ||
167 | "jmp common_interrupt"); | ||
168 | |||
169 | #define platform_legacy_irq(irq) ((irq) < 16) | ||
170 | |||
171 | #endif | ||
172 | |||
173 | #endif /* _ASM_HW_IRQ_H */ | ||
diff --git a/include/asm-x86/irq.h b/include/asm-x86/irq.h
index 7ba905465a53..1a2925757317 100644
--- a/include/asm-x86/irq.h
+++ b/include/asm-x86/irq.h
@@ -1,5 +1,50 @@
1 | #ifdef CONFIG_X86_32 | 1 | #ifndef _ASM_IRQ_H |
2 | # include "irq_32.h" | 2 | #define _ASM_IRQ_H |
3 | /* | ||
4 | * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar | ||
5 | * | ||
6 | * IRQ/IPI changes taken from work by Thomas Radke | ||
7 | * <tomsoft@informatik.tu-chemnitz.de> | ||
8 | */ | ||
9 | |||
10 | #include <asm/apicdef.h> | ||
11 | #include <asm/irq_vectors.h> | ||
12 | |||
13 | static inline int irq_canonicalize(int irq) | ||
14 | { | ||
15 | return ((irq == 2) ? 9 : irq); | ||
16 | } | ||
17 | |||
18 | #ifdef CONFIG_X86_LOCAL_APIC | ||
19 | # define ARCH_HAS_NMI_WATCHDOG | ||
20 | #endif | ||
21 | |||
22 | #ifdef CONFIG_4KSTACKS | ||
23 | extern void irq_ctx_init(int cpu); | ||
24 | extern void irq_ctx_exit(int cpu); | ||
25 | # define __ARCH_HAS_DO_SOFTIRQ | ||
3 | #else | 26 | #else |
4 | # include "irq_64.h" | 27 | # define irq_ctx_init(cpu) do { } while (0) |
28 | # define irq_ctx_exit(cpu) do { } while (0) | ||
29 | # ifdef CONFIG_X86_64 | ||
30 | # define __ARCH_HAS_DO_SOFTIRQ | ||
31 | # endif | ||
32 | #endif | ||
33 | |||
34 | #ifdef CONFIG_IRQBALANCE | ||
35 | extern int irqbalance_disable(char *str); | ||
36 | #endif | ||
37 | |||
38 | #ifdef CONFIG_HOTPLUG_CPU | ||
39 | #include <linux/cpumask.h> | ||
40 | extern void fixup_irqs(cpumask_t map); | ||
5 | #endif | 41 | #endif |
42 | |||
43 | extern unsigned int do_IRQ(struct pt_regs *regs); | ||
44 | extern void init_IRQ(void); | ||
45 | extern void native_init_IRQ(void); | ||
46 | |||
47 | /* Interrupt vector management */ | ||
48 | extern DECLARE_BITMAP(used_vectors, NR_VECTORS); | ||
49 | |||
50 | #endif /* _ASM_IRQ_H */ | ||
diff --git a/include/asm-x86/irq_32.h b/include/asm-x86/irq_32.h
deleted file mode 100644
index 0b79f3185243..000000000000
--- a/include/asm-x86/irq_32.h
+++ /dev/null
@@ -1,51 +0,0 @@
1 | #ifndef _ASM_IRQ_H | ||
2 | #define _ASM_IRQ_H | ||
3 | |||
4 | /* | ||
5 | * linux/include/asm/irq.h | ||
6 | * | ||
7 | * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar | ||
8 | * | ||
9 | * IRQ/IPI changes taken from work by Thomas Radke | ||
10 | * <tomsoft@informatik.tu-chemnitz.de> | ||
11 | */ | ||
12 | |||
13 | #include <linux/sched.h> | ||
14 | /* include comes from machine specific directory */ | ||
15 | #include "irq_vectors.h" | ||
16 | #include <asm/thread_info.h> | ||
17 | |||
18 | static inline int irq_canonicalize(int irq) | ||
19 | { | ||
20 | return ((irq == 2) ? 9 : irq); | ||
21 | } | ||
22 | |||
23 | #ifdef CONFIG_X86_LOCAL_APIC | ||
24 | # define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */ | ||
25 | #endif | ||
26 | |||
27 | #ifdef CONFIG_4KSTACKS | ||
28 | extern void irq_ctx_init(int cpu); | ||
29 | extern void irq_ctx_exit(int cpu); | ||
30 | # define __ARCH_HAS_DO_SOFTIRQ | ||
31 | #else | ||
32 | # define irq_ctx_init(cpu) do { } while (0) | ||
33 | # define irq_ctx_exit(cpu) do { } while (0) | ||
34 | #endif | ||
35 | |||
36 | #ifdef CONFIG_IRQBALANCE | ||
37 | extern int irqbalance_disable(char *str); | ||
38 | #endif | ||
39 | |||
40 | #ifdef CONFIG_HOTPLUG_CPU | ||
41 | extern void fixup_irqs(cpumask_t map); | ||
42 | #endif | ||
43 | |||
44 | unsigned int do_IRQ(struct pt_regs *regs); | ||
45 | void init_IRQ(void); | ||
46 | void __init native_init_IRQ(void); | ||
47 | |||
48 | /* Interrupt vector management */ | ||
49 | extern DECLARE_BITMAP(used_vectors, NR_VECTORS); | ||
50 | |||
51 | #endif /* _ASM_IRQ_H */ | ||
diff --git a/include/asm-x86/irq_64.h b/include/asm-x86/irq_64.h deleted file mode 100644 index 083d35a62c94..000000000000 --- a/include/asm-x86/irq_64.h +++ /dev/null | |||
@@ -1,51 +0,0 @@ | |||
1 | #ifndef _ASM_IRQ_H | ||
2 | #define _ASM_IRQ_H | ||
3 | |||
4 | /* | ||
5 | * linux/include/asm/irq.h | ||
6 | * | ||
7 | * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar | ||
8 | * | ||
9 | * IRQ/IPI changes taken from work by Thomas Radke | ||
10 | * <tomsoft@informatik.tu-chemnitz.de> | ||
11 | */ | ||
12 | |||
13 | #define TIMER_IRQ 0 | ||
14 | |||
15 | /* | ||
16 | * 16 8259A IRQ's, 208 potential APIC interrupt sources. | ||
17 | * Right now the APIC is mostly only used for SMP. | ||
18 | * 256 vectors is an architectural limit. (we can have | ||
19 | * more than 256 devices theoretically, but they will | ||
20 | * have to use shared interrupts) | ||
21 | * Since vectors 0x00-0x1f are used/reserved for the CPU, | ||
22 | * the usable vector space is 0x20-0xff (224 vectors) | ||
23 | */ | ||
24 | |||
25 | /* | ||
26 | * The maximum number of vectors supported by x86_64 processors | ||
27 | * is limited to 256. For processors other than x86_64, NR_VECTORS | ||
28 | * should be changed accordingly. | ||
29 | */ | ||
30 | #define NR_VECTORS 256 | ||
31 | |||
32 | #define FIRST_SYSTEM_VECTOR 0xef /* duplicated in hw_irq.h */ | ||
33 | |||
34 | #define NR_IRQS (NR_VECTORS + (32 * NR_CPUS)) | ||
35 | #define NR_IRQ_VECTORS NR_IRQS | ||
36 | |||
37 | static inline int irq_canonicalize(int irq) | ||
38 | { | ||
39 | return ((irq == 2) ? 9 : irq); | ||
40 | } | ||
41 | |||
42 | #define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */ | ||
43 | |||
44 | #ifdef CONFIG_HOTPLUG_CPU | ||
45 | #include <linux/cpumask.h> | ||
46 | extern void fixup_irqs(cpumask_t map); | ||
47 | #endif | ||
48 | |||
49 | #define __ARCH_HAS_DO_SOFTIRQ 1 | ||
50 | |||
51 | #endif /* _ASM_IRQ_H */ | ||
diff --git a/include/asm-x86/irq_vectors.h b/include/asm-x86/irq_vectors.h new file mode 100644 index 000000000000..b58581e2e24e --- /dev/null +++ b/include/asm-x86/irq_vectors.h | |||
@@ -0,0 +1,169 @@ | |||
1 | #ifndef _ASM_IRQ_VECTORS_H | ||
2 | #define _ASM_IRQ_VECTORS_H | ||
3 | |||
4 | #include <linux/threads.h> | ||
5 | |||
6 | #define NMI_VECTOR 0x02 | ||
7 | |||
8 | /* | ||
9 | * IDT vectors usable for external interrupt sources start | ||
10 | * at 0x20: | ||
11 | */ | ||
12 | #define FIRST_EXTERNAL_VECTOR 0x20 | ||
13 | |||
14 | #ifdef CONFIG_X86_32 | ||
15 | # define SYSCALL_VECTOR 0x80 | ||
16 | #else | ||
17 | # define IA32_SYSCALL_VECTOR 0x80 | ||
18 | #endif | ||
19 | |||
20 | /* | ||
21 | * Reserve the lowest usable priority level 0x20 - 0x2f for triggering | ||
22 | * cleanup after irq migration on 64 bit. | ||
23 | */ | ||
24 | #define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR | ||
25 | |||
26 | /* | ||
27 | * Vectors 0x20-0x2f are used for ISA interrupts on 32 bit. | ||
28 | * Vectors 0x30-0x3f are used for ISA interrupts on 64 bit. | ||
29 | */ | ||
30 | #ifdef CONFIG_X86_32 | ||
31 | #define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR) | ||
32 | #else | ||
33 | #define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10) | ||
34 | #endif | ||
35 | #define IRQ1_VECTOR (IRQ0_VECTOR + 1) | ||
36 | #define IRQ2_VECTOR (IRQ0_VECTOR + 2) | ||
37 | #define IRQ3_VECTOR (IRQ0_VECTOR + 3) | ||
38 | #define IRQ4_VECTOR (IRQ0_VECTOR + 4) | ||
39 | #define IRQ5_VECTOR (IRQ0_VECTOR + 5) | ||
40 | #define IRQ6_VECTOR (IRQ0_VECTOR + 6) | ||
41 | #define IRQ7_VECTOR (IRQ0_VECTOR + 7) | ||
42 | #define IRQ8_VECTOR (IRQ0_VECTOR + 8) | ||
43 | #define IRQ9_VECTOR (IRQ0_VECTOR + 9) | ||
44 | #define IRQ10_VECTOR (IRQ0_VECTOR + 10) | ||
45 | #define IRQ11_VECTOR (IRQ0_VECTOR + 11) | ||
46 | #define IRQ12_VECTOR (IRQ0_VECTOR + 12) | ||
47 | #define IRQ13_VECTOR (IRQ0_VECTOR + 13) | ||
48 | #define IRQ14_VECTOR (IRQ0_VECTOR + 14) | ||
49 | #define IRQ15_VECTOR (IRQ0_VECTOR + 15) | ||
50 | |||
51 | /* | ||
52 | * Special IRQ vectors used by the SMP architecture, 0xf0-0xff | ||
53 | * | ||
54 | * some of the following vectors are 'rare', they are merged | ||
55 | * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. | ||
56 | * TLB, reschedule and local APIC vectors are performance-critical. | ||
57 | * | ||
58 | * Vectors 0xf0-0xfa are free (reserved for future Linux use). | ||
59 | */ | ||
60 | #ifdef CONFIG_X86_32 | ||
61 | |||
62 | # define SPURIOUS_APIC_VECTOR 0xff | ||
63 | # define ERROR_APIC_VECTOR 0xfe | ||
64 | # define INVALIDATE_TLB_VECTOR 0xfd | ||
65 | # define RESCHEDULE_VECTOR 0xfc | ||
66 | # define CALL_FUNCTION_VECTOR 0xfb | ||
67 | # define THERMAL_APIC_VECTOR 0xf0 | ||
68 | |||
69 | #else | ||
70 | |||
71 | #define SPURIOUS_APIC_VECTOR 0xff | ||
72 | #define ERROR_APIC_VECTOR 0xfe | ||
73 | #define RESCHEDULE_VECTOR 0xfd | ||
74 | #define CALL_FUNCTION_VECTOR 0xfc | ||
75 | #define THERMAL_APIC_VECTOR 0xfa | ||
76 | #define THRESHOLD_APIC_VECTOR 0xf9 | ||
77 | #define INVALIDATE_TLB_VECTOR_END 0xf7 | ||
78 | #define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */ | ||
79 | |||
80 | #define NUM_INVALIDATE_TLB_VECTORS 8 | ||
81 | |||
82 | #endif | ||
83 | |||
84 | /* | ||
85 | * Local APIC timer IRQ vector is on a different priority level, | ||
86 | * to work around the 'lost local interrupt if more than 2 IRQ | ||
87 | * sources per level' errata. | ||
88 | */ | ||
89 | #define LOCAL_TIMER_VECTOR 0xef | ||
90 | |||
91 | /* | ||
92 | * First APIC vector available to drivers: (vectors 0x30-0xee) we | ||
93 | * start at 0x31(0x41) to spread out vectors evenly between priority | ||
94 | * levels. (0x80 is the syscall vector) | ||
95 | */ | ||
96 | #ifdef CONFIG_X86_32 | ||
97 | # define FIRST_DEVICE_VECTOR 0x31 | ||
98 | #else | ||
99 | # define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2) | ||
100 | #endif | ||
101 | |||
102 | #define NR_VECTORS 256 | ||
103 | |||
104 | #define FPU_IRQ 13 | ||
105 | |||
106 | #define FIRST_VM86_IRQ 3 | ||
107 | #define LAST_VM86_IRQ 15 | ||
108 | #define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) | ||
109 | |||
110 | #if !defined(CONFIG_X86_VISWS) && !defined(CONFIG_X86_VOYAGER) | ||
111 | |||
112 | # if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT) | ||
113 | |||
114 | # define NR_IRQS 224 | ||
115 | |||
116 | # if (224 >= 32 * NR_CPUS) | ||
117 | # define NR_IRQ_VECTORS NR_IRQS | ||
118 | # else | ||
119 | # define NR_IRQ_VECTORS (32 * NR_CPUS) | ||
120 | # endif | ||
121 | |||
122 | # else /* IO_APIC || PARAVIRT */ | ||
123 | |||
124 | # define NR_IRQS 16 | ||
125 | # define NR_IRQ_VECTORS NR_IRQS | ||
126 | |||
127 | # endif | ||
128 | |||
129 | #else /* !VISWS && !VOYAGER */ | ||
130 | |||
131 | # define NR_IRQS 224 | ||
132 | # define NR_IRQ_VECTORS NR_IRQS | ||
133 | |||
134 | #endif /* VISWS */ | ||
135 | |||
136 | /* Voyager specific defines */ | ||
137 | /* These define the CPIs we use in linux */ | ||
138 | #define VIC_CPI_LEVEL0 0 | ||
139 | #define VIC_CPI_LEVEL1 1 | ||
140 | /* now the fake CPIs */ | ||
141 | #define VIC_TIMER_CPI 2 | ||
142 | #define VIC_INVALIDATE_CPI 3 | ||
143 | #define VIC_RESCHEDULE_CPI 4 | ||
144 | #define VIC_ENABLE_IRQ_CPI 5 | ||
145 | #define VIC_CALL_FUNCTION_CPI 6 | ||
146 | |||
147 | /* Now the QIC CPIs: Since we don't need the two initial levels, | ||
148 | * these are 2 less than the VIC CPIs */ | ||
149 | #define QIC_CPI_OFFSET 1 | ||
150 | #define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET) | ||
151 | #define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET) | ||
152 | #define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET) | ||
153 | #define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET) | ||
154 | #define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET) | ||
155 | |||
156 | #define VIC_START_FAKE_CPI VIC_TIMER_CPI | ||
157 | #define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_CPI | ||
158 | |||
159 | /* this is the SYS_INT CPI. */ | ||
160 | #define VIC_SYS_INT 8 | ||
161 | #define VIC_CMN_INT 15 | ||
162 | |||
163 | /* This is the boot CPI for alternate processors. It gets overwritten | ||
164 | * by the above once the system has activated all available processors */ | ||
165 | #define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0 | ||
166 | #define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8) | ||
167 | |||
168 | |||
169 | #endif /* _ASM_IRQ_VECTORS_H */ | ||
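
Editor's note: the consolidated irq_vectors.h folds the 32-bit and 64-bit layouts into one header. ISA IRQs 0-15 land on vectors 0x20-0x2f on 32-bit and 0x30-0x3f on 64-bit, because IRQ0_VECTOR is FIRST_EXTERNAL_VECTOR in one branch and FIRST_EXTERNAL_VECTOR + 0x10 in the other. A small sketch of the 32-bit mapping (the helper name is made up for illustration):

/* Sketch under the CONFIG_X86_32 branch above: ISA IRQ n sits at
 * vector IRQ0_VECTOR + n = 0x20 + n (on 64-bit it sits at 0x30 + n). */
static inline int isa_irq_to_vector(int irq)    /* hypothetical helper */
{
        return 0x20 + irq;      /* e.g. IRQ 14 (primary IDE) -> 0x2e   */
}
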
diff --git a/include/asm-x86/mach-default/irq_vectors.h b/include/asm-x86/mach-default/irq_vectors.h deleted file mode 100644 index 881c63ca61ad..000000000000 --- a/include/asm-x86/mach-default/irq_vectors.h +++ /dev/null | |||
@@ -1,96 +0,0 @@ | |||
1 | /* | ||
2 | * This file should contain #defines for all of the interrupt vector | ||
3 | * numbers used by this architecture. | ||
4 | * | ||
5 | * In addition, there are some standard defines: | ||
6 | * | ||
7 | * FIRST_EXTERNAL_VECTOR: | ||
8 | * The first free place for external interrupts | ||
9 | * | ||
10 | * SYSCALL_VECTOR: | ||
11 | * The IRQ vector a syscall makes the user to kernel transition | ||
12 | * under. | ||
13 | * | ||
14 | * TIMER_IRQ: | ||
15 | * The IRQ number the timer interrupt comes in at. | ||
16 | * | ||
17 | * NR_IRQS: | ||
18 | * The total number of interrupt vectors (including all the | ||
19 | * architecture specific interrupts) needed. | ||
20 | * | ||
21 | */ | ||
22 | #ifndef _ASM_IRQ_VECTORS_H | ||
23 | #define _ASM_IRQ_VECTORS_H | ||
24 | |||
25 | /* | ||
26 | * IDT vectors usable for external interrupt sources start | ||
27 | * at 0x20: | ||
28 | */ | ||
29 | #define FIRST_EXTERNAL_VECTOR 0x20 | ||
30 | |||
31 | #define SYSCALL_VECTOR 0x80 | ||
32 | |||
33 | /* | ||
34 | * Vectors 0x20-0x2f are used for ISA interrupts. | ||
35 | */ | ||
36 | |||
37 | /* | ||
38 | * Special IRQ vectors used by the SMP architecture, 0xf0-0xff | ||
39 | * | ||
40 | * some of the following vectors are 'rare', they are merged | ||
41 | * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. | ||
42 | * TLB, reschedule and local APIC vectors are performance-critical. | ||
43 | * | ||
44 | * Vectors 0xf0-0xfa are free (reserved for future Linux use). | ||
45 | */ | ||
46 | #define SPURIOUS_APIC_VECTOR 0xff | ||
47 | #define ERROR_APIC_VECTOR 0xfe | ||
48 | #define INVALIDATE_TLB_VECTOR 0xfd | ||
49 | #define RESCHEDULE_VECTOR 0xfc | ||
50 | #define CALL_FUNCTION_VECTOR 0xfb | ||
51 | |||
52 | #define THERMAL_APIC_VECTOR 0xf0 | ||
53 | /* | ||
54 | * Local APIC timer IRQ vector is on a different priority level, | ||
55 | * to work around the 'lost local interrupt if more than 2 IRQ | ||
56 | * sources per level' errata. | ||
57 | */ | ||
58 | #define LOCAL_TIMER_VECTOR 0xef | ||
59 | |||
60 | /* | ||
61 | * First APIC vector available to drivers: (vectors 0x30-0xee) | ||
62 | * we start at 0x31 to spread out vectors evenly between priority | ||
63 | * levels. (0x80 is the syscall vector) | ||
64 | */ | ||
65 | #define FIRST_DEVICE_VECTOR 0x31 | ||
66 | #define FIRST_SYSTEM_VECTOR 0xef | ||
67 | |||
68 | #define TIMER_IRQ 0 | ||
69 | |||
70 | /* | ||
71 | * 16 8259A IRQ's, 208 potential APIC interrupt sources. | ||
72 | * Right now the APIC is mostly only used for SMP. | ||
73 | * 256 vectors is an architectural limit. (we can have | ||
74 | * more than 256 devices theoretically, but they will | ||
75 | * have to use shared interrupts) | ||
76 | * Since vectors 0x00-0x1f are used/reserved for the CPU, | ||
77 | * the usable vector space is 0x20-0xff (224 vectors) | ||
78 | */ | ||
79 | |||
80 | /* | ||
81 | * The maximum number of vectors supported by i386 processors | ||
82 | * is limited to 256. For processors other than i386, NR_VECTORS | ||
83 | * should be changed accordingly. | ||
84 | */ | ||
85 | #define NR_VECTORS 256 | ||
86 | |||
87 | #include "irq_vectors_limits.h" | ||
88 | |||
89 | #define FPU_IRQ 13 | ||
90 | |||
91 | #define FIRST_VM86_IRQ 3 | ||
92 | #define LAST_VM86_IRQ 15 | ||
93 | #define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) | ||
94 | |||
95 | |||
96 | #endif /* _ASM_IRQ_VECTORS_H */ | ||
diff --git a/include/asm-x86/mach-default/irq_vectors_limits.h b/include/asm-x86/mach-default/irq_vectors_limits.h deleted file mode 100644 index a90c7a60109f..000000000000 --- a/include/asm-x86/mach-default/irq_vectors_limits.h +++ /dev/null | |||
@@ -1,16 +0,0 @@ | |||
1 | #ifndef _ASM_IRQ_VECTORS_LIMITS_H | ||
2 | #define _ASM_IRQ_VECTORS_LIMITS_H | ||
3 | |||
4 | #if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT) | ||
5 | #define NR_IRQS 224 | ||
6 | # if (224 >= 32 * NR_CPUS) | ||
7 | # define NR_IRQ_VECTORS NR_IRQS | ||
8 | # else | ||
9 | # define NR_IRQ_VECTORS (32 * NR_CPUS) | ||
10 | # endif | ||
11 | #else | ||
12 | #define NR_IRQS 16 | ||
13 | #define NR_IRQ_VECTORS NR_IRQS | ||
14 | #endif | ||
15 | |||
16 | #endif /* _ASM_IRQ_VECTORS_LIMITS_H */ | ||
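
Editor's note: the limits logic deleted here is re-absorbed into the new irq_vectors.h above; with an IO-APIC or paravirt it effectively picks NR_IRQ_VECTORS as the larger of NR_IRQS (224) and 32 * NR_CPUS, otherwise both collapse to 16. A tiny sketch of the same choice, with an example NR_CPUS value that is not from the patch:

/* Equivalent of the preprocessor choice above: with IO-APIC/paravirt,
 * NR_IRQ_VECTORS is max(224, 32 * NR_CPUS); otherwise both are 16.   */
#define SKETCH_NR_CPUS        64          /* example value only        */
#define SKETCH_NR_IRQS        224
#define SKETCH_NR_IRQ_VECTORS ((SKETCH_NR_IRQS >= 32 * SKETCH_NR_CPUS) ? \
                               SKETCH_NR_IRQS : (32 * SKETCH_NR_CPUS))
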
diff --git a/include/asm-x86/mach-visws/irq_vectors.h b/include/asm-x86/mach-visws/irq_vectors.h deleted file mode 100644 index cb572d8db505..000000000000 --- a/include/asm-x86/mach-visws/irq_vectors.h +++ /dev/null | |||
@@ -1,62 +0,0 @@ | |||
1 | #ifndef _ASM_IRQ_VECTORS_H | ||
2 | #define _ASM_IRQ_VECTORS_H | ||
3 | |||
4 | /* | ||
5 | * IDT vectors usable for external interrupt sources start | ||
6 | * at 0x20: | ||
7 | */ | ||
8 | #define FIRST_EXTERNAL_VECTOR 0x20 | ||
9 | |||
10 | #define SYSCALL_VECTOR 0x80 | ||
11 | |||
12 | /* | ||
13 | * Vectors 0x20-0x2f are used for ISA interrupts. | ||
14 | */ | ||
15 | |||
16 | /* | ||
17 | * Special IRQ vectors used by the SMP architecture, 0xf0-0xff | ||
18 | * | ||
19 | * some of the following vectors are 'rare', they are merged | ||
20 | * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. | ||
21 | * TLB, reschedule and local APIC vectors are performance-critical. | ||
22 | * | ||
23 | * Vectors 0xf0-0xfa are free (reserved for future Linux use). | ||
24 | */ | ||
25 | #define SPURIOUS_APIC_VECTOR 0xff | ||
26 | #define ERROR_APIC_VECTOR 0xfe | ||
27 | #define INVALIDATE_TLB_VECTOR 0xfd | ||
28 | #define RESCHEDULE_VECTOR 0xfc | ||
29 | #define CALL_FUNCTION_VECTOR 0xfb | ||
30 | |||
31 | #define THERMAL_APIC_VECTOR 0xf0 | ||
32 | /* | ||
33 | * Local APIC timer IRQ vector is on a different priority level, | ||
34 | * to work around the 'lost local interrupt if more than 2 IRQ | ||
35 | * sources per level' errata. | ||
36 | */ | ||
37 | #define LOCAL_TIMER_VECTOR 0xef | ||
38 | |||
39 | /* | ||
40 | * First APIC vector available to drivers: (vectors 0x30-0xee) | ||
41 | * we start at 0x31 to spread out vectors evenly between priority | ||
42 | * levels. (0x80 is the syscall vector) | ||
43 | */ | ||
44 | #define FIRST_DEVICE_VECTOR 0x31 | ||
45 | #define FIRST_SYSTEM_VECTOR 0xef | ||
46 | |||
47 | #define TIMER_IRQ 0 | ||
48 | |||
49 | /* | ||
50 | * IRQ definitions | ||
51 | */ | ||
52 | #define NR_VECTORS 256 | ||
53 | #define NR_IRQS 224 | ||
54 | #define NR_IRQ_VECTORS NR_IRQS | ||
55 | |||
56 | #define FPU_IRQ 13 | ||
57 | |||
58 | #define FIRST_VM86_IRQ 3 | ||
59 | #define LAST_VM86_IRQ 15 | ||
60 | #define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) | ||
61 | |||
62 | #endif /* _ASM_IRQ_VECTORS_H */ | ||
diff --git a/include/asm-x86/mach-voyager/irq_vectors.h b/include/asm-x86/mach-voyager/irq_vectors.h deleted file mode 100644 index 165421f5821c..000000000000 --- a/include/asm-x86/mach-voyager/irq_vectors.h +++ /dev/null | |||
@@ -1,79 +0,0 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8 -*- */ | ||
2 | |||
3 | /* Copyright (C) 2002 | ||
4 | * | ||
5 | * Author: James.Bottomley@HansenPartnership.com | ||
6 | * | ||
7 | * linux/arch/i386/voyager/irq_vectors.h | ||
8 | * | ||
9 | * This file provides definitions for the VIC and QIC CPIs | ||
10 | */ | ||
11 | |||
12 | #ifndef _ASM_IRQ_VECTORS_H | ||
13 | #define _ASM_IRQ_VECTORS_H | ||
14 | |||
15 | /* | ||
16 | * IDT vectors usable for external interrupt sources start | ||
17 | * at 0x20: | ||
18 | */ | ||
19 | #define FIRST_EXTERNAL_VECTOR 0x20 | ||
20 | |||
21 | #define SYSCALL_VECTOR 0x80 | ||
22 | |||
23 | /* | ||
24 | * Vectors 0x20-0x2f are used for ISA interrupts. | ||
25 | */ | ||
26 | |||
27 | /* These define the CPIs we use in linux */ | ||
28 | #define VIC_CPI_LEVEL0 0 | ||
29 | #define VIC_CPI_LEVEL1 1 | ||
30 | /* now the fake CPIs */ | ||
31 | #define VIC_TIMER_CPI 2 | ||
32 | #define VIC_INVALIDATE_CPI 3 | ||
33 | #define VIC_RESCHEDULE_CPI 4 | ||
34 | #define VIC_ENABLE_IRQ_CPI 5 | ||
35 | #define VIC_CALL_FUNCTION_CPI 6 | ||
36 | |||
37 | /* Now the QIC CPIs: Since we don't need the two initial levels, | ||
38 | * these are 2 less than the VIC CPIs */ | ||
39 | #define QIC_CPI_OFFSET 1 | ||
40 | #define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET) | ||
41 | #define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET) | ||
42 | #define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET) | ||
43 | #define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET) | ||
44 | #define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET) | ||
45 | |||
46 | #define VIC_START_FAKE_CPI VIC_TIMER_CPI | ||
47 | #define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_CPI | ||
48 | |||
49 | /* this is the SYS_INT CPI. */ | ||
50 | #define VIC_SYS_INT 8 | ||
51 | #define VIC_CMN_INT 15 | ||
52 | |||
53 | /* This is the boot CPI for alternate processors. It gets overwritten | ||
54 | * by the above once the system has activated all available processors */ | ||
55 | #define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0 | ||
56 | #define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8) | ||
57 | |||
58 | #define NR_VECTORS 256 | ||
59 | #define NR_IRQS 224 | ||
60 | #define NR_IRQ_VECTORS NR_IRQS | ||
61 | |||
62 | #define FPU_IRQ 13 | ||
63 | |||
64 | #define FIRST_VM86_IRQ 3 | ||
65 | #define LAST_VM86_IRQ 15 | ||
66 | #define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) | ||
67 | |||
68 | #ifndef __ASSEMBLY__ | ||
69 | extern asmlinkage void vic_cpi_interrupt(void); | ||
70 | extern asmlinkage void vic_sys_interrupt(void); | ||
71 | extern asmlinkage void vic_cmn_interrupt(void); | ||
72 | extern asmlinkage void qic_timer_interrupt(void); | ||
73 | extern asmlinkage void qic_invalidate_interrupt(void); | ||
74 | extern asmlinkage void qic_reschedule_interrupt(void); | ||
75 | extern asmlinkage void qic_enable_irq_interrupt(void); | ||
76 | extern asmlinkage void qic_call_function_interrupt(void); | ||
77 | #endif /* !__ASSEMBLY__ */ | ||
78 | |||
79 | #endif /* _ASM_IRQ_VECTORS_H */ | ||
diff --git a/include/asm-x86/uv/uv_hub.h b/include/asm-x86/uv/uv_hub.h index 26b9240d1e23..65004881de5f 100644 --- a/include/asm-x86/uv/uv_hub.h +++ b/include/asm-x86/uv/uv_hub.h | |||
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * SGI UV architectural definitions | 6 | * SGI UV architectural definitions |
7 | * | 7 | * |
8 | * Copyright (C) 2007 Silicon Graphics, Inc. All rights reserved. | 8 | * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef __ASM_X86_UV_HUB_H__ | 11 | #ifndef __ASM_X86_UV_HUB_H__ |
@@ -20,26 +20,49 @@ | |||
20 | /* | 20 | /* |
21 | * Addressing Terminology | 21 | * Addressing Terminology |
22 | * | 22 | * |
23 | * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of | 23 | * M - The low M bits of a physical address represent the offset |
24 | * routers always have low bit of 1, C/MBricks have low bit | 24 | * into the blade local memory. RAM memory on a blade is physically |
25 | * equal to 0. Most addressing macros that target UV hub chips | 25 | * contiguous (although various IO spaces may punch holes in |
26 | * right shift the NASID by 1 to exclude the always-zero bit. | 26 | * it).. |
27 | * | 27 | * |
28 | * SNASID - NASID right shifted by 1 bit. | 28 | * N - Number of bits in the node portion of a socket physical |
29 | * address. | ||
30 | * | ||
31 | * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of | ||
32 | * routers always have low bit of 1, C/MBricks have low bit | ||
33 | * equal to 0. Most addressing macros that target UV hub chips | ||
34 | * right shift the NASID by 1 to exclude the always-zero bit. | ||
35 | * NASIDs contain up to 15 bits. | ||
36 | * | ||
37 | * GNODE - NASID right shifted by 1 bit. Most mmrs contain gnodes instead | ||
38 | * of nasids. | ||
39 | * | ||
40 | * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant | ||
41 | * of the nasid for socket usage. | ||
42 | * | ||
43 | * | ||
44 | * NumaLink Global Physical Address Format: | ||
45 | * +--------------------------------+---------------------+ | ||
46 | * |00..000| GNODE | NodeOffset | | ||
47 | * +--------------------------------+---------------------+ | ||
48 | * |<-------53 - M bits --->|<--------M bits -----> | ||
49 | * | ||
50 | * M - number of node offset bits (35 .. 40) | ||
29 | * | 51 | * |
30 | * | 52 | * |
31 | * Memory/UV-HUB Processor Socket Address Format: | 53 | * Memory/UV-HUB Processor Socket Address Format: |
32 | * +--------+---------------+---------------------+ | 54 | * +----------------+---------------+---------------------+ |
33 | * |00..0000| SNASID | NodeOffset | | 55 | * |00..000000000000| PNODE | NodeOffset | |
34 | * +--------+---------------+---------------------+ | 56 | * +----------------+---------------+---------------------+ |
35 | * <--- N bits --->|<--------M bits -----> | 57 | * <--- N bits --->|<--------M bits -----> |
36 | * | 58 | * |
37 | * M number of node offset bits (35 .. 40) | 59 | * M - number of node offset bits (35 .. 40) |
38 | * N number of SNASID bits (0 .. 10) | 60 | * N - number of PNODE bits (0 .. 10) |
39 | * | 61 | * |
40 | * Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64). | 62 | * Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64). |
41 | * The actual values are configuration dependent and are set at | 63 | * The actual values are configuration dependent and are set at |
42 | * boot time | 64 | * boot time. M & N values are set by the hardware/BIOS at boot. |
65 | * | ||
43 | * | 66 | * |
44 | * APICID format | 67 | * APICID format |
45 | * NOTE!!!!!! This is the current format of the APICID. However, code | 68 | * NOTE!!!!!! This is the current format of the APICID. However, code |
@@ -48,14 +71,14 @@ | |||
48 | * | 71 | * |
49 | * 1111110000000000 | 72 | * 1111110000000000 |
50 | * 5432109876543210 | 73 | * 5432109876543210 |
51 | * nnnnnnnnnnlc0cch | 74 | * pppppppppplc0cch |
52 | * sssssssssss | 75 | * sssssssssss |
53 | * | 76 | * |
54 | * n = snasid bits | 77 | * p = pnode bits |
55 | * l = socket number on board | 78 | * l = socket number on board |
56 | * c = core | 79 | * c = core |
57 | * h = hyperthread | 80 | * h = hyperthread |
58 | * s = bits that are in the socket CSR | 81 | * s = bits that are in the SOCKET_ID CSR |
59 | * | 82 | * |
60 | * Note: Processor only supports 12 bits in the APICID register. The ACPI | 83 | * Note: Processor only supports 12 bits in the APICID register. The ACPI |
61 | * tables hold all 16 bits. Software needs to be aware of this. | 84 | * tables hold all 16 bits. Software needs to be aware of this. |
@@ -74,7 +97,7 @@ | |||
74 | * This value is also the value of the maximum number of non-router NASIDs | 97 | * This value is also the value of the maximum number of non-router NASIDs |
75 | * in the numalink fabric. | 98 | * in the numalink fabric. |
76 | * | 99 | * |
77 | * NOTE: a brick may be 1 or 2 OS nodes. Don't get these confused. | 100 | * NOTE: a brick may contain 1 or 2 OS nodes. Don't get these confused. |
78 | */ | 101 | */ |
79 | #define UV_MAX_NUMALINK_BLADES 16384 | 102 | #define UV_MAX_NUMALINK_BLADES 16384 |
80 | 103 | ||
@@ -96,8 +119,12 @@ | |||
96 | */ | 119 | */ |
97 | struct uv_hub_info_s { | 120 | struct uv_hub_info_s { |
98 | unsigned long global_mmr_base; | 121 | unsigned long global_mmr_base; |
99 | unsigned short local_nasid; | 122 | unsigned long gpa_mask; |
100 | unsigned short gnode_upper; | 123 | unsigned long gnode_upper; |
124 | unsigned long lowmem_remap_top; | ||
125 | unsigned long lowmem_remap_base; | ||
126 | unsigned short pnode; | ||
127 | unsigned short pnode_mask; | ||
101 | unsigned short coherency_domain_number; | 128 | unsigned short coherency_domain_number; |
102 | unsigned short numa_blade_id; | 129 | unsigned short numa_blade_id; |
103 | unsigned char blade_processor_id; | 130 | unsigned char blade_processor_id; |
@@ -112,83 +139,124 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); | |||
112 | * Local & Global MMR space macros. | 139 | * Local & Global MMR space macros. |
113 | * Note: macros are intended to be used ONLY by inline functions | 140 | * Note: macros are intended to be used ONLY by inline functions |
114 | * in this file - not by other kernel code. | 141 | * in this file - not by other kernel code. |
142 | * n - NASID (full 15-bit global nasid) | ||
143 | * g - GNODE (full 15-bit global nasid, right shifted 1) | ||
144 | * p - PNODE (local part of nsids, right shifted 1) | ||
115 | */ | 145 | */ |
116 | #define UV_SNASID(n) ((n) >> 1) | 146 | #define UV_NASID_TO_PNODE(n) (((n) >> 1) & uv_hub_info->pnode_mask) |
117 | #define UV_NASID(n) ((n) << 1) | 147 | #define UV_PNODE_TO_NASID(p) (((p) << 1) | uv_hub_info->gnode_upper) |
118 | 148 | ||
119 | #define UV_LOCAL_MMR_BASE 0xf4000000UL | 149 | #define UV_LOCAL_MMR_BASE 0xf4000000UL |
120 | #define UV_GLOBAL_MMR32_BASE 0xf8000000UL | 150 | #define UV_GLOBAL_MMR32_BASE 0xf8000000UL |
121 | #define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base) | 151 | #define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base) |
122 | 152 | ||
123 | #define UV_GLOBAL_MMR32_SNASID_MASK 0x3ff | 153 | #define UV_GLOBAL_MMR32_PNODE_SHIFT 15 |
124 | #define UV_GLOBAL_MMR32_SNASID_SHIFT 15 | 154 | #define UV_GLOBAL_MMR64_PNODE_SHIFT 26 |
125 | #define UV_GLOBAL_MMR64_SNASID_SHIFT 26 | ||
126 | 155 | ||
127 | #define UV_GLOBAL_MMR32_NASID_BITS(n) \ | 156 | #define UV_GLOBAL_MMR32_PNODE_BITS(p) ((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT)) |
128 | (((UV_SNASID(n) & UV_GLOBAL_MMR32_SNASID_MASK)) << \ | ||
129 | (UV_GLOBAL_MMR32_SNASID_SHIFT)) | ||
130 | 157 | ||
131 | #define UV_GLOBAL_MMR64_NASID_BITS(n) \ | 158 | #define UV_GLOBAL_MMR64_PNODE_BITS(p) \ |
132 | ((unsigned long)UV_SNASID(n) << UV_GLOBAL_MMR64_SNASID_SHIFT) | 159 | ((unsigned long)(p) << UV_GLOBAL_MMR64_PNODE_SHIFT) |
160 | |||
161 | #define UV_APIC_PNODE_SHIFT 6 | ||
162 | |||
163 | /* | ||
164 | * Macros for converting between kernel virtual addresses, socket local physical | ||
165 | * addresses, and UV global physical addresses. | ||
166 | * Note: use the standard __pa() & __va() macros for converting | ||
167 | * between socket virtual and socket physical addresses. | ||
168 | */ | ||
169 | |||
170 | /* socket phys RAM --> UV global physical address */ | ||
171 | static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr) | ||
172 | { | ||
173 | if (paddr < uv_hub_info->lowmem_remap_top) | ||
174 | paddr += uv_hub_info->lowmem_remap_base; | ||
175 | return paddr | uv_hub_info->gnode_upper; | ||
176 | } | ||
177 | |||
178 | |||
179 | /* socket virtual --> UV global physical address */ | ||
180 | static inline unsigned long uv_gpa(void *v) | ||
181 | { | ||
182 | return __pa(v) | uv_hub_info->gnode_upper; | ||
183 | } | ||
184 | |||
185 | /* socket virtual --> UV global physical address */ | ||
186 | static inline void *uv_vgpa(void *v) | ||
187 | { | ||
188 | return (void *)uv_gpa(v); | ||
189 | } | ||
190 | |||
191 | /* UV global physical address --> socket virtual */ | ||
192 | static inline void *uv_va(unsigned long gpa) | ||
193 | { | ||
194 | return __va(gpa & uv_hub_info->gpa_mask); | ||
195 | } | ||
196 | |||
197 | /* pnode, offset --> socket virtual */ | ||
198 | static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset) | ||
199 | { | ||
200 | return __va(((unsigned long)pnode << uv_hub_info->m_val) | offset); | ||
201 | } | ||
133 | 202 | ||
134 | #define UV_APIC_NASID_SHIFT 6 | ||
135 | 203 | ||
136 | /* | 204 | /* |
137 | * Extract a NASID from an APICID (full apicid, not processor subset) | 205 | * Extract a PNODE from an APICID (full apicid, not processor subset) |
138 | */ | 206 | */ |
139 | static inline int uv_apicid_to_nasid(int apicid) | 207 | static inline int uv_apicid_to_pnode(int apicid) |
140 | { | 208 | { |
141 | return (UV_NASID(apicid >> UV_APIC_NASID_SHIFT)); | 209 | return (apicid >> UV_APIC_PNODE_SHIFT); |
142 | } | 210 | } |
143 | 211 | ||
144 | /* | 212 | /* |
145 | * Access global MMRs using the low memory MMR32 space. This region supports | 213 | * Access global MMRs using the low memory MMR32 space. This region supports |
146 | * faster MMR access but not all MMRs are accessible in this space. | 214 | * faster MMR access but not all MMRs are accessible in this space. |
147 | */ | 215 | */ |
148 | static inline unsigned long *uv_global_mmr32_address(int nasid, | 216 | static inline unsigned long *uv_global_mmr32_address(int pnode, |
149 | unsigned long offset) | 217 | unsigned long offset) |
150 | { | 218 | { |
151 | return __va(UV_GLOBAL_MMR32_BASE | | 219 | return __va(UV_GLOBAL_MMR32_BASE | |
152 | UV_GLOBAL_MMR32_NASID_BITS(nasid) | offset); | 220 | UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset); |
153 | } | 221 | } |
154 | 222 | ||
155 | static inline void uv_write_global_mmr32(int nasid, unsigned long offset, | 223 | static inline void uv_write_global_mmr32(int pnode, unsigned long offset, |
156 | unsigned long val) | 224 | unsigned long val) |
157 | { | 225 | { |
158 | *uv_global_mmr32_address(nasid, offset) = val; | 226 | *uv_global_mmr32_address(pnode, offset) = val; |
159 | } | 227 | } |
160 | 228 | ||
161 | static inline unsigned long uv_read_global_mmr32(int nasid, | 229 | static inline unsigned long uv_read_global_mmr32(int pnode, |
162 | unsigned long offset) | 230 | unsigned long offset) |
163 | { | 231 | { |
164 | return *uv_global_mmr32_address(nasid, offset); | 232 | return *uv_global_mmr32_address(pnode, offset); |
165 | } | 233 | } |
166 | 234 | ||
167 | /* | 235 | /* |
168 | * Access Global MMR space using the MMR space located at the top of physical | 236 | * Access Global MMR space using the MMR space located at the top of physical |
169 | * memory. | 237 | * memory. |
170 | */ | 238 | */ |
171 | static inline unsigned long *uv_global_mmr64_address(int nasid, | 239 | static inline unsigned long *uv_global_mmr64_address(int pnode, |
172 | unsigned long offset) | 240 | unsigned long offset) |
173 | { | 241 | { |
174 | return __va(UV_GLOBAL_MMR64_BASE | | 242 | return __va(UV_GLOBAL_MMR64_BASE | |
175 | UV_GLOBAL_MMR64_NASID_BITS(nasid) | offset); | 243 | UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset); |
176 | } | 244 | } |
177 | 245 | ||
178 | static inline void uv_write_global_mmr64(int nasid, unsigned long offset, | 246 | static inline void uv_write_global_mmr64(int pnode, unsigned long offset, |
179 | unsigned long val) | 247 | unsigned long val) |
180 | { | 248 | { |
181 | *uv_global_mmr64_address(nasid, offset) = val; | 249 | *uv_global_mmr64_address(pnode, offset) = val; |
182 | } | 250 | } |
183 | 251 | ||
184 | static inline unsigned long uv_read_global_mmr64(int nasid, | 252 | static inline unsigned long uv_read_global_mmr64(int pnode, |
185 | unsigned long offset) | 253 | unsigned long offset) |
186 | { | 254 | { |
187 | return *uv_global_mmr64_address(nasid, offset); | 255 | return *uv_global_mmr64_address(pnode, offset); |
188 | } | 256 | } |
189 | 257 | ||
190 | /* | 258 | /* |
191 | * Access node local MMRs. Faster than using global space but only local MMRs | 259 | * Access hub local MMRs. Faster than using global space but only local MMRs |
192 | * are accessible. | 260 | * are accessible. |
193 | */ | 261 | */ |
194 | static inline unsigned long *uv_local_mmr_address(unsigned long offset) | 262 | static inline unsigned long *uv_local_mmr_address(unsigned long offset) |
@@ -207,15 +275,15 @@ static inline void uv_write_local_mmr(unsigned long offset, unsigned long val) | |||
207 | } | 275 | } |
208 | 276 | ||
209 | /* | 277 | /* |
210 | * Structures and definitions for converting between cpu, node, and blade | 278 | * Structures and definitions for converting between cpu, node, pnode, and blade |
211 | * numbers. | 279 | * numbers. |
212 | */ | 280 | */ |
213 | struct uv_blade_info { | 281 | struct uv_blade_info { |
214 | unsigned short nr_posible_cpus; | 282 | unsigned short nr_possible_cpus; |
215 | unsigned short nr_online_cpus; | 283 | unsigned short nr_online_cpus; |
216 | unsigned short nasid; | 284 | unsigned short pnode; |
217 | }; | 285 | }; |
218 | struct uv_blade_info *uv_blade_info; | 286 | extern struct uv_blade_info *uv_blade_info; |
219 | extern short *uv_node_to_blade; | 287 | extern short *uv_node_to_blade; |
220 | extern short *uv_cpu_to_blade; | 288 | extern short *uv_cpu_to_blade; |
221 | extern short uv_possible_blades; | 289 | extern short uv_possible_blades; |
@@ -244,16 +312,16 @@ static inline int uv_node_to_blade_id(int nid) | |||
244 | return uv_node_to_blade[nid]; | 312 | return uv_node_to_blade[nid]; |
245 | } | 313 | } |
246 | 314 | ||
247 | /* Convert a blade id to the NASID of the blade */ | 315 | /* Convert a blade id to the PNODE of the blade */ |
248 | static inline int uv_blade_to_nasid(int bid) | 316 | static inline int uv_blade_to_pnode(int bid) |
249 | { | 317 | { |
250 | return uv_blade_info[bid].nasid; | 318 | return uv_blade_info[bid].pnode; |
251 | } | 319 | } |
252 | 320 | ||
253 | /* Determine the number of possible cpus on a blade */ | 321 | /* Determine the number of possible cpus on a blade */ |
254 | static inline int uv_blade_nr_possible_cpus(int bid) | 322 | static inline int uv_blade_nr_possible_cpus(int bid) |
255 | { | 323 | { |
256 | return uv_blade_info[bid].nr_posible_cpus; | 324 | return uv_blade_info[bid].nr_possible_cpus; |
257 | } | 325 | } |
258 | 326 | ||
259 | /* Determine the number of online cpus on a blade */ | 327 | /* Determine the number of online cpus on a blade */ |
@@ -262,16 +330,16 @@ static inline int uv_blade_nr_online_cpus(int bid) | |||
262 | return uv_blade_info[bid].nr_online_cpus; | 330 | return uv_blade_info[bid].nr_online_cpus; |
263 | } | 331 | } |
264 | 332 | ||
265 | /* Convert a cpu id to the NASID of the blade containing the cpu */ | 333 | /* Convert a cpu id to the PNODE of the blade containing the cpu */ |
266 | static inline int uv_cpu_to_nasid(int cpu) | 334 | static inline int uv_cpu_to_pnode(int cpu) |
267 | { | 335 | { |
268 | return uv_blade_info[uv_cpu_to_blade_id(cpu)].nasid; | 336 | return uv_blade_info[uv_cpu_to_blade_id(cpu)].pnode; |
269 | } | 337 | } |
270 | 338 | ||
271 | /* Convert a node number to the NASID of the blade */ | 339 | /* Convert a linux node number to the PNODE of the blade */ |
272 | static inline int uv_node_to_nasid(int nid) | 340 | static inline int uv_node_to_pnode(int nid) |
273 | { | 341 | { |
274 | return uv_blade_info[uv_node_to_blade_id(nid)].nasid; | 342 | return uv_blade_info[uv_node_to_blade_id(nid)].pnode; |
275 | } | 343 | } |
276 | 344 | ||
277 | /* Maximum possible number of blades */ | 345 | /* Maximum possible number of blades */ |
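
Editor's note: the uv_hub.h changes above switch the addressing helpers from NASID-based to PNODE-based forms: a NASID is right-shifted by one and masked with pnode_mask to get the PNODE, and a socket physical address gains the gnode_upper bits (after low memory is relocated through lowmem_remap_base) to form a UV global physical address. A hedged sketch of both conversions, with the hub-info fields treated as plain inputs rather than the per-cpu structure:

/* Sketch only: mirrors UV_NASID_TO_PNODE() and uv_soc_phys_ram_to_gpa()
 * from the header above; field names follow struct uv_hub_info_s.      */
struct uv_hub_sketch {
        unsigned long gnode_upper;       /* node bits OR'ed into the gpa */
        unsigned long lowmem_remap_top;  /* end of remapped low memory   */
        unsigned long lowmem_remap_base; /* offset added below that top  */
        unsigned short pnode_mask;       /* low N bits of the gnode      */
};

static unsigned long nasid_to_pnode(const struct uv_hub_sketch *h, int nasid)
{
        return (nasid >> 1) & h->pnode_mask;     /* drop the always-zero bit */
}

static unsigned long soc_phys_to_gpa(const struct uv_hub_sketch *h,
                                     unsigned long paddr)
{
        if (paddr < h->lowmem_remap_top)
                paddr += h->lowmem_remap_base;   /* relocate low RAM         */
        return paddr | h->gnode_upper;           /* add the node bits        */
}
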
diff --git a/include/asm-x86/uv/uv_mmrs.h b/include/asm-x86/uv/uv_mmrs.h index 3b69fe6b6376..ac9846076521 100644 --- a/include/asm-x86/uv/uv_mmrs.h +++ b/include/asm-x86/uv/uv_mmrs.h | |||
@@ -11,11 +11,46 @@ | |||
11 | #ifndef __ASM_X86_UV_MMRS__ | 11 | #ifndef __ASM_X86_UV_MMRS__ |
12 | #define __ASM_X86_UV_MMRS__ | 12 | #define __ASM_X86_UV_MMRS__ |
13 | 13 | ||
14 | /* | 14 | #define UV_MMR_ENABLE (1UL << 63) |
15 | * AUTO GENERATED - Do not edit | ||
16 | */ | ||
17 | 15 | ||
18 | #define UV_MMR_ENABLE (1UL << 63) | 16 | /* ========================================================================= */ |
17 | /* UVH_BAU_DATA_CONFIG */ | ||
18 | /* ========================================================================= */ | ||
19 | #define UVH_BAU_DATA_CONFIG 0x61680UL | ||
20 | #define UVH_BAU_DATA_CONFIG_32 0x0450 | ||
21 | |||
22 | #define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0 | ||
23 | #define UVH_BAU_DATA_CONFIG_VECTOR_MASK 0x00000000000000ffUL | ||
24 | #define UVH_BAU_DATA_CONFIG_DM_SHFT 8 | ||
25 | #define UVH_BAU_DATA_CONFIG_DM_MASK 0x0000000000000700UL | ||
26 | #define UVH_BAU_DATA_CONFIG_DESTMODE_SHFT 11 | ||
27 | #define UVH_BAU_DATA_CONFIG_DESTMODE_MASK 0x0000000000000800UL | ||
28 | #define UVH_BAU_DATA_CONFIG_STATUS_SHFT 12 | ||
29 | #define UVH_BAU_DATA_CONFIG_STATUS_MASK 0x0000000000001000UL | ||
30 | #define UVH_BAU_DATA_CONFIG_P_SHFT 13 | ||
31 | #define UVH_BAU_DATA_CONFIG_P_MASK 0x0000000000002000UL | ||
32 | #define UVH_BAU_DATA_CONFIG_T_SHFT 15 | ||
33 | #define UVH_BAU_DATA_CONFIG_T_MASK 0x0000000000008000UL | ||
34 | #define UVH_BAU_DATA_CONFIG_M_SHFT 16 | ||
35 | #define UVH_BAU_DATA_CONFIG_M_MASK 0x0000000000010000UL | ||
36 | #define UVH_BAU_DATA_CONFIG_APIC_ID_SHFT 32 | ||
37 | #define UVH_BAU_DATA_CONFIG_APIC_ID_MASK 0xffffffff00000000UL | ||
38 | |||
39 | union uvh_bau_data_config_u { | ||
40 | unsigned long v; | ||
41 | struct uvh_bau_data_config_s { | ||
42 | unsigned long vector_ : 8; /* RW */ | ||
43 | unsigned long dm : 3; /* RW */ | ||
44 | unsigned long destmode : 1; /* RW */ | ||
45 | unsigned long status : 1; /* RO */ | ||
46 | unsigned long p : 1; /* RO */ | ||
47 | unsigned long rsvd_14 : 1; /* */ | ||
48 | unsigned long t : 1; /* RO */ | ||
49 | unsigned long m : 1; /* RW */ | ||
50 | unsigned long rsvd_17_31: 15; /* */ | ||
51 | unsigned long apic_id : 32; /* RW */ | ||
52 | } s; | ||
53 | }; | ||
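
Editor's note: each UV MMR in this header is described twice, as SHFT/MASK macros and as a bitfield union, so a register image can be built either way. A short sketch of filling the RW fields of UVH_BAU_DATA_CONFIG through the union just defined; the vector and APIC ID values are illustrative only, and the final write would go through uv_write_global_mmr64() from uv_hub.h above:

/* Sketch only: builds a UVH_BAU_DATA_CONFIG image via the union above. */
union uvh_bau_data_config_u cfg;

cfg.v          = 0;
cfg.s.vector_  = 0xea;            /* interrupt vector to deliver (example) */
cfg.s.dm       = 0;               /* delivery mode: fixed                  */
cfg.s.apic_id  = 0x10;            /* destination APIC ID (example)         */
/* ...then written out with uv_write_global_mmr64(pnode,
 *    UVH_BAU_DATA_CONFIG, cfg.v), as defined in uv_hub.h above. */
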
19 | 54 | ||
20 | /* ========================================================================= */ | 55 | /* ========================================================================= */ |
21 | /* UVH_IPI_INT */ | 56 | /* UVH_IPI_INT */ |
@@ -109,6 +144,7 @@ union uvh_lb_bau_intd_payload_queue_tail_u { | |||
109 | /* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */ | 144 | /* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */ |
110 | /* ========================================================================= */ | 145 | /* ========================================================================= */ |
111 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL | 146 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL |
147 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0x0aa0 | ||
112 | 148 | ||
113 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0 | 149 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0 |
114 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL | 150 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL |
@@ -169,6 +205,7 @@ union uvh_lb_bau_intd_software_acknowledge_u { | |||
169 | /* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */ | 205 | /* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */ |
170 | /* ========================================================================= */ | 206 | /* ========================================================================= */ |
171 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL | 207 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL |
208 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0x0aa8 | ||
172 | 209 | ||
173 | /* ========================================================================= */ | 210 | /* ========================================================================= */ |
174 | /* UVH_LB_BAU_SB_ACTIVATION_CONTROL */ | 211 | /* UVH_LB_BAU_SB_ACTIVATION_CONTROL */ |
@@ -248,6 +285,331 @@ union uvh_lb_bau_sb_descriptor_base_u { | |||
248 | }; | 285 | }; |
249 | 286 | ||
250 | /* ========================================================================= */ | 287 | /* ========================================================================= */ |
288 | /* UVH_LB_MCAST_AOERR0_RPT_ENABLE */ | ||
289 | /* ========================================================================= */ | ||
290 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE 0x50b20UL | ||
291 | |||
292 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_OBESE_MSG_SHFT 0 | ||
293 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_OBESE_MSG_MASK 0x0000000000000001UL | ||
294 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_DATA_SB_ERR_SHFT 1 | ||
295 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_DATA_SB_ERR_MASK 0x0000000000000002UL | ||
296 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_NACK_BUFF_PARITY_SHFT 2 | ||
297 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_NACK_BUFF_PARITY_MASK 0x0000000000000004UL | ||
298 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_TIMEOUT_SHFT 3 | ||
299 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_TIMEOUT_MASK 0x0000000000000008UL | ||
300 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_INACTIVE_REPLY_SHFT 4 | ||
301 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_INACTIVE_REPLY_MASK 0x0000000000000010UL | ||
302 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_UPGRADE_ERROR_SHFT 5 | ||
303 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_UPGRADE_ERROR_MASK 0x0000000000000020UL | ||
304 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REG_COUNT_UNDERFLOW_SHFT 6 | ||
305 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REG_COUNT_UNDERFLOW_MASK 0x0000000000000040UL | ||
306 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REP_OBESE_MSG_SHFT 7 | ||
307 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REP_OBESE_MSG_MASK 0x0000000000000080UL | ||
308 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_RUNT_MSG_SHFT 8 | ||
309 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_RUNT_MSG_MASK 0x0000000000000100UL | ||
310 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_OBESE_MSG_SHFT 9 | ||
311 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_OBESE_MSG_MASK 0x0000000000000200UL | ||
312 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_DATA_SB_ERR_SHFT 10 | ||
313 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_DATA_SB_ERR_MASK 0x0000000000000400UL | ||
314 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_RUNT_MSG_SHFT 11 | ||
315 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_RUNT_MSG_MASK 0x0000000000000800UL | ||
316 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_OBESE_MSG_SHFT 12 | ||
317 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_OBESE_MSG_MASK 0x0000000000001000UL | ||
318 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_DATA_SB_ERR_SHFT 13 | ||
319 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_DATA_SB_ERR_MASK 0x0000000000002000UL | ||
320 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_COMMAND_ERR_SHFT 14 | ||
321 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_COMMAND_ERR_MASK 0x0000000000004000UL | ||
322 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_PEND_TIMEOUT_SHFT 15 | ||
323 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_PEND_TIMEOUT_MASK 0x0000000000008000UL | ||
324 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_RUNT_MSG_SHFT 16 | ||
325 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_RUNT_MSG_MASK 0x0000000000010000UL | ||
326 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_OBESE_MSG_SHFT 17 | ||
327 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_OBESE_MSG_MASK 0x0000000000020000UL | ||
328 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_DATA_SB_ERR_SHFT 18 | ||
329 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_DATA_SB_ERR_MASK 0x0000000000040000UL | ||
330 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_RUNT_MSG_SHFT 19 | ||
331 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_RUNT_MSG_MASK 0x0000000000080000UL | ||
332 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_OBESE_MSG_SHFT 20 | ||
333 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_OBESE_MSG_MASK 0x0000000000100000UL | ||
334 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_DATA_SB_ERR_SHFT 21 | ||
335 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_DATA_SB_ERR_MASK 0x0000000000200000UL | ||
336 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_TIMEOUT_SHFT 22 | ||
337 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_TIMEOUT_MASK 0x0000000000400000UL | ||
338 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_SPURIOUS_EVENT_SHFT 23 | ||
339 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_SPURIOUS_EVENT_MASK 0x0000000000800000UL | ||
340 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IOH_DESTINATION_TABLE_PARITY_SHFT 24 | ||
341 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IOH_DESTINATION_TABLE_PARITY_MASK 0x0000000001000000UL | ||
342 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_HAD_ERROR_REPLY_SHFT 25 | ||
343 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_HAD_ERROR_REPLY_MASK 0x0000000002000000UL | ||
344 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_TIMEOUT_SHFT 26 | ||
345 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_TIMEOUT_MASK 0x0000000004000000UL | ||
346 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_LOCK_MANAGER_HAD_ERROR_REPLY_SHFT 27 | ||
347 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_LOCK_MANAGER_HAD_ERROR_REPLY_MASK 0x0000000008000000UL | ||
348 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_HAD_ERROR_REPLY_SHFT 28 | ||
349 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_HAD_ERROR_REPLY_MASK 0x0000000010000000UL | ||
350 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_TIMEOUT_SHFT 29 | ||
351 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_TIMEOUT_MASK 0x0000000020000000UL | ||
352 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SB_ACTIVATION_OVERRUN_SHFT 30 | ||
353 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SB_ACTIVATION_OVERRUN_MASK 0x0000000040000000UL | ||
354 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_HAD_ERROR_REPLY_SHFT 31 | ||
355 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_HAD_ERROR_REPLY_MASK 0x0000000080000000UL | ||
356 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_TIMEOUT_SHFT 32 | ||
357 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_TIMEOUT_MASK 0x0000000100000000UL | ||
358 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_0_PARITY_SHFT 33 | ||
359 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_0_PARITY_MASK 0x0000000200000000UL | ||
360 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_1_PARITY_SHFT 34 | ||
361 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_1_PARITY_MASK 0x0000000400000000UL | ||
362 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SOCKET_DESTINATION_TABLE_PARITY_SHFT 35 | ||
363 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SOCKET_DESTINATION_TABLE_PARITY_MASK 0x0000000800000000UL | ||
364 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_BAU_REPLY_PAYLOAD_CORRUPTION_SHFT 36 | ||
365 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_BAU_REPLY_PAYLOAD_CORRUPTION_MASK 0x0000001000000000UL | ||
366 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IO_PORT_DESTINATION_TABLE_PARITY_SHFT 37 | ||
367 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IO_PORT_DESTINATION_TABLE_PARITY_MASK 0x0000002000000000UL | ||
368 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INTD_SOFT_ACK_TIMEOUT_SHFT 38 | ||
369 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INTD_SOFT_ACK_TIMEOUT_MASK 0x0000004000000000UL | ||
370 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_OBESE_MSG_SHFT 39 | ||
371 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_OBESE_MSG_MASK 0x0000008000000000UL | ||
372 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_COMMAND_ERR_SHFT 40 | ||
373 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_COMMAND_ERR_MASK 0x0000010000000000UL | ||
374 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_TIMEOUT_SHFT 41 | ||
375 | #define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_TIMEOUT_MASK 0x0000020000000000UL | ||
376 | |||
377 | union uvh_lb_mcast_aoerr0_rpt_enable_u { | ||
378 | unsigned long v; | ||
379 | struct uvh_lb_mcast_aoerr0_rpt_enable_s { | ||
380 | unsigned long mcast_obese_msg : 1; /* RW */ | ||
381 | unsigned long mcast_data_sb_err : 1; /* RW */ | ||
382 | unsigned long mcast_nack_buff_parity : 1; /* RW */ | ||
383 | unsigned long mcast_timeout : 1; /* RW */ | ||
384 | unsigned long mcast_inactive_reply : 1; /* RW */ | ||
385 | unsigned long mcast_upgrade_error : 1; /* RW */ | ||
386 | unsigned long mcast_reg_count_underflow : 1; /* RW */ | ||
387 | unsigned long mcast_rep_obese_msg : 1; /* RW */ | ||
388 | unsigned long ucache_req_runt_msg : 1; /* RW */ | ||
389 | unsigned long ucache_req_obese_msg : 1; /* RW */ | ||
390 | unsigned long ucache_req_data_sb_err : 1; /* RW */ | ||
391 | unsigned long ucache_rep_runt_msg : 1; /* RW */ | ||
392 | unsigned long ucache_rep_obese_msg : 1; /* RW */ | ||
393 | unsigned long ucache_rep_data_sb_err : 1; /* RW */ | ||
394 | unsigned long ucache_rep_command_err : 1; /* RW */ | ||
395 | unsigned long ucache_pend_timeout : 1; /* RW */ | ||
396 | unsigned long macc_req_runt_msg : 1; /* RW */ | ||
397 | unsigned long macc_req_obese_msg : 1; /* RW */ | ||
398 | unsigned long macc_req_data_sb_err : 1; /* RW */ | ||
399 | unsigned long macc_rep_runt_msg : 1; /* RW */ | ||
400 | unsigned long macc_rep_obese_msg : 1; /* RW */ | ||
401 | unsigned long macc_rep_data_sb_err : 1; /* RW */ | ||
402 | unsigned long macc_timeout : 1; /* RW */ | ||
403 | unsigned long macc_spurious_event : 1; /* RW */ | ||
404 | unsigned long ioh_destination_table_parity : 1; /* RW */ | ||
405 | unsigned long get_had_error_reply : 1; /* RW */ | ||
406 | unsigned long get_timeout : 1; /* RW */ | ||
407 | unsigned long lock_manager_had_error_reply : 1; /* RW */ | ||
408 | unsigned long put_had_error_reply : 1; /* RW */ | ||
409 | unsigned long put_timeout : 1; /* RW */ | ||
410 | unsigned long sb_activation_overrun : 1; /* RW */ | ||
411 | unsigned long completed_gb_activation_had_error_reply : 1; /* RW */ | ||
412 | unsigned long completed_gb_activation_timeout : 1; /* RW */ | ||
413 | unsigned long descriptor_buffer_0_parity : 1; /* RW */ | ||
414 | unsigned long descriptor_buffer_1_parity : 1; /* RW */ | ||
415 | unsigned long socket_destination_table_parity : 1; /* RW */ | ||
416 | unsigned long bau_reply_payload_corruption : 1; /* RW */ | ||
417 | unsigned long io_port_destination_table_parity : 1; /* RW */ | ||
418 | unsigned long intd_soft_ack_timeout : 1; /* RW */ | ||
419 | unsigned long int_rep_obese_msg : 1; /* RW */ | ||
420 | unsigned long int_rep_command_err : 1; /* RW */ | ||
421 | unsigned long int_timeout : 1; /* RW */ | ||
422 | unsigned long rsvd_42_63 : 22; /* */ | ||
423 | } s; | ||
424 | }; | ||
425 | |||
426 | /* ========================================================================= */ | ||
427 | /* UVH_LOCAL_INT0_CONFIG */ | ||
428 | /* ========================================================================= */ | ||
429 | #define UVH_LOCAL_INT0_CONFIG 0x61000UL | ||
430 | |||
431 | #define UVH_LOCAL_INT0_CONFIG_VECTOR_SHFT 0 | ||
432 | #define UVH_LOCAL_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL | ||
433 | #define UVH_LOCAL_INT0_CONFIG_DM_SHFT 8 | ||
434 | #define UVH_LOCAL_INT0_CONFIG_DM_MASK 0x0000000000000700UL | ||
435 | #define UVH_LOCAL_INT0_CONFIG_DESTMODE_SHFT 11 | ||
436 | #define UVH_LOCAL_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL | ||
437 | #define UVH_LOCAL_INT0_CONFIG_STATUS_SHFT 12 | ||
438 | #define UVH_LOCAL_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL | ||
439 | #define UVH_LOCAL_INT0_CONFIG_P_SHFT 13 | ||
440 | #define UVH_LOCAL_INT0_CONFIG_P_MASK 0x0000000000002000UL | ||
441 | #define UVH_LOCAL_INT0_CONFIG_T_SHFT 15 | ||
442 | #define UVH_LOCAL_INT0_CONFIG_T_MASK 0x0000000000008000UL | ||
443 | #define UVH_LOCAL_INT0_CONFIG_M_SHFT 16 | ||
444 | #define UVH_LOCAL_INT0_CONFIG_M_MASK 0x0000000000010000UL | ||
445 | #define UVH_LOCAL_INT0_CONFIG_APIC_ID_SHFT 32 | ||
446 | #define UVH_LOCAL_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL | ||
447 | |||
448 | union uvh_local_int0_config_u { | ||
449 | unsigned long v; | ||
450 | struct uvh_local_int0_config_s { | ||
451 | unsigned long vector_ : 8; /* RW */ | ||
452 | unsigned long dm : 3; /* RW */ | ||
453 | unsigned long destmode : 1; /* RW */ | ||
454 | unsigned long status : 1; /* RO */ | ||
455 | unsigned long p : 1; /* RO */ | ||
456 | unsigned long rsvd_14 : 1; /* */ | ||
457 | unsigned long t : 1; /* RO */ | ||
458 | unsigned long m : 1; /* RW */ | ||
459 | unsigned long rsvd_17_31: 15; /* */ | ||
460 | unsigned long apic_id : 32; /* RW */ | ||
461 | } s; | ||
462 | }; | ||
463 | |||
464 | /* ========================================================================= */ | ||
465 | /* UVH_LOCAL_INT0_ENABLE */ | ||
466 | /* ========================================================================= */ | ||
467 | #define UVH_LOCAL_INT0_ENABLE 0x65000UL | ||
468 | |||
469 | #define UVH_LOCAL_INT0_ENABLE_LB_HCERR_SHFT 0 | ||
470 | #define UVH_LOCAL_INT0_ENABLE_LB_HCERR_MASK 0x0000000000000001UL | ||
471 | #define UVH_LOCAL_INT0_ENABLE_GR0_HCERR_SHFT 1 | ||
472 | #define UVH_LOCAL_INT0_ENABLE_GR0_HCERR_MASK 0x0000000000000002UL | ||
473 | #define UVH_LOCAL_INT0_ENABLE_GR1_HCERR_SHFT 2 | ||
474 | #define UVH_LOCAL_INT0_ENABLE_GR1_HCERR_MASK 0x0000000000000004UL | ||
475 | #define UVH_LOCAL_INT0_ENABLE_LH_HCERR_SHFT 3 | ||
476 | #define UVH_LOCAL_INT0_ENABLE_LH_HCERR_MASK 0x0000000000000008UL | ||
477 | #define UVH_LOCAL_INT0_ENABLE_RH_HCERR_SHFT 4 | ||
478 | #define UVH_LOCAL_INT0_ENABLE_RH_HCERR_MASK 0x0000000000000010UL | ||
479 | #define UVH_LOCAL_INT0_ENABLE_XN_HCERR_SHFT 5 | ||
480 | #define UVH_LOCAL_INT0_ENABLE_XN_HCERR_MASK 0x0000000000000020UL | ||
481 | #define UVH_LOCAL_INT0_ENABLE_SI_HCERR_SHFT 6 | ||
482 | #define UVH_LOCAL_INT0_ENABLE_SI_HCERR_MASK 0x0000000000000040UL | ||
483 | #define UVH_LOCAL_INT0_ENABLE_LB_AOERR0_SHFT 7 | ||
484 | #define UVH_LOCAL_INT0_ENABLE_LB_AOERR0_MASK 0x0000000000000080UL | ||
485 | #define UVH_LOCAL_INT0_ENABLE_GR0_AOERR0_SHFT 8 | ||
486 | #define UVH_LOCAL_INT0_ENABLE_GR0_AOERR0_MASK 0x0000000000000100UL | ||
487 | #define UVH_LOCAL_INT0_ENABLE_GR1_AOERR0_SHFT 9 | ||
488 | #define UVH_LOCAL_INT0_ENABLE_GR1_AOERR0_MASK 0x0000000000000200UL | ||
489 | #define UVH_LOCAL_INT0_ENABLE_LH_AOERR0_SHFT 10 | ||
490 | #define UVH_LOCAL_INT0_ENABLE_LH_AOERR0_MASK 0x0000000000000400UL | ||
491 | #define UVH_LOCAL_INT0_ENABLE_RH_AOERR0_SHFT 11 | ||
492 | #define UVH_LOCAL_INT0_ENABLE_RH_AOERR0_MASK 0x0000000000000800UL | ||
493 | #define UVH_LOCAL_INT0_ENABLE_XN_AOERR0_SHFT 12 | ||
494 | #define UVH_LOCAL_INT0_ENABLE_XN_AOERR0_MASK 0x0000000000001000UL | ||
495 | #define UVH_LOCAL_INT0_ENABLE_SI_AOERR0_SHFT 13 | ||
496 | #define UVH_LOCAL_INT0_ENABLE_SI_AOERR0_MASK 0x0000000000002000UL | ||
497 | #define UVH_LOCAL_INT0_ENABLE_LB_AOERR1_SHFT 14 | ||
498 | #define UVH_LOCAL_INT0_ENABLE_LB_AOERR1_MASK 0x0000000000004000UL | ||
499 | #define UVH_LOCAL_INT0_ENABLE_GR0_AOERR1_SHFT 15 | ||
500 | #define UVH_LOCAL_INT0_ENABLE_GR0_AOERR1_MASK 0x0000000000008000UL | ||
501 | #define UVH_LOCAL_INT0_ENABLE_GR1_AOERR1_SHFT 16 | ||
502 | #define UVH_LOCAL_INT0_ENABLE_GR1_AOERR1_MASK 0x0000000000010000UL | ||
503 | #define UVH_LOCAL_INT0_ENABLE_LH_AOERR1_SHFT 17 | ||
504 | #define UVH_LOCAL_INT0_ENABLE_LH_AOERR1_MASK 0x0000000000020000UL | ||
505 | #define UVH_LOCAL_INT0_ENABLE_RH_AOERR1_SHFT 18 | ||
506 | #define UVH_LOCAL_INT0_ENABLE_RH_AOERR1_MASK 0x0000000000040000UL | ||
507 | #define UVH_LOCAL_INT0_ENABLE_XN_AOERR1_SHFT 19 | ||
508 | #define UVH_LOCAL_INT0_ENABLE_XN_AOERR1_MASK 0x0000000000080000UL | ||
509 | #define UVH_LOCAL_INT0_ENABLE_SI_AOERR1_SHFT 20 | ||
510 | #define UVH_LOCAL_INT0_ENABLE_SI_AOERR1_MASK 0x0000000000100000UL | ||
511 | #define UVH_LOCAL_INT0_ENABLE_RH_VPI_INT_SHFT 21 | ||
512 | #define UVH_LOCAL_INT0_ENABLE_RH_VPI_INT_MASK 0x0000000000200000UL | ||
513 | #define UVH_LOCAL_INT0_ENABLE_SYSTEM_SHUTDOWN_INT_SHFT 22 | ||
514 | #define UVH_LOCAL_INT0_ENABLE_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL | ||
515 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_0_SHFT 23 | ||
516 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_0_MASK 0x0000000000800000UL | ||
517 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_1_SHFT 24 | ||
518 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_1_MASK 0x0000000001000000UL | ||
519 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_2_SHFT 25 | ||
520 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_2_MASK 0x0000000002000000UL | ||
521 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_SHFT 26 | ||
522 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_MASK 0x0000000004000000UL | ||
523 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_4_SHFT 27 | ||
524 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_4_MASK 0x0000000008000000UL | ||
525 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_5_SHFT 28 | ||
526 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_5_MASK 0x0000000010000000UL | ||
527 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_6_SHFT 29 | ||
528 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_6_MASK 0x0000000020000000UL | ||
529 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_7_SHFT 30 | ||
530 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_7_MASK 0x0000000040000000UL | ||
531 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_8_SHFT 31 | ||
532 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_8_MASK 0x0000000080000000UL | ||
533 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_9_SHFT 32 | ||
534 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_9_MASK 0x0000000100000000UL | ||
535 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_10_SHFT 33 | ||
536 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_10_MASK 0x0000000200000000UL | ||
537 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_11_SHFT 34 | ||
538 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_11_MASK 0x0000000400000000UL | ||
539 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_12_SHFT 35 | ||
540 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_12_MASK 0x0000000800000000UL | ||
541 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_13_SHFT 36 | ||
542 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_13_MASK 0x0000001000000000UL | ||
543 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_14_SHFT 37 | ||
544 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_14_MASK 0x0000002000000000UL | ||
545 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_15_SHFT 38 | ||
546 | #define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_15_MASK 0x0000004000000000UL | ||
547 | #define UVH_LOCAL_INT0_ENABLE_L1_NMI_INT_SHFT 39 | ||
548 | #define UVH_LOCAL_INT0_ENABLE_L1_NMI_INT_MASK 0x0000008000000000UL | ||
549 | #define UVH_LOCAL_INT0_ENABLE_STOP_CLOCK_SHFT 40 | ||
550 | #define UVH_LOCAL_INT0_ENABLE_STOP_CLOCK_MASK 0x0000010000000000UL | ||
551 | #define UVH_LOCAL_INT0_ENABLE_ASIC_TO_L1_SHFT 41 | ||
552 | #define UVH_LOCAL_INT0_ENABLE_ASIC_TO_L1_MASK 0x0000020000000000UL | ||
553 | #define UVH_LOCAL_INT0_ENABLE_L1_TO_ASIC_SHFT 42 | ||
554 | #define UVH_LOCAL_INT0_ENABLE_L1_TO_ASIC_MASK 0x0000040000000000UL | ||
555 | #define UVH_LOCAL_INT0_ENABLE_LTC_INT_SHFT 43 | ||
556 | #define UVH_LOCAL_INT0_ENABLE_LTC_INT_MASK 0x0000080000000000UL | ||
557 | #define UVH_LOCAL_INT0_ENABLE_LA_SEQ_TRIGGER_SHFT 44 | ||
558 | #define UVH_LOCAL_INT0_ENABLE_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL | ||
559 | |||
560 | union uvh_local_int0_enable_u { | ||
561 | unsigned long v; | ||
562 | struct uvh_local_int0_enable_s { | ||
563 | unsigned long lb_hcerr : 1; /* RW */ | ||
564 | unsigned long gr0_hcerr : 1; /* RW */ | ||
565 | unsigned long gr1_hcerr : 1; /* RW */ | ||
566 | unsigned long lh_hcerr : 1; /* RW */ | ||
567 | unsigned long rh_hcerr : 1; /* RW */ | ||
568 | unsigned long xn_hcerr : 1; /* RW */ | ||
569 | unsigned long si_hcerr : 1; /* RW */ | ||
570 | unsigned long lb_aoerr0 : 1; /* RW */ | ||
571 | unsigned long gr0_aoerr0 : 1; /* RW */ | ||
572 | unsigned long gr1_aoerr0 : 1; /* RW */ | ||
573 | unsigned long lh_aoerr0 : 1; /* RW */ | ||
574 | unsigned long rh_aoerr0 : 1; /* RW */ | ||
575 | unsigned long xn_aoerr0 : 1; /* RW */ | ||
576 | unsigned long si_aoerr0 : 1; /* RW */ | ||
577 | unsigned long lb_aoerr1 : 1; /* RW */ | ||
578 | unsigned long gr0_aoerr1 : 1; /* RW */ | ||
579 | unsigned long gr1_aoerr1 : 1; /* RW */ | ||
580 | unsigned long lh_aoerr1 : 1; /* RW */ | ||
581 | unsigned long rh_aoerr1 : 1; /* RW */ | ||
582 | unsigned long xn_aoerr1 : 1; /* RW */ | ||
583 | unsigned long si_aoerr1 : 1; /* RW */ | ||
584 | unsigned long rh_vpi_int : 1; /* RW */ | ||
585 | unsigned long system_shutdown_int : 1; /* RW */ | ||
586 | unsigned long lb_irq_int_0 : 1; /* RW */ | ||
587 | unsigned long lb_irq_int_1 : 1; /* RW */ | ||
588 | unsigned long lb_irq_int_2 : 1; /* RW */ | ||
589 | unsigned long lb_irq_int_3 : 1; /* RW */ | ||
590 | unsigned long lb_irq_int_4 : 1; /* RW */ | ||
591 | unsigned long lb_irq_int_5 : 1; /* RW */ | ||
592 | unsigned long lb_irq_int_6 : 1; /* RW */ | ||
593 | unsigned long lb_irq_int_7 : 1; /* RW */ | ||
594 | unsigned long lb_irq_int_8 : 1; /* RW */ | ||
595 | unsigned long lb_irq_int_9 : 1; /* RW */ | ||
596 | unsigned long lb_irq_int_10 : 1; /* RW */ | ||
597 | unsigned long lb_irq_int_11 : 1; /* RW */ | ||
598 | unsigned long lb_irq_int_12 : 1; /* RW */ | ||
599 | unsigned long lb_irq_int_13 : 1; /* RW */ | ||
600 | unsigned long lb_irq_int_14 : 1; /* RW */ | ||
601 | unsigned long lb_irq_int_15 : 1; /* RW */ | ||
602 | unsigned long l1_nmi_int : 1; /* RW */ | ||
603 | unsigned long stop_clock : 1; /* RW */ | ||
604 | unsigned long asic_to_l1 : 1; /* RW */ | ||
605 | unsigned long l1_to_asic : 1; /* RW */ | ||
606 | unsigned long ltc_int : 1; /* RW */ | ||
607 | unsigned long la_seq_trigger : 1; /* RW */ | ||
608 | unsigned long rsvd_45_63 : 19; /* */ | ||
609 | } s; | ||
610 | }; | ||
611 | |||
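The block above follows the header's usual SHFT/MASK/union triple: a register offset, per-field shift and mask constants, and a bitfield union for structured access. As a minimal sketch of how a caller might use them (not part of the patch; the uv_read_local_mmr()/uv_write_local_mmr() accessors and the example function name are assumptions drawn from the usual UV hub helpers, not from this hunk):

#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>	/* assumed home of uv_{read,write}_local_mmr() */

/* Hypothetical example: unmask two LOCAL_INT0 sources on this hub. */
static void uv_enable_local_int0_example(void)
{
	union uvh_local_int0_enable_u enable;

	enable.v = uv_read_local_mmr(UVH_LOCAL_INT0_ENABLE);
	enable.s.lb_irq_int_0 = 1;		/* unmask LB IRQ 0 */
	enable.s.l1_nmi_int = 1;		/* unmask the L1 NMI source */
	uv_write_local_mmr(UVH_LOCAL_INT0_ENABLE, enable.v);
}

The same update could be written with the flat constants, e.g. enable.v |= UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_0_MASK; the union form is merely easier to read.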
612 | /* ========================================================================= */ | ||
251 | /* UVH_NODE_ID */ | 613 | /* UVH_NODE_ID */ |
252 | /* ========================================================================= */ | 614 | /* ========================================================================= */ |
253 | #define UVH_NODE_ID 0x0UL | 615 | #define UVH_NODE_ID 0x0UL |
@@ -284,6 +646,73 @@ union uvh_node_id_u { | |||
284 | }; | 646 | }; |
285 | 647 | ||
286 | /* ========================================================================= */ | 648 | /* ========================================================================= */ |
649 | /* UVH_NODE_PRESENT_TABLE */ | ||
650 | /* ========================================================================= */ | ||
651 | #define UVH_NODE_PRESENT_TABLE 0x1400UL | ||
652 | #define UVH_NODE_PRESENT_TABLE_DEPTH 16 | ||
653 | |||
654 | #define UVH_NODE_PRESENT_TABLE_NODES_SHFT 0 | ||
655 | #define UVH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL | ||
656 | |||
657 | union uvh_node_present_table_u { | ||
658 | unsigned long v; | ||
659 | struct uvh_node_present_table_s { | ||
660 | unsigned long nodes : 64; /* RW */ | ||
661 | } s; | ||
662 | }; | ||
663 | |||
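UVH_NODE_PRESENT_TABLE is a 16-entry array of 64-bit present-node bitmasks. A sketch of scanning it to count present nodes follows (again not part of the patch; the 8-byte entry stride, the accessor, and the function name are assumptions for illustration):

#include <linux/bitops.h>	/* hweight64() */
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>	/* assumed accessor */

/* Hypothetical example: count the nodes marked present in the table. */
static int uv_count_present_nodes_example(void)
{
	int i, count = 0;

	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
		unsigned long present =
			uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);

		count += hweight64(present);
	}
	return count;
}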
664 | /* ========================================================================= */ | ||
665 | /* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */ | ||
666 | /* ========================================================================= */ | ||
667 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL | ||
668 | |||
669 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24 | ||
670 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL | ||
671 | |||
672 | union uvh_rh_gam_alias210_redirect_config_0_mmr_u { | ||
673 | unsigned long v; | ||
674 | struct uvh_rh_gam_alias210_redirect_config_0_mmr_s { | ||
675 | unsigned long rsvd_0_23 : 24; /* */ | ||
676 | unsigned long dest_base : 22; /* RW */ | ||
677 | unsigned long rsvd_46_63: 18; /* */ | ||
678 | } s; | ||
679 | }; | ||
680 | |||
681 | /* ========================================================================= */ | ||
682 | /* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR */ | ||
683 | /* ========================================================================= */ | ||
684 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL | ||
685 | |||
686 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24 | ||
687 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL | ||
688 | |||
689 | union uvh_rh_gam_alias210_redirect_config_1_mmr_u { | ||
690 | unsigned long v; | ||
691 | struct uvh_rh_gam_alias210_redirect_config_1_mmr_s { | ||
692 | unsigned long rsvd_0_23 : 24; /* */ | ||
693 | unsigned long dest_base : 22; /* RW */ | ||
694 | unsigned long rsvd_46_63: 18; /* */ | ||
695 | } s; | ||
696 | }; | ||
697 | |||
698 | /* ========================================================================= */ | ||
699 | /* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR */ | ||
700 | /* ========================================================================= */ | ||
701 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL | ||
702 | |||
703 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24 | ||
704 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL | ||
705 | |||
706 | union uvh_rh_gam_alias210_redirect_config_2_mmr_u { | ||
707 | unsigned long v; | ||
708 | struct uvh_rh_gam_alias210_redirect_config_2_mmr_s { | ||
709 | unsigned long rsvd_0_23 : 24; /* */ | ||
710 | unsigned long dest_base : 22; /* RW */ | ||
711 | unsigned long rsvd_46_63: 18; /* */ | ||
712 | } s; | ||
713 | }; | ||
714 | |||
715 | /* ========================================================================= */ | ||
287 | /* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */ | 716 | /* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */ |
288 | /* ========================================================================= */ | 717 | /* ========================================================================= */ |
289 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL | 718 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL |
@@ -369,5 +798,77 @@ union uvh_si_addr_map_config_u { | |||
369 | } s; | 798 | } s; |
370 | }; | 799 | }; |
371 | 800 | ||
801 | /* ========================================================================= */ | ||
802 | /* UVH_SI_ALIAS0_OVERLAY_CONFIG */ | ||
803 | /* ========================================================================= */ | ||
804 | #define UVH_SI_ALIAS0_OVERLAY_CONFIG 0xc80008UL | ||
805 | |||
806 | #define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT 24 | ||
807 | #define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL | ||
808 | #define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_SHFT 48 | ||
809 | #define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL | ||
810 | #define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_SHFT 63 | ||
811 | #define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL | ||
812 | |||
813 | union uvh_si_alias0_overlay_config_u { | ||
814 | unsigned long v; | ||
815 | struct uvh_si_alias0_overlay_config_s { | ||
816 | unsigned long rsvd_0_23: 24; /* */ | ||
817 | unsigned long base : 8; /* RW */ | ||
818 | unsigned long rsvd_32_47: 16; /* */ | ||
819 | unsigned long m_alias : 5; /* RW */ | ||
820 | unsigned long rsvd_53_62: 10; /* */ | ||
821 | unsigned long enable : 1; /* RW */ | ||
822 | } s; | ||
823 | }; | ||
824 | |||
825 | /* ========================================================================= */ | ||
826 | /* UVH_SI_ALIAS1_OVERLAY_CONFIG */ | ||
827 | /* ========================================================================= */ | ||
828 | #define UVH_SI_ALIAS1_OVERLAY_CONFIG 0xc80010UL | ||
829 | |||
830 | #define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_SHFT 24 | ||
831 | #define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL | ||
832 | #define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_SHFT 48 | ||
833 | #define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL | ||
834 | #define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_SHFT 63 | ||
835 | #define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL | ||
836 | |||
837 | union uvh_si_alias1_overlay_config_u { | ||
838 | unsigned long v; | ||
839 | struct uvh_si_alias1_overlay_config_s { | ||
840 | unsigned long rsvd_0_23: 24; /* */ | ||
841 | unsigned long base : 8; /* RW */ | ||
842 | unsigned long rsvd_32_47: 16; /* */ | ||
843 | unsigned long m_alias : 5; /* RW */ | ||
844 | unsigned long rsvd_53_62: 10; /* */ | ||
845 | unsigned long enable : 1; /* RW */ | ||
846 | } s; | ||
847 | }; | ||
848 | |||
849 | /* ========================================================================= */ | ||
850 | /* UVH_SI_ALIAS2_OVERLAY_CONFIG */ | ||
851 | /* ========================================================================= */ | ||
852 | #define UVH_SI_ALIAS2_OVERLAY_CONFIG 0xc80018UL | ||
853 | |||
854 | #define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_SHFT 24 | ||
855 | #define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL | ||
856 | #define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_SHFT 48 | ||
857 | #define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL | ||
858 | #define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_SHFT 63 | ||
859 | #define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL | ||
860 | |||
861 | union uvh_si_alias2_overlay_config_u { | ||
862 | unsigned long v; | ||
863 | struct uvh_si_alias2_overlay_config_s { | ||
864 | unsigned long rsvd_0_23: 24; /* */ | ||
865 | unsigned long base : 8; /* RW */ | ||
866 | unsigned long rsvd_32_47: 16; /* */ | ||
867 | unsigned long m_alias : 5; /* RW */ | ||
868 | unsigned long rsvd_53_62: 10; /* */ | ||
869 | unsigned long enable : 1; /* RW */ | ||
870 | } s; | ||
871 | }; | ||
872 | |||
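The three SI alias overlay registers share one layout, so a single decode routine covers them all. A sketch (not part of the patch; placing the base field at bit 24 of the physical address and treating m_alias as a power-of-two size are illustrative assumptions, as is the accessor):

#include <linux/kernel.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>	/* assumed accessor */

/* Hypothetical example: report alias overlay 0 if it is enabled. */
static void uv_show_alias0_example(void)
{
	union uvh_si_alias0_overlay_config_u alias;

	alias.v = uv_read_local_mmr(UVH_SI_ALIAS0_OVERLAY_CONFIG);
	if (alias.s.enable)
		printk(KERN_INFO "UV: alias0 base 0x%lx size 0x%lx\n",
		       (unsigned long)alias.s.base <<
				UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT,
		       1UL << alias.s.m_alias);
}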
372 | 873 | ||
373 | #endif /* __ASM_X86_UV_MMRS__ */ | 874 | #endif /* __ASM_X86_UV_MMRS__ */ |
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index e8ffce898bf9..cf9f40a91c9c 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h | |||
@@ -1,11 +1,11 @@ | |||
1 | #ifndef _LINUX_KERNEL_STAT_H | 1 | #ifndef _LINUX_KERNEL_STAT_H |
2 | #define _LINUX_KERNEL_STAT_H | 2 | #define _LINUX_KERNEL_STAT_H |
3 | 3 | ||
4 | #include <asm/irq.h> | ||
5 | #include <linux/smp.h> | 4 | #include <linux/smp.h> |
6 | #include <linux/threads.h> | 5 | #include <linux/threads.h> |
7 | #include <linux/percpu.h> | 6 | #include <linux/percpu.h> |
8 | #include <linux/cpumask.h> | 7 | #include <linux/cpumask.h> |
8 | #include <asm/irq.h> | ||
9 | #include <asm/cputime.h> | 9 | #include <asm/cputime.h> |
10 | 10 | ||
11 | /* | 11 | /* |