diff options
| author | Keith Owens <kaos@sgi.com> | 2005-09-11 03:22:53 -0400 |
|---|---|---|
| committer | Tony Luck <tony.luck@intel.com> | 2005-09-11 17:08:41 -0400 |
| commit | 7f613c7d2203ae137d98fc1c38abc30fd7048637 (patch) | |
| tree | d8155a5cca33e4fe178625396886fcbb81f39e7a | |
| parent | 289d773ee89ea80dcc364ef97d1be7ad1817387e (diff) | |
[PATCH] MCA/INIT: use per cpu stacks
The bulk of the change. Use per cpu MCA/INIT stacks. Change the SAL
to OS state (sos) to be per process. Do all the assembler work on the
MCA/INIT stacks, leaving the original stack alone. Pass per cpu state
data to the C handlers for MCA and INIT, which also means changing the
mca_drv interfaces slightly. Lots of verification on whether the
original stack is usable before converting it to a sleeping process.
Signed-off-by: Keith Owens <kaos@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
| -rw-r--r-- | arch/ia64/kernel/asm-offsets.c | 40 | ||||
| -rw-r--r-- | arch/ia64/kernel/mca.c | 821 | ||||
| -rw-r--r-- | arch/ia64/kernel/mca_asm.S | 1358 | ||||
| -rw-r--r-- | arch/ia64/kernel/mca_drv.c | 37 | ||||
| -rw-r--r-- | include/asm-ia64/mca.h | 102 | ||||
| -rw-r--r-- | include/asm-ia64/mca_asm.h | 125 |
6 files changed, 1363 insertions(+), 1120 deletions(-)
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c index 7d1ae2982c53..f6a234289341 100644 --- a/arch/ia64/kernel/asm-offsets.c +++ b/arch/ia64/kernel/asm-offsets.c | |||
| @@ -211,17 +211,41 @@ void foo(void) | |||
| 211 | #endif | 211 | #endif |
| 212 | 212 | ||
| 213 | BLANK(); | 213 | BLANK(); |
| 214 | DEFINE(IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, | 214 | DEFINE(IA64_MCA_CPU_MCA_STACK_OFFSET, |
| 215 | offsetof (struct ia64_mca_cpu, proc_state_dump)); | 215 | offsetof (struct ia64_mca_cpu, mca_stack)); |
| 216 | DEFINE(IA64_MCA_CPU_STACK_OFFSET, | ||
| 217 | offsetof (struct ia64_mca_cpu, stack)); | ||
| 218 | DEFINE(IA64_MCA_CPU_STACKFRAME_OFFSET, | ||
| 219 | offsetof (struct ia64_mca_cpu, stackframe)); | ||
| 220 | DEFINE(IA64_MCA_CPU_RBSTORE_OFFSET, | ||
| 221 | offsetof (struct ia64_mca_cpu, rbstore)); | ||
| 222 | DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET, | 216 | DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET, |
| 223 | offsetof (struct ia64_mca_cpu, init_stack)); | 217 | offsetof (struct ia64_mca_cpu, init_stack)); |
| 224 | BLANK(); | 218 | BLANK(); |
| 219 | DEFINE(IA64_SAL_OS_STATE_COMMON_OFFSET, | ||
| 220 | offsetof (struct ia64_sal_os_state, sal_ra)); | ||
| 221 | DEFINE(IA64_SAL_OS_STATE_OS_GP_OFFSET, | ||
| 222 | offsetof (struct ia64_sal_os_state, os_gp)); | ||
| 223 | DEFINE(IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET, | ||
| 224 | offsetof (struct ia64_sal_os_state, pal_min_state)); | ||
| 225 | DEFINE(IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET, | ||
| 226 | offsetof (struct ia64_sal_os_state, proc_state_param)); | ||
| 227 | DEFINE(IA64_SAL_OS_STATE_SIZE, | ||
| 228 | sizeof (struct ia64_sal_os_state)); | ||
| 229 | DEFINE(IA64_PMSA_GR_OFFSET, | ||
| 230 | offsetof (struct pal_min_state_area_s, pmsa_gr)); | ||
| 231 | DEFINE(IA64_PMSA_BANK1_GR_OFFSET, | ||
| 232 | offsetof (struct pal_min_state_area_s, pmsa_bank1_gr)); | ||
| 233 | DEFINE(IA64_PMSA_PR_OFFSET, | ||
| 234 | offsetof (struct pal_min_state_area_s, pmsa_pr)); | ||
| 235 | DEFINE(IA64_PMSA_BR0_OFFSET, | ||
| 236 | offsetof (struct pal_min_state_area_s, pmsa_br0)); | ||
| 237 | DEFINE(IA64_PMSA_RSC_OFFSET, | ||
| 238 | offsetof (struct pal_min_state_area_s, pmsa_rsc)); | ||
| 239 | DEFINE(IA64_PMSA_IIP_OFFSET, | ||
| 240 | offsetof (struct pal_min_state_area_s, pmsa_iip)); | ||
| 241 | DEFINE(IA64_PMSA_IPSR_OFFSET, | ||
| 242 | offsetof (struct pal_min_state_area_s, pmsa_ipsr)); | ||
| 243 | DEFINE(IA64_PMSA_IFS_OFFSET, | ||
| 244 | offsetof (struct pal_min_state_area_s, pmsa_ifs)); | ||
| 245 | DEFINE(IA64_PMSA_XIP_OFFSET, | ||
| 246 | offsetof (struct pal_min_state_area_s, pmsa_xip)); | ||
| 247 | BLANK(); | ||
| 248 | |||
| 225 | /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */ | 249 | /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */ |
| 226 | DEFINE(IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET, offsetof (struct time_interpolator, addr)); | 250 | DEFINE(IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET, offsetof (struct time_interpolator, addr)); |
| 227 | DEFINE(IA64_TIME_INTERPOLATOR_SOURCE_OFFSET, offsetof (struct time_interpolator, source)); | 251 | DEFINE(IA64_TIME_INTERPOLATOR_SOURCE_OFFSET, offsetof (struct time_interpolator, source)); |
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 8d484204a3ff..6dc726ad7137 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c | |||
| @@ -48,6 +48,9 @@ | |||
| 48 | * Delete dead variables and functions. | 48 | * Delete dead variables and functions. |
| 49 | * Reorder to remove the need for forward declarations and to consolidate | 49 | * Reorder to remove the need for forward declarations and to consolidate |
| 50 | * related code. | 50 | * related code. |
| 51 | * | ||
| 52 | * 2005-08-12 Keith Owens <kaos@sgi.com> | ||
| 53 | * Convert MCA/INIT handlers to use per event stacks and SAL/OS state. | ||
| 51 | */ | 54 | */ |
| 52 | #include <linux/config.h> | 55 | #include <linux/config.h> |
| 53 | #include <linux/types.h> | 56 | #include <linux/types.h> |
| @@ -77,6 +80,8 @@ | |||
| 77 | #include <asm/irq.h> | 80 | #include <asm/irq.h> |
| 78 | #include <asm/hw_irq.h> | 81 | #include <asm/hw_irq.h> |
| 79 | 82 | ||
| 83 | #include "entry.h" | ||
| 84 | |||
| 80 | #if defined(IA64_MCA_DEBUG_INFO) | 85 | #if defined(IA64_MCA_DEBUG_INFO) |
| 81 | # define IA64_MCA_DEBUG(fmt...) printk(fmt) | 86 | # define IA64_MCA_DEBUG(fmt...) printk(fmt) |
| 82 | #else | 87 | #else |
| @@ -84,9 +89,7 @@ | |||
| 84 | #endif | 89 | #endif |
| 85 | 90 | ||
| 86 | /* Used by mca_asm.S */ | 91 | /* Used by mca_asm.S */ |
| 87 | ia64_mca_sal_to_os_state_t ia64_sal_to_os_handoff_state; | 92 | u32 ia64_mca_serialize; |
| 88 | ia64_mca_os_to_sal_state_t ia64_os_to_sal_handoff_state; | ||
| 89 | u64 ia64_mca_serialize; | ||
| 90 | DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */ | 93 | DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */ |
| 91 | DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */ | 94 | DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */ |
| 92 | DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */ | 95 | DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */ |
| @@ -95,8 +98,10 @@ DEFINE_PER_CPU(u64, ia64_mca_pal_base); /* vaddr PAL code granule */ | |||
| 95 | unsigned long __per_cpu_mca[NR_CPUS]; | 98 | unsigned long __per_cpu_mca[NR_CPUS]; |
| 96 | 99 | ||
| 97 | /* In mca_asm.S */ | 100 | /* In mca_asm.S */ |
| 98 | extern void ia64_monarch_init_handler (void); | 101 | extern void ia64_os_init_dispatch_monarch (void); |
| 99 | extern void ia64_slave_init_handler (void); | 102 | extern void ia64_os_init_dispatch_slave (void); |
| 103 | |||
| 104 | static int monarch_cpu = -1; | ||
| 100 | 105 | ||
| 101 | static ia64_mc_info_t ia64_mc_info; | 106 | static ia64_mc_info_t ia64_mc_info; |
| 102 | 107 | ||
| @@ -234,7 +239,8 @@ ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe) | |||
| 234 | * This function retrieves a specified error record type from SAL | 239 | * This function retrieves a specified error record type from SAL |
| 235 | * and wakes up any processes waiting for error records. | 240 | * and wakes up any processes waiting for error records. |
| 236 | * | 241 | * |
| 237 | * Inputs : sal_info_type (Type of error record MCA/CMC/CPE/INIT) | 242 | * Inputs : sal_info_type (Type of error record MCA/CMC/CPE) |
| 243 | * FIXME: remove MCA and irq_safe. | ||
| 238 | */ | 244 | */ |
| 239 | static void | 245 | static void |
| 240 | ia64_mca_log_sal_error_record(int sal_info_type) | 246 | ia64_mca_log_sal_error_record(int sal_info_type) |
| @@ -242,7 +248,7 @@ ia64_mca_log_sal_error_record(int sal_info_type) | |||
| 242 | u8 *buffer; | 248 | u8 *buffer; |
| 243 | sal_log_record_header_t *rh; | 249 | sal_log_record_header_t *rh; |
| 244 | u64 size; | 250 | u64 size; |
| 245 | int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA && sal_info_type != SAL_INFO_TYPE_INIT; | 251 | int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA; |
| 246 | #ifdef IA64_MCA_DEBUG_INFO | 252 | #ifdef IA64_MCA_DEBUG_INFO |
| 247 | static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" }; | 253 | static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" }; |
| 248 | #endif | 254 | #endif |
| @@ -330,182 +336,6 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs) | |||
| 330 | 336 | ||
| 331 | #endif /* CONFIG_ACPI */ | 337 | #endif /* CONFIG_ACPI */ |
| 332 | 338 | ||
| 333 | static void | ||
| 334 | show_min_state (pal_min_state_area_t *minstate) | ||
| 335 | { | ||
| 336 | u64 iip = minstate->pmsa_iip + ((struct ia64_psr *)(&minstate->pmsa_ipsr))->ri; | ||
| 337 | u64 xip = minstate->pmsa_xip + ((struct ia64_psr *)(&minstate->pmsa_xpsr))->ri; | ||
| 338 | |||
| 339 | printk("NaT bits\t%016lx\n", minstate->pmsa_nat_bits); | ||
| 340 | printk("pr\t\t%016lx\n", minstate->pmsa_pr); | ||
| 341 | printk("b0\t\t%016lx ", minstate->pmsa_br0); print_symbol("%s\n", minstate->pmsa_br0); | ||
| 342 | printk("ar.rsc\t\t%016lx\n", minstate->pmsa_rsc); | ||
| 343 | printk("cr.iip\t\t%016lx ", iip); print_symbol("%s\n", iip); | ||
| 344 | printk("cr.ipsr\t\t%016lx\n", minstate->pmsa_ipsr); | ||
| 345 | printk("cr.ifs\t\t%016lx\n", minstate->pmsa_ifs); | ||
| 346 | printk("xip\t\t%016lx ", xip); print_symbol("%s\n", xip); | ||
| 347 | printk("xpsr\t\t%016lx\n", minstate->pmsa_xpsr); | ||
| 348 | printk("xfs\t\t%016lx\n", minstate->pmsa_xfs); | ||
| 349 | printk("b1\t\t%016lx ", minstate->pmsa_br1); | ||
| 350 | print_symbol("%s\n", minstate->pmsa_br1); | ||
| 351 | |||
| 352 | printk("\nstatic registers r0-r15:\n"); | ||
| 353 | printk(" r0- 3 %016lx %016lx %016lx %016lx\n", | ||
| 354 | 0UL, minstate->pmsa_gr[0], minstate->pmsa_gr[1], minstate->pmsa_gr[2]); | ||
| 355 | printk(" r4- 7 %016lx %016lx %016lx %016lx\n", | ||
| 356 | minstate->pmsa_gr[3], minstate->pmsa_gr[4], | ||
| 357 | minstate->pmsa_gr[5], minstate->pmsa_gr[6]); | ||
| 358 | printk(" r8-11 %016lx %016lx %016lx %016lx\n", | ||
| 359 | minstate->pmsa_gr[7], minstate->pmsa_gr[8], | ||
| 360 | minstate->pmsa_gr[9], minstate->pmsa_gr[10]); | ||
| 361 | printk("r12-15 %016lx %016lx %016lx %016lx\n", | ||
| 362 | minstate->pmsa_gr[11], minstate->pmsa_gr[12], | ||
| 363 | minstate->pmsa_gr[13], minstate->pmsa_gr[14]); | ||
| 364 | |||
| 365 | printk("\nbank 0:\n"); | ||
| 366 | printk("r16-19 %016lx %016lx %016lx %016lx\n", | ||
| 367 | minstate->pmsa_bank0_gr[0], minstate->pmsa_bank0_gr[1], | ||
| 368 | minstate->pmsa_bank0_gr[2], minstate->pmsa_bank0_gr[3]); | ||
| 369 | printk("r20-23 %016lx %016lx %016lx %016lx\n", | ||
| 370 | minstate->pmsa_bank0_gr[4], minstate->pmsa_bank0_gr[5], | ||
| 371 | minstate->pmsa_bank0_gr[6], minstate->pmsa_bank0_gr[7]); | ||
| 372 | printk("r24-27 %016lx %016lx %016lx %016lx\n", | ||
| 373 | minstate->pmsa_bank0_gr[8], minstate->pmsa_bank0_gr[9], | ||
| 374 | minstate->pmsa_bank0_gr[10], minstate->pmsa_bank0_gr[11]); | ||
| 375 | printk("r28-31 %016lx %016lx %016lx %016lx\n", | ||
| 376 | minstate->pmsa_bank0_gr[12], minstate->pmsa_bank0_gr[13], | ||
| 377 | minstate->pmsa_bank0_gr[14], minstate->pmsa_bank0_gr[15]); | ||
| 378 | |||
| 379 | printk("\nbank 1:\n"); | ||
| 380 | printk("r16-19 %016lx %016lx %016lx %016lx\n", | ||
| 381 | minstate->pmsa_bank1_gr[0], minstate->pmsa_bank1_gr[1], | ||
| 382 | minstate->pmsa_bank1_gr[2], minstate->pmsa_bank1_gr[3]); | ||
| 383 | printk("r20-23 %016lx %016lx %016lx %016lx\n", | ||
| 384 | minstate->pmsa_bank1_gr[4], minstate->pmsa_bank1_gr[5], | ||
| 385 | minstate->pmsa_bank1_gr[6], minstate->pmsa_bank1_gr[7]); | ||
| 386 | printk("r24-27 %016lx %016lx %016lx %016lx\n", | ||
| 387 | minstate->pmsa_bank1_gr[8], minstate->pmsa_bank1_gr[9], | ||
| 388 | minstate->pmsa_bank1_gr[10], minstate->pmsa_bank1_gr[11]); | ||
| 389 | printk("r28-31 %016lx %016lx %016lx %016lx\n", | ||
| 390 | minstate->pmsa_bank1_gr[12], minstate->pmsa_bank1_gr[13], | ||
| 391 | minstate->pmsa_bank1_gr[14], minstate->pmsa_bank1_gr[15]); | ||
| 392 | } | ||
| 393 | |||
| 394 | static void | ||
| 395 | fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_stack *sw) | ||
| 396 | { | ||
| 397 | u64 *dst_banked, *src_banked, bit, shift, nat_bits; | ||
| 398 | int i; | ||
| 399 | |||
| 400 | /* | ||
| 401 | * First, update the pt-regs and switch-stack structures with the contents stored | ||
| 402 | * in the min-state area: | ||
| 403 | */ | ||
| 404 | if (((struct ia64_psr *) &ms->pmsa_ipsr)->ic == 0) { | ||
| 405 | pt->cr_ipsr = ms->pmsa_xpsr; | ||
| 406 | pt->cr_iip = ms->pmsa_xip; | ||
| 407 | pt->cr_ifs = ms->pmsa_xfs; | ||
| 408 | } else { | ||
| 409 | pt->cr_ipsr = ms->pmsa_ipsr; | ||
| 410 | pt->cr_iip = ms->pmsa_iip; | ||
| 411 | pt->cr_ifs = ms->pmsa_ifs; | ||
| 412 | } | ||
| 413 | pt->ar_rsc = ms->pmsa_rsc; | ||
| 414 | pt->pr = ms->pmsa_pr; | ||
| 415 | pt->r1 = ms->pmsa_gr[0]; | ||
| 416 | pt->r2 = ms->pmsa_gr[1]; | ||
| 417 | pt->r3 = ms->pmsa_gr[2]; | ||
| 418 | sw->r4 = ms->pmsa_gr[3]; | ||
| 419 | sw->r5 = ms->pmsa_gr[4]; | ||
| 420 | sw->r6 = ms->pmsa_gr[5]; | ||
| 421 | sw->r7 = ms->pmsa_gr[6]; | ||
| 422 | pt->r8 = ms->pmsa_gr[7]; | ||
| 423 | pt->r9 = ms->pmsa_gr[8]; | ||
| 424 | pt->r10 = ms->pmsa_gr[9]; | ||
| 425 | pt->r11 = ms->pmsa_gr[10]; | ||
| 426 | pt->r12 = ms->pmsa_gr[11]; | ||
| 427 | pt->r13 = ms->pmsa_gr[12]; | ||
| 428 | pt->r14 = ms->pmsa_gr[13]; | ||
| 429 | pt->r15 = ms->pmsa_gr[14]; | ||
| 430 | dst_banked = &pt->r16; /* r16-r31 are contiguous in struct pt_regs */ | ||
| 431 | src_banked = ms->pmsa_bank1_gr; | ||
| 432 | for (i = 0; i < 16; ++i) | ||
| 433 | dst_banked[i] = src_banked[i]; | ||
| 434 | pt->b0 = ms->pmsa_br0; | ||
| 435 | sw->b1 = ms->pmsa_br1; | ||
| 436 | |||
| 437 | /* construct the NaT bits for the pt-regs structure: */ | ||
| 438 | # define PUT_NAT_BIT(dst, addr) \ | ||
| 439 | do { \ | ||
| 440 | bit = nat_bits & 1; nat_bits >>= 1; \ | ||
| 441 | shift = ((unsigned long) addr >> 3) & 0x3f; \ | ||
| 442 | dst = ((dst) & ~(1UL << shift)) | (bit << shift); \ | ||
| 443 | } while (0) | ||
| 444 | |||
| 445 | /* Rotate the saved NaT bits such that bit 0 corresponds to pmsa_gr[0]: */ | ||
| 446 | shift = ((unsigned long) &ms->pmsa_gr[0] >> 3) & 0x3f; | ||
| 447 | nat_bits = (ms->pmsa_nat_bits >> shift) | (ms->pmsa_nat_bits << (64 - shift)); | ||
| 448 | |||
| 449 | PUT_NAT_BIT(sw->caller_unat, &pt->r1); | ||
| 450 | PUT_NAT_BIT(sw->caller_unat, &pt->r2); | ||
| 451 | PUT_NAT_BIT(sw->caller_unat, &pt->r3); | ||
| 452 | PUT_NAT_BIT(sw->ar_unat, &sw->r4); | ||
| 453 | PUT_NAT_BIT(sw->ar_unat, &sw->r5); | ||
| 454 | PUT_NAT_BIT(sw->ar_unat, &sw->r6); | ||
| 455 | PUT_NAT_BIT(sw->ar_unat, &sw->r7); | ||
| 456 | PUT_NAT_BIT(sw->caller_unat, &pt->r8); PUT_NAT_BIT(sw->caller_unat, &pt->r9); | ||
| 457 | PUT_NAT_BIT(sw->caller_unat, &pt->r10); PUT_NAT_BIT(sw->caller_unat, &pt->r11); | ||
| 458 | PUT_NAT_BIT(sw->caller_unat, &pt->r12); PUT_NAT_BIT(sw->caller_unat, &pt->r13); | ||
| 459 | PUT_NAT_BIT(sw->caller_unat, &pt->r14); PUT_NAT_BIT(sw->caller_unat, &pt->r15); | ||
| 460 | nat_bits >>= 16; /* skip over bank0 NaT bits */ | ||
| 461 | PUT_NAT_BIT(sw->caller_unat, &pt->r16); PUT_NAT_BIT(sw->caller_unat, &pt->r17); | ||
| 462 | PUT_NAT_BIT(sw->caller_unat, &pt->r18); PUT_NAT_BIT(sw->caller_unat, &pt->r19); | ||
| 463 | PUT_NAT_BIT(sw->caller_unat, &pt->r20); PUT_NAT_BIT(sw->caller_unat, &pt->r21); | ||
| 464 | PUT_NAT_BIT(sw->caller_unat, &pt->r22); PUT_NAT_BIT(sw->caller_unat, &pt->r23); | ||
| 465 | PUT_NAT_BIT(sw->caller_unat, &pt->r24); PUT_NAT_BIT(sw->caller_unat, &pt->r25); | ||
| 466 | PUT_NAT_BIT(sw->caller_unat, &pt->r26); PUT_NAT_BIT(sw->caller_unat, &pt->r27); | ||
| 467 | PUT_NAT_BIT(sw->caller_unat, &pt->r28); PUT_NAT_BIT(sw->caller_unat, &pt->r29); | ||
| 468 | PUT_NAT_BIT(sw->caller_unat, &pt->r30); PUT_NAT_BIT(sw->caller_unat, &pt->r31); | ||
| 469 | } | ||
| 470 | |||
| 471 | static void | ||
| 472 | init_handler_platform (pal_min_state_area_t *ms, | ||
| 473 | struct pt_regs *pt, struct switch_stack *sw) | ||
| 474 | { | ||
| 475 | struct unw_frame_info info; | ||
| 476 | |||
| 477 | /* if a kernel debugger is available call it here else just dump the registers */ | ||
| 478 | |||
| 479 | /* | ||
| 480 | * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000, INIT can be | ||
| 481 | * generated via the BMC's command-line interface, but since the console is on the | ||
| 482 | * same serial line, the user will need some time to switch out of the BMC before | ||
| 483 | * the dump begins. | ||
| 484 | */ | ||
| 485 | printk("Delaying for 5 seconds...\n"); | ||
| 486 | udelay(5*1000000); | ||
| 487 | show_min_state(ms); | ||
| 488 | |||
| 489 | printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm); | ||
| 490 | fetch_min_state(ms, pt, sw); | ||
| 491 | unw_init_from_interruption(&info, current, pt, sw); | ||
| 492 | ia64_do_show_stack(&info, NULL); | ||
| 493 | |||
| 494 | if (read_trylock(&tasklist_lock)) { | ||
| 495 | struct task_struct *g, *t; | ||
| 496 | do_each_thread (g, t) { | ||
| 497 | if (t == current) | ||
| 498 | continue; | ||
| 499 | |||
| 500 | printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm); | ||
| 501 | show_stack(t, NULL); | ||
| 502 | } while_each_thread (g, t); | ||
| 503 | } | ||
| 504 | |||
| 505 | printk("\nINIT dump complete. Please reboot now.\n"); | ||
| 506 | while (1); /* hang city if no debugger */ | ||
| 507 | } | ||
| 508 | |||
| 509 | #ifdef CONFIG_ACPI | 339 | #ifdef CONFIG_ACPI |
| 510 | /* | 340 | /* |
| 511 | * ia64_mca_register_cpev | 341 | * ia64_mca_register_cpev |
| @@ -648,42 +478,6 @@ ia64_mca_cmc_vector_enable_keventd(void *unused) | |||
| 648 | } | 478 | } |
| 649 | 479 | ||
| 650 | /* | 480 | /* |
| 651 | * ia64_mca_wakeup_ipi_wait | ||
| 652 | * | ||
| 653 | * Wait for the inter-cpu interrupt to be sent by the | ||
| 654 | * monarch processor once it is done with handling the | ||
| 655 | * MCA. | ||
| 656 | * | ||
| 657 | * Inputs : None | ||
| 658 | * Outputs : None | ||
| 659 | */ | ||
| 660 | static void | ||
| 661 | ia64_mca_wakeup_ipi_wait(void) | ||
| 662 | { | ||
| 663 | int irr_num = (IA64_MCA_WAKEUP_VECTOR >> 6); | ||
| 664 | int irr_bit = (IA64_MCA_WAKEUP_VECTOR & 0x3f); | ||
| 665 | u64 irr = 0; | ||
| 666 | |||
| 667 | do { | ||
| 668 | switch(irr_num) { | ||
| 669 | case 0: | ||
| 670 | irr = ia64_getreg(_IA64_REG_CR_IRR0); | ||
| 671 | break; | ||
| 672 | case 1: | ||
| 673 | irr = ia64_getreg(_IA64_REG_CR_IRR1); | ||
| 674 | break; | ||
| 675 | case 2: | ||
| 676 | irr = ia64_getreg(_IA64_REG_CR_IRR2); | ||
| 677 | break; | ||
| 678 | case 3: | ||
| 679 | irr = ia64_getreg(_IA64_REG_CR_IRR3); | ||
| 680 | break; | ||
| 681 | } | ||
| 682 | cpu_relax(); | ||
| 683 | } while (!(irr & (1UL << irr_bit))) ; | ||
| 684 | } | ||
| 685 | |||
| 686 | /* | ||
| 687 | * ia64_mca_wakeup | 481 | * ia64_mca_wakeup |
| 688 | * | 482 | * |
| 689 | * Send an inter-cpu interrupt to wake-up a particular cpu | 483 | * Send an inter-cpu interrupt to wake-up a particular cpu |
| @@ -748,11 +542,9 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs) | |||
| 748 | */ | 542 | */ |
| 749 | ia64_sal_mc_rendez(); | 543 | ia64_sal_mc_rendez(); |
| 750 | 544 | ||
| 751 | /* Wait for the wakeup IPI from the monarch | 545 | /* Wait for the monarch cpu to exit. */ |
| 752 | * This waiting is done by polling on the wakeup-interrupt | 546 | while (monarch_cpu != -1) |
| 753 | * vector bit in the processor's IRRs | 547 | cpu_relax(); /* spin until monarch leaves */ |
| 754 | */ | ||
| 755 | ia64_mca_wakeup_ipi_wait(); | ||
| 756 | 548 | ||
| 757 | /* Enable all interrupts */ | 549 | /* Enable all interrupts */ |
| 758 | local_irq_restore(flags); | 550 | local_irq_restore(flags); |
| @@ -780,53 +572,13 @@ ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs) | |||
| 780 | return IRQ_HANDLED; | 572 | return IRQ_HANDLED; |
| 781 | } | 573 | } |
| 782 | 574 | ||
| 783 | /* | ||
| 784 | * ia64_return_to_sal_check | ||
| 785 | * | ||
| 786 | * This is function called before going back from the OS_MCA handler | ||
| 787 | * to the OS_MCA dispatch code which finally takes the control back | ||
| 788 | * to the SAL. | ||
| 789 | * The main purpose of this routine is to setup the OS_MCA to SAL | ||
| 790 | * return state which can be used by the OS_MCA dispatch code | ||
| 791 | * just before going back to SAL. | ||
| 792 | * | ||
| 793 | * Inputs : None | ||
| 794 | * Outputs : None | ||
| 795 | */ | ||
| 796 | |||
| 797 | static void | ||
| 798 | ia64_return_to_sal_check(int recover) | ||
| 799 | { | ||
| 800 | |||
| 801 | /* Copy over some relevant stuff from the sal_to_os_mca_handoff | ||
| 802 | * so that it can be used at the time of os_mca_to_sal_handoff | ||
| 803 | */ | ||
| 804 | ia64_os_to_sal_handoff_state.imots_sal_gp = | ||
| 805 | ia64_sal_to_os_handoff_state.imsto_sal_gp; | ||
| 806 | |||
| 807 | ia64_os_to_sal_handoff_state.imots_sal_check_ra = | ||
| 808 | ia64_sal_to_os_handoff_state.imsto_sal_check_ra; | ||
| 809 | |||
| 810 | if (recover) | ||
| 811 | ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED; | ||
| 812 | else | ||
| 813 | ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_COLD_BOOT; | ||
| 814 | |||
| 815 | /* Default = tell SAL to return to same context */ | ||
| 816 | ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT; | ||
| 817 | |||
| 818 | ia64_os_to_sal_handoff_state.imots_new_min_state = | ||
| 819 | (u64 *)ia64_sal_to_os_handoff_state.pal_min_state; | ||
| 820 | |||
| 821 | } | ||
| 822 | |||
| 823 | /* Function pointer for extra MCA recovery */ | 575 | /* Function pointer for extra MCA recovery */ |
| 824 | int (*ia64_mca_ucmc_extension) | 576 | int (*ia64_mca_ucmc_extension) |
| 825 | (void*,ia64_mca_sal_to_os_state_t*,ia64_mca_os_to_sal_state_t*) | 577 | (void*,struct ia64_sal_os_state*) |
| 826 | = NULL; | 578 | = NULL; |
| 827 | 579 | ||
| 828 | int | 580 | int |
| 829 | ia64_reg_MCA_extension(void *fn) | 581 | ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *)) |
| 830 | { | 582 | { |
| 831 | if (ia64_mca_ucmc_extension) | 583 | if (ia64_mca_ucmc_extension) |
| 832 | return 1; | 584 | return 1; |
| @@ -845,8 +597,321 @@ ia64_unreg_MCA_extension(void) | |||
| 845 | EXPORT_SYMBOL(ia64_reg_MCA_extension); | 597 | EXPORT_SYMBOL(ia64_reg_MCA_extension); |
| 846 | EXPORT_SYMBOL(ia64_unreg_MCA_extension); | 598 | EXPORT_SYMBOL(ia64_unreg_MCA_extension); |
| 847 | 599 | ||
| 600 | |||
| 601 | static inline void | ||
| 602 | copy_reg(const u64 *fr, u64 fnat, u64 *tr, u64 *tnat) | ||
| 603 | { | ||
| 604 | u64 fslot, tslot, nat; | ||
| 605 | *tr = *fr; | ||
| 606 | fslot = ((unsigned long)fr >> 3) & 63; | ||
| 607 | tslot = ((unsigned long)tr >> 3) & 63; | ||
| 608 | *tnat &= ~(1UL << tslot); | ||
| 609 | nat = (fnat >> fslot) & 1; | ||
| 610 | *tnat |= (nat << tslot); | ||
| 611 | } | ||
| 612 | |||
| 613 | /* On entry to this routine, we are running on the per cpu stack, see | ||
| 614 | * mca_asm.h. The original stack has not been touched by this event. Some of | ||
| 615 | * the original stack's registers will be in the RBS on this stack. This stack | ||
| 616 | * also contains a partial pt_regs and switch_stack, the rest of the data is in | ||
| 617 | * PAL minstate. | ||
| 618 | * | ||
| 619 | * The first thing to do is modify the original stack to look like a blocked | ||
| 620 | * task so we can run backtrace on the original task. Also mark the per cpu | ||
| 621 | * stack as current to ensure that we use the correct task state, it also means | ||
| 622 | * that we can do backtrace on the MCA/INIT handler code itself. | ||
| 623 | */ | ||
| 624 | |||
| 625 | static task_t * | ||
| 626 | ia64_mca_modify_original_stack(struct pt_regs *regs, | ||
| 627 | const struct switch_stack *sw, | ||
| 628 | struct ia64_sal_os_state *sos, | ||
| 629 | const char *type) | ||
| 630 | { | ||
| 631 | char *p, comm[sizeof(current->comm)]; | ||
| 632 | ia64_va va; | ||
| 633 | extern char ia64_leave_kernel[]; /* Need asm address, not function descriptor */ | ||
| 634 | const pal_min_state_area_t *ms = sos->pal_min_state; | ||
| 635 | task_t *previous_current; | ||
| 636 | struct pt_regs *old_regs; | ||
| 637 | struct switch_stack *old_sw; | ||
| 638 | unsigned size = sizeof(struct pt_regs) + | ||
| 639 | sizeof(struct switch_stack) + 16; | ||
| 640 | u64 *old_bspstore, *old_bsp; | ||
| 641 | u64 *new_bspstore, *new_bsp; | ||
| 642 | u64 old_unat, old_rnat, new_rnat, nat; | ||
| 643 | u64 slots, loadrs = regs->loadrs; | ||
| 644 | u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1]; | ||
| 645 | u64 ar_bspstore = regs->ar_bspstore; | ||
| 646 | u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16); | ||
| 647 | const u64 *bank; | ||
| 648 | const char *msg; | ||
| 649 | int cpu = smp_processor_id(); | ||
| 650 | |||
| 651 | previous_current = curr_task(cpu); | ||
| 652 | set_curr_task(cpu, current); | ||
| 653 | if ((p = strchr(current->comm, ' '))) | ||
| 654 | *p = '\0'; | ||
| 655 | |||
| 656 | /* Best effort attempt to cope with MCA/INIT delivered while in | ||
| 657 | * physical mode. | ||
| 658 | */ | ||
| 659 | regs->cr_ipsr = ms->pmsa_ipsr; | ||
| 660 | if (ia64_psr(regs)->dt == 0) { | ||
| 661 | va.l = r12; | ||
| 662 | if (va.f.reg == 0) { | ||
| 663 | va.f.reg = 7; | ||
| 664 | r12 = va.l; | ||
| 665 | } | ||
| 666 | va.l = r13; | ||
| 667 | if (va.f.reg == 0) { | ||
| 668 | va.f.reg = 7; | ||
| 669 | r13 = va.l; | ||
| 670 | } | ||
| 671 | } | ||
| 672 | if (ia64_psr(regs)->rt == 0) { | ||
| 673 | va.l = ar_bspstore; | ||
| 674 | if (va.f.reg == 0) { | ||
| 675 | va.f.reg = 7; | ||
| 676 | ar_bspstore = va.l; | ||
| 677 | } | ||
| 678 | va.l = ar_bsp; | ||
| 679 | if (va.f.reg == 0) { | ||
| 680 | va.f.reg = 7; | ||
| 681 | ar_bsp = va.l; | ||
| 682 | } | ||
| 683 | } | ||
| 684 | |||
| 685 | /* mca_asm.S ia64_old_stack() cannot assume that the dirty registers | ||
| 686 | * have been copied to the old stack, the old stack may fail the | ||
| 687 | * validation tests below. So ia64_old_stack() must restore the dirty | ||
| 688 | * registers from the new stack. The old and new bspstore probably | ||
| 689 | * have different alignments, so loadrs calculated on the old bsp | ||
| 690 | * cannot be used to restore from the new bsp. Calculate a suitable | ||
| 691 | * loadrs for the new stack and save it in the new pt_regs, where | ||
| 692 | * ia64_old_stack() can get it. | ||
| 693 | */ | ||
| 694 | old_bspstore = (u64 *)ar_bspstore; | ||
| 695 | old_bsp = (u64 *)ar_bsp; | ||
| 696 | slots = ia64_rse_num_regs(old_bspstore, old_bsp); | ||
| 697 | new_bspstore = (u64 *)((u64)current + IA64_RBS_OFFSET); | ||
| 698 | new_bsp = ia64_rse_skip_regs(new_bspstore, slots); | ||
| 699 | regs->loadrs = (new_bsp - new_bspstore) * 8 << 16; | ||
| 700 | |||
| 701 | /* Verify the previous stack state before we change it */ | ||
| 702 | if (user_mode(regs)) { | ||
| 703 | msg = "occurred in user space"; | ||
| 704 | goto no_mod; | ||
| 705 | } | ||
| 706 | if (r13 != sos->prev_IA64_KR_CURRENT) { | ||
| 707 | msg = "inconsistent previous current and r13"; | ||
| 708 | goto no_mod; | ||
| 709 | } | ||
| 710 | if ((r12 - r13) >= KERNEL_STACK_SIZE) { | ||
| 711 | msg = "inconsistent r12 and r13"; | ||
| 712 | goto no_mod; | ||
| 713 | } | ||
| 714 | if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) { | ||
| 715 | msg = "inconsistent ar.bspstore and r13"; | ||
| 716 | goto no_mod; | ||
| 717 | } | ||
| 718 | va.p = old_bspstore; | ||
| 719 | if (va.f.reg < 5) { | ||
| 720 | msg = "old_bspstore is in the wrong region"; | ||
| 721 | goto no_mod; | ||
| 722 | } | ||
| 723 | if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) { | ||
| 724 | msg = "inconsistent ar.bsp and r13"; | ||
| 725 | goto no_mod; | ||
| 726 | } | ||
| 727 | size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8; | ||
| 728 | if (ar_bspstore + size > r12) { | ||
| 729 | msg = "no room for blocked state"; | ||
| 730 | goto no_mod; | ||
| 731 | } | ||
| 732 | |||
| 733 | /* Change the comm field on the MCA/INT task to include the pid that | ||
| 734 | * was interrupted, it makes for easier debugging. If that pid was 0 | ||
| 735 | * (swapper or nested MCA/INIT) then use the start of the previous comm | ||
| 736 | * field suffixed with its cpu. | ||
| 737 | */ | ||
| 738 | if (previous_current->pid) | ||
| 739 | snprintf(comm, sizeof(comm), "%s %d", | ||
| 740 | current->comm, previous_current->pid); | ||
| 741 | else { | ||
| 742 | int l; | ||
| 743 | if ((p = strchr(previous_current->comm, ' '))) | ||
| 744 | l = p - previous_current->comm; | ||
| 745 | else | ||
| 746 | l = strlen(previous_current->comm); | ||
| 747 | snprintf(comm, sizeof(comm), "%s %*s %d", | ||
| 748 | current->comm, l, previous_current->comm, | ||
| 749 | previous_current->thread_info->cpu); | ||
| 750 | } | ||
| 751 | memcpy(current->comm, comm, sizeof(current->comm)); | ||
| 752 | |||
| 753 | /* Make the original task look blocked. First stack a struct pt_regs, | ||
| 754 | * describing the state at the time of interrupt. mca_asm.S built a | ||
| 755 | * partial pt_regs, copy it and fill in the blanks using minstate. | ||
| 756 | */ | ||
| 757 | p = (char *)r12 - sizeof(*regs); | ||
| 758 | old_regs = (struct pt_regs *)p; | ||
| 759 | memcpy(old_regs, regs, sizeof(*regs)); | ||
| 760 | /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use | ||
| 761 | * pmsa_{xip,xpsr,xfs} | ||
| 762 | */ | ||
| 763 | if (ia64_psr(regs)->ic) { | ||
| 764 | old_regs->cr_iip = ms->pmsa_iip; | ||
| 765 | old_regs->cr_ipsr = ms->pmsa_ipsr; | ||
| 766 | old_regs->cr_ifs = ms->pmsa_ifs; | ||
| 767 | } else { | ||
| 768 | old_regs->cr_iip = ms->pmsa_xip; | ||
| 769 | old_regs->cr_ipsr = ms->pmsa_xpsr; | ||
| 770 | old_regs->cr_ifs = ms->pmsa_xfs; | ||
| 771 | } | ||
| 772 | old_regs->pr = ms->pmsa_pr; | ||
| 773 | old_regs->b0 = ms->pmsa_br0; | ||
| 774 | old_regs->loadrs = loadrs; | ||
| 775 | old_regs->ar_rsc = ms->pmsa_rsc; | ||
| 776 | old_unat = old_regs->ar_unat; | ||
| 777 | copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &old_regs->r1, &old_unat); | ||
| 778 | copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &old_regs->r2, &old_unat); | ||
| 779 | copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &old_regs->r3, &old_unat); | ||
| 780 | copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &old_regs->r8, &old_unat); | ||
| 781 | copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &old_regs->r9, &old_unat); | ||
| 782 | copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &old_regs->r10, &old_unat); | ||
| 783 | copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &old_regs->r11, &old_unat); | ||
| 784 | copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &old_regs->r12, &old_unat); | ||
| 785 | copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &old_regs->r13, &old_unat); | ||
| 786 | copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &old_regs->r14, &old_unat); | ||
| 787 | copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &old_regs->r15, &old_unat); | ||
| 788 | if (ia64_psr(old_regs)->bn) | ||
| 789 | bank = ms->pmsa_bank1_gr; | ||
| 790 | else | ||
| 791 | bank = ms->pmsa_bank0_gr; | ||
| 792 | copy_reg(&bank[16-16], ms->pmsa_nat_bits, &old_regs->r16, &old_unat); | ||
| 793 | copy_reg(&bank[17-16], ms->pmsa_nat_bits, &old_regs->r17, &old_unat); | ||
| 794 | copy_reg(&bank[18-16], ms->pmsa_nat_bits, &old_regs->r18, &old_unat); | ||
| 795 | copy_reg(&bank[19-16], ms->pmsa_nat_bits, &old_regs->r19, &old_unat); | ||
| 796 | copy_reg(&bank[20-16], ms->pmsa_nat_bits, &old_regs->r20, &old_unat); | ||
| 797 | copy_reg(&bank[21-16], ms->pmsa_nat_bits, &old_regs->r21, &old_unat); | ||
| 798 | copy_reg(&bank[22-16], ms->pmsa_nat_bits, &old_regs->r22, &old_unat); | ||
| 799 | copy_reg(&bank[23-16], ms->pmsa_nat_bits, &old_regs->r23, &old_unat); | ||
| 800 | copy_reg(&bank[24-16], ms->pmsa_nat_bits, &old_regs->r24, &old_unat); | ||
| 801 | copy_reg(&bank[25-16], ms->pmsa_nat_bits, &old_regs->r25, &old_unat); | ||
| 802 | copy_reg(&bank[26-16], ms->pmsa_nat_bits, &old_regs->r26, &old_unat); | ||
| 803 | copy_reg(&bank[27-16], ms->pmsa_nat_bits, &old_regs->r27, &old_unat); | ||
| 804 | copy_reg(&bank[28-16], ms->pmsa_nat_bits, &old_regs->r28, &old_unat); | ||
| 805 | copy_reg(&bank[29-16], ms->pmsa_nat_bits, &old_regs->r29, &old_unat); | ||
| 806 | copy_reg(&bank[30-16], ms->pmsa_nat_bits, &old_regs->r30, &old_unat); | ||
| 807 | copy_reg(&bank[31-16], ms->pmsa_nat_bits, &old_regs->r31, &old_unat); | ||
| 808 | |||
| 809 | /* Next stack a struct switch_stack. mca_asm.S built a partial | ||
| 810 | * switch_stack, copy it and fill in the blanks using pt_regs and | ||
| 811 | * minstate. | ||
| 812 | * | ||
| 813 | * In the synthesized switch_stack, b0 points to ia64_leave_kernel, | ||
| 814 | * ar.pfs is set to 0. | ||
| 815 | * | ||
| 816 | * unwind.c::unw_unwind() does special processing for interrupt frames. | ||
| 817 | * It checks if the PRED_NON_SYSCALL predicate is set, if the predicate | ||
| 818 | * is clear then unw_unwind() does _not_ adjust bsp over pt_regs. Not | ||
| 819 | * that this is documented, of course. Set PRED_NON_SYSCALL in the | ||
| 820 | * switch_stack on the original stack so it will unwind correctly when | ||
| 821 | * unwind.c reads pt_regs. | ||
| 822 | * | ||
| 823 | * thread.ksp is updated to point to the synthesized switch_stack. | ||
| 824 | */ | ||
| 825 | p -= sizeof(struct switch_stack); | ||
| 826 | old_sw = (struct switch_stack *)p; | ||
| 827 | memcpy(old_sw, sw, sizeof(*sw)); | ||
| 828 | old_sw->caller_unat = old_unat; | ||
| 829 | old_sw->ar_fpsr = old_regs->ar_fpsr; | ||
| 830 | copy_reg(&ms->pmsa_gr[4-1], ms->pmsa_nat_bits, &old_sw->r4, &old_unat); | ||
| 831 | copy_reg(&ms->pmsa_gr[5-1], ms->pmsa_nat_bits, &old_sw->r5, &old_unat); | ||
| 832 | copy_reg(&ms->pmsa_gr[6-1], ms->pmsa_nat_bits, &old_sw->r6, &old_unat); | ||
| 833 | copy_reg(&ms->pmsa_gr[7-1], ms->pmsa_nat_bits, &old_sw->r7, &old_unat); | ||
| 834 | old_sw->b0 = (u64)ia64_leave_kernel; | ||
| 835 | old_sw->b1 = ms->pmsa_br1; | ||
| 836 | old_sw->ar_pfs = 0; | ||
| 837 | old_sw->ar_unat = old_unat; | ||
| 838 | old_sw->pr = old_regs->pr | (1UL << PRED_NON_SYSCALL); | ||
| 839 | previous_current->thread.ksp = (u64)p - 16; | ||
| 840 | |||
| 841 | /* Finally copy the original stack's registers back to its RBS. | ||
| 842 | * Registers from ar.bspstore through ar.bsp at the time of the event | ||
| 843 | * are in the current RBS, copy them back to the original stack. The | ||
| 844 | * copy must be done register by register because the original bspstore | ||
| 845 | * and the current one have different alignments, so the saved RNAT | ||
| 846 | * data occurs at different places. | ||
| 847 | * | ||
| 848 | * mca_asm does cover, so the old_bsp already includes all registers at | ||
| 849 | * the time of MCA/INIT. It also does flushrs, so all registers before | ||
| 850 | * this function have been written to backing store on the MCA/INIT | ||
| 851 | * stack. | ||
| 852 | */ | ||
| 853 | new_rnat = ia64_get_rnat(ia64_rse_rnat_addr(new_bspstore)); | ||
| 854 | old_rnat = regs->ar_rnat; | ||
| 855 | while (slots--) { | ||
| 856 | if (ia64_rse_is_rnat_slot(new_bspstore)) { | ||
| 857 | new_rnat = ia64_get_rnat(new_bspstore++); | ||
| 858 | } | ||
| 859 | if (ia64_rse_is_rnat_slot(old_bspstore)) { | ||
| 860 | *old_bspstore++ = old_rnat; | ||
| 861 | old_rnat = 0; | ||
| 862 | } | ||
| 863 | nat = (new_rnat >> ia64_rse_slot_num(new_bspstore)) & 1UL; | ||
| 864 | old_rnat &= ~(1UL << ia64_rse_slot_num(old_bspstore)); | ||
| 865 | old_rnat |= (nat << ia64_rse_slot_num(old_bspstore)); | ||
| 866 | *old_bspstore++ = *new_bspstore++; | ||
| 867 | } | ||
| 868 | old_sw->ar_bspstore = (unsigned long)old_bspstore; | ||
| 869 | old_sw->ar_rnat = old_rnat; | ||
| 870 | |||
| 871 | sos->prev_task = previous_current; | ||
| 872 | return previous_current; | ||
| 873 | |||
| 874 | no_mod: | ||
| 875 | printk(KERN_INFO "cpu %d, %s %s, original stack not modified\n", | ||
| 876 | smp_processor_id(), type, msg); | ||
| 877 | return previous_current; | ||
| 878 | } | ||
| 879 | |||
| 880 | /* The monarch/slave interaction is based on monarch_cpu and requires that all | ||
| 881 | * slaves have entered rendezvous before the monarch leaves. If any cpu has | ||
| 882 | * not entered rendezvous yet then wait a bit. The assumption is that any | ||
| 883 | * slave that has not rendezvoused after a reasonable time is never going to do | ||
| 884 | * so. In this context, slave includes cpus that respond to the MCA rendezvous | ||
| 885 | * interrupt, as well as cpus that receive the INIT slave event. | ||
| 886 | */ | ||
| 887 | |||
| 888 | static void | ||
| 889 | ia64_wait_for_slaves(int monarch) | ||
| 890 | { | ||
| 891 | int c, wait = 0; | ||
| 892 | for_each_online_cpu(c) { | ||
| 893 | if (c == monarch) | ||
| 894 | continue; | ||
| 895 | if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) { | ||
| 896 | udelay(1000); /* short wait first */ | ||
| 897 | wait = 1; | ||
| 898 | break; | ||
| 899 | } | ||
| 900 | } | ||
| 901 | if (!wait) | ||
| 902 | return; | ||
| 903 | for_each_online_cpu(c) { | ||
| 904 | if (c == monarch) | ||
| 905 | continue; | ||
| 906 | if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) { | ||
| 907 | udelay(5*1000000); /* wait 5 seconds for slaves (arbitrary) */ | ||
| 908 | break; | ||
| 909 | } | ||
| 910 | } | ||
| 911 | } | ||
| 912 | |||
| 848 | /* | 913 | /* |
| 849 | * ia64_mca_ucmc_handler | 914 | * ia64_mca_handler |
| 850 | * | 915 | * |
| 851 | * This is uncorrectable machine check handler called from OS_MCA | 916 | * This is uncorrectable machine check handler called from OS_MCA |
| 852 | * dispatch code which is in turn called from SAL_CHECK(). | 917 | * dispatch code which is in turn called from SAL_CHECK(). |
| @@ -857,16 +922,28 @@ EXPORT_SYMBOL(ia64_unreg_MCA_extension); | |||
| 857 | * further MCA logging is enabled by clearing logs. | 922 | * further MCA logging is enabled by clearing logs. |
| 858 | * Monarch also has the duty of sending wakeup-IPIs to pull the | 923 | * Monarch also has the duty of sending wakeup-IPIs to pull the |
| 859 | * slave processors out of rendezvous spinloop. | 924 | * slave processors out of rendezvous spinloop. |
| 860 | * | ||
| 861 | * Inputs : None | ||
| 862 | * Outputs : None | ||
| 863 | */ | 925 | */ |
| 864 | void | 926 | void |
| 865 | ia64_mca_ucmc_handler(void) | 927 | ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, |
| 928 | struct ia64_sal_os_state *sos) | ||
| 866 | { | 929 | { |
| 867 | pal_processor_state_info_t *psp = (pal_processor_state_info_t *) | 930 | pal_processor_state_info_t *psp = (pal_processor_state_info_t *) |
| 868 | &ia64_sal_to_os_handoff_state.proc_state_param; | 931 | &sos->proc_state_param; |
| 869 | int recover; | 932 | int recover, cpu = smp_processor_id(); |
| 933 | task_t *previous_current; | ||
| 934 | |||
| 935 | oops_in_progress = 1; /* FIXME: make printk NMI/MCA/INIT safe */ | ||
| 936 | previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA"); | ||
| 937 | monarch_cpu = cpu; | ||
| 938 | ia64_wait_for_slaves(cpu); | ||
| 939 | |||
| 940 | /* Wakeup all the processors which are spinning in the rendezvous loop. | ||
| 941 | * They will leave SAL, then spin in the OS with interrupts disabled | ||
| 942 | * until this monarch cpu leaves the MCA handler. That gets control | ||
| 943 | * back to the OS so we can backtrace the other cpus, backtrace when | ||
| 944 | * spinning in SAL does not work. | ||
| 945 | */ | ||
| 946 | ia64_mca_wakeup_all(); | ||
| 870 | 947 | ||
| 871 | /* Get the MCA error record and log it */ | 948 | /* Get the MCA error record and log it */ |
| 872 | ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA); | 949 | ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA); |
| @@ -874,25 +951,20 @@ ia64_mca_ucmc_handler(void) | |||
| 874 | /* TLB error is only exist in this SAL error record */ | 951 | /* TLB error is only exist in this SAL error record */ |
| 875 | recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc)) | 952 | recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc)) |
| 876 | /* other error recovery */ | 953 | /* other error recovery */ |
| 877 | || (ia64_mca_ucmc_extension | 954 | || (ia64_mca_ucmc_extension |
| 878 | && ia64_mca_ucmc_extension( | 955 | && ia64_mca_ucmc_extension( |
| 879 | IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA), | 956 | IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA), |
| 880 | &ia64_sal_to_os_handoff_state, | 957 | sos)); |
| 881 | &ia64_os_to_sal_handoff_state)); | ||
| 882 | 958 | ||
| 883 | if (recover) { | 959 | if (recover) { |
| 884 | sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA); | 960 | sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA); |
| 885 | rh->severity = sal_log_severity_corrected; | 961 | rh->severity = sal_log_severity_corrected; |
| 886 | ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA); | 962 | ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA); |
| 963 | sos->os_status = IA64_MCA_CORRECTED; | ||
| 887 | } | 964 | } |
| 888 | /* | ||
| 889 | * Wakeup all the processors which are spinning in the rendezvous | ||
| 890 | * loop. | ||
| 891 | */ | ||
| 892 | ia64_mca_wakeup_all(); | ||
| 893 | 965 | ||
| 894 | /* Return to SAL */ | 966 | set_curr_task(cpu, previous_current); |
| 895 | ia64_return_to_sal_check(recover); | 967 | monarch_cpu = -1; |
| 896 | } | 968 | } |
| 897 | 969 | ||
| 898 | static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL); | 970 | static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL); |
| @@ -1116,34 +1188,114 @@ ia64_mca_cpe_poll (unsigned long dummy) | |||
| 1116 | /* | 1188 | /* |
| 1117 | * C portion of the OS INIT handler | 1189 | * C portion of the OS INIT handler |
| 1118 | * | 1190 | * |
| 1119 | * Called from ia64_monarch_init_handler | 1191 | * Called from ia64_os_init_dispatch |
| 1120 | * | ||
| 1121 | * Inputs: pointer to pt_regs where processor info was saved. | ||
| 1122 | * | 1192 | * |
| 1123 | * Returns: | 1193 | * Inputs: pointer to pt_regs where processor info was saved. SAL/OS state for |
| 1124 | * 0 if SAL must warm boot the System | 1194 | * this event. This code is used for both monarch and slave INIT events, see |
| 1125 | * 1 if SAL must return to interrupted context using PAL_MC_RESUME | 1195 | * sos->monarch. |
| 1126 | * | 1196 | * |
| 1197 | * All INIT events switch to the INIT stack and change the previous process to | ||
| 1198 | * blocked status. If one of the INIT events is the monarch then we are | ||
| 1199 | * probably processing the nmi button/command. Use the monarch cpu to dump all | ||
| 1200 | * the processes. The slave INIT events all spin until the monarch cpu | ||
| 1201 | * returns. We can also get INIT slave events for MCA, in which case the MCA | ||
| 1202 | * process is the monarch. | ||
| 1127 | */ | 1203 | */ |
| 1204 | |||
| 1128 | void | 1205 | void |
| 1129 | ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw) | 1206 | ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw, |
| 1207 | struct ia64_sal_os_state *sos) | ||
| 1130 | { | 1208 | { |
| 1131 | pal_min_state_area_t *ms; | 1209 | static atomic_t slaves; |
| 1210 | static atomic_t monarchs; | ||
| 1211 | task_t *previous_current; | ||
| 1212 | int cpu = smp_processor_id(), c; | ||
| 1213 | struct task_struct *g, *t; | ||
| 1132 | 1214 | ||
| 1133 | oops_in_progress = 1; /* avoid deadlock in printk, but it makes recovery dodgy */ | 1215 | oops_in_progress = 1; /* FIXME: make printk NMI/MCA/INIT safe */ |
| 1134 | console_loglevel = 15; /* make sure printks make it to console */ | 1216 | console_loglevel = 15; /* make sure printks make it to console */ |
| 1135 | 1217 | ||
| 1136 | printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n", | 1218 | printk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n", |
| 1137 | ia64_sal_to_os_handoff_state.proc_state_param); | 1219 | sos->proc_state_param, cpu, sos->monarch); |
| 1220 | salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0); | ||
| 1138 | 1221 | ||
| 1139 | /* | 1222 | previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "INIT"); |
| 1140 | * Address of minstate area provided by PAL is physical, | 1223 | sos->os_status = IA64_INIT_RESUME; |
| 1141 | * uncacheable (bit 63 set). Convert to Linux virtual | 1224 | |
| 1142 | * address in region 6. | 1225 | /* FIXME: Workaround for broken proms that drive all INIT events as |
| 1226 | * slaves. The last slave that enters is promoted to be a monarch. | ||
| 1227 | * Remove this code in September 2006, that gives platforms a year to | ||
| 1228 | * fix their proms and get their customers updated. | ||
| 1143 | */ | 1229 | */ |
| 1144 | ms = (pal_min_state_area_t *)(ia64_sal_to_os_handoff_state.pal_min_state | (6ul<<61)); | 1230 | if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) { |
| 1231 | printk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n", | ||
| 1232 | __FUNCTION__, cpu); | ||
| 1233 | atomic_dec(&slaves); | ||
| 1234 | sos->monarch = 1; | ||
| 1235 | } | ||
| 1145 | 1236 | ||
| 1146 | init_handler_platform(ms, pt, sw); /* call platform specific routines */ | 1237 | /* FIXME: Workaround for broken proms that drive all INIT events as |
| 1238 | * monarchs. Second and subsequent monarchs are demoted to slaves. | ||
| 1239 | * Remove this code in September 2006, that gives platforms a year to | ||
| 1240 | * fix their proms and get their customers updated. | ||
| 1241 | */ | ||
| 1242 | if (sos->monarch && atomic_add_return(1, &monarchs) > 1) { | ||
| 1243 | printk(KERN_WARNING "%s: Demoting cpu %d to slave.\n", | ||
| 1244 | __FUNCTION__, cpu); | ||
| 1245 | atomic_dec(&monarchs); | ||
| 1246 | sos->monarch = 0; | ||
| 1247 | } | ||
| 1248 | |||
| 1249 | if (!sos->monarch) { | ||
| 1250 | ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT; | ||
| 1251 | while (monarch_cpu == -1) | ||
| 1252 | cpu_relax(); /* spin until monarch enters */ | ||
| 1253 | while (monarch_cpu != -1) | ||
| 1254 | cpu_relax(); /* spin until monarch leaves */ | ||
| 1255 | printk("Slave on cpu %d returning to normal service.\n", cpu); | ||
| 1256 | set_curr_task(cpu, previous_current); | ||
| 1257 | ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; | ||
| 1258 | atomic_dec(&slaves); | ||
| 1259 | return; | ||
| 1260 | } | ||
| 1261 | |||
| 1262 | monarch_cpu = cpu; | ||
| 1263 | |||
| 1264 | /* | ||
| 1265 | * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000, INIT can be | ||
| 1266 | * generated via the BMC's command-line interface, but since the console is on the | ||
| 1267 | * same serial line, the user will need some time to switch out of the BMC before | ||
| 1268 | * the dump begins. | ||
| 1269 | */ | ||
| 1270 | printk("Delaying for 5 seconds...\n"); | ||
| 1271 | udelay(5*1000000); | ||
| 1272 | ia64_wait_for_slaves(cpu); | ||
| 1273 | printk(KERN_ERR "Processes interrupted by INIT -"); | ||
| 1274 | for_each_online_cpu(c) { | ||
| 1275 | struct ia64_sal_os_state *s; | ||
| 1276 | t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET); | ||
| 1277 | s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET); | ||
| 1278 | g = s->prev_task; | ||
| 1279 | if (g) { | ||
| 1280 | if (g->pid) | ||
| 1281 | printk(" %d", g->pid); | ||
| 1282 | else | ||
| 1283 | printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g); | ||
| 1284 | } | ||
| 1285 | } | ||
| 1286 | printk("\n\n"); | ||
| 1287 | if (read_trylock(&tasklist_lock)) { | ||
| 1288 | do_each_thread (g, t) { | ||
| 1289 | printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm); | ||
| 1290 | show_stack(t, NULL); | ||
| 1291 | } while_each_thread (g, t); | ||
| 1292 | read_unlock(&tasklist_lock); | ||
| 1293 | } | ||
| 1294 | printk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu); | ||
| 1295 | atomic_dec(&monarchs); | ||
| 1296 | set_curr_task(cpu, previous_current); | ||
| 1297 | monarch_cpu = -1; | ||
| 1298 | return; | ||
| 1147 | } | 1299 | } |
| 1148 | 1300 | ||
| 1149 | static int __init | 1301 | static int __init |
| @@ -1193,6 +1345,34 @@ static struct irqaction mca_cpep_irqaction = { | |||
| 1193 | }; | 1345 | }; |
| 1194 | #endif /* CONFIG_ACPI */ | 1346 | #endif /* CONFIG_ACPI */ |
| 1195 | 1347 | ||
| 1348 | /* Minimal format of the MCA/INIT stacks. The pseudo processes that run on | ||
| 1349 | * these stacks can never sleep, they cannot return from the kernel to user | ||
| 1350 | * space, they do not appear in a normal ps listing. So there is no need to | ||
| 1351 | * format most of the fields. | ||
| 1352 | */ | ||
| 1353 | |||
| 1354 | static void | ||
| 1355 | format_mca_init_stack(void *mca_data, unsigned long offset, | ||
| 1356 | const char *type, int cpu) | ||
| 1357 | { | ||
| 1358 | struct task_struct *p = (struct task_struct *)((char *)mca_data + offset); | ||
| 1359 | struct thread_info *ti; | ||
| 1360 | memset(p, 0, KERNEL_STACK_SIZE); | ||
| 1361 | ti = (struct thread_info *)((char *)p + IA64_TASK_SIZE); | ||
| 1362 | ti->flags = _TIF_MCA_INIT; | ||
| 1363 | ti->preempt_count = 1; | ||
| 1364 | ti->task = p; | ||
| 1365 | ti->cpu = cpu; | ||
| 1366 | p->thread_info = ti; | ||
| 1367 | p->state = TASK_UNINTERRUPTIBLE; | ||
| 1368 | __set_bit(cpu, &p->cpus_allowed); | ||
| 1369 | INIT_LIST_HEAD(&p->tasks); | ||
| 1370 | p->parent = p->real_parent = p->group_leader = p; | ||
| 1371 | INIT_LIST_HEAD(&p->children); | ||
| 1372 | INIT_LIST_HEAD(&p->sibling); | ||
| 1373 | strncpy(p->comm, type, sizeof(p->comm)-1); | ||
| 1374 | } | ||
| 1375 | |||
| 1196 | /* Do per-CPU MCA-related initialization. */ | 1376 | /* Do per-CPU MCA-related initialization. */ |
| 1197 | 1377 | ||
| 1198 | void __devinit | 1378 | void __devinit |
| @@ -1205,19 +1385,28 @@ ia64_mca_cpu_init(void *cpu_data) | |||
| 1205 | int cpu; | 1385 | int cpu; |
| 1206 | 1386 | ||
| 1207 | mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu) | 1387 | mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu) |
| 1208 | * NR_CPUS); | 1388 | * NR_CPUS + KERNEL_STACK_SIZE); |
| 1389 | mca_data = (void *)(((unsigned long)mca_data + | ||
| 1390 | KERNEL_STACK_SIZE - 1) & | ||
| 1391 | (-KERNEL_STACK_SIZE)); | ||
| 1209 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | 1392 | for (cpu = 0; cpu < NR_CPUS; cpu++) { |
| 1393 | format_mca_init_stack(mca_data, | ||
| 1394 | offsetof(struct ia64_mca_cpu, mca_stack), | ||
| 1395 | "MCA", cpu); | ||
| 1396 | format_mca_init_stack(mca_data, | ||
| 1397 | offsetof(struct ia64_mca_cpu, init_stack), | ||
| 1398 | "INIT", cpu); | ||
| 1210 | __per_cpu_mca[cpu] = __pa(mca_data); | 1399 | __per_cpu_mca[cpu] = __pa(mca_data); |
| 1211 | mca_data += sizeof(struct ia64_mca_cpu); | 1400 | mca_data += sizeof(struct ia64_mca_cpu); |
| 1212 | } | 1401 | } |
| 1213 | } | 1402 | } |
| 1214 | 1403 | ||
| 1215 | /* | 1404 | /* |
| 1216 | * The MCA info structure was allocated earlier and its | 1405 | * The MCA info structure was allocated earlier and its |
| 1217 | * physical address saved in __per_cpu_mca[cpu]. Copy that | 1406 | * physical address saved in __per_cpu_mca[cpu]. Copy that |
| 1218 | * address * to ia64_mca_data so we can access it as a per-CPU | 1407 | * address * to ia64_mca_data so we can access it as a per-CPU |
| 1219 | * variable. | 1408 | * variable. |
| 1220 | */ | 1409 | */ |
| 1221 | __get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()]; | 1410 | __get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()]; |
| 1222 | 1411 | ||
| 1223 | /* | 1412 | /* |
| @@ -1227,11 +1416,11 @@ ia64_mca_cpu_init(void *cpu_data) | |||
| 1227 | __get_cpu_var(ia64_mca_per_cpu_pte) = | 1416 | __get_cpu_var(ia64_mca_per_cpu_pte) = |
| 1228 | pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL)); | 1417 | pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL)); |
| 1229 | 1418 | ||
| 1230 | /* | 1419 | /* |
| 1231 | * Also, stash away a copy of the PAL address and the PTE | 1420 | * Also, stash away a copy of the PAL address and the PTE |
| 1232 | * needed to map it. | 1421 | * needed to map it. |
| 1233 | */ | 1422 | */ |
| 1234 | pal_vaddr = efi_get_pal_addr(); | 1423 | pal_vaddr = efi_get_pal_addr(); |
| 1235 | if (!pal_vaddr) | 1424 | if (!pal_vaddr) |
| 1236 | return; | 1425 | return; |
| 1237 | __get_cpu_var(ia64_mca_pal_base) = | 1426 | __get_cpu_var(ia64_mca_pal_base) = |
| @@ -1263,8 +1452,8 @@ ia64_mca_cpu_init(void *cpu_data) | |||
| 1263 | void __init | 1452 | void __init |
| 1264 | ia64_mca_init(void) | 1453 | ia64_mca_init(void) |
| 1265 | { | 1454 | { |
| 1266 | ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler; | 1455 | ia64_fptr_t *init_hldlr_ptr_monarch = (ia64_fptr_t *)ia64_os_init_dispatch_monarch; |
| 1267 | ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler; | 1456 | ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave; |
| 1268 | ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch; | 1457 | ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch; |
| 1269 | int i; | 1458 | int i; |
| 1270 | s64 rc; | 1459 | s64 rc; |
| @@ -1342,9 +1531,9 @@ ia64_mca_init(void) | |||
| 1342 | * XXX - disable SAL checksum by setting size to 0, should be | 1531 | * XXX - disable SAL checksum by setting size to 0, should be |
| 1343 | * size of the actual init handler in mca_asm.S. | 1532 | * size of the actual init handler in mca_asm.S. |
| 1344 | */ | 1533 | */ |
| 1345 | ia64_mc_info.imi_monarch_init_handler = ia64_tpa(mon_init_ptr->fp); | 1534 | ia64_mc_info.imi_monarch_init_handler = ia64_tpa(init_hldlr_ptr_monarch->fp); |
| 1346 | ia64_mc_info.imi_monarch_init_handler_size = 0; | 1535 | ia64_mc_info.imi_monarch_init_handler_size = 0; |
| 1347 | ia64_mc_info.imi_slave_init_handler = ia64_tpa(slave_init_ptr->fp); | 1536 | ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp); |
| 1348 | ia64_mc_info.imi_slave_init_handler_size = 0; | 1537 | ia64_mc_info.imi_slave_init_handler_size = 0; |
| 1349 | 1538 | ||
| 1350 | IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__, | 1539 | IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__, |
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S index ef3fd7265b67..499a065f4e60 100644 --- a/arch/ia64/kernel/mca_asm.S +++ b/arch/ia64/kernel/mca_asm.S | |||
| @@ -16,6 +16,9 @@ | |||
| 16 | // 04/11/12 Russ Anderson <rja@sgi.com> | 16 | // 04/11/12 Russ Anderson <rja@sgi.com> |
| 17 | // Added per cpu MCA/INIT stack save areas. | 17 | // Added per cpu MCA/INIT stack save areas. |
| 18 | // | 18 | // |
| 19 | // 12/08/05 Keith Owens <kaos@sgi.com> | ||
| 20 | // Use per cpu MCA/INIT stacks for all data. | ||
| 21 | // | ||
| 19 | #include <linux/config.h> | 22 | #include <linux/config.h> |
| 20 | #include <linux/threads.h> | 23 | #include <linux/threads.h> |
| 21 | 24 | ||
| @@ -25,96 +28,23 @@ | |||
| 25 | #include <asm/mca_asm.h> | 28 | #include <asm/mca_asm.h> |
| 26 | #include <asm/mca.h> | 29 | #include <asm/mca.h> |
| 27 | 30 | ||
| 28 | /* | 31 | #include "entry.h" |
| 29 | * When we get a machine check, the kernel stack pointer is no longer | ||
| 30 | * valid, so we need to set a new stack pointer. | ||
| 31 | */ | ||
| 32 | #define MINSTATE_PHYS /* Make sure stack access is physical for MINSTATE */ | ||
| 33 | |||
| 34 | /* | ||
| 35 | * Needed for return context to SAL | ||
| 36 | */ | ||
| 37 | #define IA64_MCA_SAME_CONTEXT 0 | ||
| 38 | #define IA64_MCA_COLD_BOOT -2 | ||
| 39 | |||
| 40 | #include "minstate.h" | ||
| 41 | |||
| 42 | /* | ||
| 43 | * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec) | ||
| 44 | * 1. GR1 = OS GP | ||
| 45 | * 2. GR8 = PAL_PROC physical address | ||
| 46 | * 3. GR9 = SAL_PROC physical address | ||
| 47 | * 4. GR10 = SAL GP (physical) | ||
| 48 | * 5. GR11 = Rendez state | ||
| 49 | * 6. GR12 = Return address to location within SAL_CHECK | ||
| 50 | */ | ||
| 51 | #define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \ | ||
| 52 | LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \ | ||
| 53 | st8 [_tmp]=r1,0x08;; \ | ||
| 54 | st8 [_tmp]=r8,0x08;; \ | ||
| 55 | st8 [_tmp]=r9,0x08;; \ | ||
| 56 | st8 [_tmp]=r10,0x08;; \ | ||
| 57 | st8 [_tmp]=r11,0x08;; \ | ||
| 58 | st8 [_tmp]=r12,0x08;; \ | ||
| 59 | st8 [_tmp]=r17,0x08;; \ | ||
| 60 | st8 [_tmp]=r18,0x08 | ||
| 61 | |||
| 62 | /* | ||
| 63 | * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec) | ||
| 64 | * (p6) is executed if we never entered virtual mode (TLB error) | ||
| 65 | * (p7) is executed if we entered virtual mode as expected (normal case) | ||
| 66 | * 1. GR8 = OS_MCA return status | ||
| 67 | * 2. GR9 = SAL GP (physical) | ||
| 68 | * 3. GR10 = 0/1 returning same/new context | ||
| 69 | * 4. GR22 = New min state save area pointer | ||
| 70 | * returns ptr to SAL rtn save loc in _tmp | ||
| 71 | */ | ||
| 72 | #define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp) \ | ||
| 73 | movl _tmp=ia64_os_to_sal_handoff_state;; \ | ||
| 74 | DATA_VA_TO_PA(_tmp);; \ | ||
| 75 | ld8 r8=[_tmp],0x08;; \ | ||
| 76 | ld8 r9=[_tmp],0x08;; \ | ||
| 77 | ld8 r10=[_tmp],0x08;; \ | ||
| 78 | ld8 r22=[_tmp],0x08;; | ||
| 79 | // now _tmp is pointing to SAL rtn save location | ||
| 80 | |||
| 81 | /* | ||
| 82 | * COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state | ||
| 83 | * imots_os_status=IA64_MCA_COLD_BOOT | ||
| 84 | * imots_sal_gp=SAL GP | ||
| 85 | * imots_context=IA64_MCA_SAME_CONTEXT | ||
| 86 | * imots_new_min_state=Min state save area pointer | ||
| 87 | * imots_sal_check_ra=Return address to location within SAL_CHECK | ||
| 88 | * | ||
| 89 | */ | ||
| 90 | #define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\ | ||
| 91 | movl tmp=IA64_MCA_COLD_BOOT; \ | ||
| 92 | movl sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state); \ | ||
| 93 | movl os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);; \ | ||
| 94 | st8 [os_to_sal_handoff]=tmp,8;; \ | ||
| 95 | ld8 tmp=[sal_to_os_handoff],48;; \ | ||
| 96 | st8 [os_to_sal_handoff]=tmp,8;; \ | ||
| 97 | movl tmp=IA64_MCA_SAME_CONTEXT;; \ | ||
| 98 | st8 [os_to_sal_handoff]=tmp,8;; \ | ||
| 99 | ld8 tmp=[sal_to_os_handoff],-8;; \ | ||
| 100 | st8 [os_to_sal_handoff]=tmp,8;; \ | ||
| 101 | ld8 tmp=[sal_to_os_handoff];; \ | ||
| 102 | st8 [os_to_sal_handoff]=tmp;; | ||
| 103 | 32 | ||
| 104 | #define GET_IA64_MCA_DATA(reg) \ | 33 | #define GET_IA64_MCA_DATA(reg) \ |
| 105 | GET_THIS_PADDR(reg, ia64_mca_data) \ | 34 | GET_THIS_PADDR(reg, ia64_mca_data) \ |
| 106 | ;; \ | 35 | ;; \ |
| 107 | ld8 reg=[reg] | 36 | ld8 reg=[reg] |
| 108 | 37 | ||
| 109 | .global ia64_os_mca_dispatch | ||
| 110 | .global ia64_os_mca_dispatch_end | ||
| 111 | .global ia64_sal_to_os_handoff_state | ||
| 112 | .global ia64_os_to_sal_handoff_state | ||
| 113 | .global ia64_do_tlb_purge | 38 | .global ia64_do_tlb_purge |
| 39 | .global ia64_os_mca_dispatch | ||
| 40 | .global ia64_os_init_dispatch_monarch | ||
| 41 | .global ia64_os_init_dispatch_slave | ||
| 114 | 42 | ||
| 115 | .text | 43 | .text |
| 116 | .align 16 | 44 | .align 16 |
| 117 | 45 | ||
| 46 | //StartMain//////////////////////////////////////////////////////////////////// | ||
| 47 | |||
| 118 | /* | 48 | /* |
| 119 | * Just the TLB purge part is moved to a separate function | 49 | * Just the TLB purge part is moved to a separate function |
| 120 | * so we can re-use the code for cpu hotplug code as well | 50 | * so we can re-use the code for cpu hotplug code as well |
| @@ -207,34 +137,31 @@ ia64_do_tlb_purge: | |||
| 207 | br.sptk.many b1 | 137 | br.sptk.many b1 |
| 208 | ;; | 138 | ;; |
| 209 | 139 | ||
| 210 | ia64_os_mca_dispatch: | 140 | //EndMain////////////////////////////////////////////////////////////////////// |
| 141 | |||
| 142 | //StartMain//////////////////////////////////////////////////////////////////// | ||
| 211 | 143 | ||
| 144 | ia64_os_mca_dispatch: | ||
| 212 | // Serialize all MCA processing | 145 | // Serialize all MCA processing |
| 213 | mov r3=1;; | 146 | mov r3=1;; |
| 214 | LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);; | 147 | LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);; |
| 215 | ia64_os_mca_spin: | 148 | ia64_os_mca_spin: |
| 216 | xchg8 r4=[r2],r3;; | 149 | xchg4 r4=[r2],r3;; |
| 217 | cmp.ne p6,p0=r4,r0 | 150 | cmp.ne p6,p0=r4,r0 |
| 218 | (p6) br ia64_os_mca_spin | 151 | (p6) br ia64_os_mca_spin |
| 219 | 152 | ||
| 220 | // Save the SAL to OS MCA handoff state as defined | 153 | mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack |
| 221 | // by SAL SPEC 3.0 | 154 | LOAD_PHYSICAL(p0,r2,1f) // return address |
| 222 | // NOTE : The order in which the state gets saved | 155 | mov r19=1 // All MCA events are treated as monarch (for now) |
| 223 | // is dependent on the way the C-structure | 156 | br.sptk ia64_state_save // save the state that is not in minstate |
| 224 | // for ia64_mca_sal_to_os_state_t has been | 157 | 1: |
| 225 | // defined in include/asm/mca.h | ||
| 226 | SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2) | ||
| 227 | ;; | ||
| 228 | |||
| 229 | // LOG PROCESSOR STATE INFO FROM HERE ON.. | ||
| 230 | begin_os_mca_dump: | ||
| 231 | br ia64_os_mca_proc_state_dump;; | ||
| 232 | 158 | ||
| 233 | ia64_os_mca_done_dump: | 159 | GET_IA64_MCA_DATA(r2) |
| 234 | 160 | // Using MCA stack, struct ia64_sal_os_state, variable proc_state_param | |
| 235 | LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56) | 161 | ;; |
| 162 | add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET, r2 | ||
| 236 | ;; | 163 | ;; |
| 237 | ld8 r18=[r16] // Get processor state parameter on existing PALE_CHECK. | 164 | ld8 r18=[r3] // Get processor state parameter on existing PALE_CHECK. |
| 238 | ;; | 165 | ;; |
| 239 | tbit.nz p6,p7=r18,60 | 166 | tbit.nz p6,p7=r18,60 |
| 240 | (p7) br.spnt done_tlb_purge_and_reload | 167 | (p7) br.spnt done_tlb_purge_and_reload |
| @@ -323,624 +250,775 @@ ia64_reload_tr: | |||
| 323 | itr.d dtr[r20]=r16 | 250 | itr.d dtr[r20]=r16 |
| 324 | ;; | 251 | ;; |
| 325 | srlz.d | 252 | srlz.d |
| 326 | ;; | ||
| 327 | br.sptk.many done_tlb_purge_and_reload | ||
| 328 | err: | ||
| 329 | COLD_BOOT_HANDOFF_STATE(r20,r21,r22) | ||
| 330 | br.sptk.many ia64_os_mca_done_restore | ||
| 331 | 253 | ||
| 332 | done_tlb_purge_and_reload: | 254 | done_tlb_purge_and_reload: |
| 333 | 255 | ||
| 334 | // Setup new stack frame for OS_MCA handling | 256 | // switch to per cpu MCA stack |
| 335 | GET_IA64_MCA_DATA(r2) | 257 | mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack |
| 336 | ;; | 258 | LOAD_PHYSICAL(p0,r2,1f) // return address |
| 337 | add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2 | 259 | br.sptk ia64_new_stack |
| 338 | add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2 | 260 | 1: |
| 339 | ;; | 261 | |
| 340 | rse_switch_context(r6,r3,r2);; // RSC management in this new context | 262 | // everything saved, now we can set the kernel registers |
| 263 | mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack | ||
| 264 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
| 265 | br.sptk ia64_set_kernel_registers | ||
| 266 | 1: | ||
| 341 | 267 | ||
| 268 | // This must be done in physical mode | ||
| 342 | GET_IA64_MCA_DATA(r2) | 269 | GET_IA64_MCA_DATA(r2) |
| 343 | ;; | 270 | ;; |
| 344 | add r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2 | 271 | mov r7=r2 |
| 345 | ;; | ||
| 346 | mov r12=r2 // establish new stack-pointer | ||
| 347 | 272 | ||
| 348 | // Enter virtual mode from physical mode | 273 | // Enter virtual mode from physical mode |
| 349 | VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4) | 274 | VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4) |
| 350 | ia64_os_mca_virtual_begin: | 275 | |
| 276 | // This code returns to SAL via SOS r2, in general SAL has no unwind | ||
| 277 | // data. To get a clean termination when backtracing the C MCA/INIT | ||
| 278 | // handler, set a dummy return address of 0 in this routine. That | ||
| 279 | // requires that ia64_os_mca_virtual_begin be a global function. | ||
| 280 | ENTRY(ia64_os_mca_virtual_begin) | ||
| 281 | .prologue | ||
| 282 | .save rp,r0 | ||
| 283 | .body | ||
| 284 | |||
| 285 | mov ar.rsc=3 // set eager mode for C handler | ||
| 286 | mov r2=r7 // see GET_IA64_MCA_DATA above | ||
| 287 | ;; | ||
| 351 | 288 | ||
| 352 | // Call virtual mode handler | 289 | // Call virtual mode handler |
| 353 | movl r2=ia64_mca_ucmc_handler;; | 290 | alloc r14=ar.pfs,0,0,3,0 |
| 354 | mov b6=r2;; | 291 | ;; |
| 355 | br.call.sptk.many b0=b6;; | 292 | DATA_PA_TO_VA(r2,r7) |
| 356 | .ret0: | 293 | ;; |
| 294 | add out0=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2 | ||
| 295 | add out1=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2 | ||
| 296 | add out2=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET, r2 | ||
| 297 | br.call.sptk.many b0=ia64_mca_handler | ||
| 298 | |||
| 357 | // Revert back to physical mode before going back to SAL | 299 | // Revert back to physical mode before going back to SAL |
| 358 | PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4) | 300 | PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4) |
| 359 | ia64_os_mca_virtual_end: | 301 | ia64_os_mca_virtual_end: |
| 360 | 302 | ||
| 361 | // restore the original stack frame here | 303 | END(ia64_os_mca_virtual_begin) |
| 304 | |||
| 305 | // switch back to previous stack | ||
| 306 | alloc r14=ar.pfs,0,0,0,0 // remove the MCA handler frame | ||
| 307 | mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack | ||
| 308 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
| 309 | br.sptk ia64_old_stack | ||
| 310 | 1: | ||
| 311 | |||
| 312 | mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack | ||
| 313 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
| 314 | br.sptk ia64_state_restore // restore the SAL state | ||
| 315 | 1: | ||
| 316 | |||
| 317 | mov b0=r12 // SAL_CHECK return address | ||
| 318 | |||
| 319 | // release lock | ||
| 320 | LOAD_PHYSICAL(p0,r3,ia64_mca_serialize);; | ||
| 321 | st4.rel [r3]=r0 | ||
| 322 | |||
| 323 | br b0 | ||
| 324 | |||
| 325 | //EndMain////////////////////////////////////////////////////////////////////// | ||
| 326 | |||
| 327 | //StartMain//////////////////////////////////////////////////////////////////// | ||
| 328 | |||
| 329 | // | ||
| 330 | // SAL to OS entry point for INIT on all processors. This has been defined for | ||
| 331 | // registration purposes with SAL as a part of ia64_mca_init. Monarch and | ||
| 332 | // slave INIT have identical processing, except for the value of the | ||
| 333 | // sos->monarch flag in r19. | ||
| 334 | // | ||
| 335 | |||
| 336 | ia64_os_init_dispatch_monarch: | ||
| 337 | mov r19=1 // Bow, bow, ye lower middle classes! | ||
| 338 | br.sptk ia64_os_init_dispatch | ||
| 339 | |||
| 340 | ia64_os_init_dispatch_slave: | ||
| 341 | mov r19=0 // <igor>yeth, mathter</igor> | ||
| 342 | |||
| 343 | ia64_os_init_dispatch: | ||
| 344 | |||
| 345 | mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack | ||
| 346 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
| 347 | br.sptk ia64_state_save // save the state that is not in minstate | ||
| 348 | 1: | ||
| 349 | |||
| 350 | // switch to per cpu INIT stack | ||
| 351 | mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack | ||
| 352 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
| 353 | br.sptk ia64_new_stack | ||
| 354 | 1: | ||
| 355 | |||
| 356 | // everything saved, now we can set the kernel registers | ||
| 357 | mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack | ||
| 358 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
| 359 | br.sptk ia64_set_kernel_registers | ||
| 360 | 1: | ||
| 361 | |||
| 362 | // This must be done in physical mode | ||
| 362 | GET_IA64_MCA_DATA(r2) | 363 | GET_IA64_MCA_DATA(r2) |
| 363 | ;; | 364 | ;; |
| 364 | add r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2 | 365 | mov r7=r2 |
| 365 | ;; | 366 | |
| 366 | movl r4=IA64_PSR_MC | 367 | // Enter virtual mode from physical mode |
| 368 | VIRTUAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_begin, r4) | ||
| 369 | |||
| 370 | // This code returns to SAL via SOS r2, in general SAL has no unwind | ||
| 371 | // data. To get a clean termination when backtracing the C MCA/INIT | ||
| 372 | // handler, set a dummy return address of 0 in this routine. That | ||
| 373 | // requires that ia64_os_init_virtual_begin be a global function. | ||
| 374 | ENTRY(ia64_os_init_virtual_begin) | ||
| 375 | .prologue | ||
| 376 | .save rp,r0 | ||
| 377 | .body | ||
| 378 | |||
| 379 | mov ar.rsc=3 // set eager mode for C handler | ||
| 380 | mov r2=r7 // see GET_IA64_MCA_DATA above | ||
| 367 | ;; | 381 | ;; |
| 368 | rse_return_context(r4,r3,r2) // switch from interrupt context for RSE | ||
| 369 | 382 | ||
| 370 | // let us restore all the registers from our PSI structure | 383 | // Call virtual mode handler |
| 371 | mov r8=gp | 384 | alloc r14=ar.pfs,0,0,3,0 |
| 385 | ;; | ||
| 386 | DATA_PA_TO_VA(r2,r7) | ||
| 372 | ;; | 387 | ;; |
| 373 | begin_os_mca_restore: | 388 | add out0=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2 |
| 374 | br ia64_os_mca_proc_state_restore;; | 389 | add out1=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2 |
| 390 | add out2=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SOS_OFFSET, r2 | ||
| 391 | br.call.sptk.many b0=ia64_init_handler | ||
| 375 | 392 | ||
| 376 | ia64_os_mca_done_restore: | 393 | // Revert back to physical mode before going back to SAL |
| 377 | OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);; | 394 | PHYSICAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_end, r4) |
| 378 | // branch back to SALE_CHECK | 395 | ia64_os_init_virtual_end: |
| 379 | ld8 r3=[r2];; | ||
| 380 | mov b0=r3;; // SAL_CHECK return address | ||
| 381 | 396 | ||
| 382 | // release lock | 397 | END(ia64_os_init_virtual_begin) |
| 383 | movl r3=ia64_mca_serialize;; | 398 | |
| 384 | DATA_VA_TO_PA(r3);; | 399 | mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack |
| 385 | st8.rel [r3]=r0 | 400 | LOAD_PHYSICAL(p0,r2,1f) // return address |
| 401 | br.sptk ia64_state_restore // restore the SAL state | ||
| 402 | 1: | ||
| 386 | 403 | ||
| 404 | // switch back to previous stack | ||
| 405 | alloc r14=ar.pfs,0,0,0,0 // remove the INIT handler frame | ||
| 406 | mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack | ||
| 407 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
| 408 | br.sptk ia64_old_stack | ||
| 409 | 1: | ||
| 410 | |||
| 411 | mov b0=r12 // SAL_CHECK return address | ||
| 387 | br b0 | 412 | br b0 |
| 388 | ;; | 413 | |
| 389 | ia64_os_mca_dispatch_end: | ||
| 390 | //EndMain////////////////////////////////////////////////////////////////////// | 414 | //EndMain////////////////////////////////////////////////////////////////////// |
| 391 | 415 | ||
| 416 | // common defines for the stubs | ||
| 417 | #define ms r4 | ||
| 418 | #define regs r5 | ||
| 419 | #define temp1 r2 /* careful, it overlaps with input registers */ | ||
| 420 | #define temp2 r3 /* careful, it overlaps with input registers */ | ||
| 421 | #define temp3 r7 | ||
| 422 | #define temp4 r14 | ||
| 423 | |||
| 392 | 424 | ||
| 393 | //++ | 425 | //++ |
| 394 | // Name: | 426 | // Name: |
| 395 | // ia64_os_mca_proc_state_dump() | 427 | // ia64_state_save() |
| 396 | // | 428 | // |
| 397 | // Stub Description: | 429 | // Stub Description: |
| 398 | // | 430 | // |
| 399 | // This stub dumps the processor state during MCHK to a data area | 431 | // Save the state that is not in minstate. This is sensitive to the layout of |
| 432 | // struct ia64_sal_os_state in mca.h. | ||
| 433 | // | ||
| 434 | // r2 contains the return address, r3 contains either | ||
| 435 | // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. | ||
| 436 | // | ||
| 437 | // The OS to SAL section of struct ia64_sal_os_state is set to a default | ||
| 438 | // value of cold boot (MCA) or warm boot (INIT) and return to the same | ||
| 439 | // context. ia64_sal_os_state is also used to hold some registers that | ||
| 440 | // need to be saved and restored across the stack switches. | ||
| 441 | // | ||
| 442 | // Most input registers to this stub come from PAL/SAL | ||
| 443 | // r1 os gp, physical | ||
| 444 | // r8 pal_proc entry point | ||
| 445 | // r9 sal_proc entry point | ||
| 446 | // r10 sal gp | ||
| 447 | // r11 MCA - rendevzous state, INIT - reason code | ||
| 448 | // r12 sal return address | ||
| 449 | // r17 pal min_state | ||
| 450 | // r18 processor state parameter | ||
| 451 | // r19 monarch flag, set by the caller of this routine | ||
| 452 | // | ||
| 453 | // In addition to the SAL to OS state, this routine saves all the | ||
| 454 | // registers that appear in struct pt_regs and struct switch_stack, | ||
| 455 | // excluding those that are already in the PAL minstate area. This | ||
| 456 | // results in a partial pt_regs and switch_stack, the C code copies the | ||
| 457 | // remaining registers from PAL minstate to pt_regs and switch_stack. The | ||
| 458 | // resulting structures contain all the state of the original process when | ||
| 459 | // MCA/INIT occurred. | ||
| 400 | // | 460 | // |
| 401 | //-- | 461 | //-- |
| 402 | 462 | ||
| 403 | ia64_os_mca_proc_state_dump: | 463 | ia64_state_save: |
| 404 | // Save bank 1 GRs 16-31 which will be used by c-language code when we switch | 464 | add regs=MCA_SOS_OFFSET, r3 |
| 405 | // to virtual addressing mode. | 465 | add ms=MCA_SOS_OFFSET+8, r3 |
| 406 | GET_IA64_MCA_DATA(r2) | 466 | mov b0=r2 // save return address |
| 467 | cmp.eq p1,p2=IA64_MCA_CPU_MCA_STACK_OFFSET, r3 | ||
| 468 | ;; | ||
| 469 | GET_IA64_MCA_DATA(temp2) | ||
| 470 | ;; | ||
| 471 | add temp1=temp2, regs // struct ia64_sal_os_state on MCA or INIT stack | ||
| 472 | add temp2=temp2, ms // struct ia64_sal_os_state+8 on MCA or INIT stack | ||
| 473 | ;; | ||
| 474 | mov regs=temp1 // save the start of sos | ||
| 475 | st8 [temp1]=r1,16 // os_gp | ||
| 476 | st8 [temp2]=r8,16 // pal_proc | ||
| 477 | ;; | ||
| 478 | st8 [temp1]=r9,16 // sal_proc | ||
| 479 | st8 [temp2]=r11,16 // rv_rc | ||
| 480 | mov r11=cr.iipa | ||
| 481 | ;; | ||
| 482 | st8 [temp1]=r18,16 // proc_state_param | ||
| 483 | st8 [temp2]=r19,16 // monarch | ||
| 484 | mov r6=IA64_KR(CURRENT) | ||
| 485 | ;; | ||
| 486 | st8 [temp1]=r12,16 // sal_ra | ||
| 487 | st8 [temp2]=r10,16 // sal_gp | ||
| 488 | mov r12=cr.isr | ||
| 489 | ;; | ||
| 490 | st8 [temp1]=r17,16 // pal_min_state | ||
| 491 | st8 [temp2]=r6,16 // prev_IA64_KR_CURRENT | ||
| 492 | mov r6=cr.ifa | ||
| 493 | ;; | ||
| 494 | st8 [temp1]=r0,16 // prev_task, starts off as NULL | ||
| 495 | st8 [temp2]=r12,16 // cr.isr | ||
| 496 | mov r12=cr.itir | ||
| 497 | ;; | ||
| 498 | st8 [temp1]=r6,16 // cr.ifa | ||
| 499 | st8 [temp2]=r12,16 // cr.itir | ||
| 500 | mov r12=cr.iim | ||
| 501 | ;; | ||
| 502 | st8 [temp1]=r11,16 // cr.iipa | ||
| 503 | st8 [temp2]=r12,16 // cr.iim | ||
| 504 | mov r6=cr.iha | ||
| 505 | (p1) mov r12=IA64_MCA_COLD_BOOT | ||
| 506 | (p2) mov r12=IA64_INIT_WARM_BOOT | ||
| 507 | ;; | ||
| 508 | st8 [temp1]=r6,16 // cr.iha | ||
| 509 | st8 [temp2]=r12 // os_status, default is cold boot | ||
| 510 | mov r6=IA64_MCA_SAME_CONTEXT | ||
| 511 | ;; | ||
| 512 | st8 [temp1]=r6 // context, default is same context | ||
| 513 | |||
| 514 | // Save the pt_regs data that is not in minstate. The previous code | ||
| 515 | // left regs at sos. | ||
| 516 | add regs=MCA_PT_REGS_OFFSET-MCA_SOS_OFFSET, regs | ||
| 517 | ;; | ||
| 518 | add temp1=PT(B6), regs | ||
| 519 | mov temp3=b6 | ||
| 520 | mov temp4=b7 | ||
| 521 | add temp2=PT(B7), regs | ||
| 522 | ;; | ||
| 523 | st8 [temp1]=temp3,PT(AR_CSD)-PT(B6) // save b6 | ||
| 524 | st8 [temp2]=temp4,PT(AR_SSD)-PT(B7) // save b7 | ||
| 525 | mov temp3=ar.csd | ||
| 526 | mov temp4=ar.ssd | ||
| 527 | cover // must be last in group | ||
| 407 | ;; | 528 | ;; |
| 408 | add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2 | 529 | st8 [temp1]=temp3,PT(AR_UNAT)-PT(AR_CSD) // save ar.csd |
| 409 | ;; | 530 | st8 [temp2]=temp4,PT(AR_PFS)-PT(AR_SSD) // save ar.ssd |
| 410 | // save ar.NaT | 531 | mov temp3=ar.unat |
| 411 | mov r5=ar.unat // ar.unat | 532 | mov temp4=ar.pfs |
| 412 | 533 | ;; | |
| 413 | // save banked GRs 16-31 along with NaT bits | 534 | st8 [temp1]=temp3,PT(AR_RNAT)-PT(AR_UNAT) // save ar.unat |
| 414 | bsw.1;; | 535 | st8 [temp2]=temp4,PT(AR_BSPSTORE)-PT(AR_PFS) // save ar.pfs |
| 415 | st8.spill [r2]=r16,8;; | 536 | mov temp3=ar.rnat |
| 416 | st8.spill [r2]=r17,8;; | 537 | mov temp4=ar.bspstore |
| 417 | st8.spill [r2]=r18,8;; | 538 | ;; |
| 418 | st8.spill [r2]=r19,8;; | 539 | st8 [temp1]=temp3,PT(LOADRS)-PT(AR_RNAT) // save ar.rnat |
| 419 | st8.spill [r2]=r20,8;; | 540 | st8 [temp2]=temp4,PT(AR_FPSR)-PT(AR_BSPSTORE) // save ar.bspstore |
| 420 | st8.spill [r2]=r21,8;; | 541 | mov temp3=ar.bsp |
| 421 | st8.spill [r2]=r22,8;; | 542 | ;; |
| 422 | st8.spill [r2]=r23,8;; | 543 | sub temp3=temp3, temp4 // ar.bsp - ar.bspstore |
| 423 | st8.spill [r2]=r24,8;; | 544 | mov temp4=ar.fpsr |
| 424 | st8.spill [r2]=r25,8;; | 545 | ;; |
| 425 | st8.spill [r2]=r26,8;; | 546 | shl temp3=temp3,16 // compute ar.rsc to be used for "loadrs" |
| 426 | st8.spill [r2]=r27,8;; | 547 | ;; |
| 427 | st8.spill [r2]=r28,8;; | 548 | st8 [temp1]=temp3,PT(AR_CCV)-PT(LOADRS) // save loadrs |
| 428 | st8.spill [r2]=r29,8;; | 549 | st8 [temp2]=temp4,PT(F6)-PT(AR_FPSR) // save ar.fpsr |
| 429 | st8.spill [r2]=r30,8;; | 550 | mov temp3=ar.ccv |
| 430 | st8.spill [r2]=r31,8;; | 551 | ;; |
| 431 | 552 | st8 [temp1]=temp3,PT(F7)-PT(AR_CCV) // save ar.ccv | |
| 432 | mov r4=ar.unat;; | 553 | stf.spill [temp2]=f6,PT(F8)-PT(F6) |
| 433 | st8 [r2]=r4,8 // save User NaT bits for r16-r31 | 554 | ;; |
| 434 | mov ar.unat=r5 // restore original unat | 555 | stf.spill [temp1]=f7,PT(F9)-PT(F7) |
| 435 | bsw.0;; | 556 | stf.spill [temp2]=f8,PT(F10)-PT(F8) |
| 436 | 557 | ;; | |
| 437 | //save BRs | 558 | stf.spill [temp1]=f9,PT(F11)-PT(F9) |
| 438 | add r4=8,r2 // duplicate r2 in r4 | 559 | stf.spill [temp2]=f10 |
| 439 | add r6=2*8,r2 // duplicate r2 in r4 | 560 | ;; |
| 440 | 561 | stf.spill [temp1]=f11 | |
| 441 | mov r3=b0 | 562 | |
| 442 | mov r5=b1 | 563 | // Save the switch_stack data that is not in minstate nor pt_regs. The |
| 443 | mov r7=b2;; | 564 | // previous code left regs at pt_regs. |
| 444 | st8 [r2]=r3,3*8 | 565 | add regs=MCA_SWITCH_STACK_OFFSET-MCA_PT_REGS_OFFSET, regs |
| 445 | st8 [r4]=r5,3*8 | 566 | ;; |
| 446 | st8 [r6]=r7,3*8;; | 567 | add temp1=SW(F2), regs |
| 447 | 568 | add temp2=SW(F3), regs | |
| 448 | mov r3=b3 | 569 | ;; |
| 449 | mov r5=b4 | 570 | stf.spill [temp1]=f2,32 |
| 450 | mov r7=b5;; | 571 | stf.spill [temp2]=f3,32 |
| 451 | st8 [r2]=r3,3*8 | 572 | ;; |
| 452 | st8 [r4]=r5,3*8 | 573 | stf.spill [temp1]=f4,32 |
| 453 | st8 [r6]=r7,3*8;; | 574 | stf.spill [temp2]=f5,32 |
| 454 | 575 | ;; | |
| 455 | mov r3=b6 | 576 | stf.spill [temp1]=f12,32 |
| 456 | mov r5=b7;; | 577 | stf.spill [temp2]=f13,32 |
| 457 | st8 [r2]=r3,2*8 | 578 | ;; |
| 458 | st8 [r4]=r5,2*8;; | 579 | stf.spill [temp1]=f14,32 |
| 459 | 580 | stf.spill [temp2]=f15,32 | |
| 460 | cSaveCRs: | 581 | ;; |
| 461 | // save CRs | 582 | stf.spill [temp1]=f16,32 |
| 462 | add r4=8,r2 // duplicate r2 in r4 | 583 | stf.spill [temp2]=f17,32 |
| 463 | add r6=2*8,r2 // duplicate r2 in r4 | 584 | ;; |
| 464 | 585 | stf.spill [temp1]=f18,32 | |
| 465 | mov r3=cr.dcr | 586 | stf.spill [temp2]=f19,32 |
| 466 | mov r5=cr.itm | 587 | ;; |
| 467 | mov r7=cr.iva;; | 588 | stf.spill [temp1]=f20,32 |
| 468 | 589 | stf.spill [temp2]=f21,32 | |
| 469 | st8 [r2]=r3,8*8 | 590 | ;; |
| 470 | st8 [r4]=r5,3*8 | 591 | stf.spill [temp1]=f22,32 |
| 471 | st8 [r6]=r7,3*8;; // 48 byte rements | 592 | stf.spill [temp2]=f23,32 |
| 472 | 593 | ;; | |
| 473 | mov r3=cr.pta;; | 594 | stf.spill [temp1]=f24,32 |
| 474 | st8 [r2]=r3,8*8;; // 64 byte rements | 595 | stf.spill [temp2]=f25,32 |
| 475 | 596 | ;; | |
| 476 | // if PSR.ic=0, reading interruption registers causes an illegal operation fault | 597 | stf.spill [temp1]=f26,32 |
| 477 | mov r3=psr;; | 598 | stf.spill [temp2]=f27,32 |
| 478 | tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test | 599 | ;; |
| 479 | (p6) st8 [r2]=r0,9*8+160 // increment by 232 byte inc. | 600 | stf.spill [temp1]=f28,32 |
| 480 | begin_skip_intr_regs: | 601 | stf.spill [temp2]=f29,32 |
| 481 | (p6) br SkipIntrRegs;; | 602 | ;; |
| 482 | 603 | stf.spill [temp1]=f30,SW(B2)-SW(F30) | |
| 483 | add r4=8,r2 // duplicate r2 in r4 | 604 | stf.spill [temp2]=f31,SW(B3)-SW(F31) |
| 484 | add r6=2*8,r2 // duplicate r2 in r6 | 605 | mov temp3=b2 |
| 485 | 606 | mov temp4=b3 | |
| 486 | mov r3=cr.ipsr | 607 | ;; |
| 487 | mov r5=cr.isr | 608 | st8 [temp1]=temp3,16 // save b2 |
| 488 | mov r7=r0;; | 609 | st8 [temp2]=temp4,16 // save b3 |
| 489 | st8 [r2]=r3,3*8 | 610 | mov temp3=b4 |
| 490 | st8 [r4]=r5,3*8 | 611 | mov temp4=b5 |
| 491 | st8 [r6]=r7,3*8;; | 612 | ;; |
| 492 | 613 | st8 [temp1]=temp3,SW(AR_LC)-SW(B4) // save b4 | |
| 493 | mov r3=cr.iip | 614 | st8 [temp2]=temp4 // save b5 |
| 494 | mov r5=cr.ifa | 615 | mov temp3=ar.lc |
| 495 | mov r7=cr.itir;; | 616 | ;; |
| 496 | st8 [r2]=r3,3*8 | 617 | st8 [temp1]=temp3 // save ar.lc |
| 497 | st8 [r4]=r5,3*8 | 618 | |
| 498 | st8 [r6]=r7,3*8;; | 619 | // FIXME: Some proms are incorrectly accessing the minstate area as |
| 499 | 620 | // cached data. The C code uses region 6, uncached virtual. Ensure | |
| 500 | mov r3=cr.iipa | 621 | // that there is no cache data lying around for the first 1K of the |
| 501 | mov r5=cr.ifs | 622 | // minstate area. |
| 502 | mov r7=cr.iim;; | 623 | // Remove this code in September 2006, that gives platforms a year to |
| 503 | st8 [r2]=r3,3*8 | 624 | // fix their proms and get their customers updated. |
| 504 | st8 [r4]=r5,3*8 | 625 | |
| 505 | st8 [r6]=r7,3*8;; | 626 | add r1=32*1,r17 |
| 506 | 627 | add r2=32*2,r17 | |
| 507 | mov r3=cr25;; // cr.iha | 628 | add r3=32*3,r17 |
| 508 | st8 [r2]=r3,160;; // 160 byte rement | 629 | add r4=32*4,r17 |
| 509 | 630 | add r5=32*5,r17 | |
| 510 | SkipIntrRegs: | 631 | add r6=32*6,r17 |
| 511 | st8 [r2]=r0,152;; // another 152 byte . | 632 | add r7=32*7,r17 |
| 512 | 633 | ;; | |
| 513 | add r4=8,r2 // duplicate r2 in r4 | 634 | fc r17 |
| 514 | add r6=2*8,r2 // duplicate r2 in r6 | 635 | fc r1 |
| 515 | 636 | fc r2 | |
| 516 | mov r3=cr.lid | 637 | fc r3 |
| 517 | // mov r5=cr.ivr // cr.ivr, don't read it | 638 | fc r4 |
| 518 | mov r7=cr.tpr;; | 639 | fc r5 |
| 519 | st8 [r2]=r3,3*8 | 640 | fc r6 |
| 520 | st8 [r4]=r5,3*8 | 641 | fc r7 |
| 521 | st8 [r6]=r7,3*8;; | 642 | add r17=32*8,r17 |
| 522 | 643 | add r1=32*8,r1 | |
| 523 | mov r3=r0 // cr.eoi => cr67 | 644 | add r2=32*8,r2 |
| 524 | mov r5=r0 // cr.irr0 => cr68 | 645 | add r3=32*8,r3 |
| 525 | mov r7=r0;; // cr.irr1 => cr69 | 646 | add r4=32*8,r4 |
| 526 | st8 [r2]=r3,3*8 | 647 | add r5=32*8,r5 |
| 527 | st8 [r4]=r5,3*8 | 648 | add r6=32*8,r6 |
| 528 | st8 [r6]=r7,3*8;; | 649 | add r7=32*8,r7 |
| 529 | 650 | ;; | |
| 530 | mov r3=r0 // cr.irr2 => cr70 | 651 | fc r17 |
| 531 | mov r5=r0 // cr.irr3 => cr71 | 652 | fc r1 |
| 532 | mov r7=cr.itv;; | 653 | fc r2 |
| 533 | st8 [r2]=r3,3*8 | 654 | fc r3 |
| 534 | st8 [r4]=r5,3*8 | 655 | fc r4 |
| 535 | st8 [r6]=r7,3*8;; | 656 | fc r5 |
| 536 | 657 | fc r6 | |
| 537 | mov r3=cr.pmv | 658 | fc r7 |
| 538 | mov r5=cr.cmcv;; | 659 | add r17=32*8,r17 |
| 539 | st8 [r2]=r3,7*8 | 660 | add r1=32*8,r1 |
| 540 | st8 [r4]=r5,7*8;; | 661 | add r2=32*8,r2 |
| 541 | 662 | add r3=32*8,r3 | |
| 542 | mov r3=r0 // cr.lrr0 => cr80 | 663 | add r4=32*8,r4 |
| 543 | mov r5=r0;; // cr.lrr1 => cr81 | 664 | add r5=32*8,r5 |
| 544 | st8 [r2]=r3,23*8 | 665 | add r6=32*8,r6 |
| 545 | st8 [r4]=r5,23*8;; | 666 | add r7=32*8,r7 |
| 546 | 667 | ;; | |
| 547 | adds r2=25*8,r2;; | 668 | fc r17 |
| 548 | 669 | fc r1 | |
| 549 | cSaveARs: | 670 | fc r2 |
| 550 | // save ARs | 671 | fc r3 |
| 551 | add r4=8,r2 // duplicate r2 in r4 | 672 | fc r4 |
| 552 | add r6=2*8,r2 // duplicate r2 in r6 | 673 | fc r5 |
| 553 | 674 | fc r6 | |
| 554 | mov r3=ar.k0 | 675 | fc r7 |
| 555 | mov r5=ar.k1 | 676 | add r17=32*8,r17 |
| 556 | mov r7=ar.k2;; | 677 | add r1=32*8,r1 |
| 557 | st8 [r2]=r3,3*8 | 678 | add r2=32*8,r2 |
| 558 | st8 [r4]=r5,3*8 | 679 | add r3=32*8,r3 |
| 559 | st8 [r6]=r7,3*8;; | 680 | add r4=32*8,r4 |
| 560 | 681 | add r5=32*8,r5 | |
| 561 | mov r3=ar.k3 | 682 | add r6=32*8,r6 |
| 562 | mov r5=ar.k4 | 683 | add r7=32*8,r7 |
| 563 | mov r7=ar.k5;; | 684 | ;; |
| 564 | st8 [r2]=r3,3*8 | 685 | fc r17 |
| 565 | st8 [r4]=r5,3*8 | 686 | fc r1 |
| 566 | st8 [r6]=r7,3*8;; | 687 | fc r2 |
| 567 | 688 | fc r3 | |
| 568 | mov r3=ar.k6 | 689 | fc r4 |
| 569 | mov r5=ar.k7 | 690 | fc r5 |
| 570 | mov r7=r0;; // ar.kr8 | 691 | fc r6 |
| 571 | st8 [r2]=r3,10*8 | 692 | fc r7 |
| 572 | st8 [r4]=r5,10*8 | 693 | |
| 573 | st8 [r6]=r7,10*8;; // rement by 72 bytes | 694 | br.sptk b0 |
| 574 | |||
| 575 | mov r3=ar.rsc | ||
| 576 | mov ar.rsc=r0 // put RSE in enforced lazy mode | ||
| 577 | mov r5=ar.bsp | ||
| 578 | ;; | ||
| 579 | mov r7=ar.bspstore;; | ||
| 580 | st8 [r2]=r3,3*8 | ||
| 581 | st8 [r4]=r5,3*8 | ||
| 582 | st8 [r6]=r7,3*8;; | ||
| 583 | |||
| 584 | mov r3=ar.rnat;; | ||
| 585 | st8 [r2]=r3,8*13 // increment by 13x8 bytes | ||
| 586 | |||
| 587 | mov r3=ar.ccv;; | ||
| 588 | st8 [r2]=r3,8*4 | ||
| 589 | |||
| 590 | mov r3=ar.unat;; | ||
| 591 | st8 [r2]=r3,8*4 | ||
| 592 | |||
| 593 | mov r3=ar.fpsr;; | ||
| 594 | st8 [r2]=r3,8*4 | ||
| 595 | |||
| 596 | mov r3=ar.itc;; | ||
| 597 | st8 [r2]=r3,160 // 160 | ||
| 598 | |||
| 599 | mov r3=ar.pfs;; | ||
| 600 | st8 [r2]=r3,8 | ||
| 601 | |||
| 602 | mov r3=ar.lc;; | ||
| 603 | st8 [r2]=r3,8 | ||
| 604 | |||
| 605 | mov r3=ar.ec;; | ||
| 606 | st8 [r2]=r3 | ||
| 607 | add r2=8*62,r2 //padding | ||
| 608 | |||
| 609 | // save RRs | ||
| 610 | mov ar.lc=0x08-1 | ||
| 611 | movl r4=0x00;; | ||
| 612 | |||
| 613 | cStRR: | ||
| 614 | dep.z r5=r4,61,3;; | ||
| 615 | mov r3=rr[r5];; | ||
| 616 | st8 [r2]=r3,8 | ||
| 617 | add r4=1,r4 | ||
| 618 | br.cloop.sptk.few cStRR | ||
| 619 | ;; | ||
| 620 | end_os_mca_dump: | ||
| 621 | br ia64_os_mca_done_dump;; | ||
| 622 | 695 | ||
| 623 | //EndStub////////////////////////////////////////////////////////////////////// | 696 | //EndStub////////////////////////////////////////////////////////////////////// |
| 624 | 697 | ||
| 625 | 698 | ||
| 626 | //++ | 699 | //++ |
| 627 | // Name: | 700 | // Name: |
| 628 | // ia64_os_mca_proc_state_restore() | 701 | // ia64_state_restore() |
| 629 | // | 702 | // |
| 630 | // Stub Description: | 703 | // Stub Description: |
| 631 | // | 704 | // |
| 632 | // This is a stub to restore the saved processor state during MCHK | 705 | // Restore the SAL/OS state. This is sensitive to the layout of struct |
| 706 | // ia64_sal_os_state in mca.h. | ||
| 707 | // | ||
| 708 | // r2 contains the return address, r3 contains either | ||
| 709 | // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. | ||
| 710 | // | ||
| 711 | // In addition to the SAL to OS state, this routine restores all the | ||
| 712 | // registers that appear in struct pt_regs and struct switch_stack, | ||
| 713 | // excluding those in the PAL minstate area. | ||
| 633 | // | 714 | // |
| 634 | //-- | 715 | //-- |
| 635 | 716 | ||
| 636 | ia64_os_mca_proc_state_restore: | 717 | ia64_state_restore: |
| 718 | // Restore the switch_stack data that is not in minstate nor pt_regs. | ||
| 719 | add regs=MCA_SWITCH_STACK_OFFSET, r3 | ||
| 720 | mov b0=r2 // save return address | ||
| 721 | ;; | ||
| 722 | GET_IA64_MCA_DATA(temp2) | ||
| 723 | ;; | ||
| 724 | add regs=temp2, regs | ||
| 725 | ;; | ||
| 726 | add temp1=SW(F2), regs | ||
| 727 | add temp2=SW(F3), regs | ||
| 728 | ;; | ||
| 729 | ldf.fill f2=[temp1],32 | ||
| 730 | ldf.fill f3=[temp2],32 | ||
| 731 | ;; | ||
| 732 | ldf.fill f4=[temp1],32 | ||
| 733 | ldf.fill f5=[temp2],32 | ||
| 734 | ;; | ||
| 735 | ldf.fill f12=[temp1],32 | ||
| 736 | ldf.fill f13=[temp2],32 | ||
| 737 | ;; | ||
| 738 | ldf.fill f14=[temp1],32 | ||
| 739 | ldf.fill f15=[temp2],32 | ||
| 740 | ;; | ||
| 741 | ldf.fill f16=[temp1],32 | ||
| 742 | ldf.fill f17=[temp2],32 | ||
| 743 | ;; | ||
| 744 | ldf.fill f18=[temp1],32 | ||
| 745 | ldf.fill f19=[temp2],32 | ||
| 746 | ;; | ||
| 747 | ldf.fill f20=[temp1],32 | ||
| 748 | ldf.fill f21=[temp2],32 | ||
| 749 | ;; | ||
| 750 | ldf.fill f22=[temp1],32 | ||
| 751 | ldf.fill f23=[temp2],32 | ||
| 752 | ;; | ||
| 753 | ldf.fill f24=[temp1],32 | ||
| 754 | ldf.fill f25=[temp2],32 | ||
| 755 | ;; | ||
| 756 | ldf.fill f26=[temp1],32 | ||
| 757 | ldf.fill f27=[temp2],32 | ||
| 758 | ;; | ||
| 759 | ldf.fill f28=[temp1],32 | ||
| 760 | ldf.fill f29=[temp2],32 | ||
| 761 | ;; | ||
| 762 | ldf.fill f30=[temp1],SW(B2)-SW(F30) | ||
| 763 | ldf.fill f31=[temp2],SW(B3)-SW(F31) | ||
| 764 | ;; | ||
| 765 | ld8 temp3=[temp1],16 // restore b2 | ||
| 766 | ld8 temp4=[temp2],16 // restore b3 | ||
| 767 | ;; | ||
| 768 | mov b2=temp3 | ||
| 769 | mov b3=temp4 | ||
| 770 | ld8 temp3=[temp1],SW(AR_LC)-SW(B4) // restore b4 | ||
| 771 | ld8 temp4=[temp2] // restore b5 | ||
| 772 | ;; | ||
| 773 | mov b4=temp3 | ||
| 774 | mov b5=temp4 | ||
| 775 | ld8 temp3=[temp1] // restore ar.lc | ||
| 776 | ;; | ||
| 777 | mov ar.lc=temp3 | ||
| 637 | 778 | ||
| 638 | // Restore bank1 GR16-31 | 779 | // Restore the pt_regs data that is not in minstate. The previous code |
| 639 | GET_IA64_MCA_DATA(r2) | 780 | // left regs at switch_stack. |
| 781 | add regs=MCA_PT_REGS_OFFSET-MCA_SWITCH_STACK_OFFSET, regs | ||
| 782 | ;; | ||
| 783 | add temp1=PT(B6), regs | ||
| 784 | add temp2=PT(B7), regs | ||
| 785 | ;; | ||
| 786 | ld8 temp3=[temp1],PT(AR_CSD)-PT(B6) // restore b6 | ||
| 787 | ld8 temp4=[temp2],PT(AR_SSD)-PT(B7) // restore b7 | ||
| 788 | ;; | ||
| 789 | mov b6=temp3 | ||
| 790 | mov b7=temp4 | ||
| 791 | ld8 temp3=[temp1],PT(AR_UNAT)-PT(AR_CSD) // restore ar.csd | ||
| 792 | ld8 temp4=[temp2],PT(AR_PFS)-PT(AR_SSD) // restore ar.ssd | ||
| 793 | ;; | ||
| 794 | mov ar.csd=temp3 | ||
| 795 | mov ar.ssd=temp4 | ||
| 796 | ld8 temp3=[temp1] // restore ar.unat | ||
| 797 | add temp1=PT(AR_CCV)-PT(AR_UNAT), temp1 | ||
| 798 | ld8 temp4=[temp2],PT(AR_FPSR)-PT(AR_PFS) // restore ar.pfs | ||
| 799 | ;; | ||
| 800 | mov ar.unat=temp3 | ||
| 801 | mov ar.pfs=temp4 | ||
| 802 | // ar.rnat, ar.bspstore, loadrs are restore in ia64_old_stack. | ||
| 803 | ld8 temp3=[temp1],PT(F6)-PT(AR_CCV) // restore ar.ccv | ||
| 804 | ld8 temp4=[temp2],PT(F7)-PT(AR_FPSR) // restore ar.fpsr | ||
| 805 | ;; | ||
| 806 | mov ar.ccv=temp3 | ||
| 807 | mov ar.fpsr=temp4 | ||
| 808 | ldf.fill f6=[temp1],PT(F8)-PT(F6) | ||
| 809 | ldf.fill f7=[temp2],PT(F9)-PT(F7) | ||
| 810 | ;; | ||
| 811 | ldf.fill f8=[temp1],PT(F10)-PT(F8) | ||
| 812 | ldf.fill f9=[temp2],PT(F11)-PT(F9) | ||
| 813 | ;; | ||
| 814 | ldf.fill f10=[temp1] | ||
| 815 | ldf.fill f11=[temp2] | ||
| 816 | |||
| 817 | // Restore the SAL to OS state. The previous code left regs at pt_regs. | ||
| 818 | add regs=MCA_SOS_OFFSET-MCA_PT_REGS_OFFSET, regs | ||
| 640 | ;; | 819 | ;; |
| 641 | add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2 | 820 | add temp1=IA64_SAL_OS_STATE_COMMON_OFFSET, regs |
| 642 | 821 | add temp2=IA64_SAL_OS_STATE_COMMON_OFFSET+8, regs | |
| 643 | restore_GRs: // restore bank-1 GRs 16-31 | 822 | ;; |
| 644 | bsw.1;; | 823 | ld8 r12=[temp1],16 // sal_ra |
| 645 | add r3=16*8,r2;; // to get to NaT of GR 16-31 | 824 | ld8 r9=[temp2],16 // sal_gp |
| 646 | ld8 r3=[r3];; | 825 | ;; |
| 647 | mov ar.unat=r3;; // first restore NaT | 826 | ld8 r22=[temp1],24 // pal_min_state, virtual. skip prev_task |
| 648 | 827 | ld8 r21=[temp2],16 // prev_IA64_KR_CURRENT | |
| 649 | ld8.fill r16=[r2],8;; | 828 | ;; |
| 650 | ld8.fill r17=[r2],8;; | 829 | ld8 temp3=[temp1],16 // cr.isr |
| 651 | ld8.fill r18=[r2],8;; | 830 | ld8 temp4=[temp2],16 // cr.ifa |
| 652 | ld8.fill r19=[r2],8;; | 831 | ;; |
| 653 | ld8.fill r20=[r2],8;; | 832 | mov cr.isr=temp3 |
| 654 | ld8.fill r21=[r2],8;; | 833 | mov cr.ifa=temp4 |
| 655 | ld8.fill r22=[r2],8;; | 834 | ld8 temp3=[temp1],16 // cr.itir |
| 656 | ld8.fill r23=[r2],8;; | 835 | ld8 temp4=[temp2],16 // cr.iipa |
| 657 | ld8.fill r24=[r2],8;; | 836 | ;; |
| 658 | ld8.fill r25=[r2],8;; | 837 | mov cr.itir=temp3 |
| 659 | ld8.fill r26=[r2],8;; | 838 | mov cr.iipa=temp4 |
| 660 | ld8.fill r27=[r2],8;; | 839 | ld8 temp3=[temp1],16 // cr.iim |
| 661 | ld8.fill r28=[r2],8;; | 840 | ld8 temp4=[temp2],16 // cr.iha |
| 662 | ld8.fill r29=[r2],8;; | 841 | ;; |
| 663 | ld8.fill r30=[r2],8;; | 842 | mov cr.iim=temp3 |
| 664 | ld8.fill r31=[r2],8;; | 843 | mov cr.iha=temp4 |
| 665 | 844 | dep r22=0,r22,62,2 // pal_min_state, physical, uncached | |
| 666 | ld8 r3=[r2],8;; // increment to skip NaT | 845 | mov IA64_KR(CURRENT)=r21 |
| 667 | bsw.0;; | 846 | ld8 r8=[temp1] // os_status |
| 668 | 847 | ld8 r10=[temp2] // context | |
| 669 | restore_BRs: | 848 | |
| 670 | add r4=8,r2 // duplicate r2 in r4 | 849 | br.sptk b0 |
| 671 | add r6=2*8,r2;; // duplicate r2 in r4 | ||
| 672 | |||
| 673 | ld8 r3=[r2],3*8 | ||
| 674 | ld8 r5=[r4],3*8 | ||
| 675 | ld8 r7=[r6],3*8;; | ||
| 676 | mov b0=r3 | ||
| 677 | mov b1=r5 | ||
| 678 | mov b2=r7;; | ||
| 679 | |||
| 680 | ld8 r3=[r2],3*8 | ||
| 681 | ld8 r5=[r4],3*8 | ||
| 682 | ld8 r7=[r6],3*8;; | ||
| 683 | mov b3=r3 | ||
| 684 | mov b4=r5 | ||
| 685 | mov b5=r7;; | ||
| 686 | |||
| 687 | ld8 r3=[r2],2*8 | ||
| 688 | ld8 r5=[r4],2*8;; | ||
| 689 | mov b6=r3 | ||
| 690 | mov b7=r5;; | ||
| 691 | |||
| 692 | restore_CRs: | ||
| 693 | add r4=8,r2 // duplicate r2 in r4 | ||
| 694 | add r6=2*8,r2;; // duplicate r2 in r4 | ||
| 695 | |||
| 696 | ld8 r3=[r2],8*8 | ||
| 697 | ld8 r5=[r4],3*8 | ||
| 698 | ld8 r7=[r6],3*8;; // 48 byte increments | ||
| 699 | mov cr.dcr=r3 | ||
| 700 | mov cr.itm=r5 | ||
| 701 | mov cr.iva=r7;; | ||
| 702 | |||
| 703 | ld8 r3=[r2],8*8;; // 64 byte increments | ||
| 704 | // mov cr.pta=r3 | ||
| 705 | |||
| 706 | |||
| 707 | // if PSR.ic=1, reading interruption registers causes an illegal operation fault | ||
| 708 | mov r3=psr;; | ||
| 709 | tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test | ||
| 710 | (p6) st8 [r2]=r0,9*8+160 // increment by 232 byte inc. | ||
| 711 | |||
| 712 | begin_rskip_intr_regs: | ||
| 713 | (p6) br rSkipIntrRegs;; | ||
| 714 | |||
| 715 | add r4=8,r2 // duplicate r2 in r4 | ||
| 716 | add r6=2*8,r2;; // duplicate r2 in r4 | ||
| 717 | |||
| 718 | ld8 r3=[r2],3*8 | ||
| 719 | ld8 r5=[r4],3*8 | ||
| 720 | ld8 r7=[r6],3*8;; | ||
| 721 | mov cr.ipsr=r3 | ||
| 722 | // mov cr.isr=r5 // cr.isr is read only | ||
| 723 | |||
| 724 | ld8 r3=[r2],3*8 | ||
| 725 | ld8 r5=[r4],3*8 | ||
| 726 | ld8 r7=[r6],3*8;; | ||
| 727 | mov cr.iip=r3 | ||
| 728 | mov cr.ifa=r5 | ||
| 729 | mov cr.itir=r7;; | ||
| 730 | |||
| 731 | ld8 r3=[r2],3*8 | ||
| 732 | ld8 r5=[r4],3*8 | ||
| 733 | ld8 r7=[r6],3*8;; | ||
| 734 | mov cr.iipa=r3 | ||
| 735 | mov cr.ifs=r5 | ||
| 736 | mov cr.iim=r7 | ||
| 737 | |||
| 738 | ld8 r3=[r2],160;; // 160 byte increment | ||
| 739 | mov cr.iha=r3 | ||
| 740 | |||
| 741 | rSkipIntrRegs: | ||
| 742 | ld8 r3=[r2],152;; // another 152 byte inc. | ||
| 743 | |||
| 744 | add r4=8,r2 // duplicate r2 in r4 | ||
| 745 | add r6=2*8,r2;; // duplicate r2 in r6 | ||
| 746 | |||
| 747 | ld8 r3=[r2],8*3 | ||
| 748 | ld8 r5=[r4],8*3 | ||
| 749 | ld8 r7=[r6],8*3;; | ||
| 750 | mov cr.lid=r3 | ||
| 751 | // mov cr.ivr=r5 // cr.ivr is read only | ||
| 752 | mov cr.tpr=r7;; | ||
| 753 | |||
| 754 | ld8 r3=[r2],8*3 | ||
| 755 | ld8 r5=[r4],8*3 | ||
| 756 | ld8 r7=[r6],8*3;; | ||
| 757 | // mov cr.eoi=r3 | ||
| 758 | // mov cr.irr0=r5 // cr.irr0 is read only | ||
| 759 | // mov cr.irr1=r7;; // cr.irr1 is read only | ||
| 760 | |||
| 761 | ld8 r3=[r2],8*3 | ||
| 762 | ld8 r5=[r4],8*3 | ||
| 763 | ld8 r7=[r6],8*3;; | ||
| 764 | // mov cr.irr2=r3 // cr.irr2 is read only | ||
| 765 | // mov cr.irr3=r5 // cr.irr3 is read only | ||
| 766 | mov cr.itv=r7;; | ||
| 767 | |||
| 768 | ld8 r3=[r2],8*7 | ||
| 769 | ld8 r5=[r4],8*7;; | ||
| 770 | mov cr.pmv=r3 | ||
| 771 | mov cr.cmcv=r5;; | ||
| 772 | |||
| 773 | ld8 r3=[r2],8*23 | ||
| 774 | ld8 r5=[r4],8*23;; | ||
| 775 | adds r2=8*23,r2 | ||
| 776 | adds r4=8*23,r4;; | ||
| 777 | // mov cr.lrr0=r3 | ||
| 778 | // mov cr.lrr1=r5 | ||
| 779 | |||
| 780 | adds r2=8*2,r2;; | ||
| 781 | |||
| 782 | restore_ARs: | ||
| 783 | add r4=8,r2 // duplicate r2 in r4 | ||
| 784 | add r6=2*8,r2;; // duplicate r2 in r6 | ||
| 785 | |||
| 786 | ld8 r3=[r2],3*8 | ||
| 787 | ld8 r5=[r4],3*8 | ||
| 788 | ld8 r7=[r6],3*8;; | ||
| 789 | mov ar.k0=r3 | ||
| 790 | mov ar.k1=r5 | ||
| 791 | mov ar.k2=r7;; | ||
| 792 | |||
| 793 | ld8 r3=[r2],3*8 | ||
| 794 | ld8 r5=[r4],3*8 | ||
| 795 | ld8 r7=[r6],3*8;; | ||
| 796 | mov ar.k3=r3 | ||
| 797 | mov ar.k4=r5 | ||
| 798 | mov ar.k5=r7;; | ||
| 799 | |||
| 800 | ld8 r3=[r2],10*8 | ||
| 801 | ld8 r5=[r4],10*8 | ||
| 802 | ld8 r7=[r6],10*8;; | ||
| 803 | mov ar.k6=r3 | ||
| 804 | mov ar.k7=r5 | ||
| 805 | ;; | ||
| 806 | |||
| 807 | ld8 r3=[r2],3*8 | ||
| 808 | ld8 r5=[r4],3*8 | ||
| 809 | ld8 r7=[r6],3*8;; | ||
| 810 | // mov ar.rsc=r3 | ||
| 811 | // mov ar.bsp=r5 // ar.bsp is read only | ||
| 812 | mov ar.rsc=r0 // make sure that RSE is in enforced lazy mode | ||
| 813 | ;; | ||
| 814 | mov ar.bspstore=r7;; | ||
| 815 | |||
| 816 | ld8 r9=[r2],8*13;; | ||
| 817 | mov ar.rnat=r9 | ||
| 818 | |||
| 819 | mov ar.rsc=r3 | ||
| 820 | ld8 r3=[r2],8*4;; | ||
| 821 | mov ar.ccv=r3 | ||
| 822 | |||
| 823 | ld8 r3=[r2],8*4;; | ||
| 824 | mov ar.unat=r3 | ||
| 825 | |||
| 826 | ld8 r3=[r2],8*4;; | ||
| 827 | mov ar.fpsr=r3 | ||
| 828 | |||
| 829 | ld8 r3=[r2],160;; // 160 | ||
| 830 | // mov ar.itc=r3 | ||
| 831 | |||
| 832 | ld8 r3=[r2],8;; | ||
| 833 | mov ar.pfs=r3 | ||
| 834 | |||
| 835 | ld8 r3=[r2],8;; | ||
| 836 | mov ar.lc=r3 | ||
| 837 | |||
| 838 | ld8 r3=[r2];; | ||
| 839 | mov ar.ec=r3 | ||
| 840 | add r2=8*62,r2;; // padding | ||
| 841 | |||
| 842 | restore_RRs: | ||
| 843 | mov r5=ar.lc | ||
| 844 | mov ar.lc=0x08-1 | ||
| 845 | movl r4=0x00;; | ||
| 846 | cStRRr: | ||
| 847 | dep.z r7=r4,61,3 | ||
| 848 | ld8 r3=[r2],8;; | ||
| 849 | mov rr[r7]=r3 // what are its access privileges? | ||
| 850 | add r4=1,r4 | ||
| 851 | br.cloop.sptk.few cStRRr | ||
| 852 | ;; | ||
| 853 | mov ar.lc=r5 | ||
| 854 | ;; | ||
| 855 | end_os_mca_restore: | ||
| 856 | br ia64_os_mca_done_restore;; | ||
| 857 | 850 | ||
| 858 | //EndStub////////////////////////////////////////////////////////////////////// | 851 | //EndStub////////////////////////////////////////////////////////////////////// |
| 859 | 852 | ||
| 860 | 853 | ||
| 861 | // ok, the issue here is that we need to save state information so | 854 | //++ |
| 862 | // it can be useable by the kernel debugger and show regs routines. | 855 | // Name: |
| 863 | // In order to do this, our best bet is save the current state (plus | 856 | // ia64_new_stack() |
| 864 | // the state information obtain from the MIN_STATE_AREA) into a pt_regs | ||
| 865 | // format. This way we can pass it on in a useable format. | ||
| 866 | // | 857 | // |
| 867 | 858 | // Stub Description: | |
| 868 | // | 859 | // |
| 869 | // SAL to OS entry point for INIT on the monarch processor | 860 | // Switch to the MCA/INIT stack. |
| 870 | // This has been defined for registration purposes with SAL | ||
| 871 | // as a part of ia64_mca_init. | ||
| 872 | // | 861 | // |
| 873 | // When we get here, the following registers have been | 862 | // r2 contains the return address, r3 contains either |
| 874 | // set by the SAL for our use | 863 | // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. |
| 875 | // | 864 | // |
| 876 | // 1. GR1 = OS INIT GP | 865 | // On entry RBS is still on the original stack, this routine switches RBS |
| 877 | // 2. GR8 = PAL_PROC physical address | 866 | // to use the MCA/INIT stack. |
| 878 | // 3. GR9 = SAL_PROC physical address | ||
| 879 | // 4. GR10 = SAL GP (physical) | ||
| 880 | // 5. GR11 = Init Reason | ||
| 881 | // 0 = Received INIT for event other than crash dump switch | ||
| 882 | // 1 = Received wakeup at the end of an OS_MCA corrected machine check | ||
| 883 | // 2 = Received INIT due to CrashDump switch assertion | ||
| 884 | // | 867 | // |
| 885 | // 6. GR12 = Return address to location within SAL_INIT procedure | 868 | // On entry, sos->pal_min_state is physical, on exit it is virtual. |
| 886 | 869 | // | |
| 870 | //-- | ||
| 887 | 871 | ||
| 888 | GLOBAL_ENTRY(ia64_monarch_init_handler) | 872 | ia64_new_stack: |
| 889 | .prologue | 873 | add regs=MCA_PT_REGS_OFFSET, r3 |
| 890 | // stash the information the SAL passed to os | 874 | add temp2=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET, r3 |
| 891 | SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2) | 875 | mov b0=r2 // save return address |
| 876 | GET_IA64_MCA_DATA(temp1) | ||
| 877 | invala | ||
| 892 | ;; | 878 | ;; |
| 893 | SAVE_MIN_WITH_COVER | 879 | add temp2=temp2, temp1 // struct ia64_sal_os_state.pal_min_state on MCA or INIT stack |
| 880 | add regs=regs, temp1 // struct pt_regs on MCA or INIT stack | ||
| 894 | ;; | 881 | ;; |
| 895 | mov r8=cr.ifa | 882 | // Address of minstate area provided by PAL is physical, uncacheable. |
| 896 | mov r9=cr.isr | 883 | // Convert to Linux virtual address in region 6 for C code. |
| 897 | adds r3=8,r2 // set up second base pointer | 884 | ld8 ms=[temp2] // pal_min_state, physical |
| 898 | ;; | 885 | ;; |
| 899 | SAVE_REST | 886 | dep temp1=-1,ms,62,2 // set region 6 |
| 900 | 887 | mov temp3=IA64_RBS_OFFSET-MCA_PT_REGS_OFFSET | |
| 901 | // ok, enough should be saved at this point to be dangerous, and supply | 888 | ;; |
| 902 | // information for a dump | 889 | st8 [temp2]=temp1 // pal_min_state, virtual |
| 903 | // We need to switch to Virtual mode before hitting the C functions. | ||
| 904 | 890 | ||
| 905 | movl r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN | 891 | add temp4=temp3, regs // start of bspstore on new stack |
| 906 | mov r3=psr // get the current psr, minimum enabled at this point | ||
| 907 | ;; | 892 | ;; |
| 908 | or r2=r2,r3 | 893 | mov ar.bspstore=temp4 // switch RBS to MCA/INIT stack |
| 909 | ;; | 894 | ;; |
| 910 | movl r3=IVirtual_Switch | 895 | flushrs // must be first in group |
| 896 | br.sptk b0 | ||
| 897 | |||
| 898 | //EndStub////////////////////////////////////////////////////////////////////// | ||
| 899 | |||
| 900 | |||
| 901 | //++ | ||
| 902 | // Name: | ||
| 903 | // ia64_old_stack() | ||
| 904 | // | ||
| 905 | // Stub Description: | ||
| 906 | // | ||
| 907 | // Switch to the old stack. | ||
| 908 | // | ||
| 909 | // r2 contains the return address, r3 contains either | ||
| 910 | // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. | ||
| 911 | // | ||
| 912 | // On entry, pal_min_state is virtual, on exit it is physical. | ||
| 913 | // | ||
| 914 | // On entry RBS is on the MCA/INIT stack, this routine switches RBS | ||
| 915 | // back to the previous stack. | ||
| 916 | // | ||
| 917 | // The psr is set to all zeroes. SAL return requires either all zeroes or | ||
| 918 | // just psr.mc set. Leaving psr.mc off allows INIT to be issued if this | ||
| 919 | // code does not perform correctly. | ||
| 920 | // | ||
| 921 | // The dirty registers at the time of the event were flushed to the | ||
| 922 | // MCA/INIT stack in ia64_pt_regs_save(). Restore the dirty registers | ||
| 923 | // before reverting to the previous bspstore. | ||
| 924 | //-- | ||
| 925 | |||
| 926 | ia64_old_stack: | ||
| 927 | add regs=MCA_PT_REGS_OFFSET, r3 | ||
| 928 | mov b0=r2 // save return address | ||
| 929 | GET_IA64_MCA_DATA(temp2) | ||
| 930 | LOAD_PHYSICAL(p0,temp1,1f) | ||
| 911 | ;; | 931 | ;; |
| 912 | mov cr.iip=r3 // short return to set the appropriate bits | 932 | mov cr.ipsr=r0 |
| 913 | mov cr.ipsr=r2 // need to do an rfi to set appropriate bits | 933 | mov cr.ifs=r0 |
| 934 | mov cr.iip=temp1 | ||
| 914 | ;; | 935 | ;; |
| 936 | invala | ||
| 915 | rfi | 937 | rfi |
| 938 | 1: | ||
| 939 | |||
| 940 | add regs=regs, temp2 // struct pt_regs on MCA or INIT stack | ||
| 916 | ;; | 941 | ;; |
| 917 | IVirtual_Switch: | 942 | add temp1=PT(LOADRS), regs |
| 918 | // | ||
| 919 | // We should now be running virtual | ||
| 920 | // | ||
| 921 | // Let's call the C handler to get the rest of the state info | ||
| 922 | // | ||
| 923 | alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!) | ||
| 924 | ;; | 943 | ;; |
| 925 | adds out0=16,sp // out0 = pointer to pt_regs | 944 | ld8 temp2=[temp1],PT(AR_BSPSTORE)-PT(LOADRS) // restore loadrs |
| 926 | ;; | 945 | ;; |
| 927 | DO_SAVE_SWITCH_STACK | 946 | ld8 temp3=[temp1],PT(AR_RNAT)-PT(AR_BSPSTORE) // restore ar.bspstore |
| 928 | .body | 947 | mov ar.rsc=temp2 |
| 929 | adds out1=16,sp // out0 = pointer to switch_stack | 948 | ;; |
| 949 | loadrs | ||
| 950 | ld8 temp4=[temp1] // restore ar.rnat | ||
| 951 | ;; | ||
| 952 | mov ar.bspstore=temp3 // back to old stack | ||
| 953 | ;; | ||
| 954 | mov ar.rnat=temp4 | ||
| 955 | ;; | ||
| 956 | |||
| 957 | br.sptk b0 | ||
| 930 | 958 | ||
| 931 | br.call.sptk.many rp=ia64_init_handler | 959 | //EndStub////////////////////////////////////////////////////////////////////// |
| 932 | .ret1: | ||
| 933 | 960 | ||
| 934 | return_from_init: | ||
| 935 | br.sptk return_from_init | ||
| 936 | END(ia64_monarch_init_handler) | ||
| 937 | 961 | ||
| 962 | //++ | ||
| 963 | // Name: | ||
| 964 | // ia64_set_kernel_registers() | ||
| 938 | // | 965 | // |
| 939 | // SAL to OS entry point for INIT on the slave processor | 966 | // Stub Description: |
| 940 | // This has been defined for registration purposes with SAL | 967 | // |
| 941 | // as a part of ia64_mca_init. | 968 | // Set the registers that are required by the C code in order to run on an |
| 969 | // MCA/INIT stack. | ||
| 970 | // | ||
| 971 | // r2 contains the return address, r3 contains either | ||
| 972 | // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. | ||
| 942 | // | 973 | // |
| 974 | //-- | ||
| 975 | |||
| 976 | ia64_set_kernel_registers: | ||
| 977 | add temp3=MCA_SP_OFFSET, r3 | ||
| 978 | add temp4=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_OS_GP_OFFSET, r3 | ||
| 979 | mov b0=r2 // save return address | ||
| 980 | GET_IA64_MCA_DATA(temp1) | ||
| 981 | ;; | ||
| 982 | add temp4=temp4, temp1 // &struct ia64_sal_os_state.os_gp | ||
| 983 | add r12=temp1, temp3 // kernel stack pointer on MCA/INIT stack | ||
| 984 | add r13=temp1, r3 // set current to start of MCA/INIT stack | ||
| 985 | ;; | ||
| 986 | ld8 r1=[temp4] // OS GP from SAL OS state | ||
| 987 | ;; | ||
| 988 | DATA_PA_TO_VA(r1,temp1) | ||
| 989 | DATA_PA_TO_VA(r12,temp2) | ||
| 990 | DATA_PA_TO_VA(r13,temp3) | ||
| 991 | ;; | ||
| 992 | mov IA64_KR(CURRENT)=r13 | ||
| 993 | |||
| 994 | // FIXME: do I need to wire IA64_KR_CURRENT_STACK and IA64_TR_CURRENT_STACK? | ||
| 995 | |||
| 996 | br.sptk b0 | ||
| 997 | |||
| 998 | //EndStub////////////////////////////////////////////////////////////////////// | ||
| 999 | |||
| 1000 | #undef ms | ||
| 1001 | #undef regs | ||
| 1002 | #undef temp1 | ||
| 1003 | #undef temp2 | ||
| 1004 | #undef temp3 | ||
| 1005 | #undef temp4 | ||
| 1006 | |||
| 943 | 1007 | ||
| 944 | GLOBAL_ENTRY(ia64_slave_init_handler) | 1008 | // Support function for mca.c, it is here to avoid using inline asm. Given the |
| 945 | 1: br.sptk 1b | 1009 | // address of an rnat slot, if that address is below the current ar.bspstore |
| 946 | END(ia64_slave_init_handler) | 1010 | // then return the contents of that slot, otherwise return the contents of |
| 1011 | // ar.rnat. | ||
| 1012 | GLOBAL_ENTRY(ia64_get_rnat) | ||
| 1013 | alloc r14=ar.pfs,1,0,0,0 | ||
| 1014 | mov ar.rsc=0 | ||
| 1015 | ;; | ||
| 1016 | mov r14=ar.bspstore | ||
| 1017 | ;; | ||
| 1018 | cmp.lt p6,p7=in0,r14 | ||
| 1019 | ;; | ||
| 1020 | (p6) ld8 r8=[in0] | ||
| 1021 | (p7) mov r8=ar.rnat | ||
| 1022 | mov ar.rsc=3 | ||
| 1023 | br.ret.sptk.many rp | ||
| 1024 | END(ia64_get_rnat) | ||
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index abc0113a821d..6e683745af49 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c | |||
| @@ -4,6 +4,8 @@ | |||
| 4 | * | 4 | * |
| 5 | * Copyright (C) 2004 FUJITSU LIMITED | 5 | * Copyright (C) 2004 FUJITSU LIMITED |
| 6 | * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com) | 6 | * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com) |
| 7 | * Copyright (C) 2005 Silicon Graphics, Inc | ||
| 8 | * Copyright (C) 2005 Keith Owens <kaos@sgi.com> | ||
| 7 | */ | 9 | */ |
| 8 | #include <linux/config.h> | 10 | #include <linux/config.h> |
| 9 | #include <linux/types.h> | 11 | #include <linux/types.h> |
| @@ -38,10 +40,6 @@ | |||
| 38 | /* max size of SAL error record (default) */ | 40 | /* max size of SAL error record (default) */ |
| 39 | static int sal_rec_max = 10000; | 41 | static int sal_rec_max = 10000; |
| 40 | 42 | ||
| 41 | /* from mca.c */ | ||
| 42 | static ia64_mca_sal_to_os_state_t *sal_to_os_handoff_state; | ||
| 43 | static ia64_mca_os_to_sal_state_t *os_to_sal_handoff_state; | ||
| 44 | |||
| 45 | /* from mca_drv_asm.S */ | 43 | /* from mca_drv_asm.S */ |
| 46 | extern void *mca_handler_bhhook(void); | 44 | extern void *mca_handler_bhhook(void); |
| 47 | 45 | ||
| @@ -316,7 +314,8 @@ init_record_index_pools(void) | |||
| 316 | */ | 314 | */ |
| 317 | 315 | ||
| 318 | static mca_type_t | 316 | static mca_type_t |
| 319 | is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci) | 317 | is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci, |
| 318 | struct ia64_sal_os_state *sos) | ||
| 320 | { | 319 | { |
| 321 | pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); | 320 | pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); |
| 322 | 321 | ||
| @@ -327,7 +326,7 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci) | |||
| 327 | * Therefore it is local MCA when rendezvous has not been requested. | 326 | * Therefore it is local MCA when rendezvous has not been requested. |
| 328 | * Failed to rendezvous, the system must be down. | 327 | * Failed to rendezvous, the system must be down. |
| 329 | */ | 328 | */ |
| 330 | switch (sal_to_os_handoff_state->imsto_rendez_state) { | 329 | switch (sos->rv_rc) { |
| 331 | case -1: /* SAL rendezvous unsuccessful */ | 330 | case -1: /* SAL rendezvous unsuccessful */ |
| 332 | return MCA_IS_GLOBAL; | 331 | return MCA_IS_GLOBAL; |
| 333 | case 0: /* SAL rendezvous not required */ | 332 | case 0: /* SAL rendezvous not required */ |
| @@ -388,7 +387,8 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci) | |||
| 388 | */ | 387 | */ |
| 389 | 388 | ||
| 390 | static int | 389 | static int |
| 391 | recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci) | 390 | recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci, |
| 391 | struct ia64_sal_os_state *sos) | ||
| 392 | { | 392 | { |
| 393 | sal_log_mod_error_info_t *smei; | 393 | sal_log_mod_error_info_t *smei; |
| 394 | pal_min_state_area_t *pmsa; | 394 | pal_min_state_area_t *pmsa; |
| @@ -426,7 +426,7 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec | |||
| 426 | * setup for resume to bottom half of MCA, | 426 | * setup for resume to bottom half of MCA, |
| 427 | * "mca_handler_bhhook" | 427 | * "mca_handler_bhhook" |
| 428 | */ | 428 | */ |
| 429 | pmsa = (pal_min_state_area_t *)(sal_to_os_handoff_state->pal_min_state | (6ul<<61)); | 429 | pmsa = sos->pal_min_state; |
| 430 | /* pass to bhhook as 1st argument (gr8) */ | 430 | /* pass to bhhook as 1st argument (gr8) */ |
| 431 | pmsa->pmsa_gr[8-1] = smei->target_identifier; | 431 | pmsa->pmsa_gr[8-1] = smei->target_identifier; |
| 432 | /* set interrupted return address (but no use) */ | 432 | /* set interrupted return address (but no use) */ |
| @@ -459,7 +459,8 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec | |||
| 459 | */ | 459 | */ |
| 460 | 460 | ||
| 461 | static int | 461 | static int |
| 462 | recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci) | 462 | recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci, |
| 463 | struct ia64_sal_os_state *sos) | ||
| 463 | { | 464 | { |
| 464 | int status = 0; | 465 | int status = 0; |
| 465 | pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); | 466 | pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); |
| @@ -469,7 +470,7 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_ | |||
| 469 | case 1: /* partial read */ | 470 | case 1: /* partial read */ |
| 470 | case 3: /* full line(cpu) read */ | 471 | case 3: /* full line(cpu) read */ |
| 471 | case 9: /* I/O space read */ | 472 | case 9: /* I/O space read */ |
| 472 | status = recover_from_read_error(slidx, peidx, pbci); | 473 | status = recover_from_read_error(slidx, peidx, pbci, sos); |
| 473 | break; | 474 | break; |
| 474 | case 0: /* unknown */ | 475 | case 0: /* unknown */ |
| 475 | case 2: /* partial write */ | 476 | case 2: /* partial write */ |
| @@ -508,7 +509,8 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_ | |||
| 508 | */ | 509 | */ |
| 509 | 510 | ||
| 510 | static int | 511 | static int |
| 511 | recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci) | 512 | recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci, |
| 513 | struct ia64_sal_os_state *sos) | ||
| 512 | { | 514 | { |
| 513 | pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); | 515 | pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); |
| 514 | 516 | ||
| @@ -545,7 +547,7 @@ recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t * | |||
| 545 | * This means "there are some platform errors". | 547 | * This means "there are some platform errors". |
| 546 | */ | 548 | */ |
| 547 | if (platform) | 549 | if (platform) |
| 548 | return recover_from_platform_error(slidx, peidx, pbci); | 550 | return recover_from_platform_error(slidx, peidx, pbci, sos); |
| 549 | /* | 551 | /* |
| 550 | * On account of strange SAL error record, we cannot recover. | 552 | * On account of strange SAL error record, we cannot recover. |
| 551 | */ | 553 | */ |
| @@ -562,8 +564,7 @@ recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t * | |||
| 562 | 564 | ||
| 563 | static int | 565 | static int |
| 564 | mca_try_to_recover(void *rec, | 566 | mca_try_to_recover(void *rec, |
| 565 | ia64_mca_sal_to_os_state_t *sal_to_os_state, | 567 | struct ia64_sal_os_state *sos) |
| 566 | ia64_mca_os_to_sal_state_t *os_to_sal_state) | ||
| 567 | { | 568 | { |
| 568 | int platform_err; | 569 | int platform_err; |
| 569 | int n_proc_err; | 570 | int n_proc_err; |
| @@ -571,10 +572,6 @@ mca_try_to_recover(void *rec, | |||
| 571 | peidx_table_t peidx; | 572 | peidx_table_t peidx; |
| 572 | pal_bus_check_info_t pbci; | 573 | pal_bus_check_info_t pbci; |
| 573 | 574 | ||
| 574 | /* handoff state from/to mca.c */ | ||
| 575 | sal_to_os_handoff_state = sal_to_os_state; | ||
| 576 | os_to_sal_handoff_state = os_to_sal_state; | ||
| 577 | |||
| 578 | /* Make index of SAL error record */ | 575 | /* Make index of SAL error record */ |
| 579 | platform_err = mca_make_slidx(rec, &slidx); | 576 | platform_err = mca_make_slidx(rec, &slidx); |
| 580 | 577 | ||
| @@ -597,11 +594,11 @@ mca_try_to_recover(void *rec, | |||
| 597 | *((u64*)&pbci) = peidx_check_info(&peidx, bus_check, 0); | 594 | *((u64*)&pbci) = peidx_check_info(&peidx, bus_check, 0); |
| 598 | 595 | ||
| 599 | /* Check whether MCA is global or not */ | 596 | /* Check whether MCA is global or not */ |
| 600 | if (is_mca_global(&peidx, &pbci)) | 597 | if (is_mca_global(&peidx, &pbci, sos)) |
| 601 | return 0; | 598 | return 0; |
| 602 | 599 | ||
| 603 | /* Try to recover a processor error */ | 600 | /* Try to recover a processor error */ |
| 604 | return recover_from_processor_error(platform_err, &slidx, &peidx, &pbci); | 601 | return recover_from_processor_error(platform_err, &slidx, &peidx, &pbci, sos); |
| 605 | } | 602 | } |
| 606 | 603 | ||
| 607 | /* | 604 | /* |
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h index 149ad0118455..97a28b8b2ddd 100644 --- a/include/asm-ia64/mca.h +++ b/include/asm-ia64/mca.h | |||
| @@ -11,8 +11,6 @@ | |||
| 11 | #ifndef _ASM_IA64_MCA_H | 11 | #ifndef _ASM_IA64_MCA_H |
| 12 | #define _ASM_IA64_MCA_H | 12 | #define _ASM_IA64_MCA_H |
| 13 | 13 | ||
| 14 | #define IA64_MCA_STACK_SIZE 8192 | ||
| 15 | |||
| 16 | #if !defined(__ASSEMBLY__) | 14 | #if !defined(__ASSEMBLY__) |
| 17 | 15 | ||
| 18 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
| @@ -48,7 +46,8 @@ typedef union cmcv_reg_u { | |||
| 48 | 46 | ||
| 49 | enum { | 47 | enum { |
| 50 | IA64_MCA_RENDEZ_CHECKIN_NOTDONE = 0x0, | 48 | IA64_MCA_RENDEZ_CHECKIN_NOTDONE = 0x0, |
| 51 | IA64_MCA_RENDEZ_CHECKIN_DONE = 0x1 | 49 | IA64_MCA_RENDEZ_CHECKIN_DONE = 0x1, |
| 50 | IA64_MCA_RENDEZ_CHECKIN_INIT = 0x2, | ||
| 52 | }; | 51 | }; |
| 53 | 52 | ||
| 54 | /* Information maintained by the MC infrastructure */ | 53 | /* Information maintained by the MC infrastructure */ |
| @@ -63,18 +62,42 @@ typedef struct ia64_mc_info_s { | |||
| 63 | 62 | ||
| 64 | } ia64_mc_info_t; | 63 | } ia64_mc_info_t; |
| 65 | 64 | ||
| 66 | typedef struct ia64_mca_sal_to_os_state_s { | 65 | /* Handover state from SAL to OS and vice versa, for both MCA and INIT events. |
| 67 | u64 imsto_os_gp; /* GP of the os registered with the SAL */ | 66 | * Besides the handover state, it also contains some saved registers from the |
| 68 | u64 imsto_pal_proc; /* PAL_PROC entry point - physical addr */ | 67 | * time of the event. |
| 69 | u64 imsto_sal_proc; /* SAL_PROC entry point - physical addr */ | 68 | * Note: mca_asm.S depends on the precise layout of this structure. |
| 70 | u64 imsto_sal_gp; /* GP of the SAL - physical */ | 69 | */ |
| 71 | u64 imsto_rendez_state; /* Rendez state information */ | 70 | |
| 72 | u64 imsto_sal_check_ra; /* Return address in SAL_CHECK while going | 71 | struct ia64_sal_os_state { |
| 73 | * back to SAL from OS after MCA handling. | 72 | /* SAL to OS, must be at offset 0 */ |
| 74 | */ | 73 | u64 os_gp; /* GP of the os registered with the SAL, physical */ |
| 75 | u64 pal_min_state; /* from PAL in r17 */ | 74 | u64 pal_proc; /* PAL_PROC entry point, physical */ |
| 76 | u64 proc_state_param; /* from PAL in r18. See SDV 2:268 11.3.2.1 */ | 75 | u64 sal_proc; /* SAL_PROC entry point, physical */ |
| 77 | } ia64_mca_sal_to_os_state_t; | 76 | u64 rv_rc; /* MCA - Rendezvous state, INIT - reason code */ |
| 77 | u64 proc_state_param; /* from R18 */ | ||
| 78 | u64 monarch; /* 1 for a monarch event, 0 for a slave */ | ||
| 79 | /* common, must follow SAL to OS */ | ||
| 80 | u64 sal_ra; /* Return address in SAL, physical */ | ||
| 81 | u64 sal_gp; /* GP of the SAL - physical */ | ||
| 82 | pal_min_state_area_t *pal_min_state; /* from R17. physical in asm, virtual in C */ | ||
| 83 | u64 prev_IA64_KR_CURRENT; /* previous value of IA64_KR(CURRENT) */ | ||
| 84 | struct task_struct *prev_task; /* previous task, NULL if it is not useful */ | ||
| 85 | /* Some interrupt registers are not saved in minstate, pt_regs or | ||
| 86 | * switch_stack. Because MCA/INIT can occur when interrupts are | ||
| 87 | * disabled, we need to save the additional interrupt registers over | ||
| 88 | * MCA/INIT and resume. | ||
| 89 | */ | ||
| 90 | u64 isr; | ||
| 91 | u64 ifa; | ||
| 92 | u64 itir; | ||
| 93 | u64 iipa; | ||
| 94 | u64 iim; | ||
| 95 | u64 iha; | ||
| 96 | /* OS to SAL, must follow common */ | ||
| 97 | u64 os_status; /* OS status to SAL, enum below */ | ||
| 98 | u64 context; /* 0 if return to same context | ||
| 99 | 1 if return to new context */ | ||
| 100 | }; | ||
| 78 | 101 | ||
| 79 | enum { | 102 | enum { |
| 80 | IA64_MCA_CORRECTED = 0x0, /* Error has been corrected by OS_MCA */ | 103 | IA64_MCA_CORRECTED = 0x0, /* Error has been corrected by OS_MCA */ |
| @@ -84,35 +107,21 @@ enum { | |||
| 84 | }; | 107 | }; |
| 85 | 108 | ||
| 86 | enum { | 109 | enum { |
| 110 | IA64_INIT_RESUME = 0x0, /* Resume after return from INIT */ | ||
| 111 | IA64_INIT_WARM_BOOT = -1, /* Warm boot of the system need from SAL */ | ||
| 112 | }; | ||
| 113 | |||
| 114 | enum { | ||
| 87 | IA64_MCA_SAME_CONTEXT = 0x0, /* SAL to return to same context */ | 115 | IA64_MCA_SAME_CONTEXT = 0x0, /* SAL to return to same context */ |
| 88 | IA64_MCA_NEW_CONTEXT = -1 /* SAL to return to new context */ | 116 | IA64_MCA_NEW_CONTEXT = -1 /* SAL to return to new context */ |
| 89 | }; | 117 | }; |
| 90 | 118 | ||
| 91 | typedef struct ia64_mca_os_to_sal_state_s { | ||
| 92 | u64 imots_os_status; /* OS status to SAL as to what happened | ||
| 93 | * with the MCA handling. | ||
| 94 | */ | ||
| 95 | u64 imots_sal_gp; /* GP of the SAL - physical */ | ||
| 96 | u64 imots_context; /* 0 if return to same context | ||
| 97 | 1 if return to new context */ | ||
| 98 | u64 *imots_new_min_state; /* Pointer to structure containing | ||
| 99 | * new values of registers in the min state | ||
| 100 | * save area. | ||
| 101 | */ | ||
| 102 | u64 imots_sal_check_ra; /* Return address in SAL_CHECK while going | ||
| 103 | * back to SAL from OS after MCA handling. | ||
| 104 | */ | ||
| 105 | } ia64_mca_os_to_sal_state_t; | ||
| 106 | |||
| 107 | /* Per-CPU MCA state that is too big for normal per-CPU variables. */ | 119 | /* Per-CPU MCA state that is too big for normal per-CPU variables. */ |
| 108 | 120 | ||
| 109 | struct ia64_mca_cpu { | 121 | struct ia64_mca_cpu { |
| 110 | u64 stack[IA64_MCA_STACK_SIZE/8]; /* MCA memory-stack */ | 122 | u64 mca_stack[KERNEL_STACK_SIZE/8]; |
| 111 | u64 proc_state_dump[512]; | ||
| 112 | u64 stackframe[32]; | ||
| 113 | u64 rbstore[IA64_MCA_STACK_SIZE/8]; /* MCA reg.-backing store */ | ||
| 114 | u64 init_stack[KERNEL_STACK_SIZE/8]; | 123 | u64 init_stack[KERNEL_STACK_SIZE/8]; |
| 115 | } __attribute__ ((aligned(16))); | 124 | }; |
| 116 | 125 | ||
| 117 | /* Array of physical addresses of each CPU's MCA area. */ | 126 | /* Array of physical addresses of each CPU's MCA area. */ |
| 118 | extern unsigned long __per_cpu_mca[NR_CPUS]; | 127 | extern unsigned long __per_cpu_mca[NR_CPUS]; |
| @@ -121,12 +130,29 @@ extern void ia64_mca_init(void); | |||
| 121 | extern void ia64_mca_cpu_init(void *); | 130 | extern void ia64_mca_cpu_init(void *); |
| 122 | extern void ia64_os_mca_dispatch(void); | 131 | extern void ia64_os_mca_dispatch(void); |
| 123 | extern void ia64_os_mca_dispatch_end(void); | 132 | extern void ia64_os_mca_dispatch_end(void); |
| 124 | extern void ia64_mca_ucmc_handler(void); | 133 | extern void ia64_mca_ucmc_handler(struct pt_regs *, struct ia64_sal_os_state *); |
| 134 | extern void ia64_init_handler(struct pt_regs *, | ||
| 135 | struct switch_stack *, | ||
| 136 | struct ia64_sal_os_state *); | ||
| 125 | extern void ia64_monarch_init_handler(void); | 137 | extern void ia64_monarch_init_handler(void); |
| 126 | extern void ia64_slave_init_handler(void); | 138 | extern void ia64_slave_init_handler(void); |
| 127 | extern void ia64_mca_cmc_vector_setup(void); | 139 | extern void ia64_mca_cmc_vector_setup(void); |
| 128 | extern int ia64_reg_MCA_extension(void*); | 140 | extern int ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *)); |
| 129 | extern void ia64_unreg_MCA_extension(void); | 141 | extern void ia64_unreg_MCA_extension(void); |
| 142 | extern u64 ia64_get_rnat(u64 *); | ||
| 143 | |||
| 144 | #else /* __ASSEMBLY__ */ | ||
| 145 | |||
| 146 | #define IA64_MCA_CORRECTED 0x0 /* Error has been corrected by OS_MCA */ | ||
| 147 | #define IA64_MCA_WARM_BOOT -1 /* Warm boot of the system need from SAL */ | ||
| 148 | #define IA64_MCA_COLD_BOOT -2 /* Cold boot of the system need from SAL */ | ||
| 149 | #define IA64_MCA_HALT -3 /* System to be halted by SAL */ | ||
| 150 | |||
| 151 | #define IA64_INIT_RESUME 0x0 /* Resume after return from INIT */ | ||
| 152 | #define IA64_INIT_WARM_BOOT -1 /* Warm boot of the system need from SAL */ | ||
| 153 | |||
| 154 | #define IA64_MCA_SAME_CONTEXT 0x0 /* SAL to return to same context */ | ||
| 155 | #define IA64_MCA_NEW_CONTEXT -1 /* SAL to return to new context */ | ||
| 130 | 156 | ||
| 131 | #endif /* !__ASSEMBLY__ */ | 157 | #endif /* !__ASSEMBLY__ */ |
| 132 | #endif /* _ASM_IA64_MCA_H */ | 158 | #endif /* _ASM_IA64_MCA_H */ |
diff --git a/include/asm-ia64/mca_asm.h b/include/asm-ia64/mca_asm.h index 836953e0f91f..27c9203d8ce3 100644 --- a/include/asm-ia64/mca_asm.h +++ b/include/asm-ia64/mca_asm.h | |||
| @@ -8,6 +8,8 @@ | |||
| 8 | * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com> | 8 | * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com> |
| 9 | * Copyright (C) 2002 Intel Corp. | 9 | * Copyright (C) 2002 Intel Corp. |
| 10 | * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com> | 10 | * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com> |
| 11 | * Copyright (C) 2005 Silicon Graphics, Inc | ||
| 12 | * Copyright (C) 2005 Keith Owens <kaos@sgi.com> | ||
| 11 | */ | 13 | */ |
| 12 | #ifndef _ASM_IA64_MCA_ASM_H | 14 | #ifndef _ASM_IA64_MCA_ASM_H |
| 13 | #define _ASM_IA64_MCA_ASM_H | 15 | #define _ASM_IA64_MCA_ASM_H |
| @@ -207,106 +209,33 @@ | |||
| 207 | ;; | 209 | ;; |
| 208 | 210 | ||
| 209 | /* | 211 | /* |
| 210 | * The following offsets capture the order in which the | 212 | * The MCA and INIT stacks in struct ia64_mca_cpu look like normal kernel |
| 211 | * RSE related registers from the old context are | 213 | * stacks, except that the SAL/OS state and a switch_stack are stored near the |
| 212 | * saved onto the new stack frame. | 214 | * top of the MCA/INIT stack. To support concurrent entry to MCA or INIT, as |
| 215 | * well as MCA over INIT, each event needs its own SAL/OS state. All entries | ||
| 216 | * are 16 byte aligned. | ||
| 213 | * | 217 | * |
| 214 | * +-----------------------+ | 218 | * +---------------------------+ |
| 215 | * |NDIRTY [BSP - BSPSTORE]| | 219 | * | pt_regs | |
| 216 | * +-----------------------+ | 220 | * +---------------------------+ |
| 217 | * | RNAT | | 221 | * | switch_stack | |
| 218 | * +-----------------------+ | 222 | * +---------------------------+ |
| 219 | * | BSPSTORE | | 223 | * | SAL/OS state | |
| 220 | * +-----------------------+ | 224 | * +---------------------------+ |
| 221 | * | IFS | | 225 | * | 16 byte scratch area | |
| 222 | * +-----------------------+ | 226 | * +---------------------------+ <-------- SP at start of C MCA handler |
| 223 | * | PFS | | 227 | * | ..... | |
| 224 | * +-----------------------+ | 228 | * +---------------------------+ |
| 225 | * | RSC | | 229 | * | RBS for MCA/INIT handler | |
| 226 | * +-----------------------+ <-------- Bottom of new stack frame | 230 | * +---------------------------+ |
| 231 | * | struct task for MCA/INIT | | ||
| 232 | * +---------------------------+ <-------- Bottom of MCA/INIT stack | ||
| 227 | */ | 233 | */ |
| 228 | #define rse_rsc_offset 0 | ||
| 229 | #define rse_pfs_offset (rse_rsc_offset+0x08) | ||
| 230 | #define rse_ifs_offset (rse_pfs_offset+0x08) | ||
| 231 | #define rse_bspstore_offset (rse_ifs_offset+0x08) | ||
| 232 | #define rse_rnat_offset (rse_bspstore_offset+0x08) | ||
| 233 | #define rse_ndirty_offset (rse_rnat_offset+0x08) | ||
| 234 | 234 | ||
| 235 | /* | 235 | #define ALIGN16(x) ((x)&~15) |
| 236 | * rse_switch_context | 236 | #define MCA_PT_REGS_OFFSET ALIGN16(KERNEL_STACK_SIZE-IA64_PT_REGS_SIZE) |
| 237 | * | 237 | #define MCA_SWITCH_STACK_OFFSET ALIGN16(MCA_PT_REGS_OFFSET-IA64_SWITCH_STACK_SIZE) |
| 238 | * 1. Save old RSC onto the new stack frame | 238 | #define MCA_SOS_OFFSET ALIGN16(MCA_SWITCH_STACK_OFFSET-IA64_SAL_OS_STATE_SIZE) |
| 239 | * 2. Save PFS onto new stack frame | 239 | #define MCA_SP_OFFSET ALIGN16(MCA_SOS_OFFSET-16) |
| 240 | * 3. Cover the old frame and start a new frame. | ||
| 241 | * 4. Save IFS onto new stack frame | ||
| 242 | * 5. Save the old BSPSTORE on the new stack frame | ||
| 243 | * 6. Save the old RNAT on the new stack frame | ||
| 244 | * 7. Write BSPSTORE with the new backing store pointer | ||
| 245 | * 8. Read and save the new BSP to calculate the #dirty registers | ||
| 246 | * NOTE: Look at pages 11-10, 11-11 in PRM Vol 2 | ||
| 247 | */ | ||
| 248 | #define rse_switch_context(temp,p_stackframe,p_bspstore) \ | ||
| 249 | ;; \ | ||
| 250 | mov temp=ar.rsc;; \ | ||
| 251 | st8 [p_stackframe]=temp,8;; \ | ||
| 252 | mov temp=ar.pfs;; \ | ||
| 253 | st8 [p_stackframe]=temp,8; \ | ||
| 254 | cover ;; \ | ||
| 255 | mov temp=cr.ifs;; \ | ||
| 256 | st8 [p_stackframe]=temp,8;; \ | ||
| 257 | mov temp=ar.bspstore;; \ | ||
| 258 | st8 [p_stackframe]=temp,8;; \ | ||
| 259 | mov temp=ar.rnat;; \ | ||
| 260 | st8 [p_stackframe]=temp,8; \ | ||
| 261 | mov ar.bspstore=p_bspstore;; \ | ||
| 262 | mov temp=ar.bsp;; \ | ||
| 263 | sub temp=temp,p_bspstore;; \ | ||
| 264 | st8 [p_stackframe]=temp,8;; | ||
| 265 | |||
| 266 | /* | ||
| 267 | * rse_return_context | ||
| 268 | * 1. Allocate a zero-sized frame | ||
| 269 | * 2. Store the number of dirty registers RSC.loadrs field | ||
| 270 | * 3. Issue a loadrs to insure that any registers from the interrupted | ||
| 271 | * context which were saved on the new stack frame have been loaded | ||
| 272 | * back into the stacked registers | ||
| 273 | * 4. Restore BSPSTORE | ||
| 274 | * 5. Restore RNAT | ||
| 275 | * 6. Restore PFS | ||
| 276 | * 7. Restore IFS | ||
| 277 | * 8. Restore RSC | ||
| 278 | * 9. Issue an RFI | ||
| 279 | */ | ||
| 280 | #define rse_return_context(psr_mask_reg,temp,p_stackframe) \ | ||
| 281 | ;; \ | ||
| 282 | alloc temp=ar.pfs,0,0,0,0; \ | ||
| 283 | add p_stackframe=rse_ndirty_offset,p_stackframe;; \ | ||
| 284 | ld8 temp=[p_stackframe];; \ | ||
| 285 | shl temp=temp,16;; \ | ||
| 286 | mov ar.rsc=temp;; \ | ||
| 287 | loadrs;; \ | ||
| 288 | add p_stackframe=-rse_ndirty_offset+rse_bspstore_offset,p_stackframe;;\ | ||
| 289 | ld8 temp=[p_stackframe];; \ | ||
| 290 | mov ar.bspstore=temp;; \ | ||
| 291 | add p_stackframe=-rse_bspstore_offset+rse_rnat_offset,p_stackframe;;\ | ||
| 292 | ld8 temp=[p_stackframe];; \ | ||
| 293 | mov ar.rnat=temp;; \ | ||
| 294 | add p_stackframe=-rse_rnat_offset+rse_pfs_offset,p_stackframe;; \ | ||
| 295 | ld8 temp=[p_stackframe];; \ | ||
| 296 | mov ar.pfs=temp;; \ | ||
| 297 | add p_stackframe=-rse_pfs_offset+rse_ifs_offset,p_stackframe;; \ | ||
| 298 | ld8 temp=[p_stackframe];; \ | ||
| 299 | mov cr.ifs=temp;; \ | ||
| 300 | add p_stackframe=-rse_ifs_offset+rse_rsc_offset,p_stackframe;; \ | ||
| 301 | ld8 temp=[p_stackframe];; \ | ||
| 302 | mov ar.rsc=temp ; \ | ||
| 303 | mov temp=psr;; \ | ||
| 304 | or temp=temp,psr_mask_reg;; \ | ||
| 305 | mov cr.ipsr=temp;; \ | ||
| 306 | mov temp=ip;; \ | ||
| 307 | add temp=0x30,temp;; \ | ||
| 308 | mov cr.iip=temp;; \ | ||
| 309 | srlz.i;; \ | ||
| 310 | rfi;; | ||
| 311 | 240 | ||
| 312 | #endif /* _ASM_IA64_MCA_ASM_H */ | 241 | #endif /* _ASM_IA64_MCA_ASM_H */ |
