Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/kernel/kprobes.c |  22
-rw-r--r--  arch/ia64/kernel/mca.c     | 120
-rw-r--r--  arch/ia64/kernel/process.c |   6
-rw-r--r--  arch/ia64/kernel/traps.c   |  44
4 files changed, 134 insertions, 58 deletions
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 96736a119c91..801eeaeaf3de 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -347,7 +347,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
                 ((struct fnptr *)kretprobe_trampoline)->ip;

         spin_lock_irqsave(&kretprobe_lock, flags);
         head = kretprobe_inst_table_head(current);

         /*
          * It is possible to have multiple instances associated with a given
@@ -363,9 +363,9 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
          * kretprobe_trampoline
          */
         hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                 if (ri->task != current)
                         /* another task is sharing our hash bucket */
                         continue;

                 if (ri->rp && ri->rp->handler)
                         ri->rp->handler(ri, regs);
@@ -394,7 +394,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
          * kprobe_handler() that we don't want the post_handler
          * to run (and have re-enabled preemption)
          */
         return 1;
 }

 /* Called with kretprobe_lock held */
@@ -739,12 +739,16 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,

         switch(val) {
         case DIE_BREAK:
-                if (pre_kprobes_handler(args))
-                        ret = NOTIFY_STOP;
+                /* err is break number from ia64_bad_break() */
+                if (args->err == 0x80200 || args->err == 0x80300)
+                        if (pre_kprobes_handler(args))
+                                ret = NOTIFY_STOP;
                 break;
-        case DIE_SS:
-                if (post_kprobes_handler(args->regs))
-                        ret = NOTIFY_STOP;
+        case DIE_FAULT:
+                /* err is vector number from ia64_fault() */
+                if (args->err == 36)
+                        if (post_kprobes_handler(args->regs))
+                                ret = NOTIFY_STOP;
                 break;
         case DIE_PAGE_FAULT:
                 /* kprobe_running() needs smp_processor_id() */
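
For context, a minimal sketch of how another die-chain client could coexist with the kprobes dispatch above by filtering on args->err. This is not part of the patch: the handler and notifier names are hypothetical, and it assumes the struct die_args layout from <asm/kdebug.h> in this kernel.

#include <linux/notifier.h>
#include <asm/kdebug.h>

/* Hypothetical example, not from the patch: skip the events kprobes owns. */
static int my_debugger_notify(struct notifier_block *self,
                              unsigned long val, void *data)
{
        struct die_args *args = data;

        switch (val) {
        case DIE_BREAK:
                /* break immediates 0x80200/0x80300 are claimed by kprobes */
                if (args->err == 0x80200 || args->err == 0x80300)
                        return NOTIFY_DONE;
                /* handle other break numbers here */
                break;
        case DIE_FAULT:
                /* fault vector 36 (single step) is claimed by kprobes */
                if (args->err == 36)
                        return NOTIFY_DONE;
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block my_debugger_nb = {
        .notifier_call = my_debugger_notify,  /* registered via register_die_notifier() */
};
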
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 52c47da17246..355af15287c7 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -51,6 +51,9 @@
  *
  * 2005-08-12 Keith Owens <kaos@sgi.com>
  * Convert MCA/INIT handlers to use per event stacks and SAL/OS state.
+ *
+ * 2005-10-07 Keith Owens <kaos@sgi.com>
+ * Add notify_die() hooks.
  */
 #include <linux/config.h>
 #include <linux/types.h>
@@ -58,7 +61,6 @@
 #include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
-#include <linux/kallsyms.h>
 #include <linux/smp_lock.h>
 #include <linux/bootmem.h>
 #include <linux/acpi.h>
@@ -69,6 +71,7 @@
 #include <linux/workqueue.h>

 #include <asm/delay.h>
+#include <asm/kdebug.h>
 #include <asm/machvec.h>
 #include <asm/meminit.h>
 #include <asm/page.h>
@@ -132,6 +135,14 @@ extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);

 static int mca_init;

+
+static void inline
+ia64_mca_spin(const char *func)
+{
+        printk(KERN_EMERG "%s: spinning here, not returning to SAL\n", func);
+        while (1)
+                cpu_relax();
+}
 /*
  * IA64_MCA log support
  */
@@ -526,13 +537,16 @@ ia64_mca_wakeup_all(void)
  * Outputs :   None
  */
 static irqreturn_t
-ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
+ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs)
 {
         unsigned long flags;
         int cpu = smp_processor_id();

         /* Mask all interrupts */
         local_irq_save(flags);
+        if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", regs, 0, 0, 0)
+                        == NOTIFY_STOP)
+                ia64_mca_spin(__FUNCTION__);

         ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
         /* Register with the SAL monarch that the slave has
@@ -540,10 +554,18 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
          */
         ia64_sal_mc_rendez();

+        if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", regs, 0, 0, 0)
+                        == NOTIFY_STOP)
+                ia64_mca_spin(__FUNCTION__);
+
         /* Wait for the monarch cpu to exit. */
         while (monarch_cpu != -1)
                 cpu_relax();    /* spin until monarch leaves */

+        if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", regs, 0, 0, 0)
+                        == NOTIFY_STOP)
+                ia64_mca_spin(__FUNCTION__);
+
         /* Enable all interrupts */
         local_irq_restore(flags);
         return IRQ_HANDLED;
@@ -933,6 +955,9 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
         oops_in_progress = 1;   /* FIXME: make printk NMI/MCA/INIT safe */
         previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
         monarch_cpu = cpu;
+        if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, 0, 0, 0)
+                        == NOTIFY_STOP)
+                ia64_mca_spin(__FUNCTION__);
         ia64_wait_for_slaves(cpu);

         /* Wakeup all the processors which are spinning in the rendezvous loop.
@@ -942,6 +967,9 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
          * spinning in SAL does not work.
          */
         ia64_mca_wakeup_all();
+        if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, 0, 0, 0)
+                        == NOTIFY_STOP)
+                ia64_mca_spin(__FUNCTION__);

         /* Get the MCA error record and log it */
         ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
@@ -960,6 +988,9 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
                 ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
                 sos->os_status = IA64_MCA_CORRECTED;
         }
+        if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, 0, 0, recover)
+                        == NOTIFY_STOP)
+                ia64_mca_spin(__FUNCTION__);

         set_curr_task(cpu, previous_current);
         monarch_cpu = -1;
@@ -1188,6 +1219,37 @@ ia64_mca_cpe_poll (unsigned long dummy)

 #endif /* CONFIG_ACPI */

+static int
+default_monarch_init_process(struct notifier_block *self, unsigned long val, void *data)
+{
+        int c;
+        struct task_struct *g, *t;
+        if (val != DIE_INIT_MONARCH_PROCESS)
+                return NOTIFY_DONE;
+        printk(KERN_ERR "Processes interrupted by INIT -");
+        for_each_online_cpu(c) {
+                struct ia64_sal_os_state *s;
+                t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET);
+                s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET);
+                g = s->prev_task;
+                if (g) {
+                        if (g->pid)
+                                printk(" %d", g->pid);
+                        else
+                                printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g);
+                }
+        }
+        printk("\n\n");
+        if (read_trylock(&tasklist_lock)) {
+                do_each_thread (g, t) {
+                        printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
+                        show_stack(t, NULL);
+                } while_each_thread (g, t);
+                read_unlock(&tasklist_lock);
+        }
+        return NOTIFY_DONE;
+}
+
 /*
  * C portion of the OS INIT handler
  *
@@ -1212,8 +1274,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
         static atomic_t slaves;
         static atomic_t monarchs;
         task_t *previous_current;
-        int cpu = smp_processor_id(), c;
-        struct task_struct *g, *t;
+        int cpu = smp_processor_id();

         oops_in_progress = 1;   /* FIXME: make printk NMI/MCA/INIT safe */
         console_loglevel = 15;  /* make sure printks make it to console */
@@ -1253,8 +1314,17 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
         ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
         while (monarch_cpu == -1)
                 cpu_relax();    /* spin until monarch enters */
+        if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, 0, 0, 0)
+                        == NOTIFY_STOP)
+                ia64_mca_spin(__FUNCTION__);
+        if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, 0, 0, 0)
+                        == NOTIFY_STOP)
+                ia64_mca_spin(__FUNCTION__);
         while (monarch_cpu != -1)
                 cpu_relax();    /* spin until monarch leaves */
+        if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, 0, 0, 0)
+                        == NOTIFY_STOP)
+                ia64_mca_spin(__FUNCTION__);
         printk("Slave on cpu %d returning to normal service.\n", cpu);
         set_curr_task(cpu, previous_current);
         ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
@@ -1263,6 +1333,9 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
         }

         monarch_cpu = cpu;
+        if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, 0, 0, 0)
+                        == NOTIFY_STOP)
+                ia64_mca_spin(__FUNCTION__);

         /*
          * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000, INIT can be
@@ -1273,27 +1346,16 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
         printk("Delaying for 5 seconds...\n");
         udelay(5*1000000);
         ia64_wait_for_slaves(cpu);
-        printk(KERN_ERR "Processes interrupted by INIT -");
-        for_each_online_cpu(c) {
-                struct ia64_sal_os_state *s;
-                t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET);
-                s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET);
-                g = s->prev_task;
-                if (g) {
-                        if (g->pid)
-                                printk(" %d", g->pid);
-                        else
-                                printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g);
-                }
-        }
-        printk("\n\n");
-        if (read_trylock(&tasklist_lock)) {
-                do_each_thread (g, t) {
-                        printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
-                        show_stack(t, NULL);
-                } while_each_thread (g, t);
-                read_unlock(&tasklist_lock);
-        }
+        /* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through
+         * to default_monarch_init_process() above and just print all the
+         * tasks.
+         */
+        if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, 0, 0, 0)
+                        == NOTIFY_STOP)
+                ia64_mca_spin(__FUNCTION__);
+        if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, 0, 0, 0)
+                        == NOTIFY_STOP)
+                ia64_mca_spin(__FUNCTION__);
         printk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu);
         atomic_dec(&monarchs);
         set_curr_task(cpu, previous_current);
@@ -1462,6 +1524,10 @@ ia64_mca_init(void)
         s64 rc;
         struct ia64_sal_retval isrv;
         u64 timeout = IA64_MCA_RENDEZ_TIMEOUT; /* platform specific */
+        static struct notifier_block default_init_monarch_nb = {
+                .notifier_call = default_monarch_init_process,
+                .priority = 0/* we need to notified last */
+        };

         IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);

@@ -1555,6 +1621,10 @@ ia64_mca_init(void)
                        "(status %ld)\n", rc);
                 return;
         }
+        if (register_die_notifier(&default_init_monarch_nb)) {
+                printk(KERN_ERR "Failed to register default monarch INIT process\n");
+                return;
+        }

         IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);

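
As a usage note, the new comment in ia64_init_handler() says a client may intercept DIE_INIT_MONARCH_PROCESS ahead of the default notifier; a hypothetical sketch of such a higher-priority hook follows. The names and the dump placeholder are invented; the DIE_* events, the NOTIFY_STOP semantics, and the priority ordering are taken from the hunks above.

#include <linux/notifier.h>
#include <asm/kdebug.h>

/* Hypothetical example, not from the patch. */
static int my_init_monarch_notify(struct notifier_block *self,
                                  unsigned long val, void *data)
{
        if (val != DIE_INIT_MONARCH_PROCESS)
                return NOTIFY_DONE;

        /* capture state or trigger a dump here (placeholder) */

        /*
         * Returning NOTIFY_STOP ends the chain walk, so the default task
         * dump in default_monarch_init_process() never runs, and the
         * monarch then calls ia64_mca_spin() instead of returning to SAL.
         * Return NOTIFY_DONE to fall through to the default behaviour.
         */
        return NOTIFY_DONE;
}

static struct notifier_block my_init_monarch_nb = {
        .notifier_call = my_init_monarch_notify,
        .priority = 1,  /* ahead of the default notifier at priority 0 */
};
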
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 051e050359e4..c78355cb8902 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -4,6 +4,9 @@
  * Copyright (C) 1998-2003 Hewlett-Packard Co
  * David Mosberger-Tang <davidm@hpl.hp.com>
  * 04/11/17 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support
+ *
+ * 2005-10-07 Keith Owens <kaos@sgi.com>
+ * Add notify_die() hooks.
  */
 #define __KERNEL_SYSCALLS__     /* see <asm/unistd.h> */
 #include <linux/config.h>
@@ -34,6 +37,7 @@
 #include <asm/elf.h>
 #include <asm/ia32.h>
 #include <asm/irq.h>
+#include <asm/kdebug.h>
 #include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/sal.h>
@@ -804,12 +808,14 @@ cpu_halt (void)
 void
 machine_restart (char *restart_cmd)
 {
+        (void) notify_die(DIE_MACHINE_RESTART, restart_cmd, NULL, 0, 0, 0);
         (*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL);
 }

 void
 machine_halt (void)
 {
+        (void) notify_die(DIE_MACHINE_HALT, "", NULL, 0, 0, 0);
         cpu_halt();
 }

diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index f970359e7edf..fba5fdd1f968 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -30,17 +30,20 @@ fpswa_interface_t *fpswa_interface;
 EXPORT_SYMBOL(fpswa_interface);

 struct notifier_block *ia64die_chain;
-static DEFINE_SPINLOCK(die_notifier_lock);

-int register_die_notifier(struct notifier_block *nb)
+int
+register_die_notifier(struct notifier_block *nb)
 {
-        int err = 0;
-        unsigned long flags;
-        spin_lock_irqsave(&die_notifier_lock, flags);
-        err = notifier_chain_register(&ia64die_chain, nb);
-        spin_unlock_irqrestore(&die_notifier_lock, flags);
-        return err;
+        return notifier_chain_register(&ia64die_chain, nb);
 }
+EXPORT_SYMBOL_GPL(register_die_notifier);
+
+int
+unregister_die_notifier(struct notifier_block *nb)
+{
+        return notifier_chain_unregister(&ia64die_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_die_notifier);

 void __init
 trap_init (void)
@@ -105,6 +108,7 @@ die (const char *str, struct pt_regs *regs, long err)
         if (++die.lock_owner_depth < 3) {
                 printk("%s[%d]: %s %ld [%d]\n",
                         current->comm, current->pid, str, err, ++die_counter);
+                (void) notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
                 show_regs(regs);
         } else
                 printk(KERN_ERR "Recursive die() failure, output suppressed\n");
@@ -155,9 +159,8 @@ __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
         switch (break_num) {
         case 0: /* unknown error (used by GCC for __builtin_abort()) */
                 if (notify_die(DIE_BREAK, "break 0", regs, break_num, TRAP_BRKPT, SIGTRAP)
-                                == NOTIFY_STOP) {
+                                == NOTIFY_STOP)
                         return;
-                }
                 die_if_kernel("bugcheck!", regs, break_num);
                 sig = SIGILL; code = ILL_ILLOPC;
                 break;
@@ -210,15 +213,6 @@ __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
                 sig = SIGILL; code = __ILL_BNDMOD;
                 break;

-        case 0x80200:
-        case 0x80300:
-                if (notify_die(DIE_BREAK, "kprobe", regs, break_num, TRAP_BRKPT, SIGTRAP)
-                                == NOTIFY_STOP) {
-                        return;
-                }
-                sig = SIGTRAP; code = TRAP_BRKPT;
-                break;
-
         default:
                 if (break_num < 0x40000 || break_num > 0x100000)
                         die_if_kernel("Bad break", regs, break_num);
@@ -226,6 +220,9 @@ __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
                 if (break_num < 0x80000) {
                         sig = SIGILL; code = __ILL_BREAK;
                 } else {
+                        if (notify_die(DIE_BREAK, "bad break", regs, break_num, TRAP_BRKPT, SIGTRAP)
+                                        == NOTIFY_STOP)
+                                return;
                         sig = SIGTRAP; code = TRAP_BRKPT;
                 }
         }
@@ -578,12 +575,11 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
 #endif
                         break;
                 case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
-                case 36:
-                        if (notify_die(DIE_SS, "ss", &regs, vector,
-                                        vector, SIGTRAP) == NOTIFY_STOP)
-                                return;
-                        siginfo.si_code = TRAP_TRACE; ifa = 0; break;
+                case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break;
                 }
+                if (notify_die(DIE_FAULT, "ia64_fault", &regs, vector, siginfo.si_code, SIGTRAP)
+                                == NOTIFY_STOP)
+                        return;
                 siginfo.si_signo = SIGTRAP;
                 siginfo.si_errno = 0;
                 siginfo.si_addr = (void __user *) ifa;
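
Finally, a hypothetical sketch of a module using the register_die_notifier()/unregister_die_notifier() pair exported above and watching two of the new events. The module and handler names are placeholders, and the die_args field names assume the <asm/kdebug.h> layout of this kernel; this is an illustration, not part of the patch.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>

/* Hypothetical example, not from the patch. */
static int watcher_notify(struct notifier_block *self,
                          unsigned long val, void *data)
{
        struct die_args *args = data;

        if (val == DIE_OOPS)
                /* die() passes its str and err arguments through notify_die() */
                printk(KERN_INFO "watcher: oops \"%s\", err %ld\n",
                       args->str, args->err);
        else if (val == DIE_MACHINE_RESTART)
                /* machine_restart() passes restart_cmd as the string argument */
                printk(KERN_INFO "watcher: restart requested (%s)\n",
                       args->str ? args->str : "");
        return NOTIFY_DONE;
}

static struct notifier_block watcher_nb = {
        .notifier_call = watcher_notify,
};

static int __init watcher_init(void)
{
        return register_die_notifier(&watcher_nb);
}

static void __exit watcher_exit(void)
{
        unregister_die_notifier(&watcher_nb);
}

module_init(watcher_init);
module_exit(watcher_exit);
MODULE_LICENSE("GPL");
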