 arch/i386/kernel/crash.c | 47 +++-------------------------------------
 include/asm-i386/kexec.h | 45 ++++++++++++++++++++++++++++++++++++++++
 kernel/kexec.c           |  4 +++-
 3 files changed, 51 insertions(+), 45 deletions(-)
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index f1e65c2ead6e..d49dbe8dc96b 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -82,53 +82,12 @@ static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
 	final_note(buf);
 }
 
-static void crash_get_current_regs(struct pt_regs *regs)
-{
-	__asm__ __volatile__("movl %%ebx,%0" : "=m"(regs->ebx));
-	__asm__ __volatile__("movl %%ecx,%0" : "=m"(regs->ecx));
-	__asm__ __volatile__("movl %%edx,%0" : "=m"(regs->edx));
-	__asm__ __volatile__("movl %%esi,%0" : "=m"(regs->esi));
-	__asm__ __volatile__("movl %%edi,%0" : "=m"(regs->edi));
-	__asm__ __volatile__("movl %%ebp,%0" : "=m"(regs->ebp));
-	__asm__ __volatile__("movl %%eax,%0" : "=m"(regs->eax));
-	__asm__ __volatile__("movl %%esp,%0" : "=m"(regs->esp));
-	__asm__ __volatile__("movw %%ss, %%ax;" :"=a"(regs->xss));
-	__asm__ __volatile__("movw %%cs, %%ax;" :"=a"(regs->xcs));
-	__asm__ __volatile__("movw %%ds, %%ax;" :"=a"(regs->xds));
-	__asm__ __volatile__("movw %%es, %%ax;" :"=a"(regs->xes));
-	__asm__ __volatile__("pushfl; popl %0" :"=m"(regs->eflags));
-
-	regs->eip = (unsigned long)current_text_addr();
-}
-
-/* CPU does not save ss and esp on stack if execution is already
- * running in kernel mode at the time of NMI occurrence. This code
- * fixes it.
- */
-static void crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs)
-{
-	memcpy(newregs, oldregs, sizeof(*newregs));
-	newregs->esp = (unsigned long)&(oldregs->esp);
-	__asm__ __volatile__(
-		"xorl %%eax, %%eax\n\t"
-		"movw %%ss, %%ax\n\t"
-		:"=a"(newregs->xss));
-}
-
-/* We may have saved_regs from where the error came from
- * or it is NULL if via a direct panic().
- */
-static void crash_save_self(struct pt_regs *saved_regs)
+static void crash_save_self(struct pt_regs *regs)
 {
-	struct pt_regs regs;
 	int cpu;
 
 	cpu = smp_processor_id();
-	if (saved_regs)
-		crash_setup_regs(&regs, saved_regs);
-	else
-		crash_get_current_regs(&regs);
-	crash_save_this_cpu(&regs, cpu);
+	crash_save_this_cpu(regs, cpu);
 }
 
 #ifdef CONFIG_SMP
@@ -147,7 +106,7 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
 	local_irq_disable();
 
 	if (!user_mode(regs)) {
-		crash_setup_regs(&fixed_regs, regs);
+		crash_fixup_ss_esp(&fixed_regs, regs);
 		regs = &fixed_regs;
 	}
 	crash_save_this_cpu(regs, cpu);
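
The fixup that crash_nmi_callback() now gets from crash_fixup_ss_esp() is needed because the IA-32 CPU pushes ss and esp on an exception or NMI only when the privilege level changes; taken in kernel mode, the frame holds just eip, cs and eflags. Since esp immediately follows eflags in the i386 pt_regs layout, the interrupted stack pointer is exactly &oldregs->esp. A user-space sketch of that layout argument (the struct mirrors the asm-i386/ptrace.h field order of this era; it is an illustration, not kernel code):

#include <stddef.h>
#include <stdio.h>

/* User-space mirror of the i386 struct pt_regs field order; used only
 * to illustrate the address arithmetic, not kernel code. */
struct pt_regs_sketch {
	long ebx, ecx, edx, esi, edi, ebp, eax;
	int xds, xes;
	long orig_eax, eip;
	int xcs;
	long eflags, esp;
	int xss;
};

int main(void)
{
	/* A kernel-mode exception pushes only eip, cs and eflags, so the
	 * pre-exception stack top sits one slot above eflags - the slot
	 * where esp would have been saved had the CPU pushed it.  That
	 * is why crash_fixup_ss_esp() can use &oldregs->esp directly. */
	printf("eflags at offset %zu, esp at offset %zu\n",
	       offsetof(struct pt_regs_sketch, eflags),
	       offsetof(struct pt_regs_sketch, esp));
	return 0;
}
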
diff --git a/include/asm-i386/kexec.h b/include/asm-i386/kexec.h
index d80d446498fb..8fb1defd98c6 100644
--- a/include/asm-i386/kexec.h
+++ b/include/asm-i386/kexec.h
@@ -2,6 +2,7 @@
 #define _I386_KEXEC_H
 
 #include <asm/fixmap.h>
+#include <asm/ptrace.h>
 
 /*
  * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
@@ -27,4 +28,48 @@
 
 #define MAX_NOTE_BYTES 1024
 
+/* CPU does not save ss and esp on stack if execution is already
+ * running in kernel mode at the time of NMI occurrence. This code
+ * fixes it.
+ */
+static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
+				      struct pt_regs *oldregs)
+{
+	memcpy(newregs, oldregs, sizeof(*newregs));
+	newregs->esp = (unsigned long)&(oldregs->esp);
+	__asm__ __volatile__(
+			"xorl %%eax, %%eax\n\t"
+			"movw %%ss, %%ax\n\t"
+			:"=a"(newregs->xss));
+}
+
+/*
+ * This function is responsible for capturing register states if coming
+ * via panic otherwise just fix up the ss and esp if coming via kernel
+ * mode exception.
+ */
+static inline void crash_setup_regs(struct pt_regs *newregs,
+				    struct pt_regs *oldregs)
+{
+	if (oldregs)
+		crash_fixup_ss_esp(newregs, oldregs);
+	else {
+		__asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->ebx));
+		__asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->ecx));
+		__asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->edx));
+		__asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->esi));
+		__asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->edi));
+		__asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->ebp));
+		__asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->eax));
+		__asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->esp));
+		__asm__ __volatile__("movw %%ss, %%ax;" :"=a"(newregs->xss));
+		__asm__ __volatile__("movw %%cs, %%ax;" :"=a"(newregs->xcs));
+		__asm__ __volatile__("movw %%ds, %%ax;" :"=a"(newregs->xds));
+		__asm__ __volatile__("movw %%es, %%ax;" :"=a"(newregs->xes));
+		__asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->eflags));
+
+		newregs->eip = (unsigned long)current_text_addr();
+	}
+}
+
 #endif /* _I386_KEXEC_H */
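
The oldregs == NULL branch added here reproduces the old crash_get_current_regs() body: each general register is stored straight into its pt_regs slot through an "=m" output so no other register gets clobbered, the segment registers are read by way of %ax with an "=a" output, and eip is approximated with current_text_addr() because a direct panic() leaves no exception frame to read it from. A minimal stand-alone illustration of the idiom (hypothetical helper name; i386 GCC inline asm only):

/* Hypothetical i386 helper showing the capture idiom used by
 * crash_setup_regs(): "=m" writes a register straight to memory,
 * "=a" routes a segment register through %eax (zeroed first so the
 * upper 16 bits of the stored value are well defined). */
static inline void capture_ebx_ss(unsigned long *ebx, unsigned long *ss)
{
	__asm__ __volatile__("movl %%ebx,%0" : "=m"(*ebx));
	__asm__ __volatile__("xorl %%eax,%%eax\n\t"
			     "movw %%ss,%%ax"
			     : "=a"(*ss));
}
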
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 1197de8b2a94..de1441656efd 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1057,7 +1057,9 @@ void crash_kexec(struct pt_regs *regs)
 	if (!locked) {
 		image = xchg(&kexec_crash_image, NULL);
 		if (image) {
-			machine_crash_shutdown(regs);
+			struct pt_regs fixed_regs;
+			crash_setup_regs(&fixed_regs, regs);
+			machine_crash_shutdown(&fixed_regs);
 			machine_kexec(image);
 		}
 		xchg(&kexec_lock, 0);
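
Net effect: crash_kexec() now always hands machine_crash_shutdown() a fully populated pt_regs, whether it was entered with real exception registers (oldregs != NULL, only ss and esp need fixing up) or from a direct panic() (oldregs == NULL, everything is captured on the spot). A compressed paraphrase of the resulting flow, with the locking and NULL-image checks from the hunk above elided:

/* Paraphrased crash path after this patch; an illustration, not a
 * verbatim quote of the kernel source. */
void crash_kexec_sketch(struct pt_regs *regs, struct kimage *image)
{
	struct pt_regs fixed_regs;

	crash_setup_regs(&fixed_regs, regs);	/* capture or fix up */
	machine_crash_shutdown(&fixed_regs);	/* save per-CPU register notes */
	machine_kexec(image);			/* boot into the crash kernel */
}
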