author		Trond Myklebust <Trond.Myklebust@netapp.com>	2006-03-23 23:44:19 -0500
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2006-03-23 23:44:19 -0500
commit		1ebbe2b20091d306453a5cf480a87e6cd28ae76f (patch)
tree		f5cd7a0fa69b8b1938cb5a0faed2e7b0628072a5 /arch/i386/kernel/traps.c
parent		ac58c9059da8886b5e8cde012a80266b18ca146e (diff)
parent		674a396c6d2ba0341ebdd7c1c9950f32f018e2dd (diff)

Merge branch 'linus'
Diffstat (limited to 'arch/i386/kernel/traps.c')
 -rw-r--r--  arch/i386/kernel/traps.c | 57 ++++++++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 40 insertions(+), 17 deletions(-)
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index b814dbdcc91e..de5386b01d38 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -99,6 +99,8 @@ int register_die_notifier(struct notifier_block *nb)
 {
 	int err = 0;
 	unsigned long flags;
+
+	vmalloc_sync_all();
 	spin_lock_irqsave(&die_notifier_lock, flags);
 	err = notifier_chain_register(&i386die_chain, nb);
 	spin_unlock_irqrestore(&die_notifier_lock, flags);
@@ -112,12 +114,30 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
 		p < (void *)tinfo + THREAD_SIZE - 3;
 }
 
-static void print_addr_and_symbol(unsigned long addr, char *log_lvl)
+/*
+ * Print CONFIG_STACK_BACKTRACE_COLS address/symbol entries per line.
+ */
+static inline int print_addr_and_symbol(unsigned long addr, char *log_lvl,
+					int printed)
 {
-	printk(log_lvl);
+	if (!printed)
+		printk(log_lvl);
+
+#if CONFIG_STACK_BACKTRACE_COLS == 1
 	printk(" [<%08lx>] ", addr);
+#else
+	printk(" <%08lx> ", addr);
+#endif
 	print_symbol("%s", addr);
-	printk("\n");
+
+	printed = (printed + 1) % CONFIG_STACK_BACKTRACE_COLS;
+
+	if (printed)
+		printk(" ");
+	else
+		printk("\n");
+
+	return printed;
 }
 
 static inline unsigned long print_context_stack(struct thread_info *tinfo,
@@ -125,20 +145,24 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
 					char *log_lvl)
 {
 	unsigned long addr;
+	int printed = 0; /* nr of entries already printed on current line */
 
 #ifdef CONFIG_FRAME_POINTER
 	while (valid_stack_ptr(tinfo, (void *)ebp)) {
 		addr = *(unsigned long *)(ebp + 4);
-		print_addr_and_symbol(addr, log_lvl);
+		printed = print_addr_and_symbol(addr, log_lvl, printed);
 		ebp = *(unsigned long *)ebp;
 	}
 #else
 	while (valid_stack_ptr(tinfo, stack)) {
 		addr = *stack++;
 		if (__kernel_text_address(addr))
-			print_addr_and_symbol(addr, log_lvl);
+			printed = print_addr_and_symbol(addr, log_lvl, printed);
 	}
 #endif
+	if (printed)
+		printk("\n");
+
 	return ebp;
 }
 
@@ -166,8 +190,7 @@ static void show_trace_log_lvl(struct task_struct *task,
 		stack = (unsigned long*)context->previous_esp;
 		if (!stack)
 			break;
-		printk(log_lvl);
-		printk(" =======================\n");
+		printk("%s =======================\n", log_lvl);
 	}
 }
 
@@ -194,21 +217,17 @@ static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp,
 	for(i = 0; i < kstack_depth_to_print; i++) {
 		if (kstack_end(stack))
 			break;
-		if (i && ((i % 8) == 0)) {
-			printk("\n");
-			printk(log_lvl);
-			printk(" ");
-		}
+		if (i && ((i % 8) == 0))
+			printk("\n%s ", log_lvl);
 		printk("%08lx ", *stack++);
 	}
-	printk("\n");
-	printk(log_lvl);
-	printk("Call Trace:\n");
+	printk("\n%sCall Trace:\n", log_lvl);
 	show_trace_log_lvl(task, esp, log_lvl);
 }
 
 void show_stack(struct task_struct *task, unsigned long *esp)
 {
+	printk(" ");
 	show_stack_log_lvl(task, esp, "");
 }
 
@@ -233,7 +252,7 @@ void show_registers(struct pt_regs *regs)
 
 	esp = (unsigned long) (&regs->esp);
 	savesegment(ss, ss);
-	if (user_mode(regs)) {
+	if (user_mode_vm(regs)) {
 		in_kernel = 0;
 		esp = regs->esp;
 		ss = regs->xss & 0xffff;
@@ -333,6 +352,8 @@ void die(const char * str, struct pt_regs * regs, long err)
 	static int die_counter;
 	unsigned long flags;
 
+	oops_enter();
+
 	if (die.lock_owner != raw_smp_processor_id()) {
 		console_verbose();
 		spin_lock_irqsave(&die.lock, flags);
@@ -385,6 +406,7 @@ void die(const char * str, struct pt_regs * regs, long err)
 		ssleep(5);
 		panic("Fatal exception");
 	}
+	oops_exit();
 	do_exit(SIGSEGV);
 }
 
@@ -623,7 +645,7 @@ void die_nmi (struct pt_regs *regs, const char *msg)
 	/* If we are in kernel we are probably nested up pretty bad
 	 * and might aswell get out now while we still can.
 	 */
-	if (!user_mode(regs)) {
+	if (!user_mode_vm(regs)) {
 		current->thread.trap_no = 2;
 		crash_kexec(regs);
 	}
@@ -694,6 +716,7 @@ fastcall void do_nmi(struct pt_regs * regs, long error_code)
 
 void set_nmi_callback(nmi_callback_t callback)
 {
+	vmalloc_sync_all();
 	rcu_assign_pointer(nmi_callback, callback);
 }
 EXPORT_SYMBOL_GPL(set_nmi_callback);
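
The most involved change in this diff is the multi-column backtrace output: print_addr_and_symbol() now returns a column counter that print_context_stack() threads through its loop, wrapping the line every CONFIG_STACK_BACKTRACE_COLS entries. The snippet below is a minimal userspace sketch of that wrapping scheme, not the kernel code itself: printk() and print_symbol() are stubbed with printf(), STACK_BACKTRACE_COLS is assumed to be 2, and print_addr() is a hypothetical stand-in for print_addr_and_symbol().

/*
 * Userspace sketch of the column-wrapping backtrace output above.
 * Assumptions: STACK_BACKTRACE_COLS stands in for CONFIG_STACK_BACKTRACE_COLS,
 * printk()/print_symbol() are replaced by printf(), and print_addr() is a
 * hypothetical stand-in for print_addr_and_symbol().
 */
#include <stdio.h>

#define STACK_BACKTRACE_COLS 2		/* assumed value for illustration */

/* Print one address; return how many entries now sit on the current line. */
static int print_addr(unsigned long addr, const char *log_lvl, int printed)
{
	if (!printed)			/* first column: emit the log level prefix */
		printf("%s", log_lvl);

	printf(" <%08lx>", addr);	/* multi-column format from the #else branch */

	printed = (printed + 1) % STACK_BACKTRACE_COLS;
	if (printed)
		printf(" ");		/* more columns still fit on this line */
	else
		printf("\n");		/* line is full: wrap */

	return printed;
}

int main(void)
{
	unsigned long addrs[] = { 0xc0100000UL, 0xc0123456UL, 0xc01abcdeUL,
				  0xc0200010UL, 0xc0234567UL };
	int printed = 0;
	unsigned int i;

	for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++)
		printed = print_addr(addrs[i], "<4>", printed);

	if (printed)			/* flush a partially filled last line */
		printf("\n");

	return 0;
}

With five addresses and two columns the last line is left half-full, which is why print_context_stack() also gained the trailing "if (printed) printk("\n");" in the diff.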