 arch/x86/kernel/traps_64.c | 74 +++++++++++++++++++++++++++++---------------
 1 file changed, 45 insertions(+), 29 deletions(-)
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index 62c4d8f46ee9..b8303ed95057 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -212,10 +212,46 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
  * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
  */
 
-static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
+static inline int valid_stack_ptr(struct thread_info *tinfo,
+			void *p, unsigned int size, void *end)
 {
 	void *t = (void *)tinfo;
-	return p > t && p < t + THREAD_SIZE - 3;
+	if (end) {
+		if (p < end && p >= (end-THREAD_SIZE))
+			return 1;
+		else
+			return 0;
+	}
+	return p > t && p < t + THREAD_SIZE - size;
+}
+
+static inline unsigned long print_context_stack(struct thread_info *tinfo,
+				unsigned long *stack, unsigned long bp,
+				const struct stacktrace_ops *ops, void *data,
+				unsigned long *end)
+{
+	/*
+	 * Print function call entries within a stack. The
+	 * valid_stack_ptr() check is the "end of stackframe"
+	 * condition that the 'stack++' iteration will trigger.
+	 */
+	while (valid_stack_ptr(tinfo, stack, 3, end)) {
+		unsigned long addr = *stack++;
+		/* Use unlocked access here because, except for NMIs,
+		   we should already be protected against module unloads */
+		if (__kernel_text_address(addr)) {
+			/*
+			 * If the address is either in the text segment of the
+			 * kernel, or in the region which contains vmalloc'ed
+			 * memory, it *may* be the address of a calling
+			 * routine; if so, print it so that someone tracing
+			 * down the cause of the crash will be able to figure
+			 * out the call path that was taken.
+			 */
+			ops->address(data, addr, 1);
+		}
+	}
+	return bp;
 }
 
 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
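
The refactored valid_stack_ptr() now works in two modes: with a non-NULL 'end' it bounds the walk to the THREAD_SIZE-sized exception or IRQ stack ending at 'end'; with a NULL 'end' it falls back to the old thread_info-relative check, with the hard-coded headroom of 3 generalized into the 'size' parameter. A minimal userspace sketch of the same bounds logic, where THREAD_SIZE, the dummy thread_info, and main() are stand-ins rather than kernel code (builds with gcc, which permits void * arithmetic just as the kernel does):

#include <stdio.h>

#define THREAD_SIZE 8192			/* stand-in for the kernel constant */

struct thread_info { int dummy; };		/* placeholder layout */

static int valid_stack_ptr(struct thread_info *tinfo,
			   void *p, unsigned int size, void *end)
{
	void *t = (void *)tinfo;

	if (end) {
		/* Bounded mode: stay inside the THREAD_SIZE-sized
		 * stack whose top is 'end'. */
		if (p < end && p >= (end - THREAD_SIZE))
			return 1;
		else
			return 0;
	}
	/* Unbounded mode: stay inside the thread_info-based stack,
	 * leaving 'size' bytes of headroom at the top. */
	return p > t && p < t + THREAD_SIZE - size;
}

int main(void)
{
	static unsigned char stack_area[THREAD_SIZE];
	struct thread_info *tinfo = (struct thread_info *)stack_area;
	void *end = stack_area + THREAD_SIZE;
	void *mid = stack_area + THREAD_SIZE / 2;

	/* A pointer in the middle passes both modes. */
	printf("mid, bounded:   %d\n", valid_stack_ptr(tinfo, mid, 3, end));
	printf("mid, unbounded: %d\n", valid_stack_ptr(tinfo, mid, 3, NULL));

	/* One past the end is rejected. */
	printf("end, bounded:   %d\n", valid_stack_ptr(tinfo, end, 3, end));
	return 0;
}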
@@ -229,6 +265,7 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
 
 	if (!tsk)
 		tsk = current;
+	tinfo = task_thread_info(tsk);
 
 	if (!stack) {
 		unsigned long dummy;
@@ -237,28 +274,6 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
 		stack = (unsigned long *)tsk->thread.sp;
 	}
 
-	/*
-	 * Print function call entries within a stack. 'cond' is the
-	 * "end of stackframe" condition, that the 'stack++'
-	 * iteration will eventually trigger.
-	 */
-#define HANDLE_STACK(cond) \
-	do while (cond) { \
-		unsigned long addr = *stack++; \
-		/* Use unlocked access here because except for NMIs \
-		   we should be already protected against module unloads */ \
-		if (__kernel_text_address(addr)) { \
-			/* \
-			 * If the address is either in the text segment of the \
-			 * kernel, or in the region which contains vmalloc'ed \
-			 * memory, it *may* be the address of a calling \
-			 * routine; if so, print it so that someone tracing \
-			 * down the cause of the crash will be able to figure \
-			 * out the call path that was taken. \
-			 */ \
-			ops->address(data, addr, 1); \
-		} \
-	} while (0)
 
 	/*
 	 * Print function call entries in all stacks, starting at the
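
The removed macro had an unusual shape: 'do while (cond) { ... } while (0)' is a do-statement whose entire body is the inner while loop, written that way only so each call site could carry a trailing semicolon, and it silently captured 'stack', 'ops', and 'data' from dump_trace()'s scope. The function version makes every input an explicit, type-checked parameter. A compilable userspace contrast of the two styles; the names and the fake text-address check below are stand-ins:

#include <stdio.h>

static int fake_kernel_text_address(unsigned long addr)
{
	return addr >= 0x1000 && addr < 0x2000;	/* pretend text segment */
}

/* Old style: 'stack' and 'out' are captured from the caller's scope. */
#define HANDLE_STACK(cond) \
	do while (cond) { \
		unsigned long addr = *stack++; \
		if (fake_kernel_text_address(addr)) \
			fprintf(out, " [<%#lx>]\n", addr); \
	} while (0)

/* New style: every input is an explicit, type-checked parameter. */
static void print_context_stack(unsigned long *stack, unsigned long *end,
				FILE *out)
{
	while (stack < end) {
		unsigned long addr = *stack++;
		if (fake_kernel_text_address(addr))
			fprintf(out, " [<%#lx>]\n", addr);
	}
}

int main(void)
{
	unsigned long frames[] = { 0x1234, 0xdead, 0x1abc };
	unsigned long *stack = frames;
	FILE *out = stdout;

	HANDLE_STACK(stack < frames + 3);		/* macro version */
	print_context_stack(frames, frames + 3, out);	/* function version */
	return 0;
}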
@@ -274,7 +289,9 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
 		if (estack_end) {
 			if (ops->stack(data, id) < 0)
 				break;
-			HANDLE_STACK (stack < estack_end);
+
+			print_context_stack(tinfo, stack, 0, ops,
+						data, estack_end);
 			ops->stack(data, "<EOE>");
 			/*
 			 * We link to the next stack via the
@@ -292,7 +309,8 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
 			if (stack >= irqstack && stack < irqstack_end) {
 				if (ops->stack(data, "IRQ") < 0)
 					break;
-				HANDLE_STACK (stack < irqstack_end);
+				print_context_stack(tinfo, stack, 0, ops,
+						data, irqstack_end);
 				/*
 				 * We link to the next stack (which would be
 				 * the process stack normally) the last
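
Both call sites above pass an explicit end pointer, so print_context_stack() stops at the top of the exception or IRQ stack; the (truncated) comments after each call describe how the walker then hops to the interrupted stack through a link pointer stored at the top of the current one. A toy model of that hand-off, with sizes, names, and layout invented for illustration:

#include <stdio.h>

#define WORDS 8

int main(void)
{
	unsigned long process_stack[WORDS] = { 0x1111, 0x2222 };
	unsigned long irq_stack[WORDS];
	unsigned long *irqstack_end = irq_stack + WORDS;

	/* What the entry code would do: remember where we came from. */
	irqstack_end[-1] = (unsigned long)process_stack;

	/* What the unwinder does once the bounded walk hits the end:
	 * load the link word and continue on the previous stack. */
	unsigned long *stack = (unsigned long *)irqstack_end[-1];
	printf("continuing at %p, first word %#lx\n",
	       (void *)stack, stack[0]);
	return 0;
}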
@@ -310,9 +328,7 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
 	/*
 	 * This handles the process stack:
 	 */
-	tinfo = task_thread_info(tsk);
-	HANDLE_STACK (valid_stack_ptr(tinfo, stack));
-#undef HANDLE_STACK
+	print_context_stack(tinfo, stack, 0, ops, data, NULL);
 	put_cpu();
 }
 EXPORT_SYMBOL(dump_trace);
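
The walker reports through the two stacktrace_ops callbacks this diff exercises: ops->stack(data, name) announces a new stack section (a negative return aborts the walk) and ops->address(data, addr, reliable) emits one text address. A self-contained userspace sketch of a consumer follows; the struct layout, fake_dump_trace(), and all names are illustrative stand-ins, not the kernel's full stacktrace_ops:

#include <stdio.h>

struct fake_stacktrace_ops {
	int  (*stack)(void *data, const char *name);	/* < 0 aborts */
	void (*address)(void *data, unsigned long addr, int reliable);
};

static int my_stack(void *data, const char *name)
{
	fprintf(data, "---- %s stack ----\n", name);
	return 0;	/* keep walking */
}

static void my_address(void *data, unsigned long addr, int reliable)
{
	fprintf(data, " [<%016lx>]%s\n", addr, reliable ? "" : " ?");
}

/* Stands in for the dump_trace()/print_context_stack() pair. */
static void fake_dump_trace(unsigned long *stack, unsigned long *end,
			    const struct fake_stacktrace_ops *ops, void *data)
{
	if (ops->stack(data, "NMI") < 0)	/* exception stack id */
		return;
	while (stack < end)
		ops->address(data, *stack++, 1);
	ops->stack(data, "<EOE>");		/* end-of-exception marker */
}

int main(void)
{
	unsigned long frames[] = { 0xffffffff80212345UL, 0xffffffff8023abcdUL };
	struct fake_stacktrace_ops ops = { my_stack, my_address };

	fake_dump_trace(frames, frames + 2, &ops, stdout);
	return 0;
}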