-rw-r--r--  arch/tile/include/asm/stack.h |   1
-rw-r--r--  arch/tile/kernel/stack.c      | 231
2 files changed, 112 insertions, 120 deletions
diff --git a/arch/tile/include/asm/stack.h b/arch/tile/include/asm/stack.h
index 4d97a2db932e..0e9d382a2d45 100644
--- a/arch/tile/include/asm/stack.h
+++ b/arch/tile/include/asm/stack.h
@@ -25,7 +25,6 @@
 struct KBacktraceIterator {
 	BacktraceIterator it;
 	struct task_struct *task;	/* task we are backtracing */
-	pte_t *pgtable;		/* page table for user space access */
 	int end;		/* iteration complete. */
 	int new_context;	/* new context is starting */
 	int profile;	/* profiling, so stop on async intrpt */
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 0be6b0109ce0..b2f44c28dda6 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -21,9 +21,10 @@
 #include <linux/stacktrace.h>
 #include <linux/uaccess.h>
 #include <linux/mmzone.h>
+#include <linux/dcache.h>
+#include <linux/fs.h>
 #include <asm/backtrace.h>
 #include <asm/page.h>
-#include <asm/tlbflush.h>
 #include <asm/ucontext.h>
 #include <asm/switch_to.h>
 #include <asm/sigframe.h>
@@ -45,72 +46,23 @@ static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
 	return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
 }
 
-/* Is address valid for reading? */
-static int valid_address(struct KBacktraceIterator *kbt, unsigned long address)
-{
-	HV_PTE *l1_pgtable = kbt->pgtable;
-	HV_PTE *l2_pgtable;
-	unsigned long pfn;
-	HV_PTE pte;
-	struct page *page;
-
-	if (l1_pgtable == NULL)
-		return 0;	/* can't read user space in other tasks */
-
-#ifdef CONFIG_64BIT
-	/* Find the real l1_pgtable by looking in the l0_pgtable. */
-	pte = l1_pgtable[HV_L0_INDEX(address)];
-	if (!hv_pte_get_present(pte))
-		return 0;
-	pfn = hv_pte_get_pfn(pte);
-	if (pte_huge(pte)) {
-		if (!pfn_valid(pfn)) {
-			pr_err("L0 huge page has bad pfn %#lx\n", pfn);
-			return 0;
-		}
-		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
-	}
-	page = pfn_to_page(pfn);
-	BUG_ON(PageHighMem(page));	/* No HIGHMEM on 64-bit. */
-	l1_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
-#endif
-	pte = l1_pgtable[HV_L1_INDEX(address)];
-	if (!hv_pte_get_present(pte))
-		return 0;
-	pfn = hv_pte_get_pfn(pte);
-	if (pte_huge(pte)) {
-		if (!pfn_valid(pfn)) {
-			pr_err("huge page has bad pfn %#lx\n", pfn);
-			return 0;
-		}
-		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
-	}
-
-	page = pfn_to_page(pfn);
-	if (PageHighMem(page)) {
-		pr_err("L2 page table not in LOWMEM (%#llx)\n",
-		       HV_PFN_TO_CPA(pfn));
-		return 0;
-	}
-	l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
-	pte = l2_pgtable[HV_L2_INDEX(address)];
-	return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
-}
-
 /* Callback for backtracer; basically a glorified memcpy */
 static bool read_memory_func(void *result, unsigned long address,
 			     unsigned int size, void *vkbt)
 {
 	int retval;
 	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
+
+	if (address == 0)
+		return 0;
 	if (__kernel_text_address(address)) {
 		/* OK to read kernel code. */
 	} else if (address >= PAGE_OFFSET) {
 		/* We only tolerate kernel-space reads of this task's stack */
 		if (!in_kernel_stack(kbt, address))
 			return 0;
-	} else if (!valid_address(kbt, address)) {
-		return 0;	/* invalid user-space address */
+	} else if (!kbt->is_current) {
+		return 0;	/* can't read from other user address spaces */
 	}
 	pagefault_disable();
 	retval = __copy_from_user_inatomic(result,
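
Note: this hunk is the heart of the change. Rather than pre-validating
user addresses against the hypervisor page table, the callback now just
attempts the read with page faults disabled, and __copy_from_user_inatomic()
reports a nonzero residue for an unreadable address instead of faulting.
A minimal standalone sketch of that probe pattern (the helper name is
hypothetical):

	static int probe_word(unsigned long address, unsigned long *word)
	{
		int retval;

		pagefault_disable();	/* fail fast; never sleep or oops */
		retval = __copy_from_user_inatomic(word,
				(void __user __force *)address,
				sizeof(*word));
		pagefault_enable();
		return retval == 0;	/* zero residue: read succeeded */
	}
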
@@ -128,6 +80,8 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 	unsigned long sp = kbt->it.sp;
 	struct pt_regs *p;
 
+	if (sp % sizeof(long) != 0)
+		return NULL;
 	if (!in_kernel_stack(kbt, sp))
 		return NULL;
 	if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
@@ -170,27 +124,27 @@ static int is_sigreturn(unsigned long pc)
 }
 
 /* Return a pt_regs pointer for a valid signal handler frame */
-static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt)
+static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
+				      struct rt_sigframe* kframe)
 {
 	BacktraceIterator *b = &kbt->it;
 
-	if (b->pc == VDSO_BASE) {
-		struct rt_sigframe *frame;
-		unsigned long sigframe_top =
-			b->sp + sizeof(struct rt_sigframe) - 1;
-		if (!valid_address(kbt, b->sp) ||
-		    !valid_address(kbt, sigframe_top)) {
-			if (kbt->verbose)
-				pr_err("  (odd signal: sp %#lx?)\n",
-				       (unsigned long)(b->sp));
+	if (b->pc == VDSO_BASE && b->sp < PAGE_OFFSET &&
+	    b->sp % sizeof(long) == 0) {
+		int retval;
+		pagefault_disable();
+		retval = __copy_from_user_inatomic(
+			kframe, (void __user __force *)b->sp,
+			sizeof(*kframe));
+		pagefault_enable();
+		if (retval != 0 ||
+		    (unsigned int)(kframe->info.si_signo) >= _NSIG)
 			return NULL;
-		}
-		frame = (struct rt_sigframe *)b->sp;
 		if (kbt->verbose) {
 			pr_err("  <received signal %d>\n",
-			       frame->info.si_signo);
+			       kframe->info.si_signo);
 		}
-		return (struct pt_regs *)&frame->uc.uc_mcontext;
+		return (struct pt_regs *)&kframe->uc.uc_mcontext;
 	}
 	return NULL;
 }
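
Note: the rewritten valid_sigframe() copies the whole user frame into a
caller-supplied kernel buffer before trusting any field of it. The cast
in the si_signo check matters: converting to unsigned folds negative
(corrupt) values into the >= _NSIG rejection. Illustrative values only:

	int si_signo = -1;	/* a corrupt frame */
	/* (unsigned int)-1 == UINT_MAX >= _NSIG, so this rejects it */
	int plausible = (unsigned int)si_signo < _NSIG;	/* 0 */
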
@@ -203,10 +157,11 @@ static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
 static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
 {
 	struct pt_regs *p;
+	struct rt_sigframe kframe;
 
 	p = valid_fault_handler(kbt);
 	if (p == NULL)
-		p = valid_sigframe(kbt);
+		p = valid_sigframe(kbt, &kframe);
 	if (p == NULL)
 		return 0;
 	backtrace_init(&kbt->it, read_memory_func, kbt,
@@ -266,41 +221,19 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
 
 	/*
 	 * Set up callback information.  We grab the kernel stack base
-	 * so we will allow reads of that address range, and if we're
-	 * asking about the current process we grab the page table
-	 * so we can check user accesses before trying to read them.
-	 * We flush the TLB to avoid any weird skew issues.
+	 * so we will allow reads of that address range.
 	 */
-	is_current = (t == NULL);
+	is_current = (t == NULL || t == current);
 	kbt->is_current = is_current;
 	if (is_current)
 		t = validate_current();
 	kbt->task = t;
-	kbt->pgtable = NULL;
 	kbt->verbose = 0;	/* override in caller if desired */
 	kbt->profile = 0;	/* override in caller if desired */
 	kbt->end = KBT_ONGOING;
-	kbt->new_context = 0;
-	if (is_current) {
-		HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
-		if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET) {
-			/*
-			 * Not just an optimization: this also allows
-			 * this to work at all before va/pa mappings
-			 * are set up.
-			 */
-			kbt->pgtable = swapper_pg_dir;
-		} else {
-			struct page *page = pfn_to_page(PFN_DOWN(pgdir_pa));
-			if (!PageHighMem(page))
-				kbt->pgtable = __va(pgdir_pa);
-			else
-				pr_err("page table not in LOWMEM"
-				       " (%#llx)\n", pgdir_pa);
-		}
-		local_flush_tlb_all();
+	kbt->new_context = 1;
+	if (is_current)
 		validate_stack(regs);
-	}
 
 	if (regs == NULL) {
 		if (is_current || t->state == TASK_RUNNING) {
@@ -346,6 +279,78 @@ void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
 }
 EXPORT_SYMBOL(KBacktraceIterator_next);
 
+static void describe_addr(struct KBacktraceIterator *kbt,
+			  unsigned long address,
+			  int have_mmap_sem, char *buf, size_t bufsize)
+{
+	struct vm_area_struct *vma;
+	size_t namelen, remaining;
+	unsigned long size, offset, adjust;
+	char *p, *modname;
+	const char *name;
+	int rc;
+
+	/*
+	 * Look one byte back for every caller frame (i.e. those that
+	 * aren't a new context) so we look up symbol data for the
+	 * call itself, not the following instruction, which may be on
+	 * a different line (or in a different function).
+	 */
+	adjust = !kbt->new_context;
+	address -= adjust;
+
+	if (address >= PAGE_OFFSET) {
+		/* Handle kernel symbols. */
+		BUG_ON(bufsize < KSYM_NAME_LEN);
+		name = kallsyms_lookup(address, &size, &offset,
+				       &modname, buf);
+		if (name == NULL) {
+			buf[0] = '\0';
+			return;
+		}
+		namelen = strlen(buf);
+		remaining = (bufsize - 1) - namelen;
+		p = buf + namelen;
+		rc = snprintf(p, remaining, "+%#lx/%#lx ",
+			      offset + adjust, size);
+		if (modname && rc < remaining)
+			snprintf(p + rc, remaining - rc, "[%s] ", modname);
+		buf[bufsize-1] = '\0';
+		return;
+	}
+
+	/* If we don't have the mmap_sem, we can't show any more info. */
+	buf[0] = '\0';
+	if (!have_mmap_sem)
+		return;
+
+	/* Find vma info. */
+	vma = find_vma(kbt->task->mm, address);
+	if (vma == NULL || address < vma->vm_start) {
+		snprintf(buf, bufsize, "[unmapped address] ");
+		return;
+	}
+
+	if (vma->vm_file) {
+		char *s;
+		p = d_path(&vma->vm_file->f_path, buf, bufsize);
+		if (IS_ERR(p))
+			p = "?";
+		s = strrchr(p, '/');
+		if (s)
+			p = s+1;
+	} else {
+		p = "anon";
+	}
+
+	/* Generate a string description of the vma info. */
+	namelen = strlen(p);
+	remaining = (bufsize - 1) - namelen;
+	memmove(buf, p, namelen);
+	snprintf(buf + namelen, remaining, "[%lx+%lx] ",
+		 vma->vm_start, vma->vm_end - vma->vm_start);
+}
+
 /*
  * This method wraps the backtracer's more generic support.
  * It is only invoked from the architecture-specific code; show_stack()
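
Note: with describe_addr() available, user-space frames can now be
annotated from vma data. Hypothetical frame lines (addresses and names
invented) showing the formats it emits; anonymous mappings print as
anon[start+len]:

	  frame 0: 0x... show_stack+0x40/0x80 (sp 0x...)       kernel symbol
	  frame 7: 0x... libfoo.so[10020000+80000] (sp 0x...)  file-backed vma
	  frame 8: 0x... [unmapped address] (sp 0x...)         no vma found
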
@@ -354,6 +359,7 @@ EXPORT_SYMBOL(KBacktraceIterator_next);
 void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 {
 	int i;
+	int have_mmap_sem = 0;
 
 	if (headers) {
 		/*
@@ -370,31 +376,16 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 	kbt->verbose = 1;
 	i = 0;
 	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
-		char *modname;
-		const char *name;
-		unsigned long address = kbt->it.pc;
-		unsigned long offset, size;
 		char namebuf[KSYM_NAME_LEN+100];
+		unsigned long address = kbt->it.pc;
 
-		if (address >= PAGE_OFFSET)
-			name = kallsyms_lookup(address, &size, &offset,
-					       &modname, namebuf);
-		else
-			name = NULL;
-
-		if (!name)
-			namebuf[0] = '\0';
-		else {
-			size_t namelen = strlen(namebuf);
-			size_t remaining = (sizeof(namebuf) - 1) - namelen;
-			char *p = namebuf + namelen;
-			int rc = snprintf(p, remaining, "+%#lx/%#lx ",
-					  offset, size);
-			if (modname && rc < remaining)
-				snprintf(p + rc, remaining - rc,
-					 "[%s] ", modname);
-			namebuf[sizeof(namebuf)-1] = '\0';
-		}
+		/* Try to acquire the mmap_sem as we pass into userspace. */
+		if (address < PAGE_OFFSET && !have_mmap_sem && kbt->task->mm)
+			have_mmap_sem =
+				down_read_trylock(&kbt->task->mm->mmap_sem);
+
+		describe_addr(kbt, address, have_mmap_sem,
+			      namebuf, sizeof(namebuf));
 
 		pr_err("  frame %d: 0x%lx %s(sp 0x%lx)\n",
 		       i++, address, namebuf, (unsigned long)(kbt->it.sp));
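
Note: down_read_trylock() is used here because a stack dump may run from
atomic or interrupt context, where sleeping on mmap_sem is not allowed;
if the semaphore is contended, user frames simply go unannotated. The
shape of the pattern in isolation (names hypothetical):

	struct mm_struct *mm = task->mm;
	if (mm && down_read_trylock(&mm->mmap_sem)) {
		struct vm_area_struct *vma = find_vma(mm, address);
		/* ... describe the address using vma ... */
		up_read(&mm->mmap_sem);
	}
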
@@ -409,6 +400,8 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 	pr_err("Stack dump stopped; next frame identical to this one\n");
 	if (headers)
 		pr_err("Stack dump complete\n");
+	if (have_mmap_sem)
+		up_read(&kbt->task->mm->mmap_sem);
 }
 EXPORT_SYMBOL(tile_show_stack);
 
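Note: taken together, the patch lets a backtrace of the current task read
user memory through the normal uaccess machinery instead of hypervisor
page tables. A hypothetical caller, assuming the (kbt, task, regs) init
signature this file uses:

	struct KBacktraceIterator kbt;

	KBacktraceIterator_init(&kbt, NULL, NULL);	/* NULL task: current */
	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt))
		pr_err("  pc %#lx sp %#lx\n", kbt.it.pc, kbt.it.sp);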