author	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
commit	8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree	a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /arch/tile/kernel/stack.c
parent	406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'arch/tile/kernel/stack.c')
-rw-r--r--	arch/tile/kernel/stack.c	232
1 file changed, 119 insertions(+), 113 deletions(-)
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index b2f44c28dda..37ee4d037e0 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -21,12 +21,10 @@
 #include <linux/stacktrace.h>
 #include <linux/uaccess.h>
 #include <linux/mmzone.h>
-#include <linux/dcache.h>
-#include <linux/fs.h>
 #include <asm/backtrace.h>
 #include <asm/page.h>
+#include <asm/tlbflush.h>
 #include <asm/ucontext.h>
-#include <asm/switch_to.h>
 #include <asm/sigframe.h>
 #include <asm/stack.h>
 #include <arch/abi.h>
@@ -46,23 +44,72 @@ static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
 	return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
 }
 
+/* Is address valid for reading? */
+static int valid_address(struct KBacktraceIterator *kbt, unsigned long address)
+{
+	HV_PTE *l1_pgtable = kbt->pgtable;
+	HV_PTE *l2_pgtable;
+	unsigned long pfn;
+	HV_PTE pte;
+	struct page *page;
+
+	if (l1_pgtable == NULL)
+		return 0;	/* can't read user space in other tasks */
+
+#ifdef CONFIG_64BIT
+	/* Find the real l1_pgtable by looking in the l0_pgtable. */
+	pte = l1_pgtable[HV_L0_INDEX(address)];
+	if (!hv_pte_get_present(pte))
+		return 0;
+	pfn = hv_pte_get_pfn(pte);
+	if (pte_huge(pte)) {
+		if (!pfn_valid(pfn)) {
+			pr_err("L0 huge page has bad pfn %#lx\n", pfn);
+			return 0;
+		}
+		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
+	}
+	page = pfn_to_page(pfn);
+	BUG_ON(PageHighMem(page));	/* No HIGHMEM on 64-bit. */
+	l1_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
+#endif
+	pte = l1_pgtable[HV_L1_INDEX(address)];
+	if (!hv_pte_get_present(pte))
+		return 0;
+	pfn = hv_pte_get_pfn(pte);
+	if (pte_huge(pte)) {
+		if (!pfn_valid(pfn)) {
+			pr_err("huge page has bad pfn %#lx\n", pfn);
+			return 0;
+		}
+		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
+	}
+
+	page = pfn_to_page(pfn);
+	if (PageHighMem(page)) {
+		pr_err("L2 page table not in LOWMEM (%#llx)\n",
+		       HV_PFN_TO_CPA(pfn));
+		return 0;
+	}
+	l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
+	pte = l2_pgtable[HV_L2_INDEX(address)];
+	return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
+}
+
 /* Callback for backtracer; basically a glorified memcpy */
 static bool read_memory_func(void *result, unsigned long address,
 			     unsigned int size, void *vkbt)
 {
 	int retval;
 	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
-
-	if (address == 0)
-		return 0;
 	if (__kernel_text_address(address)) {
 		/* OK to read kernel code. */
 	} else if (address >= PAGE_OFFSET) {
 		/* We only tolerate kernel-space reads of this task's stack */
 		if (!in_kernel_stack(kbt, address))
 			return 0;
-	} else if (!kbt->is_current) {
-		return 0;	/* can't read from other user address spaces */
+	} else if (!valid_address(kbt, address)) {
+		return 0;	/* invalid user-space address */
 	}
 	pagefault_disable();
 	retval = __copy_from_user_inatomic(result,
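[Annotation] The hunk above replaces the old blanket "refuse any other task's user space" check with a software walk of the saved page table: index the top-level table with the high address bits, check present/readable, stop early at a huge-page leaf, and otherwise follow the PFN down to the L2 table and test the leaf PTE. A self-contained toy model of that shape, where the flags, shifts, and struct are illustrative stand-ins for HV_PTE, the hv_pte_get_*() accessors, and the HV_Lx_INDEX() macros, not the kernel's definitions:

	#include <stdbool.h>
	#include <stdint.h>

	#define ENT_PRESENT  (1u << 0)   /* mapping exists */
	#define ENT_READABLE (1u << 1)   /* mapping is readable */
	#define ENT_HUGE     (1u << 2)   /* leaf at L1: huge page */
	#define L1_SHIFT 22              /* toy geometry: 4 MB per L1 entry */
	#define L2_SHIFT 12              /* toy geometry: 4 KB pages */
	#define L2_MASK  1023            /* 1024 entries per L2 table */

	struct ent { uint32_t flags; struct ent *l2; };  /* l2 valid for non-huge entries */

	static bool walk_readable(const struct ent *l1, uint32_t addr)
	{
		struct ent e = l1[addr >> L1_SHIFT];          /* top-level lookup */
		if (!(e.flags & ENT_PRESENT))
			return false;                         /* unmapped at L1 */
		if (e.flags & ENT_HUGE)                       /* huge page: leaf here */
			return e.flags & ENT_READABLE;
		e = e.l2[(addr >> L2_SHIFT) & L2_MASK];       /* follow to L2 leaf */
		return (e.flags & ENT_PRESENT) && (e.flags & ENT_READABLE);
	}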
@@ -80,8 +127,6 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 	unsigned long sp = kbt->it.sp;
 	struct pt_regs *p;
 
-	if (sp % sizeof(long) != 0)
-		return NULL;
 	if (!in_kernel_stack(kbt, sp))
 		return NULL;
 	if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
@@ -124,27 +169,27 @@ static int is_sigreturn(unsigned long pc)
 }
 
 /* Return a pt_regs pointer for a valid signal handler frame */
-static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
-				      struct rt_sigframe* kframe)
+static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt)
 {
 	BacktraceIterator *b = &kbt->it;
 
-	if (b->pc == VDSO_BASE && b->sp < PAGE_OFFSET &&
-	    b->sp % sizeof(long) == 0) {
-		int retval;
-		pagefault_disable();
-		retval = __copy_from_user_inatomic(
-			kframe, (void __user __force *)b->sp,
-			sizeof(*kframe));
-		pagefault_enable();
-		if (retval != 0 ||
-		    (unsigned int)(kframe->info.si_signo) >= _NSIG)
+	if (b->pc == VDSO_BASE) {
+		struct rt_sigframe *frame;
+		unsigned long sigframe_top =
+			b->sp + sizeof(struct rt_sigframe) - 1;
+		if (!valid_address(kbt, b->sp) ||
+		    !valid_address(kbt, sigframe_top)) {
+			if (kbt->verbose)
+				pr_err("  (odd signal: sp %#lx?)\n",
+				       (unsigned long)(b->sp));
 			return NULL;
+		}
+		frame = (struct rt_sigframe *)b->sp;
 		if (kbt->verbose) {
 			pr_err("  <received signal %d>\n",
-			       kframe->info.si_signo);
+			       frame->info.si_signo);
 		}
-		return (struct pt_regs *)&kframe->uc.uc_mcontext;
+		return (struct pt_regs *)&frame->uc.uc_mcontext;
 	}
 	return NULL;
 }
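[Annotation] Validating only b->sp and sigframe_top works because struct rt_sigframe is much smaller than a page: the frame can straddle at most one page boundary, so checking its first and last byte covers every byte in between. For an arbitrary-length range that shortcut would not hold; a hedged sketch of the general check (a hypothetical helper, built on the valid_address() added above):

	static int range_readable(struct KBacktraceIterator *kbt,
				  unsigned long start, unsigned long len)
	{
		unsigned long a;

		/* One probe per spanned page, plus the final byte to
		 * cover misalignment at the end of the range. */
		if (!valid_address(kbt, start + len - 1))
			return 0;
		for (a = start; a < start + len; a += PAGE_SIZE)
			if (!valid_address(kbt, a))
				return 0;
		return 1;
	}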
@@ -157,11 +202,10 @@ static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
 static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
 {
 	struct pt_regs *p;
-	struct rt_sigframe kframe;
 
 	p = valid_fault_handler(kbt);
 	if (p == NULL)
-		p = valid_sigframe(kbt, &kframe);
+		p = valid_sigframe(kbt);
 	if (p == NULL)
 		return 0;
 	backtrace_init(&kbt->it, read_memory_func, kbt,
@@ -221,19 +265,41 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
 
 	/*
 	 * Set up callback information.  We grab the kernel stack base
-	 * so we will allow reads of that address range.
+	 * so we will allow reads of that address range, and if we're
+	 * asking about the current process we grab the page table
+	 * so we can check user accesses before trying to read them.
+	 * We flush the TLB to avoid any weird skew issues.
 	 */
-	is_current = (t == NULL || t == current);
+	is_current = (t == NULL);
 	kbt->is_current = is_current;
 	if (is_current)
 		t = validate_current();
 	kbt->task = t;
+	kbt->pgtable = NULL;
 	kbt->verbose = 0;	/* override in caller if desired */
 	kbt->profile = 0;	/* override in caller if desired */
 	kbt->end = KBT_ONGOING;
-	kbt->new_context = 1;
-	if (is_current)
+	kbt->new_context = 0;
+	if (is_current) {
+		HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
+		if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET) {
+			/*
+			 * Not just an optimization: this also allows
+			 * this to work at all before va/pa mappings
+			 * are set up.
+			 */
+			kbt->pgtable = swapper_pg_dir;
+		} else {
+			struct page *page = pfn_to_page(PFN_DOWN(pgdir_pa));
+			if (!PageHighMem(page))
+				kbt->pgtable = __va(pgdir_pa);
+			else
+				pr_err("page table not in LOWMEM"
+				       " (%#llx)\n", pgdir_pa);
+		}
+		local_flush_tlb_all();
 		validate_stack(regs);
+	}
 
 	if (regs == NULL) {
 		if (is_current || t->state == TASK_RUNNING) {
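[Annotation] hv_inquire_context() reports the physical address of the page table currently installed in the hypervisor, so the hunk above must translate it back into a kernel pointer. On tile, lowmem is linearly mapped at PAGE_OFFSET, which gives the two identities the code relies on: the physical address of swapper_pg_dir is (unsigned long)swapper_pg_dir - PAGE_OFFSET, and any lowmem physical address maps back via __va(). A sketch of the capture logic in isolation, repackaged as a hypothetical helper using the same calls as the hunk:

	static HV_PTE *capture_pgtable(HV_PhysAddr pgdir_pa)
	{
		/* The kernel's own page table: usable even before the
		 * va/pa mappings are fully set up. */
		if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET)
			return (HV_PTE *)swapper_pg_dir;

		/* A user page table: only addressable if it sits in
		 * lowmem, where __va() is valid. */
		if (PageHighMem(pfn_to_page(PFN_DOWN(pgdir_pa))))
			return NULL;
		return (HV_PTE *)__va(pgdir_pa);
	}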
@@ -279,78 +345,6 @@ void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
 }
 EXPORT_SYMBOL(KBacktraceIterator_next);
 
-static void describe_addr(struct KBacktraceIterator *kbt,
-			  unsigned long address,
-			  int have_mmap_sem, char *buf, size_t bufsize)
-{
-	struct vm_area_struct *vma;
-	size_t namelen, remaining;
-	unsigned long size, offset, adjust;
-	char *p, *modname;
-	const char *name;
-	int rc;
-
-	/*
-	 * Look one byte back for every caller frame (i.e. those that
-	 * aren't a new context) so we look up symbol data for the
-	 * call itself, not the following instruction, which may be on
-	 * a different line (or in a different function).
-	 */
-	adjust = !kbt->new_context;
-	address -= adjust;
-
-	if (address >= PAGE_OFFSET) {
-		/* Handle kernel symbols. */
-		BUG_ON(bufsize < KSYM_NAME_LEN);
-		name = kallsyms_lookup(address, &size, &offset,
-				       &modname, buf);
-		if (name == NULL) {
-			buf[0] = '\0';
-			return;
-		}
-		namelen = strlen(buf);
-		remaining = (bufsize - 1) - namelen;
-		p = buf + namelen;
-		rc = snprintf(p, remaining, "+%#lx/%#lx ",
-			      offset + adjust, size);
-		if (modname && rc < remaining)
-			snprintf(p + rc, remaining - rc, "[%s] ", modname);
-		buf[bufsize-1] = '\0';
-		return;
-	}
-
-	/* If we don't have the mmap_sem, we can't show any more info. */
-	buf[0] = '\0';
-	if (!have_mmap_sem)
-		return;
-
-	/* Find vma info. */
-	vma = find_vma(kbt->task->mm, address);
-	if (vma == NULL || address < vma->vm_start) {
-		snprintf(buf, bufsize, "[unmapped address] ");
-		return;
-	}
-
-	if (vma->vm_file) {
-		char *s;
-		p = d_path(&vma->vm_file->f_path, buf, bufsize);
-		if (IS_ERR(p))
-			p = "?";
-		s = strrchr(p, '/');
-		if (s)
-			p = s+1;
-	} else {
-		p = "anon";
-	}
-
-	/* Generate a string description of the vma info. */
-	namelen = strlen(p);
-	remaining = (bufsize - 1) - namelen;
-	memmove(buf, p, namelen);
-	snprintf(buf + namelen, remaining, "[%lx+%lx] ",
-		 vma->vm_start, vma->vm_end - vma->vm_start);
-}
-
 /*
  * This method wraps the backtracer's more generic support.
  * It is only invoked from the architecture-specific code; show_stack()
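[Annotation] The deleted describe_addr() was what annotated user-space frames: it took mmap_sem with down_read_trylock(), located the VMA with find_vma(), and resolved the backing file through d_path(), printing "name[start+length]" or "[unmapped address]". With it gone, only kernel addresses get symbolic names. An illustrative before/after for a user frame (the addresses and binary name here are made up):

	  frame 3: 0x0005f3a0 busybox[10000+4e000] (sp 0xbffff5f8)    <- with describe_addr()
	  frame 3: 0x0005f3a0 (sp 0xbffff5f8)                         <- after this patch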
@@ -359,7 +353,6 @@ static void describe_addr(struct KBacktraceIterator *kbt,
 void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 {
 	int i;
-	int have_mmap_sem = 0;
 
 	if (headers) {
 		/*
@@ -376,16 +369,31 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 	kbt->verbose = 1;
 	i = 0;
 	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
-		char namebuf[KSYM_NAME_LEN+100];
+		char *modname;
+		const char *name;
 		unsigned long address = kbt->it.pc;
+		unsigned long offset, size;
+		char namebuf[KSYM_NAME_LEN+100];
 
-		/* Try to acquire the mmap_sem as we pass into userspace. */
-		if (address < PAGE_OFFSET && !have_mmap_sem && kbt->task->mm)
-			have_mmap_sem =
-				down_read_trylock(&kbt->task->mm->mmap_sem);
-
-		describe_addr(kbt, address, have_mmap_sem,
-			      namebuf, sizeof(namebuf));
+		if (address >= PAGE_OFFSET)
+			name = kallsyms_lookup(address, &size, &offset,
+					       &modname, namebuf);
+		else
+			name = NULL;
+
+		if (!name)
+			namebuf[0] = '\0';
+		else {
+			size_t namelen = strlen(namebuf);
+			size_t remaining = (sizeof(namebuf) - 1) - namelen;
+			char *p = namebuf + namelen;
+			int rc = snprintf(p, remaining, "+%#lx/%#lx ",
+					  offset, size);
+			if (modname && rc < remaining)
+				snprintf(p + rc, remaining - rc,
+					 "[%s] ", modname);
+			namebuf[sizeof(namebuf)-1] = '\0';
+		}
 
 		pr_err("  frame %d: 0x%lx %s(sp 0x%lx)\n",
 		       i++, address, namebuf, (unsigned long)(kbt->it.sp));
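[Annotation] Two behavioral details of the inlined lookup above are worth noting. First, namebuf does double duty: kallsyms_lookup() writes the symbol name into it, then the "+offset/size [module] " suffix is appended in place. Second, the one-byte call-site adjustment from the removed describe_addr() is gone, so offsets for caller frames now point at the return address rather than the call instruction. A hypothetical helper showing the dropped adjustment, using kbt->new_context from this file:

	/* Look one byte back for caller frames (kbt->new_context == 0) so
	 * the symbol lookup lands on the call itself, not the instruction
	 * after it, which may be on a different line or in a different
	 * function. */
	static unsigned long call_site(struct KBacktraceIterator *kbt,
				       unsigned long pc)
	{
		return pc - !kbt->new_context;
	}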
@@ -400,8 +408,6 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 		pr_err("Stack dump stopped; next frame identical to this one\n");
 	if (headers)
 		pr_err("Stack dump complete\n");
-	if (have_mmap_sem)
-		up_read(&kbt->task->mm->mmap_sem);
 }
 EXPORT_SYMBOL(tile_show_stack);
 