Diffstat (limited to 'arch/tile/kernel/stack.c')
 arch/tile/kernel/stack.c | 75 ++++++++++++++++++++++++++++++---------------
 1 file changed, 47 insertions(+), 28 deletions(-)
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index ea2e0ce28380..37ee4d037e0b 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -30,9 +30,13 @@
 #include <arch/abi.h>
 #include <arch/interrupts.h>
 
+#define KBT_ONGOING	0	/* Backtrace still ongoing */
+#define KBT_DONE	1	/* Backtrace cleanly completed */
+#define KBT_RUNNING	2	/* Can't run backtrace on a running task */
+#define KBT_LOOP	3	/* Backtrace entered a loop */
 
 /* Is address on the specified kernel stack? */
-static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp)
+static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
 {
 	ulong kstack_base = (ulong) kbt->task->stack;
 	if (kstack_base == 0)	/* corrupt task pointer; just follow stack... */
@@ -40,15 +44,8 @@ static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp)
 	return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
 }
 
-/* Is address in the specified kernel code? */
-static int in_kernel_text(VirtualAddress address)
-{
-	return (address >= MEM_SV_INTRPT &&
-		address < MEM_SV_INTRPT + HPAGE_SIZE);
-}
-
 /* Is address valid for reading? */
-static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
+static int valid_address(struct KBacktraceIterator *kbt, unsigned long address)
 {
 	HV_PTE *l1_pgtable = kbt->pgtable;
 	HV_PTE *l2_pgtable;
@@ -59,6 +56,23 @@ static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
 	if (l1_pgtable == NULL)
 		return 0;	/* can't read user space in other tasks */
 
+#ifdef CONFIG_64BIT
+	/* Find the real l1_pgtable by looking in the l0_pgtable. */
+	pte = l1_pgtable[HV_L0_INDEX(address)];
+	if (!hv_pte_get_present(pte))
+		return 0;
+	pfn = hv_pte_get_pfn(pte);
+	if (pte_huge(pte)) {
+		if (!pfn_valid(pfn)) {
+			pr_err("L0 huge page has bad pfn %#lx\n", pfn);
+			return 0;
+		}
+		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
+	}
+	page = pfn_to_page(pfn);
+	BUG_ON(PageHighMem(page));	/* No HIGHMEM on 64-bit. */
+	l1_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
+#endif
 	pte = l1_pgtable[HV_L1_INDEX(address)];
 	if (!hv_pte_get_present(pte))
 		return 0;
@@ -83,12 +97,12 @@ static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
 }
 
 /* Callback for backtracer; basically a glorified memcpy */
-static bool read_memory_func(void *result, VirtualAddress address,
+static bool read_memory_func(void *result, unsigned long address,
 			     unsigned int size, void *vkbt)
 {
 	int retval;
 	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
-	if (in_kernel_text(address)) {
+	if (__kernel_text_address(address)) {
 		/* OK to read kernel code. */
 	} else if (address >= PAGE_OFFSET) {
 		/* We only tolerate kernel-space reads of this task's stack */
@@ -110,7 +124,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 {
 	const char *fault = NULL;	/* happy compiler */
 	char fault_buf[64];
-	VirtualAddress sp = kbt->it.sp;
+	unsigned long sp = kbt->it.sp;
 	struct pt_regs *p;
 
 	if (!in_kernel_stack(kbt, sp))
@@ -128,7 +142,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 		}
 	}
 	if (EX1_PL(p->ex1) == KERNEL_PL &&
-	    in_kernel_text(p->pc) &&
+	    __kernel_text_address(p->pc) &&
 	    in_kernel_stack(kbt, p->sp) &&
 	    p->sp >= sp) {
 		if (kbt->verbose)
@@ -149,7 +163,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 }
 
 /* Is the pc pointing to a sigreturn trampoline? */
-static int is_sigreturn(VirtualAddress pc)
+static int is_sigreturn(unsigned long pc)
 {
 	return (pc == VDSO_BASE);
 }
@@ -207,11 +221,11 @@ static int KBacktraceIterator_next_item_inclusive(
 	for (;;) {
 		do {
 			if (!KBacktraceIterator_is_sigreturn(kbt))
-				return 1;
+				return KBT_ONGOING;
 		} while (backtrace_next(&kbt->it));
 
 		if (!KBacktraceIterator_restart(kbt))
-			return 0;
+			return KBT_DONE;
 	}
 }
 
@@ -246,7 +260,7 @@ static void validate_stack(struct pt_regs *regs)
 void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
 			     struct task_struct *t, struct pt_regs *regs)
 {
-	VirtualAddress pc, lr, sp, r52;
+	unsigned long pc, lr, sp, r52;
 	int is_current;
 
 	/*
@@ -264,7 +278,7 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
 	kbt->pgtable = NULL;
 	kbt->verbose = 0;	/* override in caller if desired */
 	kbt->profile = 0;	/* override in caller if desired */
-	kbt->end = 0;
+	kbt->end = KBT_ONGOING;
 	kbt->new_context = 0;
 	if (is_current) {
 		HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
@@ -290,7 +304,7 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
 	if (regs == NULL) {
 		if (is_current || t->state == TASK_RUNNING) {
 			/* Can't do this; we need registers */
-			kbt->end = 1;
+			kbt->end = KBT_RUNNING;
 			return;
 		}
 		pc = get_switch_to_pc();
@@ -305,26 +319,29 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
 	}
 
 	backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
-	kbt->end = !KBacktraceIterator_next_item_inclusive(kbt);
+	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
 }
 EXPORT_SYMBOL(KBacktraceIterator_init);
 
 int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
 {
-	return kbt->end;
+	return kbt->end != KBT_ONGOING;
 }
 EXPORT_SYMBOL(KBacktraceIterator_end);
 
 void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
 {
+	unsigned long old_pc = kbt->it.pc, old_sp = kbt->it.sp;
 	kbt->new_context = 0;
-	if (!backtrace_next(&kbt->it) &&
-	    !KBacktraceIterator_restart(kbt)) {
-		kbt->end = 1;
-		return;
-	}
-
-	kbt->end = !KBacktraceIterator_next_item_inclusive(kbt);
+	if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
+		kbt->end = KBT_DONE;
+		return;
+	}
+	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
+	if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
+		/* Trapped in a loop; give up. */
+		kbt->end = KBT_LOOP;
+	}
 }
 EXPORT_SYMBOL(KBacktraceIterator_next);
 
@@ -387,6 +404,8 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 			break;
 		}
 	}
+	if (kbt->end == KBT_LOOP)
+		pr_err("Stack dump stopped; next frame identical to this one\n");
 	if (headers)
 		pr_err("Stack dump complete\n");
 }