Diffstat (limited to 'arch/tile/kernel/stack.c')
-rw-r--r--  arch/tile/kernel/stack.c | 43 ++++++++++++++++++++++---------------------
1 file changed, 22 insertions(+), 21 deletions(-)
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 382170b4b40a..b6268d3ae869 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -56,13 +56,16 @@ static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
 	HV_PTE pte;
 	struct page *page;
 
+	if (l1_pgtable == NULL)
+		return 0;	/* can't read user space in other tasks */
+
 	pte = l1_pgtable[HV_L1_INDEX(address)];
 	if (!hv_pte_get_present(pte))
 		return 0;
 	pfn = hv_pte_get_pfn(pte);
 	if (pte_huge(pte)) {
 		if (!pfn_valid(pfn)) {
-			printk(KERN_ERR "huge page has bad pfn %#lx\n", pfn);
+			pr_err("huge page has bad pfn %#lx\n", pfn);
 			return 0;
 		}
 		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
@@ -70,7 +73,7 @@ static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
 
 	page = pfn_to_page(pfn);
 	if (PageHighMem(page)) {
-		printk(KERN_ERR "L2 page table not in LOWMEM (%#llx)\n",
+		pr_err("L2 page table not in LOWMEM (%#llx)\n",
 		       HV_PFN_TO_CPA(pfn));
 		return 0;
 	}
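Read together with the next hunk, the new guard makes valid_address() fail closed on its own: the NULL page-table check moves here from read_memory_func(), so every caller gets it. A simplified sketch of the walk this function now performs, using only the names visible in the hunks above (the L2 descent and the error printing are elided):

	/* sketch: may the backtracer safely read 'address' in this task? */
	static int valid_address_sketch(HV_PTE *l1_pgtable, VirtualAddress address)
	{
		HV_PTE pte;

		if (l1_pgtable == NULL)
			return 0;		/* no page table: fail closed */
		pte = l1_pgtable[HV_L1_INDEX(address)];
		if (!hv_pte_get_present(pte))
			return 0;		/* unmapped at L1 */
		if (pte_huge(pte))		/* huge mapping: no L2 table */
			return hv_pte_get_readable(pte);
		/* ... otherwise locate the L2 table via pfn_to_page() and
		 * apply the same present/readable tests to the L2 PTE */
		return 1;
	}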
@@ -91,13 +94,12 @@ static bool read_memory_func(void *result, VirtualAddress address,
 		/* We only tolerate kernel-space reads of this task's stack */
 		if (!in_kernel_stack(kbt, address))
 			return 0;
-	} else if (kbt->pgtable == NULL) {
-		return 0;	/* can't read user space in other tasks */
 	} else if (!valid_address(kbt, address)) {
 		return 0;	/* invalid user-space address */
 	}
 	pagefault_disable();
-	retval = __copy_from_user_inatomic(result, (const void *)address,
+	retval = __copy_from_user_inatomic(result,
+					   (void __user __force *)address,
 					   size);
 	pagefault_enable();
 	return (retval == 0);
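The new cast swaps (const void *) for (void __user __force *), which is about sparse, not code generation: __copy_from_user_inatomic() expects a __user source pointer, and __force acknowledges that the address-space conversion is deliberate. A sketch of how the kernel defines these annotations (per include/linux/compiler.h of this era; they expand to nothing in a normal build):

	#ifdef __CHECKER__		/* defined when sparse is running */
	# define __user		__attribute__((noderef, address_space(1)))
	# define __force	__attribute__((force))
	#else
	# define __user
	# define __force
	#endif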
@@ -131,14 +133,14 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 	    in_kernel_stack(kbt, p->sp) &&
 	    p->sp >= sp) {
 		if (kbt->verbose)
-			printk(KERN_ERR "  <%s while in kernel mode>\n", fault);
+			pr_err("  <%s while in kernel mode>\n", fault);
 	} else if (EX1_PL(p->ex1) == USER_PL &&
 		   p->pc < PAGE_OFFSET &&
 		   p->sp < PAGE_OFFSET) {
 		if (kbt->verbose)
-			printk(KERN_ERR "  <%s while in user mode>\n", fault);
+			pr_err("  <%s while in user mode>\n", fault);
 	} else if (kbt->verbose) {
-		printk(KERN_ERR "  (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
+		pr_err("  (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
 		       p->pc, p->sp, p->ex1);
 		p = NULL;
 	}
@@ -166,13 +168,13 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt)
 		if (!valid_address(kbt, b->sp) ||
 		    !valid_address(kbt, sigframe_top)) {
 			if (kbt->verbose)
-				printk("  (odd signal: sp %#lx?)\n",
+				pr_err("  (odd signal: sp %#lx?)\n",
 				       (unsigned long)(b->sp));
 			return NULL;
 		}
 		frame = (struct rt_sigframe *)b->sp;
 		if (kbt->verbose) {
-			printk(KERN_ERR "  <received signal %d>\n",
+			pr_err("  <received signal %d>\n",
 			       frame->info.si_signo);
 		}
 		return &frame->uc.uc_mcontext.regs;
@@ -180,7 +182,7 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt)
 	return NULL;
 }
 
-int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
+static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
 {
 	return is_sigreturn(kbt->it.pc);
 }
@@ -231,13 +233,13 @@ static void validate_stack(struct pt_regs *regs)
 	unsigned long sp = stack_pointer;
 
 	if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
-		printk("WARNING: cpu %d: kernel stack page %#lx underrun!\n"
+		pr_err("WARNING: cpu %d: kernel stack page %#lx underrun!\n"
 		       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
 		       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
 	}
 
 	else if (sp < ksp0_base + sizeof(struct thread_info)) {
-		printk("WARNING: cpu %d: kernel stack page %#lx overrun!\n"
+		pr_err("WARNING: cpu %d: kernel stack page %#lx overrun!\n"
 		       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
 		       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
 	}
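Both warnings assume the standard layout of this era: struct thread_info lives at the bottom of the kernel stack region and the stack grows down from ksp0. A commented sketch of the boundaries the two tests enforce (names taken from the hunk above):

	/*
	 * ksp0 ...................... top of the kernel stack region
	 *   |   stack grows downward;   sp >= ksp0  ->  underrun
	 *   v
	 * ksp0_base + sizeof(struct thread_info)
	 *       thread_info lives here; sp below this  ->  overrun
	 * ksp0_base ................. bottom of the region
	 */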
@@ -280,7 +282,7 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
 			if (!PageHighMem(page))
 				kbt->pgtable = __va(pgdir_pa);
 			else
-				printk(KERN_ERR "page table not in LOWMEM"
+				pr_err("page table not in LOWMEM"
 				       " (%#llx)\n", pgdir_pa);
 		}
 		local_flush_tlb_all();
@@ -288,13 +290,12 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
 	}
 
 	if (regs == NULL) {
-		extern const void *get_switch_to_pc(void);
 		if (is_current || t->state == TASK_RUNNING) {
 			/* Can't do this; we need registers */
 			kbt->end = 1;
 			return;
 		}
-		pc = (ulong) get_switch_to_pc();
+		pc = get_switch_to_pc();
 		lr = t->thread.pc;
 		sp = t->thread.ksp;
 		r52 = 0;
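Dropping the function-local extern (and the (ulong) cast at the call site) implies that get_switch_to_pc() is now declared, with an unsigned long return type, in a header shared by both files. The header itself is outside this diff, so the following is only a plausible form of the declaration:

	/* hypothetical placement, e.g. in an arch-private header */
	extern unsigned long get_switch_to_pc(void);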
@@ -344,8 +345,8 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 		 * then bust_spinlocks() spit out a space in front of us
 		 * and it will mess up our KERN_ERR.
 		 */
-		printk("\n");
-		printk(KERN_ERR "Starting stack dump of tid %d, pid %d (%s)"
+		pr_err("\n");
+		pr_err("Starting stack dump of tid %d, pid %d (%s)"
 		       " on cpu %d at cycle %lld\n",
 		       kbt->task->pid, kbt->task->tgid, kbt->task->comm,
 		       smp_processor_id(), get_cycles());
@@ -385,17 +386,17 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 			namebuf[sizeof(namebuf)-1] = '\0';
 		}
 
-		printk(KERN_ERR "  frame %d: 0x%lx %s(sp 0x%lx)\n",
+		pr_err("  frame %d: 0x%lx %s(sp 0x%lx)\n",
 		       i++, address, namebuf, (unsigned long)(kbt->it.sp));
 
 		if (i >= 100) {
-			printk(KERN_ERR "Stack dump truncated"
+			pr_err("Stack dump truncated"
 			       " (%d frames)\n", i);
 			break;
 		}
 	}
 	if (headers)
-		printk(KERN_ERR "Stack dump complete\n");
+		pr_err("Stack dump complete\n");
 }
 EXPORT_SYMBOL(tile_show_stack);
 
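Most of the churn above is a mechanical printk(KERN_ERR ...) to pr_err(...) conversion; it also fixes the handful of bare printk() calls (in valid_sigframe() and validate_stack()) that were silently logging at the default loglevel rather than at KERN_ERR. For reference, pr_err() is just shorthand, as defined in include/linux/kernel.h at the time:

	#define pr_err(fmt, ...) \
		printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

where pr_fmt() defaults to a pass-through that individual files may override to prefix their messages.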