author     Chris Metcalf <cmetcalf@tilera.com>   2010-05-28 23:09:12 -0400
committer  Chris Metcalf <cmetcalf@tilera.com>   2010-06-04 17:11:18 -0400
commit     867e359b97c970a60626d5d76bbe2a8fadbf38fb (patch)
tree       c5ccbb7f5172e8555977119608ecb1eee3cc37e3 /arch/tile/kernel/stack.c
parent     5360bd776f73d0a7da571d72a09a03f237e99900 (diff)
arch/tile: core support for Tilera 32-bit chips.
This change is the core kernel support for TILEPro and TILE64 chips. No driver support (except the console driver) is included yet.

This includes the relevant Linux headers in asm/; the low-level "Tile architecture" headers in arch/, which are shared with the hypervisor, etc., and are build-system agnostic; and the relevant hypervisor headers in hv/.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Reviewed-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/tile/kernel/stack.c')
-rw-r--r--  arch/tile/kernel/stack.c  485
1 file changed, 485 insertions, 0 deletions
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
new file mode 100644
index 000000000000..382170b4b40a
--- /dev/null
+++ b/arch/tile/kernel/stack.c
@@ -0,0 +1,485 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/mmzone.h>
#include <asm/backtrace.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/ucontext.h>
#include <asm/sigframe.h>
#include <asm/stack.h>
#include <arch/abi.h>
#include <arch/interrupts.h>


/* Is address on the specified kernel stack? */
static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp)
{
	ulong kstack_base = (ulong) kbt->task->stack;
	if (kstack_base == 0)  /* corrupt task pointer; just follow stack... */
		return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory;
	return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
}

/* Is address in the specified kernel code? */
static int in_kernel_text(VirtualAddress address)
{
	return (address >= MEM_SV_INTRPT &&
		address < MEM_SV_INTRPT + HPAGE_SIZE);
}

/* Is address valid for reading? */
static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
{
	HV_PTE *l1_pgtable = kbt->pgtable;
	HV_PTE *l2_pgtable;
	unsigned long pfn;
	HV_PTE pte;
	struct page *page;

	pte = l1_pgtable[HV_L1_INDEX(address)];
	if (!hv_pte_get_present(pte))
		return 0;
	pfn = hv_pte_get_pfn(pte);
	if (pte_huge(pte)) {
		if (!pfn_valid(pfn)) {
			printk(KERN_ERR "huge page has bad pfn %#lx\n", pfn);
			return 0;
		}
		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
	}

	page = pfn_to_page(pfn);
	if (PageHighMem(page)) {
		printk(KERN_ERR "L2 page table not in LOWMEM (%#llx)\n",
		       HV_PFN_TO_CPA(pfn));
		return 0;
	}
	l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
	pte = l2_pgtable[HV_L2_INDEX(address)];
	return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
}

/* Callback for backtracer; basically a glorified memcpy */
static bool read_memory_func(void *result, VirtualAddress address,
			     unsigned int size, void *vkbt)
{
	int retval;
	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
	if (in_kernel_text(address)) {
		/* OK to read kernel code. */
	} else if (address >= PAGE_OFFSET) {
		/* We only tolerate kernel-space reads of this task's stack */
		if (!in_kernel_stack(kbt, address))
			return 0;
	} else if (kbt->pgtable == NULL) {
		return 0;	/* can't read user space in other tasks */
	} else if (!valid_address(kbt, address)) {
		return 0;	/* invalid user-space address */
	}
	pagefault_disable();
	retval = __copy_from_user_inatomic(result, (const void *)address,
					   size);
	pagefault_enable();
	return (retval == 0);
}

/* Return a pt_regs pointer for a valid fault handler frame */
static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
{
#ifndef __tilegx__
	const char *fault = NULL;  /* happy compiler */
	char fault_buf[64];
	VirtualAddress sp = kbt->it.sp;
	struct pt_regs *p;

	if (!in_kernel_stack(kbt, sp))
		return NULL;
	if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
		return NULL;
	p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE);
	if (p->faultnum == INT_SWINT_1 || p->faultnum == INT_SWINT_1_SIGRETURN)
		fault = "syscall";
	else {
		if (kbt->verbose) {	/* else we aren't going to use it */
			snprintf(fault_buf, sizeof(fault_buf),
				 "interrupt %ld", p->faultnum);
			fault = fault_buf;
		}
	}
	if (EX1_PL(p->ex1) == KERNEL_PL &&
	    in_kernel_text(p->pc) &&
	    in_kernel_stack(kbt, p->sp) &&
	    p->sp >= sp) {
		if (kbt->verbose)
			printk(KERN_ERR " <%s while in kernel mode>\n", fault);
	} else if (EX1_PL(p->ex1) == USER_PL &&
		   p->pc < PAGE_OFFSET &&
		   p->sp < PAGE_OFFSET) {
		if (kbt->verbose)
			printk(KERN_ERR " <%s while in user mode>\n", fault);
	} else if (kbt->verbose) {
		printk(KERN_ERR " (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
		       p->pc, p->sp, p->ex1);
		p = NULL;
	}
	if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0)
		return p;
#endif
	return NULL;
}

/* Is the pc pointing to a sigreturn trampoline? */
static int is_sigreturn(VirtualAddress pc)
{
	return (pc == VDSO_BASE);
}

/* Return a pt_regs pointer for a valid signal handler frame */
static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt)
{
	BacktraceIterator *b = &kbt->it;

	if (b->pc == VDSO_BASE) {
		struct rt_sigframe *frame;
		unsigned long sigframe_top =
			b->sp + sizeof(struct rt_sigframe) - 1;
		if (!valid_address(kbt, b->sp) ||
		    !valid_address(kbt, sigframe_top)) {
			if (kbt->verbose)
				printk(" (odd signal: sp %#lx?)\n",
				       (unsigned long)(b->sp));
			return NULL;
		}
		frame = (struct rt_sigframe *)b->sp;
		if (kbt->verbose) {
			printk(KERN_ERR " <received signal %d>\n",
			       frame->info.si_signo);
		}
		return &frame->uc.uc_mcontext.regs;
	}
	return NULL;
}

int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
{
	return is_sigreturn(kbt->it.pc);
}

static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
{
	struct pt_regs *p;

	p = valid_fault_handler(kbt);
	if (p == NULL)
		p = valid_sigframe(kbt);
	if (p == NULL)
		return 0;
	backtrace_init(&kbt->it, read_memory_func, kbt,
		       p->pc, p->lr, p->sp, p->regs[52]);
	kbt->new_context = 1;
	return 1;
}

/* Find a frame that isn't a sigreturn, if there is one. */
static int KBacktraceIterator_next_item_inclusive(
	struct KBacktraceIterator *kbt)
{
	for (;;) {
		do {
			if (!KBacktraceIterator_is_sigreturn(kbt))
				return 1;
		} while (backtrace_next(&kbt->it));

		if (!KBacktraceIterator_restart(kbt))
			return 0;
	}
}

/*
 * If the current sp is on a page different than what we recorded
 * as the top-of-kernel-stack last time we context switched, we have
 * probably blown the stack, and nothing is going to work out well.
 * If we can at least get out a warning, that may help the debug,
 * though we probably won't be able to backtrace into the code that
 * actually did the recursive damage.
 */
static void validate_stack(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long ksp0 = get_current_ksp0();
	unsigned long ksp0_base = ksp0 - THREAD_SIZE;
	unsigned long sp = stack_pointer;

	if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
		printk("WARNING: cpu %d: kernel stack page %#lx underrun!\n"
		       " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
	}

	else if (sp < ksp0_base + sizeof(struct thread_info)) {
		printk("WARNING: cpu %d: kernel stack page %#lx overrun!\n"
		       " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
	}
}

void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
			     struct task_struct *t, struct pt_regs *regs)
{
	VirtualAddress pc, lr, sp, r52;
	int is_current;

	/*
	 * Set up callback information. We grab the kernel stack base
	 * so we will allow reads of that address range, and if we're
	 * asking about the current process we grab the page table
	 * so we can check user accesses before trying to read them.
	 * We flush the TLB to avoid any weird skew issues.
	 */
	is_current = (t == NULL);
	kbt->is_current = is_current;
	if (is_current)
		t = validate_current();
	kbt->task = t;
	kbt->pgtable = NULL;
	kbt->verbose = 0;	/* override in caller if desired */
	kbt->profile = 0;	/* override in caller if desired */
	kbt->end = 0;
	kbt->new_context = 0;
	if (is_current) {
		HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
		if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET) {
			/*
			 * Not just an optimization: this also allows
			 * this to work at all before va/pa mappings
			 * are set up.
			 */
			kbt->pgtable = swapper_pg_dir;
		} else {
			struct page *page = pfn_to_page(PFN_DOWN(pgdir_pa));
			if (!PageHighMem(page))
				kbt->pgtable = __va(pgdir_pa);
			else
				printk(KERN_ERR "page table not in LOWMEM"
				       " (%#llx)\n", pgdir_pa);
		}
		local_flush_tlb_all();
		validate_stack(regs);
	}

	if (regs == NULL) {
		extern const void *get_switch_to_pc(void);
		if (is_current || t->state == TASK_RUNNING) {
			/* Can't do this; we need registers */
			kbt->end = 1;
			return;
		}
		pc = (ulong) get_switch_to_pc();
		lr = t->thread.pc;
		sp = t->thread.ksp;
		r52 = 0;
	} else {
		pc = regs->pc;
		lr = regs->lr;
		sp = regs->sp;
		r52 = regs->regs[52];
	}

	backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
	kbt->end = !KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_init);

int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
{
	return kbt->end;
}
EXPORT_SYMBOL(KBacktraceIterator_end);

void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
{
	kbt->new_context = 0;
	if (!backtrace_next(&kbt->it) &&
	    !KBacktraceIterator_restart(kbt)) {
		kbt->end = 1;
		return;
	}

	kbt->end = !KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_next);

/*
 * This method wraps the backtracer's more generic support.
 * It is only invoked from the architecture-specific code; show_stack()
 * and dump_stack() (in entry.S) are architecture-independent entry points.
 */
void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
{
	int i;

	if (headers) {
		/*
		 * Add a blank line since if we are called from panic(),
		 * then bust_spinlocks() spit out a space in front of us
		 * and it will mess up our KERN_ERR.
		 */
		printk("\n");
		printk(KERN_ERR "Starting stack dump of tid %d, pid %d (%s)"
		       " on cpu %d at cycle %lld\n",
		       kbt->task->pid, kbt->task->tgid, kbt->task->comm,
		       smp_processor_id(), get_cycles());
	}
#ifdef __tilegx__
	if (kbt->is_current) {
		__insn_mtspr(SPR_SIM_CONTROL,
			     SIM_DUMP_SPR_ARG(SIM_DUMP_BACKTRACE));
	}
#endif
	kbt->verbose = 1;
	i = 0;
	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
		char *modname;
		const char *name;
		unsigned long address = kbt->it.pc;
		unsigned long offset, size;
		char namebuf[KSYM_NAME_LEN+100];

		if (address >= PAGE_OFFSET)
			name = kallsyms_lookup(address, &size, &offset,
					       &modname, namebuf);
		else
			name = NULL;

		if (!name)
			namebuf[0] = '\0';
		else {
			size_t namelen = strlen(namebuf);
			size_t remaining = (sizeof(namebuf) - 1) - namelen;
			char *p = namebuf + namelen;
			int rc = snprintf(p, remaining, "+%#lx/%#lx ",
					  offset, size);
			if (modname && rc < remaining)
				snprintf(p + rc, remaining - rc,
					 "[%s] ", modname);
			namebuf[sizeof(namebuf)-1] = '\0';
		}

		printk(KERN_ERR " frame %d: 0x%lx %s(sp 0x%lx)\n",
		       i++, address, namebuf, (unsigned long)(kbt->it.sp));

		if (i >= 100) {
			printk(KERN_ERR "Stack dump truncated"
			       " (%d frames)\n", i);
			break;
		}
	}
	if (headers)
		printk(KERN_ERR "Stack dump complete\n");
}
EXPORT_SYMBOL(tile_show_stack);


/* This is called from show_regs() and _dump_stack() */
void dump_stack_regs(struct pt_regs *regs)
{
	struct KBacktraceIterator kbt;
	KBacktraceIterator_init(&kbt, NULL, regs);
	tile_show_stack(&kbt, 1);
}
EXPORT_SYMBOL(dump_stack_regs);

static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
				       ulong pc, ulong lr, ulong sp, ulong r52)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->pc = pc;
	regs->lr = lr;
	regs->sp = sp;
	regs->regs[52] = r52;
	return regs;
}

/* This is called from dump_stack() and just converts to pt_regs */
void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
{
	struct pt_regs regs;
	dump_stack_regs(regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called from KBacktraceIterator_init_current() */
void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
				      ulong lr, ulong sp, ulong r52)
{
	struct pt_regs regs;
	KBacktraceIterator_init(kbt, NULL,
				regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called only from kernel/sched.c, with esp == NULL */
void show_stack(struct task_struct *task, unsigned long *esp)
{
	struct KBacktraceIterator kbt;
	if (task == NULL || task == current)
		KBacktraceIterator_init_current(&kbt);
	else
		KBacktraceIterator_init(&kbt, task, NULL);
	tile_show_stack(&kbt, 0);
}

#ifdef CONFIG_STACKTRACE

/* Support generic Linux stack API too */

void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
{
	struct KBacktraceIterator kbt;
	int skip = trace->skip;
	int i = 0;

	if (task == NULL || task == current)
		KBacktraceIterator_init_current(&kbt);
	else
		KBacktraceIterator_init(&kbt, task, NULL);
	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
		if (skip) {
			--skip;
			continue;
		}
		if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET)
			break;
		trace->entries[i++] = kbt.it.pc;
	}
	trace->nr_entries = i;
}
EXPORT_SYMBOL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(NULL, trace);
}

#endif

/* In entry.S */
EXPORT_SYMBOL(KBacktraceIterator_init_current);
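
Editorial usage note (not part of the commit): a minimal sketch of how a caller might drive the iterator API this file exports, mirroring the loop in tile_show_stack() and save_stack_trace_tsk() above; the helper name example_dump_current_stack() is hypothetical, and the sketch assumes <asm/stack.h> declares the iterator as used here.

/*
 * Minimal sketch: walk the current task's kernel stack with the
 * exported KBacktraceIterator API and print each frame's pc/sp.
 * example_dump_current_stack() is a hypothetical caller, not code
 * from this commit.
 */
#include <linux/kernel.h>
#include <asm/stack.h>

static void example_dump_current_stack(void)
{
	struct KBacktraceIterator kbt;

	/* Start from the caller's own pc/lr/sp (see _KBacktraceIterator_init_current() above). */
	KBacktraceIterator_init_current(&kbt);
	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt))
		pr_info(" pc %#lx sp %#lx\n",
			(unsigned long)kbt.it.pc, (unsigned long)kbt.it.sp);
}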