Diffstat (limited to 'arch/sparc/kernel/process_64.c')
 arch/sparc/kernel/process_64.c | 305 +++++++++++++++++++++++------------------
 1 file changed, 153 insertions(+), 152 deletions(-)
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index fcaa59421126..dff54f46728d 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -27,6 +27,7 @@
 #include <linux/tick.h>
 #include <linux/init.h>
 #include <linux/cpu.h>
+#include <linux/perf_event.h>
 #include <linux/elfcore.h>
 #include <linux/sysrq.h>
 #include <linux/nmi.h>
@@ -47,6 +48,7 @@
 #include <asm/syscalls.h>
 #include <asm/irq_regs.h>
 #include <asm/smp.h>
+#include <asm/pcr.h>
 
 #include "kstack.h"
 
@@ -204,18 +206,22 @@ void show_regs(struct pt_regs *regs)
 	show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
 }
 
-struct global_reg_snapshot global_reg_snapshot[NR_CPUS];
-static DEFINE_SPINLOCK(global_reg_snapshot_lock);
+union global_cpu_snapshot global_cpu_snapshot[NR_CPUS];
+static DEFINE_SPINLOCK(global_cpu_snapshot_lock);
 
 static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
 			      int this_cpu)
 {
+	struct global_reg_snapshot *rp;
+
 	flushw_all();
 
-	global_reg_snapshot[this_cpu].tstate = regs->tstate;
-	global_reg_snapshot[this_cpu].tpc = regs->tpc;
-	global_reg_snapshot[this_cpu].tnpc = regs->tnpc;
-	global_reg_snapshot[this_cpu].o7 = regs->u_regs[UREG_I7];
+	rp = &global_cpu_snapshot[this_cpu].reg;
+
+	rp->tstate = regs->tstate;
+	rp->tpc = regs->tpc;
+	rp->tnpc = regs->tnpc;
+	rp->o7 = regs->u_regs[UREG_I7];
 
 	if (regs->tstate & TSTATE_PRIV) {
 		struct reg_window *rw;
@@ -223,17 +229,17 @@ static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
 		rw = (struct reg_window *)
 			(regs->u_regs[UREG_FP] + STACK_BIAS);
 		if (kstack_valid(tp, (unsigned long) rw)) {
-			global_reg_snapshot[this_cpu].i7 = rw->ins[7];
+			rp->i7 = rw->ins[7];
 			rw = (struct reg_window *)
 				(rw->ins[6] + STACK_BIAS);
 			if (kstack_valid(tp, (unsigned long) rw))
-				global_reg_snapshot[this_cpu].rpc = rw->ins[7];
+				rp->rpc = rw->ins[7];
 		}
 	} else {
-		global_reg_snapshot[this_cpu].i7 = 0;
-		global_reg_snapshot[this_cpu].rpc = 0;
+		rp->i7 = 0;
+		rp->rpc = 0;
 	}
-	global_reg_snapshot[this_cpu].thread = tp;
+	rp->thread = tp;
 }
 
 /* In order to avoid hangs we do not try to synchronize with the
@@ -261,9 +267,9 @@ void arch_trigger_all_cpu_backtrace(void)
 	if (!regs)
 		regs = tp->kregs;
 
-	spin_lock_irqsave(&global_reg_snapshot_lock, flags);
+	spin_lock_irqsave(&global_cpu_snapshot_lock, flags);
 
-	memset(global_reg_snapshot, 0, sizeof(global_reg_snapshot));
+	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
 
 	this_cpu = raw_smp_processor_id();
 
@@ -272,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(void)
 	smp_fetch_global_regs();
 
 	for_each_online_cpu(cpu) {
-		struct global_reg_snapshot *gp = &global_reg_snapshot[cpu];
+		struct global_reg_snapshot *gp = &global_cpu_snapshot[cpu].reg;
 
 		__global_reg_poll(gp);
 
@@ -295,9 +301,9 @@ void arch_trigger_all_cpu_backtrace(void)
 		}
 	}
 
-	memset(global_reg_snapshot, 0, sizeof(global_reg_snapshot));
+	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
 
-	spin_unlock_irqrestore(&global_reg_snapshot_lock, flags);
+	spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
 }
 
 #ifdef CONFIG_MAGIC_SYSRQ
@@ -309,16 +315,90 @@ static void sysrq_handle_globreg(int key)
 
 static struct sysrq_key_op sparc_globalreg_op = {
 	.handler	= sysrq_handle_globreg,
-	.help_msg	= "Globalregs",
+	.help_msg	= "global-regs(Y)",
 	.action_msg	= "Show Global CPU Regs",
 };
 
-static int __init sparc_globreg_init(void)
-{
-	return register_sysrq_key('y', &sparc_globalreg_op);
-}
-
-core_initcall(sparc_globreg_init);
+static void __global_pmu_self(int this_cpu)
+{
+	struct global_pmu_snapshot *pp;
+	int i, num;
+
+	pp = &global_cpu_snapshot[this_cpu].pmu;
+
+	num = 1;
+	if (tlb_type == hypervisor &&
+	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
+		num = 4;
+
+	for (i = 0; i < num; i++) {
+		pp->pcr[i] = pcr_ops->read_pcr(i);
+		pp->pic[i] = pcr_ops->read_pic(i);
+	}
+}
+
+static void __global_pmu_poll(struct global_pmu_snapshot *pp)
+{
+	int limit = 0;
+
+	while (!pp->pcr[0] && ++limit < 100) {
+		barrier();
+		udelay(1);
+	}
+}
+
+static void pmu_snapshot_all_cpus(void)
+{
+	unsigned long flags;
+	int this_cpu, cpu;
+
+	spin_lock_irqsave(&global_cpu_snapshot_lock, flags);
+
+	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
+
+	this_cpu = raw_smp_processor_id();
+
+	__global_pmu_self(this_cpu);
+
+	smp_fetch_global_pmu();
+
+	for_each_online_cpu(cpu) {
+		struct global_pmu_snapshot *pp = &global_cpu_snapshot[cpu].pmu;
+
+		__global_pmu_poll(pp);
+
+		printk("%c CPU[%3d]: PCR[%08lx:%08lx:%08lx:%08lx] PIC[%08lx:%08lx:%08lx:%08lx]\n",
+		       (cpu == this_cpu ? '*' : ' '), cpu,
+		       pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
+		       pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
+	}
+
+	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
+
+	spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
+}
+
+static void sysrq_handle_globpmu(int key)
+{
+	pmu_snapshot_all_cpus();
+}
+
+static struct sysrq_key_op sparc_globalpmu_op = {
+	.handler	= sysrq_handle_globpmu,
+	.help_msg	= "global-pmu(X)",
+	.action_msg	= "Show Global PMU Regs",
+};
+
+static int __init sparc_sysrq_init(void)
+{
+	int ret = register_sysrq_key('y', &sparc_globalreg_op);
+
+	if (!ret)
+		ret = register_sysrq_key('x', &sparc_globalpmu_op);
+	return ret;
+}
+
+core_initcall(sparc_sysrq_init);
 
 #endif
 
@@ -372,13 +452,16 @@ void flush_thread(void)
 /* It's a bit more tricky when 64-bit tasks are involved... */
 static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
 {
+	bool stack_64bit = test_thread_64bit_stack(psp);
 	unsigned long fp, distance, rval;
 
-	if (!(test_thread_flag(TIF_32BIT))) {
+	if (stack_64bit) {
 		csp += STACK_BIAS;
 		psp += STACK_BIAS;
 		__get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
 		fp += STACK_BIAS;
+		if (test_thread_flag(TIF_32BIT))
+			fp &= 0xffffffff;
 	} else
 		__get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));
 
@@ -392,7 +475,7 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
 	rval = (csp - distance);
 	if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
 		rval = 0;
-	else if (test_thread_flag(TIF_32BIT)) {
+	else if (!stack_64bit) {
 		if (put_user(((u32)csp),
 			     &(((struct reg_window32 __user *)rval)->ins[6])))
 			rval = 0;
@@ -427,18 +510,18 @@ void synchronize_user_stack(void)
 
 	flush_user_windows();
 	if ((window = get_thread_wsaved()) != 0) {
-		int winsize = sizeof(struct reg_window);
-		int bias = 0;
-
-		if (test_thread_flag(TIF_32BIT))
-			winsize = sizeof(struct reg_window32);
-		else
-			bias = STACK_BIAS;
-
 		window -= 1;
 		do {
-			unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
 			struct reg_window *rwin = &t->reg_window[window];
+			int winsize = sizeof(struct reg_window);
+			unsigned long sp;
+
+			sp = t->rwbuf_stkptrs[window];
+
+			if (test_thread_64bit_stack(sp))
+				sp += STACK_BIAS;
+			else
+				winsize = sizeof(struct reg_window32);
 
 			if (!copy_to_user((char __user *)sp, rwin, winsize)) {
 				shift_window_buffer(window, get_thread_wsaved() - 1, t);
@@ -464,13 +547,6 @@ void fault_in_user_windows(void)
 {
 	struct thread_info *t = current_thread_info();
 	unsigned long window;
-	int winsize = sizeof(struct reg_window);
-	int bias = 0;
-
-	if (test_thread_flag(TIF_32BIT))
-		winsize = sizeof(struct reg_window32);
-	else
-		bias = STACK_BIAS;
 
 	flush_user_windows();
 	window = get_thread_wsaved();
@@ -478,8 +554,16 @@ void fault_in_user_windows(void)
 	if (likely(window != 0)) {
 		window -= 1;
 		do {
-			unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
 			struct reg_window *rwin = &t->reg_window[window];
+			int winsize = sizeof(struct reg_window);
+			unsigned long sp;
+
+			sp = t->rwbuf_stkptrs[window];
+
+			if (test_thread_64bit_stack(sp))
+				sp += STACK_BIAS;
+			else
+				winsize = sizeof(struct reg_window32);
 
 			if (unlikely(sp & 0x7UL))
 				stack_unaligned(sp);
@@ -538,64 +622,55 @@ asmlinkage long sparc_do_fork(unsigned long clone_flags,
  * Child  -->  %o0 == parents pid, %o1 == 1
  */
 int copy_thread(unsigned long clone_flags, unsigned long sp,
-		unsigned long unused,
+		unsigned long arg,
 		struct task_struct *p, struct pt_regs *regs)
 {
 	struct thread_info *t = task_thread_info(p);
 	struct sparc_stackf *parent_sf;
 	unsigned long child_stack_sz;
 	char *child_trap_frame;
-	int kernel_thread;
-
-	kernel_thread = (regs->tstate & TSTATE_PRIV) ? 1 : 0;
-	parent_sf = ((struct sparc_stackf *) regs) - 1;
 
 	/* Calculate offset to stack_frame & pt_regs */
-	child_stack_sz = ((STACKFRAME_SZ + TRACEREG_SZ) +
-			  (kernel_thread ? STACKFRAME_SZ : 0));
+	child_stack_sz = (STACKFRAME_SZ + TRACEREG_SZ);
 	child_trap_frame = (task_stack_page(p) +
 			    (THREAD_SIZE - child_stack_sz));
-	memcpy(child_trap_frame, parent_sf, child_stack_sz);
 
-	t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) |
-				 (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) |
-		(((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT);
 	t->new_child = 1;
 	t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
 	t->kregs = (struct pt_regs *) (child_trap_frame +
 				       sizeof(struct sparc_stackf));
 	t->fpsaved[0] = 0;
 
-	if (kernel_thread) {
-		struct sparc_stackf *child_sf = (struct sparc_stackf *)
-			(child_trap_frame + (STACKFRAME_SZ + TRACEREG_SZ));
-
-		/* Zero terminate the stack backtrace. */
-		child_sf->fp = NULL;
-		t->kregs->u_regs[UREG_FP] =
-			((unsigned long) child_sf) - STACK_BIAS;
-
-		t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT);
-		t->kregs->u_regs[UREG_G6] = (unsigned long) t;
-		t->kregs->u_regs[UREG_G4] = (unsigned long) t->task;
-	} else {
-		if (t->flags & _TIF_32BIT) {
-			sp &= 0x00000000ffffffffUL;
-			regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
-		}
-		t->kregs->u_regs[UREG_FP] = sp;
-		t->flags |= ((long)ASI_AIUS << TI_FLAG_CURRENT_DS_SHIFT);
-		if (sp != regs->u_regs[UREG_FP]) {
-			unsigned long csp;
-
-			csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
-			if (!csp)
-				return -EFAULT;
-			t->kregs->u_regs[UREG_FP] = csp;
-		}
-		if (t->utraps)
-			t->utraps[0]++;
+	if (unlikely(p->flags & PF_KTHREAD)) {
+		memset(child_trap_frame, 0, child_stack_sz);
+		__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
+			(current_pt_regs()->tstate + 1) & TSTATE_CWP;
+		t->current_ds = ASI_P;
+		t->kregs->u_regs[UREG_G1] = sp; /* function */
+		t->kregs->u_regs[UREG_G2] = arg;
+		return 0;
+	}
+
+	parent_sf = ((struct sparc_stackf *) regs) - 1;
+	memcpy(child_trap_frame, parent_sf, child_stack_sz);
+	if (t->flags & _TIF_32BIT) {
+		sp &= 0x00000000ffffffffUL;
+		regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
 	}
+	t->kregs->u_regs[UREG_FP] = sp;
+	__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
+		(regs->tstate + 1) & TSTATE_CWP;
+	t->current_ds = ASI_AIUS;
+	if (sp != regs->u_regs[UREG_FP]) {
+		unsigned long csp;
+
+		csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
+		if (!csp)
+			return -EFAULT;
+		t->kregs->u_regs[UREG_FP] = csp;
+	}
+	if (t->utraps)
+		t->utraps[0]++;
 
 	/* Set the return value for the child. */
 	t->kregs->u_regs[UREG_I0] = current->pid;
@@ -610,45 +685,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 	return 0;
 }
 
-/*
- * This is the mechanism for creating a new kernel thread.
- *
- * NOTE! Only a kernel-only process(ie the swapper or direct descendants
- * who haven't done an "execve()") should use this: it will work within
- * a system call from a "real" process, but the process memory space will
- * not be freed until both the parent and the child have exited.
- */
-pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
-{
-	long retval;
-
-	/* If the parent runs before fn(arg) is called by the child,
-	 * the input registers of this function can be clobbered.
-	 * So we stash 'fn' and 'arg' into global registers which
-	 * will not be modified by the parent.
-	 */
-	__asm__ __volatile__("mov %4, %%g2\n\t"	/* Save FN into global */
-			     "mov %5, %%g3\n\t"	/* Save ARG into global */
-			     "mov %1, %%g1\n\t"	/* Clone syscall nr. */
-			     "mov %2, %%o0\n\t"	/* Clone flags. */
-			     "mov 0, %%o1\n\t"	/* usp arg == 0 */
-			     "t 0x6d\n\t"	/* Linux/Sparc clone(). */
-			     "brz,a,pn %%o1, 1f\n\t" /* Parent, just return. */
-			     " mov %%o0, %0\n\t"
-			     "jmpl %%g2, %%o7\n\t" /* Call the function. */
-			     " mov %%g3, %%o0\n\t" /* Set arg in delay. */
-			     "mov %3, %%g1\n\t"
-			     "t 0x6d\n\t"	/* Linux/Sparc exit(). */
-			     /* Notreached by child. */
-			     "1:" :
-			     "=r" (retval) :
-			     "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED),
-			     "i" (__NR_exit), "r" (fn), "r" (arg) :
-			     "g1", "g2", "g3", "o0", "o1", "memory", "cc");
-	return retval;
-}
-EXPORT_SYMBOL(kernel_thread);
-
 typedef struct {
 	union {
 		unsigned int pr_regs[32];
@@ -715,41 +751,6 @@ int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
 }
 EXPORT_SYMBOL(dump_fpu);
 
-/*
- * sparc_execve() executes a new program after the asm stub has set
- * things up for us.  This should basically do what I want it to.
- */
-asmlinkage int sparc_execve(struct pt_regs *regs)
-{
-	int error, base = 0;
-	struct filename *filename;
-
-	/* User register window flush is done by entry.S */
-
-	/* Check for indirect call. */
-	if (regs->u_regs[UREG_G1] == 0)
-		base = 1;
-
-	filename = getname((char __user *)regs->u_regs[base + UREG_I0]);
-	error = PTR_ERR(filename);
-	if (IS_ERR(filename))
-		goto out;
-	error = do_execve(filename->name,
-			  (const char __user *const __user *)
-			  regs->u_regs[base + UREG_I1],
-			  (const char __user *const __user *)
-			  regs->u_regs[base + UREG_I2], regs);
-	putname(filename);
-	if (!error) {
-		fprs_write(0);
-		current_thread_info()->xfsr[0] = 0;
-		current_thread_info()->fpsaved[0] = 0;
-		regs->tstate &= ~TSTATE_PEF;
-	}
-out:
-	return error;
-}
-
 unsigned long get_wchan(struct task_struct *task)
 {
 	unsigned long pc, fp, bias = 0;
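
For context, and not part of the patch itself: the two sysrq handlers registered in sparc_sysrq_init() above ('y', "Show Global CPU Regs", and the new 'x', "Show Global PMU Regs") can be exercised from userspace through the standard /proc/sysrq-trigger interface when CONFIG_MAGIC_SYSRQ is enabled; the per-CPU PCR/PIC lines printed by pmu_snapshot_all_cpus() land in the kernel log. A minimal userspace sketch (run as root, then check dmesg):

/* Trigger the sparc global-regs ('y') and global-pmu ('x') sysrq dumps.
 * Assumes CONFIG_MAGIC_SYSRQ=y and a mounted procfs; output goes to the
 * kernel log, not stdout.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int trigger_sysrq(char key)
{
	int fd = open("/proc/sysrq-trigger", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/sysrq-trigger");
		return -1;
	}
	if (write(fd, &key, 1) != 1)
		perror("write");
	close(fd);
	return 0;
}

int main(void)
{
	trigger_sysrq('y');	/* Show Global CPU Regs (sparc_globalreg_op) */
	trigger_sysrq('x');	/* Show Global PMU Regs (sparc_globalpmu_op) */
	return 0;
}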