diff options
Diffstat (limited to 'arch/powerpc/kernel/process.c')
-rw-r--r-- | arch/powerpc/kernel/process.c | 69 |
1 file changed, 48 insertions(+), 21 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index b1c648a36b03..91e52df3d81d 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread)
353 | prime_debug_regs(new_thread); | 353 | prime_debug_regs(new_thread); |
354 | } | 354 | } |
355 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ | 355 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ |
356 | #ifndef CONFIG_HAVE_HW_BREAKPOINT | ||
356 | static void set_debug_reg_defaults(struct thread_struct *thread) | 357 | static void set_debug_reg_defaults(struct thread_struct *thread) |
357 | { | 358 | { |
358 | if (thread->dabr) { | 359 | if (thread->dabr) { |
@@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
360 | set_dabr(0); | 361 | set_dabr(0); |
361 | } | 362 | } |
362 | } | 363 | } |
364 | #endif /* !CONFIG_HAVE_HW_BREAKPOINT */ | ||
363 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | 365 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ |
364 | 366 | ||
365 | int set_dabr(unsigned long dabr) | 367 | int set_dabr(unsigned long dabr) |
@@ -393,6 +395,9 @@ struct task_struct *__switch_to(struct task_struct *prev,
393 | struct thread_struct *new_thread, *old_thread; | 395 | struct thread_struct *new_thread, *old_thread; |
394 | unsigned long flags; | 396 | unsigned long flags; |
395 | struct task_struct *last; | 397 | struct task_struct *last; |
398 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
399 | struct ppc64_tlb_batch *batch; | ||
400 | #endif | ||
396 | 401 | ||
397 | #ifdef CONFIG_SMP | 402 | #ifdef CONFIG_SMP |
398 | /* avoid complexity of lazy save/restore of fpu | 403 | /* avoid complexity of lazy save/restore of fpu |
@@ -511,13 +516,22 @@ struct task_struct *__switch_to(struct task_struct *prev,
511 | old_thread->accum_tb += (current_tb - start_tb); | 516 | old_thread->accum_tb += (current_tb - start_tb); |
512 | new_thread->start_tb = current_tb; | 517 | new_thread->start_tb = current_tb; |
513 | } | 518 | } |
514 | #endif | 519 | #endif /* CONFIG_PPC64 */ |
520 | |||
521 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
522 | batch = &__get_cpu_var(ppc64_tlb_batch); | ||
523 | if (batch->active) { | ||
524 | current_thread_info()->local_flags |= _TLF_LAZY_MMU; | ||
525 | if (batch->index) | ||
526 | __flush_tlb_pending(batch); | ||
527 | batch->active = 0; | ||
528 | } | ||
529 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
515 | 530 | ||
516 | local_irq_save(flags); | 531 | local_irq_save(flags); |
517 | 532 | ||
518 | account_system_vtime(current); | 533 | account_system_vtime(current); |
519 | account_process_vtime(current); | 534 | account_process_vtime(current); |
520 | calculate_steal_time(); | ||
521 | 535 | ||
522 | /* | 536 | /* |
523 | * We can't take a PMU exception inside _switch() since there is a | 537 | * We can't take a PMU exception inside _switch() since there is a |
@@ -527,6 +541,14 @@ struct task_struct *__switch_to(struct task_struct *prev,
527 | hard_irq_disable(); | 541 | hard_irq_disable(); |
528 | last = _switch(old_thread, new_thread); | 542 | last = _switch(old_thread, new_thread); |
529 | 543 | ||
544 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
545 | if (current_thread_info()->local_flags & _TLF_LAZY_MMU) { | ||
546 | current_thread_info()->local_flags &= ~_TLF_LAZY_MMU; | ||
547 | batch = &__get_cpu_var(ppc64_tlb_batch); | ||
548 | batch->active = 1; | ||
549 | } | ||
550 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
551 | |||
530 | local_irq_restore(flags); | 552 | local_irq_restore(flags); |
531 | 553 | ||
532 | return last; | 554 | return last; |
@@ -632,7 +654,7 @@ void show_regs(struct pt_regs * regs)
632 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | 654 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
633 | printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr); | 655 | printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr); |
634 | #else | 656 | #else |
635 | printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr); | 657 | printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr); |
636 | #endif | 658 | #endif |
637 | printk("TASK = %p[%d] '%s' THREAD: %p", | 659 | printk("TASK = %p[%d] '%s' THREAD: %p", |
638 | current, task_pid_nr(current), current->comm, task_thread_info(current)); | 660 | current, task_pid_nr(current), current->comm, task_thread_info(current)); |
@@ -671,11 +693,11 @@ void flush_thread(void)
671 | { | 693 | { |
672 | discard_lazy_cpu_state(); | 694 | discard_lazy_cpu_state(); |
673 | 695 | ||
674 | #ifdef CONFIG_HAVE_HW_BREAKPOINTS | 696 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
675 | flush_ptrace_hw_breakpoint(current); | 697 | flush_ptrace_hw_breakpoint(current); |
676 | #else /* CONFIG_HAVE_HW_BREAKPOINTS */ | 698 | #else /* CONFIG_HAVE_HW_BREAKPOINT */ |
677 | set_debug_reg_defaults(¤t->thread); | 699 | set_debug_reg_defaults(¤t->thread); |
678 | #endif /* CONFIG_HAVE_HW_BREAKPOINTS */ | 700 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
679 | } | 701 | } |
680 | 702 | ||
681 | void | 703 | void |
@@ -701,6 +723,8 @@ void prepare_to_copy(struct task_struct *tsk)
701 | /* | 723 | /* |
702 | * Copy a thread.. | 724 | * Copy a thread.. |
703 | */ | 725 | */ |
726 | extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */ | ||
727 | |||
704 | int copy_thread(unsigned long clone_flags, unsigned long usp, | 728 | int copy_thread(unsigned long clone_flags, unsigned long usp, |
705 | unsigned long unused, struct task_struct *p, | 729 | unsigned long unused, struct task_struct *p, |
706 | struct pt_regs *regs) | 730 | struct pt_regs *regs) |
@@ -754,11 +778,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
754 | _ALIGN_UP(sizeof(struct thread_info), 16); | 778 | _ALIGN_UP(sizeof(struct thread_info), 16); |
755 | 779 | ||
756 | #ifdef CONFIG_PPC_STD_MMU_64 | 780 | #ifdef CONFIG_PPC_STD_MMU_64 |
757 | if (cpu_has_feature(CPU_FTR_SLB)) { | 781 | if (mmu_has_feature(MMU_FTR_SLB)) { |
758 | unsigned long sp_vsid; | 782 | unsigned long sp_vsid; |
759 | unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; | 783 | unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; |
760 | 784 | ||
761 | if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) | 785 | if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) |
762 | sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T) | 786 | sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T) |
763 | << SLB_VSID_SHIFT_1T; | 787 | << SLB_VSID_SHIFT_1T; |
764 | else | 788 | else |
@@ -768,6 +792,20 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
768 | p->thread.ksp_vsid = sp_vsid; | 792 | p->thread.ksp_vsid = sp_vsid; |
769 | } | 793 | } |
770 | #endif /* CONFIG_PPC_STD_MMU_64 */ | 794 | #endif /* CONFIG_PPC_STD_MMU_64 */ |
795 | #ifdef CONFIG_PPC64 | ||
796 | if (cpu_has_feature(CPU_FTR_DSCR)) { | ||
797 | if (current->thread.dscr_inherit) { | ||
798 | p->thread.dscr_inherit = 1; | ||
799 | p->thread.dscr = current->thread.dscr; | ||
800 | } else if (0 != dscr_default) { | ||
801 | p->thread.dscr_inherit = 1; | ||
802 | p->thread.dscr = dscr_default; | ||
803 | } else { | ||
804 | p->thread.dscr_inherit = 0; | ||
805 | p->thread.dscr = 0; | ||
806 | } | ||
807 | } | ||
808 | #endif | ||
771 | 809 | ||
772 | /* | 810 | /* |
773 | * The PPC64 ABI makes use of a TOC to contain function | 811 | * The PPC64 ABI makes use of a TOC to contain function |
@@ -1217,11 +1255,11 @@ void __ppc64_runlatch_off(void)
1217 | 1255 | ||
1218 | static struct kmem_cache *thread_info_cache; | 1256 | static struct kmem_cache *thread_info_cache; |
1219 | 1257 | ||
1220 | struct thread_info *alloc_thread_info(struct task_struct *tsk) | 1258 | struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) |
1221 | { | 1259 | { |
1222 | struct thread_info *ti; | 1260 | struct thread_info *ti; |
1223 | 1261 | ||
1224 | ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL); | 1262 | ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node); |
1225 | if (unlikely(ti == NULL)) | 1263 | if (unlikely(ti == NULL)) |
1226 | return NULL; | 1264 | return NULL; |
1227 | #ifdef CONFIG_DEBUG_STACK_USAGE | 1265 | #ifdef CONFIG_DEBUG_STACK_USAGE |
@@ -1298,14 +1336,3 @@ unsigned long randomize_et_dyn(unsigned long base)
1298 | 1336 | ||
1299 | return ret; | 1337 | return ret; |
1300 | } | 1338 | } |
1301 | |||
1302 | #ifdef CONFIG_SMP | ||
1303 | int arch_sd_sibling_asym_packing(void) | ||
1304 | { | ||
1305 | if (cpu_has_feature(CPU_FTR_ASYM_SMT)) { | ||
1306 | printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n"); | ||
1307 | return SD_ASYM_PACKING; | ||
1308 | } | ||
1309 | return 0; | ||
1310 | } | ||
1311 | #endif | ||