author     Linus Torvalds <torvalds@linux-foundation.org>   2012-06-29 16:50:11 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-06-29 16:50:11 -0400
commit     15114c7e1cf41e7e3a792b1cf6b815b947ef6d1e (patch)
tree       5d45673dcaefeda8092de38d7ed4d97bec6fac99
parent     15b77435edad42c1b25adaafce2be50e8d29b2fc (diff)
parent     bc6dc752f35488160ffac07ae91bed1bddaea32a (diff)
Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc fixes from Benjamin Herrenschmidt:
"Here are a few powerpc fixes. Arguably some of this should have come
to you earlier but I'm only just catching up after my medical leave.
Mostly these fix regressions; a couple are long-standing bugs."
* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
powerpc/pseries: Fix software invalidate TCE
powerpc: check_and_cede_processor() never cedes
powerpc/ftrace: Do not trace restore_interrupts()
powerpc: Fix Section mismatch warnings in prom_init.c
ppc64: fix missing to check all bits of _TIF_USER_WORK_MASK in preempt
powerpc: Fix uninitialised error in numa.c
powerpc: Fix BPF_JIT code to link with multiple TOCs
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h                |  5
-rw-r--r--  arch/powerpc/kernel/entry_64.S                   | 97
-rw-r--r--  arch/powerpc/kernel/irq.c                        |  2
-rw-r--r--  arch/powerpc/kernel/prom_init.c                  |  4
-rw-r--r--  arch/powerpc/mm/numa.c                           |  2
-rw-r--r--  arch/powerpc/net/bpf_jit_64.S                    |  2
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c           |  4
-rw-r--r--  arch/powerpc/platforms/pseries/processor_idle.c  |  2
8 files changed, 54 insertions(+), 64 deletions(-)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 32b394f3b854..6eb75b80488c 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -103,6 +103,11 @@ static inline void hard_irq_disable(void)
 /* include/linux/interrupt.h needs hard_irq_disable to be a macro */
 #define hard_irq_disable	hard_irq_disable
 
+static inline bool lazy_irq_pending(void)
+{
+	return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
+}
+
 /*
  * This is called by asynchronous interrupts to conditionally
  * re-enable hard interrupts when soft-disabled after having
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index ed1718feb9d9..5971c85df136 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -558,27 +558,54 @@ _GLOBAL(ret_from_except_lite)
 	mtmsrd	r10,1		/* Update machine state */
 #endif /* CONFIG_PPC_BOOK3E */
 
-#ifdef CONFIG_PREEMPT
 	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
-	li	r0,_TIF_NEED_RESCHED	/* bits to check */
 	ld	r3,_MSR(r1)
 	ld	r4,TI_FLAGS(r9)
-	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
-	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
-	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
-	bne	do_work
-
-#else /* !CONFIG_PREEMPT */
-	ld	r3,_MSR(r1)	/* Returning to user mode? */
 	andi.	r3,r3,MSR_PR
-	beq	restore		/* if not, just restore regs and return */
+	beq	resume_kernel
 
 	/* Check current_thread_info()->flags */
+	andi.	r0,r4,_TIF_USER_WORK_MASK
+	beq	restore
+
+	andi.	r0,r4,_TIF_NEED_RESCHED
+	beq	1f
+	bl	.restore_interrupts
+	bl	.schedule
+	b	.ret_from_except_lite
+
+1:	bl	.save_nvgprs
+	bl	.restore_interrupts
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.do_notify_resume
+	b	.ret_from_except
+
+resume_kernel:
+#ifdef CONFIG_PREEMPT
+	/* Check if we need to preempt */
+	andi.	r0,r4,_TIF_NEED_RESCHED
+	beq+	restore
+	/* Check that preempt_count() == 0 and interrupts are enabled */
+	lwz	r8,TI_PREEMPT(r9)
+	cmpwi	cr1,r8,0
+	ld	r0,SOFTE(r1)
+	cmpdi	r0,0
+	crandc	eq,cr1*4+eq,eq
+	bne	restore
+
+	/*
+	 * Here we are preempting the current task. We want to make
+	 * sure we are soft-disabled first
+	 */
+	SOFT_DISABLE_INTS(r3,r4)
+1:	bl	.preempt_schedule_irq
+
+	/* Re-test flags and eventually loop */
 	clrrdi	r9,r1,THREAD_SHIFT
 	ld	r4,TI_FLAGS(r9)
-	andi.	r0,r4,_TIF_USER_WORK_MASK
-	bne	do_work
-#endif /* !CONFIG_PREEMPT */
+	andi.	r0,r4,_TIF_NEED_RESCHED
+	bne	1b
+#endif /* CONFIG_PREEMPT */
 
 	.globl	fast_exc_return_irq
 fast_exc_return_irq:
@@ -759,50 +786,6 @@ restore_check_irq_replay:
 #endif /* CONFIG_PPC_BOOK3E */
 1:	b	.ret_from_except /* What else to do here ? */
 
-
-
-3:
-do_work:
-#ifdef CONFIG_PREEMPT
-	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
-	bne	user_work
-	/* Check that preempt_count() == 0 and interrupts are enabled */
-	lwz	r8,TI_PREEMPT(r9)
-	cmpwi	cr1,r8,0
-	ld	r0,SOFTE(r1)
-	cmpdi	r0,0
-	crandc	eq,cr1*4+eq,eq
-	bne	restore
-
-	/*
-	 * Here we are preempting the current task. We want to make
-	 * sure we are soft-disabled first
-	 */
-	SOFT_DISABLE_INTS(r3,r4)
-1:	bl	.preempt_schedule_irq
-
-	/* Re-test flags and eventually loop */
-	clrrdi	r9,r1,THREAD_SHIFT
-	ld	r4,TI_FLAGS(r9)
-	andi.	r0,r4,_TIF_NEED_RESCHED
-	bne	1b
-	b	restore
-
-user_work:
-#endif /* CONFIG_PREEMPT */
-
-	andi.	r0,r4,_TIF_NEED_RESCHED
-	beq	1f
-	bl	.restore_interrupts
-	bl	.schedule
-	b	.ret_from_except_lite
-
-1:	bl	.save_nvgprs
-	bl	.restore_interrupts
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_notify_resume
-	b	.ret_from_except
-
 unrecov_restore:
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.unrecoverable_exception
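For readers following the entry_64.S rework: the old return path built an ad-hoc mask out of _TIF_NEED_RESCHED (plus _TIF_SIGPENDING when returning to user mode), so any other user-work bit was silently ignored on the way back to userspace; the new path tests the full _TIF_USER_WORK_MASK before deciding whether to schedule or call do_notify_resume(). The small stand-alone C program below only illustrates that flag-testing difference; the flag names and values are invented stand-ins for the kernel's _TIF_* bits, not kernel code.

/*
 * Demo of why testing only NEED_RESCHED/SIGPENDING on the return-to-user
 * path is not enough.  Flag values are made up for illustration.
 */
#include <stdio.h>

#define TIF_SIGPENDING		(1u << 1)
#define TIF_NEED_RESCHED	(1u << 2)
#define TIF_NOTIFY_RESUME	(1u << 3)	/* e.g. a pending notifier/upcall */

#define TIF_USER_WORK_MASK	(TIF_SIGPENDING | TIF_NEED_RESCHED | TIF_NOTIFY_RESUME)

int main(void)
{
	unsigned int flags = TIF_NOTIFY_RESUME;	/* only "other" user work pending */

	/* Old logic: only NEED_RESCHED (and maybe SIGPENDING) was checked. */
	if (flags & (TIF_NEED_RESCHED | TIF_SIGPENDING))
		printf("old path: user work handled\n");
	else
		printf("old path: work missed, straight back to user\n");

	/* New logic: branch to the do-work code on any user-work bit. */
	if (flags & TIF_USER_WORK_MASK)
		printf("new path: user work handled\n");

	return 0;
}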
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 7835a5e1ea5f..1b415027ec0e 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -277,7 +277,7 @@ EXPORT_SYMBOL(arch_local_irq_restore);
  * NOTE: This is called with interrupts hard disabled but not marked
  * as such in paca->irq_happened, so we need to resync this.
  */
-void restore_interrupts(void)
+void notrace restore_interrupts(void)
 {
 	if (irqs_disabled()) {
 		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 1b488e5305c5..0794a3017b1b 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1312,7 +1312,7 @@ static struct opal_secondary_data {
 
 extern char opal_secondary_entry;
 
-static void prom_query_opal(void)
+static void __init prom_query_opal(void)
 {
 	long rc;
 
@@ -1436,7 +1436,7 @@ static void __init prom_opal_hold_cpus(void)
 	prom_debug("prom_opal_hold_cpus: end...\n");
 }
 
-static void prom_opal_takeover(void)
+static void __init prom_opal_takeover(void)
 {
 	struct opal_secondary_data *data = &RELOC(opal_secondary_data);
 	struct opal_takeover_args *args = &data->args;
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index b6edbb3b4a54..6e8f677f5646 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -635,7 +635,7 @@ static inline int __init read_usm_ranges(const u32 **usm)
  */
 static void __init parse_drconf_memory(struct device_node *memory)
 {
-	const u32 *dm, *usm;
+	const u32 *uninitialized_var(dm), *usm;
 	unsigned int n, rc, ranges, is_kexec_kdump = 0;
 	unsigned long lmb_size, base, size, sz;
 	int nid;
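Context for the numa.c one-liner: gcc could not prove that dm is initialized on every path that later reads it and emitted a false-positive "may be used uninitialized" warning; uninitialized_var() silences the warning without changing code generation. Below is a minimal sketch of the idea, assuming the gcc-era definition of the macro (a self-assignment) and using a toy function in place of parse_drconf_memory().

/* Assumed definition (gcc variant of the era); not copied from the kernel tree. */
#define uninitialized_var(x) x = x

/* Toy stand-in for the parse_drconf_memory() pattern: every path that
 * reads 'val' assigns it first, but the compiler cannot always see that. */
static int demo(int pick)
{
	int uninitialized_var(val);	/* expands to: int val = val; */

	if (pick == 0)
		val = 10;
	else if (pick == 1)
		val = 20;
	else
		return -1;		/* 'val' is never read on this path */

	return val;
}

int main(void)
{
	return demo(0) == 10 ? 0 : 1;
}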
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
index 55ba3855a97f..7d3a3b5619a2 100644
--- a/arch/powerpc/net/bpf_jit_64.S
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -105,6 +105,7 @@ sk_load_byte_msh_positive_offset:
 	mr	r4, r_addr;				\
 	li	r6, SIZE;				\
 	bl	skb_copy_bits;				\
+	nop;						\
 	/* R3 = 0 on success */				\
 	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;		\
 	ld	r0, 16(r1);				\
@@ -156,6 +157,7 @@ bpf_slow_path_byte_msh:
 	mr	r4, r_addr;				\
 	li	r5, SIZE;				\
 	bl	bpf_internal_load_pointer_neg_helper;	\
+	nop;						\
 	/* R3 != 0 on success */			\
 	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;		\
 	ld	r0, 16(r1);				\
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 0915b1ad66ce..2d311c0caf8e 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -106,7 +106,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
 		tcep++;
 	}
 
-	if (tbl->it_type == TCE_PCI_SWINV_CREATE)
+	if (tbl->it_type & TCE_PCI_SWINV_CREATE)
 		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
 	return 0;
 }
@@ -121,7 +121,7 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
 	while (npages--)
 		*(tcep++) = 0;
 
-	if (tbl->it_type == TCE_PCI_SWINV_FREE)
+	if (tbl->it_type & TCE_PCI_SWINV_FREE)
 		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
 }
 
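The two pseries/iommu.c hunks are a classic flag-test fix: it_type is a bit field carrying the table type plus optional TCE_PCI_SWINV_* bits, so comparing it for equality against a single flag essentially never matches and the software TCE invalidation was skipped. A stand-alone illustration follows; the numeric values are invented for the demo and are not the kernel's definitions.

/* Demo of '==' vs '&' when testing one bit of a multi-bit flag field. */
#include <stdio.h>

#define TCE_PCI			0x1	/* made-up values, for illustration only */
#define TCE_PCI_SWINV_CREATE	0x2
#define TCE_PCI_SWINV_FREE	0x4

int main(void)
{
	unsigned int it_type = TCE_PCI | TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;

	if (it_type == TCE_PCI_SWINV_CREATE)	/* old test: never true here */
		printf("equality test: would invalidate\n");
	else
		printf("equality test: invalidate skipped\n");

	if (it_type & TCE_PCI_SWINV_CREATE)	/* fixed test */
		printf("bitwise test: would invalidate\n");

	return 0;
}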
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index 41a34bc4a9a2..e61483e8e960 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -106,7 +106,7 @@ static void check_and_cede_processor(void)
 	 * we first hard disable then check.
 	 */
 	hard_irq_disable();
-	if (get_paca()->irq_happened == 0)
+	if (!lazy_irq_pending())
 		cede_processor();
 }
 
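The check_and_cede_processor() change ties together with the new lazy_irq_pending() helper from the hw_irq.h hunk: hard_irq_disable() itself records PACA_IRQ_HARD_DIS in paca->irq_happened, so the old "irq_happened == 0" test could never be true and the CPU never ceded. Masking that bit out leaves only genuinely pending lazy interrupts. The sketch below is a stand-alone mock, not kernel code; the constants and the PACA_IRQ_EE stand-in are invented for the demo.

/* Mock of the "never cedes" bug and its fix. */
#include <stdio.h>
#include <stdbool.h>

#define PACA_IRQ_HARD_DIS	0x01	/* made-up value: "hard disabled" marker */
#define PACA_IRQ_EE		0x02	/* made-up value: a pending external irq */

static unsigned char irq_happened;

static void hard_irq_disable(void)
{
	irq_happened |= PACA_IRQ_HARD_DIS;	/* always set after disabling */
}

static bool lazy_irq_pending(void)
{
	return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
}

int main(void)
{
	hard_irq_disable();	/* no real interrupt pending, yet irq_happened != 0 */

	printf("old test (irq_happened == 0): %s\n",
	       irq_happened == 0 ? "cede" : "never cedes");
	printf("new test (!lazy_irq_pending()): %s\n",
	       !lazy_irq_pending() ? "cede" : "interrupt pending, skip cede");

	return 0;
}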