path: root/arch/powerpc
author    Mauro Carvalho Chehab <mchehab@redhat.com>  2012-07-29 20:09:39 -0400
committer Mauro Carvalho Chehab <mchehab@redhat.com>  2012-07-29 20:09:39 -0400
commit    73bcc49959e4e40911dd0dd634bf1b353827df66 (patch)
tree      6b0c1d440c490a65c51ab5cf5aee7095cb4089d3 /arch/powerpc
parent    8447c4d15e357a458c9051ddc84aa6c8b9c27000 (diff)
parent    28a33cbc24e4256c143dce96c7d93bf423229f92 (diff)
Merge tag 'v3.5'
Linux 3.5

* tag 'v3.5': (1242 commits)
  Linux 3.5
  Remove SYSTEM_SUSPEND_DISK system state
  kdb: Switch to nolock variants of kmsg_dump functions
  printk: Implement some unlocked kmsg_dump functions
  printk: Remove kdb_syslog_data
  kdb: Revive dmesg command
  dm raid1: set discard_zeroes_data_unsupported
  dm thin: do not send discards to shared blocks
  dm raid1: fix crash with mirror recovery and discard
  pnfs-obj: Fix __r4w_get_page when offset is beyond i_size
  pnfs-obj: don't leak objio_state if ore_write/read fails
  ore: Unlock r4w pages in exact reverse order of locking
  ore: Remove support of partial IO request (NFS crash)
  ore: Fix NFS crash by supporting any unaligned RAID IO
  UBIFS: fix a bug in empty space fix-up
  cx25821: Remove bad strcpy to read-only char*
  HID: hid-multitouch: add support for Zytronic panels
  MIPS: PCI: Move fixups from __init to __devinit.
  MIPS: Fix bug.h MIPS build regression
  MIPS: sync-r4k: remove redundant irq operation
  ...
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h               | 14
-rw-r--r--  arch/powerpc/kernel/entry_64.S                  | 97
-rw-r--r--  arch/powerpc/kernel/irq.c                       | 50
-rw-r--r--  arch/powerpc/kernel/prom_init.c                 |  4
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c                    | 96
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S         |  2
-rw-r--r--  arch/powerpc/kvm/book3s_pr_papr.c               |  1
-rw-r--r--  arch/powerpc/mm/numa.c                          |  4
-rw-r--r--  arch/powerpc/net/bpf_jit_64.S                   |  2
-rw-r--r--  arch/powerpc/platforms/cell/pervasive.c         | 11
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c          |  4
-rw-r--r--  arch/powerpc/platforms/pseries/nvram.c          | 61
-rw-r--r--  arch/powerpc/platforms/pseries/processor_idle.c | 17
-rw-r--r--  arch/powerpc/xmon/xmon.c                        |  2
14 files changed, 200 insertions, 165 deletions
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index c9aac24b02e2..0554ab062bdc 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -86,8 +86,8 @@ static inline bool arch_irqs_disabled(void)
 }
 
 #ifdef CONFIG_PPC_BOOK3E
-#define __hard_irq_enable()     asm volatile("wrteei 1" : : : "memory");
-#define __hard_irq_disable()    asm volatile("wrteei 0" : : : "memory");
+#define __hard_irq_enable()     asm volatile("wrteei 1" : : : "memory")
+#define __hard_irq_disable()    asm volatile("wrteei 0" : : : "memory")
 #else
 #define __hard_irq_enable()     __mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
 #define __hard_irq_disable()    __mtmsrd(local_paca->kernel_msr, 1)
@@ -100,6 +100,14 @@ static inline void hard_irq_disable(void)
         get_paca()->irq_happened |= PACA_IRQ_HARD_DIS;
 }
 
+/* include/linux/interrupt.h needs hard_irq_disable to be a macro */
+#define hard_irq_disable        hard_irq_disable
+
+static inline bool lazy_irq_pending(void)
+{
+        return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
+}
+
 /*
  * This is called by asynchronous interrupts to conditionally
  * re-enable hard interrupts when soft-disabled after having
@@ -117,6 +125,8 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
         return !regs->softe;
 }
 
+extern bool prep_irq_for_idle(void);
+
 #else /* CONFIG_PPC64 */
 
 #define SET_MSR_EE(x)   mtmsr(x)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index ed1718feb9d9..5971c85df136 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -558,27 +558,54 @@ _GLOBAL(ret_from_except_lite)
         mtmsrd  r10,1             /* Update machine state */
 #endif /* CONFIG_PPC_BOOK3E */
 
-#ifdef CONFIG_PREEMPT
         clrrdi  r9,r1,THREAD_SHIFT      /* current_thread_info() */
-        li      r0,_TIF_NEED_RESCHED    /* bits to check */
         ld      r3,_MSR(r1)
         ld      r4,TI_FLAGS(r9)
-        /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
-        rlwimi  r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
-        and.    r0,r4,r0        /* check NEED_RESCHED and maybe SIGPENDING */
-        bne     do_work
-
-#else /* !CONFIG_PREEMPT */
-        ld      r3,_MSR(r1)     /* Returning to user mode? */
         andi.   r3,r3,MSR_PR
-        beq     restore         /* if not, just restore regs and return */
+        beq     resume_kernel
 
         /* Check current_thread_info()->flags */
+        andi.   r0,r4,_TIF_USER_WORK_MASK
+        beq     restore
+
+        andi.   r0,r4,_TIF_NEED_RESCHED
+        beq     1f
+        bl      .restore_interrupts
+        bl      .schedule
+        b       .ret_from_except_lite
+
+1:      bl      .save_nvgprs
+        bl      .restore_interrupts
+        addi    r3,r1,STACK_FRAME_OVERHEAD
+        bl      .do_notify_resume
+        b       .ret_from_except
+
+resume_kernel:
+#ifdef CONFIG_PREEMPT
+        /* Check if we need to preempt */
+        andi.   r0,r4,_TIF_NEED_RESCHED
+        beq+    restore
+        /* Check that preempt_count() == 0 and interrupts are enabled */
+        lwz     r8,TI_PREEMPT(r9)
+        cmpwi   cr1,r8,0
+        ld      r0,SOFTE(r1)
+        cmpdi   r0,0
+        crandc  eq,cr1*4+eq,eq
+        bne     restore
+
+        /*
+         * Here we are preempting the current task. We want to make
+         * sure we are soft-disabled first
+         */
+        SOFT_DISABLE_INTS(r3,r4)
+1:      bl      .preempt_schedule_irq
+
+        /* Re-test flags and eventually loop */
         clrrdi  r9,r1,THREAD_SHIFT
         ld      r4,TI_FLAGS(r9)
-        andi.   r0,r4,_TIF_USER_WORK_MASK
-        bne     do_work
-#endif /* !CONFIG_PREEMPT */
+        andi.   r0,r4,_TIF_NEED_RESCHED
+        bne     1b
+#endif /* CONFIG_PREEMPT */
 
         .globl  fast_exc_return_irq
 fast_exc_return_irq:
@@ -759,50 +786,6 @@ restore_check_irq_replay:
 #endif /* CONFIG_PPC_BOOK3E */
 1:      b       .ret_from_except /* What else to do here ? */
 
-
-
-3:
-do_work:
-#ifdef CONFIG_PREEMPT
-        andi.   r0,r3,MSR_PR    /* Returning to user mode? */
-        bne     user_work
-        /* Check that preempt_count() == 0 and interrupts are enabled */
-        lwz     r8,TI_PREEMPT(r9)
-        cmpwi   cr1,r8,0
-        ld      r0,SOFTE(r1)
-        cmpdi   r0,0
-        crandc  eq,cr1*4+eq,eq
-        bne     restore
-
-        /*
-         * Here we are preempting the current task. We want to make
-         * sure we are soft-disabled first
-         */
-        SOFT_DISABLE_INTS(r3,r4)
-1:      bl      .preempt_schedule_irq
-
-        /* Re-test flags and eventually loop */
-        clrrdi  r9,r1,THREAD_SHIFT
-        ld      r4,TI_FLAGS(r9)
-        andi.   r0,r4,_TIF_NEED_RESCHED
-        bne     1b
-        b       restore
-
-user_work:
-#endif /* CONFIG_PREEMPT */
-
-        andi.   r0,r4,_TIF_NEED_RESCHED
-        beq     1f
-        bl      .restore_interrupts
-        bl      .schedule
-        b       .ret_from_except_lite
-
-1:      bl      .save_nvgprs
-        bl      .restore_interrupts
-        addi    r3,r1,STACK_FRAME_OVERHEAD
-        bl      .do_notify_resume
-        b       .ret_from_except
-
 unrecov_restore:
         addi    r3,r1,STACK_FRAME_OVERHEAD
         bl      .unrecoverable_exception
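
The reworked exception-return path above is easier to follow as pseudo-C. The sketch below is an illustration only, not code from this patch; it assumes the usual powerpc definitions (MSR_PR, _TIF_USER_WORK_MASK, the saved SOFTE state), and ret_from_except_lite_sketch() is a hypothetical name.

/*
 * Hedged sketch of what the new ret_from_except_lite / resume_kernel
 * flow does, expressed in C.  Illustration only.
 */
void ret_from_except_lite_sketch(struct pt_regs *regs)      /* hypothetical */
{
        unsigned long flags = current_thread_info()->flags;

        if (regs->msr & MSR_PR) {               /* returning to user mode */
                if (!(flags & _TIF_USER_WORK_MASK))
                        return;                 /* nothing to do: restore */
                if (flags & _TIF_NEED_RESCHED)
                        schedule();             /* after .restore_interrupts */
                else
                        do_notify_resume(regs, flags);  /* signals etc. */
                return;
        }

        /* resume_kernel: only preempt if allowed */
#ifdef CONFIG_PREEMPT
        while ((current_thread_info()->flags & _TIF_NEED_RESCHED) &&
               preempt_count() == 0 && regs->softe)
                preempt_schedule_irq();         /* soft-disables first */
#endif
}
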
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 7835a5e1ea5f..1f017bb7a7ce 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -229,7 +229,7 @@ notrace void arch_local_irq_restore(unsigned long en)
          */
         if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
                 __hard_irq_disable();
-#ifdef CONFIG_TRACE_IRQFLAG
+#ifdef CONFIG_TRACE_IRQFLAGS
         else {
                 /*
                  * We should already be hard disabled here. We had bugs
@@ -277,7 +277,7 @@ EXPORT_SYMBOL(arch_local_irq_restore);
  * NOTE: This is called with interrupts hard disabled but not marked
  * as such in paca->irq_happened, so we need to resync this.
  */
-void restore_interrupts(void)
+void notrace restore_interrupts(void)
 {
         if (irqs_disabled()) {
                 local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
@@ -286,6 +286,52 @@ void restore_interrupts(void)
         __hard_irq_enable();
 }
 
+/*
+ * This is a helper to use when about to go into idle low-power
+ * when the latter has the side effect of re-enabling interrupts
+ * (such as calling H_CEDE under pHyp).
+ *
+ * You call this function with interrupts soft-disabled (this is
+ * already the case when ppc_md.power_save is called). The function
+ * will return whether to enter power save or just return.
+ *
+ * In the former case, it will have notified lockdep of interrupts
+ * being re-enabled and generally sanitized the lazy irq state,
+ * and in the latter case it will leave with interrupts hard
+ * disabled and marked as such, so the local_irq_enable() call
+ * in cpu_idle() will properly re-enable everything.
+ */
+bool prep_irq_for_idle(void)
+{
+        /*
+         * First we need to hard disable to ensure no interrupt
+         * occurs before we effectively enter the low power state
+         */
+        hard_irq_disable();
+
+        /*
+         * If anything happened while we were soft-disabled,
+         * we return now and do not enter the low power state.
+         */
+        if (lazy_irq_pending())
+                return false;
+
+        /* Tell lockdep we are about to re-enable */
+        trace_hardirqs_on();
+
+        /*
+         * Mark interrupts as soft-enabled and clear the
+         * PACA_IRQ_HARD_DIS from the pending mask since we
+         * are about to hard enable as well as a side effect
+         * of entering the low power state.
+         */
+        local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+        local_paca->soft_enabled = 1;
+
+        /* Tell the caller to enter the low power state */
+        return true;
+}
+
 #endif /* CONFIG_PPC64 */
 
 int arch_show_interrupts(struct seq_file *p, int prec)
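
The long comment above spells out the contract for prep_irq_for_idle(); the platform changes further down (cell's cbe_power_save() and pseries' check_and_cede_processor()) are its first users. A minimal ppc_md.power_save implementation following the same pattern might look like the sketch below; enter_low_power_state() is a hypothetical placeholder.

/*
 * Hedged sketch of a power_save hook built on prep_irq_for_idle().
 * Illustration only; enter_low_power_state() is not a real function.
 */
static void example_power_save(void)
{
        /* Called by the idle loop with interrupts soft-disabled. */
        if (!prep_irq_for_idle())
                return;         /* something is already pending: don't nap */

        enter_low_power_state();        /* may hard-enable as a side effect */

        /*
         * The lazy-irq state now says interrupts are on, so make sure
         * MSR[EE] really is set before returning to the idle loop.
         */
        __hard_irq_enable();
}
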
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 1b488e5305c5..0794a3017b1b 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1312,7 +1312,7 @@ static struct opal_secondary_data {
 
 extern char opal_secondary_entry;
 
-static void prom_query_opal(void)
+static void __init prom_query_opal(void)
 {
         long rc;
 
@@ -1436,7 +1436,7 @@ static void __init prom_opal_hold_cpus(void)
         prom_debug("prom_opal_hold_cpus: end...\n");
 }
 
-static void prom_opal_takeover(void)
+static void __init prom_opal_takeover(void)
 {
         struct opal_secondary_data *data = &RELOC(opal_secondary_data);
         struct opal_takeover_args *args = &data->args;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index c6af1d623839..3abe1b86e583 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -268,24 +268,45 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
         return err;
 }
 
-static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
+static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
 {
+        struct kvm *kvm = vcpu->kvm;
         void *va;
         unsigned long nb;
+        unsigned long gpa;
 
-        vpap->update_pending = 0;
-        va = NULL;
-        if (vpap->next_gpa) {
-                va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
-                if (nb < vpap->len) {
-                        /*
-                         * If it's now too short, it must be that userspace
-                         * has changed the mappings underlying guest memory,
-                         * so unregister the region.
-                         */
+        /*
+         * We need to pin the page pointed to by vpap->next_gpa,
+         * but we can't call kvmppc_pin_guest_page under the lock
+         * as it does get_user_pages() and down_read().  So we
+         * have to drop the lock, pin the page, then get the lock
+         * again and check that a new area didn't get registered
+         * in the meantime.
+         */
+        for (;;) {
+                gpa = vpap->next_gpa;
+                spin_unlock(&vcpu->arch.vpa_update_lock);
+                va = NULL;
+                nb = 0;
+                if (gpa)
+                        va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
+                spin_lock(&vcpu->arch.vpa_update_lock);
+                if (gpa == vpap->next_gpa)
+                        break;
+                /* sigh... unpin that one and try again */
+                if (va)
                         kvmppc_unpin_guest_page(kvm, va);
-                        va = NULL;
-                }
+        }
+
+        vpap->update_pending = 0;
+        if (va && nb < vpap->len) {
+                /*
+                 * If it's now too short, it must be that userspace
+                 * has changed the mappings underlying guest memory,
+                 * so unregister the region.
+                 */
+                kvmppc_unpin_guest_page(kvm, va);
+                va = NULL;
         }
         if (vpap->pinned_addr)
                 kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
@@ -296,20 +317,18 @@ static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
 
 static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
 {
-        struct kvm *kvm = vcpu->kvm;
-
         spin_lock(&vcpu->arch.vpa_update_lock);
         if (vcpu->arch.vpa.update_pending) {
-                kvmppc_update_vpa(kvm, &vcpu->arch.vpa);
+                kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
                 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
         }
         if (vcpu->arch.dtl.update_pending) {
-                kvmppc_update_vpa(kvm, &vcpu->arch.dtl);
+                kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
                 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
                 vcpu->arch.dtl_index = 0;
         }
         if (vcpu->arch.slb_shadow.update_pending)
-                kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow);
+                kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
         spin_unlock(&vcpu->arch.vpa_update_lock);
 }
 
@@ -800,12 +819,39 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
         struct kvm_vcpu *vcpu, *vcpu0, *vnext;
         long ret;
         u64 now;
-        int ptid, i;
+        int ptid, i, need_vpa_update;
 
         /* don't start if any threads have a signal pending */
-        list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+        need_vpa_update = 0;
+        list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
                 if (signal_pending(vcpu->arch.run_task))
                         return 0;
+                need_vpa_update |= vcpu->arch.vpa.update_pending |
+                        vcpu->arch.slb_shadow.update_pending |
+                        vcpu->arch.dtl.update_pending;
+        }
+
+        /*
+         * Initialize *vc, in particular vc->vcore_state, so we can
+         * drop the vcore lock if necessary.
+         */
+        vc->n_woken = 0;
+        vc->nap_count = 0;
+        vc->entry_exit_count = 0;
+        vc->vcore_state = VCORE_RUNNING;
+        vc->in_guest = 0;
+        vc->napping_threads = 0;
+
+        /*
+         * Updating any of the vpas requires calling kvmppc_pin_guest_page,
+         * which can't be called with any spinlocks held.
+         */
+        if (need_vpa_update) {
+                spin_unlock(&vc->lock);
+                list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+                        kvmppc_update_vpas(vcpu);
+                spin_lock(&vc->lock);
+        }
 
         /*
          * Make sure we are running on thread 0, and that
@@ -838,20 +884,10 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
                 if (vcpu->arch.ceded)
                         vcpu->arch.ptid = ptid++;
 
-        vc->n_woken = 0;
-        vc->nap_count = 0;
-        vc->entry_exit_count = 0;
-        vc->vcore_state = VCORE_RUNNING;
         vc->stolen_tb += mftb() - vc->preempt_tb;
-        vc->in_guest = 0;
         vc->pcpu = smp_processor_id();
-        vc->napping_threads = 0;
         list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
                 kvmppc_start_thread(vcpu);
-                if (vcpu->arch.vpa.update_pending ||
-                    vcpu->arch.slb_shadow.update_pending ||
-                    vcpu->arch.dtl.update_pending)
-                        kvmppc_update_vpas(vcpu);
                 kvmppc_create_dtl_entry(vcpu, vc);
         }
         /* Grab any remaining hw threads so they can't go into the kernel */
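
The retry loop added to kvmppc_update_vpa() is an instance of a general pattern: kvmppc_pin_guest_page() may sleep (it calls get_user_pages() and down_read()), so the spinlock is dropped around it and the guarded state is re-checked once the lock is retaken. A generic, hedged sketch of that pattern follows, with hypothetical names (struct area, pin_page(), unpin_page()).

/* Drop-lock / sleepable-work / recheck pattern (illustration only). */
static void *pin_latest(struct area *a)
{
        void *page;
        unsigned long gpa;

        spin_lock(&a->lock);
        for (;;) {
                gpa = a->next_gpa;              /* snapshot under the lock */
                spin_unlock(&a->lock);
                page = gpa ? pin_page(gpa) : NULL;      /* may sleep */
                spin_lock(&a->lock);
                if (gpa == a->next_gpa)
                        break;                  /* no race: keep this page */
                if (page)
                        unpin_page(page);       /* raced: undo and retry */
        }
        spin_unlock(&a->lock);
        return page;
}
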
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index a84aafce2a12..a1044f43becd 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -810,7 +810,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
         lwz     r3,VCORE_NAPPING_THREADS(r5)
         lwz     r4,VCPU_PTID(r9)
         li      r0,1
-        sldi    r0,r0,r4
+        sld     r0,r0,r4
         andc.   r3,r3,r0                /* no sense IPI'ing ourselves */
         beq     43f
         mulli   r4,r4,PACA_SIZE         /* get paca for thread 0 */
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index 3ff9013d6e79..ee02b30878ed 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -241,6 +241,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
         case H_PUT_TCE:
                 return kvmppc_h_pr_put_tce(vcpu);
         case H_CEDE:
+                vcpu->arch.shared->msr |= MSR_EE;
                 kvm_vcpu_block(vcpu);
                 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                 vcpu->stat.halt_wakeup++;
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index b6edbb3b4a54..1e95556dc692 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -635,11 +635,11 @@ static inline int __init read_usm_ranges(const u32 **usm)
  */
 static void __init parse_drconf_memory(struct device_node *memory)
 {
-        const u32 *dm, *usm;
+        const u32 *uninitialized_var(dm), *usm;
         unsigned int n, rc, ranges, is_kexec_kdump = 0;
         unsigned long lmb_size, base, size, sz;
         int nid;
-        struct assoc_arrays aa;
+        struct assoc_arrays aa = { .arrays = NULL };
 
         n = of_get_drconf_memory(memory, &dm);
         if (!n)
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
index 55ba3855a97f..7d3a3b5619a2 100644
--- a/arch/powerpc/net/bpf_jit_64.S
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -105,6 +105,7 @@ sk_load_byte_msh_positive_offset:
         mr      r4, r_addr;                                     \
         li      r6, SIZE;                                       \
         bl      skb_copy_bits;                                  \
+        nop;                                                    \
         /* R3 = 0 on success */                                 \
         addi    r1, r1, BPF_PPC_SLOWPATH_FRAME;                 \
         ld      r0, 16(r1);                                     \
@@ -156,6 +157,7 @@ bpf_slow_path_byte_msh:
         mr      r4, r_addr;                                     \
         li      r5, SIZE;                                       \
         bl      bpf_internal_load_pointer_neg_helper;           \
+        nop;                                                    \
         /* R3 != 0 on success */                                \
         addi    r1, r1, BPF_PPC_SLOWPATH_FRAME;                 \
         ld      r0, 16(r1);                                     \
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index efdacc829576..d17e98bc0c10 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -42,11 +42,9 @@ static void cbe_power_save(void)
 {
         unsigned long ctrl, thread_switch_control;
 
-        /*
-         * We need to hard disable interrupts, the local_irq_enable() done by
-         * our caller upon return will hard re-enable.
-         */
-        hard_irq_disable();
+        /* Ensure our interrupt state is properly tracked */
+        if (!prep_irq_for_idle())
+                return;
 
         ctrl = mfspr(SPRN_CTRLF);
 
@@ -81,6 +79,9 @@ static void cbe_power_save(void)
          */
         ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
         mtspr(SPRN_CTRLT, ctrl);
+
+        /* Re-enable interrupts in MSR */
+        __hard_irq_enable();
 }
 
 static int cbe_system_reset_exception(struct pt_regs *regs)
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 0915b1ad66ce..2d311c0caf8e 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -106,7 +106,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
                 tcep++;
         }
 
-        if (tbl->it_type == TCE_PCI_SWINV_CREATE)
+        if (tbl->it_type & TCE_PCI_SWINV_CREATE)
                 tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
         return 0;
 }
@@ -121,7 +121,7 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
         while (npages--)
                 *(tcep++) = 0;
 
-        if (tbl->it_type == TCE_PCI_SWINV_FREE)
+        if (tbl->it_type & TCE_PCI_SWINV_FREE)
                 tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
 }
 
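
Both iommu hunks are the same one-character fix: it_type is a bit mask, so comparing it with == only succeeds when no other flag is set, and the software-invalidate path was otherwise silently skipped. The bitwise AND is the intended test. A trivial, self-contained illustration with made-up flag values:

#include <stdio.h>

#define FLAG_A 0x1      /* made-up stand-ins for the TCE_PCI_SWINV_* bits */
#define FLAG_B 0x2

int main(void)
{
        unsigned int type = FLAG_A | FLAG_B;

        printf("type == FLAG_A -> %d\n", type == FLAG_A);    /* 0: misses  */
        printf("type &  FLAG_A -> %d\n", !!(type & FLAG_A)); /* 1: matches */
        return 0;
}
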
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 36f957f31842..8733a86ad52e 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -68,9 +68,7 @@ static const char *pseries_nvram_os_partitions[] = {
 };
 
 static void oops_to_nvram(struct kmsg_dumper *dumper,
-                          enum kmsg_dump_reason reason,
-                          const char *old_msgs, unsigned long old_len,
-                          const char *new_msgs, unsigned long new_len);
+                          enum kmsg_dump_reason reason);
 
 static struct kmsg_dumper nvram_kmsg_dumper = {
         .dump = oops_to_nvram
@@ -504,28 +502,6 @@ int __init pSeries_nvram_init(void)
 }
 
 /*
- * Try to capture the last capture_len bytes of the printk buffer. Return
- * the amount actually captured.
- */
-static size_t capture_last_msgs(const char *old_msgs, size_t old_len,
-                                const char *new_msgs, size_t new_len,
-                                char *captured, size_t capture_len)
-{
-        if (new_len >= capture_len) {
-                memcpy(captured, new_msgs + (new_len - capture_len),
-                                                        capture_len);
-                return capture_len;
-        } else {
-                /* Grab the end of old_msgs. */
-                size_t old_tail_len = min(old_len, capture_len - new_len);
-                memcpy(captured, old_msgs + (old_len - old_tail_len),
-                                                        old_tail_len);
-                memcpy(captured + old_tail_len, new_msgs, new_len);
-                return old_tail_len + new_len;
-        }
-}
-
-/*
  * Are we using the ibm,rtas-log for oops/panic reports? And if so,
  * would logging this oops/panic overwrite an RTAS event that rtas_errd
  * hasn't had a chance to read and process? Return 1 if so, else 0.
@@ -541,27 +517,6 @@ static int clobbering_unread_rtas_event(void)
                                         NVRAM_RTAS_READ_TIMEOUT);
 }
 
-/* Squeeze out each line's <n> severity prefix. */
-static size_t elide_severities(char *buf, size_t len)
-{
-        char *in, *out, *buf_end = buf + len;
-        /* Assume a <n> at the very beginning marks the start of a line. */
-        int newline = 1;
-
-        in = out = buf;
-        while (in < buf_end) {
-                if (newline && in+3 <= buf_end &&
-                                *in == '<' && isdigit(in[1]) && in[2] == '>') {
-                        in += 3;
-                        newline = 0;
-                } else {
-                        newline = (*in == '\n');
-                        *out++ = *in++;
-                }
-        }
-        return out - buf;
-}
-
 /* Derived from logfs_compress() */
 static int nvram_compress(const void *in, void *out, size_t inlen,
                         size_t outlen)
@@ -619,9 +574,7 @@ static int zip_oops(size_t text_len)
  * partition. If that's too much, go back and capture uncompressed text.
  */
 static void oops_to_nvram(struct kmsg_dumper *dumper,
-                          enum kmsg_dump_reason reason,
-                          const char *old_msgs, unsigned long old_len,
-                          const char *new_msgs, unsigned long new_len)
+                          enum kmsg_dump_reason reason)
 {
         static unsigned int oops_count = 0;
         static bool panicking = false;
@@ -660,14 +613,14 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
                 return;
 
         if (big_oops_buf) {
-                text_len = capture_last_msgs(old_msgs, old_len,
-                        new_msgs, new_len, big_oops_buf, big_oops_buf_sz);
-                text_len = elide_severities(big_oops_buf, text_len);
+                kmsg_dump_get_buffer(dumper, false,
+                                     big_oops_buf, big_oops_buf_sz, &text_len);
                 rc = zip_oops(text_len);
         }
         if (rc != 0) {
-                text_len = capture_last_msgs(old_msgs, old_len,
-                        new_msgs, new_len, oops_data, oops_data_sz);
+                kmsg_dump_rewind(dumper);
+                kmsg_dump_get_buffer(dumper, true,
+                                     oops_data, oops_data_sz, &text_len);
                 err_type = ERR_TYPE_KERNEL_PANIC;
                 *oops_len = (u16) text_len;
         }
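
oops_to_nvram() is converted to the kmsg_dump interface this merge brings in: the callback now takes only the dump reason, and the log text is fetched with kmsg_dump_get_buffer(), with kmsg_dump_rewind() used before re-reading it uncompressed. A skeletal dumper using the same calls might look like the sketch below; my_buf and my_dump are hypothetical names.

#include <linux/kmsg_dump.h>

static char my_buf[4096];       /* placeholder destination buffer */

static void my_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
{
        size_t len;

        if (reason != KMSG_DUMP_OOPS && reason != KMSG_DUMP_PANIC)
                return;

        /* Copy as much of the end of the kernel log as fits in my_buf. */
        kmsg_dump_get_buffer(dumper, false, my_buf, sizeof(my_buf), &len);

        /* ... write my_buf[0..len) to persistent storage here ... */
}

static struct kmsg_dumper my_dumper = {
        .dump = my_dump,
};
/* registered elsewhere with kmsg_dump_register(&my_dumper) */
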
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index 41a34bc4a9a2..c71be66bd5dc 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -99,15 +99,18 @@ out:
 static void check_and_cede_processor(void)
 {
         /*
-         * Interrupts are soft-disabled at this point,
-         * but not hard disabled. So an interrupt might have
-         * occurred before entering NAP, and would be potentially
-         * lost (edge events, decrementer events, etc...) unless
-         * we first hard disable then check.
+         * Ensure our interrupt state is properly tracked,
+         * also checks if no interrupt has occurred while we
+         * were soft-disabled
          */
-        hard_irq_disable();
-        if (get_paca()->irq_happened == 0)
+        if (prep_irq_for_idle()) {
                 cede_processor();
+#ifdef CONFIG_TRACE_IRQFLAGS
+                /* Ensure that H_CEDE returns with IRQs on */
+                if (WARN_ON(!(mfmsr() & MSR_EE)))
+                        __hard_irq_enable();
+#endif
+        }
 }
 
 static int dedicated_cede_loop(struct cpuidle_device *dev,
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 0f3ab06d2222..eab3492a45c5 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -971,7 +971,7 @@ static int cpu_cmd(void)
         /* print cpus waiting or in xmon */
         printf("cpus stopped:");
         count = 0;
-        for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+        for_each_possible_cpu(cpu) {
                 if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
                         if (count == 0)
                                 printf(" %x", cpu);