path: root/arch/powerpc
author	Grant Likely <grant.likely@secretlab.ca>	2012-07-11 11:08:35 -0400
committer	Grant Likely <grant.likely@secretlab.ca>	2012-07-11 11:08:35 -0400
commit	80c1834fc86c2bbacb54a8fc3c04a8b0066b0996 (patch)
tree	8200248706960af8b779e9144f5b51c670602228 /arch/powerpc
parent	22076c7712be29a602de45b1c573f31adbd428a9 (diff)
parent	bd0a521e88aa7a06ae7aabaed7ae196ed4ad867a (diff)
Merge tag 'v3.5-rc6' into irqdomain/next
Linux 3.5-rc6
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/hw_irq.h	8
-rw-r--r--	arch/powerpc/kernel/entry_64.S	97
-rw-r--r--	arch/powerpc/kernel/irq.c	2
-rw-r--r--	arch/powerpc/kernel/prom_init.c	4
-rw-r--r--	arch/powerpc/kvm/book3s_hv.c	96
-rw-r--r--	arch/powerpc/kvm/book3s_hv_rmhandlers.S	2
-rw-r--r--	arch/powerpc/mm/numa.c	2
-rw-r--r--	arch/powerpc/net/bpf_jit_64.S	2
-rw-r--r--	arch/powerpc/platforms/pseries/iommu.c	4
-rw-r--r--	arch/powerpc/platforms/pseries/nvram.c	61
-rw-r--r--	arch/powerpc/platforms/pseries/processor_idle.c	2
-rw-r--r--	arch/powerpc/xmon/xmon.c	2
12 files changed, 132 insertions(+), 150 deletions(-)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index c9aac24b02e2..6eb75b80488c 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -100,6 +100,14 @@ static inline void hard_irq_disable(void)
 	get_paca()->irq_happened |= PACA_IRQ_HARD_DIS;
 }
 
+/* include/linux/interrupt.h needs hard_irq_disable to be a macro */
+#define hard_irq_disable	hard_irq_disable
+
+static inline bool lazy_irq_pending(void)
+{
+	return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
+}
+
 /*
  * This is called by asynchronous interrupts to conditionally
  * re-enable hard interrupts when soft-disabled after having
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index ed1718feb9d9..5971c85df136 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -558,27 +558,54 @@ _GLOBAL(ret_from_except_lite)
 	mtmsrd	r10,1		  /* Update machine state */
 #endif /* CONFIG_PPC_BOOK3E */
 
-#ifdef CONFIG_PREEMPT
 	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
-	li	r0,_TIF_NEED_RESCHED	/* bits to check */
 	ld	r3,_MSR(r1)
 	ld	r4,TI_FLAGS(r9)
-	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
-	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
-	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
-	bne	do_work
-
-#else /* !CONFIG_PREEMPT */
-	ld	r3,_MSR(r1)	/* Returning to user mode? */
 	andi.	r3,r3,MSR_PR
-	beq	restore		/* if not, just restore regs and return */
+	beq	resume_kernel
 
 	/* Check current_thread_info()->flags */
+	andi.	r0,r4,_TIF_USER_WORK_MASK
+	beq	restore
+
+	andi.	r0,r4,_TIF_NEED_RESCHED
+	beq	1f
+	bl	.restore_interrupts
+	bl	.schedule
+	b	.ret_from_except_lite
+
+1:	bl	.save_nvgprs
+	bl	.restore_interrupts
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.do_notify_resume
+	b	.ret_from_except
+
+resume_kernel:
+#ifdef CONFIG_PREEMPT
+	/* Check if we need to preempt */
+	andi.	r0,r4,_TIF_NEED_RESCHED
+	beq+	restore
+	/* Check that preempt_count() == 0 and interrupts are enabled */
+	lwz	r8,TI_PREEMPT(r9)
+	cmpwi	cr1,r8,0
+	ld	r0,SOFTE(r1)
+	cmpdi	r0,0
+	crandc	eq,cr1*4+eq,eq
+	bne	restore
+
+	/*
+	 * Here we are preempting the current task. We want to make
+	 * sure we are soft-disabled first
+	 */
+	SOFT_DISABLE_INTS(r3,r4)
+1:	bl	.preempt_schedule_irq
+
+	/* Re-test flags and eventually loop */
 	clrrdi	r9,r1,THREAD_SHIFT
 	ld	r4,TI_FLAGS(r9)
-	andi.	r0,r4,_TIF_USER_WORK_MASK
-	bne	do_work
-#endif /* !CONFIG_PREEMPT */
+	andi.	r0,r4,_TIF_NEED_RESCHED
+	bne	1b
+#endif /* CONFIG_PREEMPT */
 
 	.globl	fast_exc_return_irq
 fast_exc_return_irq:
@@ -759,50 +786,6 @@ restore_check_irq_replay:
 #endif /* CONFIG_PPC_BOOK3E */
 1:	b	.ret_from_except /* What else to do here ? */
 
-
-
-3:
-do_work:
-#ifdef CONFIG_PREEMPT
-	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
-	bne	user_work
-	/* Check that preempt_count() == 0 and interrupts are enabled */
-	lwz	r8,TI_PREEMPT(r9)
-	cmpwi	cr1,r8,0
-	ld	r0,SOFTE(r1)
-	cmpdi	r0,0
-	crandc	eq,cr1*4+eq,eq
-	bne	restore
-
-	/*
-	 * Here we are preempting the current task. We want to make
-	 * sure we are soft-disabled first
-	 */
-	SOFT_DISABLE_INTS(r3,r4)
-1:	bl	.preempt_schedule_irq
-
-	/* Re-test flags and eventually loop */
-	clrrdi	r9,r1,THREAD_SHIFT
-	ld	r4,TI_FLAGS(r9)
-	andi.	r0,r4,_TIF_NEED_RESCHED
-	bne	1b
-	b	restore
-
-user_work:
-#endif /* CONFIG_PREEMPT */
-
-	andi.	r0,r4,_TIF_NEED_RESCHED
-	beq	1f
-	bl	.restore_interrupts
-	bl	.schedule
-	b	.ret_from_except_lite
-
-1:	bl	.save_nvgprs
-	bl	.restore_interrupts
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_notify_resume
-	b	.ret_from_except
-
 unrecov_restore:
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.unrecoverable_exception
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 7835a5e1ea5f..1b415027ec0e 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -277,7 +277,7 @@ EXPORT_SYMBOL(arch_local_irq_restore);
  * NOTE: This is called with interrupts hard disabled but not marked
  * as such in paca->irq_happened, so we need to resync this.
  */
-void restore_interrupts(void)
+void notrace restore_interrupts(void)
 {
 	if (irqs_disabled()) {
 		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 1b488e5305c5..0794a3017b1b 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1312,7 +1312,7 @@ static struct opal_secondary_data {
 
 extern char opal_secondary_entry;
 
-static void prom_query_opal(void)
+static void __init prom_query_opal(void)
 {
 	long rc;
 
@@ -1436,7 +1436,7 @@ static void __init prom_opal_hold_cpus(void)
 	prom_debug("prom_opal_hold_cpus: end...\n");
 }
 
-static void prom_opal_takeover(void)
+static void __init prom_opal_takeover(void)
 {
 	struct opal_secondary_data *data = &RELOC(opal_secondary_data);
 	struct opal_takeover_args *args = &data->args;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index c6af1d623839..3abe1b86e583 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -268,24 +268,45 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
 	return err;
 }
 
-static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
+static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
 {
+	struct kvm *kvm = vcpu->kvm;
 	void *va;
 	unsigned long nb;
+	unsigned long gpa;
 
-	vpap->update_pending = 0;
-	va = NULL;
-	if (vpap->next_gpa) {
-		va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
-		if (nb < vpap->len) {
-			/*
-			 * If it's now too short, it must be that userspace
-			 * has changed the mappings underlying guest memory,
-			 * so unregister the region.
-			 */
+	/*
+	 * We need to pin the page pointed to by vpap->next_gpa,
+	 * but we can't call kvmppc_pin_guest_page under the lock
+	 * as it does get_user_pages() and down_read(). So we
+	 * have to drop the lock, pin the page, then get the lock
+	 * again and check that a new area didn't get registered
+	 * in the meantime.
+	 */
+	for (;;) {
+		gpa = vpap->next_gpa;
+		spin_unlock(&vcpu->arch.vpa_update_lock);
+		va = NULL;
+		nb = 0;
+		if (gpa)
+			va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
+		spin_lock(&vcpu->arch.vpa_update_lock);
+		if (gpa == vpap->next_gpa)
+			break;
+		/* sigh... unpin that one and try again */
+		if (va)
 			kvmppc_unpin_guest_page(kvm, va);
-			va = NULL;
-		}
+	}
+
+	vpap->update_pending = 0;
+	if (va && nb < vpap->len) {
+		/*
+		 * If it's now too short, it must be that userspace
+		 * has changed the mappings underlying guest memory,
+		 * so unregister the region.
+		 */
+		kvmppc_unpin_guest_page(kvm, va);
+		va = NULL;
 	}
 	if (vpap->pinned_addr)
 		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
@@ -296,20 +317,18 @@ static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
 
 static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
 {
-	struct kvm *kvm = vcpu->kvm;
-
 	spin_lock(&vcpu->arch.vpa_update_lock);
 	if (vcpu->arch.vpa.update_pending) {
-		kvmppc_update_vpa(kvm, &vcpu->arch.vpa);
+		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
 		init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
 	}
 	if (vcpu->arch.dtl.update_pending) {
-		kvmppc_update_vpa(kvm, &vcpu->arch.dtl);
+		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
 		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
 		vcpu->arch.dtl_index = 0;
 	}
 	if (vcpu->arch.slb_shadow.update_pending)
-		kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow);
+		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
 	spin_unlock(&vcpu->arch.vpa_update_lock);
 }
 
@@ -800,12 +819,39 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
 	struct kvm_vcpu *vcpu, *vcpu0, *vnext;
 	long ret;
 	u64 now;
-	int ptid, i;
+	int ptid, i, need_vpa_update;
 
 	/* don't start if any threads have a signal pending */
-	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+	need_vpa_update = 0;
+	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
 		if (signal_pending(vcpu->arch.run_task))
 			return 0;
+		need_vpa_update |= vcpu->arch.vpa.update_pending |
+			vcpu->arch.slb_shadow.update_pending |
+			vcpu->arch.dtl.update_pending;
+	}
+
+	/*
+	 * Initialize *vc, in particular vc->vcore_state, so we can
+	 * drop the vcore lock if necessary.
+	 */
+	vc->n_woken = 0;
+	vc->nap_count = 0;
+	vc->entry_exit_count = 0;
+	vc->vcore_state = VCORE_RUNNING;
+	vc->in_guest = 0;
+	vc->napping_threads = 0;
+
+	/*
+	 * Updating any of the vpas requires calling kvmppc_pin_guest_page,
+	 * which can't be called with any spinlocks held.
+	 */
+	if (need_vpa_update) {
+		spin_unlock(&vc->lock);
+		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+			kvmppc_update_vpas(vcpu);
+		spin_lock(&vc->lock);
+	}
 
 	/*
 	 * Make sure we are running on thread 0, and that
@@ -838,20 +884,10 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
 		if (vcpu->arch.ceded)
 			vcpu->arch.ptid = ptid++;
 
-	vc->n_woken = 0;
-	vc->nap_count = 0;
-	vc->entry_exit_count = 0;
-	vc->vcore_state = VCORE_RUNNING;
 	vc->stolen_tb += mftb() - vc->preempt_tb;
-	vc->in_guest = 0;
 	vc->pcpu = smp_processor_id();
-	vc->napping_threads = 0;
 	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
 		kvmppc_start_thread(vcpu);
-		if (vcpu->arch.vpa.update_pending ||
-		    vcpu->arch.slb_shadow.update_pending ||
-		    vcpu->arch.dtl.update_pending)
-			kvmppc_update_vpas(vcpu);
 		kvmppc_create_dtl_entry(vcpu, vc);
 	}
 	/* Grab any remaining hw threads so they can't go into the kernel */
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index a84aafce2a12..a1044f43becd 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -810,7 +810,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	lwz	r3,VCORE_NAPPING_THREADS(r5)
 	lwz	r4,VCPU_PTID(r9)
 	li	r0,1
-	sldi	r0,r0,r4
+	sld	r0,r0,r4
 	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
 	beq	43f
 	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index b6edbb3b4a54..6e8f677f5646 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -635,7 +635,7 @@ static inline int __init read_usm_ranges(const u32 **usm)
  */
 static void __init parse_drconf_memory(struct device_node *memory)
 {
-	const u32 *dm, *usm;
+	const u32 *uninitialized_var(dm), *usm;
 	unsigned int n, rc, ranges, is_kexec_kdump = 0;
 	unsigned long lmb_size, base, size, sz;
 	int nid;
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
index 55ba3855a97f..7d3a3b5619a2 100644
--- a/arch/powerpc/net/bpf_jit_64.S
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -105,6 +105,7 @@ sk_load_byte_msh_positive_offset:
 	mr	r4, r_addr;					\
 	li	r6, SIZE;					\
 	bl	skb_copy_bits;					\
+	nop;							\
 	/* R3 = 0 on success */				\
 	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;		\
 	ld	r0, 16(r1);					\
@@ -156,6 +157,7 @@ bpf_slow_path_byte_msh:
 	mr	r4, r_addr;					\
 	li	r5, SIZE;					\
 	bl	bpf_internal_load_pointer_neg_helper;		\
+	nop;							\
 	/* R3 != 0 on success */				\
 	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;		\
 	ld	r0, 16(r1);					\
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index aab5fbc924e6..6b58a395dff6 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -106,7 +106,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
 		tcep++;
 	}
 
-	if (tbl->it_type == TCE_PCI_SWINV_CREATE)
+	if (tbl->it_type & TCE_PCI_SWINV_CREATE)
 		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
 	return 0;
 }
@@ -121,7 +121,7 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
 	while (npages--)
 		*(tcep++) = 0;
 
-	if (tbl->it_type == TCE_PCI_SWINV_FREE)
+	if (tbl->it_type & TCE_PCI_SWINV_FREE)
 		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
 }
 
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 36f957f31842..8733a86ad52e 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -68,9 +68,7 @@ static const char *pseries_nvram_os_partitions[] = {
 };
 
 static void oops_to_nvram(struct kmsg_dumper *dumper,
-			enum kmsg_dump_reason reason,
-			const char *old_msgs, unsigned long old_len,
-			const char *new_msgs, unsigned long new_len);
+			enum kmsg_dump_reason reason);
 
 static struct kmsg_dumper nvram_kmsg_dumper = {
 	.dump = oops_to_nvram
@@ -504,28 +502,6 @@ int __init pSeries_nvram_init(void)
 }
 
 /*
- * Try to capture the last capture_len bytes of the printk buffer. Return
- * the amount actually captured.
- */
-static size_t capture_last_msgs(const char *old_msgs, size_t old_len,
-				const char *new_msgs, size_t new_len,
-				char *captured, size_t capture_len)
-{
-	if (new_len >= capture_len) {
-		memcpy(captured, new_msgs + (new_len - capture_len),
-								capture_len);
-		return capture_len;
-	} else {
-		/* Grab the end of old_msgs. */
-		size_t old_tail_len = min(old_len, capture_len - new_len);
-		memcpy(captured, old_msgs + (old_len - old_tail_len),
-								old_tail_len);
-		memcpy(captured + old_tail_len, new_msgs, new_len);
-		return old_tail_len + new_len;
-	}
-}
-
-/*
  * Are we using the ibm,rtas-log for oops/panic reports? And if so,
  * would logging this oops/panic overwrite an RTAS event that rtas_errd
  * hasn't had a chance to read and process? Return 1 if so, else 0.
@@ -541,27 +517,6 @@ static int clobbering_unread_rtas_event(void)
 					NVRAM_RTAS_READ_TIMEOUT);
 }
 
-/* Squeeze out each line's <n> severity prefix. */
-static size_t elide_severities(char *buf, size_t len)
-{
-	char *in, *out, *buf_end = buf + len;
-	/* Assume a <n> at the very beginning marks the start of a line. */
-	int newline = 1;
-
-	in = out = buf;
-	while (in < buf_end) {
-		if (newline && in+3 <= buf_end &&
-				*in == '<' && isdigit(in[1]) && in[2] == '>') {
-			in += 3;
-			newline = 0;
-		} else {
-			newline = (*in == '\n');
-			*out++ = *in++;
-		}
-	}
-	return out - buf;
-}
-
 /* Derived from logfs_compress() */
 static int nvram_compress(const void *in, void *out, size_t inlen,
 				size_t outlen)
@@ -619,9 +574,7 @@ static int zip_oops(size_t text_len)
  * partition. If that's too much, go back and capture uncompressed text.
  */
 static void oops_to_nvram(struct kmsg_dumper *dumper,
-			enum kmsg_dump_reason reason,
-			const char *old_msgs, unsigned long old_len,
-			const char *new_msgs, unsigned long new_len)
+			enum kmsg_dump_reason reason)
 {
 	static unsigned int oops_count = 0;
 	static bool panicking = false;
@@ -660,14 +613,14 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
 		return;
 
 	if (big_oops_buf) {
-		text_len = capture_last_msgs(old_msgs, old_len,
-			new_msgs, new_len, big_oops_buf, big_oops_buf_sz);
-		text_len = elide_severities(big_oops_buf, text_len);
+		kmsg_dump_get_buffer(dumper, false,
+				     big_oops_buf, big_oops_buf_sz, &text_len);
 		rc = zip_oops(text_len);
 	}
 	if (rc != 0) {
-		text_len = capture_last_msgs(old_msgs, old_len,
-			new_msgs, new_len, oops_data, oops_data_sz);
+		kmsg_dump_rewind(dumper);
+		kmsg_dump_get_buffer(dumper, true,
+				     oops_data, oops_data_sz, &text_len);
 		err_type = ERR_TYPE_KERNEL_PANIC;
 		*oops_len = (u16) text_len;
 	}
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index 41a34bc4a9a2..e61483e8e960 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -106,7 +106,7 @@ static void check_and_cede_processor(void)
 	 * we first hard disable then check.
 	 */
 	hard_irq_disable();
-	if (get_paca()->irq_happened == 0)
+	if (!lazy_irq_pending())
 		cede_processor();
 }
 
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 0f3ab06d2222..eab3492a45c5 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -971,7 +971,7 @@ static int cpu_cmd(void)
 	/* print cpus waiting or in xmon */
 	printf("cpus stopped:");
 	count = 0;
-	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+	for_each_possible_cpu(cpu) {
 		if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
 			if (count == 0)
 				printf(" %x", cpu);