Diffstat (limited to 'arch/powerpc')
 arch/powerpc/boot/Makefile            |   4
 arch/powerpc/boot/epapr-wrapper.c     |   9
 arch/powerpc/boot/epapr.c             |   4
 arch/powerpc/boot/of.c                |  16
 arch/powerpc/boot/wrapper             |   9
 arch/powerpc/include/asm/irq.h        |   4
 arch/powerpc/include/asm/processor.h  |   4
 arch/powerpc/kernel/asm-offsets.c     |   3
 arch/powerpc/kernel/iommu.c           |   2
 arch/powerpc/kernel/irq.c             | 100
 arch/powerpc/kernel/misc_32.S         |  25
 arch/powerpc/kernel/misc_64.S         |  10
 arch/powerpc/kernel/process.c         |   3
 arch/powerpc/kernel/prom_init.c       |  21
 arch/powerpc/kernel/sysfs.c           |  18
 arch/powerpc/kernel/tm.S              |  95
 arch/powerpc/kernel/vio.c             |  12
 arch/powerpc/lib/checksum_64.S        |  58
 arch/powerpc/lib/sstep.c              |   3
 arch/powerpc/mm/init_64.c             |   4
 arch/powerpc/mm/mem.c                 |   9
 arch/powerpc/perf/power8-pmu.c        |   5
 arch/powerpc/platforms/pseries/smp.c  |  26
 23 files changed, 294 insertions(+), 150 deletions(-)
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 6a15c968d214..15ca2255f438 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -74,7 +74,7 @@ src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c
 src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c
 src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c
 
-src-plat-y := of.c
+src-plat-y := of.c epapr.c
 src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \
				treeboot-walnut.c cuboot-acadia.c \
				cuboot-kilauea.c simpleboot.c \
@@ -97,7 +97,7 @@ src-plat-$(CONFIG_EMBEDDED6xx) += cuboot-pq2.c cuboot-mpc7448hpc2.c \
				prpmc2800.c
 src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c
 src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c
-src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c
+src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
 
 src-wlib := $(sort $(src-wlib-y))
 src-plat := $(sort $(src-plat-y))
diff --git a/arch/powerpc/boot/epapr-wrapper.c b/arch/powerpc/boot/epapr-wrapper.c
new file mode 100644
index 000000000000..c10191006673
--- /dev/null
+++ b/arch/powerpc/boot/epapr-wrapper.c
@@ -0,0 +1,9 @@
+extern void epapr_platform_init(unsigned long r3, unsigned long r4,
+				unsigned long r5, unsigned long r6,
+				unsigned long r7);
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+		   unsigned long r6, unsigned long r7)
+{
+	epapr_platform_init(r3, r4, r5, r6, r7);
+}
diff --git a/arch/powerpc/boot/epapr.c b/arch/powerpc/boot/epapr.c
index 06c1961bd124..02e91aa2194a 100644
--- a/arch/powerpc/boot/epapr.c
+++ b/arch/powerpc/boot/epapr.c
@@ -48,8 +48,8 @@ static void platform_fixups(void)
 		fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size);
 }
 
-void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
 		   unsigned long r6, unsigned long r7)
 {
 	epapr_magic = r6;
 	ima_size = r7;
diff --git a/arch/powerpc/boot/of.c b/arch/powerpc/boot/of.c
index 61d9899aa0d0..62e2f43ec1df 100644
--- a/arch/powerpc/boot/of.c
+++ b/arch/powerpc/boot/of.c
@@ -26,6 +26,9 @@
 
 static unsigned long claim_base;
 
+void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+			 unsigned long r6, unsigned long r7);
+
 static void *of_try_claim(unsigned long size)
 {
 	unsigned long addr = 0;
@@ -61,7 +64,7 @@ static void of_image_hdr(const void *hdr)
 	}
 }
 
-void platform_init(unsigned long a1, unsigned long a2, void *promptr)
+static void of_platform_init(unsigned long a1, unsigned long a2, void *promptr)
 {
 	platform_ops.image_hdr = of_image_hdr;
 	platform_ops.malloc = of_try_claim;
@@ -81,3 +84,14 @@ void platform_init(unsigned long a1, unsigned long a2, void *promptr)
 		loader_info.initrd_size = a2;
 	}
 }
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+		   unsigned long r6, unsigned long r7)
+{
+	/* Detect OF vs. ePAPR boot */
+	if (r5)
+		of_platform_init(r3, r4, (void *)r5);
+	else
+		epapr_platform_init(r3, r4, r5, r6, r7);
+}
+
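For context, a minimal standalone sketch of the entry convention the of.c change relies on: a single platform_init() receives the raw register arguments and dispatches on r5, which is the Open Firmware client-interface pointer when booted from OF and zero under an ePAPR boot. The back-end functions, prints, and argument values below are hypothetical stand-ins, not the real wrapper code.

```c
#include <stdio.h>

/* Hypothetical stand-ins for the real wrapper back-ends. */
static void of_platform_init(unsigned long a1, unsigned long a2, void *promptr)
{
	printf("OF boot: promptr=%p initrd=0x%lx/0x%lx\n", promptr, a1, a2);
}

static void epapr_platform_init(unsigned long r3, unsigned long r4,
				unsigned long r5, unsigned long r6,
				unsigned long r7)
{
	printf("ePAPR boot: fdt=0x%lx magic=0x%lx ima_size=0x%lx\n", r3, r6, r7);
}

/* Single entry point: r5 is the OF client-interface pointer, or 0 for ePAPR. */
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		   unsigned long r6, unsigned long r7)
{
	if (r5)
		of_platform_init(r3, r4, (void *)r5);
	else
		epapr_platform_init(r3, r4, r5, r6, r7);
}

int main(void)
{
	/* Illustrative values only. */
	platform_init(0x1000, 0, 0, 0x45504150, 0x800000);
	return 0;
}
```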
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 6761c746048d..cd7af841ba05 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -148,18 +148,18 @@ make_space=y
 
 case "$platform" in
 pseries)
-    platformo=$object/of.o
+    platformo="$object/of.o $object/epapr.o"
     link_address='0x4000000'
     ;;
 maple)
-    platformo=$object/of.o
+    platformo="$object/of.o $object/epapr.o"
     link_address='0x400000'
     ;;
 pmac|chrp)
-    platformo=$object/of.o
+    platformo="$object/of.o $object/epapr.o"
     ;;
 coff)
-    platformo="$object/crt0.o $object/of.o"
+    platformo="$object/crt0.o $object/of.o $object/epapr.o"
     lds=$object/zImage.coff.lds
     link_address='0x500000'
     pie=
@@ -253,6 +253,7 @@ treeboot-iss4xx-mpic)
     platformo="$object/treeboot-iss4xx.o"
     ;;
 epapr)
+    platformo="$object/epapr.o $object/epapr-wrapper.o"
     link_address='0x20000000'
     pie=-pie
     ;;
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 0e40843a1c6e..41f13cec8a8f 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -69,9 +69,9 @@ extern struct thread_info *softirq_ctx[NR_CPUS];
 
 extern void irq_ctx_init(void);
 extern void call_do_softirq(struct thread_info *tp);
-extern int call_handle_irq(int irq, void *p1,
-			   struct thread_info *tp, void *func);
+extern void call_do_irq(struct pt_regs *regs, struct thread_info *tp);
 extern void do_IRQ(struct pt_regs *regs);
+extern void __do_irq(struct pt_regs *regs);
 
 int irq_choose_cpu(const struct cpumask *mask);
 
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index e378cccfca55..ce4de5aed7b5 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -149,8 +149,6 @@ typedef struct {
 
 struct thread_struct {
 	unsigned long	ksp;		/* Kernel stack pointer */
-	unsigned long	ksp_limit;	/* if ksp <= ksp_limit stack overflow */
-
 #ifdef CONFIG_PPC64
 	unsigned long	ksp_vsid;
 #endif
@@ -162,6 +160,7 @@ struct thread_struct {
 #endif
 #ifdef CONFIG_PPC32
 	void		*pgdir;		/* root of page-table tree */
+	unsigned long	ksp_limit;	/* if ksp <= ksp_limit stack overflow */
 #endif
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 	/*
@@ -321,7 +320,6 @@ struct thread_struct {
 #else
 #define INIT_THREAD  { \
 	.ksp = INIT_SP, \
-	.ksp_limit = INIT_SP_LIMIT, \
 	.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
 	.fs = KERNEL_DS, \
 	.fpr = {{0}}, \
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index d8958be5f31a..502c7a4e73f7 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -80,10 +80,11 @@ int main(void)
 	DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr));
 #else
 	DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
+	DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
+	DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
 #endif /* CONFIG_PPC64 */
 
 	DEFINE(KSP, offsetof(struct thread_struct, ksp));
-	DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
 	DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE
 	DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0]));
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 0adab06ce5c0..572bb5b95f35 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -661,7 +661,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 	/* number of bytes needed for the bitmap */
 	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
 
-	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
+	page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
 	if (!page)
 		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
 	tbl->it_map = page_address(page);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index c69440cef7af..57d286a78f86 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -441,50 +441,6 @@ void migrate_irqs(void)
 }
 #endif
 
-static inline void handle_one_irq(unsigned int irq)
-{
-	struct thread_info *curtp, *irqtp;
-	unsigned long saved_sp_limit;
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
-	if (!desc)
-		return;
-
-	/* Switch to the irq stack to handle this */
-	curtp = current_thread_info();
-	irqtp = hardirq_ctx[smp_processor_id()];
-
-	if (curtp == irqtp) {
-		/* We're already on the irq stack, just handle it */
-		desc->handle_irq(irq, desc);
-		return;
-	}
-
-	saved_sp_limit = current->thread.ksp_limit;
-
-	irqtp->task = curtp->task;
-	irqtp->flags = 0;
-
-	/* Copy the softirq bits in preempt_count so that the
-	 * softirq checks work in the hardirq context. */
-	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
-		(curtp->preempt_count & SOFTIRQ_MASK);
-
-	current->thread.ksp_limit = (unsigned long)irqtp +
-		_ALIGN_UP(sizeof(struct thread_info), 16);
-
-	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
-	current->thread.ksp_limit = saved_sp_limit;
-	irqtp->task = NULL;
-
-	/* Set any flag that may have been set on the
-	 * alternate stack
-	 */
-	if (irqtp->flags)
-		set_bits(irqtp->flags, &curtp->flags);
-}
-
 static inline void check_stack_overflow(void)
 {
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -501,9 +457,9 @@ static inline void check_stack_overflow(void)
 #endif
 }
 
-void do_IRQ(struct pt_regs *regs)
+void __do_irq(struct pt_regs *regs)
 {
-	struct pt_regs *old_regs = set_irq_regs(regs);
+	struct irq_desc *desc;
 	unsigned int irq;
 
 	irq_enter();
@@ -519,18 +475,56 @@ void do_IRQ(struct pt_regs *regs)
 	 */
 	irq = ppc_md.get_irq();
 
-	/* We can hard enable interrupts now */
+	/* We can hard enable interrupts now to allow perf interrupts */
 	may_hard_irq_enable();
 
 	/* And finally process it */
-	if (irq != NO_IRQ)
-		handle_one_irq(irq);
-	else
+	if (unlikely(irq == NO_IRQ))
 		__get_cpu_var(irq_stat).spurious_irqs++;
+	else {
+		desc = irq_to_desc(irq);
+		if (likely(desc))
+			desc->handle_irq(irq, desc);
+	}
 
 	trace_irq_exit(regs);
 
 	irq_exit();
+}
+
+void do_IRQ(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+	struct thread_info *curtp, *irqtp;
+
+	/* Switch to the irq stack to handle this */
+	curtp = current_thread_info();
+	irqtp = hardirq_ctx[raw_smp_processor_id()];
+
+	/* Already there ? */
+	if (unlikely(curtp == irqtp)) {
+		__do_irq(regs);
+		set_irq_regs(old_regs);
+		return;
+	}
+
+	/* Prepare the thread_info in the irq stack */
+	irqtp->task = curtp->task;
+	irqtp->flags = 0;
+
+	/* Copy the preempt_count so that the [soft]irq checks work. */
+	irqtp->preempt_count = curtp->preempt_count;
+
+	/* Switch stack and call */
+	call_do_irq(regs, irqtp);
+
+	/* Restore stack limit */
+	irqtp->task = NULL;
+
+	/* Copy back updates to the thread_info */
+	if (irqtp->flags)
+		set_bits(irqtp->flags, &curtp->flags);
+
 	set_irq_regs(old_regs);
 }
 
@@ -592,28 +586,22 @@ void irq_ctx_init(void)
 		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
 		tp = softirq_ctx[i];
 		tp->cpu = i;
-		tp->preempt_count = 0;
 
 		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
 		tp = hardirq_ctx[i];
 		tp->cpu = i;
-		tp->preempt_count = HARDIRQ_OFFSET;
 	}
 }
 
 static inline void do_softirq_onstack(void)
 {
 	struct thread_info *curtp, *irqtp;
-	unsigned long saved_sp_limit = current->thread.ksp_limit;
 
 	curtp = current_thread_info();
 	irqtp = softirq_ctx[smp_processor_id()];
 	irqtp->task = curtp->task;
 	irqtp->flags = 0;
-	current->thread.ksp_limit = (unsigned long)irqtp +
-				    _ALIGN_UP(sizeof(struct thread_info), 16);
 	call_do_softirq(irqtp);
-	current->thread.ksp_limit = saved_sp_limit;
 	irqtp->task = NULL;
 
 	/* Set any flag that may have been set on the
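The do_IRQ()/__do_irq() split above follows a common "run the handler on a dedicated stack" shape: the outer function only prepares the alternate stack and switches to it, while the inner function does the actual interrupt processing. A rough userspace analogy using the POSIX ucontext API is sketched below; it only illustrates the control flow, not the kernel mechanism, and the names and stack size are made up for the example.

```c
#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

#define IRQ_STACK_SIZE (64 * 1024)

static ucontext_t main_ctx, irq_ctx;

/* Analogue of __do_irq(): the real work, already running on the alternate stack. */
static void inner_handler(void)
{
	printf("handling the event on the dedicated stack\n");
}

/* Analogue of do_IRQ(): set up the alternate stack, switch to it, then resume. */
static void handle_on_alt_stack(void *stack)
{
	getcontext(&irq_ctx);
	irq_ctx.uc_stack.ss_sp = stack;
	irq_ctx.uc_stack.ss_size = IRQ_STACK_SIZE;
	irq_ctx.uc_link = &main_ctx;          /* return here when done */
	makecontext(&irq_ctx, inner_handler, 0);
	swapcontext(&main_ctx, &irq_ctx);     /* loosely: call_do_irq() */
}

int main(void)
{
	void *stack = malloc(IRQ_STACK_SIZE);
	if (!stack)
		return 1;
	handle_on_alt_stack(stack);
	free(stack);
	return 0;
}
```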
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 777d999f563b..2b0ad9845363 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -36,26 +36,41 @@
 
 	.text
 
+/*
+ * We store the saved ksp_limit in the unused part
+ * of the STACK_FRAME_OVERHEAD
+ */
 _GLOBAL(call_do_softirq)
 	mflr	r0
 	stw	r0,4(r1)
+	lwz	r10,THREAD+KSP_LIMIT(r2)
+	addi	r11,r3,THREAD_INFO_GAP
 	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
 	mr	r1,r3
+	stw	r10,8(r1)
+	stw	r11,THREAD+KSP_LIMIT(r2)
 	bl	__do_softirq
+	lwz	r10,8(r1)
 	lwz	r1,0(r1)
 	lwz	r0,4(r1)
+	stw	r10,THREAD+KSP_LIMIT(r2)
 	mtlr	r0
 	blr
 
-_GLOBAL(call_handle_irq)
+_GLOBAL(call_do_irq)
 	mflr	r0
 	stw	r0,4(r1)
-	mtctr	r6
-	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
-	mr	r1,r5
-	bctrl
+	lwz	r10,THREAD+KSP_LIMIT(r2)
+	addi	r11,r3,THREAD_INFO_GAP
+	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
+	mr	r1,r4
+	stw	r10,8(r1)
+	stw	r11,THREAD+KSP_LIMIT(r2)
+	bl	__do_irq
+	lwz	r10,8(r1)
 	lwz	r1,0(r1)
 	lwz	r0,4(r1)
+	stw	r10,THREAD+KSP_LIMIT(r2)
 	mtlr	r0
 	blr
 
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 971d7e78aff2..e59caf874d05 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -40,14 +40,12 @@ _GLOBAL(call_do_softirq)
 	mtlr	r0
 	blr
 
-_GLOBAL(call_handle_irq)
-	ld	r8,0(r6)
+_GLOBAL(call_do_irq)
 	mflr	r0
 	std	r0,16(r1)
-	mtctr	r8
-	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
-	mr	r1,r5
-	bctrl
+	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
+	mr	r1,r4
+	bl	.__do_irq
 	ld	r1,0(r1)
 	ld	r0,16(r1)
 	mtlr	r0
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 6f428da53e20..96d2fdf3aa9e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1000,9 +1000,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	kregs = (struct pt_regs *) sp;
 	sp -= STACK_FRAME_OVERHEAD;
 	p->thread.ksp = sp;
+#ifdef CONFIG_PPC32
 	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
 				_ALIGN_UP(sizeof(struct thread_info), 16);
-
+#endif
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 	p->thread.ptrace_bps[0] = NULL;
 #endif
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 12e656ffe60e..5fe2842e8bab 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -196,6 +196,8 @@ static int __initdata mem_reserve_cnt;
 
 static cell_t __initdata regbuf[1024];
 
+static bool rtas_has_query_cpu_stopped;
+
 
 /*
  * Error results ... some OF calls will return "-1" on error, some
@@ -1574,6 +1576,11 @@ static void __init prom_instantiate_rtas(void)
 	prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
 		     &val, sizeof(val));
 
+	/* Check if it supports "query-cpu-stopped-state" */
+	if (prom_getprop(rtas_node, "query-cpu-stopped-state",
+			 &val, sizeof(val)) != PROM_ERROR)
+		rtas_has_query_cpu_stopped = true;
+
 #if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
 	/* PowerVN takeover hack */
 	prom_rtas_data = base;
@@ -1815,6 +1822,18 @@ static void __init prom_hold_cpus(void)
 		= (void *) LOW_ADDR(__secondary_hold_acknowledge);
 	unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
 
+	/*
+	 * On pseries, if RTAS supports "query-cpu-stopped-state",
+	 * we skip this stage, the CPUs will be started by the
+	 * kernel using RTAS.
+	 */
+	if ((of_platform == PLATFORM_PSERIES ||
+	     of_platform == PLATFORM_PSERIES_LPAR) &&
+	    rtas_has_query_cpu_stopped) {
+		prom_printf("prom_hold_cpus: skipped\n");
+		return;
+	}
+
 	prom_debug("prom_hold_cpus: start...\n");
 	prom_debug("    1) spinloop       = 0x%x\n", (unsigned long)spinloop);
 	prom_debug("    1) *spinloop      = 0x%x\n", *spinloop);
@@ -3011,6 +3030,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
 	 * On non-powermacs, put all CPUs in spin-loops.
 	 *
 	 * PowerMacs use a different mechanism to spin CPUs
+	 *
+	 * (This must be done after instanciating RTAS)
 	 */
 	if (of_platform != PLATFORM_POWERMAC &&
 	    of_platform != PLATFORM_OPAL)
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 27a90b99ef67..b4e667663d9b 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -17,6 +17,7 @@
 #include <asm/machdep.h>
 #include <asm/smp.h>
 #include <asm/pmc.h>
+#include <asm/firmware.h>
 
 #include "cacheinfo.h"
 
@@ -179,15 +180,25 @@ SYSFS_PMCSETUP(spurr, SPRN_SPURR);
 SYSFS_PMCSETUP(dscr, SPRN_DSCR);
 SYSFS_PMCSETUP(pir, SPRN_PIR);
 
+/*
+  Lets only enable read for phyp resources and
+  enable write when needed with a separate function.
+  Lets be conservative and default to pseries.
+*/
 static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
 static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
 static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
-static DEVICE_ATTR(purr, 0600, show_purr, store_purr);
+static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
 static DEVICE_ATTR(pir, 0400, show_pir, NULL);
 
 unsigned long dscr_default = 0;
 EXPORT_SYMBOL(dscr_default);
 
+static void add_write_permission_dev_attr(struct device_attribute *attr)
+{
+	attr->attr.mode |= 0200;
+}
+
 static ssize_t show_dscr_default(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -394,8 +405,11 @@ static void register_cpu_online(unsigned int cpu)
 	if (cpu_has_feature(CPU_FTR_MMCRA))
 		device_create_file(s, &dev_attr_mmcra);
 
-	if (cpu_has_feature(CPU_FTR_PURR))
+	if (cpu_has_feature(CPU_FTR_PURR)) {
+		if (!firmware_has_feature(FW_FEATURE_LPAR))
+			add_write_permission_dev_attr(&dev_attr_purr);
 		device_create_file(s, &dev_attr_purr);
+	}
 
 	if (cpu_has_feature(CPU_FTR_SPURR))
 		device_create_file(s, &dev_attr_spurr);
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 7b60b9851469..cd809eaa8b5c 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -79,6 +79,11 @@ _GLOBAL(tm_abort)
 	TABORT(R3)
 	blr
 
+	.section	".toc","aw"
+DSCR_DEFAULT:
+	.tc dscr_default[TC],dscr_default
+
+	.section	".text"
 
 /* void tm_reclaim(struct thread_struct *thread,
  *                 unsigned long orig_msr,
@@ -123,6 +128,7 @@ _GLOBAL(tm_reclaim)
 	mr	r15, r14
 	ori	r15, r15, MSR_FP
 	li	r16, MSR_RI
+	ori	r16, r16, MSR_EE /* IRQs hard off */
 	andc	r15, r15, r16
 	oris	r15, r15, MSR_VEC@h
 #ifdef CONFIG_VSX
@@ -187,11 +193,18 @@ dont_backup_fp:
 	std	r1, PACATMSCRATCH(r13)
 	ld	r1, PACAR1(r13)
 
+	/* Store the PPR in r11 and reset to decent value */
+	std	r11, GPR11(r1)			/* Temporary stash */
+	mfspr	r11, SPRN_PPR
+	HMT_MEDIUM
+
 	/* Now get some more GPRS free */
 	std	r7, GPR7(r1)			/* Temporary stash */
 	std	r12, GPR12(r1)			/* ''   ''    ''   */
 	ld	r12, STACK_PARAM(0)(r1)		/* Param 0, thread_struct * */
 
+	std	r11, THREAD_TM_PPR(r12)		/* Store PPR and free r11 */
+
 	addi	r7, r12, PT_CKPT_REGS		/* Thread's ckpt_regs */
 
 	/* Make r7 look like an exception frame so that we
@@ -203,15 +216,19 @@ dont_backup_fp:
 	SAVE_GPR(0, r7)				/* user r0 */
 	SAVE_GPR(2, r7)				/* user r2 */
 	SAVE_4GPRS(3, r7)			/* user r3-r6 */
-	SAVE_4GPRS(8, r7)			/* user r8-r11 */
+	SAVE_GPR(8, r7)				/* user r8 */
+	SAVE_GPR(9, r7)				/* user r9 */
+	SAVE_GPR(10, r7)			/* user r10 */
 	ld	r3, PACATMSCRATCH(r13)		/* user r1 */
 	ld	r4, GPR7(r1)			/* user r7 */
-	ld	r5, GPR12(r1)			/* user r12 */
-	GET_SCRATCH0(6)				/* user r13 */
+	ld	r5, GPR11(r1)			/* user r11 */
+	ld	r6, GPR12(r1)			/* user r12 */
+	GET_SCRATCH0(8)				/* user r13 */
 	std	r3, GPR1(r7)
 	std	r4, GPR7(r7)
-	std	r5, GPR12(r7)
-	std	r6, GPR13(r7)
+	std	r5, GPR11(r7)
+	std	r6, GPR12(r7)
+	std	r8, GPR13(r7)
 
 	SAVE_NVGPRS(r7)				/* user r14-r31 */
 
@@ -234,14 +251,12 @@ dont_backup_fp:
 	std	r6, _XER(r7)
 
 
-	/* ******************** TAR, PPR, DSCR ********** */
+	/* ******************** TAR, DSCR ********** */
 	mfspr	r3, SPRN_TAR
-	mfspr	r4, SPRN_PPR
-	mfspr	r5, SPRN_DSCR
+	mfspr	r4, SPRN_DSCR
 
 	std	r3, THREAD_TM_TAR(r12)
-	std	r4, THREAD_TM_PPR(r12)
-	std	r5, THREAD_TM_DSCR(r12)
+	std	r4, THREAD_TM_DSCR(r12)
 
 	/* MSR and flags:  We don't change CRs, and we don't need to alter
 	 * MSR.
@@ -258,7 +273,7 @@ dont_backup_fp:
 	std	r3, THREAD_TM_TFHAR(r12)
 	std	r4, THREAD_TM_TFIAR(r12)
 
-	/* AMR and PPR are checkpointed too, but are unsupported by Linux. */
+	/* AMR is checkpointed too, but is unsupported by Linux. */
 
 	/* Restore original MSR/IRQ state & clear TM mode */
 	ld	r14, TM_FRAME_L0(r1)		/* Orig MSR */
@@ -274,6 +289,12 @@ dont_backup_fp:
 	mtcr	r4
 	mtlr	r0
 	ld	r2, 40(r1)
+
+	/* Load system default DSCR */
+	ld	r4, DSCR_DEFAULT@toc(r2)
+	ld	r0, 0(r4)
+	mtspr	SPRN_DSCR, r0
+
 	blr
 
 
@@ -358,25 +379,24 @@ dont_restore_fp:
 
 restore_gprs:
 
-	/* ******************** TAR, PPR, DSCR ********** */
-	ld	r4, THREAD_TM_TAR(r3)
-	ld	r5, THREAD_TM_PPR(r3)
-	ld	r6, THREAD_TM_DSCR(r3)
+	/* ******************** CR,LR,CCR,MSR ********** */
+	ld	r4, _CTR(r7)
+	ld	r5, _LINK(r7)
+	ld	r6, _CCR(r7)
+	ld	r8, _XER(r7)
 
-	mtspr	SPRN_TAR, r4
-	mtspr	SPRN_PPR, r5
-	mtspr	SPRN_DSCR, r6
+	mtctr	r4
+	mtlr	r5
+	mtcr	r6
+	mtxer	r8
 
-	/* ******************** CR,LR,CCR,MSR ********** */
-	ld	r3, _CTR(r7)
-	ld	r4, _LINK(r7)
-	ld	r5, _CCR(r7)
-	ld	r6, _XER(r7)
+	/* ******************** TAR ******************** */
+	ld	r4, THREAD_TM_TAR(r3)
+	mtspr	SPRN_TAR, r4
 
-	mtctr	r3
-	mtlr	r4
-	mtcr	r5
-	mtxer	r6
+	/* Load up the PPR and DSCR in GPRs only at this stage */
+	ld	r5, THREAD_TM_DSCR(r3)
+	ld	r6, THREAD_TM_PPR(r3)
 
 	/* Clear the MSR RI since we are about to change R1.  EE is already off
 	 */
@@ -384,19 +404,26 @@ restore_gprs:
 	mtmsrd	r4, 1
 
 	REST_4GPRS(0, r7)			/* GPR0-3 */
-	REST_GPR(4, r7)				/* GPR4-6 */
-	REST_GPR(5, r7)
-	REST_GPR(6, r7)
+	REST_GPR(4, r7)				/* GPR4 */
 	REST_4GPRS(8, r7)			/* GPR8-11 */
 	REST_2GPRS(12, r7)			/* GPR12-13 */
 
 	REST_NVGPRS(r7)				/* GPR14-31 */
 
-	ld	r7, GPR7(r7)			/* GPR7 */
+	/* Load up PPR and DSCR here so we don't run with user values for long
+	 */
+	mtspr	SPRN_DSCR, r5
+	mtspr	SPRN_PPR, r6
+
+	REST_GPR(5, r7)				/* GPR5-7 */
+	REST_GPR(6, r7)
+	ld	r7, GPR7(r7)
 
 	/* Commit register state as checkpointed state: */
 	TRECHKPT
 
+	HMT_MEDIUM
+
 	/* Our transactional state has now changed.
 	 *
 	 * Now just get out of here.  Transactional (current) state will be
@@ -419,6 +446,12 @@ restore_gprs:
 	mtcr	r4
 	mtlr	r0
 	ld	r2, 40(r1)
+
+	/* Load system default DSCR */
+	ld	r4, DSCR_DEFAULT@toc(r2)
+	ld	r0, 0(r4)
+	mtspr	SPRN_DSCR, r0
+
 	blr
 
 	/* ****************************************************************** */
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 78a350670de3..d38cc08b16c7 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1530,11 +1530,15 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 	const char *cp;
 
 	dn = dev->of_node;
-	if (!dn)
-		return -ENODEV;
+	if (!dn) {
+		strcat(buf, "\n");
+		return strlen(buf);
+	}
 	cp = of_get_property(dn, "compatible", NULL);
-	if (!cp)
-		return -ENODEV;
+	if (!cp) {
+		strcat(buf, "\n");
+		return strlen(buf);
+	}
 
 	return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
 }
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 167f72555d60..57a072065057 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -226,19 +226,35 @@ _GLOBAL(csum_partial)
 	blr
 
 
-	.macro source
+	.macro srcnr
 100:
 	.section __ex_table,"a"
 	.align 3
-	.llong 100b,.Lsrc_error
+	.llong 100b,.Lsrc_error_nr
 	.previous
 	.endm
 
-	.macro dest
+	.macro source
+150:
+	.section __ex_table,"a"
+	.align 3
+	.llong 150b,.Lsrc_error
+	.previous
+	.endm
+
+	.macro dstnr
 200:
 	.section __ex_table,"a"
 	.align 3
-	.llong 200b,.Ldest_error
+	.llong 200b,.Ldest_error_nr
+	.previous
+	.endm
+
+	.macro dest
+250:
+	.section __ex_table,"a"
+	.align 3
+	.llong 250b,.Ldest_error
 	.previous
 	.endm
 
@@ -269,16 +285,16 @@ _GLOBAL(csum_partial_copy_generic)
 	rldicl. r6,r3,64-1,64-2		/* r6 = (r3 & 0x3) >> 1 */
 	beq	.Lcopy_aligned
 
-	li	r7,4
-	sub	r6,r7,r6
+	li	r9,4
+	sub	r6,r9,r6
 	mtctr	r6
 
 1:
-source;	lhz	r6,0(r3)		/* align to doubleword */
+srcnr;	lhz	r6,0(r3)		/* align to doubleword */
 	subi	r5,r5,2
 	addi	r3,r3,2
 	adde	r0,r0,r6
-dest;	sth	r6,0(r4)
+dstnr;	sth	r6,0(r4)
 	addi	r4,r4,2
 	bdnz	1b
 
@@ -392,10 +408,10 @@ dest; std r16,56(r4)
 
 	mtctr	r6
 3:
-source;	ld	r6,0(r3)
+srcnr;	ld	r6,0(r3)
 	addi	r3,r3,8
 	adde	r0,r0,r6
-dest;	std	r6,0(r4)
+dstnr;	std	r6,0(r4)
 	addi	r4,r4,8
 	bdnz	3b
 
@@ -405,10 +421,10 @@ dest; std r6,0(r4)
 	srdi.	r6,r5,2
 	beq	.Lcopy_tail_halfword
 
-source;	lwz	r6,0(r3)
+srcnr;	lwz	r6,0(r3)
 	addi	r3,r3,4
 	adde	r0,r0,r6
-dest;	stw	r6,0(r4)
+dstnr;	stw	r6,0(r4)
 	addi	r4,r4,4
 	subi	r5,r5,4
 
@@ -416,10 +432,10 @@ dest; stw r6,0(r4)
 	srdi.	r6,r5,1
 	beq	.Lcopy_tail_byte
 
-source;	lhz	r6,0(r3)
+srcnr;	lhz	r6,0(r3)
 	addi	r3,r3,2
 	adde	r0,r0,r6
-dest;	sth	r6,0(r4)
+dstnr;	sth	r6,0(r4)
 	addi	r4,r4,2
 	subi	r5,r5,2
 
@@ -427,10 +443,10 @@ dest; sth r6,0(r4)
 	andi.	r6,r5,1
 	beq	.Lcopy_finish
 
-source;	lbz	r6,0(r3)
+srcnr;	lbz	r6,0(r3)
 	sldi	r9,r6,8			/* Pad the byte out to 16 bits */
 	adde	r0,r0,r9
-dest;	stb	r6,0(r4)
+dstnr;	stb	r6,0(r4)
 
 .Lcopy_finish:
 	addze	r0,r0			/* add in final carry */
@@ -440,6 +456,11 @@ dest; stb r6,0(r4)
 	blr
 
 .Lsrc_error:
+	ld	r14,STK_REG(R14)(r1)
+	ld	r15,STK_REG(R15)(r1)
+	ld	r16,STK_REG(R16)(r1)
+	addi	r1,r1,STACKFRAMESIZE
+.Lsrc_error_nr:
 	cmpdi	0,r7,0
 	beqlr
 	li	r6,-EFAULT
@@ -447,6 +468,11 @@ dest; stb r6,0(r4)
 	blr
 
 .Ldest_error:
+	ld	r14,STK_REG(R14)(r1)
+	ld	r15,STK_REG(R15)(r1)
+	ld	r16,STK_REG(R16)(r1)
+	addi	r1,r1,STACKFRAMESIZE
+.Ldest_error_nr:
 	cmpdi	0,r8,0
 	beqlr
 	li	r6,-EFAULT
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index a7ee978fb860..b1faa1593c90 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1505,6 +1505,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 	 */
 	if ((ra == 1) && !(regs->msr & MSR_PR) \
 		&& (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
+#ifdef CONFIG_PPC32
 		/*
 		 * Check if we will touch kernel sack overflow
 		 */
@@ -1513,7 +1514,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 			err = -EINVAL;
 			break;
 		}
-
+#endif /* CONFIG_PPC32 */
 		/*
 		 * Check if we already set since that means we'll
 		 * lose the previous value.
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d0cd9e4c6837..8ed035d2edb5 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -300,5 +300,9 @@ void vmemmap_free(unsigned long start, unsigned long end)
 {
 }
 
+void register_page_bootmem_memmap(unsigned long section_nr,
+				  struct page *start_page, unsigned long size)
+{
+}
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 1cf9c5b67f24..3fa93dc7fe75 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -297,12 +297,21 @@ void __init paging_init(void)
 }
 #endif /* ! CONFIG_NEED_MULTIPLE_NODES */
 
+static void __init register_page_bootmem_info(void)
+{
+	int i;
+
+	for_each_online_node(i)
+		register_page_bootmem_info_node(NODE_DATA(i));
+}
+
 void __init mem_init(void)
 {
 #ifdef CONFIG_SWIOTLB
 	swiotlb_init(0);
 #endif
 
+	register_page_bootmem_info();
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 	set_max_mapnr(max_pfn);
 	free_all_bootmem();
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 2ee4a707f0df..a3f7abd2f13f 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -199,6 +199,7 @@
 #define MMCR1_UNIT_SHIFT(pmc)		(60 - (4 * ((pmc) - 1)))
 #define MMCR1_COMBINE_SHIFT(pmc)	(35 - ((pmc) - 1))
 #define MMCR1_PMCSEL_SHIFT(pmc)		(24 - (((pmc) - 1)) * 8)
+#define MMCR1_FAB_SHIFT			36
 #define MMCR1_DC_QUAL_SHIFT		47
 #define MMCR1_IC_QUAL_SHIFT		46
 
@@ -388,8 +389,8 @@ static int power8_compute_mmcr(u64 event[], int n_ev,
 		 * the threshold bits are used for the match value.
 		 */
 		if (event_is_fab_match(event[i])) {
-			mmcr1 |= (event[i] >> EVENT_THR_CTL_SHIFT) &
-				  EVENT_THR_CTL_MASK;
+			mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
+				  EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
 		} else {
 			val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
 			mmcra |= val << MMCRA_THR_CTL_SHIFT;
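The power8-pmu.c hunk is a bit-placement fix: the threshold-control field extracted from the event code was OR'd into MMCR1 unshifted, landing in the wrong bits, and is now moved up to the FAB field at bit 36. A minimal sketch of the before/after placement; the shift and mask values below are illustrative assumptions, not the real EVENT_THR_CTL_* definitions from the driver.

```c
#include <stdio.h>
#include <stdint.h>

/* Illustrative positions only; the real values live in power8-pmu.c. */
#define EVENT_THR_CTL_SHIFT	32
#define EVENT_THR_CTL_MASK	0xffull
#define MMCR1_FAB_SHIFT		36

int main(void)
{
	uint64_t event = 0x5aull << EVENT_THR_CTL_SHIFT;	/* sample event code */
	uint64_t field = (event >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;

	uint64_t mmcr1_old = field;				/* lands at bit 0 (wrong) */
	uint64_t mmcr1_new = field << MMCR1_FAB_SHIFT;		/* lands at bit 36 (FAB field) */

	printf("old placement: 0x%016llx\n", (unsigned long long)mmcr1_old);
	printf("new placement: 0x%016llx\n", (unsigned long long)mmcr1_new);
	return 0;
}
```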
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 1c1771a40250..24f58cb0a543 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -233,18 +233,24 @@ static void __init smp_init_pseries(void)
 
 	alloc_bootmem_cpumask_var(&of_spin_mask);
 
-	/* Mark threads which are still spinning in hold loops. */
-	if (cpu_has_feature(CPU_FTR_SMT)) {
-		for_each_present_cpu(i) {
-			if (cpu_thread_in_core(i) == 0)
-				cpumask_set_cpu(i, of_spin_mask);
-		}
-	} else {
-		cpumask_copy(of_spin_mask, cpu_present_mask);
+	/*
+	 * Mark threads which are still spinning in hold loops
+	 *
+	 * We know prom_init will not have started them if RTAS supports
+	 * query-cpu-stopped-state.
+	 */
+	if (rtas_token("query-cpu-stopped-state") == RTAS_UNKNOWN_SERVICE) {
+		if (cpu_has_feature(CPU_FTR_SMT)) {
+			for_each_present_cpu(i) {
+				if (cpu_thread_in_core(i) == 0)
+					cpumask_set_cpu(i, of_spin_mask);
+			}
+		} else
+			cpumask_copy(of_spin_mask, cpu_present_mask);
+
+		cpumask_clear_cpu(boot_cpuid, of_spin_mask);
 	}
 
-	cpumask_clear_cpu(boot_cpuid, of_spin_mask);
-
 	/* Non-lpar has additional take/give timebase */
 	if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
 		smp_ops->give_timebase = rtas_give_timebase;