author    David Woodhouse <David.Woodhouse@intel.com>    2008-07-25 10:40:14 -0400
committer David Woodhouse <David.Woodhouse@intel.com>    2008-07-25 10:40:14 -0400
commit    ff877ea80efa2015b6263766f78ee42c2a1b32f9 (patch)
tree      85205005c611ab774702148558321c6fb92f1ccd /arch/powerpc/kernel
parent    30821fee4f0cb3e6d241d9f7ddc37742212e3eb7 (diff)
parent    d37e6bf68fc1eb34a4ad21d9ae8890ed37ea80e7 (diff)
Merge branch 'linux-next' of git://git.infradead.org/~dedekind/ubi-2.6
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile | 18
-rw-r--r--  arch/powerpc/kernel/align.c | 70
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 27
-rw-r--r--  arch/powerpc/kernel/cpu_setup_44x.S | 1
-rw-r--r--  arch/powerpc/kernel/cputable.c | 125
-rw-r--r--  arch/powerpc/kernel/crash.c | 2
-rw-r--r--  arch/powerpc/kernel/crash_dump.c | 7
-rw-r--r--  arch/powerpc/kernel/dma_64.c | 45
-rw-r--r--  arch/powerpc/kernel/entry_32.S | 285
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 82
-rw-r--r--  arch/powerpc/kernel/fpu.S | 41
-rw-r--r--  arch/powerpc/kernel/ftrace.c | 154
-rw-r--r--  arch/powerpc/kernel/head_32.S | 6
-rw-r--r--  arch/powerpc/kernel/head_40x.S | 24
-rw-r--r--  arch/powerpc/kernel/head_44x.S | 295
-rw-r--r--  arch/powerpc/kernel/head_64.S | 82
-rw-r--r--  arch/powerpc/kernel/head_booke.h | 126
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S | 253
-rw-r--r--  arch/powerpc/kernel/ibmebus.c | 16
-rw-r--r--  arch/powerpc/kernel/idle_6xx.S | 2
-rw-r--r--  arch/powerpc/kernel/idle_e500.S | 93
-rw-r--r--  arch/powerpc/kernel/io.c | 3
-rw-r--r--  arch/powerpc/kernel/iommu.c | 29
-rw-r--r--  arch/powerpc/kernel/irq.c | 41
-rw-r--r--  arch/powerpc/kernel/kgdb.c | 410
-rw-r--r--  arch/powerpc/kernel/kprobes.c | 36
-rw-r--r--  arch/powerpc/kernel/lparcfg.c | 6
-rw-r--r--  arch/powerpc/kernel/machine_kexec_64.c | 6
-rw-r--r--  arch/powerpc/kernel/misc.S | 5
-rw-r--r--  arch/powerpc/kernel/misc_32.S | 2
-rw-r--r--  arch/powerpc/kernel/misc_64.S | 33
-rw-r--r--  arch/powerpc/kernel/module.c | 116
-rw-r--r--  arch/powerpc/kernel/module_32.c | 72
-rw-r--r--  arch/powerpc/kernel/module_64.c | 81
-rw-r--r--  arch/powerpc/kernel/msi.c | 2
-rw-r--r--  arch/powerpc/kernel/of_device.c | 48
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 1
-rw-r--r--  arch/powerpc/kernel/ppc32.h | 1
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c | 8
-rw-r--r--  arch/powerpc/kernel/process.c | 113
-rw-r--r--  arch/powerpc/kernel/prom.c | 4
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 5
-rw-r--r--  arch/powerpc/kernel/prom_parse.c | 44
-rw-r--r--  arch/powerpc/kernel/ptrace.c | 114
-rw-r--r--  arch/powerpc/kernel/ptrace32.c | 14
-rw-r--r--  arch/powerpc/kernel/rtas-proc.c | 14
-rw-r--r--  arch/powerpc/kernel/rtas.c | 8
-rw-r--r--  arch/powerpc/kernel/rtas_flash.c | 4
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c | 4
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 2
-rw-r--r--  arch/powerpc/kernel/setup_32.c | 55
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 2
-rw-r--r--  arch/powerpc/kernel/signal.c | 12
-rw-r--r--  arch/powerpc/kernel/signal.h | 10
-rw-r--r--  arch/powerpc/kernel/signal_32.c | 184
-rw-r--r--  arch/powerpc/kernel/signal_64.c | 101
-rw-r--r--  arch/powerpc/kernel/smp.c | 238
-rw-r--r--  arch/powerpc/kernel/softemu8xx.c | 4
-rw-r--r--  arch/powerpc/kernel/stacktrace.c | 38
-rw-r--r--  arch/powerpc/kernel/syscalls.c | 3
-rw-r--r--  arch/powerpc/kernel/sysfs.c | 15
-rw-r--r--  arch/powerpc/kernel/tau_6xx.c | 4
-rw-r--r--  arch/powerpc/kernel/time.c | 16
-rw-r--r--  arch/powerpc/kernel/traps.c | 55
-rw-r--r--  arch/powerpc/kernel/vdso.c | 10
-rw-r--r--  arch/powerpc/kernel/vdso32/vdso32.lds.S | 5
-rw-r--r--  arch/powerpc/kernel/vdso64/vdso64.lds.S | 11
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S | 39
68 files changed, 2605 insertions(+), 1177 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 2346d271fbfd..1a4094704b1f 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -12,6 +12,18 @@ CFLAGS_prom_init.o += -fPIC
12CFLAGS_btext.o += -fPIC 12CFLAGS_btext.o += -fPIC
13endif 13endif
14 14
15ifdef CONFIG_FTRACE
16# Do not trace early boot code
17CFLAGS_REMOVE_cputable.o = -pg
18CFLAGS_REMOVE_prom_init.o = -pg
19
20ifdef CONFIG_DYNAMIC_FTRACE
21# dynamic ftrace setup.
22CFLAGS_REMOVE_ftrace.o = -pg
23endif
24
25endif
26
15obj-y := cputable.o ptrace.o syscalls.o \ 27obj-y := cputable.o ptrace.o syscalls.o \
16 irq.o align.o signal_32.o pmc.o vdso.o \ 28 irq.o align.o signal_32.o pmc.o vdso.o \
17 init_task.o process.o systbl.o idle.o \ 29 init_task.o process.o systbl.o idle.o \
@@ -38,12 +50,13 @@ obj-$(CONFIG_IBMVIO) += vio.o
38obj-$(CONFIG_IBMEBUS) += ibmebus.o 50obj-$(CONFIG_IBMEBUS) += ibmebus.o
39obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o 51obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
40obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 52obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
53obj-$(CONFIG_E500) += idle_e500.o
41obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o 54obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
42obj-$(CONFIG_TAU) += tau_6xx.o 55obj-$(CONFIG_TAU) += tau_6xx.o
43obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o \ 56obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o \
44 swsusp_$(CONFIG_WORD_SIZE).o 57 swsusp_$(CONFIG_WORD_SIZE).o
45obj64-$(CONFIG_HIBERNATION) += swsusp_asm64.o 58obj64-$(CONFIG_HIBERNATION) += swsusp_asm64.o
46obj-$(CONFIG_MODULES) += module_$(CONFIG_WORD_SIZE).o 59obj-$(CONFIG_MODULES) += module.o module_$(CONFIG_WORD_SIZE).o
47obj-$(CONFIG_44x) += cpu_setup_44x.o 60obj-$(CONFIG_44x) += cpu_setup_44x.o
48 61
49ifeq ($(CONFIG_PPC_MERGE),y) 62ifeq ($(CONFIG_PPC_MERGE),y)
@@ -61,6 +74,7 @@ obj-y += time.o prom.o traps.o setup-common.o \
61 misc_$(CONFIG_WORD_SIZE).o 74 misc_$(CONFIG_WORD_SIZE).o
62obj-$(CONFIG_PPC32) += entry_32.o setup_32.o 75obj-$(CONFIG_PPC32) += entry_32.o setup_32.o
63obj-$(CONFIG_PPC64) += dma_64.o iommu.o 76obj-$(CONFIG_PPC64) += dma_64.o iommu.o
77obj-$(CONFIG_KGDB) += kgdb.o
64obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o 78obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
65obj-$(CONFIG_MODULES) += ppc_ksyms.o 79obj-$(CONFIG_MODULES) += ppc_ksyms.o
66obj-$(CONFIG_BOOTX_TEXT) += btext.o 80obj-$(CONFIG_BOOTX_TEXT) += btext.o
@@ -78,6 +92,8 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o crash.o \
78obj-$(CONFIG_AUDIT) += audit.o 92obj-$(CONFIG_AUDIT) += audit.o
79obj64-$(CONFIG_AUDIT) += compat_audit.o 93obj64-$(CONFIG_AUDIT) += compat_audit.o
80 94
95obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
96
81obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o 97obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o
82 98
83ifneq ($(CONFIG_PPC_INDIRECT_IO),y) 99ifneq ($(CONFIG_PPC_INDIRECT_IO),y)
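
The CONFIG_FTRACE block added above strips gcc's -pg instrumentation from objects that run before the kernel can trace. Conceptually, -pg compiles every function as if it began with a call to the profiling hook; a minimal C sketch (illustrative only — the real hook is the _mcount assembly this patch adds to entry_32.S and entry_64.S further down):

    /* Sketch: what a -pg build conceptually does to a function.
     * Illustrative only; the real hook is the _mcount stub added
     * to entry_32.S/entry_64.S by this patch. */
    extern void _mcount(void);

    void some_early_boot_function(void)
    {
            _mcount();      /* inserted by -pg at every function entry;
                               unsafe in objects like cputable.o and
                               prom_init.o that run before tracing is up */
            /* ... original body ... */
    }

CFLAGS_REMOVE_ftrace.o likewise keeps the patching code itself from being traced under CONFIG_DYNAMIC_FTRACE.
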
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index e06f75daeba3..367129789cc0 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -48,6 +48,7 @@ struct aligninfo {
48#define HARD 0x80 /* string, stwcx. */ 48#define HARD 0x80 /* string, stwcx. */
49#define E4 0x40 /* SPE endianness is word */ 49#define E4 0x40 /* SPE endianness is word */
50#define E8 0x80 /* SPE endianness is double word */ 50#define E8 0x80 /* SPE endianness is double word */
51#define SPLT 0x80 /* VSX SPLAT load */
51 52
52/* DSISR bits reported for a DCBZ instruction: */ 53/* DSISR bits reported for a DCBZ instruction: */
53#define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */ 54#define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */
@@ -363,10 +364,10 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
363 * Only POWER6 has these instructions, and it does true little-endian, 364 * Only POWER6 has these instructions, and it does true little-endian,
364 * so we don't need the address swizzling. 365 * so we don't need the address swizzling.
365 */ 366 */
366static int emulate_fp_pair(struct pt_regs *regs, unsigned char __user *addr, 367static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
367 unsigned int reg, unsigned int flags) 368 unsigned int flags)
368{ 369{
369 char *ptr = (char *) &current->thread.fpr[reg]; 370 char *ptr = (char *) &current->thread.TS_FPR(reg);
370 int i, ret; 371 int i, ret;
371 372
372 if (!(flags & F)) 373 if (!(flags & F))
@@ -637,6 +638,36 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
637} 638}
638#endif /* CONFIG_SPE */ 639#endif /* CONFIG_SPE */
639 640
641#ifdef CONFIG_VSX
642/*
643 * Emulate VSX instructions...
644 */
645static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
646 unsigned int areg, struct pt_regs *regs,
647 unsigned int flags, unsigned int length)
648{
649 char *ptr = (char *) &current->thread.TS_FPR(reg);
650 int ret;
651
652 flush_vsx_to_thread(current);
653
654 if (flags & ST)
655 ret = __copy_to_user(addr, ptr, length);
656 else {
 657 if (flags & SPLT) {
658 ret = __copy_from_user(ptr, addr, length);
659 ptr += length;
660 }
661 ret |= __copy_from_user(ptr, addr, length);
662 }
663 if (flags & U)
664 regs->gpr[areg] = regs->dar;
665 if (ret)
666 return -EFAULT;
667 return 1;
668}
669#endif
670
640/* 671/*
641 * Called on alignment exception. Attempts to fixup 672 * Called on alignment exception. Attempts to fixup
642 * 673 *
@@ -647,7 +678,7 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
647 678
648int fix_alignment(struct pt_regs *regs) 679int fix_alignment(struct pt_regs *regs)
649{ 680{
650 unsigned int instr, nb, flags; 681 unsigned int instr, nb, flags, instruction = 0;
651 unsigned int reg, areg; 682 unsigned int reg, areg;
652 unsigned int dsisr; 683 unsigned int dsisr;
653 unsigned char __user *addr; 684 unsigned char __user *addr;
@@ -689,6 +720,7 @@ int fix_alignment(struct pt_regs *regs)
689 if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE)) 720 if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE))
690 instr = cpu_to_le32(instr); 721 instr = cpu_to_le32(instr);
691 dsisr = make_dsisr(instr); 722 dsisr = make_dsisr(instr);
723 instruction = instr;
692 } 724 }
693 725
694 /* extract the operation and registers from the dsisr */ 726 /* extract the operation and registers from the dsisr */
@@ -728,6 +760,30 @@ int fix_alignment(struct pt_regs *regs)
728 /* DAR has the operand effective address */ 760 /* DAR has the operand effective address */
729 addr = (unsigned char __user *)regs->dar; 761 addr = (unsigned char __user *)regs->dar;
730 762
763#ifdef CONFIG_VSX
764 if ((instruction & 0xfc00003e) == 0x7c000018) {
 765 /* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */
766 reg |= (instruction & 0x1) << 5;
767 /* Simple inline decoder instead of a table */
768 if (instruction & 0x200)
769 nb = 16;
770 else if (instruction & 0x080)
771 nb = 8;
772 else
773 nb = 4;
774 flags = 0;
775 if (instruction & 0x100)
776 flags |= ST;
777 if (instruction & 0x040)
778 flags |= U;
779 /* splat load needs a special decoder */
 780 if ((instruction & 0x400) == 0) {
781 flags |= SPLT;
782 nb = 8;
783 }
784 return emulate_vsx(addr, reg, areg, regs, flags, nb);
785 }
786#endif
731 /* A size of 0 indicates an instruction we don't support, with 787 /* A size of 0 indicates an instruction we don't support, with
732 * the exception of DCBZ which is handled as a special case here 788 * the exception of DCBZ which is handled as a special case here
733 */ 789 */
@@ -759,7 +815,7 @@ int fix_alignment(struct pt_regs *regs)
759 815
760 /* Special case for 16-byte FP loads and stores */ 816 /* Special case for 16-byte FP loads and stores */
761 if (nb == 16) 817 if (nb == 16)
762 return emulate_fp_pair(regs, addr, reg, flags); 818 return emulate_fp_pair(addr, reg, flags);
763 819
764 /* If we are loading, get the data from user space, else 820 /* If we are loading, get the data from user space, else
765 * get it from register values 821 * get it from register values
@@ -784,7 +840,7 @@ int fix_alignment(struct pt_regs *regs)
784 return -EFAULT; 840 return -EFAULT;
785 } 841 }
786 } else if (flags & F) { 842 } else if (flags & F) {
787 data.dd = current->thread.fpr[reg]; 843 data.dd = current->thread.TS_FPR(reg);
788 if (flags & S) { 844 if (flags & S) {
789 /* Single-precision FP store requires conversion... */ 845 /* Single-precision FP store requires conversion... */
790#ifdef CONFIG_PPC_FPU 846#ifdef CONFIG_PPC_FPU
@@ -862,7 +918,7 @@ int fix_alignment(struct pt_regs *regs)
862 if (unlikely(ret)) 918 if (unlikely(ret))
863 return -EFAULT; 919 return -EFAULT;
864 } else if (flags & F) 920 } else if (flags & F)
865 current->thread.fpr[reg] = data.dd; 921 current->thread.TS_FPR(reg) = data.dd;
866 else 922 else
867 regs->gpr[reg] = data.ll; 923 regs->gpr[reg] = data.ll;
868 924
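
The inline VSX decoder added to fix_alignment() above works purely off fixed bits of the instruction word. A standalone sketch of the same bit tests (an illustration, not a complete VSX decoder; note the kernel takes the low five register bits from the DSISR and only ORs in the extension bit):

    #include <stdio.h>

    /* Sketch of the VSX alignment decode in the hunk above. */
    static void decode_vsx(unsigned int instr)
    {
            unsigned int reg, nb, st, u, splt;

            if ((instr & 0xfc00003e) != 0x7c000018)
                    return;                         /* not a handled form */

            /* bit 0 widens the register field: 64 VSX regs vs 32 FPR/GPR */
            reg  = ((instr >> 21) & 0x1f) | ((instr & 0x1) << 5;
            nb   = (instr & 0x200) ? 16 : (instr & 0x080) ? 8 : 4;
            st   = !!(instr & 0x100);               /* store rather than load */
            u    = !!(instr & 0x040);               /* update form: EA -> RA */
            splt = !(instr & 0x400);                /* splat: load 8 bytes twice */
            if (splt)
                    nb = 8;

            printf("vsr%u %s, %u bytes%s%s\n", reg, st ? "store" : "load",
                   nb, u ? ", update" : "", splt ? ", splat" : "");
    }
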
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index ec9228d687b0..92768d3006f7 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -52,6 +52,10 @@
52#include <asm/iseries/alpaca.h> 52#include <asm/iseries/alpaca.h>
53#endif 53#endif
54 54
55#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
56#include "head_booke.h"
57#endif
58
55int main(void) 59int main(void)
56{ 60{
57 DEFINE(THREAD, offsetof(struct task_struct, thread)); 61 DEFINE(THREAD, offsetof(struct task_struct, thread));
@@ -74,6 +78,10 @@ int main(void)
74 DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr)); 78 DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
75 DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr)); 79 DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
76#endif /* CONFIG_ALTIVEC */ 80#endif /* CONFIG_ALTIVEC */
81#ifdef CONFIG_VSX
82 DEFINE(THREAD_VSR0, offsetof(struct thread_struct, fpr));
83 DEFINE(THREAD_USED_VSR, offsetof(struct thread_struct, used_vsr));
84#endif /* CONFIG_VSX */
77#ifdef CONFIG_PPC64 85#ifdef CONFIG_PPC64
78 DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid)); 86 DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid));
79#else /* CONFIG_PPC64 */ 87#else /* CONFIG_PPC64 */
@@ -242,6 +250,25 @@ int main(void)
242 DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8); 250 DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
243#endif /* CONFIG_PPC64 */ 251#endif /* CONFIG_PPC64 */
244 252
253#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
254 DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
255 DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
 256 /* on 44x we overload the MAS0 slot with MMUCR, since the two are mutually exclusive */
257 DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
258 DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1));
259 DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2));
260 DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3));
261 DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6));
262 DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7));
263 DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0));
264 DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1));
265 DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0));
266 DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
267 DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
268 DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
269 DEFINE(SAVED_KSP_LIMIT, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, saved_ksp_limit));
270#endif
271
245 DEFINE(CLONE_VM, CLONE_VM); 272 DEFINE(CLONE_VM, CLONE_VM);
246 DEFINE(CLONE_UNTRACED, CLONE_UNTRACED); 273 DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
247 274
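
The MAS*/SRR/DSRR offsets defined above reach assembly through the asm-offsets mechanism, which is what lets entry_32.S below write stw r0,MAS0(r11). A sketch of the DEFINE() macro as kbuild of this era implements it (see include/linux/kbuild.h for the authoritative version):

    /* asm-offsets.c is compiled to assembly only; each DEFINE() plants
     * a "->SYM value" marker that a sed script rewrites into
     * "#define SYM value" in the generated asm-offsets.h. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))
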
diff --git a/arch/powerpc/kernel/cpu_setup_44x.S b/arch/powerpc/kernel/cpu_setup_44x.S
index e3623e3e3451..5465e8de0e61 100644
--- a/arch/powerpc/kernel/cpu_setup_44x.S
+++ b/arch/powerpc/kernel/cpu_setup_44x.S
@@ -33,6 +33,7 @@ _GLOBAL(__setup_cpu_440grx)
33 mtlr r4 33 mtlr r4
34 blr 34 blr
35_GLOBAL(__setup_cpu_460ex) 35_GLOBAL(__setup_cpu_460ex)
36_GLOBAL(__setup_cpu_460gt)
36 b __init_fpu_44x 37 b __init_fpu_44x
37_GLOBAL(__setup_cpu_440gx) 38_GLOBAL(__setup_cpu_440gx)
38_GLOBAL(__setup_cpu_440spe) 39_GLOBAL(__setup_cpu_440spe)
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index e44d5530f0a6..b936a1dd0a50 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -37,6 +37,7 @@ extern void __setup_cpu_440gx(unsigned long offset, struct cpu_spec* spec);
37extern void __setup_cpu_440grx(unsigned long offset, struct cpu_spec* spec); 37extern void __setup_cpu_440grx(unsigned long offset, struct cpu_spec* spec);
38extern void __setup_cpu_440spe(unsigned long offset, struct cpu_spec* spec); 38extern void __setup_cpu_440spe(unsigned long offset, struct cpu_spec* spec);
39extern void __setup_cpu_460ex(unsigned long offset, struct cpu_spec* spec); 39extern void __setup_cpu_460ex(unsigned long offset, struct cpu_spec* spec);
40extern void __setup_cpu_460gt(unsigned long offset, struct cpu_spec* spec);
40extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec); 41extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec);
41extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec); 42extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec);
42extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec); 43extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec);
@@ -52,6 +53,8 @@ extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec);
52extern void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec* spec); 53extern void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec* spec);
53extern void __restore_cpu_pa6t(void); 54extern void __restore_cpu_pa6t(void);
54extern void __restore_cpu_ppc970(void); 55extern void __restore_cpu_ppc970(void);
56extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec);
57extern void __restore_cpu_power7(void);
55#endif /* CONFIG_PPC64 */ 58#endif /* CONFIG_PPC64 */
56 59
57/* This table only contains "desktop" CPUs; it needs to be filled with embedded 60
@@ -67,7 +70,12 @@ extern void __restore_cpu_ppc970(void);
67 PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP) 70 PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP)
68#define COMMON_USER_POWER6 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_05 |\ 71#define COMMON_USER_POWER6 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_05 |\
69 PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \ 72 PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \
70 PPC_FEATURE_TRUE_LE) 73 PPC_FEATURE_TRUE_LE | \
74 PPC_FEATURE_PSERIES_PERFMON_COMPAT)
75#define COMMON_USER_POWER7 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_06 |\
76 PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \
77 PPC_FEATURE_TRUE_LE | \
78 PPC_FEATURE_PSERIES_PERFMON_COMPAT)
71#define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\ 79#define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\
72 PPC_FEATURE_TRUE_LE | \ 80 PPC_FEATURE_TRUE_LE | \
73 PPC_FEATURE_HAS_ALTIVEC_COMP) 81 PPC_FEATURE_HAS_ALTIVEC_COMP)
@@ -347,6 +355,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
347 .icache_bsize = 128, 355 .icache_bsize = 128,
348 .dcache_bsize = 128, 356 .dcache_bsize = 128,
349 .machine_check = machine_check_generic, 357 .machine_check = machine_check_generic,
358 .oprofile_cpu_type = "ppc64/compat-power5+",
350 .platform = "power5+", 359 .platform = "power5+",
351 }, 360 },
352 { /* Power6 */ 361 { /* Power6 */
@@ -378,8 +387,41 @@ static struct cpu_spec __initdata cpu_specs[] = {
378 .icache_bsize = 128, 387 .icache_bsize = 128,
379 .dcache_bsize = 128, 388 .dcache_bsize = 128,
380 .machine_check = machine_check_generic, 389 .machine_check = machine_check_generic,
390 .oprofile_cpu_type = "ppc64/compat-power6",
381 .platform = "power6", 391 .platform = "power6",
382 }, 392 },
393 { /* 2.06-compliant processor, i.e. Power7 "architected" mode */
394 .pvr_mask = 0xffffffff,
395 .pvr_value = 0x0f000003,
396 .cpu_name = "POWER7 (architected)",
397 .cpu_features = CPU_FTRS_POWER7,
398 .cpu_user_features = COMMON_USER_POWER7,
399 .icache_bsize = 128,
400 .dcache_bsize = 128,
401 .machine_check = machine_check_generic,
402 .oprofile_cpu_type = "ppc64/compat-power7",
403 .platform = "power7",
404 },
405 { /* Power7 */
406 .pvr_mask = 0xffff0000,
407 .pvr_value = 0x003f0000,
408 .cpu_name = "POWER7 (raw)",
409 .cpu_features = CPU_FTRS_POWER7,
410 .cpu_user_features = COMMON_USER_POWER7,
411 .icache_bsize = 128,
412 .dcache_bsize = 128,
413 .num_pmcs = 6,
414 .pmc_type = PPC_PMC_IBM,
415 .cpu_setup = __setup_cpu_power7,
416 .cpu_restore = __restore_cpu_power7,
417 .oprofile_cpu_type = "ppc64/power7",
418 .oprofile_type = PPC_OPROFILE_POWER4,
419 .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV,
420 .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR,
421 .oprofile_mmcra_clear = POWER6_MMCRA_THRM |
422 POWER6_MMCRA_OTHER,
423 .platform = "power7",
424 },
383 { /* Cell Broadband Engine */ 425 { /* Cell Broadband Engine */
384 .pvr_mask = 0xffff0000, 426 .pvr_mask = 0xffff0000,
385 .pvr_value = 0x00700000, 427 .pvr_value = 0x00700000,
@@ -1410,6 +1452,16 @@ static struct cpu_spec __initdata cpu_specs[] = {
1410 .machine_check = machine_check_440A, 1452 .machine_check = machine_check_440A,
1411 .platform = "ppc440", 1453 .platform = "ppc440",
1412 }, 1454 },
1455 { /* 440 in Xilinx Virtex-5 FXT */
1456 .pvr_mask = 0xfffffff0,
1457 .pvr_value = 0x7ff21910,
1458 .cpu_name = "440 in Virtex-5 FXT",
1459 .cpu_features = CPU_FTRS_44X,
1460 .cpu_user_features = COMMON_USER_BOOKE,
1461 .icache_bsize = 32,
1462 .dcache_bsize = 32,
1463 .platform = "ppc440",
1464 },
1413 { /* 460EX */ 1465 { /* 460EX */
1414 .pvr_mask = 0xffff0002, 1466 .pvr_mask = 0xffff0002,
1415 .pvr_value = 0x13020002, 1467 .pvr_value = 0x13020002,
@@ -1427,9 +1479,10 @@ static struct cpu_spec __initdata cpu_specs[] = {
1427 .pvr_value = 0x13020000, 1479 .pvr_value = 0x13020000,
1428 .cpu_name = "460GT", 1480 .cpu_name = "460GT",
1429 .cpu_features = CPU_FTRS_44X, 1481 .cpu_features = CPU_FTRS_44X,
1430 .cpu_user_features = COMMON_USER_BOOKE, 1482 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1431 .icache_bsize = 32, 1483 .icache_bsize = 32,
1432 .dcache_bsize = 32, 1484 .dcache_bsize = 32,
1485 .cpu_setup = __setup_cpu_460gt,
1433 .machine_check = machine_check_440A, 1486 .machine_check = machine_check_440A,
1434 .platform = "ppc440", 1487 .platform = "ppc440",
1435 }, 1488 },
@@ -1491,7 +1544,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
1491 .pvr_mask = 0xffff0000, 1544 .pvr_mask = 0xffff0000,
1492 .pvr_value = 0x80200000, 1545 .pvr_value = 0x80200000,
1493 .cpu_name = "e500", 1546 .cpu_name = "e500",
1494 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
1495 .cpu_features = CPU_FTRS_E500, 1547 .cpu_features = CPU_FTRS_E500,
1496 .cpu_user_features = COMMON_USER_BOOKE | 1548 .cpu_user_features = COMMON_USER_BOOKE |
1497 PPC_FEATURE_HAS_SPE_COMP | 1549 PPC_FEATURE_HAS_SPE_COMP |
@@ -1508,7 +1560,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
1508 .pvr_mask = 0xffff0000, 1560 .pvr_mask = 0xffff0000,
1509 .pvr_value = 0x80210000, 1561 .pvr_value = 0x80210000,
1510 .cpu_name = "e500v2", 1562 .cpu_name = "e500v2",
1511 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
1512 .cpu_features = CPU_FTRS_E500_2, 1563 .cpu_features = CPU_FTRS_E500_2,
1513 .cpu_user_features = COMMON_USER_BOOKE | 1564 .cpu_user_features = COMMON_USER_BOOKE |
1514 PPC_FEATURE_HAS_SPE_COMP | 1565 PPC_FEATURE_HAS_SPE_COMP |
@@ -1522,6 +1573,20 @@ static struct cpu_spec __initdata cpu_specs[] = {
1522 .machine_check = machine_check_e500, 1573 .machine_check = machine_check_e500,
1523 .platform = "ppc8548", 1574 .platform = "ppc8548",
1524 }, 1575 },
1576 { /* e500mc */
1577 .pvr_mask = 0xffff0000,
1578 .pvr_value = 0x80230000,
1579 .cpu_name = "e500mc",
1580 .cpu_features = CPU_FTRS_E500MC,
1581 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1582 .icache_bsize = 64,
1583 .dcache_bsize = 64,
1584 .num_pmcs = 4,
1585 .oprofile_cpu_type = "ppc/e500", /* xxx - galak, e500mc? */
1586 .oprofile_type = PPC_OPROFILE_FSL_EMB,
1587 .machine_check = machine_check_e500,
1588 .platform = "ppce500mc",
1589 },
1525 { /* default match */ 1590 { /* default match */
1526 .pvr_mask = 0x00000000, 1591 .pvr_mask = 0x00000000,
1527 .pvr_value = 0x00000000, 1592 .pvr_value = 0x00000000,
@@ -1567,6 +1632,23 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
1567 t->cpu_setup = s->cpu_setup; 1632 t->cpu_setup = s->cpu_setup;
1568 t->cpu_restore = s->cpu_restore; 1633 t->cpu_restore = s->cpu_restore;
1569 t->platform = s->platform; 1634 t->platform = s->platform;
1635 /*
1636 * If we have passed through this logic once
1637 * before and have pulled the default case
1638 * because the real PVR was not found inside
1639 * cpu_specs[], then we are possibly running in
1640 * compatibility mode. In that case, let the
1641 * oprofiler know which set of compatibility
1642 * counters to pull from by making sure the
1643 * oprofile_cpu_type string is set to that of
1644 * compatibility mode. If the oprofile_cpu_type
1645 * already has a value, then we are possibly
1646 * overriding a real PVR with a logical one, and,
1647 * in that case, keep the current value for
1648 * oprofile_cpu_type.
1649 */
1650 if (t->oprofile_cpu_type == NULL)
1651 t->oprofile_cpu_type = s->oprofile_cpu_type;
1570 } else 1652 } else
1571 *t = *s; 1653 *t = *s;
1572 *PTRRELOC(&cur_cpu_spec) = &the_cpu_spec; 1654 *PTRRELOC(&cur_cpu_spec) = &the_cpu_spec;
@@ -1587,38 +1669,3 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
1587 BUG(); 1669 BUG();
1588 return NULL; 1670 return NULL;
1589} 1671}
1590
1591void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
1592{
1593 struct fixup_entry {
1594 unsigned long mask;
1595 unsigned long value;
1596 long start_off;
1597 long end_off;
1598 } *fcur, *fend;
1599
1600 fcur = fixup_start;
1601 fend = fixup_end;
1602
1603 for (; fcur < fend; fcur++) {
1604 unsigned int *pstart, *pend, *p;
1605
1606 if ((value & fcur->mask) == fcur->value)
1607 continue;
1608
1609 /* These PTRRELOCs will disappear once the new scheme for
1610 * modules and vdso is implemented
1611 */
1612 pstart = ((unsigned int *)fcur) + (fcur->start_off / 4);
1613 pend = ((unsigned int *)fcur) + (fcur->end_off / 4);
1614
1615 for (p = pstart; p < pend; p++) {
1616 *p = 0x60000000u;
1617 asm volatile ("dcbst 0, %0" : : "r" (p));
1618 }
1619 asm volatile ("sync" : : : "memory");
1620 for (p = pstart; p < pend; p++)
1621 asm volatile ("icbi 0,%0" : : "r" (p));
1622 asm volatile ("sync; isync" : : : "memory");
1623 }
1624}
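
The two new POWER7 entries above illustrate both match styles: the "architected" entry compares the full 32-bit logical PVR (mask 0xffffffff), while the "raw" entry masks only the upper halfword. A minimal sketch of the selection loop, simplified from identify_cpu() (the real code also applies relocation offsets and the oprofile_cpu_type carry-over described in the long comment above):

    /* Minimal PVR-match sketch; only the two fields used here are kept. */
    struct spec {
            unsigned int pvr_mask;
            unsigned int pvr_value;
    };

    static const struct spec *match_pvr(const struct spec *specs, int n,
                                        unsigned int pvr)
    {
            int i;

            for (i = 0; i < n; i++)
                    if ((pvr & specs[i].pvr_mask) == specs[i].pvr_value)
                            return &specs[i];       /* first match wins */
            /* not reached with cputable.c's table: its final "default
             * match" entry uses mask/value 0/0 and so matches any PVR */
            return NULL;
    }
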
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index eae401de3f76..0a8439aafdd1 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -48,7 +48,7 @@ int crashing_cpu = -1;
48static cpumask_t cpus_in_crash = CPU_MASK_NONE; 48static cpumask_t cpus_in_crash = CPU_MASK_NONE;
49cpumask_t cpus_in_sr = CPU_MASK_NONE; 49cpumask_t cpus_in_sr = CPU_MASK_NONE;
50 50
51#define CRASH_HANDLER_MAX 1 51#define CRASH_HANDLER_MAX 2
52/* NULL terminated list of shutdown handles */ 52/* NULL terminated list of shutdown handles */
53static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1]; 53static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1];
54static DEFINE_SPINLOCK(crash_handlers_lock); 54static DEFINE_SPINLOCK(crash_handlers_lock);
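
CRASH_HANDLER_MAX only sizes the NULL-terminated array, so the bump above makes room for a second shutdown hook. A sketch of the registration pattern the array implies (hypothetical helper name; the real registration code is not shown in this hunk):

    static int sketch_register_handler(crash_shutdown_t handler)
    {
            int i, rc = -ENOSPC;

            spin_lock(&crash_handlers_lock);
            for (i = 0; i < CRASH_HANDLER_MAX; i++) {
                    if (!crash_shutdown_handles[i]) {
                            crash_shutdown_handles[i] = handler;
                            rc = 0;
                            break;
                    }
            }
            spin_unlock(&crash_handlers_lock);
            return rc;      /* the extra slot keeps the list NULL-terminated */
    }
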
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 9ee3c5278db0..e0debcca0bfa 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -14,6 +14,7 @@
14#include <linux/crash_dump.h> 14#include <linux/crash_dump.h>
15#include <linux/bootmem.h> 15#include <linux/bootmem.h>
16#include <linux/lmb.h> 16#include <linux/lmb.h>
17#include <asm/code-patching.h>
17#include <asm/kdump.h> 18#include <asm/kdump.h>
18#include <asm/prom.h> 19#include <asm/prom.h>
19#include <asm/firmware.h> 20#include <asm/firmware.h>
@@ -33,6 +34,8 @@ void __init reserve_kdump_trampoline(void)
33 34
34static void __init create_trampoline(unsigned long addr) 35static void __init create_trampoline(unsigned long addr)
35{ 36{
37 unsigned int *p = (unsigned int *)addr;
38
 36 /* The maximum range of a single instruction branch is the current 39
37 * instruction's address + (32 MB - 4) bytes. For the trampoline we 40 * instruction's address + (32 MB - 4) bytes. For the trampoline we
38 * need to branch to current address + 32 MB. So we insert a nop at 41 * need to branch to current address + 32 MB. So we insert a nop at
@@ -41,8 +44,8 @@ static void __init create_trampoline(unsigned long addr)
41 * branch to "addr" we jump to ("addr" + 32 MB). Although it requires 44 * branch to "addr" we jump to ("addr" + 32 MB). Although it requires
42 * two instructions it doesn't require any registers. 45 * two instructions it doesn't require any registers.
43 */ 46 */
44 create_instruction(addr, 0x60000000); /* nop */ 47 patch_instruction(p, PPC_NOP_INSTR);
45 create_branch(addr + 4, addr + PHYSICAL_START, 0); 48 patch_branch(++p, addr + PHYSICAL_START, 0);
46} 49}
47 50
48void __init setup_kdump_trampoline(void) 51void __init setup_kdump_trampoline(void)
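
A userspace-style sketch of the two words create_trampoline() lays down, assuming PPC_NOP_INSTR is the usual 0x60000000 nop and the standard 26-bit relative "b" encoding (illustrative only):

    #include <stdint.h>

    /* nop at addr so the branch, sitting at addr + 4, can reach
     * addr + 32 MB: a relative branch spans at most +(32 MB - 4)
     * from its own address, as the comment above explains. */
    static void sketch_trampoline(uint32_t *addr, unsigned long phys_start)
    {
            unsigned long target = (unsigned long)addr + phys_start;
            long offset = (long)(target - ((unsigned long)addr + 4));

            addr[0] = 0x60000000;                           /* nop */
            addr[1] = 0x48000000 | (offset & 0x03fffffc);   /* b target */
    }
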
diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c
index 3a317cb0636a..ae5708e3a312 100644
--- a/arch/powerpc/kernel/dma_64.c
+++ b/arch/powerpc/kernel/dma_64.c
@@ -15,15 +15,6 @@
15 * Generic iommu implementation 15 * Generic iommu implementation
16 */ 16 */
17 17
18static inline unsigned long device_to_mask(struct device *dev)
19{
20 if (dev->dma_mask && *dev->dma_mask)
21 return *dev->dma_mask;
22 /* Assume devices without mask can take 32 bit addresses */
23 return 0xfffffffful;
24}
25
26
27/* Allocates a contiguous real buffer and creates mappings over it. 18/* Allocates a contiguous real buffer and creates mappings over it.
28 * Returns the virtual address of the buffer and sets dma_handle 19 * Returns the virtual address of the buffer and sets dma_handle
29 * to the dma address (mapping) of the first page. 20 * to the dma address (mapping) of the first page.
@@ -50,32 +41,38 @@ static void dma_iommu_free_coherent(struct device *dev, size_t size,
50 */ 41 */
51static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr, 42static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
52 size_t size, 43 size_t size,
53 enum dma_data_direction direction) 44 enum dma_data_direction direction,
45 struct dma_attrs *attrs)
54{ 46{
55 return iommu_map_single(dev, dev->archdata.dma_data, vaddr, size, 47 return iommu_map_single(dev, dev->archdata.dma_data, vaddr, size,
56 device_to_mask(dev), direction); 48 device_to_mask(dev), direction, attrs);
57} 49}
58 50
59 51
60static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle, 52static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
61 size_t size, 53 size_t size,
62 enum dma_data_direction direction) 54 enum dma_data_direction direction,
55 struct dma_attrs *attrs)
63{ 56{
64 iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction); 57 iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction,
58 attrs);
65} 59}
66 60
67 61
68static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, 62static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
69 int nelems, enum dma_data_direction direction) 63 int nelems, enum dma_data_direction direction,
64 struct dma_attrs *attrs)
70{ 65{
71 return iommu_map_sg(dev, sglist, nelems, 66 return iommu_map_sg(dev, dev->archdata.dma_data, sglist, nelems,
72 device_to_mask(dev), direction); 67 device_to_mask(dev), direction, attrs);
73} 68}
74 69
75static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist, 70static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
76 int nelems, enum dma_data_direction direction) 71 int nelems, enum dma_data_direction direction,
72 struct dma_attrs *attrs)
77{ 73{
78 iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction); 74 iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction,
75 attrs);
79} 76}
80 77
81/* We support DMA to/from any memory page via the iommu */ 78/* We support DMA to/from any memory page via the iommu */
@@ -148,19 +145,22 @@ static void dma_direct_free_coherent(struct device *dev, size_t size,
148 145
149static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr, 146static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
150 size_t size, 147 size_t size,
151 enum dma_data_direction direction) 148 enum dma_data_direction direction,
149 struct dma_attrs *attrs)
152{ 150{
153 return virt_to_abs(ptr) + get_dma_direct_offset(dev); 151 return virt_to_abs(ptr) + get_dma_direct_offset(dev);
154} 152}
155 153
156static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr, 154static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
157 size_t size, 155 size_t size,
158 enum dma_data_direction direction) 156 enum dma_data_direction direction,
157 struct dma_attrs *attrs)
159{ 158{
160} 159}
161 160
162static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, 161static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
163 int nents, enum dma_data_direction direction) 162 int nents, enum dma_data_direction direction,
163 struct dma_attrs *attrs)
164{ 164{
165 struct scatterlist *sg; 165 struct scatterlist *sg;
166 int i; 166 int i;
@@ -174,7 +174,8 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
174} 174}
175 175
176static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg, 176static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
177 int nents, enum dma_data_direction direction) 177 int nents, enum dma_data_direction direction,
178 struct dma_attrs *attrs)
178{ 179{
179} 180}
180 181
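
The device_to_mask() helper deleted at the top of this file is still called by the retained iommu map paths above, so in this series it must move to a shared header (its new home is not visible in this hunk). The deleted logic, kept here as a reference sketch:

    static inline unsigned long device_to_mask(struct device *dev)
    {
            if (dev->dma_mask && *dev->dma_mask)
                    return *dev->dma_mask;

            /* Assume devices without a set mask take 32-bit addresses */
            return 0xfffffffful;
    }
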
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 0c8614d9875c..da52269aec1e 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -30,6 +30,7 @@
30#include <asm/ppc_asm.h> 30#include <asm/ppc_asm.h>
31#include <asm/asm-offsets.h> 31#include <asm/asm-offsets.h>
32#include <asm/unistd.h> 32#include <asm/unistd.h>
33#include <asm/ftrace.h>
33 34
34#undef SHOW_SYSCALLS 35#undef SHOW_SYSCALLS
35#undef SHOW_SYSCALLS_TASK 36#undef SHOW_SYSCALLS_TASK
@@ -44,29 +45,54 @@
44#endif 45#endif
45 46
46#ifdef CONFIG_BOOKE 47#ifdef CONFIG_BOOKE
47#include "head_booke.h"
48#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level) \
49 mtspr exc_level##_SPRG,r8; \
50 BOOKE_LOAD_EXC_LEVEL_STACK(exc_level); \
51 lwz r0,GPR10-INT_FRAME_SIZE(r8); \
52 stw r0,GPR10(r11); \
53 lwz r0,GPR11-INT_FRAME_SIZE(r8); \
54 stw r0,GPR11(r11); \
55 mfspr r8,exc_level##_SPRG
56
57 .globl mcheck_transfer_to_handler 48 .globl mcheck_transfer_to_handler
58mcheck_transfer_to_handler: 49mcheck_transfer_to_handler:
59 TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK) 50 mfspr r0,SPRN_DSRR0
60 b transfer_to_handler_full 51 stw r0,_DSRR0(r11)
52 mfspr r0,SPRN_DSRR1
53 stw r0,_DSRR1(r11)
54 /* fall through */
61 55
62 .globl debug_transfer_to_handler 56 .globl debug_transfer_to_handler
63debug_transfer_to_handler: 57debug_transfer_to_handler:
64 TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG) 58 mfspr r0,SPRN_CSRR0
65 b transfer_to_handler_full 59 stw r0,_CSRR0(r11)
60 mfspr r0,SPRN_CSRR1
61 stw r0,_CSRR1(r11)
62 /* fall through */
66 63
67 .globl crit_transfer_to_handler 64 .globl crit_transfer_to_handler
68crit_transfer_to_handler: 65crit_transfer_to_handler:
69 TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT) 66#ifdef CONFIG_FSL_BOOKE
67 mfspr r0,SPRN_MAS0
68 stw r0,MAS0(r11)
69 mfspr r0,SPRN_MAS1
70 stw r0,MAS1(r11)
71 mfspr r0,SPRN_MAS2
72 stw r0,MAS2(r11)
73 mfspr r0,SPRN_MAS3
74 stw r0,MAS3(r11)
75 mfspr r0,SPRN_MAS6
76 stw r0,MAS6(r11)
77#ifdef CONFIG_PHYS_64BIT
78 mfspr r0,SPRN_MAS7
79 stw r0,MAS7(r11)
80#endif /* CONFIG_PHYS_64BIT */
81#endif /* CONFIG_FSL_BOOKE */
82#ifdef CONFIG_44x
83 mfspr r0,SPRN_MMUCR
84 stw r0,MMUCR(r11)
85#endif
86 mfspr r0,SPRN_SRR0
87 stw r0,_SRR0(r11)
88 mfspr r0,SPRN_SRR1
89 stw r0,_SRR1(r11)
90
91 mfspr r8,SPRN_SPRG3
92 lwz r0,KSP_LIMIT(r8)
93 stw r0,SAVED_KSP_LIMIT(r11)
94 rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
95 stw r0,KSP_LIMIT(r8)
70 /* fall through */ 96 /* fall through */
71#endif 97#endif
72 98
@@ -77,6 +103,16 @@ crit_transfer_to_handler:
77 stw r0,GPR10(r11) 103 stw r0,GPR10(r11)
78 lwz r0,crit_r11@l(0) 104 lwz r0,crit_r11@l(0)
79 stw r0,GPR11(r11) 105 stw r0,GPR11(r11)
106 mfspr r0,SPRN_SRR0
107 stw r0,crit_srr0@l(0)
108 mfspr r0,SPRN_SRR1
109 stw r0,crit_srr1@l(0)
110
111 mfspr r8,SPRN_SPRG3
112 lwz r0,KSP_LIMIT(r8)
113 stw r0,saved_ksp_limit@l(0)
114 rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
115 stw r0,KSP_LIMIT(r8)
80 /* fall through */ 116 /* fall through */
81#endif 117#endif
82 118
@@ -141,13 +177,14 @@ transfer_to_handler:
141 cmplw r1,r9 /* if r1 <= ksp_limit */ 177 cmplw r1,r9 /* if r1 <= ksp_limit */
142 ble- stack_ovf /* then the kernel stack overflowed */ 178 ble- stack_ovf /* then the kernel stack overflowed */
1435: 1795:
144#ifdef CONFIG_6xx 180#if defined(CONFIG_6xx) || defined(CONFIG_E500)
145 rlwinm r9,r1,0,0,31-THREAD_SHIFT 181 rlwinm r9,r1,0,0,31-THREAD_SHIFT
146 tophys(r9,r9) /* check local flags */ 182 tophys(r9,r9) /* check local flags */
147 lwz r12,TI_LOCAL_FLAGS(r9) 183 lwz r12,TI_LOCAL_FLAGS(r9)
148 mtcrf 0x01,r12 184 mtcrf 0x01,r12
149 bt- 31-TLF_NAPPING,4f 185 bt- 31-TLF_NAPPING,4f
150#endif /* CONFIG_6xx */ 186 bt- 31-TLF_SLEEPING,7f
187#endif /* CONFIG_6xx || CONFIG_E500 */
151 .globl transfer_to_handler_cont 188 .globl transfer_to_handler_cont
152transfer_to_handler_cont: 189transfer_to_handler_cont:
1533: 1903:
@@ -160,10 +197,17 @@ transfer_to_handler_cont:
160 SYNC 197 SYNC
161 RFI /* jump to handler, enable MMU */ 198 RFI /* jump to handler, enable MMU */
162 199
163#ifdef CONFIG_6xx 200#if defined (CONFIG_6xx) || defined(CONFIG_E500)
1644: rlwinm r12,r12,0,~_TLF_NAPPING 2014: rlwinm r12,r12,0,~_TLF_NAPPING
165 stw r12,TI_LOCAL_FLAGS(r9) 202 stw r12,TI_LOCAL_FLAGS(r9)
166 b power_save_6xx_restore 203 b power_save_ppc32_restore
204
2057: rlwinm r12,r12,0,~_TLF_SLEEPING
206 stw r12,TI_LOCAL_FLAGS(r9)
207 lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */
208 rlwinm r9,r9,0,~MSR_EE
209 lwz r12,_LINK(r11) /* and return to address in LR */
210 b fast_exception_return
167#endif 211#endif
168 212
169/* 213/*
@@ -668,7 +712,7 @@ user_exc_return: /* r10 contains MSR_KERNEL here */
668 /* Check current_thread_info()->flags */ 712 /* Check current_thread_info()->flags */
669 rlwinm r9,r1,0,0,(31-THREAD_SHIFT) 713 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
670 lwz r9,TI_FLAGS(r9) 714 lwz r9,TI_FLAGS(r9)
671 andi. r0,r9,(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NEED_RESCHED) 715 andi. r0,r9,_TIF_USER_WORK_MASK
672 bne do_work 716 bne do_work
673 717
674restore_user: 718restore_user:
@@ -859,17 +903,90 @@ exc_exit_restart_end:
859 exc_lvl_rfi; \ 903 exc_lvl_rfi; \
860 b .; /* prevent prefetch past exc_lvl_rfi */ 904 b .; /* prevent prefetch past exc_lvl_rfi */
861 905
906#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1) \
907 lwz r9,_##exc_lvl_srr0(r1); \
908 lwz r10,_##exc_lvl_srr1(r1); \
909 mtspr SPRN_##exc_lvl_srr0,r9; \
910 mtspr SPRN_##exc_lvl_srr1,r10;
911
912#if defined(CONFIG_FSL_BOOKE)
913#ifdef CONFIG_PHYS_64BIT
914#define RESTORE_MAS7 \
915 lwz r11,MAS7(r1); \
916 mtspr SPRN_MAS7,r11;
917#else
918#define RESTORE_MAS7
919#endif /* CONFIG_PHYS_64BIT */
920#define RESTORE_MMU_REGS \
921 lwz r9,MAS0(r1); \
922 lwz r10,MAS1(r1); \
923 lwz r11,MAS2(r1); \
924 mtspr SPRN_MAS0,r9; \
925 lwz r9,MAS3(r1); \
926 mtspr SPRN_MAS1,r10; \
927 lwz r10,MAS6(r1); \
928 mtspr SPRN_MAS2,r11; \
929 mtspr SPRN_MAS3,r9; \
930 mtspr SPRN_MAS6,r10; \
931 RESTORE_MAS7;
932#elif defined(CONFIG_44x)
933#define RESTORE_MMU_REGS \
934 lwz r9,MMUCR(r1); \
935 mtspr SPRN_MMUCR,r9;
936#else
937#define RESTORE_MMU_REGS
938#endif
939
940#ifdef CONFIG_40x
862 .globl ret_from_crit_exc 941 .globl ret_from_crit_exc
863ret_from_crit_exc: 942ret_from_crit_exc:
943 mfspr r9,SPRN_SPRG3
944 lis r10,saved_ksp_limit@ha;
945 lwz r10,saved_ksp_limit@l(r10);
946 tovirt(r9,r9);
947 stw r10,KSP_LIMIT(r9)
948 lis r9,crit_srr0@ha;
949 lwz r9,crit_srr0@l(r9);
950 lis r10,crit_srr1@ha;
951 lwz r10,crit_srr1@l(r10);
952 mtspr SPRN_SRR0,r9;
953 mtspr SPRN_SRR1,r10;
864 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI) 954 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
955#endif /* CONFIG_40x */
865 956
866#ifdef CONFIG_BOOKE 957#ifdef CONFIG_BOOKE
958 .globl ret_from_crit_exc
959ret_from_crit_exc:
960 mfspr r9,SPRN_SPRG3
961 lwz r10,SAVED_KSP_LIMIT(r1)
962 stw r10,KSP_LIMIT(r9)
963 RESTORE_xSRR(SRR0,SRR1);
964 RESTORE_MMU_REGS;
965 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
966
867 .globl ret_from_debug_exc 967 .globl ret_from_debug_exc
868ret_from_debug_exc: 968ret_from_debug_exc:
969 mfspr r9,SPRN_SPRG3
970 lwz r10,SAVED_KSP_LIMIT(r1)
971 stw r10,KSP_LIMIT(r9)
972 lwz r9,THREAD_INFO-THREAD(r9)
973 rlwinm r10,r1,0,0,(31-THREAD_SHIFT)
974 lwz r10,TI_PREEMPT(r10)
975 stw r10,TI_PREEMPT(r9)
976 RESTORE_xSRR(SRR0,SRR1);
977 RESTORE_xSRR(CSRR0,CSRR1);
978 RESTORE_MMU_REGS;
869 RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI) 979 RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)
870 980
871 .globl ret_from_mcheck_exc 981 .globl ret_from_mcheck_exc
872ret_from_mcheck_exc: 982ret_from_mcheck_exc:
983 mfspr r9,SPRN_SPRG3
984 lwz r10,SAVED_KSP_LIMIT(r1)
985 stw r10,KSP_LIMIT(r9)
986 RESTORE_xSRR(SRR0,SRR1);
987 RESTORE_xSRR(CSRR0,CSRR1);
988 RESTORE_xSRR(DSRR0,DSRR1);
989 RESTORE_MMU_REGS;
873 RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI) 990 RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
874#endif /* CONFIG_BOOKE */ 991#endif /* CONFIG_BOOKE */
875 992
@@ -925,7 +1042,7 @@ recheck:
925 lwz r9,TI_FLAGS(r9) 1042 lwz r9,TI_FLAGS(r9)
926 andi. r0,r9,_TIF_NEED_RESCHED 1043 andi. r0,r9,_TIF_NEED_RESCHED
927 bne- do_resched 1044 bne- do_resched
928 andi. r0,r9,_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK 1045 andi. r0,r9,_TIF_USER_WORK_MASK
929 beq restore_user 1046 beq restore_user
930do_user_signal: /* r10 contains MSR_KERNEL here */ 1047do_user_signal: /* r10 contains MSR_KERNEL here */
931 ori r10,r10,MSR_EE 1048 ori r10,r10,MSR_EE
@@ -1035,3 +1152,129 @@ machine_check_in_rtas:
1035 /* XXX load up BATs and panic */ 1152 /* XXX load up BATs and panic */
1036 1153
1037#endif /* CONFIG_PPC_RTAS */ 1154#endif /* CONFIG_PPC_RTAS */
1155
1156#ifdef CONFIG_FTRACE
1157#ifdef CONFIG_DYNAMIC_FTRACE
1158_GLOBAL(mcount)
1159_GLOBAL(_mcount)
1160 stwu r1,-48(r1)
1161 stw r3, 12(r1)
1162 stw r4, 16(r1)
1163 stw r5, 20(r1)
1164 stw r6, 24(r1)
1165 mflr r3
1166 stw r7, 28(r1)
1167 mfcr r5
1168 stw r8, 32(r1)
1169 stw r9, 36(r1)
1170 stw r10,40(r1)
1171 stw r3, 44(r1)
1172 stw r5, 8(r1)
1173 subi r3, r3, MCOUNT_INSN_SIZE
1174 .globl mcount_call
1175mcount_call:
1176 bl ftrace_stub
1177 nop
1178 lwz r6, 8(r1)
1179 lwz r0, 44(r1)
1180 lwz r3, 12(r1)
1181 mtctr r0
1182 lwz r4, 16(r1)
1183 mtcr r6
1184 lwz r5, 20(r1)
1185 lwz r6, 24(r1)
1186 lwz r0, 52(r1)
1187 lwz r7, 28(r1)
1188 lwz r8, 32(r1)
1189 mtlr r0
1190 lwz r9, 36(r1)
1191 lwz r10,40(r1)
1192 addi r1, r1, 48
1193 bctr
1194
1195_GLOBAL(ftrace_caller)
1196 /* Based on objdump output from glibc */
1197 stwu r1,-48(r1)
1198 stw r3, 12(r1)
1199 stw r4, 16(r1)
1200 stw r5, 20(r1)
1201 stw r6, 24(r1)
1202 mflr r3
1203 lwz r4, 52(r1)
1204 mfcr r5
1205 stw r7, 28(r1)
1206 stw r8, 32(r1)
1207 stw r9, 36(r1)
1208 stw r10,40(r1)
1209 stw r3, 44(r1)
1210 stw r5, 8(r1)
1211 subi r3, r3, MCOUNT_INSN_SIZE
1212.globl ftrace_call
1213ftrace_call:
1214 bl ftrace_stub
1215 nop
1216 lwz r6, 8(r1)
1217 lwz r0, 44(r1)
1218 lwz r3, 12(r1)
1219 mtctr r0
1220 lwz r4, 16(r1)
1221 mtcr r6
1222 lwz r5, 20(r1)
1223 lwz r6, 24(r1)
1224 lwz r0, 52(r1)
1225 lwz r7, 28(r1)
1226 lwz r8, 32(r1)
1227 mtlr r0
1228 lwz r9, 36(r1)
1229 lwz r10,40(r1)
1230 addi r1, r1, 48
1231 bctr
1232#else
1233_GLOBAL(mcount)
1234_GLOBAL(_mcount)
1235 stwu r1,-48(r1)
1236 stw r3, 12(r1)
1237 stw r4, 16(r1)
1238 stw r5, 20(r1)
1239 stw r6, 24(r1)
1240 mflr r3
1241 lwz r4, 52(r1)
1242 mfcr r5
1243 stw r7, 28(r1)
1244 stw r8, 32(r1)
1245 stw r9, 36(r1)
1246 stw r10,40(r1)
1247 stw r3, 44(r1)
1248 stw r5, 8(r1)
1249
1250 subi r3, r3, MCOUNT_INSN_SIZE
1251 LOAD_REG_ADDR(r5, ftrace_trace_function)
1252 lwz r5,0(r5)
1253
1254 mtctr r5
1255 bctrl
1256
1257 nop
1258
1259 lwz r6, 8(r1)
1260 lwz r0, 44(r1)
1261 lwz r3, 12(r1)
1262 mtctr r0
1263 lwz r4, 16(r1)
1264 mtcr r6
1265 lwz r5, 20(r1)
1266 lwz r6, 24(r1)
1267 lwz r0, 52(r1)
1268 lwz r7, 28(r1)
1269 lwz r8, 32(r1)
1270 mtlr r0
1271 lwz r9, 36(r1)
1272 lwz r10,40(r1)
1273 addi r1, r1, 48
1274 bctr
1275#endif
1276
1277_GLOBAL(ftrace_stub)
1278 blr
1279
1280#endif /* CONFIG_FTRACE */
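
In C terms, the non-dynamic 32-bit _mcount above reduces to one indirect call; everything else is register save/restore framing. A sketch, assuming MCOUNT_INSN_SIZE is 4 as on powerpc:

    /* r3 = LR - MCOUNT_INSN_SIZE (the call site in the traced function),
     * r4 = the LR saved at 52(r1)  (the traced function's caller). */
    extern void (*ftrace_trace_function)(unsigned long ip,
                                         unsigned long parent_ip);

    static void sketch_mcount(unsigned long site_lr, unsigned long parent_ip)
    {
            ftrace_trace_function(site_lr - 4 /* MCOUNT_INSN_SIZE */,
                                  parent_ip);
    }
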
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index c0db5b769e55..d7369243ae44 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -31,6 +31,7 @@
31#include <asm/bug.h> 31#include <asm/bug.h>
32#include <asm/ptrace.h> 32#include <asm/ptrace.h>
33#include <asm/irqflags.h> 33#include <asm/irqflags.h>
34#include <asm/ftrace.h>
34 35
35/* 36/*
36 * System calls. 37 * System calls.
@@ -353,6 +354,11 @@ _GLOBAL(_switch)
353 mflr r20 /* Return to switch caller */ 354 mflr r20 /* Return to switch caller */
354 mfmsr r22 355 mfmsr r22
355 li r0, MSR_FP 356 li r0, MSR_FP
357#ifdef CONFIG_VSX
358BEGIN_FTR_SECTION
359 oris r0,r0,MSR_VSX@h /* Disable VSX */
360END_FTR_SECTION_IFSET(CPU_FTR_VSX)
361#endif /* CONFIG_VSX */
356#ifdef CONFIG_ALTIVEC 362#ifdef CONFIG_ALTIVEC
357BEGIN_FTR_SECTION 363BEGIN_FTR_SECTION
358 oris r0,r0,MSR_VEC@h /* Disable altivec */ 364 oris r0,r0,MSR_VEC@h /* Disable altivec */
@@ -383,16 +389,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
383 389
384 ld r8,KSP(r4) /* new stack pointer */ 390 ld r8,KSP(r4) /* new stack pointer */
385BEGIN_FTR_SECTION 391BEGIN_FTR_SECTION
386 b 2f 392 BEGIN_FTR_SECTION_NESTED(95)
387END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
388BEGIN_FTR_SECTION
389 clrrdi r6,r8,28 /* get its ESID */ 393 clrrdi r6,r8,28 /* get its ESID */
390 clrrdi r9,r1,28 /* get current sp ESID */ 394 clrrdi r9,r1,28 /* get current sp ESID */
391END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT) 395 FTR_SECTION_ELSE_NESTED(95)
392BEGIN_FTR_SECTION
393 clrrdi r6,r8,40 /* get its 1T ESID */ 396 clrrdi r6,r8,40 /* get its 1T ESID */
394 clrrdi r9,r1,40 /* get current sp 1T ESID */ 397 clrrdi r9,r1,40 /* get current sp 1T ESID */
395END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) 398 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95)
399FTR_SECTION_ELSE
400 b 2f
401ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB)
396 clrldi. r0,r6,2 /* is new ESID c00000000? */ 402 clrldi. r0,r6,2 /* is new ESID c00000000? */
397 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */ 403 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
398 cror eq,4*cr1+eq,eq 404 cror eq,4*cr1+eq,eq
@@ -870,3 +876,67 @@ _GLOBAL(enter_prom)
870 ld r0,16(r1) 876 ld r0,16(r1)
871 mtlr r0 877 mtlr r0
872 blr 878 blr
879
880#ifdef CONFIG_FTRACE
881#ifdef CONFIG_DYNAMIC_FTRACE
882_GLOBAL(mcount)
883_GLOBAL(_mcount)
 884 /* Taken from objdump output of the 64-bit glibc */
885 mflr r3
886 stdu r1, -112(r1)
887 std r3, 128(r1)
888 subi r3, r3, MCOUNT_INSN_SIZE
889 .globl mcount_call
890mcount_call:
891 bl ftrace_stub
892 nop
893 ld r0, 128(r1)
894 mtlr r0
895 addi r1, r1, 112
896 blr
897
898_GLOBAL(ftrace_caller)
 900 /* Taken from objdump output of the 64-bit glibc */
900 mflr r3
901 ld r11, 0(r1)
902 stdu r1, -112(r1)
903 std r3, 128(r1)
904 ld r4, 16(r11)
905 subi r3, r3, MCOUNT_INSN_SIZE
906.globl ftrace_call
907ftrace_call:
908 bl ftrace_stub
909 nop
910 ld r0, 128(r1)
911 mtlr r0
912 addi r1, r1, 112
913_GLOBAL(ftrace_stub)
914 blr
915#else
916_GLOBAL(mcount)
917 blr
918
919_GLOBAL(_mcount)
 920 /* Taken from objdump output of the 64-bit glibc */
921 mflr r3
922 ld r11, 0(r1)
923 stdu r1, -112(r1)
924 std r3, 128(r1)
925 ld r4, 16(r11)
926
927 subi r3, r3, MCOUNT_INSN_SIZE
928 LOAD_REG_ADDR(r5,ftrace_trace_function)
929 ld r5,0(r5)
930 ld r5,0(r5)
931 mtctr r5
932 bctrl
933
934 nop
935 ld r0, 128(r1)
936 mtlr r0
937 addi r1, r1, 112
938_GLOBAL(ftrace_stub)
939 blr
940
941#endif
942#endif
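
The double ld r5,0(r5) in the non-dynamic 64-bit _mcount above is the ELFv1 ABI at work: the first load fetches the ftrace_trace_function pointer, and the second dereferences the function descriptor it points at to get the code address (the same indirection GET_ADDR performs in ftrace.c below). A sketch of the descriptor, with illustrative field names:

    /* On PPC64 ELFv1 a function symbol names this record, not the code. */
    struct func_desc {
            unsigned long entry;    /* address of the actual instructions */
            unsigned long toc;      /* TOC pointer the callee expects in r2 */
            unsigned long env;      /* environment pointer, unused by C */
    };
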
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 821e152e093c..a088c064ae40 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -24,6 +24,29 @@
24#include <asm/ppc_asm.h> 24#include <asm/ppc_asm.h>
25#include <asm/asm-offsets.h> 25#include <asm/asm-offsets.h>
26 26
27#ifdef CONFIG_VSX
28#define REST_32FPVSRS(n,c,base) \
29BEGIN_FTR_SECTION \
30 b 2f; \
31END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
32 REST_32FPRS(n,base); \
33 b 3f; \
342: REST_32VSRS(n,c,base); \
353:
36
37#define SAVE_32FPVSRS(n,c,base) \
38BEGIN_FTR_SECTION \
39 b 2f; \
40END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
41 SAVE_32FPRS(n,base); \
42 b 3f; \
432: SAVE_32VSRS(n,c,base); \
443:
45#else
46#define REST_32FPVSRS(n,b,base) REST_32FPRS(n, base)
47#define SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base)
48#endif
49
27/* 50/*
28 * This task wants to use the FPU now. 51 * This task wants to use the FPU now.
29 * On UP, disable FP for the task which had the FPU previously, 52 * On UP, disable FP for the task which had the FPU previously,
@@ -34,6 +57,11 @@
34_GLOBAL(load_up_fpu) 57_GLOBAL(load_up_fpu)
35 mfmsr r5 58 mfmsr r5
36 ori r5,r5,MSR_FP 59 ori r5,r5,MSR_FP
60#ifdef CONFIG_VSX
61BEGIN_FTR_SECTION
62 oris r5,r5,MSR_VSX@h
63END_FTR_SECTION_IFSET(CPU_FTR_VSX)
64#endif
37 SYNC 65 SYNC
38 MTMSRD(r5) /* enable use of fpu now */ 66 MTMSRD(r5) /* enable use of fpu now */
39 isync 67 isync
@@ -50,7 +78,7 @@ _GLOBAL(load_up_fpu)
50 beq 1f 78 beq 1f
51 toreal(r4) 79 toreal(r4)
52 addi r4,r4,THREAD /* want last_task_used_math->thread */ 80 addi r4,r4,THREAD /* want last_task_used_math->thread */
53 SAVE_32FPRS(0, r4) 81 SAVE_32FPVSRS(0, r5, r4)
54 mffs fr0 82 mffs fr0
55 stfd fr0,THREAD_FPSCR(r4) 83 stfd fr0,THREAD_FPSCR(r4)
56 PPC_LL r5,PT_REGS(r4) 84 PPC_LL r5,PT_REGS(r4)
@@ -77,7 +105,7 @@ _GLOBAL(load_up_fpu)
77#endif 105#endif
78 lfd fr0,THREAD_FPSCR(r5) 106 lfd fr0,THREAD_FPSCR(r5)
79 MTFSF_L(fr0) 107 MTFSF_L(fr0)
80 REST_32FPRS(0, r5) 108 REST_32FPVSRS(0, r4, r5)
81#ifndef CONFIG_SMP 109#ifndef CONFIG_SMP
82 subi r4,r5,THREAD 110 subi r4,r5,THREAD
83 fromreal(r4) 111 fromreal(r4)
@@ -85,7 +113,7 @@ _GLOBAL(load_up_fpu)
85#endif /* CONFIG_SMP */ 113#endif /* CONFIG_SMP */
86 /* restore registers and return */ 114 /* restore registers and return */
87 /* we haven't used ctr or xer or lr */ 115 /* we haven't used ctr or xer or lr */
88 b fast_exception_return 116 blr
89 117
90/* 118/*
91 * giveup_fpu(tsk) 119 * giveup_fpu(tsk)
@@ -96,6 +124,11 @@ _GLOBAL(load_up_fpu)
96_GLOBAL(giveup_fpu) 124_GLOBAL(giveup_fpu)
97 mfmsr r5 125 mfmsr r5
98 ori r5,r5,MSR_FP 126 ori r5,r5,MSR_FP
127#ifdef CONFIG_VSX
128BEGIN_FTR_SECTION
129 oris r5,r5,MSR_VSX@h
130END_FTR_SECTION_IFSET(CPU_FTR_VSX)
131#endif
99 SYNC_601 132 SYNC_601
100 ISYNC_601 133 ISYNC_601
101 MTMSRD(r5) /* enable use of fpu now */ 134 MTMSRD(r5) /* enable use of fpu now */
@@ -106,7 +139,7 @@ _GLOBAL(giveup_fpu)
106 addi r3,r3,THREAD /* want THREAD of task */ 139 addi r3,r3,THREAD /* want THREAD of task */
107 PPC_LL r5,PT_REGS(r3) 140 PPC_LL r5,PT_REGS(r3)
108 PPC_LCMPI 0,r5,0 141 PPC_LCMPI 0,r5,0
109 SAVE_32FPRS(0, r3) 142 SAVE_32FPVSRS(0, r4 ,r3)
110 mffs fr0 143 mffs fr0
111 stfd fr0,THREAD_FPSCR(r3) 144 stfd fr0,THREAD_FPSCR(r3)
112 beq 1f 145 beq 1f
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
new file mode 100644
index 000000000000..3855ceb937b0
--- /dev/null
+++ b/arch/powerpc/kernel/ftrace.c
@@ -0,0 +1,154 @@
1/*
2 * Code for replacing ftrace calls with jumps.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 *
6 * Thanks go out to P.A. Semi, Inc. for supplying me with a PPC64 box.
7 *
8 */
9
10#include <linux/spinlock.h>
11#include <linux/hardirq.h>
12#include <linux/ftrace.h>
13#include <linux/percpu.h>
14#include <linux/init.h>
15#include <linux/list.h>
16
17#include <asm/cacheflush.h>
18#include <asm/ftrace.h>
19
20
21static unsigned int ftrace_nop = 0x60000000;
22
23#ifdef CONFIG_PPC32
24# define GET_ADDR(addr) addr
25#else
26/* On PowerPC64, function symbols point to function descriptors, not code */
27# define GET_ADDR(addr) *(unsigned long *)addr
28#endif
29
30
31static unsigned int notrace ftrace_calc_offset(long ip, long addr)
32{
33 return (int)(addr - ip);
34}
35
36notrace unsigned char *ftrace_nop_replace(void)
37{
38 return (char *)&ftrace_nop;
39}
40
41notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
42{
43 static unsigned int op;
44
45 /*
46 * It would be nice to just use create_function_call, but that will
47 * update the code itself. Here we need to just return the
48 * instruction that is going to be modified, without modifying the
49 * code.
50 */
51 addr = GET_ADDR(addr);
52
53 /* Set to "bl addr" */
54 op = 0x48000001 | (ftrace_calc_offset(ip, addr) & 0x03fffffc);
55
56 /*
57 * No locking needed, this must be called via kstop_machine
58 * which in essence is like running on a uniprocessor machine.
59 */
60 return (unsigned char *)&op;
61}
62
63#ifdef CONFIG_PPC64
64# define _ASM_ALIGN " .align 3 "
65# define _ASM_PTR " .llong "
66#else
67# define _ASM_ALIGN " .align 2 "
68# define _ASM_PTR " .long "
69#endif
70
71notrace int
72ftrace_modify_code(unsigned long ip, unsigned char *old_code,
73 unsigned char *new_code)
74{
75 unsigned replaced;
76 unsigned old = *(unsigned *)old_code;
77 unsigned new = *(unsigned *)new_code;
78 int faulted = 0;
79
80 /*
81 * Note: Due to modules and __init, code can
 82 * disappear and change; we need to protect against faulting
83 * as well as code changing.
84 *
85 * No real locking needed, this code is run through
86 * kstop_machine.
87 */
88 asm volatile (
89 "1: lwz %1, 0(%2)\n"
90 " cmpw %1, %5\n"
91 " bne 2f\n"
 92 " stw %3, 0(%2)\n"
93 "2:\n"
94 ".section .fixup, \"ax\"\n"
95 "3: li %0, 1\n"
96 " b 2b\n"
97 ".previous\n"
98 ".section __ex_table,\"a\"\n"
99 _ASM_ALIGN "\n"
100 _ASM_PTR "1b, 3b\n"
101 ".previous"
102 : "=r"(faulted), "=r"(replaced)
103 : "r"(ip), "r"(new),
104 "0"(faulted), "r"(old)
105 : "memory");
106
107 if (replaced != old && replaced != new)
108 faulted = 2;
109
110 if (!faulted)
111 flush_icache_range(ip, ip + 8);
112
113 return faulted;
114}
115
116notrace int ftrace_update_ftrace_func(ftrace_func_t func)
117{
118 unsigned long ip = (unsigned long)(&ftrace_call);
119 unsigned char old[MCOUNT_INSN_SIZE], *new;
120 int ret;
121
122 memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
123 new = ftrace_call_replace(ip, (unsigned long)func);
124 ret = ftrace_modify_code(ip, old, new);
125
126 return ret;
127}
128
129notrace int ftrace_mcount_set(unsigned long *data)
130{
131 unsigned long ip = (long)(&mcount_call);
132 unsigned long *addr = data;
133 unsigned char old[MCOUNT_INSN_SIZE], *new;
134
135 /*
136 * Replace the mcount stub with a pointer to the
137 * ip recorder function.
138 */
139 memcpy(old, &mcount_call, MCOUNT_INSN_SIZE);
140 new = ftrace_call_replace(ip, *addr);
141 *addr = ftrace_modify_code(ip, old, new);
142
143 return 0;
144}
145
146int __init ftrace_dyn_arch_init(void *data)
147{
148 /* This is running in kstop_machine */
149
150 ftrace_mcount_set(data);
151
152 return 0;
153}
154
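
ftrace_call_replace() assembles the branch-and-link by hand: opcode 18 with the LK bit set is 0x48000001, and the signed 26-bit offset lives in the 0x03fffffc field. A worked example with made-up addresses:

    #include <stdio.h>

    /* Encoding check for the "bl addr" built above; addresses invented. */
    int main(void)
    {
            unsigned long ip   = 0xc0000000000a1000ul;      /* patch site */
            unsigned long addr = 0xc0000000000b2000ul;      /* trace handler */
            unsigned int op = 0x48000001 |
                    ((unsigned int)(addr - ip) & 0x03fffffc);

            printf("0x%08x\n", op);         /* 0x48011001, i.e. bl .+0x11000 */
            return 0;
    }
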
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 785af9b56591..99ee2f0f0f2b 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -421,8 +421,10 @@ BEGIN_FTR_SECTION
421 b ProgramCheck 421 b ProgramCheck
422END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE) 422END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
423 EXCEPTION_PROLOG 423 EXCEPTION_PROLOG
424 bne load_up_fpu /* if from user, just load it up */ 424 beq 1f
425 addi r3,r1,STACK_FRAME_OVERHEAD 425 bl load_up_fpu /* if from user, just load it up */
426 b fast_exception_return
4271: addi r3,r1,STACK_FRAME_OVERHEAD
426 EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception) 428 EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)
427 429
428/* Decrementer */ 430/* Decrementer */
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 8552e67e3a8b..56d8e5d90c5b 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -93,6 +93,12 @@ _ENTRY(crit_r10)
93 .space 4 93 .space 4
94_ENTRY(crit_r11) 94_ENTRY(crit_r11)
95 .space 4 95 .space 4
96_ENTRY(crit_srr0)
97 .space 4
98_ENTRY(crit_srr1)
99 .space 4
100_ENTRY(saved_ksp_limit)
101 .space 4
96 102
97/* 103/*
98 * Exception vector entry code. This code runs with address translation 104 * Exception vector entry code. This code runs with address translation
@@ -148,14 +154,14 @@ _ENTRY(crit_r11)
148 mfcr r10; /* save CR in r10 for now */\ 154 mfcr r10; /* save CR in r10 for now */\
149 mfspr r11,SPRN_SRR3; /* check whether user or kernel */\ 155 mfspr r11,SPRN_SRR3; /* check whether user or kernel */\
150 andi. r11,r11,MSR_PR; \ 156 andi. r11,r11,MSR_PR; \
151 lis r11,critical_stack_top@h; \ 157 lis r11,critirq_ctx@ha; \
152 ori r11,r11,critical_stack_top@l; \ 158 tophys(r11,r11); \
159 lwz r11,critirq_ctx@l(r11); \
153 beq 1f; \ 160 beq 1f; \
154 /* COMING FROM USER MODE */ \ 161 /* COMING FROM USER MODE */ \
155 mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\ 162 mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\
156 lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ 163 lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
157 addi r11,r11,THREAD_SIZE; \ 1641: addi r11,r11,THREAD_SIZE-INT_FRAME_SIZE; /* Alloc an excpt frm */\
1581: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\
159 tophys(r11,r11); \ 165 tophys(r11,r11); \
160 stw r10,_CCR(r11); /* save various registers */\ 166 stw r10,_CCR(r11); /* save various registers */\
161 stw r12,GPR12(r11); \ 167 stw r12,GPR12(r11); \
@@ -996,16 +1002,6 @@ empty_zero_page:
996swapper_pg_dir: 1002swapper_pg_dir:
997 .space PGD_TABLE_SIZE 1003 .space PGD_TABLE_SIZE
998 1004
999
1000/* Stack for handling critical exceptions from kernel mode */
1001 .section .bss
1002 .align 12
1003exception_stack_bottom:
1004 .space 4096
1005critical_stack_top:
1006 .globl exception_stack_top
1007exception_stack_top:
1008
1009/* Room for two PTE pointers, usually the kernel and current user pointers 1005/* Room for two PTE pointers, usually the kernel and current user pointers
1010 * to their respective root page table. 1006 * to their respective root page table.
1011 */ 1007 */
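With the static critical_stack_top array gone, the 40x prolog above fetches the per-CPU critirq_ctx pointer and runs it through tophys(), since critical exceptions enter with address translation disabled. On these parts the kernel is mapped at a fixed offset, so the translation is a plain subtraction; a user-space model (the PAGE_OFFSET value is the usual ppc32 one, shown for illustration only):

	#include <stdio.h>

	#define PAGE_OFFSET 0xc0000000UL	/* typical ppc32 KERNELBASE */

	/* Model of tophys(): virtual-to-physical by fixed offset. */
	static unsigned long tophys(unsigned long va)
	{
		return va - PAGE_OFFSET;
	}

	int main(void)
	{
		printf("%#lx\n", tophys(0xc0003000UL));	/* -> 0x3000 */
		return 0;
	}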
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 22b5d2c459a3..f3a1ea9d7fe4 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -293,119 +293,9 @@ interrupt_base:
293 MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception) 293 MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)
294 294
295 /* Data Storage Interrupt */ 295 /* Data Storage Interrupt */
296 START_EXCEPTION(DataStorage) 296 DATA_STORAGE_EXCEPTION
297 mtspr SPRN_SPRG0, r10 /* Save some working registers */
298 mtspr SPRN_SPRG1, r11
299 mtspr SPRN_SPRG4W, r12
300 mtspr SPRN_SPRG5W, r13
301 mfcr r11
302 mtspr SPRN_SPRG7W, r11
303
304 /*
305 * Check if it was a store fault, if not then bail
306 * because a user tried to access a kernel or
307 * read-protected page. Otherwise, get the
308 * offending address and handle it.
309 */
310 mfspr r10, SPRN_ESR
311 andis. r10, r10, ESR_ST@h
312 beq 2f
313
314 mfspr r10, SPRN_DEAR /* Get faulting address */
315
316 /* If we are faulting a kernel address, we have to use the
317 * kernel page tables.
318 */
319 lis r11, PAGE_OFFSET@h
320 cmplw r10, r11
321 blt+ 3f
322 lis r11, swapper_pg_dir@h
323 ori r11, r11, swapper_pg_dir@l
324
325 mfspr r12,SPRN_MMUCR
326 rlwinm r12,r12,0,0,23 /* Clear TID */
327
328 b 4f
329
330 /* Get the PGD for the current thread */
3313:
332 mfspr r11,SPRN_SPRG3
333 lwz r11,PGDIR(r11)
334
335 /* Load PID into MMUCR TID */
336 mfspr r12,SPRN_MMUCR /* Get MMUCR */
337 mfspr r13,SPRN_PID /* Get PID */
338 rlwimi r12,r13,0,24,31 /* Set TID */
339
3404:
341 mtspr SPRN_MMUCR,r12
342
343 rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */
344 lwzx r11, r12, r11 /* Get pgd/pmd entry */
345 rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */
346 beq 2f /* Bail if no table */
347
348 rlwimi r12, r10, 23, 20, 28 /* Compute pte address */
349 lwz r11, 4(r12) /* Get pte entry */
350
351 andi. r13, r11, _PAGE_RW /* Is it writeable? */
352 beq 2f /* Bail if not */
353
354 /* Update 'changed'.
355 */
356 ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
357 stw r11, 4(r12) /* Update Linux page table */
358
359 li r13, PPC44x_TLB_SR@l /* Set SR */
360 rlwimi r13, r11, 29, 29, 29 /* SX = _PAGE_HWEXEC */
361 rlwimi r13, r11, 0, 30, 30 /* SW = _PAGE_RW */
362 rlwimi r13, r11, 29, 28, 28 /* UR = _PAGE_USER */
363 rlwimi r12, r11, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */
364 rlwimi r12, r11, 29, 30, 30 /* (_PAGE_USER>>3)->r12 */
365 and r12, r12, r11 /* HWEXEC/RW & USER */
366 rlwimi r13, r12, 0, 26, 26 /* UX = HWEXEC & USER */
367 rlwimi r13, r12, 3, 27, 27 /* UW = RW & USER */
368
369 rlwimi r11,r13,0,26,31 /* Insert static perms */
370
371 /*
372 * Clear U0-U3 and WL1 IL1I IL1D IL2I IL2D bits which are added
373 * on newer 440 cores like the 440x6 used on AMCC 460EX/460GT (see
374 * include/asm-powerpc/pgtable-ppc32.h for details).
375 */
376 rlwinm r11,r11,0,20,10
377
378 /* find the TLB index that caused the fault. It has to be here. */
379 tlbsx r10, 0, r10
380
381 tlbwe r11, r10, PPC44x_TLB_ATTRIB /* Write ATTRIB */
382
383 /* Done...restore registers and get out of here.
384 */
385 mfspr r11, SPRN_SPRG7R
386 mtcr r11
387 mfspr r13, SPRN_SPRG5R
388 mfspr r12, SPRN_SPRG4R
389 297
390 mfspr r11, SPRN_SPRG1 298 /* Instruction Storage Interrupt */
391 mfspr r10, SPRN_SPRG0
392 rfi /* Force context change */
393
3942:
395 /*
396 * The bailout. Restore registers to pre-exception conditions
397 * and call the heavyweights to help us out.
398 */
399 mfspr r11, SPRN_SPRG7R
400 mtcr r11
401 mfspr r13, SPRN_SPRG5R
402 mfspr r12, SPRN_SPRG4R
403
404 mfspr r11, SPRN_SPRG1
405 mfspr r10, SPRN_SPRG0
406 b data_access
407
408 /* Instruction Storage Interrupt */
409 INSTRUCTION_STORAGE_EXCEPTION 299 INSTRUCTION_STORAGE_EXCEPTION
410 300
411 /* External Input Interrupt */ 301 /* External Input Interrupt */
@@ -423,7 +313,6 @@ interrupt_base:
423#else 313#else
424 EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE) 314 EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
425#endif 315#endif
426
427 /* System Call Interrupt */ 316 /* System Call Interrupt */
428 START_EXCEPTION(SystemCall) 317 START_EXCEPTION(SystemCall)
429 NORMAL_EXCEPTION_PROLOG 318 NORMAL_EXCEPTION_PROLOG
@@ -484,18 +373,57 @@ interrupt_base:
4844: 3734:
485 mtspr SPRN_MMUCR,r12 374 mtspr SPRN_MMUCR,r12
486 375
376 /* Mask of required permission bits. Note that while we
377 * do copy ESR:ST to _PAGE_RW position as trying to write
378 * to an RO page is pretty common, we don't do it with
379 * _PAGE_DIRTY. We could do it, but it's a fairly rare
380 * event so I'd rather take the overhead when it happens
381 * rather than adding an instruction here. We should measure
382 * whether the whole thing is worth it in the first place
383 * as we could avoid loading SPRN_ESR completely in the first
384 * place...
385 *
386 * TODO: Is it worth doing that mfspr & rlwimi in the first
387 * place or can we save a couple of instructions here ?
388 */
389 mfspr r12,SPRN_ESR
390 li r13,_PAGE_PRESENT|_PAGE_ACCESSED
391 rlwimi r13,r12,10,30,30
392
393 /* Load the PTE */
487 rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ 394 rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */
488 lwzx r11, r12, r11 /* Get pgd/pmd entry */ 395 lwzx r11, r12, r11 /* Get pgd/pmd entry */
489 rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ 396 rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */
490 beq 2f /* Bail if no table */ 397 beq 2f /* Bail if no table */
491 398
492 rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ 399 rlwimi r12, r10, 23, 20, 28 /* Compute pte address */
493 lwz r11, 4(r12) /* Get pte entry */ 400 lwz r11, 0(r12) /* Get high word of pte entry */
494 andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ 401 lwz r12, 4(r12) /* Get low word of pte entry */
495 beq 2f /* Bail if not present */
496 402
497 ori r11, r11, _PAGE_ACCESSED 403 lis r10,tlb_44x_index@ha
498 stw r11, 4(r12) 404
405 andc. r13,r13,r12 /* Check permission */
406
407 /* Load the next available TLB index */
408 lwz r13,tlb_44x_index@l(r10)
409
 410 	bne	2f			/* Bail if permission mismatch */
411
412 /* Increment, rollover, and store TLB index */
413 addi r13,r13,1
414
415 /* Compare with watermark (instruction gets patched) */
416 .globl tlb_44x_patch_hwater_D
417tlb_44x_patch_hwater_D:
418 cmpwi 0,r13,1 /* reserve entries */
419 ble 5f
420 li r13,0
4215:
422 /* Store the next available TLB index */
423 stw r13,tlb_44x_index@l(r10)
424
425 /* Re-load the faulting address */
426 mfspr r10,SPRN_DEAR
499 427
500 /* Jump to common tlb load */ 428 /* Jump to common tlb load */
501 b finish_tlb_load 429 b finish_tlb_load
@@ -510,7 +438,7 @@ interrupt_base:
510 mfspr r12, SPRN_SPRG4R 438 mfspr r12, SPRN_SPRG4R
511 mfspr r11, SPRN_SPRG1 439 mfspr r11, SPRN_SPRG1
512 mfspr r10, SPRN_SPRG0 440 mfspr r10, SPRN_SPRG0
513 b data_access 441 b DataStorage
514 442
515 /* Instruction TLB Error Interrupt */ 443 /* Instruction TLB Error Interrupt */
516 /* 444 /*
@@ -554,18 +482,42 @@ interrupt_base:
5544: 4824:
555 mtspr SPRN_MMUCR,r12 483 mtspr SPRN_MMUCR,r12
556 484
485 /* Make up the required permissions */
486 li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_HWEXEC
487
557 rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ 488 rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */
558 lwzx r11, r12, r11 /* Get pgd/pmd entry */ 489 lwzx r11, r12, r11 /* Get pgd/pmd entry */
559 rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ 490 rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */
560 beq 2f /* Bail if no table */ 491 beq 2f /* Bail if no table */
561 492
562 rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ 493 rlwimi r12, r10, 23, 20, 28 /* Compute pte address */
563 lwz r11, 4(r12) /* Get pte entry */ 494 lwz r11, 0(r12) /* Get high word of pte entry */
564 andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ 495 lwz r12, 4(r12) /* Get low word of pte entry */
565 beq 2f /* Bail if not present */
566 496
567 ori r11, r11, _PAGE_ACCESSED 497 lis r10,tlb_44x_index@ha
568 stw r11, 4(r12) 498
499 andc. r13,r13,r12 /* Check permission */
500
501 /* Load the next available TLB index */
502 lwz r13,tlb_44x_index@l(r10)
503
 504 	bne	2f			/* Bail if permission mismatch */
505
506 /* Increment, rollover, and store TLB index */
507 addi r13,r13,1
508
509 /* Compare with watermark (instruction gets patched) */
510 .globl tlb_44x_patch_hwater_I
511tlb_44x_patch_hwater_I:
512 cmpwi 0,r13,1 /* reserve entries */
513 ble 5f
514 li r13,0
5155:
516 /* Store the next available TLB index */
517 stw r13,tlb_44x_index@l(r10)
518
519 /* Re-load the faulting address */
520 mfspr r10,SPRN_SRR0
569 521
570 /* Jump to common TLB load point */ 522 /* Jump to common TLB load point */
571 b finish_tlb_load 523 b finish_tlb_load
@@ -587,86 +539,40 @@ interrupt_base:
587 539
588/* 540/*
589 * Local functions 541 * Local functions
590 */ 542 */
591 /*
592 * Data TLB exceptions will bail out to this point
593 * if they can't resolve the lightweight TLB fault.
594 */
595data_access:
596 NORMAL_EXCEPTION_PROLOG
597 mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
598 stw r5,_ESR(r11)
599 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
600 EXC_XFER_EE_LITE(0x0300, handle_page_fault)
601 543
602/* 544/*
603 545
604 * Both the instruction and data TLB miss get to this 546 * Both the instruction and data TLB miss get to this
605 * point to load the TLB. 547 * point to load the TLB.
606 * r10 - EA of fault 548 * r10 - EA of fault
607 * r11 - available to use 549 * r11 - PTE high word value
608 * r12 - Pointer to the 64-bit PTE 550 * r12 - PTE low word value
609 * r13 - available to use 551 * r13 - TLB index
610 * MMUCR - loaded with proper value when we get here 552 * MMUCR - loaded with proper value when we get here
611 * Upon exit, we reload everything and RFI. 553 * Upon exit, we reload everything and RFI.
612 */ 554 */
613finish_tlb_load: 555finish_tlb_load:
 614 	/* 556 	/* Combine RPN & ERPN and write WS 0 */
615 * We set execute, because we don't have the granularity to 557 rlwimi r11,r12,0,0,19
616 * properly set this at the page level (Linux problem). 558 tlbwe r11,r13,PPC44x_TLB_XLAT
617 * If shared is set, we cause a zero PID->TID load.
618 * Many of these bits are software only. Bits we don't set
619 * here we (properly should) assume have the appropriate value.
620 */
621
622 /* Load the next available TLB index */
623 lis r13, tlb_44x_index@ha
624 lwz r13, tlb_44x_index@l(r13)
625 /* Load the TLB high watermark */
626 lis r11, tlb_44x_hwater@ha
627 lwz r11, tlb_44x_hwater@l(r11)
628
629 /* Increment, rollover, and store TLB index */
630 addi r13, r13, 1
631 cmpw 0, r13, r11 /* reserve entries */
632 ble 7f
633 li r13, 0
6347:
635 /* Store the next available TLB index */
636 lis r11, tlb_44x_index@ha
637 stw r13, tlb_44x_index@l(r11)
638
639 lwz r11, 0(r12) /* Get MS word of PTE */
640 lwz r12, 4(r12) /* Get LS word of PTE */
641 rlwimi r11, r12, 0, 0 , 19 /* Insert RPN */
642 tlbwe r11, r13, PPC44x_TLB_XLAT /* Write XLAT */
643 559
644 /* 560 /*
645 * Create PAGEID. This is the faulting address, 561 * Create WS1. This is the faulting address (EPN),
646 * page size, and valid flag. 562 * page size, and valid flag.
647 */ 563 */
648 li r11, PPC44x_TLB_VALID | PPC44x_TLB_4K 564 li r11,PPC44x_TLB_VALID | PPC44x_TLB_4K
649 rlwimi r10, r11, 0, 20, 31 /* Insert valid and page size */ 565 rlwimi r10,r11,0,20,31 /* Insert valid and page size*/
650 tlbwe r10, r13, PPC44x_TLB_PAGEID /* Write PAGEID */ 566 tlbwe r10,r13,PPC44x_TLB_PAGEID /* Write PAGEID */
651 567
652 li r10, PPC44x_TLB_SR@l /* Set SR */ 568 /* And WS 2 */
653 rlwimi r10, r12, 0, 30, 30 /* Set SW = _PAGE_RW */ 569 li r10,0xf85 /* Mask to apply from PTE */
654 rlwimi r10, r12, 29, 29, 29 /* SX = _PAGE_HWEXEC */ 570 rlwimi r10,r12,29,30,30 /* DIRTY -> SW position */
655 rlwimi r10, r12, 29, 28, 28 /* UR = _PAGE_USER */ 571 and r11,r12,r10 /* Mask PTE bits to keep */
656 rlwimi r11, r12, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */ 572 andi. r10,r12,_PAGE_USER /* User page ? */
657 and r11, r12, r11 /* HWEXEC & USER */ 573 beq 1f /* nope, leave U bits empty */
658 rlwimi r10, r11, 0, 26, 26 /* UX = HWEXEC & USER */ 574 rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */
659 5751: tlbwe r11,r13,PPC44x_TLB_ATTRIB /* Write ATTRIB */
660 rlwimi r12, r10, 0, 26, 31 /* Insert static perms */
661
662 /*
663 * Clear U0-U3 and WL1 IL1I IL1D IL2I IL2D bits which are added
664 * on newer 440 cores like the 440x6 used on AMCC 460EX/460GT (see
665 * include/asm-powerpc/pgtable-ppc32.h for details).
666 */
667 rlwinm r12, r12, 0, 20, 10
668
669 tlbwe r12, r13, PPC44x_TLB_ATTRIB /* Write ATTRIB */
670 576
671 /* Done...restore registers and get out of here. 577 /* Done...restore registers and get out of here.
672 */ 578 */
@@ -742,15 +648,6 @@ empty_zero_page:
742swapper_pg_dir: 648swapper_pg_dir:
743 .space PGD_TABLE_SIZE 649 .space PGD_TABLE_SIZE
744 650
745/* Reserved 4k for the critical exception stack & 4k for the machine
746 * check stack per CPU for kernel mode exceptions */
747 .section .bss
748 .align 12
749exception_stack_bottom:
750 .space BOOKE_EXCEPTION_STACK_SIZE
751 .globl exception_stack_top
752exception_stack_top:
753
754/* 651/*
755 * Room for two PTE pointers, usually the kernel and current user pointers 652 * Room for two PTE pointers, usually the kernel and current user pointers
756 * to their respective root page table. 653 * to their respective root page table.
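The rewritten 44x miss handlers above replace the old "present? then mark ACCESSED" sequence with a single mask check: build the bits the access requires (copying ESR[ST] into the _PAGE_RW position for stores), andc that against the PTE, and any bit left standing sends the fault to the heavyweight handler. A user-space model of the test (the bit values are illustrative, not the real ppc32 PTE layout):

	#include <stdio.h>

	#define _PAGE_PRESENT	0x001	/* illustrative values only */
	#define _PAGE_ACCESSED	0x002
	#define _PAGE_RW	0x004

	/* Model of the andc. permission check in the miss handlers. */
	static int tlb_miss_ok(unsigned pte, int is_store)
	{
		unsigned required = _PAGE_PRESENT | _PAGE_ACCESSED;

		if (is_store)
			required |= _PAGE_RW;	/* ESR:ST -> _PAGE_RW slot */
		return (required & ~pte) == 0;	/* leftover bit => bail */
	}

	int main(void)
	{
		printf("%d %d\n",
		       tlb_miss_ok(_PAGE_PRESENT | _PAGE_ACCESSED, 0), /* 1 */
		       tlb_miss_ok(_PAGE_PRESENT, 1));	/* 0: !ACCESSED, !RW */
		return 0;
	}

Note that a clean PTE still bails: letting handle_page_fault() set ACCESSED/DIRTY is what removed the store-back from the fast path.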
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 25e84c0e1166..cc8fb474d520 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -275,7 +275,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
275 . = 0xf00 275 . = 0xf00
276 b performance_monitor_pSeries 276 b performance_monitor_pSeries
277 277
278 STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable) 278 . = 0xf20
279 b altivec_unavailable_pSeries
280
281 . = 0xf40
282 b vsx_unavailable_pSeries
279 283
280#ifdef CONFIG_CBE_RAS 284#ifdef CONFIG_CBE_RAS
281 HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error) 285 HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
@@ -295,6 +299,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
295 299
296 /* moved from 0xf00 */ 300 /* moved from 0xf00 */
297 STD_EXCEPTION_PSERIES(., performance_monitor) 301 STD_EXCEPTION_PSERIES(., performance_monitor)
302 STD_EXCEPTION_PSERIES(., altivec_unavailable)
303 STD_EXCEPTION_PSERIES(., vsx_unavailable)
298 304
299/* 305/*
300 * An interrupt came in while soft-disabled; clear EE in SRR1, 306 * An interrupt came in while soft-disabled; clear EE in SRR1,
@@ -739,7 +745,8 @@ fp_unavailable_common:
739 ENABLE_INTS 745 ENABLE_INTS
740 bl .kernel_fp_unavailable_exception 746 bl .kernel_fp_unavailable_exception
741 BUG_OPCODE 747 BUG_OPCODE
7421: b .load_up_fpu 7481: bl .load_up_fpu
749 b fast_exception_return
743 750
744 .align 7 751 .align 7
745 .globl altivec_unavailable_common 752 .globl altivec_unavailable_common
@@ -747,7 +754,10 @@ altivec_unavailable_common:
747 EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN) 754 EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
748#ifdef CONFIG_ALTIVEC 755#ifdef CONFIG_ALTIVEC
749BEGIN_FTR_SECTION 756BEGIN_FTR_SECTION
750 bne .load_up_altivec /* if from user, just load it up */ 757 beq 1f
758 bl .load_up_altivec
759 b fast_exception_return
7601:
751END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 761END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
752#endif 762#endif
753 bl .save_nvgprs 763 bl .save_nvgprs
@@ -827,9 +837,70 @@ _STATIC(load_up_altivec)
827 std r4,0(r3) 837 std r4,0(r3)
828#endif /* CONFIG_SMP */ 838#endif /* CONFIG_SMP */
829 /* restore registers and return */ 839 /* restore registers and return */
830 b fast_exception_return 840 blr
831#endif /* CONFIG_ALTIVEC */ 841#endif /* CONFIG_ALTIVEC */
832 842
843 .align 7
844 .globl vsx_unavailable_common
845vsx_unavailable_common:
846 EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
847#ifdef CONFIG_VSX
848BEGIN_FTR_SECTION
849 bne .load_up_vsx
8501:
851END_FTR_SECTION_IFSET(CPU_FTR_VSX)
852#endif
853 bl .save_nvgprs
854 addi r3,r1,STACK_FRAME_OVERHEAD
855 ENABLE_INTS
856 bl .vsx_unavailable_exception
857 b .ret_from_except
858
859#ifdef CONFIG_VSX
860/*
861 * load_up_vsx(unused, unused, tsk)
862 * Disable VSX for the task which had it previously,
863 * and save its vector registers in its thread_struct.
864 * Reuse the fp and vsx saves, but first check to see if they have
865 * been saved already.
866 * On entry: r13 == 'current' && last_task_used_vsx != 'current'
867 */
868_STATIC(load_up_vsx)
869/* Load FP and VSX registers if they haven't been done yet */
870 andi. r5,r12,MSR_FP
871 beql+ load_up_fpu /* skip if already loaded */
872 andis. r5,r12,MSR_VEC@h
873 beql+ load_up_altivec /* skip if already loaded */
874
875#ifndef CONFIG_SMP
876 ld r3,last_task_used_vsx@got(r2)
877 ld r4,0(r3)
878 cmpdi 0,r4,0
879 beq 1f
880 /* Disable VSX for last_task_used_vsx */
881 addi r4,r4,THREAD
882 ld r5,PT_REGS(r4)
883 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
884 lis r6,MSR_VSX@h
885 andc r6,r4,r6
886 std r6,_MSR-STACK_FRAME_OVERHEAD(r5)
8871:
888#endif /* CONFIG_SMP */
889 ld r4,PACACURRENT(r13)
890 addi r4,r4,THREAD /* Get THREAD */
891 li r6,1
892 stw r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */
893 /* enable use of VSX after return */
894 oris r12,r12,MSR_VSX@h
895 std r12,_MSR(r1)
896#ifndef CONFIG_SMP
897 /* Update last_task_used_math to 'current' */
898 ld r4,PACACURRENT(r13)
899 std r4,0(r3)
900#endif /* CONFIG_SMP */
901 b fast_exception_return
902#endif /* CONFIG_VSX */
903
833/* 904/*
834 * Hash table stuff 905 * Hash table stuff
835 */ 906 */
@@ -1127,7 +1198,6 @@ _GLOBAL(generic_secondary_smp_init)
11273: HMT_LOW 11983: HMT_LOW
1128 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ 1199 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
1129 /* start. */ 1200 /* start. */
1130 sync
1131 1201
1132#ifndef CONFIG_SMP 1202#ifndef CONFIG_SMP
1133 b 3b /* Never go on non-SMP */ 1203 b 3b /* Never go on non-SMP */
@@ -1135,6 +1205,8 @@ _GLOBAL(generic_secondary_smp_init)
1135 cmpwi 0,r23,0 1205 cmpwi 0,r23,0
1136 beq 3b /* Loop until told to go */ 1206 beq 3b /* Loop until told to go */
1137 1207
1208 sync /* order paca.run and cur_cpu_spec */
1209
1138 /* See if we need to call a cpu state restore handler */ 1210 /* See if we need to call a cpu state restore handler */
1139 LOAD_REG_IMMEDIATE(r23, cur_cpu_spec) 1211 LOAD_REG_IMMEDIATE(r23, cur_cpu_spec)
1140 ld r23,0(r23) 1212 ld r23,0(r23)
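load_up_vsx above leans on the fact that VSX has no register image of its own: it reuses the FP and VMX loaders, calling each with beql+ only if the corresponding MSR bit is still clear, then turns on MSR_VSX for the return to userspace. A user-space model of the MSR bookkeeping (bit values as on ppc64, shown for illustration; the real loaders of course also restore register state):

	#include <stdio.h>

	#define MSR_FP	0x2000UL
	#define MSR_VEC	0x02000000UL
	#define MSR_VSX	0x00800000UL

	/* Model of load_up_vsx: load each unit only if not already live. */
	static unsigned long load_up_vsx_model(unsigned long msr)
	{
		if (!(msr & MSR_FP))
			msr |= MSR_FP;	/* load_up_fpu */
		if (!(msr & MSR_VEC))
			msr |= MSR_VEC;	/* load_up_altivec */
		return msr | MSR_VSX;	/* enable VSX after return */
	}

	int main(void)
	{
		printf("%#lx\n", load_up_vsx_model(MSR_FP)); /* adds VEC+VSX */
		return 0;
	}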
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index aefafc6330c9..fce2df988504 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -43,9 +43,7 @@
43 SAVE_2GPRS(7, r11) 43 SAVE_2GPRS(7, r11)
44 44
45/* To handle the additional exception priority levels on 40x and Book-E 45/* To handle the additional exception priority levels on 40x and Book-E
46 * processors we allocate a 4k stack per additional priority level. The various 46 * processors we allocate a stack per additional priority level.
47 * head_xxx.S files allocate space (exception_stack_top) for each priority's
48 * stack times the number of CPUs
49 * 47 *
50 * On 40x critical is the only additional level 48 * On 40x critical is the only additional level
51 * On 44x/e500 we have critical and machine check 49 * On 44x/e500 we have critical and machine check
@@ -61,36 +59,37 @@
61 * going to critical or their own debug level we aren't currently 59 * going to critical or their own debug level we aren't currently
62 * providing configurations that micro-optimize space usage. 60 * providing configurations that micro-optimize space usage.
63 */ 61 */
64#ifdef CONFIG_44x
65#define NUM_EXCEPTION_LVLS 2
66#else
67#define NUM_EXCEPTION_LVLS 3
68#endif
69#define BOOKE_EXCEPTION_STACK_SIZE (4096 * NUM_EXCEPTION_LVLS)
70 62
71/* CRIT_SPRG only used in critical exception handling */ 63/* CRIT_SPRG only used in critical exception handling */
72#define CRIT_SPRG SPRN_SPRG2 64#define CRIT_SPRG SPRN_SPRG2
73/* MCHECK_SPRG only used in machine check exception handling */ 65/* MCHECK_SPRG only used in machine check exception handling */
74#define MCHECK_SPRG SPRN_SPRG6W 66#define MCHECK_SPRG SPRN_SPRG6W
75 67
76#define MCHECK_STACK_TOP (exception_stack_top - 4096) 68#define MCHECK_STACK_BASE mcheckirq_ctx
77#define CRIT_STACK_TOP (exception_stack_top) 69#define CRIT_STACK_BASE critirq_ctx
78 70
79/* only on e200 for now */ 71/* only on e500mc/e200 */
80#define DEBUG_STACK_TOP (exception_stack_top - 8192) 72#define DEBUG_STACK_BASE dbgirq_ctx
73#ifdef CONFIG_PPC_E500MC
74#define DEBUG_SPRG SPRN_SPRG9
75#else
81#define DEBUG_SPRG SPRN_SPRG6W 76#define DEBUG_SPRG SPRN_SPRG6W
77#endif
78
79#define EXC_LVL_FRAME_OVERHEAD (THREAD_SIZE - INT_FRAME_SIZE - EXC_LVL_SIZE)
82 80
83#ifdef CONFIG_SMP 81#ifdef CONFIG_SMP
84#define BOOKE_LOAD_EXC_LEVEL_STACK(level) \ 82#define BOOKE_LOAD_EXC_LEVEL_STACK(level) \
85 mfspr r8,SPRN_PIR; \ 83 mfspr r8,SPRN_PIR; \
86 mulli r8,r8,BOOKE_EXCEPTION_STACK_SIZE; \ 84 slwi r8,r8,2; \
87 neg r8,r8; \ 85 addis r8,r8,level##_STACK_BASE@ha; \
88 addis r8,r8,level##_STACK_TOP@ha; \ 86 lwz r8,level##_STACK_BASE@l(r8); \
89 addi r8,r8,level##_STACK_TOP@l 87 addi r8,r8,EXC_LVL_FRAME_OVERHEAD;
90#else 88#else
91#define BOOKE_LOAD_EXC_LEVEL_STACK(level) \ 89#define BOOKE_LOAD_EXC_LEVEL_STACK(level) \
92 lis r8,level##_STACK_TOP@h; \ 90 lis r8,level##_STACK_BASE@ha; \
93 ori r8,r8,level##_STACK_TOP@l 91 lwz r8,level##_STACK_BASE@l(r8); \
92 addi r8,r8,EXC_LVL_FRAME_OVERHEAD;
94#endif 93#endif
95 94
96/* 95/*
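On SMP the macro above reduces to critirq_ctx[PIR] plus EXC_LVL_FRAME_OVERHEAD, i.e. it indexes the per-CPU context array by the processor ID and skips to the usable frame near the top of that stack. A user-space model of the address computation (the sizes are illustrative; the real INT_FRAME_SIZE and EXC_LVL_SIZE come from asm-offsets):

	#include <stdio.h>

	#define NR_CPUS			4	/* illustrative sizes */
	#define THREAD_SIZE		8192
	#define INT_FRAME_SIZE		192
	#define EXC_LVL_SIZE		64
	#define EXC_LVL_FRAME_OVERHEAD	\
		(THREAD_SIZE - INT_FRAME_SIZE - EXC_LVL_SIZE)

	static char crit_stack[NR_CPUS][THREAD_SIZE];
	static char *critirq_ctx[NR_CPUS];

	int main(void)
	{
		int i;

		for (i = 0; i < NR_CPUS; i++)
			critirq_ctx[i] = crit_stack[i];

		/* What BOOKE_LOAD_EXC_LEVEL_STACK leaves in r8 for CPU 1 */
		printf("frame %td bytes into the stack\n",
		       (critirq_ctx[1] + EXC_LVL_FRAME_OVERHEAD)
		       - crit_stack[1]);
		return 0;
	}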
@@ -104,22 +103,36 @@
104#define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, exc_level_srr0, exc_level_srr1) \ 103#define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, exc_level_srr0, exc_level_srr1) \
105 mtspr exc_level##_SPRG,r8; \ 104 mtspr exc_level##_SPRG,r8; \
106 BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);/* r8 points to the exc_level stack*/ \ 105 BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);/* r8 points to the exc_level stack*/ \
107 stw r10,GPR10-INT_FRAME_SIZE(r8); \ 106 stw r9,GPR9(r8); /* save various registers */\
108 stw r11,GPR11-INT_FRAME_SIZE(r8); \ 107 mfcr r9; /* save CR in r9 for now */\
109 mfcr r10; /* save CR in r10 for now */\ 108 stw r10,GPR10(r8); \
110 mfspr r11,exc_level_srr1; /* check whether user or kernel */\ 109 stw r11,GPR11(r8); \
111 andi. r11,r11,MSR_PR; \ 110 stw r9,_CCR(r8); /* save CR on stack */\
112 mr r11,r8; \ 111 mfspr r10,exc_level_srr1; /* check whether user or kernel */\
113 mfspr r8,exc_level##_SPRG; \ 112 andi. r10,r10,MSR_PR; \
114 beq 1f; \
115 /* COMING FROM USER MODE */ \
116 mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\ 113 mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\
117 lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ 114 lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
118 addi r11,r11,THREAD_SIZE; \ 115 addi r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame */\
1191: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\ 116 beq 1f; \
120 stw r10,_CCR(r11); /* save various registers */\ 117 /* COMING FROM USER MODE */ \
121 stw r12,GPR12(r11); \ 118 stw r9,_CCR(r11); /* save CR */\
119 lwz r10,GPR10(r8); /* copy regs from exception stack */\
120 lwz r9,GPR9(r8); \
121 stw r10,GPR10(r11); \
122 lwz r10,GPR11(r8); \
122 stw r9,GPR9(r11); \ 123 stw r9,GPR9(r11); \
124 stw r10,GPR11(r11); \
125 b 2f; \
126 /* COMING FROM PRIV MODE */ \
1271: lwz r9,TI_FLAGS-EXC_LVL_FRAME_OVERHEAD(r11); \
128 lwz r10,TI_PREEMPT-EXC_LVL_FRAME_OVERHEAD(r11); \
129 stw r9,TI_FLAGS-EXC_LVL_FRAME_OVERHEAD(r8); \
130 stw r10,TI_PREEMPT-EXC_LVL_FRAME_OVERHEAD(r8); \
131 lwz r9,TI_TASK-EXC_LVL_FRAME_OVERHEAD(r11); \
132 stw r9,TI_TASK-EXC_LVL_FRAME_OVERHEAD(r8); \
133 mr r11,r8; \
1342: mfspr r8,exc_level##_SPRG; \
135 stw r12,GPR12(r11); /* save various registers */\
123 mflr r10; \ 136 mflr r10; \
124 stw r10,_LINK(r11); \ 137 stw r10,_LINK(r11); \
125 mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\ 138 mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\
@@ -231,7 +244,7 @@ label:
231 * the code where the exception occurred (since exception entry \ 244 * the code where the exception occurred (since exception entry \
232 * doesn't turn off DE automatically). We simulate the effect \ 245 * doesn't turn off DE automatically). We simulate the effect \
233 * of turning off DE on entry to an exception handler by turning \ 246 * of turning off DE on entry to an exception handler by turning \
234 * off DE in the CSRR1 value and clearing the debug status. \ 247 * off DE in the DSRR1 value and clearing the debug status. \
235 */ \ 248 */ \
236 mfspr r10,SPRN_DBSR; /* check single-step/branch taken */ \ 249 mfspr r10,SPRN_DBSR; /* check single-step/branch taken */ \
237 andis. r10,r10,DBSR_IC@h; \ 250 andis. r10,r10,DBSR_IC@h; \
@@ -262,17 +275,17 @@ label:
262 lwz r12,GPR12(r11); \ 275 lwz r12,GPR12(r11); \
263 mtspr DEBUG_SPRG,r8; \ 276 mtspr DEBUG_SPRG,r8; \
264 BOOKE_LOAD_EXC_LEVEL_STACK(DEBUG); /* r8 points to the debug stack */ \ 277 BOOKE_LOAD_EXC_LEVEL_STACK(DEBUG); /* r8 points to the debug stack */ \
265 lwz r10,GPR10-INT_FRAME_SIZE(r8); \ 278 lwz r10,GPR10(r8); \
266 lwz r11,GPR11-INT_FRAME_SIZE(r8); \ 279 lwz r11,GPR11(r8); \
267 mfspr r8,DEBUG_SPRG; \ 280 mfspr r8,DEBUG_SPRG; \
268 \ 281 \
269 RFDI; \ 282 RFDI; \
270 b .; \ 283 b .; \
271 \ 284 \
272 /* continue normal handling for a critical exception... */ \ 285 /* continue normal handling for a debug exception... */ \
2732: mfspr r4,SPRN_DBSR; \ 2862: mfspr r4,SPRN_DBSR; \
274 addi r3,r1,STACK_FRAME_OVERHEAD; \ 287 addi r3,r1,STACK_FRAME_OVERHEAD; \
275 EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), NOCOPY, debug_transfer_to_handler, ret_from_debug_exc) 288 EXC_XFER_TEMPLATE(DebugException, 0x2008, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), NOCOPY, debug_transfer_to_handler, ret_from_debug_exc)
276 289
277#define DEBUG_CRIT_EXCEPTION \ 290#define DEBUG_CRIT_EXCEPTION \
278 START_EXCEPTION(DebugCrit); \ 291 START_EXCEPTION(DebugCrit); \
@@ -315,8 +328,8 @@ label:
315 lwz r12,GPR12(r11); \ 328 lwz r12,GPR12(r11); \
316 mtspr CRIT_SPRG,r8; \ 329 mtspr CRIT_SPRG,r8; \
317 BOOKE_LOAD_EXC_LEVEL_STACK(CRIT); /* r8 points to the debug stack */ \ 330 BOOKE_LOAD_EXC_LEVEL_STACK(CRIT); /* r8 points to the debug stack */ \
318 lwz r10,GPR10-INT_FRAME_SIZE(r8); \ 331 lwz r10,GPR10(r8); \
319 lwz r11,GPR11-INT_FRAME_SIZE(r8); \ 332 lwz r11,GPR11(r8); \
320 mfspr r8,CRIT_SPRG; \ 333 mfspr r8,CRIT_SPRG; \
321 \ 334 \
322 rfci; \ 335 rfci; \
@@ -327,6 +340,14 @@ label:
327 addi r3,r1,STACK_FRAME_OVERHEAD; \ 340 addi r3,r1,STACK_FRAME_OVERHEAD; \
328 EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), NOCOPY, crit_transfer_to_handler, ret_from_crit_exc) 341 EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), NOCOPY, crit_transfer_to_handler, ret_from_crit_exc)
329 342
343#define DATA_STORAGE_EXCEPTION \
344 START_EXCEPTION(DataStorage) \
345 NORMAL_EXCEPTION_PROLOG; \
346 mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \
347 stw r5,_ESR(r11); \
348 mfspr r4,SPRN_DEAR; /* Grab the DEAR */ \
349 EXC_XFER_EE_LITE(0x0300, handle_page_fault)
350
330#define INSTRUCTION_STORAGE_EXCEPTION \ 351#define INSTRUCTION_STORAGE_EXCEPTION \
331 START_EXCEPTION(InstructionStorage) \ 352 START_EXCEPTION(InstructionStorage) \
332 NORMAL_EXCEPTION_PROLOG; \ 353 NORMAL_EXCEPTION_PROLOG; \
@@ -363,8 +384,31 @@ label:
363#define FP_UNAVAILABLE_EXCEPTION \ 384#define FP_UNAVAILABLE_EXCEPTION \
364 START_EXCEPTION(FloatingPointUnavailable) \ 385 START_EXCEPTION(FloatingPointUnavailable) \
365 NORMAL_EXCEPTION_PROLOG; \ 386 NORMAL_EXCEPTION_PROLOG; \
366 bne load_up_fpu; /* if from user, just load it up */ \ 387 beq 1f; \
367 addi r3,r1,STACK_FRAME_OVERHEAD; \ 388 bl load_up_fpu; /* if from user, just load it up */ \
389 b fast_exception_return; \
3901: addi r3,r1,STACK_FRAME_OVERHEAD; \
368 EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception) 391 EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)
369 392
393#ifndef __ASSEMBLY__
394struct exception_regs {
395 unsigned long mas0;
396 unsigned long mas1;
397 unsigned long mas2;
398 unsigned long mas3;
399 unsigned long mas6;
400 unsigned long mas7;
401 unsigned long srr0;
402 unsigned long srr1;
403 unsigned long csrr0;
404 unsigned long csrr1;
405 unsigned long dsrr0;
406 unsigned long dsrr1;
407 unsigned long saved_ksp_limit;
408};
409
410/* ensure this structure is always sized to a multiple of the stack alignment */
411#define STACK_EXC_LVL_FRAME_SIZE _ALIGN_UP(sizeof (struct exception_regs), 16)
412
413#endif /* __ASSEMBLY__ */
370#endif /* __HEAD_BOOKE_H__ */ 414#endif /* __HEAD_BOOKE_H__ */
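The new struct exception_regs holds 13 unsigned longs, i.e. 52 bytes on ppc32, so the _ALIGN_UP above rounds STACK_EXC_LVL_FRAME_SIZE to 64. A standalone check (user-space sketch; _ALIGN_UP is reimplemented here with the usual round-up idiom):

	#include <stdio.h>

	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

	struct exception_regs_model {
		unsigned long regs[13];	/* the 13 fields listed above */
	};

	int main(void)
	{
		unsigned long sz = sizeof(struct exception_regs_model);

		/* 52 -> 64 with 4-byte longs; 104 -> 112 on an LP64 host */
		printf("%lu -> %lu\n", sz, ALIGN_UP(sz, 16));
		return 0;
	}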
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index e581524d85bc..3cb52fa0eda3 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -39,6 +39,7 @@
39#include <asm/thread_info.h> 39#include <asm/thread_info.h>
40#include <asm/ppc_asm.h> 40#include <asm/ppc_asm.h>
41#include <asm/asm-offsets.h> 41#include <asm/asm-offsets.h>
42#include <asm/cache.h>
42#include "head_booke.h" 43#include "head_booke.h"
43 44
44/* As with the other PowerPC ports, it is expected that when code 45/* As with the other PowerPC ports, it is expected that when code
@@ -150,16 +151,11 @@ skpinv: addi r6,r6,1 /* Increment */
150 /* Invalidate TLB0 */ 151 /* Invalidate TLB0 */
151 li r6,0x04 152 li r6,0x04
152 tlbivax 0,r6 153 tlbivax 0,r6
153#ifdef CONFIG_SMP 154 TLBSYNC
154 tlbsync
155#endif
156 /* Invalidate TLB1 */ 155 /* Invalidate TLB1 */
157 li r6,0x0c 156 li r6,0x0c
158 tlbivax 0,r6 157 tlbivax 0,r6
159#ifdef CONFIG_SMP 158 TLBSYNC
160 tlbsync
161#endif
162 msync
163 159
164/* 3. Setup a temp mapping and jump to it */ 160/* 3. Setup a temp mapping and jump to it */
165 andi. r5, r3, 0x1 /* Find an entry not used and is non-zero */ 161 andi. r5, r3, 0x1 /* Find an entry not used and is non-zero */
@@ -237,10 +233,7 @@ skpinv: addi r6,r6,1 /* Increment */
237 /* Invalidate TLB1 */ 233 /* Invalidate TLB1 */
238 li r9,0x0c 234 li r9,0x0c
239 tlbivax 0,r9 235 tlbivax 0,r9
240#ifdef CONFIG_SMP 236 TLBSYNC
241 tlbsync
242#endif
243 msync
244 237
245/* 6. Setup KERNELBASE mapping in TLB1[0] */ 238/* 6. Setup KERNELBASE mapping in TLB1[0] */
246 lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */ 239 lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
@@ -282,10 +275,7 @@ skpinv: addi r6,r6,1 /* Increment */
282 /* Invalidate TLB1 */ 275 /* Invalidate TLB1 */
283 li r9,0x0c 276 li r9,0x0c
284 tlbivax 0,r9 277 tlbivax 0,r9
285#ifdef CONFIG_SMP 278 TLBSYNC
286 tlbsync
287#endif
288 msync
289 279
290 /* Establish the interrupt vector offsets */ 280 /* Establish the interrupt vector offsets */
291 SET_IVOR(0, CriticalInput); 281 SET_IVOR(0, CriticalInput);
@@ -304,7 +294,7 @@ skpinv: addi r6,r6,1 /* Increment */
304 SET_IVOR(13, DataTLBError); 294 SET_IVOR(13, DataTLBError);
305 SET_IVOR(14, InstructionTLBError); 295 SET_IVOR(14, InstructionTLBError);
306 SET_IVOR(15, DebugDebug); 296 SET_IVOR(15, DebugDebug);
307#if defined(CONFIG_E500) 297#if defined(CONFIG_E500) && !defined(CONFIG_PPC_E500MC)
308 SET_IVOR(15, DebugCrit); 298 SET_IVOR(15, DebugCrit);
309#endif 299#endif
310 SET_IVOR(32, SPEUnavailable); 300 SET_IVOR(32, SPEUnavailable);
@@ -313,6 +303,9 @@ skpinv: addi r6,r6,1 /* Increment */
313#ifndef CONFIG_E200 303#ifndef CONFIG_E200
314 SET_IVOR(35, PerformanceMonitor); 304 SET_IVOR(35, PerformanceMonitor);
315#endif 305#endif
306#ifdef CONFIG_PPC_E500MC
307 SET_IVOR(36, Doorbell);
308#endif
316 309
317 /* Establish the interrupt vector base */ 310 /* Establish the interrupt vector base */
318 lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ 311 lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */
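The vector-base write above relies on IVPR taking only the high 16 bits of interrupt_base, with each IVORn supplying the 16-byte-aligned low-order offset: the effective vector address is (IVPR & 0xffff0000) | (IVORn & 0xfff0). A quick model of the composition (Book-E register semantics; the sample values are illustrative):

	#include <stdio.h>

	/* Book-E vector composition: IVPR high half | IVORn low bits. */
	static unsigned vector_addr(unsigned ivpr, unsigned ivor)
	{
		return (ivpr & 0xffff0000u) | (ivor & 0x0000fff0u);
	}

	int main(void)
	{
		printf("%#x\n", vector_addr(0xc0000000u, 0x0120u));
		return 0;
	}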
@@ -479,90 +472,16 @@ interrupt_base:
479 472
480 /* Data Storage Interrupt */ 473 /* Data Storage Interrupt */
481 START_EXCEPTION(DataStorage) 474 START_EXCEPTION(DataStorage)
482 mtspr SPRN_SPRG0, r10 /* Save some working registers */ 475 NORMAL_EXCEPTION_PROLOG
483 mtspr SPRN_SPRG1, r11 476 mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
484 mtspr SPRN_SPRG4W, r12 477 stw r5,_ESR(r11)
485 mtspr SPRN_SPRG5W, r13 478 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
486 mfcr r11 479 andis. r10,r5,(ESR_ILK|ESR_DLK)@h
487 mtspr SPRN_SPRG7W, r11 480 bne 1f
488 481 EXC_XFER_EE_LITE(0x0300, handle_page_fault)
489 /* 4821:
490 * Check if it was a store fault, if not then bail 483 addi r3,r1,STACK_FRAME_OVERHEAD
491 * because a user tried to access a kernel or 484 EXC_XFER_EE_LITE(0x0300, CacheLockingException)
492 * read-protected page. Otherwise, get the
493 * offending address and handle it.
494 */
495 mfspr r10, SPRN_ESR
496 andis. r10, r10, ESR_ST@h
497 beq 2f
498
499 mfspr r10, SPRN_DEAR /* Get faulting address */
500
501 /* If we are faulting a kernel address, we have to use the
502 * kernel page tables.
503 */
504 lis r11, PAGE_OFFSET@h
505 cmplw 0, r10, r11
506 bge 2f
507
508 /* Get the PGD for the current thread */
5093:
510 mfspr r11,SPRN_SPRG3
511 lwz r11,PGDIR(r11)
5124:
513 FIND_PTE
514
515 /* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */
516 andi. r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE
517 cmpwi 0, r13, _PAGE_RW|_PAGE_USER
518 bne 2f /* Bail if not */
519
520 /* Update 'changed'. */
521 ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
522 stw r11, PTE_FLAGS_OFFSET(r12) /* Update Linux page table */
523
524 /* MAS2 not updated as the entry does exist in the tlb, this
525 fault taken to detect state transition (eg: COW -> DIRTY)
526 */
527 andi. r11, r11, _PAGE_HWEXEC
528 rlwimi r11, r11, 31, 27, 27 /* SX <- _PAGE_HWEXEC */
529 ori r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */
530
531 /* update search PID in MAS6, AS = 0 */
532 mfspr r12, SPRN_PID0
533 slwi r12, r12, 16
534 mtspr SPRN_MAS6, r12
535
536 /* find the TLB index that caused the fault. It has to be here. */
537 tlbsx 0, r10
538
539 /* only update the perm bits, assume the RPN is fine */
540 mfspr r12, SPRN_MAS3
541 rlwimi r12, r11, 0, 20, 31
542 mtspr SPRN_MAS3,r12
543 tlbwe
544
545 /* Done...restore registers and get out of here. */
546 mfspr r11, SPRN_SPRG7R
547 mtcr r11
548 mfspr r13, SPRN_SPRG5R
549 mfspr r12, SPRN_SPRG4R
550 mfspr r11, SPRN_SPRG1
551 mfspr r10, SPRN_SPRG0
552 rfi /* Force context change */
553
5542:
555 /*
556 * The bailout. Restore registers to pre-exception conditions
557 * and call the heavyweights to help us out.
558 */
559 mfspr r11, SPRN_SPRG7R
560 mtcr r11
561 mfspr r13, SPRN_SPRG5R
562 mfspr r12, SPRN_SPRG4R
563 mfspr r11, SPRN_SPRG1
564 mfspr r10, SPRN_SPRG0
565 b data_access
566 485
567 /* Instruction Storage Interrupt */ 486 /* Instruction Storage Interrupt */
568 INSTRUCTION_STORAGE_EXCEPTION 487 INSTRUCTION_STORAGE_EXCEPTION
@@ -641,15 +560,30 @@ interrupt_base:
641 lwz r11,PGDIR(r11) 560 lwz r11,PGDIR(r11)
642 561
6434: 5624:
563 /* Mask of required permission bits. Note that while we
564 * do copy ESR:ST to _PAGE_RW position as trying to write
565 * to an RO page is pretty common, we don't do it with
566 * _PAGE_DIRTY. We could do it, but it's a fairly rare
567 * event so I'd rather take the overhead when it happens
568 * rather than adding an instruction here. We should measure
569 * whether the whole thing is worth it in the first place
570 * as we could avoid loading SPRN_ESR completely in the first
571 * place...
572 *
573 * TODO: Is it worth doing that mfspr & rlwimi in the first
574 * place or can we save a couple of instructions here ?
575 */
576 mfspr r12,SPRN_ESR
577 li r13,_PAGE_PRESENT|_PAGE_ACCESSED
578 rlwimi r13,r12,11,29,29
579
644 FIND_PTE 580 FIND_PTE
645 andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ 581 andc. r13,r13,r11 /* Check permission */
 646 	beq	2f			/* Bail if not present */	 582 	bne	2f			/* Bail if permission mismatch */
647 583
648#ifdef CONFIG_PTE_64BIT 584#ifdef CONFIG_PTE_64BIT
649 lwz r13, 0(r12) 585 lwz r13, 0(r12)
650#endif 586#endif
651 ori r11, r11, _PAGE_ACCESSED
652 stw r11, PTE_FLAGS_OFFSET(r12)
653 587
654 /* Jump to common tlb load */ 588 /* Jump to common tlb load */
655 b finish_tlb_load 589 b finish_tlb_load
@@ -663,7 +597,7 @@ interrupt_base:
663 mfspr r12, SPRN_SPRG4R 597 mfspr r12, SPRN_SPRG4R
664 mfspr r11, SPRN_SPRG1 598 mfspr r11, SPRN_SPRG1
665 mfspr r10, SPRN_SPRG0 599 mfspr r10, SPRN_SPRG0
666 b data_access 600 b DataStorage
667 601
668 /* Instruction TLB Error Interrupt */ 602 /* Instruction TLB Error Interrupt */
669 /* 603 /*
@@ -701,15 +635,16 @@ interrupt_base:
701 lwz r11,PGDIR(r11) 635 lwz r11,PGDIR(r11)
702 636
7034: 6374:
638 /* Make up the required permissions */
639 li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_HWEXEC
640
704 FIND_PTE 641 FIND_PTE
705 andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ 642 andc. r13,r13,r11 /* Check permission */
 706 	beq	2f			/* Bail if not present */	 643 	bne	2f			/* Bail if permission mismatch */
707 644
708#ifdef CONFIG_PTE_64BIT 645#ifdef CONFIG_PTE_64BIT
709 lwz r13, 0(r12) 646 lwz r13, 0(r12)
710#endif 647#endif
711 ori r11, r11, _PAGE_ACCESSED
712 stw r11, PTE_FLAGS_OFFSET(r12)
713 648
714 /* Jump to common TLB load point */ 649 /* Jump to common TLB load point */
715 b finish_tlb_load 650 b finish_tlb_load
@@ -750,10 +685,13 @@ interrupt_base:
750 /* Performance Monitor */ 685 /* Performance Monitor */
751 EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD) 686 EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
752 687
688#ifdef CONFIG_PPC_E500MC
689 EXCEPTION(0x2070, Doorbell, unknown_exception, EXC_XFER_EE)
690#endif
753 691
754 /* Debug Interrupt */ 692 /* Debug Interrupt */
755 DEBUG_DEBUG_EXCEPTION 693 DEBUG_DEBUG_EXCEPTION
756#if defined(CONFIG_E500) 694#if defined(CONFIG_E500) && !defined(CONFIG_PPC_E500MC)
757 DEBUG_CRIT_EXCEPTION 695 DEBUG_CRIT_EXCEPTION
758#endif 696#endif
759 697
@@ -761,29 +699,13 @@ interrupt_base:
761 * Local functions 699 * Local functions
762 */ 700 */
763 701
764 /*
765 * Data TLB exceptions will bail out to this point
766 * if they can't resolve the lightweight TLB fault.
767 */
768data_access:
769 NORMAL_EXCEPTION_PROLOG
770 mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
771 stw r5,_ESR(r11)
772 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
773 andis. r10,r5,(ESR_ILK|ESR_DLK)@h
774 bne 1f
775 EXC_XFER_EE_LITE(0x0300, handle_page_fault)
7761:
777 addi r3,r1,STACK_FRAME_OVERHEAD
778 EXC_XFER_EE_LITE(0x0300, CacheLockingException)
779
780/* 702/*
781
782 * Both the instruction and data TLB miss get to this 703 * Both the instruction and data TLB miss get to this
783 * point to load the TLB. 704 * point to load the TLB.
784 * r10 - EA of fault 705 * r10 - EA of fault
785 * r11 - TLB (info from Linux PTE) 706 * r11 - TLB (info from Linux PTE)
786 * r12, r13 - available to use 707 * r12 - available to use
708 * r13 - upper bits of PTE (if PTE_64BIT) or available to use
787 * CR5 - results of addr >= PAGE_OFFSET 709 * CR5 - results of addr >= PAGE_OFFSET
788 * MAS0, MAS1 - loaded with proper value when we get here 710 * MAS0, MAS1 - loaded with proper value when we get here
789 * MAS2, MAS3 - will need additional info from Linux PTE 711 * MAS2, MAS3 - will need additional info from Linux PTE
@@ -805,20 +727,14 @@ finish_tlb_load:
805#endif 727#endif
806 mtspr SPRN_MAS2, r12 728 mtspr SPRN_MAS2, r12
807 729
808 bge 5, 1f 730 li r10, (_PAGE_HWEXEC | _PAGE_PRESENT)
809 731 rlwimi r10, r11, 31, 29, 29 /* extract _PAGE_DIRTY into SW */
810 /* is user addr */ 732 and r12, r11, r10
811 andi. r12, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC)
812 andi. r10, r11, _PAGE_USER /* Test for _PAGE_USER */ 733 andi. r10, r11, _PAGE_USER /* Test for _PAGE_USER */
813 srwi r10, r12, 1 734 slwi r10, r12, 1
814 or r12, r12, r10 /* Copy user perms into supervisor */ 735 or r10, r10, r12
815 iseleq r12, 0, r12 736 iseleq r12, r12, r10
816 b 2f 737
817
818 /* is kernel addr */
8191: rlwinm r12, r11, 31, 29, 29 /* Extract _PAGE_HWWRITE into SW */
820 ori r12, r12, (MAS3_SX | MAS3_SR)
821
822#ifdef CONFIG_PTE_64BIT 738#ifdef CONFIG_PTE_64BIT
8232: rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */ 7392: rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */
824 rlwimi r12, r11, 24, 8, 19 /* grab RPN[40:51] */ 740 rlwimi r12, r11, 24, 8, 19 /* grab RPN[40:51] */
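In the rewritten finish_tlb_load above, the supervisor permissions are derived straight from the PTE and, for user pages, mirrored into the user-permission positions one bit slot over; iseleq keeps only the supervisor set when _PAGE_USER is clear. A model of that selection (the bit layout here is schematic, not the exact MAS3 encoding):

	#include <stdio.h>

	#define SR 0x1		/* schematic MAS3-style slots: the user */
	#define UR 0x2		/* bit sits one position left of its    */
	#define SW 0x4		/* supervisor twin                      */
	#define UW 0x8

	/* Model of the slwi/or/iseleq sequence in finish_tlb_load. */
	static unsigned mas3_perms(unsigned sup_bits, int user_page)
	{
		return user_page ? (sup_bits | (sup_bits << 1)) : sup_bits;
	}

	int main(void)
	{
		printf("%#x %#x\n", mas3_perms(SR | SW, 0),
		       mas3_perms(SR | SW, 1));	/* user page adds UR, UW */
		return 0;
	}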
@@ -1065,6 +981,52 @@ _GLOBAL(set_context)
1065 isync /* Force context change */ 981 isync /* Force context change */
1066 blr 982 blr
1067 983
984_GLOBAL(flush_dcache_L1)
985 mfspr r3,SPRN_L1CFG0
986
987 rlwinm r5,r3,9,3 /* Extract cache block size */
988 twlgti r5,1 /* Only 32 and 64 byte cache blocks
989 * are currently defined.
990 */
991 li r4,32
992 subfic r6,r5,2 /* r6 = log2(1KiB / cache block size) -
993 * log2(number of ways)
994 */
995 slw r5,r4,r5 /* r5 = cache block size */
996
997 rlwinm r7,r3,0,0xff /* Extract number of KiB in the cache */
998 mulli r7,r7,13 /* An 8-way cache will require 13
999 * loads per set.
1000 */
1001 slw r7,r7,r6
1002
1003 /* save off HID0 and set DCFA */
1004 mfspr r8,SPRN_HID0
1005 ori r9,r8,HID0_DCFA@l
1006 mtspr SPRN_HID0,r9
1007 isync
1008
1009 lis r4,KERNELBASE@h
1010 mtctr r7
1011
10121: lwz r3,0(r4) /* Load... */
1013 add r4,r4,r5
1014 bdnz 1b
1015
1016 msync
1017 lis r4,KERNELBASE@h
1018 mtctr r7
1019
10201: dcbf 0,r4 /* ...and flush. */
1021 add r4,r4,r5
1022 bdnz 1b
1023
1024 /* restore HID0 */
1025 mtspr SPRN_HID0,r8
1026 isync
1027
1028 blr
1029
1068/* 1030/*
1069 * We put a few things here that have to be page-aligned. This stuff 1031 * We put a few things here that have to be page-aligned. This stuff
1070 * goes at the beginning of the data segment, which is page-aligned. 1032 * goes at the beginning of the data segment, which is page-aligned.
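The loop count in flush_dcache_L1 above falls out as follows: a 32 KiB, 8-way L1 with 32-byte blocks has 32768 / (32 * 8) = 128 sets, and at 13 loads per set that is 1664 iterations; the code reaches the same number as 32 (KiB) * 13 = 416 shifted left by r6 = log2(1KiB/32B) - log2(8) = 2. A user-space model of the CTR computation (field encoding per the comments above: block-size code 0 means 32-byte blocks, 1 means 64-byte):

	#include <stdio.h>

	/* Model of the loop-count math in flush_dcache_L1 (8-way L1). */
	static unsigned flush_loops(unsigned kib, unsigned blksize_code)
	{
		unsigned shift = 2 - blksize_code; /* log2(1K/blk)-log2(8) */

		return (kib * 13) << shift;	/* 13 loads per set */
	}

	int main(void)
	{
		printf("%u %u\n", flush_loops(32, 0),	/* 32B blocks: 1664 */
		       flush_loops(32, 1));		/* 64B blocks:  832 */
		return 0;
	}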
@@ -1080,15 +1042,6 @@ empty_zero_page:
1080swapper_pg_dir: 1042swapper_pg_dir:
1081 .space PGD_TABLE_SIZE 1043 .space PGD_TABLE_SIZE
1082 1044
1083/* Reserved 4k for the critical exception stack & 4k for the machine
1084 * check stack per CPU for kernel mode exceptions */
1085 .section .bss
1086 .align 12
1087exception_stack_bottom:
1088 .space BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS
1089 .globl exception_stack_top
1090exception_stack_top:
1091
1092/* 1045/*
1093 * Room for two PTE pointers, usually the kernel and current user pointers 1046 * Room for two PTE pointers, usually the kernel and current user pointers
1094 * to their respective root page table. 1047 * to their respective root page table.
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 9971159c8040..9d42eb57aea3 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -53,7 +53,7 @@ static struct device ibmebus_bus_device = { /* fake "parent" device */
53struct bus_type ibmebus_bus_type; 53struct bus_type ibmebus_bus_type;
54 54
55/* These devices will automatically be added to the bus during init */ 55/* These devices will automatically be added to the bus during init */
56static struct of_device_id __initdata builtin_matches[] = { 56static struct of_device_id __initdata ibmebus_matches[] = {
57 { .compatible = "IBM,lhca" }, 57 { .compatible = "IBM,lhca" },
58 { .compatible = "IBM,lhea" }, 58 { .compatible = "IBM,lhea" },
59 {}, 59 {},
@@ -82,7 +82,8 @@ static void ibmebus_free_coherent(struct device *dev,
82static dma_addr_t ibmebus_map_single(struct device *dev, 82static dma_addr_t ibmebus_map_single(struct device *dev,
83 void *ptr, 83 void *ptr,
84 size_t size, 84 size_t size,
85 enum dma_data_direction direction) 85 enum dma_data_direction direction,
86 struct dma_attrs *attrs)
86{ 87{
87 return (dma_addr_t)(ptr); 88 return (dma_addr_t)(ptr);
88} 89}
@@ -90,14 +91,16 @@ static dma_addr_t ibmebus_map_single(struct device *dev,
90static void ibmebus_unmap_single(struct device *dev, 91static void ibmebus_unmap_single(struct device *dev,
91 dma_addr_t dma_addr, 92 dma_addr_t dma_addr,
92 size_t size, 93 size_t size,
93 enum dma_data_direction direction) 94 enum dma_data_direction direction,
95 struct dma_attrs *attrs)
94{ 96{
95 return; 97 return;
96} 98}
97 99
98static int ibmebus_map_sg(struct device *dev, 100static int ibmebus_map_sg(struct device *dev,
99 struct scatterlist *sgl, 101 struct scatterlist *sgl,
100 int nents, enum dma_data_direction direction) 102 int nents, enum dma_data_direction direction,
103 struct dma_attrs *attrs)
101{ 104{
102 struct scatterlist *sg; 105 struct scatterlist *sg;
103 int i; 106 int i;
@@ -112,7 +115,8 @@ static int ibmebus_map_sg(struct device *dev,
112 115
113static void ibmebus_unmap_sg(struct device *dev, 116static void ibmebus_unmap_sg(struct device *dev,
114 struct scatterlist *sg, 117 struct scatterlist *sg,
115 int nents, enum dma_data_direction direction) 118 int nents, enum dma_data_direction direction,
119 struct dma_attrs *attrs)
116{ 120{
117 return; 121 return;
118} 122}
@@ -350,7 +354,7 @@ static int __init ibmebus_bus_init(void)
350 return err; 354 return err;
351 } 355 }
352 356
353 err = ibmebus_create_devices(builtin_matches); 357 err = ibmebus_create_devices(ibmebus_matches);
354 if (err) { 358 if (err) {
355 device_unregister(&ibmebus_bus_device); 359 device_unregister(&ibmebus_bus_device);
356 bus_unregister(&ibmebus_bus_type); 360 bus_unregister(&ibmebus_bus_type);
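Every DMA operation in this series grows a trailing struct dma_attrs * argument; implementations that do not care, like the ibmebus pass-through above, simply accept and ignore it, and callers with nothing to say pass NULL. A sketch of the post-change shape of such a hook (assuming the 2.6.27-era linux/dma-attrs.h; the function name is illustrative):

	#include <linux/dma-mapping.h>
	#include <linux/dma-attrs.h>

	/* Sketch: a 1:1 bus maps by returning the kernel virtual address;
	 * the new attrs argument is accepted but unused. */
	static dma_addr_t example_map_single(struct device *dev, void *ptr,
					     size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs)
	{
		return (dma_addr_t)ptr;
	}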
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
index 01bcd52bbf8e..019b02d8844f 100644
--- a/arch/powerpc/kernel/idle_6xx.S
+++ b/arch/powerpc/kernel/idle_6xx.S
@@ -153,7 +153,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
153 * address of current. R11 points to the exception frame (physical 153 * address of current. R11 points to the exception frame (physical
154 * address). We have to preserve r10. 154 * address). We have to preserve r10.
155 */ 155 */
156_GLOBAL(power_save_6xx_restore) 156_GLOBAL(power_save_ppc32_restore)
157 lwz r9,_LINK(r11) /* interrupted in ppc6xx_idle: */ 157 lwz r9,_LINK(r11) /* interrupted in ppc6xx_idle: */
158 stw r9,_NIP(r11) /* make it do a blr */ 158 stw r9,_NIP(r11) /* make it do a blr */
159 159
diff --git a/arch/powerpc/kernel/idle_e500.S b/arch/powerpc/kernel/idle_e500.S
new file mode 100644
index 000000000000..06304034b393
--- /dev/null
+++ b/arch/powerpc/kernel/idle_e500.S
@@ -0,0 +1,93 @@
1/*
2 * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
3 * Dave Liu <daveliu@freescale.com>
 4 * Copied from idle_6xx.S and modified for e500-based processors;
 5 * implements the power_save function used by the idle loop.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/threads.h>
14#include <asm/reg.h>
15#include <asm/page.h>
16#include <asm/cputable.h>
17#include <asm/thread_info.h>
18#include <asm/ppc_asm.h>
19#include <asm/asm-offsets.h>
20
21 .text
22
23_GLOBAL(e500_idle)
24 rlwinm r3,r1,0,0,31-THREAD_SHIFT /* current thread_info */
25 lwz r4,TI_LOCAL_FLAGS(r3) /* set napping bit */
26 ori r4,r4,_TLF_NAPPING /* so when we take an exception */
27 stw r4,TI_LOCAL_FLAGS(r3) /* it will return to our caller */
28
29 /* Check if we can nap or doze, put HID0 mask in r3 */
30 lis r3,0
31BEGIN_FTR_SECTION
32 lis r3,HID0_DOZE@h
33END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
34
35BEGIN_FTR_SECTION
36 /* Now check if user enabled NAP mode */
37 lis r4,powersave_nap@ha
38 lwz r4,powersave_nap@l(r4)
39 cmpwi 0,r4,0
40 beq 1f
41 stwu r1,-16(r1)
42 mflr r0
43 stw r0,20(r1)
44 bl flush_dcache_L1
45 lwz r0,20(r1)
46 addi r1,r1,16
47 mtlr r0
48 lis r3,HID0_NAP@h
49END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
50BEGIN_FTR_SECTION
51 msync
52 li r7,L2CSR0_L2FL@l
53 mtspr SPRN_L2CSR0,r7
542:
55 mfspr r7,SPRN_L2CSR0
56 andi. r4,r7,L2CSR0_L2FL@l
57 bne 2b
58END_FTR_SECTION_IFSET(CPU_FTR_L2CSR|CPU_FTR_CAN_NAP)
591:
60 /* Go to NAP or DOZE now */
61 mfspr r4,SPRN_HID0
62 rlwinm r4,r4,0,~(HID0_DOZE|HID0_NAP|HID0_SLEEP)
63 or r4,r4,r3
64 isync
65 mtspr SPRN_HID0,r4
66 isync
67
68 mfmsr r7
69 oris r7,r7,MSR_WE@h
70 ori r7,r7,MSR_EE
71 msync
72 mtmsr r7
73 isync
742: b 2b
75
76/*
77 * Return from NAP/DOZE mode, restore some CPU specific registers,
78 * r2 containing physical address of current.
79 * r11 points to the exception frame (physical address).
80 * We have to preserve r10.
81 */
82_GLOBAL(power_save_ppc32_restore)
83 lwz r9,_LINK(r11) /* interrupted in e500_idle */
84 stw r9,_NIP(r11) /* make it do a blr */
85
86#ifdef CONFIG_SMP
87 mfspr r12,SPRN_SPRG3
88 lwz r11,TI_CPU(r12) /* get cpu number * 4 */
89 slwi r11,r11,2
90#else
91 li r11,0
92#endif
93 b transfer_to_handler_cont
diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c
index e31aca9208eb..1882bf419fa6 100644
--- a/arch/powerpc/kernel/io.c
+++ b/arch/powerpc/kernel/io.c
@@ -120,7 +120,8 @@ EXPORT_SYMBOL(_outsl_ns);
120 120
121#define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0) 121#define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0)
122 122
123void _memset_io(volatile void __iomem *addr, int c, unsigned long n) 123notrace void
124_memset_io(volatile void __iomem *addr, int c, unsigned long n)
124{ 125{
125 void *p = (void __force *)addr; 126 void *p = (void __force *)addr;
126 u32 lc = c; 127 u32 lc = c;
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 0c663669bc32..2385f68c1751 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -186,7 +186,8 @@ static unsigned long iommu_range_alloc(struct device *dev,
186static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, 186static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
187 void *page, unsigned int npages, 187 void *page, unsigned int npages,
188 enum dma_data_direction direction, 188 enum dma_data_direction direction,
189 unsigned long mask, unsigned int align_order) 189 unsigned long mask, unsigned int align_order,
190 struct dma_attrs *attrs)
190{ 191{
191 unsigned long entry, flags; 192 unsigned long entry, flags;
192 dma_addr_t ret = DMA_ERROR_CODE; 193 dma_addr_t ret = DMA_ERROR_CODE;
@@ -205,7 +206,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
205 206
206 /* Put the TCEs in the HW table */ 207 /* Put the TCEs in the HW table */
207 ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK, 208 ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
208 direction); 209 direction, attrs);
209 210
210 211
211 /* Flush/invalidate TLB caches if necessary */ 212 /* Flush/invalidate TLB caches if necessary */
@@ -267,11 +268,11 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
267 spin_unlock_irqrestore(&(tbl->it_lock), flags); 268 spin_unlock_irqrestore(&(tbl->it_lock), flags);
268} 269}
269 270
270int iommu_map_sg(struct device *dev, struct scatterlist *sglist, 271int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
271 int nelems, unsigned long mask, 272 struct scatterlist *sglist, int nelems,
272 enum dma_data_direction direction) 273 unsigned long mask, enum dma_data_direction direction,
274 struct dma_attrs *attrs)
273{ 275{
274 struct iommu_table *tbl = dev->archdata.dma_data;
275 dma_addr_t dma_next = 0, dma_addr; 276 dma_addr_t dma_next = 0, dma_addr;
276 unsigned long flags; 277 unsigned long flags;
277 struct scatterlist *s, *outs, *segstart; 278 struct scatterlist *s, *outs, *segstart;
@@ -336,7 +337,8 @@ int iommu_map_sg(struct device *dev, struct scatterlist *sglist,
336 npages, entry, dma_addr); 337 npages, entry, dma_addr);
337 338
338 /* Insert into HW table */ 339 /* Insert into HW table */
339 ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction); 340 ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK,
341 direction, attrs);
340 342
341 /* If we are in an open segment, try merging */ 343 /* If we are in an open segment, try merging */
342 if (segstart != s) { 344 if (segstart != s) {
@@ -412,7 +414,8 @@ int iommu_map_sg(struct device *dev, struct scatterlist *sglist,
412 414
413 415
414void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, 416void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
415 int nelems, enum dma_data_direction direction) 417 int nelems, enum dma_data_direction direction,
418 struct dma_attrs *attrs)
416{ 419{
417 struct scatterlist *sg; 420 struct scatterlist *sg;
418 unsigned long flags; 421 unsigned long flags;
@@ -554,7 +557,7 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
554 */ 557 */
555dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl, 558dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
556 void *vaddr, size_t size, unsigned long mask, 559 void *vaddr, size_t size, unsigned long mask,
557 enum dma_data_direction direction) 560 enum dma_data_direction direction, struct dma_attrs *attrs)
558{ 561{
559 dma_addr_t dma_handle = DMA_ERROR_CODE; 562 dma_addr_t dma_handle = DMA_ERROR_CODE;
560 unsigned long uaddr; 563 unsigned long uaddr;
@@ -572,7 +575,8 @@ dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
572 align = PAGE_SHIFT - IOMMU_PAGE_SHIFT; 575 align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
573 576
574 dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction, 577 dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
575 mask >> IOMMU_PAGE_SHIFT, align); 578 mask >> IOMMU_PAGE_SHIFT, align,
579 attrs);
576 if (dma_handle == DMA_ERROR_CODE) { 580 if (dma_handle == DMA_ERROR_CODE) {
577 if (printk_ratelimit()) { 581 if (printk_ratelimit()) {
578 printk(KERN_INFO "iommu_alloc failed, " 582 printk(KERN_INFO "iommu_alloc failed, "
@@ -587,7 +591,8 @@ dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
587} 591}
588 592
589void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle, 593void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
590 size_t size, enum dma_data_direction direction) 594 size_t size, enum dma_data_direction direction,
595 struct dma_attrs *attrs)
591{ 596{
592 unsigned int npages; 597 unsigned int npages;
593 598
@@ -640,7 +645,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
640 nio_pages = size >> IOMMU_PAGE_SHIFT; 645 nio_pages = size >> IOMMU_PAGE_SHIFT;
641 io_order = get_iommu_order(size); 646 io_order = get_iommu_order(size);
642 mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, 647 mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
643 mask >> IOMMU_PAGE_SHIFT, io_order); 648 mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
644 if (mapping == DMA_ERROR_CODE) { 649 if (mapping == DMA_ERROR_CODE) {
645 free_pages((unsigned long)ret, order); 650 free_pages((unsigned long)ret, order);
646 return NULL; 651 return NULL;
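The iommu.c hunks above thread the new struct dma_attrs * argument through every mapping primitive; callers with nothing special to request simply pass NULL, as iommu_alloc_coherent() now does. A minimal caller sketch follows; buf, len and the device_to_mask() helper are placeholders for however the real caller obtains its buffer and DMA mask (dma_64.c carries a static helper by that name):

	struct iommu_table *tbl = dev->archdata.dma_data;
	dma_addr_t handle;

	/* map one buffer for device reads; NULL == no special dma_attrs */
	handle = iommu_map_single(dev, tbl, buf, len, device_to_mask(dev),
				  DMA_TO_DEVICE, NULL);
	if (handle == DMA_ERROR_CODE)
		return -EIO;
	/* ... DMA runs ... */
	iommu_unmap_single(tbl, handle, len, DMA_TO_DEVICE, NULL);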
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index bcc249d90c4d..6ac8612da3c3 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(irq_desc);
98 98
99int distribute_irqs = 1; 99int distribute_irqs = 1;
100 100
101static inline unsigned long get_hard_enabled(void) 101static inline notrace unsigned long get_hard_enabled(void)
102{ 102{
103 unsigned long enabled; 103 unsigned long enabled;
104 104
@@ -108,13 +108,13 @@ static inline unsigned long get_hard_enabled(void)
108 return enabled; 108 return enabled;
109} 109}
110 110
111static inline void set_soft_enabled(unsigned long enable) 111static inline notrace void set_soft_enabled(unsigned long enable)
112{ 112{
113 __asm__ __volatile__("stb %0,%1(13)" 113 __asm__ __volatile__("stb %0,%1(13)"
114 : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); 114 : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
115} 115}
116 116
117void raw_local_irq_restore(unsigned long en) 117notrace void raw_local_irq_restore(unsigned long en)
118{ 118{
119 /* 119 /*
120 * get_paca()->soft_enabled = en; 120 * get_paca()->soft_enabled = en;
@@ -356,9 +356,42 @@ void __init init_IRQ(void)
356{ 356{
357 if (ppc_md.init_IRQ) 357 if (ppc_md.init_IRQ)
358 ppc_md.init_IRQ(); 358 ppc_md.init_IRQ();
359
360 exc_lvl_ctx_init();
361
359 irq_ctx_init(); 362 irq_ctx_init();
360} 363}
361 364
365#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
366struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
367struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
368struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;
369
370void exc_lvl_ctx_init(void)
371{
372 struct thread_info *tp;
373 int i;
374
375 for_each_possible_cpu(i) {
376 memset((void *)critirq_ctx[i], 0, THREAD_SIZE);
377 tp = critirq_ctx[i];
378 tp->cpu = i;
379 tp->preempt_count = 0;
380
381#ifdef CONFIG_BOOKE
382 memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE);
383 tp = dbgirq_ctx[i];
384 tp->cpu = i;
385 tp->preempt_count = 0;
386
387 memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE);
388 tp = mcheckirq_ctx[i];
389 tp->cpu = i;
390 tp->preempt_count = HARDIRQ_OFFSET;
391#endif
392 }
393}
394#endif
362 395
363#ifdef CONFIG_IRQSTACKS 396#ifdef CONFIG_IRQSTACKS
364struct thread_info *softirq_ctx[NR_CPUS] __read_mostly; 397struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
@@ -465,7 +498,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
465 host->revmap_type = revmap_type; 498 host->revmap_type = revmap_type;
466 host->inval_irq = inval_irq; 499 host->inval_irq = inval_irq;
467 host->ops = ops; 500 host->ops = ops;
468 host->of_node = of_node; 501 host->of_node = of_node_get(of_node);
469 502
470 if (host->ops->match == NULL) 503 if (host->ops->match == NULL)
471 host->ops->match = default_irq_host_match; 504 host->ops->match = default_irq_host_match;
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
new file mode 100644
index 000000000000..b4fdf2f2743c
--- /dev/null
+++ b/arch/powerpc/kernel/kgdb.c
@@ -0,0 +1,410 @@
1/*
2 * PowerPC backend to the KGDB stub.
3 *
4 * 1998 (c) Michael AK Tesch (tesch@cs.wisc.edu)
5 * Copyright (C) 2003 Timesys Corporation.
6 * Copyright (C) 2004-2006 MontaVista Software, Inc.
7 * PPC64 Mods (C) 2005 Frank Rowand (frowand@mvista.com)
8 * PPC32 support restored by Vitaly Wool <vwool@ru.mvista.com> and
9 * Sergei Shtylyov <sshtylyov@ru.mvista.com>
10 * Copyright (C) 2007-2008 Wind River Systems, Inc.
11 *
12 * This file is licensed under the terms of the GNU General Public License
13 * version 2. This program is licensed "as is" without any warranty of any
14 * kind, whether express or implied.
15 */
16
17#include <linux/kernel.h>
18#include <linux/init.h>
19#include <linux/kgdb.h>
20#include <linux/smp.h>
21#include <linux/signal.h>
22#include <linux/ptrace.h>
23#include <asm/current.h>
24#include <asm/processor.h>
25#include <asm/machdep.h>
26
27/*
28 * This table contains the mapping between PowerPC hardware trap types, and
29 * signals, which are primarily what GDB understands. GDB and the kernel
30 * don't always agree on values, so we use constants taken from gdb-6.2.
31 */
32static struct hard_trap_info
33{
34 unsigned int tt; /* Trap type code for powerpc */
35 unsigned char signo; /* Signal that we map this trap into */
36} hard_trap_info[] = {
37 { 0x0100, 0x02 /* SIGINT */ }, /* system reset */
38 { 0x0200, 0x0b /* SIGSEGV */ }, /* machine check */
39 { 0x0300, 0x0b /* SIGSEGV */ }, /* data access */
40 { 0x0400, 0x0b /* SIGSEGV */ }, /* instruction access */
41 { 0x0500, 0x02 /* SIGINT */ }, /* external interrupt */
42 { 0x0600, 0x0a /* SIGBUS */ }, /* alignment */
43 { 0x0700, 0x05 /* SIGTRAP */ }, /* program check */
44 { 0x0800, 0x08 /* SIGFPE */ }, /* fp unavailable */
45 { 0x0900, 0x0e /* SIGALRM */ }, /* decrementer */
46 { 0x0c00, 0x14 /* SIGCHLD */ }, /* system call */
47#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
48 { 0x2002, 0x05 /* SIGTRAP */ }, /* debug */
49#if defined(CONFIG_FSL_BOOKE)
50 { 0x2010, 0x08 /* SIGFPE */ }, /* spe unavailable */
51 { 0x2020, 0x08 /* SIGFPE */ }, /* spe unavailable */
52 { 0x2030, 0x08 /* SIGFPE */ }, /* spe fp data */
53 { 0x2040, 0x08 /* SIGFPE */ }, /* spe fp data */
54 { 0x2050, 0x08 /* SIGFPE */ }, /* spe fp round */
55	{ 0x2060, 0x0e /* SIGILL */ },	/* performance monitor */
56 { 0x2900, 0x08 /* SIGFPE */ }, /* apu unavailable */
57 { 0x3100, 0x0e /* SIGALRM */ }, /* fixed interval timer */
58 { 0x3200, 0x02 /* SIGINT */ }, /* watchdog */
59#else /* ! CONFIG_FSL_BOOKE */
60 { 0x1000, 0x0e /* SIGALRM */ }, /* prog interval timer */
61 { 0x1010, 0x0e /* SIGALRM */ }, /* fixed interval timer */
62 { 0x1020, 0x02 /* SIGINT */ }, /* watchdog */
63 { 0x2010, 0x08 /* SIGFPE */ }, /* fp unavailable */
64 { 0x2020, 0x08 /* SIGFPE */ }, /* ap unavailable */
65#endif
66#else /* ! (defined(CONFIG_40x) || defined(CONFIG_BOOKE)) */
67 { 0x0d00, 0x05 /* SIGTRAP */ }, /* single-step */
68#if defined(CONFIG_8xx)
69 { 0x1000, 0x04 /* SIGILL */ }, /* software emulation */
70#else /* ! CONFIG_8xx */
71 { 0x0f00, 0x04 /* SIGILL */ }, /* performance monitor */
72 { 0x0f20, 0x08 /* SIGFPE */ }, /* altivec unavailable */
73 { 0x1300, 0x05 /* SIGTRAP */ }, /* instruction address break */
74#if defined(CONFIG_PPC64)
75 { 0x1200, 0x05 /* SIGILL */ }, /* system error */
76 { 0x1500, 0x04 /* SIGILL */ }, /* soft patch */
77 { 0x1600, 0x04 /* SIGILL */ }, /* maintenance */
78 { 0x1700, 0x08 /* SIGFPE */ }, /* altivec assist */
79 { 0x1800, 0x04 /* SIGILL */ }, /* thermal */
80#else /* ! CONFIG_PPC64 */
81 { 0x1400, 0x02 /* SIGINT */ }, /* SMI */
82 { 0x1600, 0x08 /* SIGFPE */ }, /* altivec assist */
83 { 0x1700, 0x04 /* SIGILL */ }, /* TAU */
84 { 0x2000, 0x05 /* SIGTRAP */ }, /* run mode */
85#endif
86#endif
87#endif
88 { 0x0000, 0x00 } /* Must be last */
89};
90
91static int computeSignal(unsigned int tt)
92{
93 struct hard_trap_info *ht;
94
95 for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
96 if (ht->tt == tt)
97 return ht->signo;
98
99 return SIGHUP; /* default for things we don't know about */
100}
101
102static int kgdb_call_nmi_hook(struct pt_regs *regs)
103{
104 kgdb_nmicallback(raw_smp_processor_id(), regs);
105 return 0;
106}
107
108#ifdef CONFIG_SMP
109void kgdb_roundup_cpus(unsigned long flags)
110{
111 smp_send_debugger_break(MSG_ALL_BUT_SELF);
112}
113#endif
114
115/* KGDB functions to use existing PowerPC64 hooks. */
116static int kgdb_debugger(struct pt_regs *regs)
117{
118 return kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs);
119}
120
121static int kgdb_handle_breakpoint(struct pt_regs *regs)
122{
123 if (user_mode(regs))
124 return 0;
125
126 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
127 return 0;
128
129 if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
130 regs->nip += 4;
131
132 return 1;
133}
134
135static int kgdb_singlestep(struct pt_regs *regs)
136{
137 struct thread_info *thread_info, *exception_thread_info;
138
139 if (user_mode(regs))
140 return 0;
141
142 /*
143	 * On Book E and perhaps other processors, singlestep is handled on
144	 * the critical exception stack. This causes current_thread_info()
145	 * to fail, since it locates the thread_info by masking off
146 * the low bits of the current stack pointer. We work around
147 * this issue by copying the thread_info from the kernel stack
148 * before calling kgdb_handle_exception, and copying it back
149 * afterwards. On most processors the copy is avoided since
150 * exception_thread_info == thread_info.
151 */
152 thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1));
153 exception_thread_info = current_thread_info();
154
155 if (thread_info != exception_thread_info)
156 memcpy(exception_thread_info, thread_info, sizeof *thread_info);
157
158 kgdb_handle_exception(0, SIGTRAP, 0, regs);
159
160 if (thread_info != exception_thread_info)
161 memcpy(thread_info, exception_thread_info, sizeof *thread_info);
162
163 return 1;
164}
165
166static int kgdb_iabr_match(struct pt_regs *regs)
167{
168 if (user_mode(regs))
169 return 0;
170
171 if (kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs) != 0)
172 return 0;
173 return 1;
174}
175
176static int kgdb_dabr_match(struct pt_regs *regs)
177{
178 if (user_mode(regs))
179 return 0;
180
181 if (kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs) != 0)
182 return 0;
183 return 1;
184}
185
186#define PACK64(ptr, src) do { *(ptr++) = (src); } while (0)
187
188#define PACK32(ptr, src) do { \
189 u32 *ptr32; \
190 ptr32 = (u32 *)ptr; \
191 *(ptr32++) = (src); \
192 ptr = (unsigned long *)ptr32; \
193 } while (0)
194
195
196void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
197{
198 unsigned long *ptr = gdb_regs;
199 int reg;
200
201 memset(gdb_regs, 0, NUMREGBYTES);
202
203 for (reg = 0; reg < 32; reg++)
204 PACK64(ptr, regs->gpr[reg]);
205
206#ifdef CONFIG_FSL_BOOKE
207#ifdef CONFIG_SPE
208 for (reg = 0; reg < 32; reg++)
209 PACK64(ptr, current->thread.evr[reg]);
210#else
211 ptr += 32;
212#endif
213#else
214 /* fp registers not used by kernel, leave zero */
215 ptr += 32 * 8 / sizeof(long);
216#endif
217
218 PACK64(ptr, regs->nip);
219 PACK64(ptr, regs->msr);
220 PACK32(ptr, regs->ccr);
221 PACK64(ptr, regs->link);
222 PACK64(ptr, regs->ctr);
223 PACK32(ptr, regs->xer);
224
225 BUG_ON((unsigned long)ptr >
226 (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
227}
228
229void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
230{
231 struct pt_regs *regs = (struct pt_regs *)(p->thread.ksp +
232 STACK_FRAME_OVERHEAD);
233 unsigned long *ptr = gdb_regs;
234 int reg;
235
236 memset(gdb_regs, 0, NUMREGBYTES);
237
238 /* Regs GPR0-2 */
239 for (reg = 0; reg < 3; reg++)
240 PACK64(ptr, regs->gpr[reg]);
241
242 /* Regs GPR3-13 are caller saved, not in regs->gpr[] */
243 ptr += 11;
244
245 /* Regs GPR14-31 */
246 for (reg = 14; reg < 32; reg++)
247 PACK64(ptr, regs->gpr[reg]);
248
249#ifdef CONFIG_FSL_BOOKE
250#ifdef CONFIG_SPE
251 for (reg = 0; reg < 32; reg++)
252 PACK64(ptr, p->thread.evr[reg]);
253#else
254 ptr += 32;
255#endif
256#else
257 /* fp registers not used by kernel, leave zero */
258 ptr += 32 * 8 / sizeof(long);
259#endif
260
261 PACK64(ptr, regs->nip);
262 PACK64(ptr, regs->msr);
263 PACK32(ptr, regs->ccr);
264 PACK64(ptr, regs->link);
265 PACK64(ptr, regs->ctr);
266 PACK32(ptr, regs->xer);
267
268 BUG_ON((unsigned long)ptr >
269 (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
270}
271
272#define UNPACK64(dest, ptr) do { dest = *(ptr++); } while (0)
273
274#define UNPACK32(dest, ptr) do { \
275 u32 *ptr32; \
276 ptr32 = (u32 *)ptr; \
277 dest = *(ptr32++); \
278 ptr = (unsigned long *)ptr32; \
279 } while (0)
280
281void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
282{
283 unsigned long *ptr = gdb_regs;
284 int reg;
285#ifdef CONFIG_SPE
286 union {
287 u32 v32[2];
288 u64 v64;
289 } acc;
290#endif
291
292 for (reg = 0; reg < 32; reg++)
293 UNPACK64(regs->gpr[reg], ptr);
294
295#ifdef CONFIG_FSL_BOOKE
296#ifdef CONFIG_SPE
297 for (reg = 0; reg < 32; reg++)
298 UNPACK64(current->thread.evr[reg], ptr);
299#else
300 ptr += 32;
301#endif
302#else
303 /* fp registers not used by kernel, leave zero */
304	ptr += 32 * 8 / sizeof(long);
305#endif
306
307 UNPACK64(regs->nip, ptr);
308 UNPACK64(regs->msr, ptr);
309 UNPACK32(regs->ccr, ptr);
310 UNPACK64(regs->link, ptr);
311 UNPACK64(regs->ctr, ptr);
312 UNPACK32(regs->xer, ptr);
313
314 BUG_ON((unsigned long)ptr >
315 (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
316}
317
318/*
319 * This function does PowerPC specific processing for interfacing to gdb.
320 */
321int kgdb_arch_handle_exception(int vector, int signo, int err_code,
322 char *remcom_in_buffer, char *remcom_out_buffer,
323 struct pt_regs *linux_regs)
324{
325 char *ptr = &remcom_in_buffer[1];
326 unsigned long addr;
327
328 switch (remcom_in_buffer[0]) {
329 /*
330 * sAA..AA Step one instruction from AA..AA
331 * This will return an error to gdb ..
332 */
333 case 's':
334 case 'c':
335 /* handle the optional parameter */
336 if (kgdb_hex2long(&ptr, &addr))
337 linux_regs->nip = addr;
338
339 atomic_set(&kgdb_cpu_doing_single_step, -1);
340 /* set the trace bit if we're stepping */
341 if (remcom_in_buffer[0] == 's') {
342#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
343 mtspr(SPRN_DBCR0,
344 mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
345 linux_regs->msr |= MSR_DE;
346#else
347 linux_regs->msr |= MSR_SE;
348#endif
349 kgdb_single_step = 1;
350 if (kgdb_contthread)
351 atomic_set(&kgdb_cpu_doing_single_step,
352 raw_smp_processor_id());
353 }
354 return 0;
355 }
356
357 return -1;
358}
359
360/*
361 * Global data
362 */
363struct kgdb_arch arch_kgdb_ops = {
364 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
365};
366
367static int kgdb_not_implemented(struct pt_regs *regs)
368{
369 return 0;
370}
371
372static void *old__debugger_ipi;
373static void *old__debugger;
374static void *old__debugger_bpt;
375static void *old__debugger_sstep;
376static void *old__debugger_iabr_match;
377static void *old__debugger_dabr_match;
378static void *old__debugger_fault_handler;
379
380int kgdb_arch_init(void)
381{
382 old__debugger_ipi = __debugger_ipi;
383 old__debugger = __debugger;
384 old__debugger_bpt = __debugger_bpt;
385 old__debugger_sstep = __debugger_sstep;
386 old__debugger_iabr_match = __debugger_iabr_match;
387 old__debugger_dabr_match = __debugger_dabr_match;
388 old__debugger_fault_handler = __debugger_fault_handler;
389
390 __debugger_ipi = kgdb_call_nmi_hook;
391 __debugger = kgdb_debugger;
392 __debugger_bpt = kgdb_handle_breakpoint;
393 __debugger_sstep = kgdb_singlestep;
394 __debugger_iabr_match = kgdb_iabr_match;
395 __debugger_dabr_match = kgdb_dabr_match;
396 __debugger_fault_handler = kgdb_not_implemented;
397
398 return 0;
399}
400
401void kgdb_arch_exit(void)
402{
403 __debugger_ipi = old__debugger_ipi;
404 __debugger = old__debugger;
405 __debugger_bpt = old__debugger_bpt;
406 __debugger_sstep = old__debugger_sstep;
407 __debugger_iabr_match = old__debugger_iabr_match;
408 __debugger_dabr_match = old__debugger_dabr_match;
409 __debugger_fault_handler = old__debugger_fault_handler;
410}
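kgdb_singlestep() above works around the fact that current_thread_info() derives its answer purely from the stack pointer. A sketch of that derivation, assuming the usual powerpc convention that thread_info sits at the base of a THREAD_SIZE-aligned kernel stack:

	static inline struct thread_info *ti_from_sp(unsigned long sp)
	{
		/* round the stack pointer down to the base of its stack */
		return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
	}

On a critical-exception stack this resolves to the exception stack's own thread_info rather than the interrupted task's, which is why the handler copies the task's thread_info over it for the duration of the kgdb_handle_exception() call and copies it back afterwards.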
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index c176c513566b..4ba2af125450 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -34,6 +34,13 @@
34#include <asm/cacheflush.h> 34#include <asm/cacheflush.h>
35#include <asm/sstep.h> 35#include <asm/sstep.h>
36#include <asm/uaccess.h> 36#include <asm/uaccess.h>
37#include <asm/system.h>
38
39#ifdef CONFIG_BOOKE
40#define MSR_SINGLESTEP (MSR_DE)
41#else
42#define MSR_SINGLESTEP (MSR_SE)
43#endif
37 44
38DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; 45DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
39DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); 46DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -53,7 +60,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
53 ret = -EINVAL; 60 ret = -EINVAL;
54 } 61 }
55 62
56 /* insn must be on a special executable page on ppc64 */ 63 /* insn must be on a special executable page on ppc64. This is
64 * not explicitly required on ppc32 (right now), but it doesn't hurt */
57 if (!ret) { 65 if (!ret) {
58 p->ainsn.insn = get_insn_slot(); 66 p->ainsn.insn = get_insn_slot();
59 if (!p->ainsn.insn) 67 if (!p->ainsn.insn)
@@ -95,7 +103,16 @@ void __kprobes arch_remove_kprobe(struct kprobe *p)
95 103
96static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) 104static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
97{ 105{
98 regs->msr |= MSR_SE; 106 /* We turn off async exceptions to ensure that the single step will
107	 * be for the instruction we have the kprobe on; if we don't, it's
108 * possible we'd get the single step reported for an exception handler
109 * like Decrementer or External Interrupt */
110 regs->msr &= ~MSR_EE;
111 regs->msr |= MSR_SINGLESTEP;
112#ifdef CONFIG_BOOKE
113 regs->msr &= ~MSR_CE;
114 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
115#endif
99 116
100 /* 117 /*
101 * On powerpc we should single step on the original 118 * On powerpc we should single step on the original
@@ -158,7 +175,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
158 kprobe_opcode_t insn = *p->ainsn.insn; 175 kprobe_opcode_t insn = *p->ainsn.insn;
159 if (kcb->kprobe_status == KPROBE_HIT_SS && 176 if (kcb->kprobe_status == KPROBE_HIT_SS &&
160 is_trap(insn)) { 177 is_trap(insn)) {
161 regs->msr &= ~MSR_SE; 178 /* Turn off 'trace' bits */
179 regs->msr &= ~MSR_SINGLESTEP;
162 regs->msr |= kcb->kprobe_saved_msr; 180 regs->msr |= kcb->kprobe_saved_msr;
163 goto no_kprobe; 181 goto no_kprobe;
164 } 182 }
@@ -376,6 +394,10 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
376 if (!cur) 394 if (!cur)
377 return 0; 395 return 0;
378 396
397	/* make sure we got here for an instruction we have a kprobe on */
398 if (((unsigned long)cur->ainsn.insn + 4) != regs->nip)
399 return 0;
400
379 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { 401 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
380 kcb->kprobe_status = KPROBE_HIT_SSDONE; 402 kcb->kprobe_status = KPROBE_HIT_SSDONE;
381 cur->post_handler(cur, regs, 0); 403 cur->post_handler(cur, regs, 0);
@@ -395,10 +417,10 @@ out:
395 417
396 /* 418 /*
397 * if somebody else is singlestepping across a probe point, msr 419 * if somebody else is singlestepping across a probe point, msr
398 * will have SE set, in which case, continue the remaining processing 420 * will have DE/SE set, in which case, continue the remaining processing
399 * of do_debug, as if this is not a probe hit. 421 * of do_debug, as if this is not a probe hit.
400 */ 422 */
401 if (regs->msr & MSR_SE) 423 if (regs->msr & MSR_SINGLESTEP)
402 return 0; 424 return 0;
403 425
404 return 1; 426 return 1;
@@ -421,7 +443,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
421 * normal page fault. 443 * normal page fault.
422 */ 444 */
423 regs->nip = (unsigned long)cur->addr; 445 regs->nip = (unsigned long)cur->addr;
424 regs->msr &= ~MSR_SE; 446 regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */
425 regs->msr |= kcb->kprobe_saved_msr; 447 regs->msr |= kcb->kprobe_saved_msr;
426 if (kcb->kprobe_status == KPROBE_REENTER) 448 if (kcb->kprobe_status == KPROBE_REENTER)
427 restore_previous_kprobe(kcb); 449 restore_previous_kprobe(kcb);
@@ -498,7 +520,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
498#ifdef CONFIG_PPC64 520#ifdef CONFIG_PPC64
499unsigned long arch_deref_entry_point(void *entry) 521unsigned long arch_deref_entry_point(void *entry)
500{ 522{
501 return (unsigned long)(((func_descr_t *)entry)->entry); 523 return ((func_descr_t *)entry)->entry;
502} 524}
503#endif 525#endif
504 526
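The new MSR_SINGLESTEP macro lets the rest of kprobes.c manipulate "the trace bit" without caring whether the part uses the classic MSR_SE or the BookE MSR_DE/DBCR0 scheme. The step itself follows a save/arm/disarm pattern, sketched here with a hypothetical saved_msr local (the real code keeps the saved value in kcb->kprobe_saved_msr):

	unsigned long saved_msr = regs->msr;	/* hypothetical local */

	regs->msr &= ~MSR_EE;		/* no async exceptions mid-step */
	regs->msr |= MSR_SINGLESTEP;	/* arm the trace facility */
	/* ... the copied instruction executes, the debug trap fires ... */
	regs->msr &= ~MSR_SINGLESTEP;	/* disarm */
	regs->msr |= saved_msr;		/* restore EE and any caller trace state */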
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 1e656b43ad7f..827a5726a035 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -573,7 +573,7 @@ static int lparcfg_open(struct inode *inode, struct file *file)
573 return single_open(file, lparcfg_data, NULL); 573 return single_open(file, lparcfg_data, NULL);
574} 574}
575 575
576const struct file_operations lparcfg_fops = { 576static const struct file_operations lparcfg_fops = {
577 .owner = THIS_MODULE, 577 .owner = THIS_MODULE,
578 .read = seq_read, 578 .read = seq_read,
579 .write = lparcfg_write, 579 .write = lparcfg_write,
@@ -581,7 +581,7 @@ const struct file_operations lparcfg_fops = {
581 .release = single_release, 581 .release = single_release,
582}; 582};
583 583
584int __init lparcfg_init(void) 584static int __init lparcfg_init(void)
585{ 585{
586 struct proc_dir_entry *ent; 586 struct proc_dir_entry *ent;
587 mode_t mode = S_IRUSR | S_IRGRP | S_IROTH; 587 mode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
@@ -601,7 +601,7 @@ int __init lparcfg_init(void)
601 return 0; 601 return 0;
602} 602}
603 603
604void __exit lparcfg_cleanup(void) 604static void __exit lparcfg_cleanup(void)
605{ 605{
606 if (proc_ppc64_lparcfg) 606 if (proc_ppc64_lparcfg)
607 remove_proc_entry("lparcfg", proc_ppc64_lparcfg->parent); 607 remove_proc_entry("lparcfg", proc_ppc64_lparcfg->parent);
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 704375bda73a..a168514d8609 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -158,7 +158,7 @@ void kexec_copy_flush(struct kimage *image)
158 * on calling the interrupts, but we would like to call it off irq level 158 * on calling the interrupts, but we would like to call it off irq level
159 * so that the interrupt controller is clean. 159 * so that the interrupt controller is clean.
160 */ 160 */
161void kexec_smp_down(void *arg) 161static void kexec_smp_down(void *arg)
162{ 162{
163 if (ppc_md.kexec_cpu_down) 163 if (ppc_md.kexec_cpu_down)
164 ppc_md.kexec_cpu_down(0, 1); 164 ppc_md.kexec_cpu_down(0, 1);
@@ -172,7 +172,7 @@ static void kexec_prepare_cpus(void)
172{ 172{
173 int my_cpu, i, notified=-1; 173 int my_cpu, i, notified=-1;
174 174
175 smp_call_function(kexec_smp_down, NULL, 0, /* wait */0); 175 smp_call_function(kexec_smp_down, NULL, /* wait */0);
176 my_cpu = get_cpu(); 176 my_cpu = get_cpu();
177 177
178 /* check the others cpus are now down (via paca hw cpu id == -1) */ 178 /* check the others cpus are now down (via paca hw cpu id == -1) */
@@ -249,7 +249,7 @@ static void kexec_prepare_cpus(void)
249 * We could use a smaller stack if we don't care about anything using 249 * We could use a smaller stack if we don't care about anything using
250 * current, but that audit has not been performed. 250 * current, but that audit has not been performed.
251 */ 251 */
252union thread_union kexec_stack 252static union thread_union kexec_stack
253 __attribute__((__section__(".data.init_task"))) = { }; 253 __attribute__((__section__(".data.init_task"))) = { };
254 254
255/* Our assembly helper, in kexec_stub.S */ 255/* Our assembly helper, in kexec_stub.S */
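The smp_call_function() tweak tracks a generic API change in this merge window: the retry argument is gone, leaving (func, info, wait). New-style usage, with a throwaway handler for illustration:

	static void quiesce_cpu(void *unused)
	{
		/* runs on every other online CPU */
	}

	/* wait == 0: fire-and-forget, return without waiting for handlers */
	smp_call_function(quiesce_cpu, NULL, 0);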
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index 7b9160220698..85cb6f340846 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -116,3 +116,8 @@ _GLOBAL(longjmp)
116 mtlr r0 116 mtlr r0
117 mr r3,r4 117 mr r3,r4
118 blr 118 blr
119
120_GLOBAL(__setup_cpu_power7)
121_GLOBAL(__restore_cpu_power7)
122 /* place holder */
123 blr
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 89aaaa6f3561..6321ae36f729 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -489,7 +489,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
489 * 489 *
490 * flush_icache_range(unsigned long start, unsigned long stop) 490 * flush_icache_range(unsigned long start, unsigned long stop)
491 */ 491 */
492_GLOBAL(__flush_icache_range) 492_KPROBE(__flush_icache_range)
493BEGIN_FTR_SECTION 493BEGIN_FTR_SECTION
494 blr /* for 601, do nothing */ 494 blr /* for 601, do nothing */
495END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) 495END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 942951e76586..4dd70cf7bb4e 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -506,6 +506,39 @@ _GLOBAL(giveup_altivec)
506 506
507#endif /* CONFIG_ALTIVEC */ 507#endif /* CONFIG_ALTIVEC */
508 508
509#ifdef CONFIG_VSX
510/*
511 * __giveup_vsx(tsk)
512 * Disable VSX for the task given as the argument.
513 * Does NOT save vsx registers.
514 * Enables the VSX for use in the kernel on return.
515 */
516_GLOBAL(__giveup_vsx)
517 mfmsr r5
518 oris r5,r5,MSR_VSX@h
519 mtmsrd r5 /* enable use of VSX now */
520 isync
521
522 cmpdi 0,r3,0
523 beqlr- /* if no previous owner, done */
524 addi r3,r3,THREAD /* want THREAD of task */
525 ld r5,PT_REGS(r3)
526 cmpdi 0,r5,0
527 beq 1f
528 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
529 lis r3,MSR_VSX@h
530 andc r4,r4,r3 /* disable VSX for previous task */
531 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
5321:
533#ifndef CONFIG_SMP
534 li r5,0
535 ld r4,last_task_used_vsx@got(r2)
536 std r5,0(r4)
537#endif /* CONFIG_SMP */
538 blr
539
540#endif /* CONFIG_VSX */
541
509/* kexec_wait(phys_cpu) 542/* kexec_wait(phys_cpu)
510 * 543 *
511 * wait for the flag to change, indicating this kernel is going away but 544 * wait for the flag to change, indicating this kernel is going away but
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
new file mode 100644
index 000000000000..af07003573c4
--- /dev/null
+++ b/arch/powerpc/kernel/module.c
@@ -0,0 +1,116 @@
1/* Kernel module help for powerpc.
2 Copyright (C) 2001, 2003 Rusty Russell IBM Corporation.
3 Copyright (C) 2008 Freescale Semiconductor, Inc.
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18*/
19#include <linux/module.h>
20#include <linux/elf.h>
21#include <linux/moduleloader.h>
22#include <linux/err.h>
23#include <linux/vmalloc.h>
24#include <linux/bug.h>
25#include <asm/module.h>
26#include <asm/uaccess.h>
27#include <asm/firmware.h>
28#include <linux/sort.h>
29
30#include "setup.h"
31
32LIST_HEAD(module_bug_list);
33
34void *module_alloc(unsigned long size)
35{
36 if (size == 0)
37 return NULL;
38
39 return vmalloc_exec(size);
40}
41
42/* Free memory returned from module_alloc */
43void module_free(struct module *mod, void *module_region)
44{
45 vfree(module_region);
46 /* FIXME: If module_region == mod->init_region, trim exception
47 table entries. */
48}
49
50static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
51 const Elf_Shdr *sechdrs,
52 const char *name)
53{
54 char *secstrings;
55 unsigned int i;
56
57 secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
58 for (i = 1; i < hdr->e_shnum; i++)
59 if (strcmp(secstrings+sechdrs[i].sh_name, name) == 0)
60 return &sechdrs[i];
61 return NULL;
62}
63
64int module_finalize(const Elf_Ehdr *hdr,
65 const Elf_Shdr *sechdrs, struct module *me)
66{
67 const Elf_Shdr *sect;
68 int err;
69
70 err = module_bug_finalize(hdr, sechdrs, me);
71 if (err)
72 return err;
73
74 /* Apply feature fixups */
75 sect = find_section(hdr, sechdrs, "__ftr_fixup");
76 if (sect != NULL)
77 do_feature_fixups(cur_cpu_spec->cpu_features,
78 (void *)sect->sh_addr,
79 (void *)sect->sh_addr + sect->sh_size);
80
81#ifdef CONFIG_PPC64
82 sect = find_section(hdr, sechdrs, "__fw_ftr_fixup");
83 if (sect != NULL)
84 do_feature_fixups(powerpc_firmware_features,
85 (void *)sect->sh_addr,
86 (void *)sect->sh_addr + sect->sh_size);
87#endif
88
89 sect = find_section(hdr, sechdrs, "__lwsync_fixup");
90 if (sect != NULL)
91 do_lwsync_fixups(cur_cpu_spec->cpu_features,
92 (void *)sect->sh_addr,
93 (void *)sect->sh_addr + sect->sh_size);
94
95 return 0;
96}
97
98void module_arch_cleanup(struct module *mod)
99{
100 module_bug_cleanup(mod);
101}
102
103struct bug_entry *module_find_bug(unsigned long bugaddr)
104{
105 struct mod_arch_specific *mod;
106 unsigned int i;
107 struct bug_entry *bug;
108
109 list_for_each_entry(mod, &module_bug_list, bug_list) {
110 bug = mod->bug_table;
111 for (i = 0; i < mod->num_bugs; ++i, ++bug)
112 if (bugaddr == bug->bug_addr)
113 return bug;
114 }
115 return NULL;
116}
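The new common module.c consolidates module_alloc()/module_free(), find_section() and the BUG-table plumbing that module_32.c and module_64.c each carried a private copy of (both copies are deleted below). find_section() is plain ELF section-name lookup; the same walk as a self-contained userspace function, using the 64-bit <elf.h> types for concreteness:

	#include <elf.h>
	#include <string.h>

	/* return the section header named 'name', or NULL; 'hdr' points at a
	 * complete ELF image in memory, 'shdrs' at its section header table */
	static const Elf64_Shdr *find_named_section(const Elf64_Ehdr *hdr,
						    const Elf64_Shdr *shdrs,
						    const char *name)
	{
		const char *strs = (const char *)hdr +
				   shdrs[hdr->e_shstrndx].sh_offset;
		unsigned int i;

		for (i = 1; i < hdr->e_shnum; i++)	/* 0 is SHN_UNDEF */
			if (strcmp(strs + shdrs[i].sh_name, name) == 0)
				return &shdrs[i];
		return NULL;
	}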
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index eab313858315..2df91a03462a 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -34,23 +34,6 @@
34#define DEBUGP(fmt , ...) 34#define DEBUGP(fmt , ...)
35#endif 35#endif
36 36
37LIST_HEAD(module_bug_list);
38
39void *module_alloc(unsigned long size)
40{
41 if (size == 0)
42 return NULL;
43 return vmalloc(size);
44}
45
46/* Free memory returned from module_alloc */
47void module_free(struct module *mod, void *module_region)
48{
49 vfree(module_region);
50 /* FIXME: If module_region == mod->init_region, trim exception
51 table entries. */
52}
53
54/* Count how many different relocations (different symbol, different 37/* Count how many different relocations (different symbol, different
55 addend) */ 38 addend) */
56static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num) 39static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
@@ -325,58 +308,3 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
325 } 308 }
326 return 0; 309 return 0;
327} 310}
328
329static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
330 const Elf_Shdr *sechdrs,
331 const char *name)
332{
333 char *secstrings;
334 unsigned int i;
335
336 secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
337 for (i = 1; i < hdr->e_shnum; i++)
338 if (strcmp(secstrings+sechdrs[i].sh_name, name) == 0)
339 return &sechdrs[i];
340 return NULL;
341}
342
343int module_finalize(const Elf_Ehdr *hdr,
344 const Elf_Shdr *sechdrs,
345 struct module *me)
346{
347 const Elf_Shdr *sect;
348 int err;
349
350 err = module_bug_finalize(hdr, sechdrs, me);
351 if (err) /* never true, currently */
352 return err;
353
354 /* Apply feature fixups */
355 sect = find_section(hdr, sechdrs, "__ftr_fixup");
356 if (sect != NULL)
357 do_feature_fixups(cur_cpu_spec->cpu_features,
358 (void *)sect->sh_addr,
359 (void *)sect->sh_addr + sect->sh_size);
360
361 return 0;
362}
363
364void module_arch_cleanup(struct module *mod)
365{
366 module_bug_cleanup(mod);
367}
368
369struct bug_entry *module_find_bug(unsigned long bugaddr)
370{
371 struct mod_arch_specific *mod;
372 unsigned int i;
373 struct bug_entry *bug;
374
375 list_for_each_entry(mod, &module_bug_list, bug_list) {
376 bug = mod->bug_table;
377 for (i = 0; i < mod->num_bugs; ++i, ++bug)
378 if (bugaddr == bug->bug_addr)
379 return bug;
380 }
381 return NULL;
382}
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 3a82b02b784b..ee6a2982d567 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -24,6 +24,7 @@
24#include <asm/module.h> 24#include <asm/module.h>
25#include <asm/uaccess.h> 25#include <asm/uaccess.h>
26#include <asm/firmware.h> 26#include <asm/firmware.h>
27#include <asm/code-patching.h>
27#include <linux/sort.h> 28#include <linux/sort.h>
28 29
29#include "setup.h" 30#include "setup.h"
@@ -101,22 +102,6 @@ static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num)
101 return _count_relocs; 102 return _count_relocs;
102} 103}
103 104
104void *module_alloc(unsigned long size)
105{
106 if (size == 0)
107 return NULL;
108
109 return vmalloc_exec(size);
110}
111
112/* Free memory returned from module_alloc */
113void module_free(struct module *mod, void *module_region)
114{
115 vfree(module_region);
116 /* FIXME: If module_region == mod->init_region, trim exception
117 table entries. */
118}
119
120static int relacmp(const void *_x, const void *_y) 105static int relacmp(const void *_x, const void *_y)
121{ 106{
122 const Elf64_Rela *x, *y; 107 const Elf64_Rela *x, *y;
@@ -346,7 +331,7 @@ static unsigned long stub_for_addr(Elf64_Shdr *sechdrs,
346 restore r2. */ 331 restore r2. */
347static int restore_r2(u32 *instruction, struct module *me) 332static int restore_r2(u32 *instruction, struct module *me)
348{ 333{
349 if (*instruction != 0x60000000) { 334 if (*instruction != PPC_NOP_INSTR) {
350 printk("%s: Expect noop after relocate, got %08x\n", 335 printk("%s: Expect noop after relocate, got %08x\n",
351 me->name, *instruction); 336 me->name, *instruction);
352 return 0; 337 return 0;
@@ -466,65 +451,3 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
466 451
467 return 0; 452 return 0;
468} 453}
469
470LIST_HEAD(module_bug_list);
471
472static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
473 const Elf_Shdr *sechdrs,
474 const char *name)
475{
476 char *secstrings;
477 unsigned int i;
478
479 secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
480 for (i = 1; i < hdr->e_shnum; i++)
481 if (strcmp(secstrings+sechdrs[i].sh_name, name) == 0)
482 return &sechdrs[i];
483 return NULL;
484}
485
486int module_finalize(const Elf_Ehdr *hdr,
487 const Elf_Shdr *sechdrs, struct module *me)
488{
489 const Elf_Shdr *sect;
490 int err;
491
492 err = module_bug_finalize(hdr, sechdrs, me);
493 if (err)
494 return err;
495
496 /* Apply feature fixups */
497 sect = find_section(hdr, sechdrs, "__ftr_fixup");
498 if (sect != NULL)
499 do_feature_fixups(cur_cpu_spec->cpu_features,
500 (void *)sect->sh_addr,
501 (void *)sect->sh_addr + sect->sh_size);
502
503 sect = find_section(hdr, sechdrs, "__fw_ftr_fixup");
504 if (sect != NULL)
505 do_feature_fixups(powerpc_firmware_features,
506 (void *)sect->sh_addr,
507 (void *)sect->sh_addr + sect->sh_size);
508
509 return 0;
510}
511
512void module_arch_cleanup(struct module *mod)
513{
514 module_bug_cleanup(mod);
515}
516
517struct bug_entry *module_find_bug(unsigned long bugaddr)
518{
519 struct mod_arch_specific *mod;
520 unsigned int i;
521 struct bug_entry *bug;
522
523 list_for_each_entry(mod, &module_bug_list, bug_list) {
524 bug = mod->bug_table;
525 for (i = 0; i < mod->num_bugs; ++i, ++bug)
526 if (bugaddr == bug->bug_addr)
527 return bug;
528 }
529 return NULL;
530}
diff --git a/arch/powerpc/kernel/msi.c b/arch/powerpc/kernel/msi.c
index c62d1012c013..3bb7d3dd28be 100644
--- a/arch/powerpc/kernel/msi.c
+++ b/arch/powerpc/kernel/msi.c
@@ -34,5 +34,5 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
34 34
35void arch_teardown_msi_irqs(struct pci_dev *dev) 35void arch_teardown_msi_irqs(struct pci_dev *dev)
36{ 36{
37 return ppc_md.teardown_msi_irqs(dev); 37 ppc_md.teardown_msi_irqs(dev);
38} 38}
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index 5748ddb47d9f..e9be908f199b 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -89,54 +89,6 @@ struct of_device *of_device_alloc(struct device_node *np,
89} 89}
90EXPORT_SYMBOL(of_device_alloc); 90EXPORT_SYMBOL(of_device_alloc);
91 91
92ssize_t of_device_get_modalias(struct of_device *ofdev,
93 char *str, ssize_t len)
94{
95 const char *compat;
96 int cplen, i;
97 ssize_t tsize, csize, repend;
98
99 /* Name & Type */
100 csize = snprintf(str, len, "of:N%sT%s",
101 ofdev->node->name, ofdev->node->type);
102
103 /* Get compatible property if any */
104 compat = of_get_property(ofdev->node, "compatible", &cplen);
105 if (!compat)
106 return csize;
107
108	/* Find true end (we tolerate multiple \0 at the end) */
109 for (i=(cplen-1); i>=0 && !compat[i]; i--)
110 cplen--;
111 if (!cplen)
112 return csize;
113 cplen++;
114
115 /* Check space (need cplen+1 chars including final \0) */
116 tsize = csize + cplen;
117 repend = tsize;
118
119 if (csize>=len) /* @ the limit, all is already filled */
120 return tsize;
121
122 if (tsize>=len) { /* limit compat list */
123 cplen = len-csize-1;
124 repend = len;
125 }
126
127 /* Copy and do char replacement */
128 memcpy(&str[csize+1], compat, cplen);
129 for (i=csize; i<repend; i++) {
130 char c = str[i];
131 if (c=='\0')
132 str[i] = 'C';
133 else if (c==' ')
134 str[i] = '_';
135 }
136
137 return tsize;
138}
139
140int of_device_uevent(struct device *dev, struct kobj_uevent_env *env) 92int of_device_uevent(struct device *dev, struct kobj_uevent_env *env)
141{ 93{
142 struct of_device *ofdev; 94 struct of_device *ofdev;
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 063cdd413049..224e9a11765c 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -598,6 +598,7 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
598 res->start = pci_addr; 598 res->start = pci_addr;
599 break; 599 break;
600 case 2: /* PCI Memory space */ 600 case 2: /* PCI Memory space */
601 case 3: /* PCI 64 bits Memory space */
601 printk(KERN_INFO 602 printk(KERN_INFO
602 " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n", 603 " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
603 cpu_addr, cpu_addr + size - 1, pci_addr, 604 cpu_addr, cpu_addr + size - 1, pci_addr,
diff --git a/arch/powerpc/kernel/ppc32.h b/arch/powerpc/kernel/ppc32.h
index 90e562771791..dc16aefe1dd0 100644
--- a/arch/powerpc/kernel/ppc32.h
+++ b/arch/powerpc/kernel/ppc32.h
@@ -120,6 +120,7 @@ struct mcontext32 {
120 elf_fpregset_t mc_fregs; 120 elf_fpregset_t mc_fregs;
121 unsigned int mc_pad[2]; 121 unsigned int mc_pad[2];
122 elf_vrregset_t32 mc_vregs __attribute__((__aligned__(16))); 122 elf_vrregset_t32 mc_vregs __attribute__((__aligned__(16)));
123 elf_vsrreghalf_t32 mc_vsregs __attribute__((__aligned__(16)));
123}; 124};
124 125
125struct ucontext32 { 126struct ucontext32 {
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index d3ac631cbd26..e1ea4fe5cfbd 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -42,6 +42,7 @@
42#include <asm/div64.h> 42#include <asm/div64.h>
43#include <asm/signal.h> 43#include <asm/signal.h>
44#include <asm/dcr.h> 44#include <asm/dcr.h>
45#include <asm/ftrace.h>
45 46
46#ifdef CONFIG_PPC32 47#ifdef CONFIG_PPC32
47extern void transfer_to_handler(void); 48extern void transfer_to_handler(void);
@@ -67,6 +68,10 @@ EXPORT_SYMBOL(single_step_exception);
67EXPORT_SYMBOL(sys_sigreturn); 68EXPORT_SYMBOL(sys_sigreturn);
68#endif 69#endif
69 70
71#ifdef CONFIG_FTRACE
72EXPORT_SYMBOL(_mcount);
73#endif
74
70EXPORT_SYMBOL(strcpy); 75EXPORT_SYMBOL(strcpy);
71EXPORT_SYMBOL(strncpy); 76EXPORT_SYMBOL(strncpy);
72EXPORT_SYMBOL(strcat); 77EXPORT_SYMBOL(strcat);
@@ -102,6 +107,9 @@ EXPORT_SYMBOL(giveup_fpu);
102#ifdef CONFIG_ALTIVEC 107#ifdef CONFIG_ALTIVEC
103EXPORT_SYMBOL(giveup_altivec); 108EXPORT_SYMBOL(giveup_altivec);
104#endif /* CONFIG_ALTIVEC */ 109#endif /* CONFIG_ALTIVEC */
110#ifdef CONFIG_VSX
111EXPORT_SYMBOL(giveup_vsx);
112#endif /* CONFIG_VSX */
105#ifdef CONFIG_SPE 113#ifdef CONFIG_SPE
106EXPORT_SYMBOL(giveup_spe); 114EXPORT_SYMBOL(giveup_spe);
107#endif /* CONFIG_SPE */ 115#endif /* CONFIG_SPE */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 7de41c3948ec..219f3634115e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -53,6 +53,7 @@ extern unsigned long _get_SP(void);
53#ifndef CONFIG_SMP 53#ifndef CONFIG_SMP
54struct task_struct *last_task_used_math = NULL; 54struct task_struct *last_task_used_math = NULL;
55struct task_struct *last_task_used_altivec = NULL; 55struct task_struct *last_task_used_altivec = NULL;
56struct task_struct *last_task_used_vsx = NULL;
56struct task_struct *last_task_used_spe = NULL; 57struct task_struct *last_task_used_spe = NULL;
57#endif 58#endif
58 59
@@ -104,17 +105,6 @@ void enable_kernel_fp(void)
104} 105}
105EXPORT_SYMBOL(enable_kernel_fp); 106EXPORT_SYMBOL(enable_kernel_fp);
106 107
107int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
108{
109 if (!tsk->thread.regs)
110 return 0;
111 flush_fp_to_thread(current);
112
113 memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
114
115 return 1;
116}
117
118#ifdef CONFIG_ALTIVEC 108#ifdef CONFIG_ALTIVEC
119void enable_kernel_altivec(void) 109void enable_kernel_altivec(void)
120{ 110{
@@ -148,36 +138,48 @@ void flush_altivec_to_thread(struct task_struct *tsk)
148 preempt_enable(); 138 preempt_enable();
149 } 139 }
150} 140}
141#endif /* CONFIG_ALTIVEC */
151 142
152int dump_task_altivec(struct task_struct *tsk, elf_vrregset_t *vrregs) 143#ifdef CONFIG_VSX
144#if 0
145/* not currently used, but some crazy RAID module might want to later */
146void enable_kernel_vsx(void)
153{ 147{
154 /* ELF_NVRREG includes the VSCR and VRSAVE which we need to save 148 WARN_ON(preemptible());
155 * separately, see below */
156 const int nregs = ELF_NVRREG - 2;
157 elf_vrreg_t *reg;
158 u32 *dest;
159
160 if (tsk == current)
161 flush_altivec_to_thread(tsk);
162
163 reg = (elf_vrreg_t *)vrregs;
164
165 /* copy the 32 vr registers */
166 memcpy(reg, &tsk->thread.vr[0], nregs * sizeof(*reg));
167 reg += nregs;
168 149
169 /* copy the vscr */ 150#ifdef CONFIG_SMP
170 memcpy(reg, &tsk->thread.vscr, sizeof(*reg)); 151 if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
171 reg++; 152 giveup_vsx(current);
153 else
154 giveup_vsx(NULL); /* just enable vsx for kernel - force */
155#else
156 giveup_vsx(last_task_used_vsx);
157#endif /* CONFIG_SMP */
158}
159EXPORT_SYMBOL(enable_kernel_vsx);
160#endif
172 161
173 /* vrsave is stored in the high 32bit slot of the final 128bits */ 162void giveup_vsx(struct task_struct *tsk)
174 memset(reg, 0, sizeof(*reg)); 163{
175 dest = (u32 *)reg; 164 giveup_fpu(tsk);
176 *dest = tsk->thread.vrsave; 165 giveup_altivec(tsk);
166 __giveup_vsx(tsk);
167}
177 168
178 return 1; 169void flush_vsx_to_thread(struct task_struct *tsk)
170{
171 if (tsk->thread.regs) {
172 preempt_disable();
173 if (tsk->thread.regs->msr & MSR_VSX) {
174#ifdef CONFIG_SMP
175 BUG_ON(tsk != current);
176#endif
177 giveup_vsx(tsk);
178 }
179 preempt_enable();
180 }
179} 181}
180#endif /* CONFIG_ALTIVEC */ 182#endif /* CONFIG_VSX */
181 183
182#ifdef CONFIG_SPE 184#ifdef CONFIG_SPE
183 185
@@ -209,14 +211,6 @@ void flush_spe_to_thread(struct task_struct *tsk)
209 preempt_enable(); 211 preempt_enable();
210 } 212 }
211} 213}
212
213int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
214{
215 flush_spe_to_thread(current);
216 /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
217 memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
218 return 1;
219}
220#endif /* CONFIG_SPE */ 214#endif /* CONFIG_SPE */
221 215
222#ifndef CONFIG_SMP 216#ifndef CONFIG_SMP
@@ -233,6 +227,10 @@ void discard_lazy_cpu_state(void)
233 if (last_task_used_altivec == current) 227 if (last_task_used_altivec == current)
234 last_task_used_altivec = NULL; 228 last_task_used_altivec = NULL;
235#endif /* CONFIG_ALTIVEC */ 229#endif /* CONFIG_ALTIVEC */
230#ifdef CONFIG_VSX
231 if (last_task_used_vsx == current)
232 last_task_used_vsx = NULL;
233#endif /* CONFIG_VSX */
236#ifdef CONFIG_SPE 234#ifdef CONFIG_SPE
237 if (last_task_used_spe == current) 235 if (last_task_used_spe == current)
238 last_task_used_spe = NULL; 236 last_task_used_spe = NULL;
@@ -297,6 +295,11 @@ struct task_struct *__switch_to(struct task_struct *prev,
297 if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)) 295 if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
298 giveup_altivec(prev); 296 giveup_altivec(prev);
299#endif /* CONFIG_ALTIVEC */ 297#endif /* CONFIG_ALTIVEC */
298#ifdef CONFIG_VSX
299 if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
299		/* VMX and FPU registers are already saved here */
301 __giveup_vsx(prev);
302#endif /* CONFIG_VSX */
300#ifdef CONFIG_SPE 303#ifdef CONFIG_SPE
301 /* 304 /*
302 * If the previous thread used spe in the last quantum 305 * If the previous thread used spe in the last quantum
@@ -317,6 +320,10 @@ struct task_struct *__switch_to(struct task_struct *prev,
317 if (new->thread.regs && last_task_used_altivec == new) 320 if (new->thread.regs && last_task_used_altivec == new)
318 new->thread.regs->msr |= MSR_VEC; 321 new->thread.regs->msr |= MSR_VEC;
319#endif /* CONFIG_ALTIVEC */ 322#endif /* CONFIG_ALTIVEC */
323#ifdef CONFIG_VSX
324 if (new->thread.regs && last_task_used_vsx == new)
325 new->thread.regs->msr |= MSR_VSX;
326#endif /* CONFIG_VSX */
320#ifdef CONFIG_SPE 327#ifdef CONFIG_SPE
321	/* Avoid the trap. On smp this never happens since 328	/* Avoid the trap. On smp this never happens since
322 * we don't set last_task_used_spe 329 * we don't set last_task_used_spe
@@ -417,6 +424,8 @@ static struct regbit {
417 {MSR_EE, "EE"}, 424 {MSR_EE, "EE"},
418 {MSR_PR, "PR"}, 425 {MSR_PR, "PR"},
419 {MSR_FP, "FP"}, 426 {MSR_FP, "FP"},
427 {MSR_VEC, "VEC"},
428 {MSR_VSX, "VSX"},
420 {MSR_ME, "ME"}, 429 {MSR_ME, "ME"},
421 {MSR_IR, "IR"}, 430 {MSR_IR, "IR"},
422 {MSR_DR, "DR"}, 431 {MSR_DR, "DR"},
@@ -484,10 +493,8 @@ void show_regs(struct pt_regs * regs)
484	 * Lookup NIP late so we have the best chance of getting the 493	 * Lookup NIP late so we have the best chance of getting the
485 * above info out without failing 494 * above info out without failing
486 */ 495 */
487 printk("NIP ["REG"] ", regs->nip); 496 printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
488 print_symbol("%s\n", regs->nip); 497 printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
489 printk("LR ["REG"] ", regs->link);
490 print_symbol("%s\n", regs->link);
491#endif 498#endif
492 show_stack(current, (unsigned long *) regs->gpr[1]); 499 show_stack(current, (unsigned long *) regs->gpr[1]);
493 if (!user_mode(regs)) 500 if (!user_mode(regs))
@@ -534,6 +541,7 @@ void prepare_to_copy(struct task_struct *tsk)
534{ 541{
535 flush_fp_to_thread(current); 542 flush_fp_to_thread(current);
536 flush_altivec_to_thread(current); 543 flush_altivec_to_thread(current);
544 flush_vsx_to_thread(current);
537 flush_spe_to_thread(current); 545 flush_spe_to_thread(current);
538} 546}
539 547
@@ -689,6 +697,9 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
689#endif 697#endif
690 698
691 discard_lazy_cpu_state(); 699 discard_lazy_cpu_state();
700#ifdef CONFIG_VSX
701 current->thread.used_vsr = 0;
702#endif
692 memset(current->thread.fpr, 0, sizeof(current->thread.fpr)); 703 memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
693 current->thread.fpscr.val = 0; 704 current->thread.fpscr.val = 0;
694#ifdef CONFIG_ALTIVEC 705#ifdef CONFIG_ALTIVEC
@@ -971,8 +982,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
971 newsp = stack[0]; 982 newsp = stack[0];
972 ip = stack[STACK_FRAME_LR_SAVE]; 983 ip = stack[STACK_FRAME_LR_SAVE];
973 if (!firstframe || ip != lr) { 984 if (!firstframe || ip != lr) {
974 printk("["REG"] ["REG"] ", sp, ip); 985 printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
975 print_symbol("%s", ip);
976 if (firstframe) 986 if (firstframe)
977 printk(" (unreliable)"); 987 printk(" (unreliable)");
978 printk("\n"); 988 printk("\n");
@@ -987,10 +997,9 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
987 && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { 997 && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
988 struct pt_regs *regs = (struct pt_regs *) 998 struct pt_regs *regs = (struct pt_regs *)
989 (sp + STACK_FRAME_OVERHEAD); 999 (sp + STACK_FRAME_OVERHEAD);
990 printk("--- Exception: %lx", regs->trap);
991 print_symbol(" at %s\n", regs->nip);
992 lr = regs->link; 1000 lr = regs->link;
993 print_symbol(" LR = %s\n", lr); 1001 printk("--- Exception: %lx at %pS\n LR = %pS\n",
1002 regs->trap, (void *)regs->nip, (void *)lr);
994 firstframe = 1; 1003 firstframe = 1;
995 } 1004 }
996 1005
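The show_regs()/show_stack() changes replace the printk() + print_symbol() two-step with the %pS printk extension, which resolves a kernel text address to symbol+offset inline, so each frame is emitted atomically in a single call:

	printk(KERN_INFO "faulted at %pS\n", (void *)regs->nip);
	/* prints e.g. "faulted at .do_page_fault+0x134/0x5e0" on ppc64 */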
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 2aefe2a4129a..87d83c56b31e 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -609,6 +609,10 @@ static struct feature_property {
609 {"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC}, 609 {"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
610 {"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC}, 610 {"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
611#endif /* CONFIG_ALTIVEC */ 611#endif /* CONFIG_ALTIVEC */
612#ifdef CONFIG_VSX
613 /* Yes, this _really_ is ibm,vmx == 2 to enable VSX */
614 {"ibm,vmx", 2, CPU_FTR_VSX, PPC_FEATURE_HAS_VSX},
615#endif /* CONFIG_VSX */
612#ifdef CONFIG_PPC64 616#ifdef CONFIG_PPC64
613 {"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP}, 617 {"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP},
614 {"ibm,purr", 1, CPU_FTR_PURR, 0}, 618 {"ibm,purr", 1, CPU_FTR_PURR, 0},
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 6d6df1e60325..1ea8c8d3ce89 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -620,6 +620,7 @@ static void __init early_cmdline_parse(void)
620#define OV1_PPC_2_03 0x10 /* set if we support PowerPC 2.03 */ 620#define OV1_PPC_2_03 0x10 /* set if we support PowerPC 2.03 */
621#define OV1_PPC_2_04 0x08 /* set if we support PowerPC 2.04 */ 621#define OV1_PPC_2_04 0x08 /* set if we support PowerPC 2.04 */
622#define OV1_PPC_2_05 0x04 /* set if we support PowerPC 2.05 */ 622#define OV1_PPC_2_05 0x04 /* set if we support PowerPC 2.05 */
623#define OV1_PPC_2_06 0x02 /* set if we support PowerPC 2.06 */
623 624
624/* Option vector 2: Open Firmware options supported */ 625/* Option vector 2: Open Firmware options supported */
625#define OV2_REAL_MODE 0x20 /* set if we want OF in real mode */ 626#define OV2_REAL_MODE 0x20 /* set if we want OF in real mode */
@@ -650,6 +651,8 @@ static void __init early_cmdline_parse(void)
650static unsigned char ibm_architecture_vec[] = { 651static unsigned char ibm_architecture_vec[] = {
651 W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */ 652 W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */
652 W(0xffff0000), W(0x003e0000), /* POWER6 */ 653 W(0xffff0000), W(0x003e0000), /* POWER6 */
654 W(0xffff0000), W(0x003f0000), /* POWER7 */
655 W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */
653 W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */ 656 W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */
654 W(0xfffffffe), W(0x0f000001), /* all 2.04-compliant and earlier */ 657 W(0xfffffffe), W(0x0f000001), /* all 2.04-compliant and earlier */
655 5 - 1, /* 5 option vectors */ 658 5 - 1, /* 5 option vectors */
@@ -658,7 +661,7 @@ static unsigned char ibm_architecture_vec[] = {
658 3 - 2, /* length */ 661 3 - 2, /* length */
659 0, /* don't ignore, don't halt */ 662 0, /* don't ignore, don't halt */
660 OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 | 663 OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
661 OV1_PPC_2_04 | OV1_PPC_2_05, 664 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06,
662 665
663 /* option vector 2: Open Firmware options supported */ 666 /* option vector 2: Open Firmware options supported */
664 34 - 2, /* length */ 667 34 - 2, /* length */
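ibm_architecture_vec opens with (mask, value) pairs that firmware matches against the processor version register before honouring the option vectors; the POWER7 and "all 2.06-compliant" entries are the additions here. The match itself, assuming the usual mask-and-compare semantics:

	/* does the running CPU's PVR match one table entry? */
	static int pvr_matches(unsigned int pvr, unsigned int mask,
			       unsigned int value)
	{
		return (pvr & mask) == value;
	}

	/* e.g. POWER7: pvr_matches(pvr, 0xffff0000, 0x003f0000) */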
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index 90eb3a3e383e..bc1fb27368af 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -128,12 +128,35 @@ static void of_bus_pci_count_cells(struct device_node *np,
128 *sizec = 2; 128 *sizec = 2;
129} 129}
130 130
131static unsigned int of_bus_pci_get_flags(const u32 *addr)
132{
133 unsigned int flags = 0;
134 u32 w = addr[0];
135
136 switch((w >> 24) & 0x03) {
137 case 0x01:
138 flags |= IORESOURCE_IO;
139 break;
140 case 0x02: /* 32 bits */
141 case 0x03: /* 64 bits */
142 flags |= IORESOURCE_MEM;
143 break;
144 }
145 if (w & 0x40000000)
146 flags |= IORESOURCE_PREFETCH;
147 return flags;
148}
149
131static u64 of_bus_pci_map(u32 *addr, const u32 *range, int na, int ns, int pna) 150static u64 of_bus_pci_map(u32 *addr, const u32 *range, int na, int ns, int pna)
132{ 151{
133 u64 cp, s, da; 152 u64 cp, s, da;
153 unsigned int af, rf;
154
155 af = of_bus_pci_get_flags(addr);
156 rf = of_bus_pci_get_flags(range);
134 157
135 /* Check address type match */ 158 /* Check address type match */
136 if ((addr[0] ^ range[0]) & 0x03000000) 159 if ((af ^ rf) & (IORESOURCE_MEM | IORESOURCE_IO))
137 return OF_BAD_ADDR; 160 return OF_BAD_ADDR;
138 161
139 /* Read address values, skipping high cell */ 162 /* Read address values, skipping high cell */
@@ -153,25 +176,6 @@ static int of_bus_pci_translate(u32 *addr, u64 offset, int na)
153 return of_bus_default_translate(addr + 1, offset, na - 1); 176 return of_bus_default_translate(addr + 1, offset, na - 1);
154} 177}
155 178
156static unsigned int of_bus_pci_get_flags(const u32 *addr)
157{
158 unsigned int flags = 0;
159 u32 w = addr[0];
160
161 switch((w >> 24) & 0x03) {
162 case 0x01:
163 flags |= IORESOURCE_IO;
164 break;
165 case 0x02: /* 32 bits */
166 case 0x03: /* 64 bits */
167 flags |= IORESOURCE_MEM;
168 break;
169 }
170 if (w & 0x40000000)
171 flags |= IORESOURCE_PREFETCH;
172 return flags;
173}
174
175const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size, 179const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
176 unsigned int *flags) 180 unsigned int *flags)
177{ 181{
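of_bus_pci_get_flags() is only moved ahead of of_bus_pci_map() so the latter can compare address spaces via decoded IORESOURCE flags instead of raw bit masks. A worked example of the decoding:

	u32 hi = 0x42000000;	/* high cell of a PCI address */
	unsigned int flags = of_bus_pci_get_flags(&hi);

	/* (hi >> 24) & 0x03 == 0x02 -> 32-bit memory -> IORESOURCE_MEM
	 * hi & 0x40000000 set       -> prefetchable  -> IORESOURCE_PREFETCH */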
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 2a9fe97e4521..8feb93e7890c 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -215,29 +215,56 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
215 unsigned int pos, unsigned int count, 215 unsigned int pos, unsigned int count,
216 void *kbuf, void __user *ubuf) 216 void *kbuf, void __user *ubuf)
217{ 217{
218#ifdef CONFIG_VSX
219 double buf[33];
220 int i;
221#endif
218 flush_fp_to_thread(target); 222 flush_fp_to_thread(target);
219 223
224#ifdef CONFIG_VSX
225 /* copy to local buffer then write that out */
226 for (i = 0; i < 32 ; i++)
227 buf[i] = target->thread.TS_FPR(i);
228 memcpy(&buf[32], &target->thread.fpscr, sizeof(double));
229 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
230
231#else
220 BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) != 232 BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
221 offsetof(struct thread_struct, fpr[32])); 233 offsetof(struct thread_struct, TS_FPR(32)));
222 234
223 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 235 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
224 &target->thread.fpr, 0, -1); 236 &target->thread.fpr, 0, -1);
237#endif
225} 238}
226 239
227static int fpr_set(struct task_struct *target, const struct user_regset *regset, 240static int fpr_set(struct task_struct *target, const struct user_regset *regset,
228 unsigned int pos, unsigned int count, 241 unsigned int pos, unsigned int count,
229 const void *kbuf, const void __user *ubuf) 242 const void *kbuf, const void __user *ubuf)
230{ 243{
244#ifdef CONFIG_VSX
245 double buf[33];
246 int i;
247#endif
231 flush_fp_to_thread(target); 248 flush_fp_to_thread(target);
232 249
250#ifdef CONFIG_VSX
251 /* copy to local buffer then write that out */
252 i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
253 if (i)
254 return i;
255 for (i = 0; i < 32 ; i++)
256 target->thread.TS_FPR(i) = buf[i];
257 memcpy(&target->thread.fpscr, &buf[32], sizeof(double));
258 return 0;
259#else
233 BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) != 260 BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
234 offsetof(struct thread_struct, fpr[32])); 261 offsetof(struct thread_struct, TS_FPR(32)));
235 262
236 return user_regset_copyin(&pos, &count, &kbuf, &ubuf, 263 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
237 &target->thread.fpr, 0, -1); 264 &target->thread.fpr, 0, -1);
265#endif
238} 266}
239 267
240
241#ifdef CONFIG_ALTIVEC 268#ifdef CONFIG_ALTIVEC
242/* 269/*
243 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go. 270 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
@@ -323,6 +350,56 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
323} 350}
324#endif /* CONFIG_ALTIVEC */ 351#endif /* CONFIG_ALTIVEC */
325 352
353#ifdef CONFIG_VSX
354/*
355 * Currently, to set and get all the VSX state, you need to call
356 * the FP and VMX calls as well.  This only gets/sets the lower 32
357 * 128-bit VSX registers.
358 */
359
360static int vsr_active(struct task_struct *target,
361 const struct user_regset *regset)
362{
363 flush_vsx_to_thread(target);
364 return target->thread.used_vsr ? regset->n : 0;
365}
366
367static int vsr_get(struct task_struct *target, const struct user_regset *regset,
368 unsigned int pos, unsigned int count,
369 void *kbuf, void __user *ubuf)
370{
371 double buf[32];
372 int ret, i;
373
374 flush_vsx_to_thread(target);
375
376 for (i = 0; i < 32 ; i++)
377 buf[i] = target->thread.fpr[i][TS_VSRLOWOFFSET];
378 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
379 buf, 0, 32 * sizeof(double));
380
381 return ret;
382}
383
384static int vsr_set(struct task_struct *target, const struct user_regset *regset,
385 unsigned int pos, unsigned int count,
386 const void *kbuf, const void __user *ubuf)
387{
388 double buf[32];
389 int ret, i;
390
391 flush_vsx_to_thread(target);
392
393 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
394 buf, 0, 32 * sizeof(double));
395 if (!ret)
396 for (i = 0; i < 32 ; i++)
397 target->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
398
399 return ret;
400}
401#endif /* CONFIG_VSX */
402
326#ifdef CONFIG_SPE 403#ifdef CONFIG_SPE
327 404
328/* 405/*
@@ -399,6 +476,9 @@ enum powerpc_regset {
399#ifdef CONFIG_ALTIVEC 476#ifdef CONFIG_ALTIVEC
400 REGSET_VMX, 477 REGSET_VMX,
401#endif 478#endif
479#ifdef CONFIG_VSX
480 REGSET_VSX,
481#endif
402#ifdef CONFIG_SPE 482#ifdef CONFIG_SPE
403 REGSET_SPE, 483 REGSET_SPE,
404#endif 484#endif
@@ -422,6 +502,13 @@ static const struct user_regset native_regsets[] = {
422 .active = vr_active, .get = vr_get, .set = vr_set 502 .active = vr_active, .get = vr_get, .set = vr_set
423 }, 503 },
424#endif 504#endif
505#ifdef CONFIG_VSX
506 [REGSET_VSX] = {
507 .core_note_type = NT_PPC_VSX, .n = 32,
508 .size = sizeof(double), .align = sizeof(double),
509 .active = vsr_active, .get = vsr_get, .set = vsr_set
510 },
511#endif
425#ifdef CONFIG_SPE 512#ifdef CONFIG_SPE
426 [REGSET_SPE] = { 513 [REGSET_SPE] = {
427 .n = 35, 514 .n = 35,
@@ -728,7 +815,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
728 tmp = ptrace_get_reg(child, (int) index); 815 tmp = ptrace_get_reg(child, (int) index);
729 } else { 816 } else {
730 flush_fp_to_thread(child); 817 flush_fp_to_thread(child);
731 tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0]; 818 tmp = ((unsigned long *)child->thread.fpr)
819 [TS_FPRWIDTH * (index - PT_FPR0)];
732 } 820 }
733 ret = put_user(tmp,(unsigned long __user *) data); 821 ret = put_user(tmp,(unsigned long __user *) data);
734 break; 822 break;
@@ -755,7 +843,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
755 ret = ptrace_put_reg(child, index, data); 843 ret = ptrace_put_reg(child, index, data);
756 } else { 844 } else {
757 flush_fp_to_thread(child); 845 flush_fp_to_thread(child);
758 ((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data; 846 ((unsigned long *)child->thread.fpr)
847 [TS_FPRWIDTH * (index - PT_FPR0)] = data;
759 ret = 0; 848 ret = 0;
760 } 849 }
761 break; 850 break;
@@ -820,6 +909,21 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
820 sizeof(u32)), 909 sizeof(u32)),
821 (const void __user *) data); 910 (const void __user *) data);
822#endif 911#endif
912#ifdef CONFIG_VSX
913 case PTRACE_GETVSRREGS:
914 return copy_regset_to_user(child, &user_ppc_native_view,
915 REGSET_VSX,
916 0, 32 * sizeof(double),
918 (void __user *) data);
919
920 case PTRACE_SETVSRREGS:
921 return copy_regset_from_user(child, &user_ppc_native_view,
922 REGSET_VSX,
923 0, 32 * sizeof(double),
925 (const void __user *) data);
926#endif
823#ifdef CONFIG_SPE 927#ifdef CONFIG_SPE
824 case PTRACE_GETEVRREGS: 928 case PTRACE_GETEVRREGS:
825 /* Get the child spe register state. */ 929 /* Get the child spe register state. */
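
The hunk above adds PTRACE_GETVSRREGS/PTRACE_SETVSRREGS requests that expose
the lower doublewords of VSR0-31 through the new REGSET_VSX.  A hypothetical
tracer-side sketch; the request value (27) and the 32-double layout are
assumptions taken from the powerpc headers of this period:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_GETVSRREGS
#define PTRACE_GETVSRREGS 27	/* assumed powerpc value */
#endif

static int dump_vsr_halves(pid_t child)
{
	double vsrlow[32];	/* lower 64 bits of VSR0..VSR31 */
	int i;

	if (ptrace(PTRACE_GETVSRREGS, child, 0, vsrlow) == -1) {
		perror("PTRACE_GETVSRREGS");
		return -1;
	}
	for (i = 0; i < 32; i++)
		printf("vsr%d low doubleword: %a\n", i, vsrlow[i]);
	return 0;
}

The child must already be stopped under ptrace (e.g. after PTRACE_ATTACH and
waitpid) before the request is issued.
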
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index 4c1de6af4c09..67bf1a1e7e14 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -64,6 +64,11 @@ static long compat_ptrace_old(struct task_struct *child, long request,
64 return -EPERM; 64 return -EPERM;
65} 65}
66 66
67/* Macros to work out the correct index for the FPR in the thread struct */
68#define FPRNUMBER(i) (((i) - PT_FPR0) >> 1)
69#define FPRHALF(i) (((i) - PT_FPR0) & 1)
70#define FPRINDEX(i) (TS_FPRWIDTH * FPRNUMBER(i) * 2 + FPRHALF(i))
71
67long compat_arch_ptrace(struct task_struct *child, compat_long_t request, 72long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
68 compat_ulong_t caddr, compat_ulong_t cdata) 73 compat_ulong_t caddr, compat_ulong_t cdata)
69{ 74{
@@ -122,7 +127,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
122 * to be an array of unsigned int (32 bits) - the 127 * to be an array of unsigned int (32 bits) - the
123 * index passed in is based on this assumption. 128 * index passed in is based on this assumption.
124 */ 129 */
125 tmp = ((unsigned int *)child->thread.fpr)[index - PT_FPR0]; 130 tmp = ((unsigned int *)child->thread.fpr)
131 [FPRINDEX(index)];
126 } 132 }
127 ret = put_user((unsigned int)tmp, (u32 __user *)data); 133 ret = put_user((unsigned int)tmp, (u32 __user *)data);
128 break; 134 break;
@@ -162,7 +168,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
162 CHECK_FULL_REGS(child->thread.regs); 168 CHECK_FULL_REGS(child->thread.regs);
163 if (numReg >= PT_FPR0) { 169 if (numReg >= PT_FPR0) {
164 flush_fp_to_thread(child); 170 flush_fp_to_thread(child);
165 tmp = ((unsigned long int *)child->thread.fpr)[numReg - PT_FPR0]; 171 tmp = ((unsigned long int *)child->thread.fpr)
172 [FPRINDEX(numReg)];
166 } else { /* register within PT_REGS struct */ 173 } else { /* register within PT_REGS struct */
167 tmp = ptrace_get_reg(child, numReg); 174 tmp = ptrace_get_reg(child, numReg);
168 } 175 }
@@ -217,7 +224,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
217 * to be an array of unsigned int (32 bits) - the 224 * to be an array of unsigned int (32 bits) - the
218 * index passed in is based on this assumption. 225 * index passed in is based on this assumption.
219 */ 226 */
220 ((unsigned int *)child->thread.fpr)[index - PT_FPR0] = data; 227 ((unsigned int *)child->thread.fpr)
228 [FPRINDEX(index)] = data;
221 ret = 0; 229 ret = 0;
222 } 230 }
223 break; 231 break;
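
The FPRINDEX() arithmetic above flattens a 32-bit tracer's word index into
the widened fpr array: each architected FPR is two u32 words to the tracer,
and with CONFIG_VSX each fpr slot is TS_FPRWIDTH (2) doublewords wide, hence
the extra factor of two.  A worked standalone example; PT_FPR0 == 48 and
TS_FPRWIDTH == 2 are assumptions matching the powerpc headers:

#include <stdio.h>

#define PT_FPR0		48
#define TS_FPRWIDTH	2

#define FPRNUMBER(i)	(((i) - PT_FPR0) >> 1)
#define FPRHALF(i)	(((i) - PT_FPR0) & 1)
#define FPRINDEX(i)	(TS_FPRWIDTH * FPRNUMBER(i) * 2 + FPRHALF(i))

int main(void)
{
	int idx;

	/* fpr1 starts at u32 word 4, not 2: the VSX doubleword is skipped */
	for (idx = PT_FPR0; idx < PT_FPR0 + 6; idx++)
		printf("ptrace index %d -> u32 word %d (fpr%d, half %d)\n",
		       idx, FPRINDEX(idx), FPRNUMBER(idx), FPRHALF(idx));
	return 0;
}
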
diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c
index f9c6abc84a94..1be9fe38bcb5 100644
--- a/arch/powerpc/kernel/rtas-proc.c
+++ b/arch/powerpc/kernel/rtas-proc.c
@@ -160,7 +160,7 @@ static int sensors_open(struct inode *inode, struct file *file)
160 return single_open(file, ppc_rtas_sensors_show, NULL); 160 return single_open(file, ppc_rtas_sensors_show, NULL);
161} 161}
162 162
163const struct file_operations ppc_rtas_sensors_operations = { 163static const struct file_operations ppc_rtas_sensors_operations = {
164 .open = sensors_open, 164 .open = sensors_open,
165 .read = seq_read, 165 .read = seq_read,
166 .llseek = seq_lseek, 166 .llseek = seq_lseek,
@@ -172,7 +172,7 @@ static int poweron_open(struct inode *inode, struct file *file)
172 return single_open(file, ppc_rtas_poweron_show, NULL); 172 return single_open(file, ppc_rtas_poweron_show, NULL);
173} 173}
174 174
175const struct file_operations ppc_rtas_poweron_operations = { 175static const struct file_operations ppc_rtas_poweron_operations = {
176 .open = poweron_open, 176 .open = poweron_open,
177 .read = seq_read, 177 .read = seq_read,
178 .llseek = seq_lseek, 178 .llseek = seq_lseek,
@@ -185,7 +185,7 @@ static int progress_open(struct inode *inode, struct file *file)
185 return single_open(file, ppc_rtas_progress_show, NULL); 185 return single_open(file, ppc_rtas_progress_show, NULL);
186} 186}
187 187
188const struct file_operations ppc_rtas_progress_operations = { 188static const struct file_operations ppc_rtas_progress_operations = {
189 .open = progress_open, 189 .open = progress_open,
190 .read = seq_read, 190 .read = seq_read,
191 .llseek = seq_lseek, 191 .llseek = seq_lseek,
@@ -198,7 +198,7 @@ static int clock_open(struct inode *inode, struct file *file)
198 return single_open(file, ppc_rtas_clock_show, NULL); 198 return single_open(file, ppc_rtas_clock_show, NULL);
199} 199}
200 200
201const struct file_operations ppc_rtas_clock_operations = { 201static const struct file_operations ppc_rtas_clock_operations = {
202 .open = clock_open, 202 .open = clock_open,
203 .read = seq_read, 203 .read = seq_read,
204 .llseek = seq_lseek, 204 .llseek = seq_lseek,
@@ -211,7 +211,7 @@ static int tone_freq_open(struct inode *inode, struct file *file)
211 return single_open(file, ppc_rtas_tone_freq_show, NULL); 211 return single_open(file, ppc_rtas_tone_freq_show, NULL);
212} 212}
213 213
214const struct file_operations ppc_rtas_tone_freq_operations = { 214static const struct file_operations ppc_rtas_tone_freq_operations = {
215 .open = tone_freq_open, 215 .open = tone_freq_open,
216 .read = seq_read, 216 .read = seq_read,
217 .llseek = seq_lseek, 217 .llseek = seq_lseek,
@@ -224,7 +224,7 @@ static int tone_volume_open(struct inode *inode, struct file *file)
224 return single_open(file, ppc_rtas_tone_volume_show, NULL); 224 return single_open(file, ppc_rtas_tone_volume_show, NULL);
225} 225}
226 226
227const struct file_operations ppc_rtas_tone_volume_operations = { 227static const struct file_operations ppc_rtas_tone_volume_operations = {
228 .open = tone_volume_open, 228 .open = tone_volume_open,
229 .read = seq_read, 229 .read = seq_read,
230 .llseek = seq_lseek, 230 .llseek = seq_lseek,
@@ -237,7 +237,7 @@ static int rmo_buf_open(struct inode *inode, struct file *file)
237 return single_open(file, ppc_rtas_rmo_buf_show, NULL); 237 return single_open(file, ppc_rtas_rmo_buf_show, NULL);
238} 238}
239 239
240const struct file_operations ppc_rtas_rmo_buf_ops = { 240static const struct file_operations ppc_rtas_rmo_buf_ops = {
241 .open = rmo_buf_open, 241 .open = rmo_buf_open,
242 .read = seq_read, 242 .read = seq_read,
243 .llseek = seq_lseek, 243 .llseek = seq_lseek,
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 34843c318419..c680f1bbd387 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -340,8 +340,8 @@ int rtas_get_error_log_max(void)
340EXPORT_SYMBOL(rtas_get_error_log_max); 340EXPORT_SYMBOL(rtas_get_error_log_max);
341 341
342 342
343char rtas_err_buf[RTAS_ERROR_LOG_MAX]; 343static char rtas_err_buf[RTAS_ERROR_LOG_MAX];
344int rtas_last_error_token; 344static int rtas_last_error_token;
345 345
346/** Return a copy of the detailed error text associated with the 346/** Return a copy of the detailed error text associated with the
347 * most recent failed call to rtas. Because the error text 347 * most recent failed call to rtas. Because the error text
@@ -484,7 +484,7 @@ unsigned int rtas_busy_delay(int status)
484} 484}
485EXPORT_SYMBOL(rtas_busy_delay); 485EXPORT_SYMBOL(rtas_busy_delay);
486 486
487int rtas_error_rc(int rtas_rc) 487static int rtas_error_rc(int rtas_rc)
488{ 488{
489 int rc; 489 int rc;
490 490
@@ -747,7 +747,7 @@ static int rtas_ibm_suspend_me(struct rtas_args *args)
747 /* Call function on all CPUs. One of us will make the 747 /* Call function on all CPUs. One of us will make the
748 * rtas call 748 * rtas call
749 */ 749 */
750 if (on_each_cpu(rtas_percpu_suspend_me, &data, 1, 0)) 750 if (on_each_cpu(rtas_percpu_suspend_me, &data, 0))
751 data.error = -EINVAL; 751 data.error = -EINVAL;
752 752
753 wait_for_completion(&done); 753 wait_for_completion(&done);
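
The one-line change above follows the tree-wide conversion in this merge:
on_each_cpu() loses its unused retry/nonatomic argument and is now just
(func, info, wait).  A minimal in-kernel caller under the new convention;
bump_counter() and demo_count are made-up names:

#include <linux/smp.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_count);

static void bump_counter(void *info)
{
	__get_cpu_var(demo_count)++;	/* runs with interrupts disabled */
}

static void bump_everywhere(void)
{
	/* wait == 1: return only after every CPU has run bump_counter() */
	on_each_cpu(bump_counter, NULL, 1);
}
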
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 0a5e22b22729..09ded5c424a9 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -731,7 +731,7 @@ static const struct file_operations validate_flash_operations = {
731 .release = validate_flash_release, 731 .release = validate_flash_release,
732}; 732};
733 733
734int __init rtas_flash_init(void) 734static int __init rtas_flash_init(void)
735{ 735{
736 int rc; 736 int rc;
737 737
@@ -817,7 +817,7 @@ cleanup:
817 return rc; 817 return rc;
818} 818}
819 819
820void __exit rtas_flash_cleanup(void) 820static void __exit rtas_flash_cleanup(void)
821{ 821{
822 rtas_flash_term_hook = NULL; 822 rtas_flash_term_hook = NULL;
823 823
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 3ab88a9dc70d..589a2797eac2 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -155,12 +155,12 @@ static int rtas_pci_write_config(struct pci_bus *bus,
155 return PCIBIOS_DEVICE_NOT_FOUND; 155 return PCIBIOS_DEVICE_NOT_FOUND;
156} 156}
157 157
158struct pci_ops rtas_pci_ops = { 158static struct pci_ops rtas_pci_ops = {
159 .read = rtas_pci_read_config, 159 .read = rtas_pci_read_config,
160 .write = rtas_pci_write_config, 160 .write = rtas_pci_write_config,
161}; 161};
162 162
163int is_python(struct device_node *dev) 163static int is_python(struct device_node *dev)
164{ 164{
165 const char *model = of_get_property(dev, "model", NULL); 165 const char *model = of_get_property(dev, "model", NULL);
166 166
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index db540eab09f4..61a3f4132087 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -500,6 +500,7 @@ void __init smp_setup_cpu_sibling_map(void)
500} 500}
501#endif /* CONFIG_SMP */ 501#endif /* CONFIG_SMP */
502 502
503#ifdef CONFIG_PCSPKR_PLATFORM
503static __init int add_pcspkr(void) 504static __init int add_pcspkr(void)
504{ 505{
505 struct device_node *np; 506 struct device_node *np;
@@ -522,6 +523,7 @@ static __init int add_pcspkr(void)
522 return ret; 523 return ret;
523} 524}
524device_initcall(add_pcspkr); 525device_initcall(add_pcspkr);
526#endif /* CONFIG_PCSPKR_PLATFORM */
525 527
526void probe_machine(void) 528void probe_machine(void)
527{ 529{
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 5112a4aa801d..066e65c59b58 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -43,10 +43,6 @@
43 43
44#define DBG(fmt...) 44#define DBG(fmt...)
45 45
46#if defined CONFIG_KGDB
47#include <asm/kgdb.h>
48#endif
49
50extern void bootx_init(unsigned long r4, unsigned long phys); 46extern void bootx_init(unsigned long r4, unsigned long phys);
51 47
52int boot_cpuid; 48int boot_cpuid;
@@ -81,7 +77,7 @@ int ucache_bsize;
81 * from the address that it was linked at, so we must use RELOC/PTRRELOC 77 * from the address that it was linked at, so we must use RELOC/PTRRELOC
82 * to access static data (including strings). -- paulus 78 * to access static data (including strings). -- paulus
83 */ 79 */
84unsigned long __init early_init(unsigned long dt_ptr) 80notrace unsigned long __init early_init(unsigned long dt_ptr)
85{ 81{
86 unsigned long offset = reloc_offset(); 82 unsigned long offset = reloc_offset();
87 struct cpu_spec *spec; 83 struct cpu_spec *spec;
@@ -101,6 +97,10 @@ unsigned long __init early_init(unsigned long dt_ptr)
101 PTRRELOC(&__start___ftr_fixup), 97 PTRRELOC(&__start___ftr_fixup),
102 PTRRELOC(&__stop___ftr_fixup)); 98 PTRRELOC(&__stop___ftr_fixup));
103 99
100 do_lwsync_fixups(spec->cpu_features,
101 PTRRELOC(&__start___lwsync_fixup),
102 PTRRELOC(&__stop___lwsync_fixup));
103
104 return KERNELBASE + offset; 104 return KERNELBASE + offset;
105} 105}
106 106
@@ -111,7 +111,7 @@ unsigned long __init early_init(unsigned long dt_ptr)
111 * This is called very early on the boot process, after a minimal 111 * This is called very early on the boot process, after a minimal
112 * MMU environment has been set up but before MMU_init is called. 112 * MMU environment has been set up but before MMU_init is called.
113 */ 113 */
114void __init machine_init(unsigned long dt_ptr, unsigned long phys) 114notrace void __init machine_init(unsigned long dt_ptr, unsigned long phys)
115{ 115{
116 /* Enable early debugging if any specified (see udbg.h) */ 116 /* Enable early debugging if any specified (see udbg.h) */
117 udbg_early_init(); 117 udbg_early_init();
@@ -127,13 +127,18 @@ void __init machine_init(unsigned long dt_ptr, unsigned long phys)
127 ppc_md.power_save = ppc6xx_idle; 127 ppc_md.power_save = ppc6xx_idle;
128#endif 128#endif
129 129
130#ifdef CONFIG_E500
131 if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
132 cpu_has_feature(CPU_FTR_CAN_NAP))
133 ppc_md.power_save = e500_idle;
134#endif
130 if (ppc_md.progress) 135 if (ppc_md.progress)
131 ppc_md.progress("id mach(): done", 0x200); 136 ppc_md.progress("id mach(): done", 0x200);
132} 137}
133 138
134#ifdef CONFIG_BOOKE_WDT 139#ifdef CONFIG_BOOKE_WDT
135/* Checks wdt=x and wdt_period=xx command-line option */ 140/* Checks wdt=x and wdt_period=xx command-line option */
136int __init early_parse_wdt(char *p) 141notrace int __init early_parse_wdt(char *p)
137{ 142{
138 if (p && strncmp(p, "0", 1) != 0) 143 if (p && strncmp(p, "0", 1) != 0)
139 booke_wdt_enabled = 1; 144 booke_wdt_enabled = 1;
@@ -248,6 +253,28 @@ static void __init irqstack_early_init(void)
248#define irqstack_early_init() 253#define irqstack_early_init()
249#endif 254#endif
250 255
256#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
257static void __init exc_lvl_early_init(void)
258{
259 unsigned int i;
260
261 /* interrupt stacks must be in lowmem; we get that for free on ppc32
262 * as the lmb is limited to lowmem by LMB_REAL_LIMIT */
263 for_each_possible_cpu(i) {
264 critirq_ctx[i] = (struct thread_info *)
265 __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
266#ifdef CONFIG_BOOKE
267 dbgirq_ctx[i] = (struct thread_info *)
268 __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
269 mcheckirq_ctx[i] = (struct thread_info *)
270 __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
271#endif
272 }
273}
274#else
275#define exc_lvl_early_init()
276#endif
277
251/* Warning, IO base is not yet inited */ 278/* Warning, IO base is not yet inited */
252void __init setup_arch(char **cmdline_p) 279void __init setup_arch(char **cmdline_p)
253{ 280{
@@ -271,18 +298,6 @@ void __init setup_arch(char **cmdline_p)
271 298
272 xmon_setup(); 299 xmon_setup();
273 300
274#if defined(CONFIG_KGDB)
275 if (ppc_md.kgdb_map_scc)
276 ppc_md.kgdb_map_scc();
277 set_debug_traps();
278 if (strstr(cmd_line, "gdb")) {
279 if (ppc_md.progress)
280 ppc_md.progress("setup_arch: kgdb breakpoint", 0x4000);
281 printk("kgdb breakpoint activated\n");
282 breakpoint();
283 }
284#endif
285
286 /* 301 /*
287 * Set cache line size based on type of cpu as a default. 302 * Set cache line size based on type of cpu as a default.
288 * Systems with OF can look in the properties on the cpu node(s) 303 * Systems with OF can look in the properties on the cpu node(s)
@@ -305,6 +320,8 @@ void __init setup_arch(char **cmdline_p)
305 init_mm.end_data = (unsigned long) _edata; 320 init_mm.end_data = (unsigned long) _edata;
306 init_mm.brk = klimit; 321 init_mm.brk = klimit;
307 322
323 exc_lvl_early_init();
324
308 irqstack_early_init(); 325 irqstack_early_init();
309 326
310 /* set up the bootmem stuff with available memory */ 327 /* set up the bootmem stuff with available memory */
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 098fd96a394a..04d8de9f0fc6 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -363,6 +363,8 @@ void __init setup_system(void)
363 &__start___ftr_fixup, &__stop___ftr_fixup); 363 &__start___ftr_fixup, &__stop___ftr_fixup);
364 do_feature_fixups(powerpc_firmware_features, 364 do_feature_fixups(powerpc_firmware_features,
365 &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup); 365 &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
366 do_lwsync_fixups(cur_cpu_spec->cpu_features,
367 &__start___lwsync_fixup, &__stop___lwsync_fixup);
366 368
367 /* 369 /*
368 * Unflatten the device-tree passed by prom_init or kexec 370 * Unflatten the device-tree passed by prom_init or kexec
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index a65a44fbe523..ad55488939c3 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -120,7 +120,7 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
120 int ret; 120 int ret;
121 int is32 = is_32bit_task(); 121 int is32 = is_32bit_task();
122 122
123 if (test_thread_flag(TIF_RESTORE_SIGMASK)) 123 if (current_thread_info()->local_flags & _TLF_RESTORE_SIGMASK)
124 oldset = &current->saved_sigmask; 124 oldset = &current->saved_sigmask;
125 else if (!oldset) 125 else if (!oldset)
126 oldset = &current->blocked; 126 oldset = &current->blocked;
@@ -131,9 +131,10 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
131 check_syscall_restart(regs, &ka, signr > 0); 131 check_syscall_restart(regs, &ka, signr > 0);
132 132
133 if (signr <= 0) { 133 if (signr <= 0) {
134 struct thread_info *ti = current_thread_info();
134 /* No signal to deliver -- put the saved sigmask back */ 135 /* No signal to deliver -- put the saved sigmask back */
135 if (test_thread_flag(TIF_RESTORE_SIGMASK)) { 136 if (ti->local_flags & _TLF_RESTORE_SIGMASK) {
136 clear_thread_flag(TIF_RESTORE_SIGMASK); 137 ti->local_flags &= ~_TLF_RESTORE_SIGMASK;
137 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); 138 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
138 } 139 }
139 return 0; /* no signals delivered */ 140 return 0; /* no signals delivered */
@@ -169,10 +170,9 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
169 170
170 /* 171 /*
171 * A signal was successfully delivered; the saved sigmask is in 172 * A signal was successfully delivered; the saved sigmask is in
172 * its frame, and we can clear the TIF_RESTORE_SIGMASK flag. 173 * its frame, and we can clear the TLF_RESTORE_SIGMASK flag.
173 */ 174 */
174 if (test_thread_flag(TIF_RESTORE_SIGMASK)) 175 current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK;
175 clear_thread_flag(TIF_RESTORE_SIGMASK);
176 } 176 }
177 177
178 return ret; 178 return ret;
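
The conversion above moves RESTORE_SIGMASK from the atomic TIF_ flags word
to thread_info->local_flags, which only the owning task ever touches, so a
plain read-modify-write replaces the atomic bitops.  A standalone mock-up of
that pattern (the struct and bit value are stand-ins, not the kernel's):

#include <stdio.h>

#define _TLF_RESTORE_SIGMASK	1UL	/* stand-in bit for the demo */

struct thread_info_demo {
	unsigned long local_flags;	/* only its own task writes this */
};

static void set_restore_sigmask_demo(struct thread_info_demo *ti)
{
	ti->local_flags |= _TLF_RESTORE_SIGMASK;	/* no atomics needed */
}

static int test_and_clear_restore_sigmask_demo(struct thread_info_demo *ti)
{
	if (!(ti->local_flags & _TLF_RESTORE_SIGMASK))
		return 0;
	ti->local_flags &= ~_TLF_RESTORE_SIGMASK;
	return 1;
}

int main(void)
{
	struct thread_info_demo ti = { 0 };

	set_restore_sigmask_demo(&ti);
	printf("was set: %d\n", test_and_clear_restore_sigmask_demo(&ti));
	return 0;
}
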
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index 77efb3d5465a..28f4b9f5fe5e 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -24,6 +24,16 @@ extern int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
24 siginfo_t *info, sigset_t *oldset, 24 siginfo_t *info, sigset_t *oldset,
25 struct pt_regs *regs); 25 struct pt_regs *regs);
26 26
27extern unsigned long copy_fpr_to_user(void __user *to,
28 struct task_struct *task);
29extern unsigned long copy_fpr_from_user(struct task_struct *task,
30 void __user *from);
31#ifdef CONFIG_VSX
32extern unsigned long copy_vsx_to_user(void __user *to,
33 struct task_struct *task);
34extern unsigned long copy_vsx_from_user(struct task_struct *task,
35 void __user *from);
36#endif
27 37
28#ifdef CONFIG_PPC64 38#ifdef CONFIG_PPC64
29 39
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index ad6943468ee9..3e80aa32b8b0 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -68,6 +68,13 @@
68#define ucontext ucontext32 68#define ucontext ucontext32
69 69
70/* 70/*
71 * Userspace code may pass a ucontext which doesn't include VSX added
72 * at the end. We need to check for this case.
73 */
74#define UCONTEXTSIZEWITHOUTVSX \
75 (sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
76
77/*
71 * Returning 0 means we return to userspace via 78 * Returning 0 means we return to userspace via
72 * ret_from_except and thus restore all user 79 * ret_from_except and thus restore all user
73 * registers from *regs. This is what we need 80 * registers from *regs. This is what we need
@@ -243,7 +250,7 @@ long sys_sigsuspend(old_sigset_t mask)
243 250
244 current->state = TASK_INTERRUPTIBLE; 251 current->state = TASK_INTERRUPTIBLE;
245 schedule(); 252 schedule();
246 set_thread_flag(TIF_RESTORE_SIGMASK); 253 set_restore_sigmask();
247 return -ERESTARTNOHAND; 254 return -ERESTARTNOHAND;
248} 255}
249 256
@@ -328,6 +335,75 @@ struct rt_sigframe {
328 int abigap[56]; 335 int abigap[56];
329}; 336};
330 337
338#ifdef CONFIG_VSX
339unsigned long copy_fpr_to_user(void __user *to,
340 struct task_struct *task)
341{
342 double buf[ELF_NFPREG];
343 int i;
344
345 /* copy the FPRs and fpscr to a local buffer, then write to userspace */
346 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
347 buf[i] = task->thread.TS_FPR(i);
348 memcpy(&buf[i], &task->thread.fpscr, sizeof(double));
349 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
350}
351
352unsigned long copy_fpr_from_user(struct task_struct *task,
353 void __user *from)
354{
355 double buf[ELF_NFPREG];
356 int i;
357
358 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
359 return 1;
360 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
361 task->thread.TS_FPR(i) = buf[i];
362 memcpy(&task->thread.fpscr, &buf[i], sizeof(double));
363
364 return 0;
365}
366
367unsigned long copy_vsx_to_user(void __user *to,
368 struct task_struct *task)
369{
370 double buf[ELF_NVSRHALFREG];
371 int i;
372
373 /* copy the VSR low doublewords to a local buffer, then to userspace */
374 for (i = 0; i < ELF_NVSRHALFREG; i++)
375 buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET];
376 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
377}
378
379unsigned long copy_vsx_from_user(struct task_struct *task,
380 void __user *from)
381{
382 double buf[ELF_NVSRHALFREG];
383 int i;
384
385 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
386 return 1;
387 for (i = 0; i < ELF_NVSRHALFREG ; i++)
388 task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
389 return 0;
390}
391#else
392inline unsigned long copy_fpr_to_user(void __user *to,
393 struct task_struct *task)
394{
395 return __copy_to_user(to, task->thread.fpr,
396 ELF_NFPREG * sizeof(double));
397}
398
399inline unsigned long copy_fpr_from_user(struct task_struct *task,
400 void __user *from)
401{
402 return __copy_from_user(task->thread.fpr, from,
403 ELF_NFPREG * sizeof(double));
404}
405#endif
406
331/* 407/*
332 * Save the current user registers on the user stack. 408 * Save the current user registers on the user stack.
333 * We only save the altivec/spe registers if the process has used 409 * We only save the altivec/spe registers if the process has used
@@ -336,13 +412,13 @@ struct rt_sigframe {
336static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, 412static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
337 int sigret) 413 int sigret)
338{ 414{
415 unsigned long msr = regs->msr;
416
339 /* Make sure floating point registers are stored in regs */ 417 /* Make sure floating point registers are stored in regs */
340 flush_fp_to_thread(current); 418 flush_fp_to_thread(current);
341 419
342 /* save general and floating-point registers */ 420 /* save general registers */
343 if (save_general_regs(regs, frame) || 421 if (save_general_regs(regs, frame))
344 __copy_to_user(&frame->mc_fregs, current->thread.fpr,
345 ELF_NFPREG * sizeof(double)))
346 return 1; 422 return 1;
347 423
348#ifdef CONFIG_ALTIVEC 424#ifdef CONFIG_ALTIVEC
@@ -354,8 +430,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
354 return 1; 430 return 1;
355 /* set MSR_VEC in the saved MSR value to indicate that 431 /* set MSR_VEC in the saved MSR value to indicate that
356 frame->mc_vregs contains valid data */ 432 frame->mc_vregs contains valid data */
357 if (__put_user(regs->msr | MSR_VEC, &frame->mc_gregs[PT_MSR])) 433 msr |= MSR_VEC;
358 return 1;
359 } 434 }
360 /* else assert((regs->msr & MSR_VEC) == 0) */ 435 /* else assert((regs->msr & MSR_VEC) == 0) */
361 436
@@ -367,7 +442,22 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
367 if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32])) 442 if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
368 return 1; 443 return 1;
369#endif /* CONFIG_ALTIVEC */ 444#endif /* CONFIG_ALTIVEC */
370 445 if (copy_fpr_to_user(&frame->mc_fregs, current))
446 return 1;
447#ifdef CONFIG_VSX
448 /*
449 * Copy the VSR 0-31 low doublewords from the thread_struct
450 * to a local buffer, then write that to userspace.  Also set
451 * MSR_VSX in the saved MSR value to indicate that
452 * frame->mc_vsregs contains valid data.
453 */
454 if (current->thread.used_vsr) {
455 __giveup_vsx(current);
456 if (copy_vsx_to_user(&frame->mc_vsregs, current))
457 return 1;
458 msr |= MSR_VSX;
459 }
460#endif /* CONFIG_VSX */
371#ifdef CONFIG_SPE 461#ifdef CONFIG_SPE
372 /* save spe registers */ 462 /* save spe registers */
373 if (current->thread.used_spe) { 463 if (current->thread.used_spe) {
@@ -377,8 +467,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
377 return 1; 467 return 1;
378 /* set MSR_SPE in the saved MSR value to indicate that 468 /* set MSR_SPE in the saved MSR value to indicate that
379 frame->mc_vregs contains valid data */ 469 frame->mc_vregs contains valid data */
380 if (__put_user(regs->msr | MSR_SPE, &frame->mc_gregs[PT_MSR])) 470 msr |= MSR_SPE;
381 return 1;
382 } 471 }
383 /* else assert((regs->msr & MSR_SPE) == 0) */ 472 /* else assert((regs->msr & MSR_SPE) == 0) */
384 473
@@ -387,6 +476,8 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
387 return 1; 476 return 1;
388#endif /* CONFIG_SPE */ 477#endif /* CONFIG_SPE */
389 478
479 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
480 return 1;
390 if (sigret) { 481 if (sigret) {
391 /* Set up the sigreturn trampoline: li r0,sigret; sc */ 482 /* Set up the sigreturn trampoline: li r0,sigret; sc */
392 if (__put_user(0x38000000UL + sigret, &frame->tramp[0]) 483 if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
@@ -409,6 +500,9 @@ static long restore_user_regs(struct pt_regs *regs,
409 long err; 500 long err;
410 unsigned int save_r2 = 0; 501 unsigned int save_r2 = 0;
411 unsigned long msr; 502 unsigned long msr;
503#ifdef CONFIG_VSX
504 int i;
505#endif
412 506
413 /* 507 /*
414 * restore general registers but not including MSR or SOFTE. Also 508 * restore general registers but not including MSR or SOFTE. Also
@@ -436,16 +530,11 @@ static long restore_user_regs(struct pt_regs *regs,
436 */ 530 */
437 discard_lazy_cpu_state(); 531 discard_lazy_cpu_state();
438 532
439 /* force the process to reload the FP registers from
440 current->thread when it next does FP instructions */
441 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
442 if (__copy_from_user(current->thread.fpr, &sr->mc_fregs,
443 sizeof(sr->mc_fregs)))
444 return 1;
445
446#ifdef CONFIG_ALTIVEC 533#ifdef CONFIG_ALTIVEC
447 /* force the process to reload the altivec registers from 534 /*
448 current->thread when it next does altivec instructions */ 535 * Force the process to reload the altivec registers from
536 * current->thread when it next does altivec instructions
537 */
449 regs->msr &= ~MSR_VEC; 538 regs->msr &= ~MSR_VEC;
450 if (msr & MSR_VEC) { 539 if (msr & MSR_VEC) {
451 /* restore altivec registers from the stack */ 540 /* restore altivec registers from the stack */
@@ -459,6 +548,31 @@ static long restore_user_regs(struct pt_regs *regs,
459 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32])) 548 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
460 return 1; 549 return 1;
461#endif /* CONFIG_ALTIVEC */ 550#endif /* CONFIG_ALTIVEC */
551 if (copy_fpr_from_user(current, &sr->mc_fregs))
552 return 1;
553
554#ifdef CONFIG_VSX
555 /*
556 * Force the process to reload the VSX registers from
557 * current->thread when it next executes a VSX instruction.
558 */
559 regs->msr &= ~MSR_VSX;
560 if (msr & MSR_VSX) {
561 /*
562 * Restore altivec registers from the stack to a local
563 * buffer, then write this out to the thread_struct
564 */
565 if (copy_vsx_from_user(current, &sr->mc_vsregs))
566 return 1;
567 } else if (current->thread.used_vsr)
568 for (i = 0; i < 32 ; i++)
569 current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
570#endif /* CONFIG_VSX */
571 /*
572 * force the process to reload the FP registers from
573 * current->thread when it next does FP instructions
574 */
575 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
462 576
463#ifdef CONFIG_SPE 577#ifdef CONFIG_SPE
464 /* force the process to reload the spe registers from 578 /* force the process to reload the spe registers from
@@ -823,12 +937,42 @@ long sys_swapcontext(struct ucontext __user *old_ctx,
823{ 937{
824 unsigned char tmp; 938 unsigned char tmp;
825 939
940#ifdef CONFIG_PPC64
941 unsigned long new_msr = 0;
942
943 if (new_ctx &&
944 __get_user(new_msr, &new_ctx->uc_mcontext.mc_gregs[PT_MSR]))
945 return -EFAULT;
946 /*
947 * Check that the context is not smaller than the original
948 * size (with VMX but without VSX)
949 */
950 if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
951 return -EINVAL;
952 /*
953 * Reject a new context that sets the MSR VSX bits but is
954 * too small to actually provide the VSX state.
955 */
956 if ((ctx_size < sizeof(struct ucontext)) &&
957 (new_msr & MSR_VSX))
958 return -EINVAL;
959#ifdef CONFIG_VSX
960 /*
961 * If userspace doesn't provide enough room for VSX data,
962 * but current thread has used VSX, we don't have anywhere
963 * to store the full context back into.
964 */
965 if ((ctx_size < sizeof(struct ucontext)) &&
966 (current->thread.used_vsr && old_ctx))
967 return -EINVAL;
968#endif
969#else
826 /* Context size is for future use. Right now, we only make sure 970 /* Context size is for future use. Right now, we only make sure
827 * we are passed something we understand 971 * we are passed something we understand
828 */ 972 */
829 if (ctx_size < sizeof(struct ucontext)) 973 if (ctx_size < sizeof(struct ucontext))
830 return -EINVAL; 974 return -EINVAL;
831 975#endif
832 if (old_ctx != NULL) { 976 if (old_ctx != NULL) {
833 struct mcontext __user *mctx; 977 struct mcontext __user *mctx;
834 978
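
The swapcontext() checks added above boil down to two rules: a context
smaller than the pre-VSX layout is always rejected, and the full layout is
required only when the new MSR claims VSX state.  The same logic in
miniature; the sizes and the MSR_VSX bit position are assumed values for the
demo, not taken from the headers:

#include <stdio.h>

#define MSR_VSX			(1UL << 23)	/* assumed bit */
#define UCONTEXT_FULL		1440UL		/* made-up sizes */
#define UCONTEXT_WITHOUT_VSX	(UCONTEXT_FULL - 128UL)

static int ctx_size_ok(unsigned long ctx_size, unsigned long new_msr)
{
	if (ctx_size < UCONTEXT_WITHOUT_VSX)
		return 0;		/* too small even without VSX */
	if (ctx_size < UCONTEXT_FULL && (new_msr & MSR_VSX))
		return 0;		/* claims VSX but omits the state */
	return 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       ctx_size_ok(UCONTEXT_FULL, MSR_VSX),		/* 1 */
	       ctx_size_ok(UCONTEXT_WITHOUT_VSX, 0),		/* 1 */
	       ctx_size_ok(UCONTEXT_WITHOUT_VSX, MSR_VSX));	/* 0 */
	return 0;
}
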
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index da7c058e3731..65ad925c3a8f 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -112,11 +112,29 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
112#else /* CONFIG_ALTIVEC */ 112#else /* CONFIG_ALTIVEC */
113 err |= __put_user(0, &sc->v_regs); 113 err |= __put_user(0, &sc->v_regs);
114#endif /* CONFIG_ALTIVEC */ 114#endif /* CONFIG_ALTIVEC */
115 flush_fp_to_thread(current);
116 /* copy fpr regs and fpscr */
117 err |= copy_fpr_to_user(&sc->fp_regs, current);
118#ifdef CONFIG_VSX
119 /*
120 * Copy VSX low doubleword to local buffer for formatting,
121 * then out to userspace. Update v_regs to point after the
122 * VMX data.
123 */
124 if (current->thread.used_vsr) {
125 __giveup_vsx(current);
126 v_regs += ELF_NVRREG;
127 err |= copy_vsx_to_user(v_regs, current);
128 /* set MSR_VSX in the MSR value in the frame to
129 * indicate that sc->vs_regs contains valid data.
130 */
131 msr |= MSR_VSX;
132 }
133#endif /* CONFIG_VSX */
115 err |= __put_user(&sc->gp_regs, &sc->regs); 134 err |= __put_user(&sc->gp_regs, &sc->regs);
116 WARN_ON(!FULL_REGS(regs)); 135 WARN_ON(!FULL_REGS(regs));
117 err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE); 136 err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE);
118 err |= __put_user(msr, &sc->gp_regs[PT_MSR]); 137 err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
119 err |= __copy_to_user(&sc->fp_regs, &current->thread.fpr, FP_REGS_SIZE);
120 err |= __put_user(signr, &sc->signal); 138 err |= __put_user(signr, &sc->signal);
121 err |= __put_user(handler, &sc->handler); 139 err |= __put_user(handler, &sc->handler);
122 if (set != NULL) 140 if (set != NULL)
@@ -137,29 +155,32 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
137#endif 155#endif
138 unsigned long err = 0; 156 unsigned long err = 0;
139 unsigned long save_r13 = 0; 157 unsigned long save_r13 = 0;
140 elf_greg_t *gregs = (elf_greg_t *)regs;
141 unsigned long msr; 158 unsigned long msr;
159#ifdef CONFIG_VSX
142 int i; 160 int i;
161#endif
143 162
144 /* If this is not a signal return, we preserve the TLS in r13 */ 163 /* If this is not a signal return, we preserve the TLS in r13 */
145 if (!sig) 164 if (!sig)
146 save_r13 = regs->gpr[13]; 165 save_r13 = regs->gpr[13];
147 166
148 /* copy everything before MSR */ 167 /* copy the GPRs */
149 err |= __copy_from_user(regs, &sc->gp_regs, 168 err |= __copy_from_user(regs->gpr, sc->gp_regs, sizeof(regs->gpr));
150 PT_MSR*sizeof(unsigned long)); 169 err |= __get_user(regs->nip, &sc->gp_regs[PT_NIP]);
151
152 /* get MSR separately, transfer the LE bit if doing signal return */ 170 /* get MSR separately, transfer the LE bit if doing signal return */
153 err |= __get_user(msr, &sc->gp_regs[PT_MSR]); 171 err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
154 if (sig) 172 if (sig)
155 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); 173 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
156 174 err |= __get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3]);
175 err |= __get_user(regs->ctr, &sc->gp_regs[PT_CTR]);
176 err |= __get_user(regs->link, &sc->gp_regs[PT_LNK]);
177 err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]);
178 err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]);
157 /* skip SOFTE */ 179 /* skip SOFTE */
158 for (i = PT_MSR+1; i <= PT_RESULT; i++) { 180 err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
159 if (i == PT_SOFTE) 181 err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
160 continue; 182 err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
161 err |= __get_user(gregs[i], &sc->gp_regs[i]); 183 err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
162 }
163 184
164 if (!sig) 185 if (!sig)
165 regs->gpr[13] = save_r13; 186 regs->gpr[13] = save_r13;
@@ -180,9 +201,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
180 * This has to be done before copying stuff into current->thread.fpr/vr 201 * This has to be done before copying stuff into current->thread.fpr/vr
181 * for the reasons explained in the previous comment. 202 * for the reasons explained in the previous comment.
182 */ 203 */
183 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC); 204 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);
184
185 err |= __copy_from_user(&current->thread.fpr, &sc->fp_regs, FP_REGS_SIZE);
186 205
187#ifdef CONFIG_ALTIVEC 206#ifdef CONFIG_ALTIVEC
188 err |= __get_user(v_regs, &sc->v_regs); 207 err |= __get_user(v_regs, &sc->v_regs);
@@ -202,7 +221,23 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
202 else 221 else
203 current->thread.vrsave = 0; 222 current->thread.vrsave = 0;
204#endif /* CONFIG_ALTIVEC */ 223#endif /* CONFIG_ALTIVEC */
224 /* restore floating point */
225 err |= copy_fpr_from_user(current, &sc->fp_regs);
226#ifdef CONFIG_VSX
227 /*
228 * Get additional VSX data. Update v_regs to point after the
229 * VMX data.  Copy the VSX low doublewords from userspace to a local
230 * buffer for formatting, then into the thread_struct.
231 */
232 v_regs += ELF_NVRREG;
233 if ((msr & MSR_VSX) != 0)
234 err |= copy_vsx_from_user(current, v_regs);
235 else
236 for (i = 0; i < 32 ; i++)
237 current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
205 238
239#endif /* CONFIG_VSX */
206 return err; 241 return err;
207} 242}
208 243
@@ -233,6 +268,13 @@ static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
233} 268}
234 269
235/* 270/*
271 * Userspace code may pass a ucontext which doesn't include VSX added
272 * at the end. We need to check for this case.
273 */
274#define UCONTEXTSIZEWITHOUTVSX \
275 (sizeof(struct ucontext) - 32*sizeof(long))
276
277/*
236 * Handle {get,set,swap}_context operations 278 * Handle {get,set,swap}_context operations
237 */ 279 */
238int sys_swapcontext(struct ucontext __user *old_ctx, 280int sys_swapcontext(struct ucontext __user *old_ctx,
@@ -241,13 +283,34 @@ int sys_swapcontext(struct ucontext __user *old_ctx,
241{ 283{
242 unsigned char tmp; 284 unsigned char tmp;
243 sigset_t set; 285 sigset_t set;
286 unsigned long new_msr = 0;
244 287
245 /* Context size is for future use. Right now, we only make sure 288 if (new_ctx &&
246 * we are passed something we understand 289 __get_user(new_msr, &new_ctx->uc_mcontext.gp_regs[PT_MSR]))
290 return -EFAULT;
291 /*
292 * Check that the context is not smaller than the original
293 * size (with VMX but without VSX)
247 */ 294 */
248 if (ctx_size < sizeof(struct ucontext)) 295 if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
249 return -EINVAL; 296 return -EINVAL;
250 297 /*
298 * If the new context state sets the MSR VSX bits but
299 * it doesn't provide VSX state.
300 */
301 if ((ctx_size < sizeof(struct ucontext)) &&
302 (new_msr & MSR_VSX))
303 return -EINVAL;
304#ifdef CONFIG_VSX
305 /*
306 * If userspace doesn't provide enough room for VSX data,
307 * but current thread has used VSX, we don't have anywhere
308 * to store the full context back into.
309 */
310 if ((ctx_size < sizeof(struct ucontext)) &&
311 (current->thread.used_vsr && old_ctx))
312 return -EINVAL;
313#endif
251 if (old_ctx != NULL) { 314 if (old_ctx != NULL) {
252 if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx)) 315 if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
253 || setup_sigcontext(&old_ctx->uc_mcontext, regs, 0, NULL, 0) 316 || setup_sigcontext(&old_ctx->uc_mcontext, regs, 0, NULL, 0)
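
The restore_sigcontext() rewrite above replaces a blanket copy of the user
frame over pt_regs with per-field __get_user() calls, so kernel-owned slots
(MSR apart from the LE bit, SOFTE) can never be overwritten from userspace.
A standalone mock-up of that pattern with invented fields:

#include <stdio.h>
#include <string.h>

struct regs_demo {
	unsigned long gpr[4];
	unsigned long msr;	/* kernel-owned: never copied from user */
	unsigned long softe;	/* kernel-owned interrupt state */
};

static void restore_from_frame(struct regs_demo *regs,
			       const struct regs_demo *uframe)
{
	/* copy only what userspace may legitimately set */
	memcpy(regs->gpr, uframe->gpr, sizeof(regs->gpr));
}

int main(void)
{
	struct regs_demo live = { .msr = 0x9000, .softe = 1 };
	struct regs_demo frame = { .gpr = { 1, 2, 3, 4 } };	/* msr/softe 0 */

	restore_from_frame(&live, &frame);
	printf("gpr0=%lu msr=%#lx softe=%lu\n",
	       live.gpr[0], live.msr, live.softe);
	return 0;
}
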
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 1457aa0a08f1..f5ae9fa222ea 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -72,12 +72,8 @@ struct smp_ops_t *smp_ops;
72 72
73static volatile unsigned int cpu_callin_map[NR_CPUS]; 73static volatile unsigned int cpu_callin_map[NR_CPUS];
74 74
75void smp_call_function_interrupt(void);
76
77int smt_enabled_at_boot = 1; 75int smt_enabled_at_boot = 1;
78 76
79static int ipi_fail_ok;
80
81static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL; 77static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
82 78
83#ifdef CONFIG_PPC64 79#ifdef CONFIG_PPC64
@@ -99,12 +95,15 @@ void smp_message_recv(int msg)
99{ 95{
100 switch(msg) { 96 switch(msg) {
101 case PPC_MSG_CALL_FUNCTION: 97 case PPC_MSG_CALL_FUNCTION:
102 smp_call_function_interrupt(); 98 generic_smp_call_function_interrupt();
103 break; 99 break;
104 case PPC_MSG_RESCHEDULE: 100 case PPC_MSG_RESCHEDULE:
105 /* XXX Do we have to do this? */ 101 /* XXX Do we have to do this? */
106 set_need_resched(); 102 set_need_resched();
107 break; 103 break;
104 case PPC_MSG_CALL_FUNC_SINGLE:
105 generic_smp_call_function_single_interrupt();
106 break;
108 case PPC_MSG_DEBUGGER_BREAK: 107 case PPC_MSG_DEBUGGER_BREAK:
109 if (crash_ipi_function_ptr) { 108 if (crash_ipi_function_ptr) {
110 crash_ipi_function_ptr(get_irq_regs()); 109 crash_ipi_function_ptr(get_irq_regs());
@@ -128,6 +127,19 @@ void smp_send_reschedule(int cpu)
128 smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE); 127 smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
129} 128}
130 129
130void arch_send_call_function_single_ipi(int cpu)
131{
132 smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
133}
134
135void arch_send_call_function_ipi(cpumask_t mask)
136{
137 unsigned int cpu;
138
139 for_each_cpu_mask(cpu, mask)
140 smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
141}
142
131#ifdef CONFIG_DEBUGGER 143#ifdef CONFIG_DEBUGGER
132void smp_send_debugger_break(int cpu) 144void smp_send_debugger_break(int cpu)
133{ 145{
@@ -154,223 +166,13 @@ static void stop_this_cpu(void *dummy)
154 ; 166 ;
155} 167}
156 168
157/*
158 * Structure and data for smp_call_function(). This is designed to minimise
159 * static memory requirements. It also looks cleaner.
160 * Stolen from the i386 version.
161 */
162static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
163
164static struct call_data_struct {
165 void (*func) (void *info);
166 void *info;
167 atomic_t started;
168 atomic_t finished;
169 int wait;
170} *call_data;
171
172/* delay of at least 8 seconds */
173#define SMP_CALL_TIMEOUT 8
174
175/*
176 * These functions send a 'generic call function' IPI to other online
177 * CPUS in the system.
178 *
179 * [SUMMARY] Run a function on other CPUs.
180 * <func> The function to run. This must be fast and non-blocking.
181 * <info> An arbitrary pointer to pass to the function.
182 * <nonatomic> currently unused.
183 * <wait> If true, wait (atomically) until function has completed on other CPUs.
184 * [RETURNS] 0 on success, else a negative status code. Does not return until
185 * remote CPUs are nearly ready to execute <<func>> or are or have executed.
186 * <map> is a cpu map of the cpus to send IPI to.
187 *
188 * You must not call this function with disabled interrupts or from a
189 * hardware interrupt handler or from a bottom half handler.
190 */
191static int __smp_call_function_map(void (*func) (void *info), void *info,
192 int nonatomic, int wait, cpumask_t map)
193{
194 struct call_data_struct data;
195 int ret = -1, num_cpus;
196 int cpu;
197 u64 timeout;
198
199 if (unlikely(smp_ops == NULL))
200 return ret;
201
202 data.func = func;
203 data.info = info;
204 atomic_set(&data.started, 0);
205 data.wait = wait;
206 if (wait)
207 atomic_set(&data.finished, 0);
208
209 /* remove 'self' from the map */
210 if (cpu_isset(smp_processor_id(), map))
211 cpu_clear(smp_processor_id(), map);
212
213 /* sanity check the map, remove any non-online processors. */
214 cpus_and(map, map, cpu_online_map);
215
216 num_cpus = cpus_weight(map);
217 if (!num_cpus)
218 goto done;
219
220 call_data = &data;
221 smp_wmb();
222 /* Send a message to all CPUs in the map */
223 for_each_cpu_mask(cpu, map)
224 smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
225
226 timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;
227
228 /* Wait for indication that they have received the message */
229 while (atomic_read(&data.started) != num_cpus) {
230 HMT_low();
231 if (get_tb() >= timeout) {
232 printk("smp_call_function on cpu %d: other cpus not "
233 "responding (%d)\n", smp_processor_id(),
234 atomic_read(&data.started));
235 if (!ipi_fail_ok)
236 debugger(NULL);
237 goto out;
238 }
239 }
240
241 /* optionally wait for the CPUs to complete */
242 if (wait) {
243 while (atomic_read(&data.finished) != num_cpus) {
244 HMT_low();
245 if (get_tb() >= timeout) {
246 printk("smp_call_function on cpu %d: other "
247 "cpus not finishing (%d/%d)\n",
248 smp_processor_id(),
249 atomic_read(&data.finished),
250 atomic_read(&data.started));
251 debugger(NULL);
252 goto out;
253 }
254 }
255 }
256
257 done:
258 ret = 0;
259
260 out:
261 call_data = NULL;
262 HMT_medium();
263 return ret;
264}
265
266static int __smp_call_function(void (*func)(void *info), void *info,
267 int nonatomic, int wait)
268{
269 int ret;
270 spin_lock(&call_lock);
271 ret =__smp_call_function_map(func, info, nonatomic, wait,
272 cpu_online_map);
273 spin_unlock(&call_lock);
274 return ret;
275}
276
277int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
278 int wait)
279{
280 /* Can deadlock when called with interrupts disabled */
281 WARN_ON(irqs_disabled());
282
283 return __smp_call_function(func, info, nonatomic, wait);
284}
285EXPORT_SYMBOL(smp_call_function);
286
287int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
288 int nonatomic, int wait)
289{
290 cpumask_t map = CPU_MASK_NONE;
291 int ret = 0;
292
293 /* Can deadlock when called with interrupts disabled */
294 WARN_ON(irqs_disabled());
295
296 if (!cpu_online(cpu))
297 return -EINVAL;
298
299 cpu_set(cpu, map);
300 if (cpu != get_cpu()) {
301 spin_lock(&call_lock);
302 ret = __smp_call_function_map(func, info, nonatomic, wait, map);
303 spin_unlock(&call_lock);
304 } else {
305 local_irq_disable();
306 func(info);
307 local_irq_enable();
308 }
309 put_cpu();
310 return ret;
311}
312EXPORT_SYMBOL(smp_call_function_single);
313
314void smp_send_stop(void) 169void smp_send_stop(void)
315{ 170{
316 int nolock; 171 smp_call_function(stop_this_cpu, NULL, 0);
317
318 /* It's OK to fail sending the IPI, since the alternative is to
319 * be stuck forever waiting on the other CPU to take the interrupt.
320 *
321 * It's better to at least continue and go through reboot, since this
322 * function is usually called at panic or reboot time in the first
323 * place.
324 */
325 ipi_fail_ok = 1;
326
327 /* Don't deadlock in case we got called through panic */
328 nolock = !spin_trylock(&call_lock);
329 __smp_call_function_map(stop_this_cpu, NULL, 1, 0, cpu_online_map);
330 if (!nolock)
331 spin_unlock(&call_lock);
332}
333
334void smp_call_function_interrupt(void)
335{
336 void (*func) (void *info);
337 void *info;
338 int wait;
339
340 /* call_data will be NULL if the sender timed out while
341 * waiting on us to receive the call.
342 */
343 if (!call_data)
344 return;
345
346 func = call_data->func;
347 info = call_data->info;
348 wait = call_data->wait;
349
350 if (!wait)
351 smp_mb__before_atomic_inc();
352
353 /*
354 * Notify initiating CPU that I've grabbed the data and am
355 * about to execute the function
356 */
357 atomic_inc(&call_data->started);
358 /*
359 * At this point the info structure may be out of scope unless wait==1
360 */
361 (*func)(info);
362 if (wait) {
363 smp_mb__before_atomic_inc();
364 atomic_inc(&call_data->finished);
365 }
366} 172}
367 173
368extern struct gettimeofday_struct do_gtod;
369
370struct thread_info *current_set[NR_CPUS]; 174struct thread_info *current_set[NR_CPUS];
371 175
372DECLARE_PER_CPU(unsigned int, pvr);
373
374static void __devinit smp_store_cpu_info(int id) 176static void __devinit smp_store_cpu_info(int id)
375{ 177{
376 per_cpu(pvr, id) = mfspr(SPRN_PVR); 178 per_cpu(pvr, id) = mfspr(SPRN_PVR);
@@ -596,9 +398,9 @@ int __devinit start_secondary(void *unused)
596 398
597 secondary_cpu_time_init(); 399 secondary_cpu_time_init();
598 400
599 spin_lock(&call_lock); 401 ipi_call_lock();
600 cpu_set(cpu, cpu_online_map); 402 cpu_set(cpu, cpu_online_map);
601 spin_unlock(&call_lock); 403 ipi_call_unlock();
602 404
603 local_irq_enable(); 405 local_irq_enable();
604 406
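
With the homegrown call_data machinery deleted, the architecture only
supplies the two IPI send hooks added above; queueing, acknowledgement and
waiting live in the generic kernel/smp.c code.  What a caller sees after
the conversion (do_flush() is a made-up worker):

#include <linux/smp.h>

static void do_flush(void *info)
{
	/* per-CPU work; runs in IPI context with interrupts disabled */
}

static void flush_everywhere(void)
{
	smp_call_function(do_flush, NULL, 1);		/* all other CPUs, wait */
	smp_call_function_single(0, do_flush, NULL, 1);	/* just CPU 0 */
}
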
diff --git a/arch/powerpc/kernel/softemu8xx.c b/arch/powerpc/kernel/softemu8xx.c
index 67d6f6890edc..c906c4bf6835 100644
--- a/arch/powerpc/kernel/softemu8xx.c
+++ b/arch/powerpc/kernel/softemu8xx.c
@@ -124,7 +124,7 @@ int Soft_emulate_8xx(struct pt_regs *regs)
124 disp = instword & 0xffff; 124 disp = instword & 0xffff;
125 125
126 ea = (u32 *)(regs->gpr[idxreg] + disp); 126 ea = (u32 *)(regs->gpr[idxreg] + disp);
127 ip = (u32 *)&current->thread.fpr[flreg]; 127 ip = (u32 *)&current->thread.TS_FPR(flreg);
128 128
129 switch ( inst ) 129 switch ( inst )
130 { 130 {
@@ -168,7 +168,7 @@ int Soft_emulate_8xx(struct pt_regs *regs)
168 break; 168 break;
169 case FMR: 169 case FMR:
170 /* assume this is a fp move -- Cort */ 170 /* assume this is a fp move -- Cort */
171 memcpy(ip, &current->thread.fpr[(instword>>11)&0x1f], 171 memcpy(ip, &current->thread.TS_FPR((instword>>11)&0x1f),
172 sizeof(double)); 172 sizeof(double));
173 break; 173 break;
174 default: 174 default:
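
The TS_FPR() accessor used above hides the widened register file: with
CONFIG_VSX each fpr slot holds two doublewords, the FP value at offset 0 and
the VSR low half at offset 1.  A standalone mock-up of that layout (the
two-argument macro and struct are demo stand-ins, not the kernel's):

#include <stdio.h>

#define TS_FPRWIDTH	2
#define TS_FPROFFSET	0
#define TS_VSRLOWOFFSET	1

struct thread_demo {
	double fpr[32][TS_FPRWIDTH];
};

#define TS_FPR_DEMO(t, i)	((t)->fpr[i][TS_FPROFFSET])

int main(void)
{
	struct thread_demo t = { .fpr = { { 1.5, 2.5 } } };

	printf("fpr0 = %f, vsr0 low doubleword = %f\n",
	       TS_FPR_DEMO(&t, 0), t.fpr[0][TS_VSRLOWOFFSET]);
	return 0;
}
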
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index 962944038430..f2589645870a 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -10,33 +10,35 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 */ 11 */
12 12
13#include <linux/module.h>
13#include <linux/sched.h> 14#include <linux/sched.h>
14#include <linux/stacktrace.h> 15#include <linux/stacktrace.h>
15#include <asm/ptrace.h> 17#include <asm/ptrace.h>
18#include <asm/processor.h>
16 19
17/* 20/*
18 * Save stack-backtrace addresses into a stack_trace buffer. 21 * Save stack-backtrace addresses into a stack_trace buffer.
19 */ 22 */
20void save_stack_trace(struct stack_trace *trace) 23static void save_context_stack(struct stack_trace *trace, unsigned long sp,
24 struct task_struct *tsk, int savesched)
21{ 25{
22 unsigned long sp;
23
24 asm("mr %0,1" : "=r" (sp));
25
26 for (;;) { 26 for (;;) {
27 unsigned long *stack = (unsigned long *) sp; 27 unsigned long *stack = (unsigned long *) sp;
28 unsigned long newsp, ip; 28 unsigned long newsp, ip;
29 29
30 if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) 30 if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
31 return; 31 return;
32 32
33 newsp = stack[0]; 33 newsp = stack[0];
34 ip = stack[STACK_FRAME_LR_SAVE]; 34 ip = stack[STACK_FRAME_LR_SAVE];
35 35
36 if (!trace->skip) 36 if (savesched || !in_sched_functions(ip)) {
37 trace->entries[trace->nr_entries++] = ip; 37 if (!trace->skip)
38 else 38 trace->entries[trace->nr_entries++] = ip;
39 trace->skip--; 39 else
40 trace->skip--;
41 }
40 42
41 if (trace->nr_entries >= trace->max_entries) 43 if (trace->nr_entries >= trace->max_entries)
42 return; 44 return;
@@ -44,3 +46,19 @@ void save_stack_trace(struct stack_trace *trace)
44 sp = newsp; 46 sp = newsp;
45 } 47 }
46} 48}
49
50void save_stack_trace(struct stack_trace *trace)
51{
52 unsigned long sp;
53
54 asm("mr %0,1" : "=r" (sp));
55
56 save_context_stack(trace, sp, current, 1);
57}
58EXPORT_SYMBOL_GPL(save_stack_trace);
59
60void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
61{
62 save_context_stack(trace, tsk->thread.ksp, tsk, 0);
63}
64EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
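
A minimal consumer of the new save_stack_trace_tsk() entry point; a sketch
only -- a real caller must make sure the target task cannot run while its
stack is being walked:

#include <linux/stacktrace.h>
#include <linux/sched.h>

static void dump_task_stack(struct task_struct *tsk)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= 16,
		.skip		= 0,
	};

	save_stack_trace_tsk(tsk, &trace);
	print_stack_trace(&trace, 0);	/* indent by 0 spaces */
}
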
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index 4fe69ca24481..c04832c4a02e 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -143,6 +143,9 @@ static inline unsigned long do_mmap2(unsigned long addr, size_t len,
143 struct file * file = NULL; 143 struct file * file = NULL;
144 unsigned long ret = -EINVAL; 144 unsigned long ret = -EINVAL;
145 145
146 if (!arch_validate_prot(prot))
147 goto out;
148
146 if (shift) { 149 if (shift) {
147 if (off & ((1 << shift) - 1)) 150 if (off & ((1 << shift) - 1))
148 goto out; 151 goto out;
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index c8127f832df0..aba0ba95f062 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -28,7 +28,9 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
28/* Time in microseconds we delay before sleeping in the idle loop */ 28/* Time in microseconds we delay before sleeping in the idle loop */
29DEFINE_PER_CPU(unsigned long, smt_snooze_delay) = { 100 }; 29DEFINE_PER_CPU(unsigned long, smt_snooze_delay) = { 100 };
30 30
31static ssize_t store_smt_snooze_delay(struct sys_device *dev, const char *buf, 31static ssize_t store_smt_snooze_delay(struct sys_device *dev,
32 struct sysdev_attribute *attr,
33 const char *buf,
32 size_t count) 34 size_t count)
33{ 35{
34 struct cpu *cpu = container_of(dev, struct cpu, sysdev); 36 struct cpu *cpu = container_of(dev, struct cpu, sysdev);
@@ -44,7 +46,9 @@ static ssize_t store_smt_snooze_delay(struct sys_device *dev, const char *buf,
44 return count; 46 return count;
45} 47}
46 48
47static ssize_t show_smt_snooze_delay(struct sys_device *dev, char *buf) 49static ssize_t show_smt_snooze_delay(struct sys_device *dev,
50 struct sysdev_attribute *attr,
51 char *buf)
48{ 52{
49 struct cpu *cpu = container_of(dev, struct cpu, sysdev); 53 struct cpu *cpu = container_of(dev, struct cpu, sysdev);
50 54
@@ -152,14 +156,17 @@ static unsigned long write_##NAME(unsigned long val) \
152 mtspr(ADDRESS, val); \ 156 mtspr(ADDRESS, val); \
153 return 0; \ 157 return 0; \
154} \ 158} \
155static ssize_t show_##NAME(struct sys_device *dev, char *buf) \ 159static ssize_t show_##NAME(struct sys_device *dev, \
160 struct sysdev_attribute *attr, \
161 char *buf) \
156{ \ 162{ \
157 struct cpu *cpu = container_of(dev, struct cpu, sysdev); \ 163 struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
158 unsigned long val = run_on_cpu(cpu->sysdev.id, read_##NAME, 0); \ 164 unsigned long val = run_on_cpu(cpu->sysdev.id, read_##NAME, 0); \
159 return sprintf(buf, "%lx\n", val); \ 165 return sprintf(buf, "%lx\n", val); \
160} \ 166} \
161static ssize_t __used \ 167static ssize_t __used \
162 store_##NAME(struct sys_device *dev, const char *buf, size_t count) \ 168 store_##NAME(struct sys_device *dev, struct sysdev_attribute *attr, \
169 const char *buf, size_t count) \
163{ \ 170{ \
164 struct cpu *cpu = container_of(dev, struct cpu, sysdev); \ 171 struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
165 unsigned long val; \ 172 unsigned long val; \
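
The sysfs hunks above track the sysdev core change that passes the
struct sysdev_attribute into every show/store method.  Shape of an attribute
under the new signatures (an illustrative attribute, not one from the
patch):

#include <linux/kernel.h>
#include <linux/sysdev.h>

static ssize_t show_demo(struct sys_device *dev,
			 struct sysdev_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);
}

static ssize_t store_demo(struct sys_device *dev,
			  struct sysdev_attribute *attr,
			  const char *buf, size_t count)
{
	/* parse and apply buf here */
	return count;
}

static SYSDEV_ATTR(demo, 0644, show_demo, store_demo);
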
diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c
index 368a4934f7ee..c3a56d65c5a9 100644
--- a/arch/powerpc/kernel/tau_6xx.c
+++ b/arch/powerpc/kernel/tau_6xx.c
@@ -192,7 +192,7 @@ static void tau_timeout_smp(unsigned long unused)
 
 	/* schedule ourselves to be run again */
 	mod_timer(&tau_timer, jiffies + shrink_timer) ;
-	on_each_cpu(tau_timeout, NULL, 1, 0);
+	on_each_cpu(tau_timeout, NULL, 0);
 }
 
 /*
@@ -234,7 +234,7 @@ int __init TAU_init(void)
 	tau_timer.expires = jiffies + shrink_timer;
 	add_timer(&tau_timer);
 
-	on_each_cpu(TAU_init_smp, NULL, 1, 0);
+	on_each_cpu(TAU_init_smp, NULL, 0);
 
 	printk("Thermal assist unit ");
 #ifdef CONFIG_TAU_INT
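[note: these one-liners track a cross-tree API change: on_each_cpu() dropped its unused 'retry' argument, going from (func, info, retry, wait) to (func, info, wait). A sketch of a caller under the new signature; the callback name is illustrative:]

#include <linux/smp.h>

static void poke_cpu(void *info)
{
	/* Runs on every online CPU, with interrupts disabled. */
}

static void poke_all(void)
{
	on_each_cpu(poke_cpu, NULL, 1);	/* wait == 1: block until done */
}

[callers that must not block pass wait == 0, as tau_6xx.c does above.]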
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 73401e83739a..e2ee66b5831d 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -129,7 +129,7 @@ static unsigned long __initdata iSeries_recal_titan;
 static signed long __initdata iSeries_recal_tb;
 
 /* Forward declaration is only needed for iSereis compiles */
-void __init clocksource_init(void);
+static void __init clocksource_init(void);
 #endif
 
 #define XSEC_PER_SEC (1024*1024)
@@ -150,8 +150,8 @@ u64 tb_to_xs;
 unsigned tb_to_us;
 
 #define TICKLEN_SCALE	NTP_SCALE_SHIFT
-u64 last_tick_len;	/* units are ns / 2^TICKLEN_SCALE */
-u64 ticklen_to_xs;	/* 0.64 fraction */
+static u64 last_tick_len;	/* units are ns / 2^TICKLEN_SCALE */
+static u64 ticklen_to_xs;	/* 0.64 fraction */
 
 /* If last_tick_len corresponds to about 1/HZ seconds, then
    last_tick_len << TICKLEN_SHIFT will be about 2^63. */
@@ -164,7 +164,7 @@ static u64 tb_to_ns_scale __read_mostly;
 static unsigned tb_to_ns_shift __read_mostly;
 static unsigned long boot_tb __read_mostly;
 
-struct gettimeofday_struct do_gtod;
+static struct gettimeofday_struct do_gtod;
 
 extern struct timezone sys_tz;
 static long timezone_offset;
@@ -322,7 +322,7 @@ void snapshot_timebases(void)
 {
 	if (!cpu_has_feature(CPU_FTR_PURR))
 		return;
-	on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
+	on_each_cpu(snapshot_tb_and_purr, NULL, 1);
 }
 
 /*
@@ -742,10 +742,6 @@ void __init generic_calibrate_decr(void)
 	}
 
 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
-	/* Set the time base to zero */
-	mtspr(SPRN_TBWL, 0);
-	mtspr(SPRN_TBWU, 0);
-
 	/* Clear any pending timer interrupts */
 	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
 
@@ -832,7 +828,7 @@ void update_vsyscall_tz(void)
 	++vdso_data->tb_update_count;
 }
 
-void __init clocksource_init(void)
+static void __init clocksource_init(void)
 {
 	struct clocksource *clock;
 
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 4b5b7ff4f78b..878fbddb6ae1 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -967,6 +967,20 @@ void altivec_unavailable_exception(struct pt_regs *regs)
 	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
 }
 
+void vsx_unavailable_exception(struct pt_regs *regs)
+{
+	if (user_mode(regs)) {
+		/* A user program has executed a vsx instruction,
+		   but this kernel doesn't support vsx. */
+		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+		return;
+	}
+
+	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
+			"%lx at %lx\n", regs->trap, regs->nip);
+	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
+}
+
 void performance_monitor_exception(struct pt_regs *regs)
 {
 	perf_irq(regs);
@@ -1030,21 +1044,29 @@ void SoftwareEmulation(struct pt_regs *regs)
 
 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
 
-void DebugException(struct pt_regs *regs, unsigned long debug_status)
+void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
 {
 	if (debug_status & DBSR_IC) {	/* instruction completion */
 		regs->msr &= ~MSR_DE;
+
+		/* Disable instruction completion */
+		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
+		/* Clear the instruction completion event */
+		mtspr(SPRN_DBSR, DBSR_IC);
+
+		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
+			       5, SIGTRAP) == NOTIFY_STOP) {
+			return;
+		}
+
+		if (debugger_sstep(regs))
+			return;
+
 		if (user_mode(regs)) {
 			current->thread.dbcr0 &= ~DBCR0_IC;
-		} else {
-			/* Disable instruction completion */
-			mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
-			/* Clear the instruction completion event */
-			mtspr(SPRN_DBSR, DBSR_IC);
-			if (debugger_sstep(regs))
-				return;
 		}
-		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
+
+		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
 	}
 }
 #endif /* CONFIG_4xx || CONFIG_BOOKE */
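[note: besides gaining __kprobes, DebugException() now clears the hardware single-step state up front and publishes the event through notify_die(), so in-kernel debuggers (for instance the kgdb support added elsewhere in this merge) can consume it before a SIGTRAP is raised. A hedged sketch of a client hooking that notification; the handler and its names are illustrative, not part of this patch:]

#include <linux/kdebug.h>
#include <linux/notifier.h>

static int sstep_notify(struct notifier_block *nb,
			unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_SSTEP && args->signr == SIGTRAP)
		return NOTIFY_STOP;	/* claim the event: no SIGTRAP sent */

	return NOTIFY_DONE;		/* not ours; keep walking the chain */
}

static struct notifier_block sstep_nb = {
	.notifier_call = sstep_notify,
};

/* register_die_notifier(&sstep_nb) hooks this into the die chain. */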
@@ -1091,6 +1113,21 @@ void altivec_assist_exception(struct pt_regs *regs)
 }
 #endif /* CONFIG_ALTIVEC */
 
+#ifdef CONFIG_VSX
+void vsx_assist_exception(struct pt_regs *regs)
+{
+	if (!user_mode(regs)) {
+		printk(KERN_EMERG "VSX assist exception in kernel mode"
+		       " at %lx\n", regs->nip);
+		die("Kernel VSX assist exception", regs, SIGILL);
+	}
+
+	flush_vsx_to_thread(current);
+	printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip);
+	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+}
+#endif /* CONFIG_VSX */
+
 #ifdef CONFIG_FSL_BOOKE
 void CacheLockingException(struct pt_regs *regs, unsigned long address,
 		unsigned long error_code)
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index ce245a850db2..f177c60ea766 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -571,6 +571,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
 	if (start64)
 		do_feature_fixups(powerpc_firmware_features,
 				  start64, start64 + size64);
+
+	start64 = find_section64(v64->hdr, "__lwsync_fixup", &size64);
+	if (start64)
+		do_lwsync_fixups(cur_cpu_spec->cpu_features,
+				 start64, start64 + size64);
 #endif /* CONFIG_PPC64 */
 
 	start32 = find_section32(v32->hdr, "__ftr_fixup", &size32);
@@ -585,6 +590,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
 				  start32, start32 + size32);
 #endif /* CONFIG_PPC64 */
 
+	start32 = find_section32(v32->hdr, "__lwsync_fixup", &size32);
+	if (start32)
+		do_lwsync_fixups(cur_cpu_spec->cpu_features,
+				 start32, start32 + size32);
+
 	return 0;
 }
 
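[note: the vDSO now gets the same boot-time treatment as the kernel image: barrier sites are emitted conservatively as "sync" and patched to the lighter "lwsync" where the CPU supports it. Conceptually, each __lwsync_fixup entry is a self-relative pointer to one barrier instruction. A hedged sketch of the fixup pass, paraphrasing the shape of do_lwsync_fixups() rather than quoting it; the function name here is illustrative:]

/* Sketch only -- the real pass lives with the other feature fixups. */
static void lwsync_fixup_sketch(unsigned long cpu_features,
				long *start, long *end)
{
	unsigned int *insn;

	if (!(cpu_features & CPU_FTR_LWSYNC))
		return;			/* keep the heavyweight "sync" */

	for (; start < end; start++) {
		/* Each entry stores "barrier address - entry address". */
		insn = (unsigned int *)((void *)start + *start);
		patch_instruction(insn, 0x7c2004ac);	/* "lwsync" opcode */
	}
}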
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index 9352ab5200e5..be3b6a41dc09 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -24,7 +24,7 @@ SECTIONS
 
 	. = ALIGN(16);
 	.text : {
-		*(.text .stub .text.* .gnu.linkonce.t.*)
+		*(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*)
 	}
 	PROVIDE(__etext = .);
 	PROVIDE(_etext = .);
@@ -33,6 +33,9 @@ SECTIONS
 	. = ALIGN(8);
 	__ftr_fixup	: { *(__ftr_fixup) }
 
+	. = ALIGN(8);
+	__lwsync_fixup	: { *(__lwsync_fixup) }
+
 #ifdef CONFIG_PPC64
 	. = ALIGN(8);
 	__fw_ftr_fixup	: { *(__fw_ftr_fixup) }
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index 932b3fdb34b9..d0b2526dd38d 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -24,7 +24,7 @@ SECTIONS
 
 	. = ALIGN(16);
 	.text : {
-		*(.text .stub .text.* .gnu.linkonce.t.*)
+		*(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*)
 		*(.sfpr .glink)
 	} :text
 	PROVIDE(__etext = .);
@@ -35,6 +35,9 @@ SECTIONS
 	__ftr_fixup	: { *(__ftr_fixup) }
 
 	. = ALIGN(8);
+	__lwsync_fixup	: { *(__lwsync_fixup) }
+
+	. = ALIGN(8);
 	__fw_ftr_fixup	: { *(__fw_ftr_fixup) }
 
 	/*
40 /* 43 /*
@@ -43,15 +46,15 @@ SECTIONS
 	.rodata		: { *(.rodata .rodata.* .gnu.linkonce.r.*) }
 	.rodata1	: { *(.rodata1) }
 
+	.dynamic	: { *(.dynamic) }		:text	:dynamic
+
 	.eh_frame_hdr	: { *(.eh_frame_hdr) }		:text	:eh_frame_hdr
 	.eh_frame	: { KEEP (*(.eh_frame)) }	:text
 	.gcc_except_table : { *(.gcc_except_table) }
+	.rela.dyn ALIGN(8) : { *(.rela.dyn) }
 
 	.opd ALIGN(8)	: { KEEP (*(.opd)) }
 	.got ALIGN(8)	: { *(.got .toc) }
-	.rela.dyn ALIGN(8) : { *(.rela.dyn) }
-
-	.dynamic	: { *(.dynamic) }		:text	:dynamic
 
 	_end = .;
 	PROVIDE(end = .);
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 0c3000bf8d75..a914411bced5 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -9,6 +9,25 @@
 
 ENTRY(_stext)
 
+PHDRS {
+	kernel PT_LOAD FLAGS(7); /* RWX */
+	notes PT_NOTE FLAGS(0);
+	dummy PT_NOTE FLAGS(0);
+
+	/* binutils < 2.18 has a bug that makes it misbehave when taking an
+	   ELF file with all segments at load address 0 as input.  This
+	   happens when running "strip" on vmlinux, because of the AT() magic
+	   in this linker script.  People using GCC >= 4.2 won't run into
+	   this problem, because the "build-id" support will put some data
+	   into the "notes" segment (at a non-zero load address).
+
+	   To work around this, we force some data into both the "dummy"
+	   segment and the kernel segment, so the dummy segment will get a
+	   non-zero load address.  It's not enough to always create the
+	   "notes" segment, since if nothing gets assigned to it, its load
+	   address will be zero. */
+}
+
 #ifdef CONFIG_PPC64
 OUTPUT_ARCH(powerpc:common64)
 jiffies = jiffies_64;
@@ -35,7 +54,7 @@ SECTIONS
 		ALIGN_FUNCTION();
 		*(.text.head)
 		_text = .;
-		*(.text .fixup .text.init.refok .exit.text.refok)
+		*(.text .fixup .text.init.refok .exit.text.refok __ftr_alt_*)
 		SCHED_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
@@ -50,7 +69,7 @@ SECTIONS
 		. = ALIGN(PAGE_SIZE);
 		_etext = .;
 		PROVIDE32 (etext = .);
-	}
+	} :kernel
 
 	/* Read-only data */
 	RODATA
@@ -62,9 +81,13 @@ SECTIONS
 		__stop___ex_table = .;
 	}
 
-	NOTES
+	NOTES :kernel :notes
 
-	BUG_TABLE
+	/* The dummy segment contents for the bug workaround mentioned above
+	   near PHDRS.  */
+	.dummy : {
+		LONG(0xf177)
+	} :kernel :dummy
 
 /*
  * Init sections discarded at runtime
@@ -76,7 +99,7 @@ SECTIONS
 		_sinittext = .;
 		INIT_TEXT
 		_einittext = .;
-	}
+	} :kernel
 
 	/* .exit.text is discarded at runtime, not link time,
 	 * to deal with references from __bug_table
@@ -127,6 +150,12 @@ SECTIONS
 		*(__ftr_fixup)
 		__stop___ftr_fixup = .;
 	}
+	. = ALIGN(8);
+	__lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
+		__start___lwsync_fixup = .;
+		*(__lwsync_fixup)
+		__stop___lwsync_fixup = .;
+	}
 #ifdef CONFIG_PPC64
 	. = ALIGN(8);
 	__fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {