-rw-r--r--  arch/powerpc/kernel/entry_64.S        5
-rw-r--r--  arch/powerpc/kernel/fpu.S            16
-rw-r--r--  arch/powerpc/kernel/head_64.S        65
-rw-r--r--  arch/powerpc/kernel/misc_64.S        33
-rw-r--r--  arch/powerpc/kernel/ppc32.h           1
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c       3
-rw-r--r--  arch/powerpc/kernel/process.c       107
-rw-r--r--  arch/powerpc/kernel/ptrace.c         70
-rw-r--r--  arch/powerpc/kernel/signal_32.c      33
-rw-r--r--  arch/powerpc/kernel/signal_64.c      31
-rw-r--r--  arch/powerpc/kernel/traps.c          29
-rw-r--r--  include/asm-powerpc/elf.h             6
-rw-r--r--  include/asm-powerpc/ptrace.h         12
-rw-r--r--  include/asm-powerpc/reg.h             2
-rw-r--r--  include/asm-powerpc/sigcontext.h     37
-rw-r--r--  include/asm-powerpc/system.h          9
16 files changed, 451 insertions, 8 deletions
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 6ca3044e2e32..12eb95a80ce9 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -353,6 +353,11 @@ _GLOBAL(_switch)
 	mflr	r20		/* Return to switch caller */
 	mfmsr	r22
 	li	r0, MSR_FP
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif /* CONFIG_VSX */
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
 	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 15247fe171a8..a088c064ae40 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -57,6 +57,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
 _GLOBAL(load_up_fpu)
 	mfmsr	r5
 	ori	r5,r5,MSR_FP
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+	oris	r5,r5,MSR_VSX@h
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
 	SYNC
 	MTMSRD(r5)			/* enable use of fpu now */
 	isync
@@ -73,7 +78,7 @@ _GLOBAL(load_up_fpu)
 	beq	1f
 	toreal(r4)
 	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
-	SAVE_32FPRS(0, r4)
+	SAVE_32FPVSRS(0, r5, r4)
 	mffs	fr0
 	stfd	fr0,THREAD_FPSCR(r4)
 	PPC_LL	r5,PT_REGS(r4)
@@ -100,7 +105,7 @@ _GLOBAL(load_up_fpu)
 #endif
 	lfd	fr0,THREAD_FPSCR(r5)
 	MTFSF_L(fr0)
-	REST_32FPRS(0, r5)
+	REST_32FPVSRS(0, r4, r5)
 #ifndef CONFIG_SMP
 	subi	r4,r5,THREAD
 	fromreal(r4)
@@ -119,6 +124,11 @@ _GLOBAL(load_up_fpu)
 _GLOBAL(giveup_fpu)
 	mfmsr	r5
 	ori	r5,r5,MSR_FP
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+	oris	r5,r5,MSR_VSX@h
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
 	SYNC_601
 	ISYNC_601
 	MTMSRD(r5)			/* enable use of fpu now */
@@ -129,7 +139,7 @@ _GLOBAL(giveup_fpu)
 	addi	r3,r3,THREAD		/* want THREAD of task */
 	PPC_LL	r5,PT_REGS(r3)
 	PPC_LCMPI	0,r5,0
-	SAVE_32FPRS(0, r3)
+	SAVE_32FPVSRS(0, r4 ,r3)
 	mffs	fr0
 	stfd	fr0,THREAD_FPSCR(r3)
 	beq	1f
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 64433731d995..ecced1eb03ae 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -278,6 +278,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
 	. = 0xf20
 	b	altivec_unavailable_pSeries

+	. = 0xf40
+	b	vsx_unavailable_pSeries
+
 #ifdef CONFIG_CBE_RAS
 	HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
 #endif /* CONFIG_CBE_RAS */
@@ -297,6 +300,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
 	/* moved from 0xf00 */
 	STD_EXCEPTION_PSERIES(., performance_monitor)
 	STD_EXCEPTION_PSERIES(., altivec_unavailable)
+	STD_EXCEPTION_PSERIES(., vsx_unavailable)

 /*
  * An interrupt came in while soft-disabled; clear EE in SRR1,
@@ -836,6 +840,67 @@ _STATIC(load_up_altivec)
 	blr
 #endif /* CONFIG_ALTIVEC */

+	.align	7
+	.globl vsx_unavailable_common
+vsx_unavailable_common:
+	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+	bne	.load_up_vsx
+1:
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
+	bl	.save_nvgprs
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	ENABLE_INTS
+	bl	.vsx_unavailable_exception
+	b	.ret_from_except
+
+#ifdef CONFIG_VSX
+/*
+ * load_up_vsx(unused, unused, tsk)
+ * Disable VSX for the task which had it previously,
+ * and save its vector registers in its thread_struct.
+ * Reuse the fp and vsx saves, but first check to see if they have
+ * been saved already.
+ * On entry: r13 == 'current' && last_task_used_vsx != 'current'
+ */
+_STATIC(load_up_vsx)
+/* Load FP and VSX registers if they haven't been done yet */
+	andi.	r5,r12,MSR_FP
+	beql+	load_up_fpu		/* skip if already loaded */
+	andis.	r5,r12,MSR_VEC@h
+	beql+	load_up_altivec		/* skip if already loaded */
+
+#ifndef CONFIG_SMP
+	ld	r3,last_task_used_vsx@got(r2)
+	ld	r4,0(r3)
+	cmpdi	0,r4,0
+	beq	1f
+	/* Disable VSX for last_task_used_vsx */
+	addi	r4,r4,THREAD
+	ld	r5,PT_REGS(r4)
+	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	lis	r6,MSR_VSX@h
+	andc	r6,r4,r6
+	std	r6,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+	ld	r4,PACACURRENT(r13)
+	addi	r4,r4,THREAD		/* Get THREAD */
+	li	r6,1
+	stw	r6,THREAD_USED_VSR(r4)	/* ... also set thread used vsr */
+	/* enable use of VSX after return */
+	oris	r12,r12,MSR_VSX@h
+	std	r12,_MSR(r1)
+#ifndef CONFIG_SMP
+	/* Update last_task_used_math to 'current' */
+	ld	r4,PACACURRENT(r13)
+	std	r4,0(r3)
+#endif /* CONFIG_SMP */
+	b	fast_exception_return
+#endif /* CONFIG_VSX */
+
 /*
  * Hash table stuff
  */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 942951e76586..31b9026cf1e3 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -506,6 +506,39 @@ _GLOBAL(giveup_altivec)

 #endif /* CONFIG_ALTIVEC */

+#ifdef CONFIG_VSX
+/*
+ * giveup_vsx(tsk)
+ * Disable VSX for the task given as the argument,
+ * and save the vector registers in its thread_struct.
+ * Enables the VSX for use in the kernel on return.
+ */
+_GLOBAL(giveup_vsx)
+	mfmsr	r5
+	oris	r5,r5,MSR_VSX@h
+	mtmsrd	r5			/* enable use of VSX now */
+	isync
+
+	cmpdi	0,r3,0
+	beqlr-				/* if no previous owner, done */
+	addi	r3,r3,THREAD		/* want THREAD of task */
+	ld	r5,PT_REGS(r3)
+	cmpdi	0,r5,0
+	beq	1f
+	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	lis	r3,MSR_VSX@h
+	andc	r4,r4,r3		/* disable VSX for previous task */
+	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+	li	r5,0
+	ld	r4,last_task_used_vsx@got(r2)
+	std	r5,0(r4)
+#endif /* CONFIG_SMP */
+	blr
+
+#endif /* CONFIG_VSX */
+
 /* kexec_wait(phys_cpu)
  *
  * wait for the flag to change, indicating this kernel is going away but
diff --git a/arch/powerpc/kernel/ppc32.h b/arch/powerpc/kernel/ppc32.h
index 90e562771791..dc16aefe1dd0 100644
--- a/arch/powerpc/kernel/ppc32.h
+++ b/arch/powerpc/kernel/ppc32.h
@@ -120,6 +120,7 @@ struct mcontext32 {
 	elf_fpregset_t	mc_fregs;
 	unsigned int	mc_pad[2];
 	elf_vrregset_t32	mc_vregs __attribute__((__aligned__(16)));
+	elf_vsrreghalf_t32	mc_vsregs __attribute__((__aligned__(16)));
 };

 struct ucontext32 {
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index d3ac631cbd26..958ecb9ae7dc 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -102,6 +102,9 @@ EXPORT_SYMBOL(giveup_fpu);
 #ifdef CONFIG_ALTIVEC
 EXPORT_SYMBOL(giveup_altivec);
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_VSX
+EXPORT_SYMBOL(giveup_vsx);
+#endif /* CONFIG_VSX */
 #ifdef CONFIG_SPE
 EXPORT_SYMBOL(giveup_spe);
 #endif /* CONFIG_SPE */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 582df70439cb..d52ded366f14 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -53,6 +53,7 @@ extern unsigned long _get_SP(void);
 #ifndef CONFIG_SMP
 struct task_struct *last_task_used_math = NULL;
 struct task_struct *last_task_used_altivec = NULL;
+struct task_struct *last_task_used_vsx = NULL;
 struct task_struct *last_task_used_spe = NULL;
 #endif

@@ -106,11 +107,23 @@ EXPORT_SYMBOL(enable_kernel_fp);

 int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
 {
+#ifdef CONFIG_VSX
+	int i;
+	elf_fpreg_t *reg;
+#endif
+
 	if (!tsk->thread.regs)
 		return 0;
 	flush_fp_to_thread(current);

+#ifdef CONFIG_VSX
+	reg = (elf_fpreg_t *)fpregs;
+	for (i = 0; i < ELF_NFPREG - 1; i++, reg++)
+		*reg = tsk->thread.TS_FPR(i);
+	memcpy(reg, &tsk->thread.fpscr, sizeof(elf_fpreg_t));
+#else
 	memcpy(fpregs, &tsk->thread.TS_FPR(0), sizeof(*fpregs));
+#endif

 	return 1;
 }
@@ -149,7 +162,7 @@ void flush_altivec_to_thread(struct task_struct *tsk)
 	}
 }

-int dump_task_altivec(struct task_struct *tsk, elf_vrregset_t *vrregs)
+int dump_task_altivec(struct task_struct *tsk, elf_vrreg_t *vrregs)
 {
 	/* ELF_NVRREG includes the VSCR and VRSAVE which we need to save
 	 * separately, see below */
@@ -179,6 +192,80 @@ int dump_task_altivec(struct task_struct *tsk, elf_vrregset_t *vrregs)
 }
 #endif /* CONFIG_ALTIVEC */

+#ifdef CONFIG_VSX
+#if 0
+/* not currently used, but some crazy RAID module might want to later */
+void enable_kernel_vsx(void)
+{
+	WARN_ON(preemptible());
+
+#ifdef CONFIG_SMP
+	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
+		giveup_vsx(current);
+	else
+		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
+#else
+	giveup_vsx(last_task_used_vsx);
+#endif /* CONFIG_SMP */
+}
+EXPORT_SYMBOL(enable_kernel_vsx);
+#endif
+
+void flush_vsx_to_thread(struct task_struct *tsk)
+{
+	if (tsk->thread.regs) {
+		preempt_disable();
+		if (tsk->thread.regs->msr & MSR_VSX) {
+#ifdef CONFIG_SMP
+			BUG_ON(tsk != current);
+#endif
+			giveup_vsx(tsk);
+		}
+		preempt_enable();
+	}
+}
+
+/*
+ * This dumps the lower half 64bits of the first 32 VSX registers.
+ * This needs to be called with dump_task_fp and dump_task_altivec to
+ * get all the VSX state.
+ */
+int dump_task_vsx(struct task_struct *tsk, elf_vrreg_t *vrregs)
+{
+	elf_vrreg_t *reg;
+	double buf[32];
+	int i;
+
+	if (tsk == current)
+		flush_vsx_to_thread(tsk);
+
+	reg = (elf_vrreg_t *)vrregs;
+
+	for (i = 0; i < 32 ; i++)
+		buf[i] = current->thread.fpr[i][TS_VSRLOWOFFSET];
+	memcpy(reg, buf, sizeof(buf));
+
+	return 1;
+}
+#endif /* CONFIG_VSX */
+
+int dump_task_vector(struct task_struct *tsk, elf_vrregset_t *vrregs)
+{
+	int rc = 0;
+	elf_vrreg_t *regs = (elf_vrreg_t *)vrregs;
+#ifdef CONFIG_ALTIVEC
+	rc = dump_task_altivec(tsk, regs);
+	if (rc)
+		return rc;
+	regs += ELF_NVRREG;
+#endif
+
+#ifdef CONFIG_VSX
+	rc = dump_task_vsx(tsk, regs);
+#endif
+	return rc;
+}
+
 #ifdef CONFIG_SPE

 void enable_kernel_spe(void)
@@ -233,6 +320,10 @@ void discard_lazy_cpu_state(void)
 	if (last_task_used_altivec == current)
 		last_task_used_altivec = NULL;
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_VSX
+	if (last_task_used_vsx == current)
+		last_task_used_vsx = NULL;
+#endif /* CONFIG_VSX */
 #ifdef CONFIG_SPE
 	if (last_task_used_spe == current)
 		last_task_used_spe = NULL;
@@ -297,6 +388,10 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
 		giveup_altivec(prev);
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_VSX
+	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
+		giveup_vsx(prev);
+#endif /* CONFIG_VSX */
 #ifdef CONFIG_SPE
 	/*
 	 * If the previous thread used spe in the last quantum
@@ -317,6 +412,10 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	if (new->thread.regs && last_task_used_altivec == new)
 		new->thread.regs->msr |= MSR_VEC;
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_VSX
+	if (new->thread.regs && last_task_used_vsx == new)
+		new->thread.regs->msr |= MSR_VSX;
+#endif /* CONFIG_VSX */
 #ifdef CONFIG_SPE
 	/* Avoid the trap.  On smp this this never happens since
 	 * we don't set last_task_used_spe
@@ -417,6 +516,8 @@ static struct regbit {
 	{MSR_EE,	"EE"},
 	{MSR_PR,	"PR"},
 	{MSR_FP,	"FP"},
+	{MSR_VEC,	"VEC"},
+	{MSR_VSX,	"VSX"},
 	{MSR_ME,	"ME"},
 	{MSR_IR,	"IR"},
 	{MSR_DR,	"DR"},
@@ -534,6 +635,7 @@ void prepare_to_copy(struct task_struct *tsk)
 {
 	flush_fp_to_thread(current);
 	flush_altivec_to_thread(current);
+	flush_vsx_to_thread(current);
 	flush_spe_to_thread(current);
 }

@@ -689,6 +791,9 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 #endif

 	discard_lazy_cpu_state();
+#ifdef CONFIG_VSX
+	current->thread.used_vsr = 0;
+#endif
 	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
 	current->thread.fpscr.val = 0;
 #ifdef CONFIG_ALTIVEC
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 70fbde84b83f..4e203a89e189 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -350,6 +350,51 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
 }
 #endif /* CONFIG_ALTIVEC */

+#ifdef CONFIG_VSX
+/*
+ * Currently to set and get all the vsx state, you need to call
+ * the fp and VMX calls as well.  This only get/sets the lower 32
+ * 128bit VSX registers.
+ */
+
+static int vsr_active(struct task_struct *target,
+		      const struct user_regset *regset)
+{
+	flush_vsx_to_thread(target);
+	return target->thread.used_vsr ? regset->n : 0;
+}
+
+static int vsr_get(struct task_struct *target, const struct user_regset *regset,
+		   unsigned int pos, unsigned int count,
+		   void *kbuf, void __user *ubuf)
+{
+	int ret;
+
+	flush_vsx_to_thread(target);
+
+	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				  target->thread.fpr, 0,
+				  32 * sizeof(vector128));
+
+	return ret;
+}
+
+static int vsr_set(struct task_struct *target, const struct user_regset *regset,
+		   unsigned int pos, unsigned int count,
+		   const void *kbuf, const void __user *ubuf)
+{
+	int ret;
+
+	flush_vsx_to_thread(target);
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				 target->thread.fpr, 0,
+				 32 * sizeof(vector128));
+
+	return ret;
+}
+#endif /* CONFIG_VSX */
+
 #ifdef CONFIG_SPE

 /*
@@ -426,6 +471,9 @@ enum powerpc_regset {
 #ifdef CONFIG_ALTIVEC
 	REGSET_VMX,
 #endif
+#ifdef CONFIG_VSX
+	REGSET_VSX,
+#endif
 #ifdef CONFIG_SPE
 	REGSET_SPE,
 #endif
@@ -449,6 +497,13 @@ static const struct user_regset native_regsets[] = {
 		.active = vr_active, .get = vr_get, .set = vr_set
 	},
 #endif
+#ifdef CONFIG_VSX
+	[REGSET_VSX] = {
+		.n = 32,
+		.size = sizeof(vector128), .align = sizeof(vector128),
+		.active = vsr_active, .get = vsr_get, .set = vsr_set
+	},
+#endif
 #ifdef CONFIG_SPE
 	[REGSET_SPE] = {
 		.n = 35,
@@ -849,6 +904,21 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 					   sizeof(u32)),
 				   (const void __user *) data);
 #endif
+#ifdef CONFIG_VSX
+	case PTRACE_GETVSRREGS:
+		return copy_regset_to_user(child, &user_ppc_native_view,
+					   REGSET_VSX,
+					   0, (32 * sizeof(vector128) +
+					       sizeof(u32)),
+					   (void __user *) data);
+
+	case PTRACE_SETVSRREGS:
+		return copy_regset_from_user(child, &user_ppc_native_view,
+					     REGSET_VSX,
+					     0, (32 * sizeof(vector128) +
+						 sizeof(u32)),
+					     (const void __user *) data);
+#endif
 #ifdef CONFIG_SPE
 	case PTRACE_GETEVRREGS:
 		/* Get the child spe register state. */
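The two requests above are what a debugger would use to reach the new REGSET_VSX. A minimal userspace sketch (not part of the patch) of fetching VSR0-31 from an already ptrace-stopped child follows; the struct vsrregs layout and read_vsx() name are illustrative, the buffer mirrors the 32 * sizeof(vector128) + sizeof(u32) length arch_ptrace() transfers, and PTRACE_GETVSRREGS is assumed to come from the patched asm-powerpc/ptrace.h.

#include <stdio.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_GETVSRREGS
#define PTRACE_GETVSRREGS 27	/* value from the patched asm-powerpc/ptrace.h */
#endif

/* One 128-bit slot per VSR plus the trailing u32 arch_ptrace() allows for;
 * hypothetical struct for illustration only. */
struct vsrregs {
	uint64_t vsr[32][2];	/* [i][0] = doubleword 0 (aliases FPR i) */
	uint32_t pad;
};

static int read_vsx(pid_t pid)
{
	struct vsrregs regs;

	/* caller must already have the child stopped under ptrace */
	if (ptrace(PTRACE_GETVSRREGS, pid, NULL, &regs) == -1)
		return -1;

	printf("VSR0 = %016llx %016llx\n",
	       (unsigned long long)regs.vsr[0][0],
	       (unsigned long long)regs.vsr[0][1]);
	return 0;
}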
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index f7fa395b9fb5..349d3487d920 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -378,6 +378,21 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
 	memcpy(&buf[i], &current->thread.fpscr, sizeof(double));
 	if (__copy_to_user(&frame->mc_fregs, buf, ELF_NFPREG * sizeof(double)))
 		return 1;
+	/*
+	 * Copy VSR 0-31 upper half from thread_struct to local
+	 * buffer, then write that to userspace.  Also set MSR_VSX in
+	 * the saved MSR value to indicate that frame->mc_vregs
+	 * contains valid data
+	 */
+	if (current->thread.used_vsr) {
+		flush_vsx_to_thread(current);
+		for (i = 0; i < 32 ; i++)
+			buf[i] = current->thread.fpr[i][TS_VSRLOWOFFSET];
+		if (__copy_to_user(&frame->mc_vsregs, buf,
+				   ELF_NVSRHALFREG * sizeof(double)))
+			return 1;
+		msr |= MSR_VSX;
+	}
 #else
 	/* save floating-point registers */
 	if (__copy_to_user(&frame->mc_fregs, current->thread.fpr,
@@ -482,6 +497,24 @@ static long restore_user_regs(struct pt_regs *regs,
 	for (i = 0; i < 32 ; i++)
 		current->thread.TS_FPR(i) = buf[i];
 	memcpy(&current->thread.fpscr, &buf[i], sizeof(double));
+	/*
+	 * Force the process to reload the VSX registers from
+	 * current->thread when it next does VSX instruction.
+	 */
+	regs->msr &= ~MSR_VSX;
+	if (msr & MSR_VSX) {
+		/*
+		 * Restore altivec registers from the stack to a local
+		 * buffer, then write this out to the thread_struct
+		 */
+		if (__copy_from_user(buf, &sr->mc_vsregs,
+				     sizeof(sr->mc_vsregs)))
+			return 1;
+		for (i = 0; i < 32 ; i++)
+			current->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+	} else if (current->thread.used_vsr)
+		for (i = 0; i < 32 ; i++)
+			current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
 #else
 	if (__copy_from_user(current->thread.fpr, &sr->mc_fregs,
 			     sizeof(sr->mc_fregs)))
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index a587b33cd6b9..34f37e59bacc 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -123,6 +123,22 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 		buf[i] = current->thread.TS_FPR(i);
 	memcpy(&buf[i], &current->thread.fpscr, sizeof(double));
 	err |= __copy_to_user(&sc->fp_regs, buf, FP_REGS_SIZE);
+	/*
+	 * Copy VSX low doubleword to local buffer for formatting,
+	 * then out to userspace.  Update v_regs to point after the
+	 * VMX data.
+	 */
+	if (current->thread.used_vsr) {
+		flush_vsx_to_thread(current);
+		v_regs += ELF_NVRREG;
+		for (i = 0; i < 32 ; i++)
+			buf[i] = current->thread.fpr[i][TS_VSRLOWOFFSET];
+		err |= __copy_to_user(v_regs, buf, 32 * sizeof(double));
+		/* set MSR_VSX in the MSR value in the frame to
+		 * indicate that sc->vs_reg) contains valid data.
+		 */
+		msr |= MSR_VSX;
+	}
 #else /* CONFIG_VSX */
 	/* copy fpr regs and fpscr */
 	err |= __copy_to_user(&sc->fp_regs, &current->thread.fpr, FP_REGS_SIZE);
@@ -197,7 +213,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 	 * This has to be done before copying stuff into current->thread.fpr/vr
 	 * for the reasons explained in the previous comment.
 	 */
-	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC);
+	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);

 #ifdef CONFIG_ALTIVEC
 	err |= __get_user(v_regs, &sc->v_regs);
@@ -226,6 +242,19 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 		current->thread.TS_FPR(i) = buf[i];
 	memcpy(&current->thread.fpscr, &buf[i], sizeof(double));

+	/*
+	 * Get additional VSX data. Update v_regs to point after the
+	 * VMX data.  Copy VSX low doubleword from userspace to local
+	 * buffer for formatting, then into the taskstruct.
+	 */
+	v_regs += ELF_NVRREG;
+	if ((msr & MSR_VSX) != 0)
+		err |= __copy_from_user(buf, v_regs, 32 * sizeof(double));
+	else
+		memset(buf, 0, 32 * sizeof(double));
+
+	for (i = 0; i < 32 ; i++)
+		current->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
 #else
 	err |= __copy_from_user(&current->thread.fpr, &sc->fp_regs, FP_REGS_SIZE);
 #endif
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index b463d48145a4..878fbddb6ae1 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -967,6 +967,20 @@ void altivec_unavailable_exception(struct pt_regs *regs)
 	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
 }

+void vsx_unavailable_exception(struct pt_regs *regs)
+{
+	if (user_mode(regs)) {
+		/* A user program has executed a vsx instruction,
+		   but this kernel doesn't support vsx. */
+		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+		return;
+	}
+
+	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
+			"%lx at %lx\n", regs->trap, regs->nip);
+	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
+}
+
 void performance_monitor_exception(struct pt_regs *regs)
 {
 	perf_irq(regs);
@@ -1099,6 +1113,21 @@ void altivec_assist_exception(struct pt_regs *regs)
 }
 #endif /* CONFIG_ALTIVEC */

+#ifdef CONFIG_VSX
+void vsx_assist_exception(struct pt_regs *regs)
+{
+	if (!user_mode(regs)) {
+		printk(KERN_EMERG "VSX assist exception in kernel mode"
+		       " at %lx\n", regs->nip);
+		die("Kernel VSX assist exception", regs, SIGILL);
+	}
+
+	flush_vsx_to_thread(current);
+	printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip);
+	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+}
+#endif /* CONFIG_VSX */
+
 #ifdef CONFIG_FSL_BOOKE
 void CacheLockingException(struct pt_regs *regs, unsigned long address,
 		unsigned long error_code)
diff --git a/include/asm-powerpc/elf.h b/include/asm-powerpc/elf.h
index d1e3bda0625d..746e53d60cbe 100644
--- a/include/asm-powerpc/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -109,6 +109,7 @@ typedef elf_gregset_t32 compat_elf_gregset_t;
 #ifdef __powerpc64__
 # define ELF_NVRREG32	33	/* includes vscr & vrsave stuffed together */
 # define ELF_NVRREG	34	/* includes vscr & vrsave in split vectors */
+# define ELF_NVSRHALFREG 32	/* Half the vsx registers */
 # define ELF_GREG_TYPE	elf_greg_t64
 #else
 # define ELF_NEVRREG	34	/* includes acc (as 2) */
@@ -158,6 +159,7 @@ typedef __vector128 elf_vrreg_t;
 typedef elf_vrreg_t elf_vrregset_t[ELF_NVRREG];
 #ifdef __powerpc64__
 typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32];
+typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
 #endif

 #ifdef __KERNEL__
@@ -219,8 +221,8 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
 typedef elf_vrregset_t elf_fpxregset_t;

 #ifdef CONFIG_ALTIVEC
-extern int dump_task_altivec(struct task_struct *, elf_vrregset_t *vrregs);
-#define ELF_CORE_COPY_XFPREGS(tsk, regs) dump_task_altivec(tsk, regs)
+extern int dump_task_vector(struct task_struct *, elf_vrregset_t *vrregs);
+#define ELF_CORE_COPY_XFPREGS(tsk, regs) dump_task_vector(tsk, regs)
 #define ELF_CORE_XFPREG_TYPE NT_PPC_VMX
 #endif

diff --git a/include/asm-powerpc/ptrace.h b/include/asm-powerpc/ptrace.h
index 38d87e5e569d..3d6e31024e56 100644
--- a/include/asm-powerpc/ptrace.h
+++ b/include/asm-powerpc/ptrace.h
@@ -224,6 +224,14 @@ extern void user_disable_single_step(struct task_struct *);
 #define PT_VRSAVE_32 (PT_VR0 + 33*4)
 #endif

+/*
+ * Only store first 32 VSRs here. The second 32 VSRs in VR0-31
+ */
+#define PT_VSR0 150	/* each VSR reg occupies 2 slots in 64-bit */
+#define PT_VSR31 (PT_VSR0 + 2*31)
+#ifdef __KERNEL__
+#define PT_VSR0_32 300	/* each VSR reg occupies 4 slots in 32-bit */
+#endif
 #endif /* __powerpc64__ */

 /*
@@ -246,6 +254,10 @@ extern void user_disable_single_step(struct task_struct *);
 #define PTRACE_GETEVRREGS	20
 #define PTRACE_SETEVRREGS	21

+/* Get the first 32 128bit VSX registers */
+#define PTRACE_GETVSRREGS	27
+#define PTRACE_SETVSRREGS	28
+
 /*
  * Get or set a debug register. The first 16 are DABR registers and the
  * second 16 are IABR registers.
diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h
index 7256efb5c140..bbccadfee0d6 100644
--- a/include/asm-powerpc/reg.h
+++ b/include/asm-powerpc/reg.h
@@ -30,6 +30,7 @@
 #define MSR_ISF_LG	61	/* Interrupt 64b mode valid on 630 */
 #define MSR_HV_LG	60	/* Hypervisor state */
 #define MSR_VEC_LG	25	/* Enable AltiVec */
+#define MSR_VSX_LG	23	/* Enable VSX */
 #define MSR_POW_LG	18	/* Enable Power Management */
 #define MSR_WE_LG	18	/* Wait State Enable */
 #define MSR_TGPR_LG	17	/* TLB Update registers in use */
@@ -71,6 +72,7 @@
 #endif

 #define MSR_VEC		__MASK(MSR_VEC_LG)	/* Enable AltiVec */
+#define MSR_VSX		__MASK(MSR_VSX_LG)	/* Enable VSX */
 #define MSR_POW		__MASK(MSR_POW_LG)	/* Enable Power Management */
 #define MSR_WE		__MASK(MSR_WE_LG)	/* Wait State Enable */
 #define MSR_TGPR	__MASK(MSR_TGPR_LG)	/* TLB Update registers in use */
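For reference, MSR_VSX_LG is 23, so the mask sits in the upper halfword of the low 32 bits; that is why the assembly elsewhere in this patch manipulates it with oris/lis/andis on the @h half rather than a plain ori. A small sketch checking that arithmetic, assuming __MASK(x) expands to 1UL << x as it does for the neighbouring MSR bits:

#include <assert.h>
#include <stdint.h>

#define MSR_VSX_LG 23
#define MSR_VSX    (1UL << MSR_VSX_LG)	/* assumed __MASK() expansion */

int main(void)
{
	/* 0x00800000: bit 23 is in the upper halfword of the low word, so
	 * "oris rN,rN,MSR_VSX@h" ORs in immediate 0x0080 shifted left 16. */
	assert(MSR_VSX == 0x00800000UL);
	assert((MSR_VSX >> 16) == 0x0080);	/* the @h half used by oris */

	uint64_t msr = 0;
	msr |= MSR_VSX;				/* what the oris achieves at runtime */
	return msr == 0x00800000UL ? 0 : 1;
}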
diff --git a/include/asm-powerpc/sigcontext.h b/include/asm-powerpc/sigcontext.h
index 165d630e1cf3..9c1f24fd5d11 100644
--- a/include/asm-powerpc/sigcontext.h
+++ b/include/asm-powerpc/sigcontext.h
@@ -43,9 +43,44 @@ struct sigcontext {
  * it must be copied via a vector register to/from storage) or as a word.
  * The entry with index 33 contains the vrsave as the first word (offset 0)
  * within the quadword.
+ *
+ * Part of the VSX data is stored here also by extending vmx_reserve
+ * by an additional 32 double words.  Architecturally the layout of
+ * the VSR registers and how they overlap on top of the legacy FPR and
+ * VR registers is shown below:
+ *
+ *                    VSR doubleword 0               VSR doubleword 1
+ *         ----------------------------------------------------------------
+ * VSR[0]  |             FPR[0]            |                              |
+ *         ----------------------------------------------------------------
+ * VSR[1]  |             FPR[1]            |                              |
+ *         ----------------------------------------------------------------
+ *         |              ...              |                              |
+ *         |              ...              |                              |
+ *         ----------------------------------------------------------------
+ * VSR[30] |             FPR[30]           |                              |
+ *         ----------------------------------------------------------------
+ * VSR[31] |             FPR[31]           |                              |
+ *         ----------------------------------------------------------------
+ * VSR[32] |                             VR[0]                            |
+ *         ----------------------------------------------------------------
+ * VSR[33] |                             VR[1]                            |
+ *         ----------------------------------------------------------------
+ *         |                              ...                             |
+ *         |                              ...                             |
+ *         ----------------------------------------------------------------
+ * VSR[62] |                             VR[30]                           |
+ *         ----------------------------------------------------------------
+ * VSR[63] |                             VR[31]                           |
+ *         ----------------------------------------------------------------
+ *
+ * FPR/VSR 0-31 doubleword 0 is stored in fp_regs, and VMX/VSR 32-63
+ * is stored at the start of vmx_reserve.  vmx_reserve is extended for
+ * backwards compatibility to store VSR 0-31 doubleword 1 after the VMX
+ * registers and vscr/vrsave.
  */
 	elf_vrreg_t	__user *v_regs;
-	long		vmx_reserve[ELF_NVRREG+ELF_NVRREG+1];
+	long		vmx_reserve[ELF_NVRREG+ELF_NVRREG+32+1];
 #endif
 };

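The layout comment above matches what setup_sigcontext()/restore_sigcontext() do in signal_64.c: the 32 doubleword-1 values are appended directly after the ELF_NVRREG 16-byte VMX entries that v_regs points at. Below is a hedged userspace sketch of locating that area from an SA_SIGINFO handler in a 64-bit process; the glibc uc_mcontext.v_regs field and the fixed ELF_NVRREG value are assumptions, and a real consumer would also check MSR_VSX in the saved MSR before trusting the data.

#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <ucontext.h>

#define ELF_NVRREG 34	/* 32 VRs + vscr + vrsave, from asm-powerpc/elf.h */

static uint64_t vsr0_low;

static void handler(int sig, siginfo_t *info, void *ucontext)
{
	ucontext_t *uc = ucontext;
	/* Assumption: on 64-bit powerpc glibc, uc_mcontext mirrors the kernel
	 * sigcontext shown above and exposes the v_regs pointer directly. */
	unsigned char *vmx = (unsigned char *)uc->uc_mcontext.v_regs;

	if (vmx == NULL)
		return;		/* no vector state saved for this frame */

	/* setup_sigcontext() places VSR0-31 doubleword 1 immediately after
	 * the ELF_NVRREG 16-byte VMX entries when MSR_VSX was set. */
	memcpy(&vsr0_low, vmx + ELF_NVRREG * 16, sizeof(vsr0_low));

	(void)sig;
	(void)info;
}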
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 2642a92b724f..0c12c66733f6 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -139,6 +139,7 @@ extern void enable_kernel_altivec(void);
 extern void giveup_altivec(struct task_struct *);
 extern void load_up_altivec(struct task_struct *);
 extern int emulate_altivec(struct pt_regs *);
+extern void giveup_vsx(struct task_struct *);
 extern void enable_kernel_spe(void);
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
@@ -162,6 +163,14 @@ static inline void flush_altivec_to_thread(struct task_struct *t)
 }
 #endif

+#ifdef CONFIG_VSX
+extern void flush_vsx_to_thread(struct task_struct *);
+#else
+static inline void flush_vsx_to_thread(struct task_struct *t)
+{
+}
+#endif
+
 #ifdef CONFIG_SPE
 extern void flush_spe_to_thread(struct task_struct *);
 #else