| author | Michael Neuling <mikey@neuling.org> | 2008-07-11 02:31:09 -0400 |
|---|---|---|
| committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2008-07-14 22:29:25 -0400 |
| commit | cd6f37be7fdc9fea407379745350f6630b9d3cdd (patch) | |
| tree | ca8c813d84c6db6ba6052a42c0fb4e8aff411422 | |
| parent | 7c29217096d83f657e6ee70479af09b46f4275f6 (diff) | |
powerpc: Add VSX load/store alignment exception handler
VSX loads and stores will take an alignment exception when the address
is not on a 4 byte boundary.
This adds support for these alignment exceptions and emulates the requested load or store.
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-rw-r--r-- | arch/powerpc/kernel/align.c | 58
1 file changed, 57 insertions(+), 1 deletion(-)
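The emulation itself boils down to copying bytes between the faulting effective address (in DAR) and the thread's FP/VSX register image, copying the doubleword twice for a splat load. Below is a minimal user-space sketch of that copy logic, an illustration rather than kernel code: plain memcpy on a local 16-byte buffer stands in for __copy_to_user/__copy_from_user and current->thread, and the MODEL_* flags are placeholders, not the kernel's flag values.

```c
#include <stdio.h>
#include <string.h>

#define MODEL_ST   0x1  /* placeholder flags, not the kernel's ST/SPLT values */
#define MODEL_SPLT 0x2

/* 16-byte image standing in for one VSX register in the thread struct */
static unsigned char vsx_image[16];

/* Mirror of the patch's copy logic: store = register -> memory,
 * load = memory -> register, splat load copies the doubleword twice. */
static void model_emulate_vsx(unsigned char *addr, unsigned int flags,
                              unsigned int length)
{
        unsigned char *ptr = vsx_image;

        if (flags & MODEL_ST) {
                memcpy(addr, ptr, length);
        } else {
                if (flags & MODEL_SPLT) {
                        memcpy(ptr, addr, length);
                        ptr += length;
                }
                memcpy(ptr, addr, length);
        }
}

int main(void)
{
        unsigned char buf[24];
        int i;

        for (i = 0; i < 24; i++)
                buf[i] = (unsigned char)i;

        /* 8-byte splat load from a deliberately unaligned address (buf + 1):
         * both halves of the 16-byte image end up holding bytes 1..8. */
        model_emulate_vsx(buf + 1, MODEL_SPLT, 8);
        for (i = 0; i < 16; i++)
                printf("%02x ", vsx_image[i]);
        printf("\n");
        return 0;
}
```

For update-form instructions the kernel additionally writes the effective address back into the index GPR (regs->gpr[areg] = regs->dar), which the sketch omits.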
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index f22b5d0a4a97..367129789cc0 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -48,6 +48,7 @@ struct aligninfo {
 #define HARD    0x80    /* string, stwcx. */
 #define E4      0x40    /* SPE endianness is word */
 #define E8      0x80    /* SPE endianness is double word */
+#define SPLT    0x80    /* VSX SPLAT load */
 
 /* DSISR bits reported for a DCBZ instruction: */
 #define DCBZ    0x5f    /* 8xx/82xx dcbz faults when cache not enabled */
@@ -637,6 +638,36 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
 }
 #endif /* CONFIG_SPE */
 
+#ifdef CONFIG_VSX
+/*
+ * Emulate VSX instructions...
+ */
+static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
+                       unsigned int areg, struct pt_regs *regs,
+                       unsigned int flags, unsigned int length)
+{
+        char *ptr = (char *) &current->thread.TS_FPR(reg);
+        int ret;
+
+        flush_vsx_to_thread(current);
+
+        if (flags & ST)
+                ret = __copy_to_user(addr, ptr, length);
+        else {
+                if (flags & SPLT){
+                        ret = __copy_from_user(ptr, addr, length);
+                        ptr += length;
+                }
+                ret |= __copy_from_user(ptr, addr, length);
+        }
+        if (flags & U)
+                regs->gpr[areg] = regs->dar;
+        if (ret)
+                return -EFAULT;
+        return 1;
+}
+#endif
+
 /*
  * Called on alignment exception. Attempts to fixup
  *
@@ -647,7 +678,7 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
 
 int fix_alignment(struct pt_regs *regs)
 {
-        unsigned int instr, nb, flags;
+        unsigned int instr, nb, flags, instruction = 0;
         unsigned int reg, areg;
         unsigned int dsisr;
         unsigned char __user *addr;
@@ -689,6 +720,7 @@ int fix_alignment(struct pt_regs *regs)
                 if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE))
                         instr = cpu_to_le32(instr);
                 dsisr = make_dsisr(instr);
+                instruction = instr;
         }
 
         /* extract the operation and registers from the dsisr */
@@ -728,6 +760,30 @@ int fix_alignment(struct pt_regs *regs)
         /* DAR has the operand effective address */
         addr = (unsigned char __user *)regs->dar;
 
+#ifdef CONFIG_VSX
+        if ((instruction & 0xfc00003e) == 0x7c000018) {
+                /* Additional register addressing bit (64 VSX vs 32 FPR/GPR */
+                reg |= (instruction & 0x1) << 5;
+                /* Simple inline decoder instead of a table */
+                if (instruction & 0x200)
+                        nb = 16;
+                else if (instruction & 0x080)
+                        nb = 8;
+                else
+                        nb = 4;
+                flags = 0;
+                if (instruction & 0x100)
+                        flags |= ST;
+                if (instruction & 0x040)
+                        flags |= U;
+                /* splat load needs a special decoder */
+                if ((instruction & 0x400) == 0){
+                        flags |= SPLT;
+                        nb = 8;
+                }
+                return emulate_vsx(addr, reg, areg, regs, flags, nb);
+        }
+#endif
         /* A size of 0 indicates an instruction we don't support, with
          * the exception of DCBZ which is handled as a special case here
          */