diff options
author | Mark Nelson <markn@au1.ibm.com> | 2008-11-10 19:53:34 -0500 |
---|---|---|
committer | Paul Mackerras <paulus@samba.org> | 2008-11-19 00:04:54 -0500 |
commit | a4e22f02f5b6518c1484faea1f88d81802b9feac (patch) | |
tree | 62652cde79d43420b8587344cb27ae8f9f1d12ef | |
parent | 7526ff76f84178f8c926de7e590e4c5f9d4a2e62 (diff) |
powerpc: Update 64bit __copy_tofrom_user() using CPU_FTR_UNALIGNED_LD_STD
In exactly the same way that we updated memcpy() with new feature
sections in commit 25d6e2d7c58ddc4a3b614fc5381591c0cfe66556 ("powerpc:
Update 64bit memcpy() using CPU_FTR_UNALIGNED_LD_STD"), we do the same
thing here for __copy_tofrom_user(). Once again this is purely a
performance tweak for Cell and Power6 - this has no effect on all the
other 64bit powerpc chips.
We can make these same changes to __copy_tofrom_user() because the
basic copy algorithm is the same as in memcpy() - this version just
has all the exception handling logic needed when copying to or from
userspace as well as a special case for copying whole 4K pages that
are page aligned.
The CPU_FTR_UNALIGNED_LD_STD CPU feature bit was added in commit
4ec577a28980a0790df3c3dfe9c81f6e2222acfb ("powerpc: Add new CPU
feature: CPU_FTR_UNALIGNED_LD_STD").
We also make the same simple one-line change from cmpldi r1,... to
cmpldi cr1,... for consistency.
Signed-off-by: Mark Nelson <markn@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r-- | arch/powerpc/lib/copyuser_64.S | 17 |
1 files changed, 15 insertions, 2 deletions
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S index 25ec5378afa4..70693a5c12a1 100644 --- a/arch/powerpc/lib/copyuser_64.S +++ b/arch/powerpc/lib/copyuser_64.S | |||
@@ -26,11 +26,24 @@ _GLOBAL(__copy_tofrom_user) | |||
26 | andi. r6,r6,7 | 26 | andi. r6,r6,7 |
27 | PPC_MTOCRF 0x01,r5 | 27 | PPC_MTOCRF 0x01,r5 |
28 | blt cr1,.Lshort_copy | 28 | blt cr1,.Lshort_copy |
29 | /* Below we want to nop out the bne if we're on a CPU that has the | ||
30 | * CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit | ||
31 | * cleared. | ||
32 | * At the time of writing the only CPU that has this combination of bits | ||
33 | * set is Power6. | ||
34 | */ | ||
35 | BEGIN_FTR_SECTION | ||
36 | nop | ||
37 | FTR_SECTION_ELSE | ||
29 | bne .Ldst_unaligned | 38 | bne .Ldst_unaligned |
39 | ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \ | ||
40 | CPU_FTR_UNALIGNED_LD_STD) | ||
30 | .Ldst_aligned: | 41 | .Ldst_aligned: |
31 | andi. r0,r4,7 | ||
32 | addi r3,r3,-16 | 42 | addi r3,r3,-16 |
43 | BEGIN_FTR_SECTION | ||
44 | andi. r0,r4,7 | ||
33 | bne .Lsrc_unaligned | 45 | bne .Lsrc_unaligned |
46 | END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | ||
34 | srdi r7,r5,4 | 47 | srdi r7,r5,4 |
35 | 20: ld r9,0(r4) | 48 | 20: ld r9,0(r4) |
36 | addi r4,r4,-8 | 49 | addi r4,r4,-8 |
@@ -138,7 +151,7 @@ _GLOBAL(__copy_tofrom_user) | |||
138 | PPC_MTOCRF 0x01,r6 /* put #bytes to 8B bdry into cr7 */ | 151 | PPC_MTOCRF 0x01,r6 /* put #bytes to 8B bdry into cr7 */ |
139 | subf r5,r6,r5 | 152 | subf r5,r6,r5 |
140 | li r7,0 | 153 | li r7,0 |
141 | cmpldi r1,r5,16 | 154 | cmpldi cr1,r5,16 |
142 | bf cr7*4+3,1f | 155 | bf cr7*4+3,1f |
143 | 35: lbz r0,0(r4) | 156 | 35: lbz r0,0(r4) |
144 | 81: stb r0,0(r3) | 157 | 81: stb r0,0(r3) |