author:    Anton Blanchard <anton@samba.org>  2012-04-17 22:21:52 -0400
committer: Benjamin Herrenschmidt <benh@kernel.crashing.org>  2012-04-30 01:37:26 -0400
commit:    694caf0255dcab506d1e174c96a65ab65d96e108
tree:      b5bb6facfc517062c319c742b54c4fceffa56c9b /arch/powerpc/lib
parent:    6cd3209967469f6e89d329deda6bb0b4700e7b62
powerpc: Remove CONFIG_POWER4_ONLY

Remove CONFIG_POWER4_ONLY. The option is badly named and only does two
things:

- It wraps the MMU segment table code. With feature fixups there is
  little downside to compiling this in.

- It uses the newer mtocrf instruction in various assembly functions.
  Instead of making this a compile option, just do it at runtime via a
  feature fixup.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
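The runtime alternative the message refers to is the kernel's feature-fixup
mechanism: both instruction forms are assembled, and the one matching the
CPU's feature mask is patched in once at boot by do_feature_fixups(). Below
is a minimal sketch of what a feature-fixup-based PPC_MTOCRF could look
like; CPU_FTR_HAS_MTOCRF is a hypothetical feature bit and 848 an arbitrary
nesting label, so this is illustrative only and not necessarily the exact
macro the patch adds.

/*
 * Illustrative sketch only -- not necessarily the macro added by this
 * patch.  BEGIN_FTR_SECTION_NESTED/FTR_SECTION_ELSE_NESTED come from
 * asm/feature-fixups.h; CPU_FTR_HAS_MTOCRF is a hypothetical feature
 * bit and 848 an arbitrary nesting label, used purely as examples.
 */
#define PPC_MTOCRF(FXM, RS)						\
	BEGIN_FTR_SECTION_NESTED(848);					\
	mtocrf	FXM,RS;		/* newer form: updates one CR field */	\
	FTR_SECTION_ELSE_NESTED(848);					\
	mtcrf	FXM,RS;		/* older form: may update all fields */	\
	ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_HAS_MTOCRF, 848)

With something along these lines, callers such as memcpy can use
PPC_MTOCRF(0x01,r5) unconditionally and the kernel selects the right
encoding once per boot instead of at Kconfig time.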
Diffstat (limited to 'arch/powerpc/lib')
-rw-r--r--  arch/powerpc/lib/copyuser_64.S  6
-rw-r--r--  arch/powerpc/lib/mem_64.S       6
-rw-r--r--  arch/powerpc/lib/memcpy_64.S    6
3 files changed, 9 insertions, 9 deletions
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index 773d38f90aaa..d73a59014900 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -30,7 +30,7 @@ _GLOBAL(__copy_tofrom_user_base)
 	dcbt	0,r4
 	beq	.Lcopy_page_4K
 	andi.	r6,r6,7
-	PPC_MTOCRF	0x01,r5
+	PPC_MTOCRF(0x01,r5)
 	blt	cr1,.Lshort_copy
 /* Below we want to nop out the bne if we're on a CPU that has the
  * CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit
@@ -186,7 +186,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 	blr
 
 .Ldst_unaligned:
-	PPC_MTOCRF	0x01,r6		/* put #bytes to 8B bdry into cr7 */
+	PPC_MTOCRF(0x01,r6)		/* put #bytes to 8B bdry into cr7 */
 	subf	r5,r6,r5
 	li	r7,0
 	cmpldi	cr1,r5,16
@@ -201,7 +201,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 2:	bf	cr7*4+1,3f
 37:	lwzx	r0,r7,r4
 83:	stwx	r0,r7,r3
-3:	PPC_MTOCRF	0x01,r5
+3:	PPC_MTOCRF(0x01,r5)
 	add	r4,r6,r4
 	add	r3,r6,r3
 	b	.Ldst_aligned
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S
index 11ce045e21fd..f4fcb0bc6563 100644
--- a/arch/powerpc/lib/mem_64.S
+++ b/arch/powerpc/lib/mem_64.S
@@ -19,7 +19,7 @@ _GLOBAL(memset)
 	rlwimi	r4,r4,16,0,15
 	cmplw	cr1,r5,r0		/* do we get that far? */
 	rldimi	r4,r4,32,0
-	PPC_MTOCRF	1,r0
+	PPC_MTOCRF(1,r0)
 	mr	r6,r3
 	blt	cr1,8f
 	beq+	3f	/* if already 8-byte aligned */
@@ -49,7 +49,7 @@ _GLOBAL(memset)
 	bdnz	4b
 5:	srwi.	r0,r5,3
 	clrlwi	r5,r5,29
-	PPC_MTOCRF	1,r0
+	PPC_MTOCRF(1,r0)
 	beq	8f
 	bf	29,6f
 	std	r4,0(r6)
@@ -65,7 +65,7 @@ _GLOBAL(memset)
 	std	r4,0(r6)
 	addi	r6,r6,8
 8:	cmpwi	r5,0
-	PPC_MTOCRF	1,r5
+	PPC_MTOCRF(1,r5)
 	beqlr+
 	bf	29,9f
 	stw	r4,0(r6)
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
index e178922b2c21..82fea3963e15 100644
--- a/arch/powerpc/lib/memcpy_64.S
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -12,7 +12,7 @@
 	.align	7
 _GLOBAL(memcpy)
 	std	r3,48(r1)	/* save destination pointer for return value */
-	PPC_MTOCRF	0x01,r5
+	PPC_MTOCRF(0x01,r5)
 	cmpldi	cr1,r5,16
 	neg	r6,r3		# LS 3 bits = # bytes to 8-byte dest bdry
 	andi.	r6,r6,7
@@ -154,7 +154,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 	blr
 
 .Ldst_unaligned:
-	PPC_MTOCRF	0x01,r6		# put #bytes to 8B bdry into cr7
+	PPC_MTOCRF(0x01,r6)	# put #bytes to 8B bdry into cr7
 	subf	r5,r6,r5
 	li	r7,0
 	cmpldi	cr1,r5,16
@@ -169,7 +169,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 2:	bf	cr7*4+1,3f
 	lwzx	r0,r7,r4
 	stwx	r0,r7,r3
-3:	PPC_MTOCRF	0x01,r5
+3:	PPC_MTOCRF(0x01,r5)
 	add	r4,r6,r4
 	add	r3,r6,r3
 	b	.Ldst_aligned