Diffstat (limited to 'arch/powerpc/lib')
-rw-r--r--  arch/powerpc/lib/checksum_64.S     12
-rw-r--r--  arch/powerpc/lib/feature-fixups.c  32
-rw-r--r--  arch/powerpc/lib/locks.c           16
-rw-r--r--  arch/powerpc/lib/ppc_ksyms.c        4
-rw-r--r--  arch/powerpc/lib/rheap.c            2
-rw-r--r--  arch/powerpc/lib/string.S          44
-rw-r--r--  arch/powerpc/lib/vmx-helper.c       1
7 files changed, 41 insertions, 70 deletions
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 8e6e51016cc5..fdec6e613e95 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -74,9 +74,9 @@ _GLOBAL(__csum_partial)
 	ld	r11,24(r3)
 
 	/*
-	 * On POWER6 and POWER7 back to back addes take 2 cycles because of
-	 * the XER dependency. This means the fastest this loop can go is
-	 * 16 cycles per iteration. The scheduling of the loop below has
+	 * On POWER6 and POWER7 back to back adde instructions take 2 cycles
+	 * because of the XER dependency. This means the fastest this loop can
+	 * go is 16 cycles per iteration. The scheduling of the loop below has
 	 * been shown to hit this on both POWER6 and POWER7.
 	 */
 	.align	5
@@ -275,9 +275,9 @@ source; ld r10,16(r3)
 source;	ld	r11,24(r3)
 
 	/*
-	 * On POWER6 and POWER7 back to back addes take 2 cycles because of
-	 * the XER dependency. This means the fastest this loop can go is
-	 * 16 cycles per iteration. The scheduling of the loop below has
+	 * On POWER6 and POWER7 back to back adde instructions take 2 cycles
+	 * because of the XER dependency. This means the fastest this loop can
+	 * go is 16 cycles per iteration. The scheduling of the loop below has
 	 * been shown to hit this on both POWER6 and POWER7.
 	 */
 	.align	5
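Why the comment's 16-cycle figure holds: the 2-cycle XER dependency means the bound implies eight dependent adde operations per iteration (8 x 2 cycles = 16), no matter how the loads are scheduled around the carry chain. A minimal C model of that serial chain, for illustration only (a sketch, not the kernel's code; the real loop is the hand-scheduled assembly above):

    /* Models one iteration: eight add-with-carry steps, each needing
     * the carry-out of the previous one, like adde needs XER[CA]. */
    static unsigned long fold64(const unsigned long *p, unsigned long sum)
    {
            int carry = 0;
            int i;

            for (i = 0; i < 8; i++) {
                    unsigned long t = sum + p[i] + carry;   /* one "adde" */
                    /* Carry-out of the 64-bit add feeds the next step. */
                    carry = carry ? (t <= sum) : (t < sum);
                    sum = t;
            }
            return sum;
    }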
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 7ce3870d7ddd..defb2998b818 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -20,7 +20,8 @@
 #include <asm/code-patching.h>
 #include <asm/page.h>
 #include <asm/sections.h>
-
+#include <asm/setup.h>
+#include <asm/firmware.h>
 
 struct fixup_entry {
 	unsigned long	mask;
@@ -130,7 +131,7 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
 	}
 }
 
-void do_final_fixups(void)
+static void do_final_fixups(void)
 {
 #if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
 	int *src, *dest;
@@ -151,6 +152,33 @@ void do_final_fixups(void)
 #endif
 }
 
+void apply_feature_fixups(void)
+{
+	struct cpu_spec *spec = *PTRRELOC(&cur_cpu_spec);
+
+	/*
+	 * Apply the CPU-specific and firmware-specific fixups to kernel text
+	 * (nop out sections not relevant to this CPU or this firmware).
+	 */
+	do_feature_fixups(spec->cpu_features,
+			  PTRRELOC(&__start___ftr_fixup),
+			  PTRRELOC(&__stop___ftr_fixup));
+
+	do_feature_fixups(spec->mmu_features,
+			  PTRRELOC(&__start___mmu_ftr_fixup),
+			  PTRRELOC(&__stop___mmu_ftr_fixup));
+
+	do_lwsync_fixups(spec->cpu_features,
+			 PTRRELOC(&__start___lwsync_fixup),
+			 PTRRELOC(&__stop___lwsync_fixup));
+
+#ifdef CONFIG_PPC64
+	do_feature_fixups(powerpc_firmware_features,
+			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
+#endif
+	do_final_fixups();
+}
+
 #ifdef CONFIG_FTR_FIXUP_SELFTEST
 
 #define check(x) \
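apply_feature_fixups() simply sequences the existing fixup passes over the linker-provided entry tables (PTRRELOC makes the table pointers usable before the kernel is relocated). The matching step each pass performs has roughly this shape, a hedged sketch built around the struct fixup_entry fields visible above; patch_section() is a hypothetical stand-in for the real patching helper:

    struct fixup_entry {
            unsigned long mask;     /* feature bits this entry tests */
            unsigned long value;    /* required state of those bits */
            /* ...offsets locating the guarded code and its alternative... */
    };

    static void fixup_pass(unsigned long features,
                           struct fixup_entry *start, struct fixup_entry *end)
    {
            struct fixup_entry *fcur;

            for (fcur = start; fcur < end; fcur++) {
                    /* Features match: leave the guarded code as built. */
                    if ((features & fcur->mask) == fcur->value)
                            continue;
                    /* Otherwise nop out / replace the guarded range
                     * (hypothetical helper standing in for the real one). */
                    patch_section(fcur);
            }
    }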
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index f7deebdf3365..b7b1237d4aa6 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -68,19 +68,3 @@ void __rw_yield(arch_rwlock_t *rw)
 			get_hard_smp_processor_id(holder_cpu), yield_count);
 }
 #endif
-
-void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	smp_mb();
-
-	while (lock->slock) {
-		HMT_low();
-		if (SHARED_PROCESSOR)
-			__spin_yield(lock);
-	}
-	HMT_medium();
-
-	smp_mb();
-}
-
-EXPORT_SYMBOL(arch_spin_unlock_wait);
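For reference while reading the removal: the deleted waiter is the classic unlock-wait shape, with its barrier pairing spelled out below (an annotated restatement of the lines above, nothing new):

    void arch_spin_unlock_wait(arch_spinlock_t *lock)
    {
            smp_mb();               /* prior accesses complete before we sample the lock */

            while (lock->slock) {   /* nonzero while some CPU holds it */
                    HMT_low();      /* drop SMT thread priority while spinning */
                    if (SHARED_PROCESSOR)           /* shared-processor LPAR: */
                            __spin_yield(lock);     /* cede cycles to the holder's vCPU */
            }
            HMT_medium();           /* restore normal thread priority */

            smp_mb();               /* the unlocked observation orders before later accesses */
    }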
diff --git a/arch/powerpc/lib/ppc_ksyms.c b/arch/powerpc/lib/ppc_ksyms.c
index c422812f7405..ae69d846a841 100644
--- a/arch/powerpc/lib/ppc_ksyms.c
+++ b/arch/powerpc/lib/ppc_ksyms.c
@@ -9,11 +9,7 @@ EXPORT_SYMBOL(memmove);
 EXPORT_SYMBOL(memcmp);
 EXPORT_SYMBOL(memchr);
 
-EXPORT_SYMBOL(strcpy);
 EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strcmp);
 EXPORT_SYMBOL(strncmp);
 
 #ifndef CONFIG_GENERIC_CSUM
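The dropped exports track the assembly routines deleted from string.S below; with the architecture overrides gone, the generic C versions presumably take over, and those carry their own exports next to their definitions. Sketch of the usual lib/string.c pattern (not taken from this diff):

    #ifndef __HAVE_ARCH_STRCPY
    char *strcpy(char *dest, const char *src)
    {
            char *tmp = dest;

            /* Copy bytes up to and including the terminating NUL. */
            while ((*dest++ = *src++) != '\0')
                    /* nothing */;
            return tmp;
    }
    EXPORT_SYMBOL(strcpy);
    #endif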
diff --git a/arch/powerpc/lib/rheap.c b/arch/powerpc/lib/rheap.c
index 69abf844c2c3..94058c21a482 100644
--- a/arch/powerpc/lib/rheap.c
+++ b/arch/powerpc/lib/rheap.c
@@ -325,7 +325,7 @@ void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
 }
 EXPORT_SYMBOL_GPL(rh_init);
 
-/* Attach a free memory region, coalesces regions if adjuscent */
+/* Attach a free memory region, coalesces regions if adjacent */
 int rh_attach_region(rh_info_t * info, unsigned long start, int size)
 {
 	rh_block_t *blk;
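rh_attach_region() donates a free range to a remote heap, merging it with any adjacent free block the heap already tracks. Hypothetical usage of the rheap API (the names beyond rh_attach_region and the values are assumptions for illustration):

    rh_info_t *heap = rh_create(16);          /* blocks aligned to 16 bytes */

    /* Donate 16 KiB starting at offset 0x1000; a second, touching
     * donation would coalesce with this one rather than add a block. */
    rh_attach_region(heap, 0x1000, 0x4000);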
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
index c80fb49ce607..beabc68d9a1e 100644
--- a/arch/powerpc/lib/string.S
+++ b/arch/powerpc/lib/string.S
@@ -16,15 +16,6 @@
 	PPC_LONG_ALIGN
 	.text
 
-_GLOBAL(strcpy)
-	addi	r5,r3,-1
-	addi	r4,r4,-1
-1:	lbzu	r0,1(r4)
-	cmpwi	0,r0,0
-	stbu	r0,1(r5)
-	bne	1b
-	blr
-
 /* This clears out any unused part of the destination buffer,
    just as the libc version does. -- paulus */
 _GLOBAL(strncpy)
@@ -33,6 +24,7 @@ _GLOBAL(strncpy)
 	mtctr	r5
 	addi	r6,r3,-1
 	addi	r4,r4,-1
+	.balign 16
 1:	lbzu	r0,1(r4)
 	cmpwi	0,r0,0
 	stbu	r0,1(r6)
@@ -45,36 +37,13 @@ _GLOBAL(strncpy)
 	bdnz	2b
 	blr
 
-_GLOBAL(strcat)
-	addi	r5,r3,-1
-	addi	r4,r4,-1
-1:	lbzu	r0,1(r5)
-	cmpwi	0,r0,0
-	bne	1b
-	addi	r5,r5,-1
-1:	lbzu	r0,1(r4)
-	cmpwi	0,r0,0
-	stbu	r0,1(r5)
-	bne	1b
-	blr
-
-_GLOBAL(strcmp)
-	addi	r5,r3,-1
-	addi	r4,r4,-1
-1:	lbzu	r3,1(r5)
-	cmpwi	1,r3,0
-	lbzu	r0,1(r4)
-	subf.	r3,r0,r3
-	beqlr	1
-	beq	1b
-	blr
-
 _GLOBAL(strncmp)
 	PPC_LCMPI 0,r5,0
 	beq-	2f
 	mtctr	r5
 	addi	r5,r3,-1
 	addi	r4,r4,-1
+	.balign 16
 1:	lbzu	r3,1(r5)
 	cmpwi	1,r3,0
 	lbzu	r0,1(r4)
@@ -85,14 +54,6 @@ _GLOBAL(strncmp)
 2:	li	r3,0
 	blr
 
-_GLOBAL(strlen)
-	addi	r4,r3,-1
-1:	lbzu	r0,1(r4)
-	cmpwi	0,r0,0
-	bne	1b
-	subf	r3,r3,r4
-	blr
-
 #ifdef CONFIG_PPC32
 _GLOBAL(memcmp)
 	PPC_LCMPI 0,r5,0
@@ -114,6 +75,7 @@ _GLOBAL(memchr)
 	beq-	2f
 	mtctr	r5
 	addi	r3,r3,-1
+	.balign 16
 1:	lbzu	r0,1(r3)
 	cmpw	0,r0,r4
 	bdnzf	2,1b
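Two things happen in this file: the byte loops gain .balign 16, so each loop head starts on a 16-byte boundary (padded with nops), keeping the short body within one instruction-fetch block; and strcpy, strcat, strcmp and strlen disappear, presumably in favour of the generic C fallbacks. The generic strlen has this shape, a sketch of the usual lib/string.c version rather than anything in this diff:

    #ifndef __HAVE_ARCH_STRLEN
    size_t strlen(const char *s)
    {
            const char *sc;

            /* Walk to the NUL; the length is the pointer difference,
             * the same subtraction the deleted assembly did with subf. */
            for (sc = s; *sc != '\0'; ++sc)
                    /* nothing */;
            return sc - s;
    }
    #endif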
diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c
index b27e030fc9f8..bf925cdcaca9 100644
--- a/arch/powerpc/lib/vmx-helper.c
+++ b/arch/powerpc/lib/vmx-helper.c
@@ -21,6 +21,7 @@
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
 #include <asm/switch_to.h>
+#include <asm/asm-prototypes.h>
 
 int enter_vmx_usercopy(void)
 {
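The added include gives this file the C prototypes for its own entry points, so the definitions here are type-checked against the declarations that other code (including assembly callers) relies on. The header's pattern is plain declarations; a sketch (only enter_vmx_usercopy is visible in this diff, the rest of that header's contents are assumed):

    /* asm-prototypes.h style: declare symbols crossing the C/asm boundary. */
    int enter_vmx_usercopy(void);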