author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/ppc/lib

Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/ppc/lib')
 -rw-r--r--	arch/ppc/lib/Makefile		|   9
 -rw-r--r--	arch/ppc/lib/checksum.S		| 225
 -rw-r--r--	arch/ppc/lib/dec_and_lock.c	|  46
 -rw-r--r--	arch/ppc/lib/div64.S		|  58
 -rw-r--r--	arch/ppc/lib/locks.c		| 190
 -rw-r--r--	arch/ppc/lib/rheap.c		| 693
 -rw-r--r--	arch/ppc/lib/strcase.c		|  23
 -rw-r--r--	arch/ppc/lib/string.S		| 716
8 files changed, 1960 insertions(+), 0 deletions(-)
diff --git a/arch/ppc/lib/Makefile b/arch/ppc/lib/Makefile
new file mode 100644
index 000000000000..1c380e67d435
--- /dev/null
+++ b/arch/ppc/lib/Makefile
@@ -0,0 +1,9 @@
#
# Makefile for ppc-specific library files.
#

obj-y			:= checksum.o string.o strcase.o dec_and_lock.o div64.o

obj-$(CONFIG_SMP)	+= locks.o
obj-$(CONFIG_8xx)	+= rheap.o
obj-$(CONFIG_CPM2)	+= rheap.o
diff --git a/arch/ppc/lib/checksum.S b/arch/ppc/lib/checksum.S
new file mode 100644
index 000000000000..7874e8a80455
--- /dev/null
+++ b/arch/ppc/lib/checksum.S
@@ -0,0 +1,225 @@
/*
 * This file contains assembly-language implementations
 * of IP-style 1's complement checksum routines.
 *
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au).
 */

#include <linux/sys.h>
#include <asm/processor.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>

	.text

/*
 * ip_fast_csum(buf, len) -- Optimized for IP header
 * len is in words and is always >= 5.
 */
_GLOBAL(ip_fast_csum)
	lwz	r0,0(r3)
	lwzu	r5,4(r3)
	addic.	r4,r4,-2
	addc	r0,r0,r5
	mtctr	r4
	blelr-
1:	lwzu	r4,4(r3)
	adde	r0,r0,r4
	bdnz	1b
	addze	r0,r0		/* add in final carry */
	rlwinm	r3,r0,16,0,31	/* fold two halves together */
	add	r3,r0,r3
	not	r3,r3
	srwi	r3,r3,16
	blr

/*
 * Compute checksum of TCP or UDP pseudo-header:
 *   csum_tcpudp_magic(saddr, daddr, len, proto, sum)
 */
_GLOBAL(csum_tcpudp_magic)
	rlwimi	r5,r6,16,0,15	/* put proto in upper half of len */
	addc	r0,r3,r4	/* add 4 32-bit words together */
	adde	r0,r0,r5
	adde	r0,r0,r7
	addze	r0,r0		/* add in final carry */
	rlwinm	r3,r0,16,0,31	/* fold two halves together */
	add	r3,r0,r3
	not	r3,r3
	srwi	r3,r3,16
	blr

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * csum_partial(buff, len, sum)
 */
_GLOBAL(csum_partial)
	addic	r0,r5,0
	subi	r3,r3,4
	srwi.	r6,r4,2
	beq	3f		/* if we're doing < 4 bytes */
	andi.	r5,r3,2		/* Align buffer to longword boundary */
	beq+	1f
	lhz	r5,4(r3)	/* do 2 bytes to get aligned */
	addi	r3,r3,2
	subi	r4,r4,2
	addc	r0,r0,r5
	srwi.	r6,r4,2		/* # words to do */
	beq	3f
1:	mtctr	r6
2:	lwzu	r5,4(r3)	/* the bdnz has zero overhead, so it should */
	adde	r0,r0,r5	/* be unnecessary to unroll this loop */
	bdnz	2b
	andi.	r4,r4,3
3:	cmpwi	0,r4,2
	blt+	4f
	lhz	r5,4(r3)
	addi	r3,r3,2
	subi	r4,r4,2
	adde	r0,r0,r5
4:	cmpwi	0,r4,1
	bne+	5f
	lbz	r5,4(r3)
	slwi	r5,r5,8		/* Upper byte of word */
	adde	r0,r0,r5
5:	addze	r3,r0		/* add in final carry */
	blr

/*
 * Computes the checksum of a memory block at src, length len,
 * and adds in "sum" (32-bit), while copying the block to dst.
 * If an access exception occurs on src or dst, it stores -EFAULT
 * to *src_err or *dst_err respectively, and (for an error on
 * src) zeroes the rest of dst.
 *
 * csum_partial_copy_generic(src, dst, len, sum, src_err, dst_err)
 */
_GLOBAL(csum_partial_copy_generic)
	addic	r0,r6,0
	subi	r3,r3,4
	subi	r4,r4,4
	srwi.	r6,r5,2
	beq	3f		/* if we're doing < 4 bytes */
	andi.	r9,r4,2		/* Align dst to longword boundary */
	beq+	1f
81:	lhz	r6,4(r3)	/* do 2 bytes to get aligned */
	addi	r3,r3,2
	subi	r5,r5,2
91:	sth	r6,4(r4)
	addi	r4,r4,2
	addc	r0,r0,r6
	srwi.	r6,r5,2		/* # words to do */
	beq	3f
1:	srwi.	r6,r5,4		/* # groups of 4 words to do */
	beq	10f
	mtctr	r6
71:	lwz	r6,4(r3)
72:	lwz	r9,8(r3)
73:	lwz	r10,12(r3)
74:	lwzu	r11,16(r3)
	adde	r0,r0,r6
75:	stw	r6,4(r4)
	adde	r0,r0,r9
76:	stw	r9,8(r4)
	adde	r0,r0,r10
77:	stw	r10,12(r4)
	adde	r0,r0,r11
78:	stwu	r11,16(r4)
	bdnz	71b
10:	rlwinm.	r6,r5,30,30,31	/* # words left to do */
	beq	13f
	mtctr	r6
82:	lwzu	r9,4(r3)
92:	stwu	r9,4(r4)
	adde	r0,r0,r9
	bdnz	82b
13:	andi.	r5,r5,3
3:	cmpwi	0,r5,2
	blt+	4f
83:	lhz	r6,4(r3)
	addi	r3,r3,2
	subi	r5,r5,2
93:	sth	r6,4(r4)
	addi	r4,r4,2
	adde	r0,r0,r6
4:	cmpwi	0,r5,1
	bne+	5f
84:	lbz	r6,4(r3)
94:	stb	r6,4(r4)
	slwi	r6,r6,8		/* Upper byte of word */
	adde	r0,r0,r6
5:	addze	r3,r0		/* add in final carry */
	blr

/* These shouldn't go in the fixup section, since that would
   cause the ex_table addresses to get out of order. */

src_error_4:
	mfctr	r6		/* update # bytes remaining from ctr */
	rlwimi	r5,r6,4,0,27
	b	79f
src_error_1:
	li	r6,0
	subi	r5,r5,2
95:	sth	r6,4(r4)
	addi	r4,r4,2
79:	srwi.	r6,r5,2
	beq	3f
	mtctr	r6
src_error_2:
	li	r6,0
96:	stwu	r6,4(r4)
	bdnz	96b
3:	andi.	r5,r5,3
	beq	src_error
src_error_3:
	li	r6,0
	mtctr	r5
	addi	r4,r4,3
97:	stbu	r6,1(r4)
	bdnz	97b
src_error:
	cmpwi	0,r7,0
	beq	1f
	li	r6,-EFAULT
	stw	r6,0(r7)
1:	addze	r3,r0
	blr

dst_error:
	cmpwi	0,r8,0
	beq	1f
	li	r6,-EFAULT
	stw	r6,0(r8)
1:	addze	r3,r0
	blr

	.section __ex_table,"a"
	.long	81b,src_error_1
	.long	91b,dst_error
	.long	71b,src_error_4
	.long	72b,src_error_4
	.long	73b,src_error_4
	.long	74b,src_error_4
	.long	75b,dst_error
	.long	76b,dst_error
	.long	77b,dst_error
	.long	78b,dst_error
	.long	82b,src_error_2
	.long	92b,dst_error
	.long	83b,src_error_3
	.long	93b,dst_error
	.long	84b,src_error_3
	.long	94b,dst_error
	.long	95b,dst_error
	.long	96b,dst_error
	.long	97b,dst_error
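
The 16-bit fold that ends ip_fast_csum and csum_tcpudp_magic above can also be written in C. The following is an illustrative sketch only, not part of this commit; csum_fold_sketch is a made-up name:

#include <stdint.h>

/* Fold a 32-bit one's-complement sum to 16 bits and complement it,
 * mirroring the rlwinm/add/not/srwi sequence in the assembly above. */
static uint16_t csum_fold_sketch(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add the two halves */
	sum = (sum & 0xffff) + (sum >> 16);	/* fold in any carry */
	return (uint16_t)~sum;			/* one's complement */
}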
diff --git a/arch/ppc/lib/dec_and_lock.c b/arch/ppc/lib/dec_and_lock.c
new file mode 100644
index 000000000000..4ee888070d91
--- /dev/null
+++ b/arch/ppc/lib/dec_and_lock.c
@@ -0,0 +1,46 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * This is an implementation of the notion of "decrement a
 * reference count, and return locked if it decremented to zero".
 *
 * This implementation can be used on any architecture that
 * has a cmpxchg, and where atomic->value is an int holding
 * the value of the atomic (i.e. the high bits aren't used
 * for a lock or anything like that).
 *
 * N.B. ATOMIC_DEC_AND_LOCK gets defined in include/linux/spinlock.h
 * if spinlocks are empty and thus atomic_dec_and_lock is defined
 * to be atomic_dec_and_test - in that case we don't need it
 * defined here as well.
 */

#ifndef ATOMIC_DEC_AND_LOCK
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	int counter;
	int newcount;

	for (;;) {
		counter = atomic_read(atomic);
		newcount = counter - 1;
		if (!newcount)
			break;		/* do it the slow way */

		newcount = cmpxchg(&atomic->counter, counter, newcount);
		if (newcount == counter)
			return 0;
	}

	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}

EXPORT_SYMBOL(_atomic_dec_and_lock);
#endif /* ATOMIC_DEC_AND_LOCK */
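
The function above backs the atomic_dec_and_lock() API. A hedged usage sketch (illustrative only; struct obj, obj_put and the list lock are made-up names, not part of this commit):

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

struct obj {
	atomic_t refcnt;
	/* ... */
};

/* Drop a reference; if it was the last one, the lock comes back
 * held so the object can be unlinked and freed without racing
 * against a concurrent lookup-and-get under the same lock. */
static void obj_put(struct obj *o, spinlock_t *list_lock)
{
	if (atomic_dec_and_lock(&o->refcnt, list_lock)) {
		/* unlink from whatever list list_lock protects ... */
		spin_unlock(list_lock);
		kfree(o);
	}
}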
diff --git a/arch/ppc/lib/div64.S b/arch/ppc/lib/div64.S
new file mode 100644
index 000000000000..3527569e9926
--- /dev/null
+++ b/arch/ppc/lib/div64.S
@@ -0,0 +1,58 @@
/*
 * Divide a 64-bit unsigned number by a 32-bit unsigned number.
 * This routine assumes that the top 32 bits of the dividend are
 * non-zero to start with.
 * On entry, r3 points to the dividend, which gets overwritten with
 * the 64-bit quotient, and r4 contains the divisor.
 * On exit, r3 contains the remainder.
 *
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/ppc_asm.h>
#include <asm/processor.h>

_GLOBAL(__div64_32)
	lwz	r5,0(r3)	# get the dividend into r5/r6
	lwz	r6,4(r3)
	cmplw	r5,r4
	li	r7,0
	li	r8,0
	blt	1f
	divwu	r7,r5,r4	# if dividend.hi >= divisor,
	mullw	r0,r7,r4	# quotient.hi = dividend.hi / divisor
	subf.	r5,r0,r5	# dividend.hi %= divisor
	beq	3f
1:	mr	r11,r5		# here dividend.hi != 0
	andis.	r0,r5,0xc000
	bne	2f
	cntlzw	r0,r5		# we are shifting the dividend right
	li	r10,-1		# to make it < 2^32, and shifting
	srw	r10,r10,r0	# the divisor right the same amount,
	add	r9,r4,r10	# rounding up (so the estimate cannot
	andc	r11,r6,r10	# ever be too large, only too small)
	andc	r9,r9,r10
	or	r11,r5,r11
	rotlw	r9,r9,r0
	rotlw	r11,r11,r0
	divwu	r11,r11,r9	# then we divide the shifted quantities
2:	mullw	r10,r11,r4	# to get an estimate of the quotient,
	mulhwu	r9,r11,r4	# multiply the estimate by the divisor,
	subfc	r6,r10,r6	# take the product from the dividend,
	add	r8,r8,r11	# and add the estimate to the accumulated
	subfe.	r5,r9,r5	# quotient
	bne	1b
3:	cmplw	r6,r4
	blt	4f
	divwu	r0,r6,r4	# perform the remaining 32-bit division
	mullw	r10,r0,r4	# and get the remainder
	add	r8,r8,r0
	subf	r6,r10,r6
4:	stw	r7,0(r3)	# return the quotient in *r3
	stw	r8,4(r3)
	mr	r3,r6		# return the remainder in r3
	blr
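
The calling convention above can be modeled in C as the sketch below (illustrative only, not part of this commit; div64_32_model is a made-up name): the 64-bit quotient overwrites the dividend in place and the 32-bit remainder is the return value.

#include <stdint.h>

static uint32_t div64_32_model(uint64_t *np, uint32_t divisor)
{
	uint64_t n = *np;

	*np = n / divisor;		/* quotient replaces the dividend */
	return (uint32_t)(n % divisor);	/* remainder comes back in "r3" */
}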
diff --git a/arch/ppc/lib/locks.c b/arch/ppc/lib/locks.c
new file mode 100644
index 000000000000..694163d696d8
--- /dev/null
+++ b/arch/ppc/lib/locks.c
@@ -0,0 +1,190 @@
/*
 * Locks for smp ppc
 *
 * Written by Cort Dougan (cort@cs.nmt.edu)
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/ppc_asm.h>
#include <asm/smp.h>

#ifdef CONFIG_DEBUG_SPINLOCK

#undef INIT_STUCK
#define INIT_STUCK 200000000 /*0xffffffff*/

/*
 * Try to acquire a spinlock.
 * Only does the stwcx. if the load returned 0 - the Programming
 * Environments Manual suggests not doing unnecessary stwcx.'s
 * since they may inhibit forward progress by other CPUs in getting
 * a lock.
 */
static inline unsigned long __spin_trylock(volatile unsigned long *lock)
{
	unsigned long ret;

	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	bne	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%2,0,%1\n\
	bne-	1b\n\
	isync\n\
2:"
	: "=&r"(ret)
	: "r"(lock), "r"(1)
	: "cr0", "memory");

	return ret;
}

void _raw_spin_lock(spinlock_t *lock)
{
	int cpu = smp_processor_id();
	unsigned int stuck = INIT_STUCK;
	while (__spin_trylock(&lock->lock)) {
		while ((unsigned volatile long)lock->lock != 0) {
			if (!--stuck) {
				printk("_spin_lock(%p) CPU#%d NIP %p"
				       " holder: cpu %ld pc %08lX\n",
				       lock, cpu, __builtin_return_address(0),
				       lock->owner_cpu, lock->owner_pc);
				stuck = INIT_STUCK;
				/* steal the lock */
				/*xchg_u32((void *)&lock->lock,0);*/
			}
		}
	}
	lock->owner_pc = (unsigned long)__builtin_return_address(0);
	lock->owner_cpu = cpu;
}
EXPORT_SYMBOL(_raw_spin_lock);

int _raw_spin_trylock(spinlock_t *lock)
{
	if (__spin_trylock(&lock->lock))
		return 0;
	lock->owner_cpu = smp_processor_id();
	lock->owner_pc = (unsigned long)__builtin_return_address(0);
	return 1;
}
EXPORT_SYMBOL(_raw_spin_trylock);

void _raw_spin_unlock(spinlock_t *lp)
{
	if ( !lp->lock )
		printk("_spin_unlock(%p): no lock cpu %d curr PC %p %s/%d\n",
		       lp, smp_processor_id(), __builtin_return_address(0),
		       current->comm, current->pid);
	if ( lp->owner_cpu != smp_processor_id() )
		printk("_spin_unlock(%p): cpu %d trying clear of cpu %d pc %lx val %lx\n",
		       lp, smp_processor_id(), (int)lp->owner_cpu,
		       lp->owner_pc, lp->lock);
	lp->owner_pc = lp->owner_cpu = 0;
	wmb();
	lp->lock = 0;
}
EXPORT_SYMBOL(_raw_spin_unlock);

/*
 * For rwlocks, zero is unlocked, -1 is write-locked,
 * positive is read-locked.
 */
static __inline__ int __read_trylock(rwlock_t *rw)
{
	signed int tmp;

	__asm__ __volatile__(
"2:	lwarx	%0,0,%1		# __read_trylock\n\
	addic.	%0,%0,1\n\
	ble-	1f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	2b\n\
	isync\n\
1:"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");

	return tmp;
}

int _raw_read_trylock(rwlock_t *rw)
{
	return __read_trylock(rw) > 0;
}
EXPORT_SYMBOL(_raw_read_trylock);

void _raw_read_lock(rwlock_t *rw)
{
	unsigned int stuck;

	while (__read_trylock(rw) <= 0) {
		stuck = INIT_STUCK;
		while (!read_can_lock(rw)) {
			if (--stuck == 0) {
				printk("_read_lock(%p) CPU#%d lock %d\n",
				       rw, _smp_processor_id(), rw->lock);
				stuck = INIT_STUCK;
			}
		}
	}
}
EXPORT_SYMBOL(_raw_read_lock);

void _raw_read_unlock(rwlock_t *rw)
{
	if ( rw->lock == 0 )
		printk("_read_unlock(): %s/%d (nip %08lX) lock %d\n",
		       current->comm, current->pid, current->thread.regs->nip,
		       rw->lock);
	wmb();
	atomic_dec((atomic_t *) &(rw)->lock);
}
EXPORT_SYMBOL(_raw_read_unlock);

void _raw_write_lock(rwlock_t *rw)
{
	unsigned int stuck;

	while (cmpxchg(&rw->lock, 0, -1) != 0) {
		stuck = INIT_STUCK;
		while (!write_can_lock(rw)) {
			if (--stuck == 0) {
				printk("write_lock(%p) CPU#%d lock %d\n",
				       rw, _smp_processor_id(), rw->lock);
				stuck = INIT_STUCK;
			}
		}
	}
	wmb();
}
EXPORT_SYMBOL(_raw_write_lock);

int _raw_write_trylock(rwlock_t *rw)
{
	if (cmpxchg(&rw->lock, 0, -1) != 0)
		return 0;
	wmb();
	return 1;
}
EXPORT_SYMBOL(_raw_write_trylock);

void _raw_write_unlock(rwlock_t *rw)
{
	if (rw->lock >= 0)
		printk("_write_unlock(): %s/%d (nip %08lX) lock %d\n",
		       current->comm, current->pid, current->thread.regs->nip,
		       rw->lock);
	wmb();
	rw->lock = 0;
}
EXPORT_SYMBOL(_raw_write_unlock);

#endif
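
The lwarx/stwcx. sequence in __spin_trylock above behaves like a compare-and-swap on the lock word. Below is a C11 analogue, as an illustrative sketch only (spin_trylock_model is a made-up name, and the real code additionally needs the isync barrier and the PPC405 erratum workaround):

#include <stdatomic.h>

static unsigned long spin_trylock_model(_Atomic unsigned long *lock)
{
	unsigned long expected = 0;

	/* store 1 only if the lock word was observed to be 0, matching
	 * the "don't do unnecessary stwcx." advice in the comment above */
	if (atomic_compare_exchange_strong(lock, &expected, 1UL))
		return 0;		/* old value 0: lock acquired */
	return expected;		/* nonzero: already held */
}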
diff --git a/arch/ppc/lib/rheap.c b/arch/ppc/lib/rheap.c
new file mode 100644
index 000000000000..42c5de2c898f
--- /dev/null
+++ b/arch/ppc/lib/rheap.c
@@ -0,0 +1,693 @@
/*
 * arch/ppc/syslib/rheap.c
 *
 * A Remote Heap. Remote means that we don't touch the memory that the
 * heap points to. Normal heap implementations use the memory they manage
 * to place their list. We cannot do that because the memory we manage may
 * have special properties, for example it is uncacheable or of different
 * endianness.
 *
 * Author: Pantelis Antoniou <panto@intracom.gr>
 *
 * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/rheap.h>

/*
 * Fixup a list_head, needed when copying lists. If the pointers fall
 * between s and e, apply the delta. This assumes that
 * sizeof(struct list_head *) == sizeof(unsigned long *).
 */
static inline void fixup(unsigned long s, unsigned long e, int d,
			 struct list_head *l)
{
	unsigned long *pp;

	pp = (unsigned long *)&l->next;
	if (*pp >= s && *pp < e)
		*pp += d;

	pp = (unsigned long *)&l->prev;
	if (*pp >= s && *pp < e)
		*pp += d;
}

/* Grow the allocated blocks */
static int grow(rh_info_t * info, int max_blocks)
{
	rh_block_t *block, *blk;
	int i, new_blocks;
	int delta;
	unsigned long blks, blke;

	if (max_blocks <= info->max_blocks)
		return -EINVAL;

	new_blocks = max_blocks - info->max_blocks;

	block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_KERNEL);
	if (block == NULL)
		return -ENOMEM;

	if (info->max_blocks > 0) {

		/* copy old block area */
		memcpy(block, info->block,
		       sizeof(rh_block_t) * info->max_blocks);

		delta = (char *)block - (char *)info->block;

		/* and fixup list pointers */
		blks = (unsigned long)info->block;
		blke = (unsigned long)(info->block + info->max_blocks);

		for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
			fixup(blks, blke, delta, &blk->list);

		fixup(blks, blke, delta, &info->empty_list);
		fixup(blks, blke, delta, &info->free_list);
		fixup(blks, blke, delta, &info->taken_list);

		/* free the old allocated memory */
		if ((info->flags & RHIF_STATIC_BLOCK) == 0)
			kfree(info->block);
	}

	info->block = block;
	info->empty_slots += new_blocks;
	info->max_blocks = max_blocks;
	info->flags &= ~RHIF_STATIC_BLOCK;

	/* add all new slots to the empty list; the new slots begin right
	 * after the old entries that were copied above */
	for (i = 0, blk = block + max_blocks - new_blocks; i < new_blocks; i++, blk++)
		list_add(&blk->list, &info->empty_list);

	return 0;
}

/*
 * Assure at least the required number of empty slots. If this function
 * causes a grow in the block area then all pointers kept to the block
 * area are invalid!
 */
static int assure_empty(rh_info_t * info, int slots)
{
	int max_blocks;

	/* This function is not meant to be used to grow uncontrollably */
	if (slots >= 4)
		return -EINVAL;

	/* Enough space */
	if (info->empty_slots >= slots)
		return 0;

	/* Next 16 sized block */
	max_blocks = ((info->max_blocks + slots) + 15) & ~15;

	return grow(info, max_blocks);
}

static rh_block_t *get_slot(rh_info_t * info)
{
	rh_block_t *blk;

	/* If no more free slots, and failure to extend. */
	/* XXX: You should have called assure_empty before */
	if (info->empty_slots == 0) {
		printk(KERN_ERR "rh: out of slots; crash is imminent.\n");
		return NULL;
	}

	/* Get empty slot to use */
	blk = list_entry(info->empty_list.next, rh_block_t, list);
	list_del_init(&blk->list);
	info->empty_slots--;

	/* Initialize */
	blk->start = NULL;
	blk->size = 0;
	blk->owner = NULL;

	return blk;
}

static inline void release_slot(rh_info_t * info, rh_block_t * blk)
{
	list_add(&blk->list, &info->empty_list);
	info->empty_slots++;
}

static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
{
	rh_block_t *blk;
	rh_block_t *before;
	rh_block_t *after;
	rh_block_t *next;
	int size;
	unsigned long s, e, bs, be;
	struct list_head *l;

	/* We assume that they are aligned properly */
	size = blkn->size;
	s = (unsigned long)blkn->start;
	e = s + size;

	/* Find the blocks immediately before and after the given one
	 * (if any) */
	before = NULL;
	after = NULL;
	next = NULL;

	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);

		bs = (unsigned long)blk->start;
		be = bs + blk->size;

		if (next == NULL && s >= bs)
			next = blk;

		if (be == s)
			before = blk;

		if (e == bs)
			after = blk;

		/* If both are not null, break now */
		if (before != NULL && after != NULL)
			break;
	}

	/* Now check if they are really adjacent */
	if (before != NULL && s != (unsigned long)before->start + before->size)
		before = NULL;

	if (after != NULL && e != (unsigned long)after->start)
		after = NULL;

	/* No coalescing; list insert and return */
	if (before == NULL && after == NULL) {

		if (next != NULL)
			list_add(&blkn->list, &next->list);
		else
			list_add(&blkn->list, &info->free_list);

		return;
	}

	/* We don't need it anymore */
	release_slot(info, blkn);

	/* Grow the before block */
	if (before != NULL && after == NULL) {
		before->size += size;
		return;
	}

	/* Grow the after block backwards */
	if (before == NULL && after != NULL) {
		after->start = (int8_t *)after->start - size;
		after->size += size;
		return;
	}

	/* Grow the before block, and release the after block */
	before->size += size + after->size;
	list_del(&after->list);
	release_slot(info, after);
}

static void attach_taken_block(rh_info_t * info, rh_block_t * blkn)
{
	rh_block_t *blk;
	struct list_head *l;

	/* Find the block immediately before the given one (if any) */
	list_for_each(l, &info->taken_list) {
		blk = list_entry(l, rh_block_t, list);
		if (blk->start > blkn->start) {
			list_add_tail(&blkn->list, &blk->list);
			return;
		}
	}

	list_add_tail(&blkn->list, &info->taken_list);
}

/*
 * Create a remote heap dynamically. Note that no memory for the blocks
 * is allocated; it will be allocated upon the first allocation request.
 */
rh_info_t *rh_create(unsigned int alignment)
{
	rh_info_t *info;

	/* Alignment must be a power of two */
	if ((alignment & (alignment - 1)) != 0)
		return ERR_PTR(-EINVAL);

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		return ERR_PTR(-ENOMEM);

	info->alignment = alignment;

	/* Initially everything is empty */
	info->block = NULL;
	info->max_blocks = 0;
	info->empty_slots = 0;
	info->flags = 0;

	INIT_LIST_HEAD(&info->empty_list);
	INIT_LIST_HEAD(&info->free_list);
	INIT_LIST_HEAD(&info->taken_list);

	return info;
}

/*
 * Destroy a dynamically created remote heap. Deallocate only if the areas
 * are not static
 */
void rh_destroy(rh_info_t * info)
{
	if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL)
		kfree(info->block);

	if ((info->flags & RHIF_STATIC_INFO) == 0)
		kfree(info);
}

/*
 * Initialize in place a remote heap info block. This is needed to support
 * operation very early in the startup of the kernel, when it is not yet safe
 * to call kmalloc.
 */
void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
	     rh_block_t * block)
{
	int i;
	rh_block_t *blk;

	/* Alignment must be a power of two */
	if ((alignment & (alignment - 1)) != 0)
		return;

	info->alignment = alignment;

	/* Initially everything is empty */
	info->block = block;
	info->max_blocks = max_blocks;
	info->empty_slots = max_blocks;
	info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK;

	INIT_LIST_HEAD(&info->empty_list);
	INIT_LIST_HEAD(&info->free_list);
	INIT_LIST_HEAD(&info->taken_list);

	/* Add all the slots to the empty list */
	for (i = 0, blk = block; i < max_blocks; i++, blk++)
		list_add(&blk->list, &info->empty_list);
}

/* Attach a free memory region; coalesces regions if adjacent */
int rh_attach_region(rh_info_t * info, void *start, int size)
{
	rh_block_t *blk;
	unsigned long s, e, m;
	int r;

	/* The region must be aligned */
	s = (unsigned long)start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	/* Take final values */
	start = (void *)s;
	size = (int)(e - s);

	/* Grow the blocks, if needed */
	r = assure_empty(info, 1);
	if (r < 0)
		return r;

	blk = get_slot(info);
	blk->start = start;
	blk->size = size;
	blk->owner = NULL;

	attach_free_block(info, blk);

	return 0;
}

/* Detach given address range; splits free block if needed. */
void *rh_detach_region(rh_info_t * info, void *start, int size)
{
	struct list_head *l;
	rh_block_t *blk, *newblk;
	unsigned long s, e, m, bs, be;

	/* Validate size */
	if (size <= 0)
		return ERR_PTR(-EINVAL);

	/* The region must be aligned */
	s = (unsigned long)start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (assure_empty(info, 1) < 0)
		return ERR_PTR(-ENOMEM);

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = (unsigned long)blk->start;
		be = (unsigned long)blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		blk = NULL;
	}

	if (blk == NULL)
		return ERR_PTR(-ENOMEM);

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Delete from free list, release slot */
		list_del(&blk->list);
		release_slot(info, blk);
		return (void *)s;
	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		if (bs == s)
			blk->start = (int8_t *)blk->start + size;
		blk->size -= size;

	} else {
		/* The front free fragment */
		blk->size = s - bs;

		/* the back free fragment */
		newblk = get_slot(info);
		newblk->start = (void *)e;
		newblk->size = be - e;

		list_add(&newblk->list, &blk->list);
	}

	return (void *)s;
}

void *rh_alloc(rh_info_t * info, int size, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk;
	rh_block_t *newblk;
	void *start;

	/* Validate size */
	if (size <= 0)
		return ERR_PTR(-EINVAL);

	/* Align to configured alignment */
	size = (size + (info->alignment - 1)) & ~(info->alignment - 1);

	if (assure_empty(info, 1) < 0)
		return ERR_PTR(-ENOMEM);

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		if (size <= blk->size)
			break;
		blk = NULL;
	}

	if (blk == NULL)
		return ERR_PTR(-ENOMEM);

	/* Just fits */
	if (blk->size == size) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		blk->owner = owner;
		start = blk->start;

		attach_taken_block(info, blk);

		return start;
	}

	newblk = get_slot(info);
	newblk->start = blk->start;
	newblk->size = size;
	newblk->owner = owner;

	/* blk still in free list, with updated start, size */
	blk->start = (int8_t *)blk->start + size;
	blk->size -= size;

	start = newblk->start;

	attach_taken_block(info, newblk);

	return start;
}

/* allocate at precisely the given address */
void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk, *newblk1, *newblk2;
	unsigned long s, e, m, bs, be;

	/* Validate size */
	if (size <= 0)
		return ERR_PTR(-EINVAL);

	/* The region must be aligned */
	s = (unsigned long)start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (assure_empty(info, 2) < 0)
		return ERR_PTR(-ENOMEM);

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = (unsigned long)blk->start;
		be = (unsigned long)blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		blk = NULL;	/* reset so a failed search is detected below */
	}

	if (blk == NULL)
		return ERR_PTR(-ENOMEM);

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		blk->owner = owner;

		start = blk->start;
		attach_taken_block(info, blk);

		return start;
	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		if (bs == s)
			blk->start = (int8_t *)blk->start + size;
		blk->size -= size;

	} else {
		/* The front free fragment */
		blk->size = s - bs;

		/* The back free fragment */
		newblk2 = get_slot(info);
		newblk2->start = (void *)e;
		newblk2->size = be - e;

		list_add(&newblk2->list, &blk->list);
	}

	newblk1 = get_slot(info);
	newblk1->start = (void *)s;
	newblk1->size = e - s;
	newblk1->owner = owner;

	start = newblk1->start;
	attach_taken_block(info, newblk1);

	return start;
}

int rh_free(rh_info_t * info, void *start)
{
	rh_block_t *blk, *blk2;
	struct list_head *l;
	int size;

	/* Linear search for block */
	blk = NULL;
	list_for_each(l, &info->taken_list) {
		blk2 = list_entry(l, rh_block_t, list);
		if (start < blk2->start)
			break;
		blk = blk2;
	}

	if (blk == NULL || start > (blk->start + blk->size))
		return -EINVAL;

	/* Remove from taken list */
	list_del(&blk->list);

	/* Get size of freed block */
	size = blk->size;
	attach_free_block(info, blk);

	return size;
}

int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
{
	rh_block_t *blk;
	struct list_head *l;
	struct list_head *h;
	int nr;

	switch (what) {

	case RHGS_FREE:
		h = &info->free_list;
		break;

	case RHGS_TAKEN:
		h = &info->taken_list;
		break;

	default:
		return -EINVAL;
	}

	/* Linear search for block */
	nr = 0;
	list_for_each(l, h) {
		blk = list_entry(l, rh_block_t, list);
		if (stats != NULL && nr < max_stats) {
			stats->start = blk->start;
			stats->size = blk->size;
			stats->owner = blk->owner;
			stats++;
		}
		nr++;
	}

	return nr;
}

int rh_set_owner(rh_info_t * info, void *start, const char *owner)
{
	rh_block_t *blk, *blk2;
	struct list_head *l;
	int size;

	/* Linear search for block */
	blk = NULL;
	list_for_each(l, &info->taken_list) {
		blk2 = list_entry(l, rh_block_t, list);
		if (start < blk2->start)
			break;
		blk = blk2;
	}

	if (blk == NULL || start > (blk->start + blk->size))
		return -EINVAL;

	blk->owner = owner;
	size = blk->size;

	return size;
}

void rh_dump(rh_info_t * info)
{
	static rh_stats_t st[32];	/* XXX maximum 32 blocks */
	int maxnr;
	int i, nr;

	maxnr = sizeof(st) / sizeof(st[0]);

	printk(KERN_INFO
	       "info @0x%p (%d slots empty / %d max)\n",
	       info, info->empty_slots, info->max_blocks);

	printk(KERN_INFO "  Free:\n");
	nr = rh_get_stats(info, RHGS_FREE, maxnr, st);
	if (nr > maxnr)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       "    0x%p-0x%p (%u)\n",
		       st[i].start, (int8_t *) st[i].start + st[i].size,
		       st[i].size);
	printk(KERN_INFO "\n");

	printk(KERN_INFO "  Taken:\n");
	nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st);
	if (nr > maxnr)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       "    0x%p-0x%p (%u)  %s\n",
		       st[i].start, (int8_t *) st[i].start + st[i].size,
		       st[i].size, st[i].owner != NULL ? st[i].owner : "");
	printk(KERN_INFO "\n");
}

void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
{
	printk(KERN_INFO
	       "blk @0x%p: 0x%p-0x%p (%u)\n",
	       blk, blk->start, (int8_t *) blk->start + blk->size, blk->size);
}
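
Taken together, the API above is used roughly as in the following sketch (illustrative only, not part of this commit; MY_REGION_BASE and MY_REGION_SIZE are made-up placeholders for a real device-memory region):

#include <linux/err.h>
#include <asm/rheap.h>

#define MY_REGION_BASE	0xf0000000UL	/* hypothetical uncached region */
#define MY_REGION_SIZE	0x4000		/* hypothetical region size */

static void rheap_usage_sketch(void)
{
	rh_info_t *rh;
	void *p;

	rh = rh_create(16);		/* alignment must be a power of two */
	if (IS_ERR(rh))
		return;

	/* hand over a region; the heap tracks it but never touches it */
	rh_attach_region(rh, (void *)MY_REGION_BASE, MY_REGION_SIZE);

	p = rh_alloc(rh, 256, "demo");	/* 256 bytes, owner tag "demo" */
	if (!IS_ERR(p))
		rh_free(rh, p);

	rh_destroy(rh);
}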
diff --git a/arch/ppc/lib/strcase.c b/arch/ppc/lib/strcase.c
new file mode 100644
index 000000000000..36b521091bbc
--- /dev/null
+++ b/arch/ppc/lib/strcase.c
@@ -0,0 +1,23 @@
#include <linux/ctype.h>

int strcasecmp(const char *s1, const char *s2)
{
	int c1, c2;

	do {
		c1 = tolower(*s1++);
		c2 = tolower(*s2++);
	} while (c1 == c2 && c1 != 0);
	return c1 - c2;
}

int strncasecmp(const char *s1, const char *s2, int n)
{
	int c1, c2;

	do {
		c1 = tolower(*s1++);
		c2 = tolower(*s2++);
	} while ((--n > 0) && c1 == c2 && c1 != 0);
	return c1 - c2;
}
diff --git a/arch/ppc/lib/string.S b/arch/ppc/lib/string.S
new file mode 100644
index 000000000000..8d08a2eb225e
--- /dev/null
+++ b/arch/ppc/lib/string.S
@@ -0,0 +1,716 @@
/*
 * String handling functions for PowerPC.
 *
 * Copyright (C) 1996 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>

#define COPY_16_BYTES		\
	lwz	r7,4(r4);	\
	lwz	r8,8(r4);	\
	lwz	r9,12(r4);	\
	lwzu	r10,16(r4);	\
	stw	r7,4(r6);	\
	stw	r8,8(r6);	\
	stw	r9,12(r6);	\
	stwu	r10,16(r6)

#define COPY_16_BYTES_WITHEX(n)	\
8 ## n ## 0:			\
	lwz	r7,4(r4);	\
8 ## n ## 1:			\
	lwz	r8,8(r4);	\
8 ## n ## 2:			\
	lwz	r9,12(r4);	\
8 ## n ## 3:			\
	lwzu	r10,16(r4);	\
8 ## n ## 4:			\
	stw	r7,4(r6);	\
8 ## n ## 5:			\
	stw	r8,8(r6);	\
8 ## n ## 6:			\
	stw	r9,12(r6);	\
8 ## n ## 7:			\
	stwu	r10,16(r6)

#define COPY_16_BYTES_EXCODE(n)			\
9 ## n ## 0:					\
	addi	r5,r5,-(16 * n);		\
	b	104f;				\
9 ## n ## 1:					\
	addi	r5,r5,-(16 * n);		\
	b	105f;				\
.section __ex_table,"a";			\
	.align	2;				\
	.long	8 ## n ## 0b,9 ## n ## 0b;	\
	.long	8 ## n ## 1b,9 ## n ## 0b;	\
	.long	8 ## n ## 2b,9 ## n ## 0b;	\
	.long	8 ## n ## 3b,9 ## n ## 0b;	\
	.long	8 ## n ## 4b,9 ## n ## 1b;	\
	.long	8 ## n ## 5b,9 ## n ## 1b;	\
	.long	8 ## n ## 6b,9 ## n ## 1b;	\
	.long	8 ## n ## 7b,9 ## n ## 1b;	\
	.text

	.text
	.stabs	"arch/ppc/lib/",N_SO,0,0,0f
	.stabs	"string.S",N_SO,0,0,0f

CACHELINE_BYTES = L1_CACHE_LINE_SIZE
LG_CACHELINE_BYTES = LG_L1_CACHE_LINE_SIZE
CACHELINE_MASK = (L1_CACHE_LINE_SIZE-1)

_GLOBAL(strcpy)
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r5)
	bne	1b
	blr

/* This clears out any unused part of the destination buffer,
   just as the libc version does.  -- paulus */
_GLOBAL(strncpy)
	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r6,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r6)
	bdnzf	2,1b		/* dec ctr, branch if ctr != 0 && !cr0.eq */
	bnelr			/* if we didn't hit a null char, we're done */
	mfctr	r5
	cmpwi	0,r5,0		/* any space left in destination buffer? */
	beqlr			/* we know r0 == 0 here */
2:	stbu	r0,1(r6)	/* clear it out if so */
	bdnz	2b
	blr

_GLOBAL(strcat)
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r5)
	cmpwi	0,r0,0
	bne	1b
	addi	r5,r5,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r5)
	bne	1b
	blr

_GLOBAL(strcmp)
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r3,1(r5)
	cmpwi	1,r3,0
	lbzu	r0,1(r4)
	subf.	r3,r0,r3
	beqlr	1
	beq	1b
	blr

_GLOBAL(strlen)
	addi	r4,r3,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	bne	1b
	subf	r3,r3,r4
	blr

/*
 * Use dcbz on the complete cache lines in the destination
 * to set them to zero.  This requires that the destination
 * area is cacheable.  -- paulus
 */
_GLOBAL(cacheable_memzero)
	mr	r5,r4
	li	r4,0
	addi	r6,r3,-4
	cmplwi	0,r5,4
	blt	7f
	stwu	r4,4(r6)
	beqlr
	andi.	r0,r6,3
	add	r5,r0,r5
	subf	r6,r0,r6
	clrlwi	r7,r6,32-LG_CACHELINE_BYTES
	add	r8,r7,r5
	srwi	r9,r8,LG_CACHELINE_BYTES
	addic.	r9,r9,-1	/* total number of complete cachelines */
	ble	2f
	xori	r0,r7,CACHELINE_MASK & ~3
	srwi.	r0,r0,2
	beq	3f
	mtctr	r0
4:	stwu	r4,4(r6)
	bdnz	4b
3:	mtctr	r9
	li	r7,4
#if !defined(CONFIG_8xx)
10:	dcbz	r7,r6
#else
10:	stw	r4, 4(r6)
	stw	r4, 8(r6)
	stw	r4, 12(r6)
	stw	r4, 16(r6)
#if CACHE_LINE_SIZE >= 32
	stw	r4, 20(r6)
	stw	r4, 24(r6)
	stw	r4, 28(r6)
	stw	r4, 32(r6)
#endif /* CACHE_LINE_SIZE */
#endif
	addi	r6,r6,CACHELINE_BYTES
	bdnz	10b
	clrlwi	r5,r8,32-LG_CACHELINE_BYTES
	addi	r5,r5,4
2:	srwi	r0,r5,2
	mtctr	r0
	bdz	6f
1:	stwu	r4,4(r6)
	bdnz	1b
6:	andi.	r5,r5,3
7:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r6,r6,3
8:	stbu	r4,1(r6)
	bdnz	8b
	blr

_GLOBAL(memset)
	rlwimi	r4,r4,8,16,23
	rlwimi	r4,r4,16,0,15
	addi	r6,r3,-4
	cmplwi	0,r5,4
	blt	7f
	stwu	r4,4(r6)
	beqlr
	andi.	r0,r6,3
	add	r5,r0,r5
	subf	r6,r0,r6
	srwi	r0,r5,2
	mtctr	r0
	bdz	6f
1:	stwu	r4,4(r6)
	bdnz	1b
6:	andi.	r5,r5,3
7:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r6,r6,3
8:	stbu	r4,1(r6)
	bdnz	8b
	blr

/*
 * This version uses dcbz on the complete cache lines in the
 * destination area to reduce memory traffic.  This requires that
 * the destination area is cacheable.
 * We only use this version if the source and dest don't overlap.
 * -- paulus.
 */
_GLOBAL(cacheable_memcpy)
	add	r7,r3,r5	/* test if the src & dst overlap */
	add	r8,r4,r5
	cmplw	0,r4,r7
	cmplw	1,r3,r8
	crand	0,0,4		/* cr0.lt &= cr1.lt */
	blt	memcpy		/* if regions overlap */

	addi	r4,r4,-4
	addi	r6,r3,-4
	neg	r0,r3
	andi.	r0,r0,CACHELINE_MASK	/* # bytes to start of cache line */
	beq	58f

	cmplw	0,r5,r0		/* is this more than total to do? */
	blt	63f		/* if not much to do */
	andi.	r8,r0,3		/* get it word-aligned first */
	subf	r5,r0,r5
	mtctr	r8
	beq+	61f
70:	lbz	r9,4(r4)	/* do some bytes */
	stb	r9,4(r6)
	addi	r4,r4,1
	addi	r6,r6,1
	bdnz	70b
61:	srwi.	r0,r0,2
	mtctr	r0
	beq	58f
72:	lwzu	r9,4(r4)	/* do some words */
	stwu	r9,4(r6)
	bdnz	72b

58:	srwi.	r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
	clrlwi	r5,r5,32-LG_CACHELINE_BYTES
	li	r11,4
	mtctr	r0
	beq	63f
53:
#if !defined(CONFIG_8xx)
	dcbz	r11,r6
#endif
	COPY_16_BYTES
#if L1_CACHE_LINE_SIZE >= 32
	COPY_16_BYTES
#if L1_CACHE_LINE_SIZE >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_LINE_SIZE >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	53b

63:	srwi.	r0,r5,2
	mtctr	r0
	beq	64f
30:	lwzu	r0,4(r4)
	stwu	r0,4(r6)
	bdnz	30b

64:	andi.	r0,r5,3
	mtctr	r0
	beq+	65f
40:	lbz	r0,4(r4)
	stb	r0,4(r6)
	addi	r4,r4,1
	addi	r6,r6,1
	bdnz	40b
65:	blr

_GLOBAL(memmove)
	cmplw	0,r3,r4
	bgt	backwards_memcpy
	/* fall through */

_GLOBAL(memcpy)
	srwi.	r7,r5,3
	addi	r6,r3,-4
	addi	r4,r4,-4
	beq	2f		/* if less than 8 bytes to do */
	andi.	r0,r6,3		/* get dest word aligned */
	mtctr	r7
	bne	5f
1:	lwz	r7,4(r4)
	lwzu	r8,8(r4)
	stw	r7,4(r6)
	stwu	r8,8(r6)
	bdnz	1b
	andi.	r5,r5,7
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,4(r4)
	addi	r5,r5,-4
	stwu	r0,4(r6)
3:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r4,r4,3
	addi	r6,r6,3
4:	lbzu	r0,1(r4)
	stbu	r0,1(r6)
	bdnz	4b
	blr
5:	subfic	r0,r0,4
	mtctr	r0
6:	lbz	r7,4(r4)
	addi	r4,r4,1
	stb	r7,4(r6)
	addi	r6,r6,1
	bdnz	6b
	subf	r5,r0,r5
	rlwinm.	r7,r5,32-3,3,31
	beq	2b
	mtctr	r7
	b	1b

_GLOBAL(backwards_memcpy)
	rlwinm.	r7,r5,32-3,3,31	/* r0 = r5 >> 3 */
	add	r6,r3,r5
	add	r4,r4,r5
	beq	2f
	andi.	r0,r6,3
	mtctr	r7
	bne	5f
1:	lwz	r7,-4(r4)
	lwzu	r8,-8(r4)
	stw	r7,-4(r6)
	stwu	r8,-8(r6)
	bdnz	1b
	andi.	r5,r5,7
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,-4(r4)
	subi	r5,r5,4
	stwu	r0,-4(r6)
3:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
4:	lbzu	r0,-1(r4)
	stbu	r0,-1(r6)
	bdnz	4b
	blr
5:	mtctr	r0
6:	lbzu	r7,-1(r4)
	stbu	r7,-1(r6)
	bdnz	6b
	subf	r5,r0,r5
	rlwinm.	r7,r5,32-3,3,31
	beq	2b
	mtctr	r7
	b	1b

_GLOBAL(memcmp)
	cmpwi	0,r5,0
	ble-	2f
	mtctr	r5
	addi	r6,r3,-1
	addi	r4,r4,-1
1:	lbzu	r3,1(r6)
	lbzu	r0,1(r4)
	subf.	r3,r0,r3
	bdnzt	2,1b
	blr
2:	li	r3,0
	blr

_GLOBAL(memchr)
	cmpwi	0,r5,0
	ble-	2f
	mtctr	r5
	addi	r3,r3,-1
1:	lbzu	r0,1(r3)
	cmpw	0,r0,r4
	bdnzf	2,1b
	beqlr
2:	li	r3,0
	blr

_GLOBAL(__copy_tofrom_user)
	addi	r4,r4,-4
	addi	r6,r3,-4
	neg	r0,r3
	andi.	r0,r0,CACHELINE_MASK	/* # bytes to start of cache line */
	beq	58f

	cmplw	0,r5,r0		/* is this more than total to do? */
	blt	63f		/* if not much to do */
	andi.	r8,r0,3		/* get it word-aligned first */
	mtctr	r8
	beq+	61f
70:	lbz	r9,4(r4)	/* do some bytes */
71:	stb	r9,4(r6)
	addi	r4,r4,1
	addi	r6,r6,1
	bdnz	70b
61:	subf	r5,r0,r5
	srwi.	r0,r0,2
	mtctr	r0
	beq	58f
72:	lwzu	r9,4(r4)	/* do some words */
73:	stwu	r9,4(r6)
	bdnz	72b

	.section __ex_table,"a"
	.align	2
	.long	70b,100f
	.long	71b,101f
	.long	72b,102f
	.long	73b,103f
	.text

58:	srwi.	r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
	clrlwi	r5,r5,32-LG_CACHELINE_BYTES
	li	r11,4
	beq	63f

#ifdef CONFIG_8xx
	/* Don't use prefetch on 8xx */
	mtctr	r0
53:	COPY_16_BYTES_WITHEX(0)
	bdnz	53b

#else /* not CONFIG_8xx */
	/* Here we decide how far ahead to prefetch the source */
	li	r3,4
	cmpwi	r0,1
	li	r7,0
	ble	114f
	li	r7,1
#if MAX_COPY_PREFETCH > 1
	/* Heuristically, for large transfers we prefetch
	   MAX_COPY_PREFETCH cachelines ahead.  For small transfers
	   we prefetch 1 cacheline ahead. */
	cmpwi	r0,MAX_COPY_PREFETCH
	ble	112f
	li	r7,MAX_COPY_PREFETCH
112:	mtctr	r7
111:	dcbt	r3,r4
	addi	r3,r3,CACHELINE_BYTES
	bdnz	111b
#else
	dcbt	r3,r4
	addi	r3,r3,CACHELINE_BYTES
#endif /* MAX_COPY_PREFETCH > 1 */

114:	subf	r8,r7,r0
	mr	r0,r7
	mtctr	r8

53:	dcbt	r3,r4
54:	dcbz	r11,r6
	.section __ex_table,"a"
	.align	2
	.long	54b,105f
	.text
/* the main body of the cacheline loop */
	COPY_16_BYTES_WITHEX(0)
#if L1_CACHE_LINE_SIZE >= 32
	COPY_16_BYTES_WITHEX(1)
#if L1_CACHE_LINE_SIZE >= 64
	COPY_16_BYTES_WITHEX(2)
	COPY_16_BYTES_WITHEX(3)
#if L1_CACHE_LINE_SIZE >= 128
	COPY_16_BYTES_WITHEX(4)
	COPY_16_BYTES_WITHEX(5)
	COPY_16_BYTES_WITHEX(6)
	COPY_16_BYTES_WITHEX(7)
#endif
#endif
#endif
	bdnz	53b
	cmpwi	r0,0
	li	r3,4
	li	r7,0
	bne	114b
#endif /* CONFIG_8xx */

63:	srwi.	r0,r5,2
	mtctr	r0
	beq	64f
30:	lwzu	r0,4(r4)
31:	stwu	r0,4(r6)
	bdnz	30b

64:	andi.	r0,r5,3
	mtctr	r0
	beq+	65f
40:	lbz	r0,4(r4)
41:	stb	r0,4(r6)
	addi	r4,r4,1
	addi	r6,r6,1
	bdnz	40b
65:	li	r3,0
	blr

/* read fault, initial single-byte copy */
100:	li	r9,0
	b	90f
/* write fault, initial single-byte copy */
101:	li	r9,1
90:	subf	r5,r8,r5
	li	r3,0
	b	99f
/* read fault, initial word copy */
102:	li	r9,0
	b	91f
/* write fault, initial word copy */
103:	li	r9,1
91:	li	r3,2
	b	99f

/*
 * this stuff handles faults in the cacheline loop and branches to either
 * 104f (if in read part) or 105f (if in write part), after updating r5
 */
	COPY_16_BYTES_EXCODE(0)
#if L1_CACHE_LINE_SIZE >= 32
	COPY_16_BYTES_EXCODE(1)
#if L1_CACHE_LINE_SIZE >= 64
	COPY_16_BYTES_EXCODE(2)
	COPY_16_BYTES_EXCODE(3)
#if L1_CACHE_LINE_SIZE >= 128
	COPY_16_BYTES_EXCODE(4)
	COPY_16_BYTES_EXCODE(5)
	COPY_16_BYTES_EXCODE(6)
	COPY_16_BYTES_EXCODE(7)
#endif
#endif
#endif

/* read fault in cacheline loop */
104:	li	r9,0
	b	92f
/* fault on dcbz (effectively a write fault) */
/* or write fault in cacheline loop */
105:	li	r9,1
92:	li	r3,LG_CACHELINE_BYTES
	b	99f
/* read fault in final word loop */
108:	li	r9,0
	b	93f
/* write fault in final word loop */
109:	li	r9,1
93:	andi.	r5,r5,3
	li	r3,2
	b	99f
/* read fault in final byte loop */
110:	li	r9,0
	b	94f
/* write fault in final byte loop */
111:	li	r9,1
94:	li	r5,0
	li	r3,0
/*
 * At this stage the number of bytes not copied is
 * r5 + (ctr << r3), and r9 is 0 for read or 1 for write.
 */
99:	mfctr	r0
	slw	r3,r0,r3
	add.	r3,r3,r5
	beq	120f		/* shouldn't happen */
	cmpwi	0,r9,0
	bne	120f
/* for a read fault, first try to continue the copy one byte at a time */
	mtctr	r3
130:	lbz	r0,4(r4)
131:	stb	r0,4(r6)
	addi	r4,r4,1
	addi	r6,r6,1
	bdnz	130b
/* then clear out the destination: r3 bytes starting at 4(r6) */
132:	mfctr	r3
	srwi.	r0,r3,2
	li	r9,0
	mtctr	r0
	beq	113f
112:	stwu	r9,4(r6)
	bdnz	112b
113:	andi.	r0,r3,3
	mtctr	r0
	beq	120f
114:	stb	r9,4(r6)
	addi	r6,r6,1
	bdnz	114b
120:	blr

	.section __ex_table,"a"
	.align	2
	.long	30b,108b
	.long	31b,109b
	.long	40b,110b
	.long	41b,111b
	.long	130b,132b
	.long	131b,120b
	.long	112b,120b
	.long	114b,120b
	.text

_GLOBAL(__clear_user)
	addi	r6,r3,-4
	li	r3,0
	li	r5,0
	cmplwi	0,r4,4
	blt	7f
	/* clear a single word */
11:	stwu	r5,4(r6)
	beqlr
	/* clear word sized chunks */
	andi.	r0,r6,3
	add	r4,r0,r4
	subf	r6,r0,r6
	srwi	r0,r4,2
	andi.	r4,r4,3
	mtctr	r0
	bdz	7f
1:	stwu	r5,4(r6)
	bdnz	1b
	/* clear byte sized chunks */
7:	cmpwi	0,r4,0
	beqlr
	mtctr	r4
	addi	r6,r6,3
8:	stbu	r5,1(r6)
	bdnz	8b
	blr
90:	mr	r3,r4
	blr
91:	mfctr	r3
	slwi	r3,r3,2
	add	r3,r3,r4
	blr
92:	mfctr	r3
	blr

	.section __ex_table,"a"
	.align	2
	.long	11b,90b
	.long	1b,91b
	.long	8b,92b
	.text

_GLOBAL(__strncpy_from_user)
	addi	r6,r3,-1
	addi	r4,r4,-1
	cmpwi	0,r5,0
	beq	2f
	mtctr	r5
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r6)
	bdnzf	2,1b		/* dec ctr, branch if ctr != 0 && !cr0.eq */
	beq	3f
2:	addi	r6,r6,1
3:	subf	r3,r3,r6
	blr
99:	li	r3,-EFAULT
	blr

	.section __ex_table,"a"
	.align	2
	.long	1b,99b
	.text

/* r3 = str, r4 = len (> 0), r5 = top (highest addr) */
_GLOBAL(__strnlen_user)
	addi	r7,r3,-1
	subf	r6,r7,r5	/* top+1 - str */
	cmplw	0,r4,r6
	bge	0f
	mr	r6,r4
0:	mtctr	r6		/* ctr = min(len, top - str) */
1:	lbzu	r0,1(r7)	/* get next byte */
	cmpwi	0,r0,0
	bdnzf	2,1b		/* loop if --ctr != 0 && byte != 0 */
	addi	r7,r7,1
	subf	r3,r3,r7	/* number of bytes we have looked at */
	beqlr			/* return if we found a 0 byte */
	cmpw	0,r3,r4		/* did we look at all len bytes? */
	blt	99f		/* if not, must have hit top */
	addi	r3,r4,1		/* return len + 1 to indicate no null found */
	blr
99:	li	r3,0		/* bad address, return 0 */
	blr

	.section __ex_table,"a"
	.align	2
	.long	1b,99b
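
Finally, the overlap test at the top of cacheable_memcpy above (two cmplw's combined with crand) is equivalent to the following C predicate, given as an illustrative sketch only (regions_overlap is a made-up name, not part of this commit):

#include <stddef.h>

/* [src,src+n) and [dst,dst+n) overlap iff each start lies below the
 * other's end -- exactly the cr0.lt && cr1.lt condition in the asm */
static int regions_overlap(const char *dst, const char *src, size_t n)
{
	return (src < dst + n) && (dst < src + n);
}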