path: root/arch/x86/lib
author	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-18 11:25:51 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-18 11:25:51 -0400
commit	9e9abecfc0ff3a9ad2ead954b37bbfcb863c775e (patch)
tree	0c3ffda953b82750638a06507591ad587b565ff2 /arch/x86/lib
parent	d7bb545d86825e635cab33a1dd81ca0ad7b92887 (diff)
parent	77ad386e596c6b0930cc2e09e3cce485e3ee7f72 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (613 commits)
  x86: standalone trampoline code
  x86: move suspend wakeup code to C
  x86: coding style fixes to arch/x86/kernel/acpi/sleep.c
  x86: setup_trampoline() - fix section mismatch warning
  x86: section mismatch fixes, #1
  x86: fix paranoia about using BIOS quickboot mechanism.
  x86: print out buggy mptable
  x86: use cpu_online()
  x86: use cpumask_of_cpu()
  x86: remove unnecessary tmp local variable
  x86: remove unnecessary memset()
  x86: use ioapic_read_entry() and ioapic_write_entry()
  x86: avoid redundant loop in io_apic_level_ack_pending()
  x86: remove superfluous initialisation in boot code.
  x86: merge mpparse_{32,64}.c
  x86: unify mp_register_gsi
  x86: unify mp_config_acpi_legacy_irqs
  x86: unify mp_register_ioapic
  x86: unify uniq_io_apic_id
  x86: unify smp_scan_config
  ...
Diffstat (limited to 'arch/x86/lib')
-rw-r--r--	arch/x86/lib/memcpy_32.c	2
-rw-r--r--	arch/x86/lib/memmove_64.c	8
-rw-r--r--	arch/x86/lib/mmx_32.c	197
-rw-r--r--	arch/x86/lib/string_32.c	60
-rw-r--r--	arch/x86/lib/strstr_32.c	4
-rw-r--r--	arch/x86/lib/usercopy_32.c	122
6 files changed, 192 insertions, 201 deletions
diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c
index 37756b6fb329..5415a9d06f53 100644
--- a/arch/x86/lib/memcpy_32.c
+++ b/arch/x86/lib/memcpy_32.c
@@ -25,7 +25,7 @@ void *memmove(void *dest, const void *src, size_t n)
 	int d0, d1, d2;
 
 	if (dest < src) {
-		memcpy(dest,src,n);
+		memcpy(dest, src, n);
 	} else {
 		__asm__ __volatile__(
 			"std\n\t"
diff --git a/arch/x86/lib/memmove_64.c b/arch/x86/lib/memmove_64.c
index 80175e47b190..0a33909bf122 100644
--- a/arch/x86/lib/memmove_64.c
+++ b/arch/x86/lib/memmove_64.c
@@ -6,10 +6,10 @@
 #include <linux/module.h>
 
 #undef memmove
-void *memmove(void * dest,const void *src,size_t count)
+void *memmove(void *dest, const void *src, size_t count)
 {
 	if (dest < src) {
-		return memcpy(dest,src,count);
+		return memcpy(dest, src, count);
 	} else {
 		char *p = dest + count;
 		const char *s = src + count;
@@ -17,5 +17,5 @@ void *memmove(void * dest,const void *src,size_t count)
 		*--p = *--s;
 	}
 	return dest;
 }
 EXPORT_SYMBOL(memmove);
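
The memmove() hunks above are style-only; the overlap handling is unchanged. The technique being preserved is the classic one: copy forward when the destination starts below the source, backward otherwise, so overlapping regions are never clobbered mid-copy. A minimal standalone sketch of the same idea (memmove_sketch is a hypothetical name; it assumes, as the kernel version does, that memcpy() copies in ascending order):

#include <stddef.h>
#include <string.h>

static void *memmove_sketch(void *dest, const void *src, size_t count)
{
	if (dest < src)
		return memcpy(dest, src, count);	/* ascending copy is safe here */

	{
		char *p = (char *)dest + count;
		const char *s = (const char *)src + count;

		while (count--)
			*--p = *--s;			/* copy from the tail down */
	}
	return dest;
}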
diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
index cc9b4a4450f3..c9f2d9ba8dd8 100644
--- a/arch/x86/lib/mmx_32.c
+++ b/arch/x86/lib/mmx_32.c
@@ -1,32 +1,30 @@
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/sched.h>
-#include <linux/hardirq.h>
-#include <linux/module.h>
-
-#include <asm/asm.h>
-#include <asm/i387.h>
-
-
 /*
  * MMX 3DNow! library helper functions
  *
  * To do:
  *	We can use MMX just for prefetch in IRQ's. This may be a win.
  *	(reported so on K6-III)
  *	We should use a better code neutral filler for the short jump
  *	leal ebx. [ebx] is apparently best for K6-2, but Cyrix ??
  *	We also want to clobber the filler register so we don't get any
  *	register forwarding stalls on the filler.
  *
  *	Add *user handling. Checksums are not a win with MMX on any CPU
  *	tested so far for any MMX solution figured.
  *
  *	22/09/2000 - Arjan van de Ven
  *		Improved for non-egineering-sample Athlons
  *
  */
-
+#include <linux/hardirq.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+
+#include <asm/i387.h>
+#include <asm/asm.h>
+
 void *_mmx_memcpy(void *to, const void *from, size_t len)
 {
 	void *p;
@@ -51,12 +49,10 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
 		"3: movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
-		: : "r" (from) );
+		_ASM_EXTABLE(1b, 3b)
+		: : "r" (from));
 
-
-	for(; i>5; i--)
-	{
+	for ( ; i > 5; i--) {
 		__asm__ __volatile__ (
 		"1: prefetch 320(%0)\n"
 		"2: movq (%0), %%mm0\n"
@@ -79,14 +75,14 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
 		"3: movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
+		_ASM_EXTABLE(1b, 3b)
 		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+
+		from += 64;
+		to += 64;
 	}
 
-	for(; i>0; i--)
-	{
+	for ( ; i > 0; i--) {
 		__asm__ __volatile__ (
 		"	movq (%0), %%mm0\n"
 		"	movq 8(%0), %%mm1\n"
@@ -104,17 +100,20 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
 		"	movq %%mm1, 40(%1)\n"
 		"	movq %%mm2, 48(%1)\n"
 		"	movq %%mm3, 56(%1)\n"
 		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+
+		from += 64;
+		to += 64;
 	}
 	/*
-	 * Now do the tail of the block
+	 * Now do the tail of the block:
 	 */
-	__memcpy(to, from, len&63);
+	__memcpy(to, from, len & 63);
 	kernel_fpu_end();
+
 	return p;
 }
+EXPORT_SYMBOL(_mmx_memcpy);
 
 #ifdef CONFIG_MK7
 
@@ -128,13 +127,12 @@ static void fast_clear_page(void *page)
 	int i;
 
 	kernel_fpu_begin();
 
 	__asm__ __volatile__ (
 		"  pxor %%mm0, %%mm0\n" : :
 	);
 
-	for(i=0;i<4096/64;i++)
-	{
+	for (i = 0; i < 4096/64; i++) {
 		__asm__ __volatile__ (
 		"  movntq %%mm0, (%0)\n"
 		"  movntq %%mm0, 8(%0)\n"
@@ -145,14 +143,15 @@ static void fast_clear_page(void *page)
 		"  movntq %%mm0, 48(%0)\n"
 		"  movntq %%mm0, 56(%0)\n"
 		: : "r" (page) : "memory");
-		page+=64;
+		page += 64;
 	}
-	/* since movntq is weakly-ordered, a "sfence" is needed to become
-	 * ordered again.
+
+	/*
+	 * Since movntq is weakly-ordered, a "sfence" is needed to become
+	 * ordered again:
 	 */
-	__asm__ __volatile__ (
-	"  sfence \n" : :
-	);
+	__asm__ __volatile__("sfence\n"::);
+
 	kernel_fpu_end();
 }
 
@@ -162,10 +161,11 @@ static void fast_copy_page(void *to, void *from)
 
 	kernel_fpu_begin();
 
-	/* maybe the prefetch stuff can go before the expensive fnsave...
+	/*
+	 * maybe the prefetch stuff can go before the expensive fnsave...
 	 * but that is for later. -AV
 	 */
-	__asm__ __volatile__ (
+	__asm__ __volatile__(
 		"1: prefetch (%0)\n"
 		"   prefetch 64(%0)\n"
 		"   prefetch 128(%0)\n"
@@ -176,11 +176,9 @@ static void fast_copy_page(void *to, void *from)
 		"3: movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
-		: : "r" (from) );
+		_ASM_EXTABLE(1b, 3b) : : "r" (from));
 
-	for(i=0; i<(4096-320)/64; i++)
-	{
+	for (i = 0; i < (4096-320)/64; i++) {
 		__asm__ __volatile__ (
 		"1: prefetch 320(%0)\n"
 		"2: movq (%0), %%mm0\n"
@@ -203,13 +201,13 @@ static void fast_copy_page(void *to, void *from)
 		"3: movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
-		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+		_ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
+
+		from += 64;
+		to += 64;
 	}
-	for(i=(4096-320)/64; i<4096/64; i++)
-	{
+
+	for (i = (4096-320)/64; i < 4096/64; i++) {
 		__asm__ __volatile__ (
 		"2: movq (%0), %%mm0\n"
 		"   movntq %%mm0, (%1)\n"
@@ -227,37 +225,34 @@ static void fast_copy_page(void *to, void *from)
 		"	movntq %%mm6, 48(%1)\n"
 		"	movq 56(%0), %%mm7\n"
 		"	movntq %%mm7, 56(%1)\n"
 		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+		from += 64;
+		to += 64;
 	}
-	/* since movntq is weakly-ordered, a "sfence" is needed to become
-	 * ordered again.
+	/*
+	 * Since movntq is weakly-ordered, a "sfence" is needed to become
+	 * ordered again:
 	 */
-	__asm__ __volatile__ (
-	"  sfence \n" : :
-	);
+	__asm__ __volatile__("sfence \n"::);
 	kernel_fpu_end();
 }
 
-#else
+#else /* CONFIG_MK7 */
 
 /*
  * Generic MMX implementation without K7 specific streaming
  */
-
 static void fast_clear_page(void *page)
 {
 	int i;
 
 	kernel_fpu_begin();
 
 	__asm__ __volatile__ (
 		"  pxor %%mm0, %%mm0\n" : :
 	);
 
-	for(i=0;i<4096/128;i++)
-	{
+	for (i = 0; i < 4096/128; i++) {
 		__asm__ __volatile__ (
 		"  movq %%mm0, (%0)\n"
 		"  movq %%mm0, 8(%0)\n"
@@ -275,8 +270,8 @@ static void fast_clear_page(void *page)
 		"  movq %%mm0, 104(%0)\n"
 		"  movq %%mm0, 112(%0)\n"
 		"  movq %%mm0, 120(%0)\n"
 		: : "r" (page) : "memory");
-		page+=128;
+		page += 128;
 	}
 
 	kernel_fpu_end();
@@ -285,8 +280,7 @@ static void fast_clear_page(void *page)
 static void fast_copy_page(void *to, void *from)
 {
 	int i;
 
-
 	kernel_fpu_begin();
 
 	__asm__ __volatile__ (
@@ -300,11 +294,9 @@ static void fast_copy_page(void *to, void *from)
 		"3: movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
-		: : "r" (from) );
+		_ASM_EXTABLE(1b, 3b) : : "r" (from));
 
-	for(i=0; i<4096/64; i++)
-	{
+	for (i = 0; i < 4096/64; i++) {
 		__asm__ __volatile__ (
 		"1: prefetch 320(%0)\n"
 		"2: movq (%0), %%mm0\n"
@@ -327,60 +319,59 @@ static void fast_copy_page(void *to, void *from)
 		"3: movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
 		"   jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b,3b)
+		_ASM_EXTABLE(1b, 3b)
 		: : "r" (from), "r" (to) : "memory");
-		from+=64;
-		to+=64;
+
+		from += 64;
+		to += 64;
 	}
 	kernel_fpu_end();
 }
 
-
-#endif
+#endif /* !CONFIG_MK7 */
 
 /*
- * Favour MMX for page clear and copy.
+ * Favour MMX for page clear and copy:
  */
-
-static void slow_zero_page(void * page)
+static void slow_zero_page(void *page)
 {
 	int d0, d1;
-	__asm__ __volatile__( \
-		"cld\n\t" \
-		"rep ; stosl" \
-		: "=&c" (d0), "=&D" (d1)
-		:"a" (0),"1" (page),"0" (1024)
-		:"memory");
+
+	__asm__ __volatile__(
+		"cld\n\t"
+		"rep ; stosl"
+
+			: "=&c" (d0), "=&D" (d1)
+			:"a" (0), "1" (page), "0" (1024)
+			:"memory");
 }
 
-void mmx_clear_page(void * page)
+void mmx_clear_page(void *page)
 {
-	if(unlikely(in_interrupt()))
+	if (unlikely(in_interrupt()))
 		slow_zero_page(page);
 	else
 		fast_clear_page(page);
 }
+EXPORT_SYMBOL(mmx_clear_page);
 
 static void slow_copy_page(void *to, void *from)
 {
 	int d0, d1, d2;
-	__asm__ __volatile__( \
-		"cld\n\t" \
-		"rep ; movsl" \
-		: "=&c" (d0), "=&D" (d1), "=&S" (d2) \
-		: "0" (1024),"1" ((long) to),"2" ((long) from) \
+
+	__asm__ __volatile__(
+		"cld\n\t"
+		"rep ; movsl"
+		: "=&c" (d0), "=&D" (d1), "=&S" (d2)
+		: "0" (1024), "1" ((long) to), "2" ((long) from)
 		: "memory");
 }
-
 
 void mmx_copy_page(void *to, void *from)
 {
-	if(unlikely(in_interrupt()))
+	if (unlikely(in_interrupt()))
 		slow_copy_page(to, from);
 	else
 		fast_copy_page(to, from);
 }
-
-EXPORT_SYMBOL(_mmx_memcpy);
-EXPORT_SYMBOL(mmx_clear_page);
 EXPORT_SYMBOL(mmx_copy_page);
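
A pattern that recurs throughout mmx_32.c is a run of weakly-ordered movntq (non-temporal) stores closed off by a single sfence. A rough standalone illustration of that pattern (GCC-style inline asm on MMX-capable 32-bit x86; clear64_nt is a hypothetical name, and unlike the kernel code, which must bracket MMX use with kernel_fpu_begin()/kernel_fpu_end(), a userspace sketch only needs the trailing emms):

#include <stdint.h>

/* Zero one 64-byte chunk with non-temporal stores. movntq bypasses
 * the cache and is weakly ordered, so the trailing sfence is what
 * guarantees the stores become globally visible in order. */
static void clear64_nt(void *dst)
{
	__asm__ __volatile__(
		"pxor   %%mm0, %%mm0\n\t"
		"movntq %%mm0, (%0)\n\t"
		"movntq %%mm0, 8(%0)\n\t"
		"movntq %%mm0, 16(%0)\n\t"
		"movntq %%mm0, 24(%0)\n\t"
		"movntq %%mm0, 32(%0)\n\t"
		"movntq %%mm0, 40(%0)\n\t"
		"movntq %%mm0, 48(%0)\n\t"
		"movntq %%mm0, 56(%0)\n\t"
		"sfence\n\t"
		"emms"			/* leave MMX state clean for the FPU */
		: : "r" (dst) : "memory", "mm0");
}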
diff --git a/arch/x86/lib/string_32.c b/arch/x86/lib/string_32.c
index c2c0504a3071..94972e7c094d 100644
--- a/arch/x86/lib/string_32.c
+++ b/arch/x86/lib/string_32.c
@@ -14,25 +14,25 @@
 #include <linux/module.h>
 
 #ifdef __HAVE_ARCH_STRCPY
-char *strcpy(char * dest,const char *src)
+char *strcpy(char *dest, const char *src)
 {
 	int d0, d1, d2;
-	asm volatile( "1:\tlodsb\n\t"
+	asm volatile("1:\tlodsb\n\t"
 		"stosb\n\t"
 		"testb %%al,%%al\n\t"
 		"jne 1b"
 		: "=&S" (d0), "=&D" (d1), "=&a" (d2)
-		:"0" (src),"1" (dest) : "memory");
+		:"0" (src), "1" (dest) : "memory");
 	return dest;
 }
 EXPORT_SYMBOL(strcpy);
 #endif
 
 #ifdef __HAVE_ARCH_STRNCPY
-char *strncpy(char * dest,const char *src,size_t count)
+char *strncpy(char *dest, const char *src, size_t count)
 {
 	int d0, d1, d2, d3;
-	asm volatile( "1:\tdecl %2\n\t"
+	asm volatile("1:\tdecl %2\n\t"
 		"js 2f\n\t"
 		"lodsb\n\t"
 		"stosb\n\t"
@@ -42,17 +42,17 @@ char *strncpy(char * dest,const char *src,size_t count)
 		"stosb\n"
 		"2:"
 		: "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3)
-		:"0" (src),"1" (dest),"2" (count) : "memory");
+		:"0" (src), "1" (dest), "2" (count) : "memory");
 	return dest;
 }
 EXPORT_SYMBOL(strncpy);
 #endif
 
 #ifdef __HAVE_ARCH_STRCAT
-char *strcat(char * dest,const char * src)
+char *strcat(char *dest, const char *src)
 {
 	int d0, d1, d2, d3;
-	asm volatile( "repne\n\t"
+	asm volatile("repne\n\t"
 		"scasb\n\t"
 		"decl %1\n"
 		"1:\tlodsb\n\t"
@@ -67,10 +67,10 @@ EXPORT_SYMBOL(strcat);
 #endif
 
 #ifdef __HAVE_ARCH_STRNCAT
-char *strncat(char * dest,const char * src,size_t count)
+char *strncat(char *dest, const char *src, size_t count)
 {
 	int d0, d1, d2, d3;
-	asm volatile( "repne\n\t"
+	asm volatile("repne\n\t"
 		"scasb\n\t"
 		"decl %1\n\t"
 		"movl %8,%3\n"
@@ -83,7 +83,7 @@ char *strncat(char * dest,const char * src,size_t count)
 		"2:\txorl %2,%2\n\t"
 		"stosb"
 		: "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
-		: "0" (src),"1" (dest),"2" (0),"3" (0xffffffffu), "g" (count)
+		: "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu), "g" (count)
 		: "memory");
 	return dest;
 }
@@ -91,11 +91,11 @@ EXPORT_SYMBOL(strncat);
 #endif
 
 #ifdef __HAVE_ARCH_STRCMP
-int strcmp(const char * cs,const char * ct)
+int strcmp(const char *cs, const char *ct)
 {
 	int d0, d1;
 	int res;
-	asm volatile( "1:\tlodsb\n\t"
+	asm volatile("1:\tlodsb\n\t"
 		"scasb\n\t"
 		"jne 2f\n\t"
 		"testb %%al,%%al\n\t"
@@ -106,7 +106,7 @@ int strcmp(const char * cs,const char * ct)
 		"orb $1,%%al\n"
 		"3:"
 		:"=a" (res), "=&S" (d0), "=&D" (d1)
-		:"1" (cs),"2" (ct)
+		:"1" (cs), "2" (ct)
 		:"memory");
 	return res;
 }
@@ -114,11 +114,11 @@ EXPORT_SYMBOL(strcmp);
 #endif
 
 #ifdef __HAVE_ARCH_STRNCMP
-int strncmp(const char * cs,const char * ct,size_t count)
+int strncmp(const char *cs, const char *ct, size_t count)
 {
 	int res;
 	int d0, d1, d2;
-	asm volatile( "1:\tdecl %3\n\t"
+	asm volatile("1:\tdecl %3\n\t"
 		"js 2f\n\t"
 		"lodsb\n\t"
 		"scasb\n\t"
@@ -131,7 +131,7 @@ int strncmp(const char * cs,const char * ct,size_t count)
 		"orb $1,%%al\n"
 		"4:"
 		:"=a" (res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
-		:"1" (cs),"2" (ct),"3" (count)
+		:"1" (cs), "2" (ct), "3" (count)
 		:"memory");
 	return res;
 }
@@ -139,11 +139,11 @@ EXPORT_SYMBOL(strncmp);
 #endif
 
 #ifdef __HAVE_ARCH_STRCHR
-char *strchr(const char * s, int c)
+char *strchr(const char *s, int c)
 {
 	int d0;
-	char * res;
-	asm volatile( "movb %%al,%%ah\n"
+	char *res;
+	asm volatile("movb %%al,%%ah\n"
 		"1:\tlodsb\n\t"
 		"cmpb %%ah,%%al\n\t"
 		"je 2f\n\t"
@@ -153,7 +153,7 @@ char *strchr(const char * s, int c)
 		"2:\tmovl %1,%0\n\t"
 		"decl %0"
 		:"=a" (res), "=&S" (d0)
-		:"1" (s),"0" (c)
+		:"1" (s), "0" (c)
 		:"memory");
 	return res;
 }
@@ -161,16 +161,16 @@ EXPORT_SYMBOL(strchr);
 #endif
 
 #ifdef __HAVE_ARCH_STRLEN
-size_t strlen(const char * s)
+size_t strlen(const char *s)
 {
 	int d0;
 	int res;
-	asm volatile( "repne\n\t"
+	asm volatile("repne\n\t"
 		"scasb\n\t"
 		"notl %0\n\t"
 		"decl %0"
 		:"=c" (res), "=&D" (d0)
-		:"1" (s),"a" (0), "0" (0xffffffffu)
+		:"1" (s), "a" (0), "0" (0xffffffffu)
 		:"memory");
 	return res;
 }
@@ -178,19 +178,19 @@ EXPORT_SYMBOL(strlen);
 #endif
 
 #ifdef __HAVE_ARCH_MEMCHR
-void *memchr(const void *cs,int c,size_t count)
+void *memchr(const void *cs, int c, size_t count)
 {
 	int d0;
 	void *res;
 	if (!count)
 		return NULL;
-	asm volatile( "repne\n\t"
+	asm volatile("repne\n\t"
 		"scasb\n\t"
 		"je 1f\n\t"
 		"movl $1,%0\n"
 		"1:\tdecl %0"
 		:"=D" (res), "=&c" (d0)
-		:"a" (c),"0" (cs),"1" (count)
+		:"a" (c), "0" (cs), "1" (count)
 		:"memory");
 	return res;
 }
@@ -198,7 +198,7 @@ EXPORT_SYMBOL(memchr);
 #endif
 
 #ifdef __HAVE_ARCH_MEMSCAN
-void *memscan(void * addr, int c, size_t size)
+void *memscan(void *addr, int c, size_t size)
 {
 	if (!size)
 		return addr;
@@ -219,7 +219,7 @@ size_t strnlen(const char *s, size_t count)
 {
 	int d0;
 	int res;
-	asm volatile( "movl %2,%0\n\t"
+	asm volatile("movl %2,%0\n\t"
 		"jmp 2f\n"
 		"1:\tcmpb $0,(%0)\n\t"
 		"je 3f\n\t"
@@ -229,7 +229,7 @@ size_t strnlen(const char *s, size_t count)
 		"jne 1b\n"
 		"3:\tsubl %2,%0"
 		:"=a" (res), "=&d" (d0)
-		:"c" (s),"1" (count)
+		:"c" (s), "1" (count)
 		:"memory");
 	return res;
 }
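
All of the string_32.c hunks are argument-spacing and asm-layout fixes; the instruction sequences themselves are untouched. For reference, the strlen() asm works as follows, shown here as a rough standalone equivalent (strlen_scasb is a hypothetical name; assumes GCC inline asm on x86): repne;scasb scans bytes while decrementing the count register, so starting the count at ~0 and inverting it afterwards gives the number of bytes scanned, and one more decrement discards the NUL itself.

#include <stddef.h>

static size_t strlen_scasb(const char *s)
{
	size_t res;
	const char *scratch;

	__asm__ __volatile__(
		"repne\n\t"
		"scasb\n\t"		/* scan for AL == 0, counting down */
		"not %0\n\t"		/* count = bytes scanned (incl. NUL) */
		"dec %0"		/* drop the NUL from the length */
		: "=c" (res), "=&D" (scratch)
		: "1" (s), "a" (0), "0" (~(size_t)0)
		: "cc", "memory");
	return res;
}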
diff --git a/arch/x86/lib/strstr_32.c b/arch/x86/lib/strstr_32.c
index a3dafbf59dae..42e8a50303f3 100644
--- a/arch/x86/lib/strstr_32.c
+++ b/arch/x86/lib/strstr_32.c
@@ -1,9 +1,9 @@
 #include <linux/string.h>
 
-char * strstr(const char * cs,const char * ct)
+char *strstr(const char *cs, const char *ct)
 {
 int	d0, d1;
-register char * __res;
+register char *__res;
 __asm__ __volatile__(
 	"movl %6,%%edi\n\t"
 	"repne\n\t"
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index e849b9998b0e..24e60944971a 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -1,4 +1,4 @@
 /*
  * User address space access functions.
  * The non inlined parts of asm-i386/uaccess.h are here.
- * 
+ *
@@ -22,14 +22,14 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon
 #endif
 	return 1;
 }
-#define movsl_is_ok(a1,a2,n) \
-	__movsl_is_ok((unsigned long)(a1),(unsigned long)(a2),(n))
+#define movsl_is_ok(a1, a2, n) \
+	__movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))
 
 /*
  * Copy a null terminated string from userspace.
  */
 
-#define __do_strncpy_from_user(dst,src,count,res)	\
+#define __do_strncpy_from_user(dst, src, count, res)	\
 do {							\
 	int __d0, __d1, __d2;				\
 	might_sleep();					\
@@ -61,7 +61,7 @@ do { \
  * least @count bytes long.
  * @src:   Source address, in user space.
  * @count: Maximum number of bytes to copy, including the trailing NUL.
- * 
+ *
  * Copies a NUL-terminated string from userspace to kernel space.
  * Caller must check the specified block with access_ok() before calling
  * this function.
@@ -90,7 +90,7 @@ EXPORT_SYMBOL(__strncpy_from_user);
 * least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
- * 
+ *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
@@ -120,7 +120,7 @@ EXPORT_SYMBOL(strncpy_from_user);
 do {							\
 	int __d0;					\
 	might_sleep();					\
 	__asm__ __volatile__(				\
 		"0:	rep; stosl\n"			\
 		"	movl %2,%0\n"			\
 		"1:	rep; stosb\n"			\
@@ -333,17 +333,17 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
 	__asm__ __volatile__(
 		"	.align 2,0x90\n"
 		"0:	movl 32(%4), %%eax\n"
 		"	cmpl $67, %0\n"
 		"	jbe 2f\n"
 		"1:	movl 64(%4), %%eax\n"
 		"	.align 2,0x90\n"
 		"2:	movl 0(%4), %%eax\n"
 		"21:	movl 4(%4), %%edx\n"
 		"	movl %%eax, 0(%3)\n"
 		"	movl %%edx, 4(%3)\n"
 		"3:	movl 8(%4), %%eax\n"
 		"31:	movl 12(%4),%%edx\n"
 		"	movl %%eax, 8(%3)\n"
 		"	movl %%edx, 12(%3)\n"
 		"4:	movl 16(%4), %%eax\n"
 		"41:	movl 20(%4), %%edx\n"
@@ -369,38 +369,38 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
 		"91:	movl 60(%4), %%edx\n"
 		"	movl %%eax, 56(%3)\n"
 		"	movl %%edx, 60(%3)\n"
 		"	addl $-64, %0\n"
 		"	addl $64, %4\n"
 		"	addl $64, %3\n"
 		"	cmpl $63, %0\n"
 		"	ja 0b\n"
 		"5:	movl %0, %%eax\n"
 		"	shrl $2, %0\n"
 		"	andl $3, %%eax\n"
 		"	cld\n"
 		"6:	rep; movsl\n"
 		"	movl %%eax,%0\n"
 		"7:	rep; movsb\n"
 		"8:\n"
 		".section .fixup,\"ax\"\n"
 		"9:	lea 0(%%eax,%0,4),%0\n"
 		"16:	pushl %0\n"
 		"	pushl %%eax\n"
 		"	xorl %%eax,%%eax\n"
 		"	rep; stosb\n"
 		"	popl %%eax\n"
 		"	popl %0\n"
 		"	jmp 8b\n"
 		".previous\n"
 		".section __ex_table,\"a\"\n"
 		"	.align 4\n"
 		"	.long 0b,16b\n"
 		"	.long 1b,16b\n"
 		"	.long 2b,16b\n"
 		"	.long 21b,16b\n"
 		"	.long 3b,16b\n"
 		"	.long 31b,16b\n"
 		"	.long 4b,16b\n"
 		"	.long 41b,16b\n"
 		"	.long 10b,16b\n"
 		"	.long 51b,16b\n"
@@ -412,9 +412,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
 		"	.long 81b,16b\n"
 		"	.long 14b,16b\n"
 		"	.long 91b,16b\n"
 		"	.long 6b,9b\n"
 		"	.long 7b,16b\n"
 		".previous"
 		: "=&c"(size), "=&D" (d0), "=&S" (d1)
 		: "1"(to), "2"(from), "0"(size)
 		: "eax", "edx", "memory");
@@ -429,7 +429,7 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
 static unsigned long __copy_user_zeroing_intel_nocache(void *to,
 				const void __user *from, unsigned long size)
 {
 	int d0, d1;
 
 	__asm__ __volatile__(
 		"	.align 2,0x90\n"
@@ -526,7 +526,7 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
 static unsigned long __copy_user_intel_nocache(void *to,
 				const void __user *from, unsigned long size)
 {
 	int d0, d1;
 
 	__asm__ __volatile__(
 		"	.align 2,0x90\n"
@@ -629,7 +629,7 @@ unsigned long __copy_user_zeroing_intel_nocache(void *to,
 #endif /* CONFIG_X86_INTEL_USERCOPY */
 
 /* Generic arbitrary sized copy. */
-#define __copy_user(to,from,size)			\
+#define __copy_user(to, from, size)			\
 do {							\
 	int __d0, __d1, __d2;				\
 	__asm__ __volatile__(				\
@@ -665,7 +665,7 @@ do { \
 		: "memory");				\
 } while (0)
 
-#define __copy_user_zeroing(to,from,size)		\
+#define __copy_user_zeroing(to, from, size)		\
 do {							\
 	int __d0, __d1, __d2;				\
 	__asm__ __volatile__(				\
@@ -712,7 +712,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
 {
 #ifndef CONFIG_X86_WP_WORKS_OK
 	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
-			((unsigned long )to) < TASK_SIZE) {
+			((unsigned long)to) < TASK_SIZE) {
 		/*
 		 * When we are in an atomic section (see
 		 * mm/filemap.c:file_read_actor), return the full
@@ -721,26 +721,26 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
 		if (in_atomic())
 			return n;
 
 		/*
 		 * CPU does not honor the WP bit when writing
 		 * from supervisory mode, and due to preemption or SMP,
 		 * the page tables can change at any time.
 		 * Do it manually.	Manfred <manfred@colorfullife.com>
 		 */
 		while (n) {
 			unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
 			unsigned long len = PAGE_SIZE - offset;
 			int retval;
 			struct page *pg;
 			void *maddr;
 
 			if (len > n)
 				len = n;
 
 survive:
 			down_read(&current->mm->mmap_sem);
 			retval = get_user_pages(current, current->mm,
-					(unsigned long )to, 1, 1, 0, &pg, NULL);
+					(unsigned long)to, 1, 1, 0, &pg, NULL);
 
 			if (retval == -ENOMEM && is_global_init(current)) {
 				up_read(&current->mm->mmap_sem);
@@ -750,8 +750,8 @@ survive:
 
 			if (retval != 1) {
 				up_read(&current->mm->mmap_sem);
 				break;
 			}
 
 			maddr = kmap_atomic(pg, KM_USER0);
 			memcpy(maddr + offset, from, len);
@@ -802,12 +802,12 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
 					unsigned long n)
 {
 #ifdef CONFIG_X86_INTEL_USERCOPY
-	if ( n > 64 && cpu_has_xmm2)
+	if (n > 64 && cpu_has_xmm2)
 		n = __copy_user_zeroing_intel_nocache(to, from, n);
 	else
 		__copy_user_zeroing(to, from, n);
 #else
 	__copy_user_zeroing(to, from, n);
 #endif
 	return n;
 }
@@ -817,12 +817,12 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
 					unsigned long n)
 {
 #ifdef CONFIG_X86_INTEL_USERCOPY
-	if ( n > 64 && cpu_has_xmm2)
+	if (n > 64 && cpu_has_xmm2)
 		n = __copy_user_intel_nocache(to, from, n);
 	else
 		__copy_user(to, from, n);
 #else
 	__copy_user(to, from, n);
 #endif
 	return n;
 }
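
The usercopy_32.c hunks, likewise, change whitespace and macro argument spacing only. One convention worth keeping in mind while reading them: these __copy_*_ll routines, like the copy_from_user() interface built on top of them, return the number of bytes left uncopied (0 on success), and the _zeroing variants clear the destination tail after a fault so no stale kernel data can leak. A hypothetical caller, sketched for illustration only:

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Returns 0 on success, -EFAULT if any part of the user buffer
 * faulted; with the zeroing copy, dst holds no stale kernel data
 * either way. */
static int read_user_buf(void *dst, const void __user *src,
			 unsigned long len)
{
	if (copy_from_user(dst, src, len))
		return -EFAULT;
	return 0;
}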