author     Ralf Baechle <ralf@linux-mips.org>    2013-01-22 06:59:30 -0500
committer  Ralf Baechle <ralf@linux-mips.org>    2013-02-01 04:00:22 -0500
commit     7034228792cc561e79ff8600f02884bd4c80e287 (patch)
tree       89b77af37d087d9de236fc5d21f60bf552d0a2c6 /arch/mips/lib
parent     405ab01c70e18058d9c01a1256769a61fc65413e (diff)
MIPS: Whitespace cleanup.
Having received another series of whitespace patches I decided to do this
once and for all rather than dealing with patches of this kind trickling
in forever.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/lib')
-rw-r--r--  arch/mips/lib/bitops.c        |  4
-rw-r--r--  arch/mips/lib/csum_partial.S  | 42
-rw-r--r--  arch/mips/lib/delay.c         |  2
-rw-r--r--  arch/mips/lib/dump_tlb.c      |  2
-rw-r--r--  arch/mips/lib/memcpy.S        | 34
-rw-r--r--  arch/mips/lib/memset.S        |  4
-rw-r--r--  arch/mips/lib/r3k_dump_tlb.c  |  2
-rw-r--r--  arch/mips/lib/strncpy_user.S  |  2
-rw-r--r--  arch/mips/lib/strnlen_user.S  |  6
-rw-r--r--  arch/mips/lib/uncached.c      |  2
10 files changed, 50 insertions, 50 deletions
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
index 239a9c957b02..81f1dcfdcab8 100644
--- a/arch/mips/lib/bitops.c
+++ b/arch/mips/lib/bitops.c
@@ -56,7 +56,7 @@ EXPORT_SYMBOL(__mips_clear_bit);


/**
 * __mips_change_bit - Toggle a bit in memory. This is called by change_bit()
 * if it cannot find a faster solution.
 * @nr: Bit to change
 * @addr: Address to start counting from
@@ -155,7 +155,7 @@ EXPORT_SYMBOL(__mips_test_and_clear_bit);


/**
 * __mips_test_and_change_bit - Change a bit and return its old value. This is
 * called by test_and_change_bit() if it cannot find a faster solution.
 * @nr: Bit to change
 * @addr: Address to count from
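As the kernel-doc above notes, the __mips_*_bit helpers are the slow-path
fallback used when no faster (LL/SC) sequence is available. A minimal C
sketch of the idea, assuming an interrupt-disable fallback; the function
name is illustrative, not the kernel's exact code:

#include <linux/irqflags.h>
#include <linux/bitops.h>

/* Illustrative fallback: protect a read-modify-write toggle by
 * disabling local interrupts, which is what a CPU without LL/SC
 * atomics has to do. Not the kernel's exact implementation. */
static void sketch_change_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long *a = (unsigned long *)addr + (nr / BITS_PER_LONG);
        unsigned long mask = 1UL << (nr % BITS_PER_LONG);
        unsigned long flags;

        raw_local_irq_save(flags);      /* no preemption, no interrupts */
        *a ^= mask;                     /* the non-atomic toggle itself */
        raw_local_irq_restore(flags);
}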
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 6b876ca299ee..507147aebd41 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -67,8 +67,8 @@
#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)    \
        LOAD    _t0, (offset + UNIT(0))(src);                   \
        LOAD    _t1, (offset + UNIT(1))(src);                   \
        LOAD    _t2, (offset + UNIT(2))(src);                   \
        LOAD    _t3, (offset + UNIT(3))(src);                   \
        ADDC(sum, _t0);                                         \
        ADDC(sum, _t1);                                         \
        ADDC(sum, _t2);                                         \
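For reference, ADDC() accumulates with end-around carry, which is what
makes the ones'-complement Internet checksum insensitive to word order.
A hedged C model of that single step (the function name is made up):

/* Model of ADDC(sum, v): add, then fold any carry-out back into
 * bit 0 (end-around carry), as ones'-complement arithmetic requires. */
static inline unsigned int addc_model(unsigned int sum, unsigned int v)
{
        sum += v;
        if (sum < v)    /* unsigned wraparound means a carry came out */
                sum += 1;
        return sum;
}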
@@ -285,7 +285,7 @@ LEAF(csum_partial)
1:
#endif
        .set    reorder
        /* Add the passed partial csum. */
        ADDC32(sum, a2)
        jr      ra
        .set    noreorder
@@ -298,7 +298,7 @@ LEAF(csum_partial)
 * csum_partial_copy_nocheck(src, dst, len, sum)
 * __csum_partial_copy_user(src, dst, len, sum, errp)
 *
 * See "Spec" in memcpy.S for details. Unlike __copy_user, all
 * function in this file use the standard calling convention.
 */

@@ -371,16 +371,16 @@ LEAF(csum_partial)

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define LDREST  LOADL
#define STFIRST STORER
#define STREST  STOREL
#define SHIFT_DISCARD SLLV
#define SHIFT_DISCARD_REVERT SRLV
#else
#define LDFIRST LOADL
#define LDREST  LOADR
#define STFIRST STOREL
#define STREST  STORER
#define SHIFT_DISCARD SRLV
#define SHIFT_DISCARD_REVERT SLLV
#endif
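These macros pair up MIPS's partial load/store instructions (lwl/lwr,
swl/swr and their 64-bit forms) so that LDFIRST always touches the bytes
at the lower address of an unaligned word; which physical instruction
that is flips with endianness, hence the two #ifdef arms. A rough C
picture of the net effect, under the assumption that two partial accesses
assemble one unaligned word (the helper is hypothetical):

#include <stdint.h>
#include <string.h>

/* What LDFIRST+LDREST achieve together: one unaligned word read done
 * as two partial accesses. In portable C we let memcpy stand in for
 * the lwl/lwr pair; the compiler emits equivalent partial loads. */
static inline uint32_t unaligned_load32(const void *p)
{
        uint32_t v;
        memcpy(&v, p, sizeof(v));
        return v;
}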
@@ -430,7 +430,7 @@ FEXPORT(csum_partial_copy_nocheck)
         * src and dst are aligned; need to compute rem
         */
.Lboth_aligned:
        SRL     t0, len, LOG_NBYTES+3   # +3 for 8 units/iter
        beqz    t0, .Lcleanup_both_aligned # len < 8*NBYTES
         nop
        SUB     len, 8*NBYTES           # subtract here for bgez loop
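A worked instance of the shift above, assuming 32-bit units (so NBYTES = 4
and LOG_NBYTES = 2): each unrolled pass consumes 8 units = 32 bytes, so
the iteration count is len >> 5, and the beqz catches len < 32. In C:

/* Hypothetical constants for illustration: 32-bit units. */
enum { LOG_NBYTES = 2, NBYTES = 1 << LOG_NBYTES };

static unsigned long big_chunk_iters(unsigned long len)
{
        /* SRL t0, len, LOG_NBYTES+3: len / (8 * NBYTES), here len / 32 */
        return len >> (LOG_NBYTES + 3);
}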
@@ -518,7 +518,7 @@ EXC(    STORE   t0, 0(dst),             .Ls_exc)
        /*
         * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
         * A loop would do only a byte at a time with possible branch
         * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
         * because can't assume read-access to dst. Instead, use
         * STREST dst, which doesn't require read access to dst.
         *
@@ -532,7 +532,7 @@ EXC(    STORE   t0, 0(dst),             .Ls_exc)
        li      bits, 8*NBYTES
        SLL     rem, len, 3     # rem = number of bits to keep
EXC(    LOAD    t0, 0(src),             .Ll_exc)
        SUB     bits, bits, rem # bits = number of bits to discard
        SHIFT_DISCARD t0, t0, bits
EXC(    STREST  t0, -1(t1),             .Ls_exc)
        SHIFT_DISCARD_REVERT t0, t0, bits
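Worked through with NBYTES = 4 and a 3-byte tail: rem = 24 bits to keep,
bits = 32 - 24 = 8 bits to discard; SHIFT_DISCARD shifts those 8 bits out
so that STREST (swl or swr, by endianness) writes exactly 3 bytes at dst
without ever reading dst. The bookkeeping, sketched in C (names are
illustrative):

/* Bit bookkeeping of the tail store above, assuming NBYTES = 4. */
static unsigned int tail_discard_bits(unsigned int len_bytes)
{
        unsigned int rem  = len_bytes * 8;      /* SLL rem, len, 3     */
        unsigned int bits = 8 * 4 - rem;        /* SUB bits, bits, rem */
        return bits;    /* shifted away by SHIFT_DISCARD before STREST */
}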
@@ -551,7 +551,7 @@ EXC(    STREST  t0, -1(t1),             .Ls_exc)
         * Set match = (src and dst have same alignment)
         */
#define match rem
EXC(    LDFIRST t3, FIRST(0)(src),      .Ll_exc)
        ADD     t2, zero, NBYTES
EXC(    LDREST  t3, REST(0)(src),       .Ll_exc_copy)
        SUB     t2, t2, t1      # t2 = number of bytes copied
@@ -568,9 +568,9 @@ EXC(    STFIRST t3, FIRST(0)(dst),      .Ls_exc)
        ADD     src, src, t2

.Lsrc_unaligned_dst_aligned:
        SRL     t0, len, LOG_NBYTES+2   # +2 for 4 units/iter
        beqz    t0, .Lcleanup_src_unaligned
         and    rem, len, (4*NBYTES-1)  # rem = len % 4*NBYTES
1:
        /*
         * Avoid consecutive LD*'s to the same register since some mips
@@ -578,13 +578,13 @@ EXC(    STFIRST t3, FIRST(0)(dst),      .Ls_exc)
         * It's OK to load FIRST(N+1) before REST(N) because the two addresses
         * are to the same unit (unless src is aligned, but it's not).
         */
EXC(    LDFIRST t0, FIRST(0)(src),      .Ll_exc)
EXC(    LDFIRST t1, FIRST(1)(src),      .Ll_exc_copy)
        SUB     len, len, 4*NBYTES
EXC(    LDREST  t0, REST(0)(src),       .Ll_exc_copy)
EXC(    LDREST  t1, REST(1)(src),       .Ll_exc_copy)
EXC(    LDFIRST t2, FIRST(2)(src),      .Ll_exc_copy)
EXC(    LDFIRST t3, FIRST(3)(src),      .Ll_exc_copy)
EXC(    LDREST  t2, REST(2)(src),       .Ll_exc_copy)
EXC(    LDREST  t3, REST(3)(src),       .Ll_exc_copy)
        ADD     src, src, 4*NBYTES
@@ -634,7 +634,7 @@ EXC(    STORE   t0, 0(dst),     .Ls_exc)
#define SHIFT_INC -8
#endif
        move    t2, zero        # partial word
        li      t3, SHIFT_START # shift
/* use .Ll_exc_copy here to return correct sum on fault */
#define COPY_BYTE(N)                    \
EXC(    lbu     t0, N(src), .Ll_exc_copy);      \
@@ -642,7 +642,7 @@ EXC(    lbu     t0, N(src), .Ll_exc_copy);      \
EXC(    sb      t0, N(dst), .Ls_exc);   \
        SLLV    t0, t0, t3;             \
        addu    t3, SHIFT_INC;          \
        beqz    len, .Lcopy_bytes_done; \
         or     t2, t0

        COPY_BYTE(0)
diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c
index 288f7954988d..44713af15a62 100644
--- a/arch/mips/lib/delay.c
+++ b/arch/mips/lib/delay.c
@@ -36,7 +36,7 @@ EXPORT_SYMBOL(__delay);
 * Division by multiplication: you don't have to worry about
 * loss of precision.
 *
 * Use only for very small delays ( < 1 msec). Should probably use a
 * lookup table, really, as the multiplications take much too long with
 * short delays. This is a "reasonable" implementation, though (and the
 * first constant multiplications gets optimized away if the delay is
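The "division by multiplication" the comment describes replaces the divide
in loops = usecs * lpj * HZ / 10^6 with a multiply by a precomputed 32.32
fixed-point reciprocal; 0x10c7 ~= 2^32 / 10^6 is the constant the kernel
classically uses. A hedged sketch:

/* Sketch of reciprocal-multiply delay scaling; the constant and shift
 * are the classic kernel values, the rest is illustrative. */
static unsigned long udelay_loops(unsigned long usecs,
                                  unsigned long lpj, unsigned long hz)
{
        /* 0x10c7 ~ 2^32 / 10^6, so this approximates
         * usecs * lpj * hz / 10^6 without a division instruction. */
        return (unsigned long)(((unsigned long long)usecs * 0x10c7ULL
                                * hz * lpj) >> 32);
}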
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index a99c1d3fc567..32b9f21bfd85 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -63,7 +63,7 @@ static void dump_tlb(int first, int last)
                tlb_read();
                BARRIER();
                pagemask = read_c0_pagemask();
                entryhi  = read_c0_entryhi();
                entrylo0 = read_c0_entrylo0();
                entrylo1 = read_c0_entrylo1();

diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 65192c06781e..c5c40dad0bbf 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -156,15 +156,15 @@

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define LDREST  LOADL
#define STFIRST STORER
#define STREST  STOREL
#define SHIFT_DISCARD SLLV
#else
#define LDFIRST LOADL
#define LDREST  LOADR
#define STFIRST STOREL
#define STREST  STORER
#define SHIFT_DISCARD SRLV
#endif

@@ -235,7 +235,7 @@ __copy_user_common:
         * src and dst are aligned; need to compute rem
         */
.Lboth_aligned:
        SRL     t0, len, LOG_NBYTES+3   # +3 for 8 units/iter
        beqz    t0, .Lcleanup_both_aligned # len < 8*NBYTES
        and     rem, len, (8*NBYTES-1)  # rem = len % (8*NBYTES)
        PREF(   0, 3*32(src) )
@@ -313,7 +313,7 @@ EXC(    STORE   t0, 0(dst),             .Ls_exc_p1u)
        /*
         * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
         * A loop would do only a byte at a time with possible branch
         * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
         * because can't assume read-access to dst. Instead, use
         * STREST dst, which doesn't require read access to dst.
         *
@@ -327,7 +327,7 @@ EXC(    STORE   t0, 0(dst),             .Ls_exc_p1u)
        li      bits, 8*NBYTES
        SLL     rem, len, 3     # rem = number of bits to keep
EXC(    LOAD    t0, 0(src),             .Ll_exc)
        SUB     bits, bits, rem # bits = number of bits to discard
        SHIFT_DISCARD t0, t0, bits
EXC(    STREST  t0, -1(t1),             .Ls_exc)
        jr      ra
@@ -343,7 +343,7 @@ EXC(    STREST  t0, -1(t1),             .Ls_exc)
         * Set match = (src and dst have same alignment)
         */
#define match rem
EXC(    LDFIRST t3, FIRST(0)(src),      .Ll_exc)
        ADD     t2, zero, NBYTES
EXC(    LDREST  t3, REST(0)(src),       .Ll_exc_copy)
        SUB     t2, t2, t1      # t2 = number of bytes copied
@@ -357,10 +357,10 @@ EXC(    STFIRST t3, FIRST(0)(dst),      .Ls_exc)
        ADD     src, src, t2

.Lsrc_unaligned_dst_aligned:
        SRL     t0, len, LOG_NBYTES+2   # +2 for 4 units/iter
        PREF(   0, 3*32(src) )
        beqz    t0, .Lcleanup_src_unaligned
        and     rem, len, (4*NBYTES-1)  # rem = len % 4*NBYTES
        PREF(   1, 3*32(dst) )
1:
        /*
@@ -370,13 +370,13 @@ EXC(    STFIRST t3, FIRST(0)(dst),      .Ls_exc)
         * are to the same unit (unless src is aligned, but it's not).
         */
        R10KCBARRIER(0(ra))
EXC(    LDFIRST t0, FIRST(0)(src),      .Ll_exc)
EXC(    LDFIRST t1, FIRST(1)(src),      .Ll_exc_copy)
        SUB     len, len, 4*NBYTES
EXC(    LDREST  t0, REST(0)(src),       .Ll_exc_copy)
EXC(    LDREST  t1, REST(1)(src),       .Ll_exc_copy)
EXC(    LDFIRST t2, FIRST(2)(src),      .Ll_exc_copy)
EXC(    LDFIRST t3, FIRST(3)(src),      .Ll_exc_copy)
EXC(    LDREST  t2, REST(2)(src),       .Ll_exc_copy)
EXC(    LDREST  t3, REST(3)(src),       .Ll_exc_copy)
        PREF(   0, 9*32(src) )          # 0 is PREF_LOAD  (not streamed)
@@ -388,7 +388,7 @@ EXC(    STORE   t0, UNIT(0)(dst),       .Ls_exc_p4u)
EXC(    STORE   t1, UNIT(1)(dst),       .Ls_exc_p3u)
EXC(    STORE   t2, UNIT(2)(dst),       .Ls_exc_p2u)
EXC(    STORE   t3, UNIT(3)(dst),       .Ls_exc_p1u)
        PREF(   1, 9*32(dst) )          # 1 is PREF_STORE (not streamed)
        .set    reorder                 /* DADDI_WAR */
        ADD     dst, dst, 4*NBYTES
        bne     len, rem, 1b
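The first PREF argument is the hint: 0 requests data for a load, 1 for a
store, and the 9*32 offset keeps the prefetch several cache lines ahead
of the copy loop. In C the equivalent is the GCC builtin, with the
offsets kept from the asm above:

/* C equivalent of the two PREF hints above (__builtin_prefetch's
 * second argument is 0 for read, 1 for write). */
void prefetch_ahead(const unsigned char *src, unsigned char *dst)
{
        __builtin_prefetch(src + 9 * 32, 0);    /* PREF(0, ...) = load  */
        __builtin_prefetch(dst + 9 * 32, 1);    /* PREF(1, ...) = store */
}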
@@ -502,7 +502,7 @@ EXC(    lb      t1, 0(src), .Ll_exc)


#define SEXC(n)                                 \
        .set    reorder;                /* DADDI_WAR */ \
.Ls_exc_p ## n ## u:                            \
        ADD     len, len, n*NBYTES;             \
        jr      ra;                             \
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 606c8a9efe3b..053d3b0b0317 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -21,8 +21,8 @@

#define EX(insn,reg,addr,handler)                       \
9:      insn    reg, addr;                              \
        .section __ex_table,"a";                        \
        PTR     9b, handler;                            \
        .previous

        .macro  f_fill64 dst, offset, val, fixup
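The EX() macro records a (faulting-instruction, handler) pair in the
__ex_table section; on a bad user access the page-fault path looks the
faulting PC up there and resumes at the handler instead of oopsing. A
simplified model of that lookup, with field and function names chosen for
illustration rather than taken from the kernel:

/* Simplified fixup search; the real table is sorted and binary
 * searched, and MIPS's entry layout differs in detail. */
struct ex_entry { unsigned long insn; unsigned long fixup; };

static unsigned long find_fixup(const struct ex_entry *tbl, int n,
                                unsigned long fault_pc)
{
        for (int i = 0; i < n; i++)
                if (tbl[i].insn == fault_pc)
                        return tbl[i].fixup;  /* resume here, don't oops */
        return 0;                             /* no fixup: genuine fault */
}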
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 9cee907975ae..91615c2ef0cf 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -30,7 +30,7 @@ static void dump_tlb(int first, int last)
                        "tlbr\n\t"
                        "nop\n\t"
                        ".set\treorder");
                entryhi  = read_c0_entryhi();
                entrylo0 = read_c0_entrylo0();

                /* Unused entries have a virtual address of KSEG0. */
diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S
index 7201b2ff08c8..bad539487503 100644
--- a/arch/mips/lib/strncpy_user.S
+++ b/arch/mips/lib/strncpy_user.S
@@ -23,7 +23,7 @@

/*
 * Ugly special case have to check: we might get passed a user space
 * pointer which wraps into the kernel space. We don't deal with that. If
 * it happens at most some bytes of the exceptions handlers will be copied.
 */

diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
index 64457162f7e0..beea03c8c0ce 100644
--- a/arch/mips/lib/strnlen_user.S
+++ b/arch/mips/lib/strnlen_user.S
@@ -21,9 +21,9 @@
 * maximum of a1 or 0 in case of error.
 *
 * Note: for performance reasons we deliberately accept that a user may
 * make strlen_user and strnlen_user access the first few KSEG0
 * bytes. There's nothing secret there. On 64-bit accessing beyond
 * the maximum is a tad hairier ...
 */
LEAF(__strnlen_user_asm)
        LONG_L  v0, TI_ADDR_LIMIT($28)  # pointer ok?
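The TI_ADDR_LIMIT load is the bound check the comment alludes to: the
scan is clamped at the thread's address limit, so it may run slightly
past the string but never leaves the permitted segment. A hedged C
rendering of that clamping (not the kernel's code):

/* Illustrative clamp-then-scan; counts bytes before a NUL, up to n,
 * never dereferencing at or past addr_limit. */
static long strnlen_clamped(const char *s, unsigned long n,
                            unsigned long addr_limit)
{
        unsigned long p = (unsigned long)s, end = p + n;

        if (end > addr_limit)
                end = addr_limit;       /* pointer ok? clamp to limit */
        while (p < end && *(const char *)p != '\0')
                p++;
        return (long)(p - (unsigned long)s);
}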
diff --git a/arch/mips/lib/uncached.c b/arch/mips/lib/uncached.c
index a6d1c77034d5..65e3dfc4e585 100644
--- a/arch/mips/lib/uncached.c
+++ b/arch/mips/lib/uncached.c
@@ -4,7 +4,7 @@
 * for more details.
 *
 * Copyright (C) 2005 Thiemo Seufer
 * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
 *      Author: Maciej W. Rozycki <macro@mips.com>
 */
