diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-03-02 10:44:16 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-03-02 10:44:16 -0500 |
commit | aebb2afd5420c860b7fbc3882a323ef1247fbf16 (patch) | |
tree | 05ee0efcebca5ec421de44de7a6d6271088c64a8 /arch/mips/lib/memcpy.S | |
parent | 8eae508b7c6ff502a71d0293b69e97c5505d5840 (diff) | |
parent | edb15d83a875a1f4b1576188844db5c330c3267d (diff) |
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
Pull MIPS updates from Ralf Baechle:
o Add basic support for the Mediatek/Ralink Wireless SoC family.
o The Qualcomm Atheros platform is extended by support for the new
QCA955X SoC series as well as a bunch of patches that get the code
ready for OF support.
o Lantiq and BCM47XX platform have a few improvements and bug fixes.
o MIPS has sent a few patches that get the kernel ready for the
upcoming microMIPS support.
o The rest of the series is made up of small bug fixes and cleanups
that relate to various parts of the MIPS code. The biggy in there is
a whitespace cleanup. After I was sent another set of whitespace
cleanup patches I decided it was the time to clean the whitespace
"issues" for once, and that touches many files below arch/mips/.
Fix up silly conflicts, mostly due to whitespace cleanups.
* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (105 commits)
MIPS: Quit exporting kernel internel break codes to uapi/asm/break.h
MIPS: remove broken conditional inside vpe loader code
MIPS: SMTC: fix implicit declaration of set_vi_handler
MIPS: early_printk: drop __init annotations
MIPS: Probe for and report hardware virtualization support.
MIPS: ath79: add support for the Qualcomm Atheros AP136-010 board
MIPS: ath79: add USB controller registration code for the QCA955X SoCs
MIPS: ath79: add PCI controller registration code for the QCA955X SoCs
MIPS: ath79: add WMAC registration code for the QCA955X SoCs
MIPS: ath79: register UART for the QCA955X SoCs
MIPS: ath79: add QCA955X specific glue to ath79_device_reset_{set, clear}
MIPS: ath79: add GPIO setup code for the QCA955X SoCs
MIPS: ath79: add IRQ handling code for the QCA955X SoCs
MIPS: ath79: add clock setup code for the QCA955X SoCs
MIPS: ath79: add SoC detection code for the QCA955X SoCs
MIPS: ath79: add early printk support for the QCA955X SoCs
MIPS: ath79: fix WMAC IRQ resource assignment
mips: reserve elfcorehdr
mips: Make sure kernel memory is in iomem
MIPS: ath79: use dynamically allocated USB platform devices
...
Diffstat (limited to 'arch/mips/lib/memcpy.S')
-rw-r--r-- | arch/mips/lib/memcpy.S | 34 |
1 file changed, 17 insertions, 17 deletions
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S index 65192c06781e..c5c40dad0bbf 100644 --- a/arch/mips/lib/memcpy.S +++ b/arch/mips/lib/memcpy.S | |||
@@ -156,15 +156,15 @@ | |||
156 | 156 | ||
157 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | 157 | #ifdef CONFIG_CPU_LITTLE_ENDIAN |
158 | #define LDFIRST LOADR | 158 | #define LDFIRST LOADR |
159 | #define LDREST LOADL | 159 | #define LDREST LOADL |
160 | #define STFIRST STORER | 160 | #define STFIRST STORER |
161 | #define STREST STOREL | 161 | #define STREST STOREL |
162 | #define SHIFT_DISCARD SLLV | 162 | #define SHIFT_DISCARD SLLV |
163 | #else | 163 | #else |
164 | #define LDFIRST LOADL | 164 | #define LDFIRST LOADL |
165 | #define LDREST LOADR | 165 | #define LDREST LOADR |
166 | #define STFIRST STOREL | 166 | #define STFIRST STOREL |
167 | #define STREST STORER | 167 | #define STREST STORER |
168 | #define SHIFT_DISCARD SRLV | 168 | #define SHIFT_DISCARD SRLV |
169 | #endif | 169 | #endif |
170 | 170 | ||
@@ -235,7 +235,7 @@ __copy_user_common: | |||
235 | * src and dst are aligned; need to compute rem | 235 | * src and dst are aligned; need to compute rem |
236 | */ | 236 | */ |
237 | .Lboth_aligned: | 237 | .Lboth_aligned: |
238 | SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter | 238 | SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter |
239 | beqz t0, .Lcleanup_both_aligned # len < 8*NBYTES | 239 | beqz t0, .Lcleanup_both_aligned # len < 8*NBYTES |
240 | and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES) | 240 | and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES) |
241 | PREF( 0, 3*32(src) ) | 241 | PREF( 0, 3*32(src) ) |
@@ -313,7 +313,7 @@ EXC( STORE t0, 0(dst), .Ls_exc_p1u) | |||
313 | /* | 313 | /* |
314 | * src and dst are aligned, need to copy rem bytes (rem < NBYTES) | 314 | * src and dst are aligned, need to copy rem bytes (rem < NBYTES) |
315 | * A loop would do only a byte at a time with possible branch | 315 | * A loop would do only a byte at a time with possible branch |
316 | * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE | 316 | * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE |
317 | * because can't assume read-access to dst. Instead, use | 317 | * because can't assume read-access to dst. Instead, use |
318 | * STREST dst, which doesn't require read access to dst. | 318 | * STREST dst, which doesn't require read access to dst. |
319 | * | 319 | * |
@@ -327,7 +327,7 @@ EXC( STORE t0, 0(dst), .Ls_exc_p1u) | |||
327 | li bits, 8*NBYTES | 327 | li bits, 8*NBYTES |
328 | SLL rem, len, 3 # rem = number of bits to keep | 328 | SLL rem, len, 3 # rem = number of bits to keep |
329 | EXC( LOAD t0, 0(src), .Ll_exc) | 329 | EXC( LOAD t0, 0(src), .Ll_exc) |
330 | SUB bits, bits, rem # bits = number of bits to discard | 330 | SUB bits, bits, rem # bits = number of bits to discard |
331 | SHIFT_DISCARD t0, t0, bits | 331 | SHIFT_DISCARD t0, t0, bits |
332 | EXC( STREST t0, -1(t1), .Ls_exc) | 332 | EXC( STREST t0, -1(t1), .Ls_exc) |
333 | jr ra | 333 | jr ra |
@@ -343,7 +343,7 @@ EXC( STREST t0, -1(t1), .Ls_exc) | |||
343 | * Set match = (src and dst have same alignment) | 343 | * Set match = (src and dst have same alignment) |
344 | */ | 344 | */ |
345 | #define match rem | 345 | #define match rem |
346 | EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc) | 346 | EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc) |
347 | ADD t2, zero, NBYTES | 347 | ADD t2, zero, NBYTES |
348 | EXC( LDREST t3, REST(0)(src), .Ll_exc_copy) | 348 | EXC( LDREST t3, REST(0)(src), .Ll_exc_copy) |
349 | SUB t2, t2, t1 # t2 = number of bytes copied | 349 | SUB t2, t2, t1 # t2 = number of bytes copied |
@@ -357,10 +357,10 @@ EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc) | |||
357 | ADD src, src, t2 | 357 | ADD src, src, t2 |
358 | 358 | ||
359 | .Lsrc_unaligned_dst_aligned: | 359 | .Lsrc_unaligned_dst_aligned: |
360 | SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter | 360 | SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter |
361 | PREF( 0, 3*32(src) ) | 361 | PREF( 0, 3*32(src) ) |
362 | beqz t0, .Lcleanup_src_unaligned | 362 | beqz t0, .Lcleanup_src_unaligned |
363 | and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES | 363 | and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES |
364 | PREF( 1, 3*32(dst) ) | 364 | PREF( 1, 3*32(dst) ) |
365 | 1: | 365 | 1: |
366 | /* | 366 | /* |
@@ -370,13 +370,13 @@ EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc) | |||
370 | * are to the same unit (unless src is aligned, but it's not). | 370 | * are to the same unit (unless src is aligned, but it's not). |
371 | */ | 371 | */ |
372 | R10KCBARRIER(0(ra)) | 372 | R10KCBARRIER(0(ra)) |
373 | EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc) | 373 | EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc) |
374 | EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy) | 374 | EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy) |
375 | SUB len, len, 4*NBYTES | 375 | SUB len, len, 4*NBYTES |
376 | EXC( LDREST t0, REST(0)(src), .Ll_exc_copy) | 376 | EXC( LDREST t0, REST(0)(src), .Ll_exc_copy) |
377 | EXC( LDREST t1, REST(1)(src), .Ll_exc_copy) | 377 | EXC( LDREST t1, REST(1)(src), .Ll_exc_copy) |
378 | EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy) | 378 | EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy) |
379 | EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy) | 379 | EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy) |
380 | EXC( LDREST t2, REST(2)(src), .Ll_exc_copy) | 380 | EXC( LDREST t2, REST(2)(src), .Ll_exc_copy) |
381 | EXC( LDREST t3, REST(3)(src), .Ll_exc_copy) | 381 | EXC( LDREST t3, REST(3)(src), .Ll_exc_copy) |
382 | PREF( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed) | 382 | PREF( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed) |
@@ -388,7 +388,7 @@ EXC( STORE t0, UNIT(0)(dst), .Ls_exc_p4u) | |||
388 | EXC( STORE t1, UNIT(1)(dst), .Ls_exc_p3u) | 388 | EXC( STORE t1, UNIT(1)(dst), .Ls_exc_p3u) |
389 | EXC( STORE t2, UNIT(2)(dst), .Ls_exc_p2u) | 389 | EXC( STORE t2, UNIT(2)(dst), .Ls_exc_p2u) |
390 | EXC( STORE t3, UNIT(3)(dst), .Ls_exc_p1u) | 390 | EXC( STORE t3, UNIT(3)(dst), .Ls_exc_p1u) |
391 | PREF( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed) | 391 | PREF( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed) |
392 | .set reorder /* DADDI_WAR */ | 392 | .set reorder /* DADDI_WAR */ |
393 | ADD dst, dst, 4*NBYTES | 393 | ADD dst, dst, 4*NBYTES |
394 | bne len, rem, 1b | 394 | bne len, rem, 1b |
@@ -502,7 +502,7 @@ EXC( lb t1, 0(src), .Ll_exc) | |||
502 | 502 | ||
503 | 503 | ||
504 | #define SEXC(n) \ | 504 | #define SEXC(n) \ |
505 | .set reorder; /* DADDI_WAR */ \ | 505 | .set reorder; /* DADDI_WAR */ \ |
506 | .Ls_exc_p ## n ## u: \ | 506 | .Ls_exc_p ## n ## u: \ |
507 | ADD len, len, n*NBYTES; \ | 507 | ADD len, len, n*NBYTES; \ |
508 | jr ra; \ | 508 | jr ra; \ |