about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorSteven J. Hill <Steven.Hill@imgtec.com>2013-03-25 14:22:59 -0400
committerRalf Baechle <ralf@linux-mips.org>2013-05-09 11:55:19 -0400
commit1658f914ff91c3bf572d5ddae3773720f6803d20 (patch)
tree6a7e13a9ac7d7d978e41eb7a8e47b769b4f44290 /arch
parent01be057b33f52f094bff3a79a93d9ca99e27483d (diff)
MIPS: microMIPS: Disable LL/SC and fix linker bug.
Partially revert commit e0c14a260d66ba35935600d6435940a566fe806b and turn off LL/SC when building a pure microMIPS kernel. This is a temporary fix until the cmpxchg assembly macro functions are re-written to not use the HI/LO registers in address calculations. Also add .insn in selected user access functions which would otherwise produce ISA mode jump incompatibilities. This is also a temporary fix.

Signed-off-by: Steven J. Hill <Steven.Hill@imgtec.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h4
-rw-r--r--arch/mips/include/asm/uaccess.h14
2 files changed, 16 insertions, 2 deletions
diff --git a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
index 193c0912d38e..bfbd7035d4c5 100644
--- a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
@@ -28,7 +28,11 @@
28/* #define cpu_has_prefetch ? */ 28/* #define cpu_has_prefetch ? */
29#define cpu_has_mcheck 1 29#define cpu_has_mcheck 1
30/* #define cpu_has_ejtag ? */ 30/* #define cpu_has_ejtag ? */
31#ifdef CONFIG_CPU_MICROMIPS
32#define cpu_has_llsc 0
33#else
31#define cpu_has_llsc 1 34#define cpu_has_llsc 1
35#endif
32/* #define cpu_has_vtag_icache ? */ 36/* #define cpu_has_vtag_icache ? */
33/* #define cpu_has_dc_aliases ? */ 37/* #define cpu_has_dc_aliases ? */
34/* #define cpu_has_ic_fills_f_dc ? */ 38/* #define cpu_has_ic_fills_f_dc ? */
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index bd87e36bf26a..1c9edd63dda7 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -261,6 +261,7 @@ do { \
261 __asm__ __volatile__( \ 261 __asm__ __volatile__( \
262 "1: " insn " %1, %3 \n" \ 262 "1: " insn " %1, %3 \n" \
263 "2: \n" \ 263 "2: \n" \
264 " .insn \n" \
264 " .section .fixup,\"ax\" \n" \ 265 " .section .fixup,\"ax\" \n" \
265 "3: li %0, %4 \n" \ 266 "3: li %0, %4 \n" \
266 " j 2b \n" \ 267 " j 2b \n" \
@@ -287,7 +288,9 @@ do { \
287 __asm__ __volatile__( \ 288 __asm__ __volatile__( \
288 "1: lw %1, (%3) \n" \ 289 "1: lw %1, (%3) \n" \
289 "2: lw %D1, 4(%3) \n" \ 290 "2: lw %D1, 4(%3) \n" \
290 "3: .section .fixup,\"ax\" \n" \ 291 "3: \n" \
292 " .insn \n" \
293 " .section .fixup,\"ax\" \n" \
291 "4: li %0, %4 \n" \ 294 "4: li %0, %4 \n" \
292 " move %1, $0 \n" \ 295 " move %1, $0 \n" \
293 " move %D1, $0 \n" \ 296 " move %D1, $0 \n" \
@@ -355,6 +358,7 @@ do { \
355 __asm__ __volatile__( \ 358 __asm__ __volatile__( \
356 "1: " insn " %z2, %3 # __put_user_asm\n" \ 359 "1: " insn " %z2, %3 # __put_user_asm\n" \
357 "2: \n" \ 360 "2: \n" \
361 " .insn \n" \
358 " .section .fixup,\"ax\" \n" \ 362 " .section .fixup,\"ax\" \n" \
359 "3: li %0, %4 \n" \ 363 "3: li %0, %4 \n" \
360 " j 2b \n" \ 364 " j 2b \n" \
@@ -373,6 +377,7 @@ do { \
373 "1: sw %2, (%3) # __put_user_asm_ll32 \n" \ 377 "1: sw %2, (%3) # __put_user_asm_ll32 \n" \
374 "2: sw %D2, 4(%3) \n" \ 378 "2: sw %D2, 4(%3) \n" \
375 "3: \n" \ 379 "3: \n" \
380 " .insn \n" \
376 " .section .fixup,\"ax\" \n" \ 381 " .section .fixup,\"ax\" \n" \
377 "4: li %0, %4 \n" \ 382 "4: li %0, %4 \n" \
378 " j 3b \n" \ 383 " j 3b \n" \
@@ -524,6 +529,7 @@ do { \
524 __asm__ __volatile__( \ 529 __asm__ __volatile__( \
525 "1: " insn " %1, %3 \n" \ 530 "1: " insn " %1, %3 \n" \
526 "2: \n" \ 531 "2: \n" \
532 " .insn \n" \
527 " .section .fixup,\"ax\" \n" \ 533 " .section .fixup,\"ax\" \n" \
528 "3: li %0, %4 \n" \ 534 "3: li %0, %4 \n" \
529 " j 2b \n" \ 535 " j 2b \n" \
@@ -549,7 +555,9 @@ do { \
549 "1: ulw %1, (%3) \n" \ 555 "1: ulw %1, (%3) \n" \
550 "2: ulw %D1, 4(%3) \n" \ 556 "2: ulw %D1, 4(%3) \n" \
551 " move %0, $0 \n" \ 557 " move %0, $0 \n" \
552 "3: .section .fixup,\"ax\" \n" \ 558 "3: \n" \
559 " .insn \n" \
560 " .section .fixup,\"ax\" \n" \
553 "4: li %0, %4 \n" \ 561 "4: li %0, %4 \n" \
554 " move %1, $0 \n" \ 562 " move %1, $0 \n" \
555 " move %D1, $0 \n" \ 563 " move %D1, $0 \n" \
@@ -616,6 +624,7 @@ do { \
616 __asm__ __volatile__( \ 624 __asm__ __volatile__( \
617 "1: " insn " %z2, %3 # __put_user_unaligned_asm\n" \ 625 "1: " insn " %z2, %3 # __put_user_unaligned_asm\n" \
618 "2: \n" \ 626 "2: \n" \
627 " .insn \n" \
619 " .section .fixup,\"ax\" \n" \ 628 " .section .fixup,\"ax\" \n" \
620 "3: li %0, %4 \n" \ 629 "3: li %0, %4 \n" \
621 " j 2b \n" \ 630 " j 2b \n" \
@@ -634,6 +643,7 @@ do { \
634 "1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \ 643 "1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \
635 "2: sw %D2, 4(%3) \n" \ 644 "2: sw %D2, 4(%3) \n" \
636 "3: \n" \ 645 "3: \n" \
646 " .insn \n" \
637 " .section .fixup,\"ax\" \n" \ 647 " .section .fixup,\"ax\" \n" \
638 "4: li %0, %4 \n" \ 648 "4: li %0, %4 \n" \
639 " j 3b \n" \ 649 " j 3b \n" \