author     Linus Torvalds <torvalds@linux-foundation.org>  2018-08-23 17:23:08 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-08-23 17:23:08 -0400
commit     0c4b0f815f20304156f66d47d0c2a6e148f6ffaa (patch)
tree       06c62ea33b1ae6be9d4e34b1301a95407329c038
parent     2ab054fd1f88d7d22e6df7c34c41a2f9782c3f08 (diff)
parent     690d9163bf4b8563a2682e619f938e6a0443947f (diff)
Merge tag 'mips_4.19_2' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux
Pull MIPS fixes from Paul Burton:

 - Fix microMIPS build failures by adding a .insn directive to the
   barrier_before_unreachable() asm statement in order to convince the
   toolchain that the asm statement is a valid branch target rather than
   a bogus attempt to switch ISA.

 - Clean up our declarations of TLB functions that we overwrite with
   generated code in order to prevent the compiler making assumptions
   about alignment that cause microMIPS kernels built with GCC 7 & above
   to die early during boot.

 - Fix up a regression for MIPS32 kernels which slipped into the main
   MIPS pull for 4.19, causing CONFIG_32BIT=y kernels to contain
   inappropriate MIPS64 instructions.

 - Extend our existing workaround for MIPSr6 builds that end up using
   the __multi3 intrinsic to GCC 7 & below, rather than just GCC 7.

* tag 'mips_4.19_2' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux:
  MIPS: lib: Provide MIPS64r6 __multi3() for GCC < 7
  MIPS: Workaround GCC __builtin_unreachable reordering bug
  compiler.h: Allow arch-specific asm/compiler.h
  MIPS: Avoid move psuedo-instruction whilst using MIPS_ISA_LEVEL
  MIPS: Consistently declare TLB functions
  MIPS: Export tlbmiss_handler_setup_pgd near its definition
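As an illustration of the first fix above, the sketch below shows the shape of
code that triggers the GCC delay-slot reordering bug and where the workaround
barrier sits. The macro override is the one added to
arch/mips/include/asm/compiler.h in this diff; the classify() function is a
hypothetical example, not code from the kernel tree.

/* MIPS override from this series: a real (empty) instruction that GCC may
 * not reorder past and that the linker treats as code, so branches to it
 * remain valid on microMIPS. Only assembles for MIPS targets. */
#undef barrier_before_unreachable
#define barrier_before_unreachable() asm volatile(".insn")

/* Hypothetical example: GCC 4.6.3 through 7.3.0 could move instructions
 * from beyond an unreachable statement into earlier delay slots when the
 * unreachable statement is the only content of a switch case. Placing the
 * barrier before __builtin_unreachable() prevents that reordering. */
static int classify(int op, int x)
{
        switch (op) {
        case 0:
                return x + 1;
        default:
                barrier_before_unreachable();
                __builtin_unreachable();
        }
}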
-rw-r--r--  arch/Kconfig                               8
-rw-r--r--  arch/mips/Kconfig                          1
-rw-r--r--  arch/mips/include/asm/asm-prototypes.h     1
-rw-r--r--  arch/mips/include/asm/atomic.h             4
-rw-r--r--  arch/mips/include/asm/compiler.h          35
-rw-r--r--  arch/mips/include/asm/mmu_context.h        1
-rw-r--r--  arch/mips/include/asm/tlbex.h              9
-rw-r--r--  arch/mips/kernel/traps.c                   4
-rw-r--r--  arch/mips/lib/multi3.c                     6
-rw-r--r--  arch/mips/mm/tlb-funcs.S                   3
-rw-r--r--  arch/mips/mm/tlbex.c                     101
-rw-r--r--  include/linux/compiler_types.h            12
12 files changed, 116 insertions, 69 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 4426e9687d89..0a0ea5066fc0 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -841,6 +841,14 @@ config REFCOUNT_FULL
 	  against various use-after-free conditions that can be used in
 	  security flaw exploits.
 
+config HAVE_ARCH_COMPILER_H
+	bool
+	help
+	  An architecture can select this if it provides an
+	  asm/compiler.h header that should be included after
+	  linux/compiler-*.h in order to override macro definitions that those
+	  headers generally provide.
+
 config HAVE_ARCH_PREL32_RELOCATIONS
 	bool
 	help
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 2af13b162e5e..35511999156a 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -33,6 +33,7 @@ config MIPS
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_TIME_VSYSCALL
 	select HANDLE_DOMAIN_IRQ
+	select HAVE_ARCH_COMPILER_H
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS if MMU
diff --git a/arch/mips/include/asm/asm-prototypes.h b/arch/mips/include/asm/asm-prototypes.h
index 576f1a62dea9..f901ed043c71 100644
--- a/arch/mips/include/asm/asm-prototypes.h
+++ b/arch/mips/include/asm/asm-prototypes.h
@@ -5,3 +5,4 @@
 #include <asm-generic/asm-prototypes.h>
 #include <linux/uaccess.h>
 #include <asm/ftrace.h>
+#include <asm/mmu_context.h>
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 0269b3de8b51..d4ea7a5b60cf 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -122,8 +122,8 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
122 " " #asm_op " %0, %1, %3 \n" \ 122 " " #asm_op " %0, %1, %3 \n" \
123 " sc %0, %2 \n" \ 123 " sc %0, %2 \n" \
124 "\t" __scbeqz " %0, 1b \n" \ 124 "\t" __scbeqz " %0, 1b \n" \
125 " move %0, %1 \n" \
126 " .set mips0 \n" \ 125 " .set mips0 \n" \
126 " move %0, %1 \n" \
127 : "=&r" (result), "=&r" (temp), \ 127 : "=&r" (result), "=&r" (temp), \
128 "+" GCC_OFF_SMALL_ASM() (v->counter) \ 128 "+" GCC_OFF_SMALL_ASM() (v->counter) \
129 : "Ir" (i)); \ 129 : "Ir" (i)); \
@@ -190,9 +190,11 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 	__asm__ __volatile__(
 	"	.set	"MIPS_ISA_LEVEL"		\n"
 	"1:	ll	%1, %2		# atomic_sub_if_positive\n"
+	"	.set	mips0				\n"
 	"	subu	%0, %1, %3			\n"
 	"	move	%1, %0				\n"
 	"	bltz	%0, 1f				\n"
+	"	.set	"MIPS_ISA_LEVEL"		\n"
 	"	sc	%1, %2				\n"
 	"\t" __scbeqz "	%1, 1b				\n"
 	"1:						\n"
diff --git a/arch/mips/include/asm/compiler.h b/arch/mips/include/asm/compiler.h
index e081a265f422..cc2eb1b06050 100644
--- a/arch/mips/include/asm/compiler.h
+++ b/arch/mips/include/asm/compiler.h
@@ -8,6 +8,41 @@
 #ifndef _ASM_COMPILER_H
 #define _ASM_COMPILER_H
 
+/*
+ * With GCC 4.5 onwards we can use __builtin_unreachable to indicate to the
+ * compiler that a particular code path will never be hit. This allows it to be
+ * optimised out of the generated binary.
+ *
+ * Unfortunately at least GCC 4.6.3 through 7.3.0 inclusive suffer from a bug
+ * that can lead to instructions from beyond an unreachable statement being
+ * incorrectly reordered into earlier delay slots if the unreachable statement
+ * is the only content of a case in a switch statement. This can lead to
+ * seemingly random behaviour, such as invalid memory accesses from incorrectly
+ * reordered loads or stores. See this potential GCC fix for details:
+ *
+ *   https://gcc.gnu.org/ml/gcc-patches/2015-09/msg00360.html
+ *
+ * It is unclear whether GCC 8 onwards suffer from the same issue - nothing
+ * relevant is mentioned in GCC 8 release notes and nothing obviously relevant
+ * stands out in GCC commit logs, but these newer GCC versions generate very
+ * different code for the testcase which doesn't exhibit the bug.
+ *
+ * GCC also handles stack allocation suboptimally when calling noreturn
+ * functions or calling __builtin_unreachable():
+ *
+ *   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365
+ *
+ * We work around both of these issues by placing a volatile asm statement,
+ * which GCC is prevented from reordering past, prior to __builtin_unreachable
+ * calls.
+ *
+ * The .insn statement is required to ensure that any branches to the
+ * statement, which sadly must be kept due to the asm statement, are known to
+ * be branches to code and satisfy linker requirements for microMIPS kernels.
+ */
+#undef barrier_before_unreachable
+#define barrier_before_unreachable() asm volatile(".insn")
+
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
 #define GCC_IMM_ASM() "n"
 #define GCC_REG_ACCUM "$0"
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index b509371a6b0c..94414561de0e 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -32,6 +32,7 @@ do { \
 } while (0)
 
 extern void tlbmiss_handler_setup_pgd(unsigned long);
+extern char tlbmiss_handler_setup_pgd_end[];
 
 /* Note: This is also implemented with uasm in arch/mips/kvm/entry.c */
 #define TLBMISS_HANDLER_SETUP_PGD(pgd)					\
diff --git a/arch/mips/include/asm/tlbex.h b/arch/mips/include/asm/tlbex.h
index 4ed7ebed3bf1..6d97e23f30ab 100644
--- a/arch/mips/include/asm/tlbex.h
+++ b/arch/mips/include/asm/tlbex.h
@@ -24,4 +24,13 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 			   struct uasm_reloc **r,
 			   enum tlb_write_entry wmode);
 
+extern void handle_tlbl(void);
+extern char handle_tlbl_end[];
+
+extern void handle_tlbs(void);
+extern char handle_tlbs_end[];
+
+extern void handle_tlbm(void);
+extern char handle_tlbm_end[];
+
 #endif /* __ASM_TLBEX_H */
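The paired declarations above (a function for the entry point, a char array
for the end marker) let callers treat each generated handler as an opaque
region of code and compute its extent arithmetically. A hedged sketch of the
idea follows; handle_tlbl_slots() is a hypothetical helper, and the arithmetic
mirrors what dump_handler() and the bounds checks in arch/mips/mm/tlbex.c do
later in this diff.

#include <linux/types.h>
#include <asm/tlbex.h>

/* Number of 32-bit instruction slots spanned by the generated TLB load
 * handler, derived from the start/end symbol pair rather than from a u32
 * array declaration the compiler could make alignment assumptions about. */
static unsigned int handle_tlbl_slots(void)
{
        return (handle_tlbl_end - (char *)handle_tlbl) / sizeof(u32);
}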
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index f8871d5b7eb3..9dab0ed1b227 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -67,14 +67,12 @@
 #include <asm/mmu_context.h>
 #include <asm/types.h>
 #include <asm/stacktrace.h>
+#include <asm/tlbex.h>
 #include <asm/uasm.h>
 
 extern void check_wait(void);
 extern asmlinkage void rollback_handle_int(void);
 extern asmlinkage void handle_int(void);
-extern u32 handle_tlbl[];
-extern u32 handle_tlbs[];
-extern u32 handle_tlbm[];
 extern asmlinkage void handle_adel(void);
 extern asmlinkage void handle_ades(void);
 extern asmlinkage void handle_ibe(void);
diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c
index 111ad475aa0c..4c2483f410c2 100644
--- a/arch/mips/lib/multi3.c
+++ b/arch/mips/lib/multi3.c
@@ -4,12 +4,12 @@
 #include "libgcc.h"
 
 /*
- * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that
- * specific case only we'll implement it here.
+ * GCC 7 & older can suboptimally generate __multi3 calls for mips64r6, so for
+ * that specific case only we implement that intrinsic here.
  *
  * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
  */
-#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7)
+#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 8)
 
 /* multiply 64-bit values, low 64-bits returned */
 static inline long long notrace dmulu(long long a, long long b)
diff --git a/arch/mips/mm/tlb-funcs.S b/arch/mips/mm/tlb-funcs.S
index a5427c6e9757..00fef578c8cd 100644
--- a/arch/mips/mm/tlb-funcs.S
+++ b/arch/mips/mm/tlb-funcs.S
@@ -12,16 +12,17 @@
  * Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org>
  */
 #include <asm/asm.h>
+#include <asm/export.h>
 #include <asm/regdef.h>
 
 #define FASTPATH_SIZE	128
 
-EXPORT(tlbmiss_handler_setup_pgd_start)
 LEAF(tlbmiss_handler_setup_pgd)
 1:	j	1b		/* Dummy, will be replaced. */
 	.space	64
 END(tlbmiss_handler_setup_pgd)
 EXPORT(tlbmiss_handler_setup_pgd_end)
+EXPORT_SYMBOL_GPL(tlbmiss_handler_setup_pgd)
 
 LEAF(handle_tlbm)
 	.space		FASTPATH_SIZE * 4
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 49312a14cd17..067714291643 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -31,6 +31,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/cpu-type.h>
+#include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/war.h>
 #include <asm/uasm.h>
@@ -253,8 +254,10 @@ static void output_pgtable_bits_defines(void)
 	pr_debug("\n");
 }
 
-static inline void dump_handler(const char *symbol, const u32 *handler, int count)
+static inline void dump_handler(const char *symbol, const void *start, const void *end)
 {
+	unsigned int count = (end - start) / sizeof(u32);
+	const u32 *handler = start;
 	int i;
 
 	pr_debug("LEAF(%s)\n", symbol);
@@ -402,12 +405,6 @@ static void build_restore_work_registers(u32 **p)
  * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
  * we cannot do r3000 under these circumstances.
  *
- * Declare pgd_current here instead of including mmu_context.h to avoid type
- * conflicts for tlbmiss_handler_setup_pgd
- */
-extern unsigned long pgd_current[];
-
-/*
  * The R3000 TLB handler is simple.
  */
 static void build_r3000_tlb_refill_handler(void)
@@ -444,8 +441,7 @@ static void build_r3000_tlb_refill_handler(void)
 
 	memcpy((void *)ebase, tlb_handler, 0x80);
 	local_flush_icache_range(ebase, ebase + 0x80);
-
-	dump_handler("r3000_tlb_refill", (u32 *)ebase, 32);
+	dump_handler("r3000_tlb_refill", (u32 *)ebase, (u32 *)(ebase + 0x80));
 }
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
 
@@ -1465,8 +1461,7 @@ static void build_r4000_tlb_refill_handler(void)
 
 	memcpy((void *)ebase, final_handler, 0x100);
 	local_flush_icache_range(ebase, ebase + 0x100);
-
-	dump_handler("r4000_tlb_refill", (u32 *)ebase, 64);
+	dump_handler("r4000_tlb_refill", (u32 *)ebase, (u32 *)(ebase + 0x100));
 }
 
 static void setup_pw(void)
@@ -1568,31 +1563,21 @@ static void build_loongson3_tlb_refill_handler(void)
 	uasm_resolve_relocs(relocs, labels);
 	memcpy((void *)(ebase + 0x80), tlb_handler, 0x80);
 	local_flush_icache_range(ebase + 0x80, ebase + 0x100);
-	dump_handler("loongson3_tlb_refill", (u32 *)(ebase + 0x80), 32);
+	dump_handler("loongson3_tlb_refill",
+		     (u32 *)(ebase + 0x80), (u32 *)(ebase + 0x100));
 }
 
-extern u32 handle_tlbl[], handle_tlbl_end[];
-extern u32 handle_tlbs[], handle_tlbs_end[];
-extern u32 handle_tlbm[], handle_tlbm_end[];
-extern u32 tlbmiss_handler_setup_pgd_start[];
-extern u32 tlbmiss_handler_setup_pgd[];
-EXPORT_SYMBOL_GPL(tlbmiss_handler_setup_pgd);
-extern u32 tlbmiss_handler_setup_pgd_end[];
-
 static void build_setup_pgd(void)
 {
 	const int a0 = 4;
 	const int __maybe_unused a1 = 5;
 	const int __maybe_unused a2 = 6;
-	u32 *p = tlbmiss_handler_setup_pgd_start;
-	const int tlbmiss_handler_setup_pgd_size =
-		tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd_start;
+	u32 *p = (u32 *)msk_isa16_mode((ulong)tlbmiss_handler_setup_pgd);
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 	long pgdc = (long)pgd_current;
 #endif
 
-	memset(tlbmiss_handler_setup_pgd, 0, tlbmiss_handler_setup_pgd_size *
-	       sizeof(tlbmiss_handler_setup_pgd[0]));
+	memset(p, 0, tlbmiss_handler_setup_pgd_end - (char *)p);
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
 	pgd_reg = allocate_kscratch();
@@ -1645,15 +1630,15 @@ static void build_setup_pgd(void)
 	else
 		uasm_i_nop(&p);
 #endif
-	if (p >= tlbmiss_handler_setup_pgd_end)
+	if (p >= (u32 *)tlbmiss_handler_setup_pgd_end)
 		panic("tlbmiss_handler_setup_pgd space exceeded");
 
 	uasm_resolve_relocs(relocs, labels);
 	pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
-		 (unsigned int)(p - tlbmiss_handler_setup_pgd));
+		 (unsigned int)(p - (u32 *)tlbmiss_handler_setup_pgd));
 
 	dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd,
-		     tlbmiss_handler_setup_pgd_size);
+		     tlbmiss_handler_setup_pgd_end);
 }
 
 static void
@@ -1922,12 +1907,11 @@ build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
 
 static void build_r3000_tlb_load_handler(void)
 {
-	u32 *p = handle_tlbl;
-	const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
+	u32 *p = (u32 *)handle_tlbl;
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
 
-	memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0]));
+	memset(p, 0, handle_tlbl_end - (char *)p);
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
 
@@ -1941,24 +1925,23 @@ static void build_r3000_tlb_load_handler(void)
 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
 	uasm_i_nop(&p);
 
-	if (p >= handle_tlbl_end)
+	if (p >= (u32 *)handle_tlbl_end)
 		panic("TLB load handler fastpath space exceeded");
 
 	uasm_resolve_relocs(relocs, labels);
 	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
-		 (unsigned int)(p - handle_tlbl));
+		 (unsigned int)(p - (u32 *)handle_tlbl));
 
-	dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size);
+	dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_end);
 }
 
 static void build_r3000_tlb_store_handler(void)
 {
-	u32 *p = handle_tlbs;
-	const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
+	u32 *p = (u32 *)handle_tlbs;
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
 
-	memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0]));
+	memset(p, 0, handle_tlbs_end - (char *)p);
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
 
@@ -1972,24 +1955,23 @@ static void build_r3000_tlb_store_handler(void)
 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
 	uasm_i_nop(&p);
 
-	if (p >= handle_tlbs_end)
+	if (p >= (u32 *)handle_tlbs_end)
 		panic("TLB store handler fastpath space exceeded");
 
 	uasm_resolve_relocs(relocs, labels);
 	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
-		 (unsigned int)(p - handle_tlbs));
+		 (unsigned int)(p - (u32 *)handle_tlbs));
 
-	dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size);
+	dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_end);
 }
 
 static void build_r3000_tlb_modify_handler(void)
 {
-	u32 *p = handle_tlbm;
-	const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
+	u32 *p = (u32 *)handle_tlbm;
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
 
-	memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0]));
+	memset(p, 0, handle_tlbm_end - (char *)p);
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
 
@@ -2003,14 +1985,14 @@ static void build_r3000_tlb_modify_handler(void)
 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
 	uasm_i_nop(&p);
 
-	if (p >= handle_tlbm_end)
+	if (p >= (u32 *)handle_tlbm_end)
 		panic("TLB modify handler fastpath space exceeded");
 
 	uasm_resolve_relocs(relocs, labels);
 	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
-		 (unsigned int)(p - handle_tlbm));
+		 (unsigned int)(p - (u32 *)handle_tlbm));
 
-	dump_handler("r3000_tlb_modify", handle_tlbm, handle_tlbm_size);
+	dump_handler("r3000_tlb_modify", handle_tlbm, handle_tlbm_end);
 }
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
 
@@ -2102,12 +2084,11 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
 static void build_r4000_tlb_load_handler(void)
 {
 	u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbl);
-	const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
 	struct work_registers wr;
 
-	memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0]));
+	memset(p, 0, handle_tlbl_end - (char *)p);
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
 
@@ -2288,25 +2269,24 @@ static void build_r4000_tlb_load_handler(void)
 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
 	uasm_i_nop(&p);
 
-	if (p >= handle_tlbl_end)
+	if (p >= (u32 *)handle_tlbl_end)
 		panic("TLB load handler fastpath space exceeded");
 
 	uasm_resolve_relocs(relocs, labels);
 	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
-		 (unsigned int)(p - handle_tlbl));
+		 (unsigned int)(p - (u32 *)handle_tlbl));
 
-	dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size);
+	dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_end);
 }
 
 static void build_r4000_tlb_store_handler(void)
 {
 	u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbs);
-	const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
 	struct work_registers wr;
 
-	memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0]));
+	memset(p, 0, handle_tlbs_end - (char *)p);
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
 
@@ -2343,25 +2323,24 @@ static void build_r4000_tlb_store_handler(void)
 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
 	uasm_i_nop(&p);
 
-	if (p >= handle_tlbs_end)
+	if (p >= (u32 *)handle_tlbs_end)
 		panic("TLB store handler fastpath space exceeded");
 
 	uasm_resolve_relocs(relocs, labels);
 	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
-		 (unsigned int)(p - handle_tlbs));
+		 (unsigned int)(p - (u32 *)handle_tlbs));
 
-	dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size);
+	dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_end);
 }
 
 static void build_r4000_tlb_modify_handler(void)
 {
 	u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbm);
-	const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
 	struct work_registers wr;
 
-	memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0]));
+	memset(p, 0, handle_tlbm_end - (char *)p);
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
 
@@ -2399,14 +2378,14 @@ static void build_r4000_tlb_modify_handler(void)
 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
 	uasm_i_nop(&p);
 
-	if (p >= handle_tlbm_end)
+	if (p >= (u32 *)handle_tlbm_end)
 		panic("TLB modify handler fastpath space exceeded");
 
 	uasm_resolve_relocs(relocs, labels);
 	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
-		 (unsigned int)(p - handle_tlbm));
+		 (unsigned int)(p - (u32 *)handle_tlbm));
 
-	dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size);
+	dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_end);
 }
 
 static void flush_tlb_handlers(void)
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 90479a0f3986..3525c179698c 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -67,6 +67,18 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 #endif
 
 /*
+ * Some architectures need to provide custom definitions of macros provided
+ * by linux/compiler-*.h, and can do so using asm/compiler.h. We include that
+ * conditionally rather than using an asm-generic wrapper in order to avoid
+ * build failures if any C compilation, which will include this file via an
+ * -include argument in c_flags, occurs prior to the asm-generic wrappers being
+ * generated.
+ */
+#ifdef CONFIG_HAVE_ARCH_COMPILER_H
+#include <asm/compiler.h>
+#endif
+
+/*
  * Generic compiler-independent macros required for kernel
  * build go below this comment. Actual compiler/compiler version
  * specific implementations come from the above header files
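Read together, the Kconfig and compiler_types.h pieces form a small opt-in
chain. The condensed sketch below restates fragments from the hunks in this
diff (it is not new kernel code) to show how MIPS ends up overriding
barrier_before_unreachable():

/* arch/mips/Kconfig: the architecture opts in with
 * "select HAVE_ARCH_COMPILER_H". */

/* include/linux/compiler_types.h: the arch header is pulled in after
 * linux/compiler-*.h so it can override the macros those headers define. */
#ifdef CONFIG_HAVE_ARCH_COMPILER_H
#include <asm/compiler.h>
#endif

/* arch/mips/include/asm/compiler.h: the override itself. */
#undef barrier_before_unreachable
#define barrier_before_unreachable() asm volatile(".insn")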