author		Paul Mundt <lethal@linux-sh.org>	2010-11-03 23:46:19 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2010-11-03 23:46:19 -0400
commit		edc9a958fd31ef1d89f9eaee82b2a3882c8e34c9 (patch)
tree		bdc96edeb6a41ab72ae87daf46d3402aba633541
parent		e2fcf74f3d3dabe8591732cd37869a0cc88ed7a5 (diff)
sh: nommu: Support building without an uncached mapping.
Now that nommu selects 32BIT we run into the situation where SH-2A
supports an uncached identity mapping by way of the BSC, while the SH-2
does not. This provides stubs for the PC manglers and tidies up some of
the system*.h mess in the process.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
 arch/sh/include/asm/system.h    |  4 +---
 arch/sh/include/asm/system_32.h | 36 ------------
 arch/sh/include/asm/system_64.h |  3 ---
 arch/sh/include/asm/uncached.h  | 40 ++++++++++++
 4 files changed, 41 insertions(+), 42 deletions(-)
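For context, jump_to_uncached()/back_to_cached() bracket code that pokes at
the caches or TLB from the uncached mapping. Below is a minimal sketch of the
usual calling pattern, modeled loosely on the SH-4 I-cache flush; the CCR
address and the CCR_CACHE_ICI bit value are illustrative assumptions, not
definitions from this patch:

	#include <linux/irqflags.h>
	#include <asm/io.h>
	#include <asm/uncached.h>

	#define CCR		0xff00001c	/* cache control register (assumed address) */
	#define CCR_CACHE_ICI	0x00000800	/* I-cache invalidate bit (assumed value) */

	static void example_flush_icache_all(void)
	{
		unsigned long flags, ccr;

		local_irq_save(flags);

		/*
		 * Run the CCR write from the uncached mapping so we are not
		 * fetching instructions through the very cache being
		 * invalidated. On parts without an uncached mapping (e.g.
		 * SH-2 nommu after this patch) jump_to_uncached() expands
		 * to a no-op.
		 */
		jump_to_uncached();

		ccr = __raw_readl(CCR);
		ccr |= CCR_CACHE_ICI;
		__raw_writel(ccr, CCR);

		back_to_cached();
		local_irq_restore(flags);
	}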
diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h
index 1f1af5afff03..10c8b1823a18 100644
--- a/arch/sh/include/asm/system.h
+++ b/arch/sh/include/asm/system.h
@@ -10,6 +10,7 @@
 #include <linux/compiler.h>
 #include <linux/linkage.h>
 #include <asm/types.h>
+#include <asm/uncached.h>
 
 #define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */
 
@@ -137,9 +138,6 @@ extern unsigned int instruction_size(unsigned int insn);
 #define instruction_size(insn)	(4)
 #endif
 
-extern unsigned long cached_to_uncached;
-extern unsigned long uncached_size;
-
 void per_cpu_trap_init(void);
 void default_idle(void);
 void cpu_idle_wait(void);
diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h
index c941b2739405..a4ad1cd9bc4d 100644
--- a/arch/sh/include/asm/system_32.h
+++ b/arch/sh/include/asm/system_32.h
@@ -145,42 +145,6 @@ do { \
 	__restore_dsp(prev);				\
 } while (0)
 
-/*
- * Jump to uncached area.
- * When handling TLB or caches, we need to do it from an uncached area.
- */
-#define jump_to_uncached()			\
-do {						\
-	unsigned long __dummy;			\
-						\
-	__asm__ __volatile__(			\
-		"mova	1f, %0\n\t"		\
-		"add	%1, %0\n\t"		\
-		"jmp	@%0\n\t"		\
-		" nop\n\t"			\
-		".balign 4\n"			\
-		"1:"				\
-		: "=&z" (__dummy)		\
-		: "r" (cached_to_uncached));	\
-} while (0)
-
-/*
- * Back to cached area.
- */
-#define back_to_cached()			\
-do {						\
-	unsigned long __dummy;			\
-	ctrl_barrier();				\
-	__asm__ __volatile__(			\
-		"mov.l	1f, %0\n\t"		\
-		"jmp	@%0\n\t"		\
-		" nop\n\t"			\
-		".balign 4\n"			\
-		"1:	.long 2f\n"		\
-		"2:"				\
-		: "=&r" (__dummy));		\
-} while (0)
-
 #ifdef CONFIG_CPU_HAS_SR_RB
 #define lookup_exception_vector()	\
 ({					\
diff --git a/arch/sh/include/asm/system_64.h b/arch/sh/include/asm/system_64.h
index 36338646dfc8..8593bc8d1a4e 100644
--- a/arch/sh/include/asm/system_64.h
+++ b/arch/sh/include/asm/system_64.h
@@ -34,9 +34,6 @@ do { \
 				 &next->thread);	\
 } while (0)
 
-#define jump_to_uncached()	do { } while (0)
-#define back_to_cached()	do { } while (0)
-
 #define __icbi(addr)	__asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
 #define __ocbp(addr)	__asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
 #define __ocbi(addr)	__asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
diff --git a/arch/sh/include/asm/uncached.h b/arch/sh/include/asm/uncached.h
index e3419f96626a..6f8816b79cf1 100644
--- a/arch/sh/include/asm/uncached.h
+++ b/arch/sh/include/asm/uncached.h
@@ -4,15 +4,55 @@
 #include <linux/bug.h>
 
 #ifdef CONFIG_UNCACHED_MAPPING
+extern unsigned long cached_to_uncached;
+extern unsigned long uncached_size;
 extern unsigned long uncached_start, uncached_end;
 
 extern int virt_addr_uncached(unsigned long kaddr);
 extern void uncached_init(void);
 extern void uncached_resize(unsigned long size);
+
+/*
+ * Jump to uncached area.
+ * When handling TLB or caches, we need to do it from an uncached area.
+ */
+#define jump_to_uncached()			\
+do {						\
+	unsigned long __dummy;			\
+						\
+	__asm__ __volatile__(			\
+		"mova	1f, %0\n\t"		\
+		"add	%1, %0\n\t"		\
+		"jmp	@%0\n\t"		\
+		" nop\n\t"			\
+		".balign 4\n"			\
+		"1:"				\
+		: "=&z" (__dummy)		\
+		: "r" (cached_to_uncached));	\
+} while (0)
+
+/*
+ * Back to cached area.
+ */
+#define back_to_cached()			\
+do {						\
+	unsigned long __dummy;			\
+	ctrl_barrier();				\
+	__asm__ __volatile__(			\
+		"mov.l	1f, %0\n\t"		\
+		"jmp	@%0\n\t"		\
+		" nop\n\t"			\
+		".balign 4\n"			\
+		"1:	.long 2f\n"		\
+		"2:"				\
+		: "=&r" (__dummy));		\
+} while (0)
 #else
 #define virt_addr_uncached(kaddr)	(0)
 #define uncached_init()			do { } while (0)
 #define uncached_resize(size)		BUG()
+#define jump_to_uncached()		do { } while (0)
+#define back_to_cached()		do { } while (0)
 #endif
 
 #endif /* __ASM_SH_UNCACHED_H */
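For reference, a sketch of how the declarations above fit together: the
uncached window is the range [uncached_start, uncached_end), and
cached_to_uncached is the offset that jump_to_uncached() adds to the PC to
hop from the cached to the uncached view of the same text. The helper body
below is an assumption inferred from these declarations; the real definition
lives in the sh mm code, outside this patch:

	#include <asm/uncached.h>

	/* Is a kernel virtual address inside the uncached window? */
	int virt_addr_uncached(unsigned long kaddr)
	{
		return (kaddr >= uncached_start) && (kaddr < uncached_end);
	}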