aboutsummaryrefslogtreecommitdiffstats
path: root/include/asm-sh
diff options
context:
space:
mode:
authorStuart Menefy <stuart.menefy@st.com>2007-11-30 03:06:36 -0500
committerPaul Mundt <lethal@linux-sh.org>2008-01-27 23:18:59 -0500
commitcbaa118ecfd99fc5ed7adbd9c34a30e1c05e3c93 (patch)
treee60db5c0f3573558c97f39cfab78732220a72e6d /include/asm-sh
parent325df7f20467da07901c4f2b006d3457bba0adec (diff)
sh: Preparation for uncached jumps through PMB.
Presently most of the 29-bit physical parts do P1/P2 segmentation
with a 1:1 cached/uncached mapping, jumping between the two to
control the caching behaviour. This provides the basic infrastructure
to maintain this behaviour on 32-bit physical parts that don't map
P1/P2 at all, using a shiny new linker section and corresponding
fixmap entry.

Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'include/asm-sh')
-rw-r--r--	include/asm-sh/fixmap.h    |  1 +
-rw-r--r--	include/asm-sh/sections.h  |  1 +
-rw-r--r--	include/asm-sh/system.h    |  2 ++
-rw-r--r--	include/asm-sh/system_32.h | 36 ++++++++++++++++++------------------
-rw-r--r--	include/asm-sh/system_64.h |  7 ++++---
5 files changed, 27 insertions(+), 20 deletions(-)
diff --git a/include/asm-sh/fixmap.h b/include/asm-sh/fixmap.h
index 09463cd9bbb9..721fcc4d5e98 100644
--- a/include/asm-sh/fixmap.h
+++ b/include/asm-sh/fixmap.h
@@ -49,6 +49,7 @@ enum fixed_addresses {
 #define FIX_N_COLOURS 16
 	FIX_CMAP_BEGIN,
 	FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS,
+	FIX_UNCACHED,
 #ifdef CONFIG_HIGHMEM
 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
 	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
diff --git a/include/asm-sh/sections.h b/include/asm-sh/sections.h
index bd9cbc967c2a..8f8f4ad400df 100644
--- a/include/asm-sh/sections.h
+++ b/include/asm-sh/sections.h
@@ -4,6 +4,7 @@
 #include <asm-generic/sections.h>
 
 extern long __machvec_start, __machvec_end;
+extern char __uncached_start, __uncached_end;
 extern char _ebss[];
 
 #endif /* __ASM_SH_SECTIONS_H */
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 969f3d4afe2a..9bda8d063ecf 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -144,6 +144,8 @@ extern unsigned int instruction_size(unsigned int insn);
 #define instruction_size(insn)	(4)
 #endif
 
+extern unsigned long cached_to_uncached;
+
 /* XXX
  * disable hlt during certain critical i/o operations
  */
diff --git a/include/asm-sh/system_32.h b/include/asm-sh/system_32.h
index ad37e8d5f31e..e918bacd5ecf 100644
--- a/include/asm-sh/system_32.h
+++ b/include/asm-sh/system_32.h
@@ -58,29 +58,31 @@ do { \
 	last = __last;					\
 } while (0)
 
+#define __uses_jump_to_uncached __attribute__ ((__section__ (".uncached.text")))
+
 /*
- * Jump to P2 area.
- * When handling TLB or caches, we need to do it from P2 area.
+ * Jump to uncached area.
+ * When handling TLB or caches, we need to do it from an uncached area.
  */
-#define jump_to_P2()			\
-do {					\
-	unsigned long __dummy;		\
-	__asm__ __volatile__(		\
-		"mov.l	1f, %0\n\t"	\
-		"or	%1, %0\n\t"	\
-		"jmp	@%0\n\t"	\
-		" nop\n\t"		\
-		".balign 4\n"		\
-		"1:	.long 2f\n"	\
-		"2:"			\
-		: "=&r" (__dummy)	\
-		: "r" (0x20000000));	\
+#define jump_to_uncached()			\
+do {						\
+	unsigned long __dummy;			\
+						\
+	__asm__ __volatile__(			\
+		"mova	1f, %0\n\t"		\
+		"add	%1, %0\n\t"		\
+		"jmp	@%0\n\t"		\
+		" nop\n\t"			\
+		".balign 4\n"			\
+		"1:"				\
+		: "=&z" (__dummy)		\
+		: "r" (cached_to_uncached));	\
 } while (0)
 
 /*
- * Back to P1 area.
+ * Back to cached area.
  */
-#define back_to_P1()				\
+#define back_to_cached()			\
 do {						\
 	unsigned long __dummy;			\
 	ctrl_barrier();				\
diff --git a/include/asm-sh/system_64.h b/include/asm-sh/system_64.h
index 0e466e991f7d..943acf5ea07c 100644
--- a/include/asm-sh/system_64.h
+++ b/include/asm-sh/system_64.h
@@ -32,8 +32,9 @@ do { \
 			 &next->thread);			\
 } while (0)
 
-/* No segmentation.. */
-#define jump_to_P2()	do { } while (0)
-#define back_to_P1()	do { } while (0)
+#define __uses_jump_to_uncached
+
+#define jump_to_uncached()	do { } while (0)
+#define back_to_cached()	do { } while (0)
 
 #endif /* __ASM_SH_SYSTEM_64_H */