author    Paul Mundt <lethal@linux-sh.org>  2010-02-17 02:28:00 -0500
committer Paul Mundt <lethal@linux-sh.org>  2010-02-17 02:28:00 -0500
commit    9edef28653a519bf0a48250f36cce96b1736ec4e (patch)
tree      68049b29e69228fe0cdf26b27a3743928c5e7fdb /arch/sh
parent    51becfd96287b3913b13075699433730984e2f4f (diff)
sh: uncached mapping helpers.
This adds some helper routines for uncached mapping support. This simplifies
some of the cases where we need to check the uncached mapping boundaries, in
addition to giving us a centralized location for building more complex
manipulations on top of.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
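As a rough illustration of how the new interfaces fit together (not part of the
patch itself; the helper names and the address argument below are hypothetical):
virt_addr_uncached() tests whether a kernel virtual address falls inside the
uncached window set up by uncached_init(), while the UNCAC_ADDR()/CAC_ADDR()
macros from <asm/page.h> translate a lowmem address between its cached and
uncached aliases.

#include <asm/page.h>

/*
 * Sketch only, assuming CONFIG_UNCACHED_MAPPING=y and a lowmem kernel
 * virtual address "addr"; these wrappers are hypothetical callers,
 * not code added by this patch.
 */
static unsigned long to_uncached_alias(unsigned long addr)
{
	/* Already inside the uncached window? Use it as-is. */
	if (virt_addr_uncached(addr))
		return addr;

	/* Otherwise shift the cached address into the uncached mapping. */
	return UNCAC_ADDR(addr);
}

static unsigned long to_cached_alias(unsigned long addr)
{
	/* Hand back the cached equivalent, as profile_pc() now does. */
	return virt_addr_uncached(addr) ? CAC_ADDR(addr) : addr;
}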
Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/include/asm/page.h    19
-rw-r--r--  arch/sh/include/asm/ptrace.h  11
-rw-r--r--  arch/sh/kernel/head_32.S       8
-rw-r--r--  arch/sh/mm/Makefile            1
-rw-r--r--  arch/sh/mm/init.c             21
-rw-r--r--  arch/sh/mm/uncached.c         28
6 files changed, 58 insertions, 30 deletions
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 3accdc5ab122..8237d9f53e56 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -50,13 +50,22 @@ extern unsigned long shm_align_mask;
 extern unsigned long max_low_pfn, min_low_pfn;
 extern unsigned long memory_start, memory_end;
 
+#ifdef CONFIG_UNCACHED_MAPPING
+extern unsigned long uncached_start, uncached_end;
+
+extern int virt_addr_uncached(unsigned long kaddr);
+extern void uncached_init(void);
+#else
+#define virt_addr_uncached(kaddr)	(0)
+#define uncached_init()			do { } while (0)
+#endif
+
 static inline unsigned long
 pages_do_alias(unsigned long addr1, unsigned long addr2)
 {
 	return (addr1 ^ addr2) & shm_align_mask;
 }
 
-
 #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, void *from);
 
@@ -135,6 +144,14 @@ typedef struct page *pgtable_t;
 #define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET))
 #endif
 
+#ifdef CONFIG_UNCACHED_MAPPING
+#define UNCAC_ADDR(addr)	((addr) - PAGE_OFFSET + uncached_start)
+#define CAC_ADDR(addr)		((addr) - uncached_start + PAGE_OFFSET)
+#else
+#define UNCAC_ADDR(addr)	((addr))
+#define CAC_ADDR(addr)		((addr))
+#endif
+
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
 
diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h
index e879dffa324b..e11b14ea2c43 100644
--- a/arch/sh/include/asm/ptrace.h
+++ b/arch/sh/include/asm/ptrace.h
@@ -139,15 +139,8 @@ static inline unsigned long profile_pc(struct pt_regs *regs)
 {
 	unsigned long pc = instruction_pointer(regs);
 
-#ifdef CONFIG_UNCACHED_MAPPING
-	/*
-	 * If PC points in to the uncached mapping, fix it up and hand
-	 * back the cached equivalent.
-	 */
-	if ((pc >= (memory_start + cached_to_uncached)) &&
-	    (pc <  (memory_start + cached_to_uncached + uncached_size)))
-		pc -= cached_to_uncached;
-#endif
+	if (virt_addr_uncached(pc))
+		return CAC_ADDR(pc);
 
 	return pc;
 }
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index 91ae76277d8f..79ff39517f8e 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -152,6 +152,7 @@ ENTRY(_stext)
 
 	mov	#0, r10
 
+#ifdef CONFIG_UNCACHED_MAPPING
 	/*
 	 * Uncached mapping
 	 */
@@ -171,6 +172,7 @@ ENTRY(_stext)
 	add	r4, r1
 	add	r4, r3
 	add	#1, r10
+#endif
 
 /*
  * Iterate over all of the available sizes from largest to
@@ -216,6 +218,7 @@ ENTRY(_stext)
 	__PMB_ITER_BY_SIZE(64)
 	__PMB_ITER_BY_SIZE(16)
 
+#ifdef CONFIG_UNCACHED_MAPPING
 	/*
 	 * Now that we can access it, update cached_to_uncached and
 	 * uncached_size.
@@ -228,6 +231,7 @@ ENTRY(_stext)
 	shll16	r7
 	shll8	r7
 	mov.l	r7, @r0
+#endif
 
 	/*
 	 * Clear the remaining PMB entries.
@@ -306,7 +310,9 @@ ENTRY(stack_start)
 .LFIRST_ADDR_ENTRY:	.long	PAGE_OFFSET | PMB_V
 .LFIRST_DATA_ENTRY:	.long	__MEMORY_START | PMB_V
 .LMMUCR:		.long	MMUCR
+.LMEMORY_SIZE:		.long	__MEMORY_SIZE
+#ifdef CONFIG_UNCACHED_MAPPING
 .Lcached_to_uncached:	.long	cached_to_uncached
 .Luncached_size:	.long	uncached_size
-.LMEMORY_SIZE:		.long	__MEMORY_SIZE
+#endif
 #endif
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index de714cbd961a..3dc8a8a63822 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
 obj-$(CONFIG_PMB)		+= pmb.o
 obj-$(CONFIG_NUMA)		+= numa.o
 obj-$(CONFIG_IOREMAP_FIXED)	+= ioremap_fixed.o
+obj-$(CONFIG_UNCACHED_MAPPING)	+= uncached.o
 
 # Special flags for fault_64.o. This puts restrictions on the number of
 # caller-save registers that the compiler can target when building this file.
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 58012b6bbe76..08e280d7cc7e 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -26,21 +26,6 @@
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
-#ifdef CONFIG_UNCACHED_MAPPING
-/*
- * This is the offset of the uncached section from its cached alias.
- *
- * Legacy platforms handle trivial transitions between cached and
- * uncached segments by making use of the 1:1 mapping relationship in
- * 512MB lowmem, others via a special uncached mapping.
- *
- * Default value only valid in 29 bit mode, in 32bit mode this will be
- * updated by the early PMB initialization code.
- */
-unsigned long cached_to_uncached = 0x20000000;
-unsigned long uncached_size = SZ_512M;
-#endif
-
 #ifdef CONFIG_MMU
 static pte_t *__get_pte_phys(unsigned long addr)
 {
@@ -260,7 +245,7 @@ void __init mem_init(void)
 	memset(empty_zero_page, 0, PAGE_SIZE);
 	__flush_wback_region(empty_zero_page, PAGE_SIZE);
 
-	/* Initialize the vDSO */
+	uncached_init();
 	vsyscall_init();
 
 	codesize = (unsigned long) &_etext - (unsigned long) &_text;
@@ -303,9 +288,7 @@ void __init mem_init(void)
 		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,
 
 #ifdef CONFIG_UNCACHED_MAPPING
-		(unsigned long)memory_start + cached_to_uncached,
-		(unsigned long)memory_start + cached_to_uncached + uncached_size,
-		uncached_size >> 20,
+		uncached_start, uncached_end, uncached_size >> 20,
 #endif
 
 		(unsigned long)&__init_begin, (unsigned long)&__init_end,
diff --git a/arch/sh/mm/uncached.c b/arch/sh/mm/uncached.c
new file mode 100644
index 000000000000..807906981d9d
--- /dev/null
+++ b/arch/sh/mm/uncached.c
@@ -0,0 +1,28 @@
+#include <linux/init.h>
+#include <asm/sizes.h>
+#include <asm/page.h>
+
+/*
+ * This is the offset of the uncached section from its cached alias.
+ *
+ * Legacy platforms handle trivial transitions between cached and
+ * uncached segments by making use of the 1:1 mapping relationship in
+ * 512MB lowmem, others via a special uncached mapping.
+ *
+ * Default value only valid in 29 bit mode, in 32bit mode this will be
+ * updated by the early PMB initialization code.
+ */
+unsigned long cached_to_uncached = SZ_512M;
+unsigned long uncached_size = SZ_512M;
+unsigned long uncached_start, uncached_end;
+
+int virt_addr_uncached(unsigned long kaddr)
+{
+	return (kaddr >= uncached_start) && (kaddr < uncached_end);
+}
+
+void __init uncached_init(void)
+{
+	uncached_start = memory_end;
+	uncached_end = uncached_start + uncached_size;
+}