author     Paul Mundt <lethal@linux-sh.org>    2010-02-18 04:35:20 -0500
committer  Paul Mundt <lethal@linux-sh.org>    2010-02-18 04:35:20 -0500
commit     77f36fcc035a5af19e95f50a2e648cda2a6ef2b9 (patch)
tree       a183a3289807a83da9c11e0d2d722cec60fce5d9 /arch
parent     838a4a9dcee0cbaeb0943531da00ac44d578f315 (diff)
parent     d01447b3197c2c470a14666be2c640407bbbfec7 (diff)
Merge branch 'sh/pmb-dynamic'
Diffstat (limited to 'arch')
-rw-r--r--  arch/sh/boot/compressed/misc.c        2
-rw-r--r--  arch/sh/include/asm/io.h             22
-rw-r--r--  arch/sh/include/asm/mmu.h            40
-rw-r--r--  arch/sh/include/asm/page.h           17
-rw-r--r--  arch/sh/include/asm/ptrace.h         11
-rw-r--r--  arch/sh/include/asm/uncached.h       18
-rw-r--r--  arch/sh/include/cpu-sh4/cpu/sq.h      3
-rw-r--r--  arch/sh/kernel/cpu/init.c            21
-rw-r--r--  arch/sh/kernel/cpu/sh4/sq.c          13
-rw-r--r--  arch/sh/kernel/head_32.S             52
-rw-r--r--  arch/sh/kernel/setup.c                2
-rw-r--r--  arch/sh/kernel/traps_32.c             7
-rw-r--r--  arch/sh/kernel/vmlinux.lds.S          7
-rw-r--r--  arch/sh/mm/Kconfig                   10
-rw-r--r--  arch/sh/mm/Makefile                   1
-rw-r--r--  arch/sh/mm/init.c                    20
-rw-r--r--  arch/sh/mm/ioremap.c                  2
-rw-r--r--  arch/sh/mm/pmb.c                    571
-rw-r--r--  arch/sh/mm/uncached.c                34
19 files changed, 571 insertions, 282 deletions
diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
index 9ba07927d16a..27140a6b365d 100644
--- a/arch/sh/boot/compressed/misc.c
+++ b/arch/sh/boot/compressed/misc.c
@@ -117,7 +117,7 @@ void decompress_kernel(void) | |||
117 | output_addr = (CONFIG_MEMORY_START + 0x2000); | 117 | output_addr = (CONFIG_MEMORY_START + 0x2000); |
118 | #else | 118 | #else |
119 | output_addr = __pa((unsigned long)&_text+PAGE_SIZE); | 119 | output_addr = __pa((unsigned long)&_text+PAGE_SIZE); |
120 | #if defined(CONFIG_29BIT) || defined(CONFIG_PMB_LEGACY) | 120 | #if defined(CONFIG_29BIT) |
121 | output_addr |= P2SEG; | 121 | output_addr |= P2SEG; |
122 | #endif | 122 | #endif |
123 | #endif | 123 | #endif |
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index bd5fafa23eb4..7dab7b23a5ec 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -133,6 +133,28 @@ static inline void ctrl_delay(void) | |||
133 | __raw_readw(generic_io_base); | 133 | __raw_readw(generic_io_base); |
134 | } | 134 | } |
135 | 135 | ||
136 | #define __BUILD_UNCACHED_IO(bwlq, type) \ | ||
137 | static inline type read##bwlq##_uncached(unsigned long addr) \ | ||
138 | { \ | ||
139 | type ret; \ | ||
140 | jump_to_uncached(); \ | ||
141 | ret = __raw_read##bwlq(addr); \ | ||
142 | back_to_cached(); \ | ||
143 | return ret; \ | ||
144 | } \ | ||
145 | \ | ||
146 | static inline void write##bwlq##_uncached(type v, unsigned long addr) \ | ||
147 | { \ | ||
148 | jump_to_uncached(); \ | ||
149 | __raw_write##bwlq(v, addr); \ | ||
150 | back_to_cached(); \ | ||
151 | } | ||
152 | |||
153 | __BUILD_UNCACHED_IO(b, u8) | ||
154 | __BUILD_UNCACHED_IO(w, u16) | ||
155 | __BUILD_UNCACHED_IO(l, u32) | ||
156 | __BUILD_UNCACHED_IO(q, u64) | ||
157 | |||
136 | #define __BUILD_MEMORY_STRING(bwlq, type) \ | 158 | #define __BUILD_MEMORY_STRING(bwlq, type) \ |
137 | \ | 159 | \ |
138 | static inline void __raw_writes##bwlq(volatile void __iomem *mem, \ | 160 | static inline void __raw_writes##bwlq(volatile void __iomem *mem, \ |
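For reference, here is a minimal sketch of what the new __BUILD_UNCACHED_IO(l, u32) instantiation expands to. Nothing beyond the macro body above is assumed: jump_to_uncached()/back_to_cached() and the __raw_* accessors are the existing SH primitives.

```c
/* Expansion of __BUILD_UNCACHED_IO(l, u32) -- illustrative only. */
static inline u32 readl_uncached(unsigned long addr)
{
	u32 ret;

	jump_to_uncached();		/* switch execution to the uncached alias */
	ret = __raw_readl(addr);	/* perform the access */
	back_to_cached();		/* and drop back to the cached mapping */
	return ret;
}

static inline void writel_uncached(u32 v, unsigned long addr)
{
	jump_to_uncached();
	__raw_writel(v, addr);
	back_to_cached();
}
```

The PMB code further down uses writel_uncached() for its PMB_ADDR/PMB_DATA updates, which have to be performed from the uncached mapping.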
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index ca7d91e8aa72..15a05b615ba7 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -11,7 +11,9 @@ | |||
11 | 11 | ||
12 | #define PMB_ADDR 0xf6100000 | 12 | #define PMB_ADDR 0xf6100000 |
13 | #define PMB_DATA 0xf7100000 | 13 | #define PMB_DATA 0xf7100000 |
14 | #define PMB_ENTRY_MAX 16 | 14 | |
15 | #define NR_PMB_ENTRIES 16 | ||
16 | |||
15 | #define PMB_E_MASK 0x0000000f | 17 | #define PMB_E_MASK 0x0000000f |
16 | #define PMB_E_SHIFT 8 | 18 | #define PMB_E_SHIFT 8 |
17 | 19 | ||
@@ -25,6 +27,7 @@ | |||
25 | #define PMB_C 0x00000008 | 27 | #define PMB_C 0x00000008 |
26 | #define PMB_WT 0x00000001 | 28 | #define PMB_WT 0x00000001 |
27 | #define PMB_UB 0x00000200 | 29 | #define PMB_UB 0x00000200 |
30 | #define PMB_CACHE_MASK (PMB_C | PMB_WT | PMB_UB) | ||
28 | #define PMB_V 0x00000100 | 31 | #define PMB_V 0x00000100 |
29 | 32 | ||
30 | #define PMB_NO_ENTRY (-1) | 33 | #define PMB_NO_ENTRY (-1) |
@@ -32,6 +35,7 @@ | |||
32 | #ifndef __ASSEMBLY__ | 35 | #ifndef __ASSEMBLY__ |
33 | #include <linux/errno.h> | 36 | #include <linux/errno.h> |
34 | #include <linux/threads.h> | 37 | #include <linux/threads.h> |
38 | #include <asm/page.h> | ||
35 | 39 | ||
36 | /* Default "unsigned long" context */ | 40 | /* Default "unsigned long" context */ |
37 | typedef unsigned long mm_context_id_t[NR_CPUS]; | 41 | typedef unsigned long mm_context_id_t[NR_CPUS]; |
@@ -49,46 +53,22 @@ typedef struct { | |||
49 | #endif | 53 | #endif |
50 | } mm_context_t; | 54 | } mm_context_t; |
51 | 55 | ||
52 | struct pmb_entry; | ||
53 | |||
54 | struct pmb_entry { | ||
55 | unsigned long vpn; | ||
56 | unsigned long ppn; | ||
57 | unsigned long flags; | ||
58 | |||
59 | /* | ||
60 | * 0 .. NR_PMB_ENTRIES for specific entry selection, or | ||
61 | * PMB_NO_ENTRY to search for a free one | ||
62 | */ | ||
63 | int entry; | ||
64 | |||
65 | struct pmb_entry *next; | ||
66 | /* Adjacent entry link for contiguous multi-entry mappings */ | ||
67 | struct pmb_entry *link; | ||
68 | }; | ||
69 | |||
70 | #ifdef CONFIG_PMB | 56 | #ifdef CONFIG_PMB |
71 | /* arch/sh/mm/pmb.c */ | 57 | /* arch/sh/mm/pmb.c */ |
72 | long pmb_remap(unsigned long virt, unsigned long phys, | 58 | long pmb_remap(unsigned long virt, unsigned long phys, |
73 | unsigned long size, unsigned long flags); | 59 | unsigned long size, pgprot_t prot); |
74 | void pmb_unmap(unsigned long addr); | 60 | void pmb_unmap(unsigned long addr); |
75 | int pmb_init(void); | 61 | void pmb_init(void); |
76 | bool __in_29bit_mode(void); | 62 | bool __in_29bit_mode(void); |
77 | #else | 63 | #else |
78 | static inline long pmb_remap(unsigned long virt, unsigned long phys, | 64 | static inline long pmb_remap(unsigned long virt, unsigned long phys, |
79 | unsigned long size, unsigned long flags) | 65 | unsigned long size, pgprot_t prot) |
80 | { | 66 | { |
81 | return -EINVAL; | 67 | return -EINVAL; |
82 | } | 68 | } |
83 | 69 | ||
84 | static inline void pmb_unmap(unsigned long addr) | 70 | #define pmb_unmap(addr) do { } while (0) |
85 | { | 71 | #define pmb_init(addr) do { } while (0) |
86 | } | ||
87 | |||
88 | static inline int pmb_init(void) | ||
89 | { | ||
90 | return -ENODEV; | ||
91 | } | ||
92 | 72 | ||
93 | #ifdef CONFIG_29BIT | 73 | #ifdef CONFIG_29BIT |
94 | #define __in_29bit_mode() (1) | 74 | #define __in_29bit_mode() (1) |
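The constants above fix the register layout: each of the 16 entries has an address register and a data register at a 256-byte stride from PMB_ADDR and PMB_DATA respectively. A stand-alone sketch of that arithmetic, mirroring mk_pmb_addr()/mk_pmb_data() in arch/sh/mm/pmb.c below (constants copied from this header, nothing else assumed):

```c
#include <stdio.h>

#define PMB_ADDR	0xf6100000UL
#define PMB_DATA	0xf7100000UL
#define NR_PMB_ENTRIES	16
#define PMB_E_MASK	0x0000000fUL
#define PMB_E_SHIFT	8

int main(void)
{
	unsigned int entry;

	/* Same derivation as mk_pmb_addr()/mk_pmb_data(). */
	for (entry = 0; entry < NR_PMB_ENTRIES; entry++) {
		unsigned long off = (entry & PMB_E_MASK) << PMB_E_SHIFT;

		printf("entry %2u: PMB_ADDR %#lx  PMB_DATA %#lx\n",
		       entry, PMB_ADDR | off, PMB_DATA | off);
	}

	return 0;
}
```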
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 61e58105adc3..d71feb359304 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -45,6 +45,7 @@ | |||
45 | #endif | 45 | #endif |
46 | 46 | ||
47 | #ifndef __ASSEMBLY__ | 47 | #ifndef __ASSEMBLY__ |
48 | #include <asm/uncached.h> | ||
48 | 49 | ||
49 | extern unsigned long shm_align_mask; | 50 | extern unsigned long shm_align_mask; |
50 | extern unsigned long max_low_pfn, min_low_pfn; | 51 | extern unsigned long max_low_pfn, min_low_pfn; |
@@ -56,7 +57,6 @@ pages_do_alias(unsigned long addr1, unsigned long addr2) | |||
56 | return (addr1 ^ addr2) & shm_align_mask; | 57 | return (addr1 ^ addr2) & shm_align_mask; |
57 | } | 58 | } |
58 | 59 | ||
59 | |||
60 | #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) | 60 | #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) |
61 | extern void copy_page(void *to, void *from); | 61 | extern void copy_page(void *to, void *from); |
62 | 62 | ||
@@ -127,12 +127,7 @@ typedef struct page *pgtable_t; | |||
127 | * is not visible (it is part of the PMB mapping) and so needs to be | 127 | * is not visible (it is part of the PMB mapping) and so needs to be |
128 | * added or subtracted as required. | 128 | * added or subtracted as required. |
129 | */ | 129 | */ |
130 | #if defined(CONFIG_PMB_LEGACY) | 130 | #ifdef CONFIG_PMB |
131 | /* phys = virt - PAGE_OFFSET - (__MEMORY_START & 0xe0000000) */ | ||
132 | #define PMB_OFFSET (PAGE_OFFSET - PXSEG(__MEMORY_START)) | ||
133 | #define __pa(x) ((unsigned long)(x) - PMB_OFFSET) | ||
134 | #define __va(x) ((void *)((unsigned long)(x) + PMB_OFFSET)) | ||
135 | #elif defined(CONFIG_32BIT) | ||
136 | #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START) | 131 | #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START) |
137 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START)) | 132 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START)) |
138 | #else | 133 | #else |
@@ -140,6 +135,14 @@ typedef struct page *pgtable_t; | |||
140 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) | 135 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) |
141 | #endif | 136 | #endif |
142 | 137 | ||
138 | #ifdef CONFIG_UNCACHED_MAPPING | ||
139 | #define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + uncached_start) | ||
140 | #define CAC_ADDR(addr) ((addr) - uncached_start + PAGE_OFFSET) | ||
141 | #else | ||
142 | #define UNCAC_ADDR(addr) ((addr)) | ||
143 | #define CAC_ADDR(addr) ((addr)) | ||
144 | #endif | ||
145 | |||
143 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) | 146 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) |
144 | #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) | 147 | #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) |
145 | 148 | ||
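The new UNCAC_ADDR()/CAC_ADDR() macros translate between the cached linear mapping and its uncached alias by offsetting against uncached_start. A quick stand-alone illustration of the arithmetic; the PAGE_OFFSET and uncached_start values are made-up examples, not taken from any particular configuration:

```c
#include <stdio.h>

#define PAGE_OFFSET	0x80000000UL			/* example value */
static unsigned long uncached_start = 0x90000000UL;	/* set by uncached_init() */

#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + uncached_start)
#define CAC_ADDR(addr)   ((addr) - uncached_start + PAGE_OFFSET)

int main(void)
{
	unsigned long cached = PAGE_OFFSET + 0x1000;
	unsigned long uncached = UNCAC_ADDR(cached);

	printf("cached   %#lx -> uncached %#lx\n", cached, uncached);
	printf("uncached %#lx -> cached   %#lx\n", uncached, CAC_ADDR(uncached));

	return 0;
}
```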
diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h
index e879dffa324b..e11b14ea2c43 100644
--- a/arch/sh/include/asm/ptrace.h
+++ b/arch/sh/include/asm/ptrace.h
@@ -139,15 +139,8 @@ static inline unsigned long profile_pc(struct pt_regs *regs) | |||
139 | { | 139 | { |
140 | unsigned long pc = instruction_pointer(regs); | 140 | unsigned long pc = instruction_pointer(regs); |
141 | 141 | ||
142 | #ifdef CONFIG_UNCACHED_MAPPING | 142 | if (virt_addr_uncached(pc)) |
143 | /* | 143 | return CAC_ADDR(pc); |
144 | * If PC points in to the uncached mapping, fix it up and hand | ||
145 | * back the cached equivalent. | ||
146 | */ | ||
147 | if ((pc >= (memory_start + cached_to_uncached)) && | ||
148 | (pc < (memory_start + cached_to_uncached + uncached_size))) | ||
149 | pc -= cached_to_uncached; | ||
150 | #endif | ||
151 | 144 | ||
152 | return pc; | 145 | return pc; |
153 | } | 146 | } |
diff --git a/arch/sh/include/asm/uncached.h b/arch/sh/include/asm/uncached.h
new file mode 100644
index 000000000000..e3419f96626a
--- /dev/null
+++ b/arch/sh/include/asm/uncached.h
@@ -0,0 +1,18 @@ | |||
1 | #ifndef __ASM_SH_UNCACHED_H | ||
2 | #define __ASM_SH_UNCACHED_H | ||
3 | |||
4 | #include <linux/bug.h> | ||
5 | |||
6 | #ifdef CONFIG_UNCACHED_MAPPING | ||
7 | extern unsigned long uncached_start, uncached_end; | ||
8 | |||
9 | extern int virt_addr_uncached(unsigned long kaddr); | ||
10 | extern void uncached_init(void); | ||
11 | extern void uncached_resize(unsigned long size); | ||
12 | #else | ||
13 | #define virt_addr_uncached(kaddr) (0) | ||
14 | #define uncached_init() do { } while (0) | ||
15 | #define uncached_resize(size) BUG() | ||
16 | #endif | ||
17 | |||
18 | #endif /* __ASM_SH_UNCACHED_H */ | ||
diff --git a/arch/sh/include/cpu-sh4/cpu/sq.h b/arch/sh/include/cpu-sh4/cpu/sq.h
index 586d6491816a..74716ba2dc3c 100644
--- a/arch/sh/include/cpu-sh4/cpu/sq.h
+++ b/arch/sh/include/cpu-sh4/cpu/sq.h
@@ -12,6 +12,7 @@ | |||
12 | #define __ASM_CPU_SH4_SQ_H | 12 | #define __ASM_CPU_SH4_SQ_H |
13 | 13 | ||
14 | #include <asm/addrspace.h> | 14 | #include <asm/addrspace.h> |
15 | #include <asm/page.h> | ||
15 | 16 | ||
16 | /* | 17 | /* |
17 | * Store queues range from e0000000-e3fffffc, allowing approx. 64MB to be | 18 | * Store queues range from e0000000-e3fffffc, allowing approx. 64MB to be |
@@ -28,7 +29,7 @@ | |||
28 | 29 | ||
29 | /* arch/sh/kernel/cpu/sh4/sq.c */ | 30 | /* arch/sh/kernel/cpu/sh4/sq.c */ |
30 | unsigned long sq_remap(unsigned long phys, unsigned int size, | 31 | unsigned long sq_remap(unsigned long phys, unsigned int size, |
31 | const char *name, unsigned long flags); | 32 | const char *name, pgprot_t prot); |
32 | void sq_unmap(unsigned long vaddr); | 33 | void sq_unmap(unsigned long vaddr); |
33 | void sq_flush_range(unsigned long start, unsigned int len); | 34 | void sq_flush_range(unsigned long start, unsigned int len); |
34 | 35 | ||
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 6311b0b1789d..c736422344eb 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -24,6 +24,7 @@ | |||
24 | #include <asm/elf.h> | 24 | #include <asm/elf.h> |
25 | #include <asm/io.h> | 25 | #include <asm/io.h> |
26 | #include <asm/smp.h> | 26 | #include <asm/smp.h> |
27 | #include <asm/sh_bios.h> | ||
27 | 28 | ||
28 | #ifdef CONFIG_SH_FPU | 29 | #ifdef CONFIG_SH_FPU |
29 | #define cpu_has_fpu 1 | 30 | #define cpu_has_fpu 1 |
@@ -342,9 +343,21 @@ asmlinkage void __init sh_cpu_init(void) | |||
342 | speculative_execution_init(); | 343 | speculative_execution_init(); |
343 | expmask_init(); | 344 | expmask_init(); |
344 | 345 | ||
345 | /* | 346 | /* Do the rest of the boot processor setup */ |
346 | * Boot processor to setup the FP and extended state context info. | 347 | if (raw_smp_processor_id() == 0) { |
347 | */ | 348 | /* Save off the BIOS VBR, if there is one */ |
348 | if (raw_smp_processor_id() == 0) | 349 | sh_bios_vbr_init(); |
350 | |||
351 | /* | ||
352 | * Setup VBR for boot CPU. Secondary CPUs do this through | ||
353 | * start_secondary(). | ||
354 | */ | ||
355 | per_cpu_trap_init(); | ||
356 | |||
357 | /* | ||
358 | * Boot processor to setup the FP and extended state | ||
359 | * context info. | ||
360 | */ | ||
349 | init_thread_xstate(); | 361 | init_thread_xstate(); |
362 | } | ||
350 | } | 363 | } |
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index 97aea9d69b00..fc065f9da6e5 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -100,7 +100,7 @@ static inline void sq_mapping_list_del(struct sq_mapping *map) | |||
100 | spin_unlock_irq(&sq_mapping_lock); | 100 | spin_unlock_irq(&sq_mapping_lock); |
101 | } | 101 | } |
102 | 102 | ||
103 | static int __sq_remap(struct sq_mapping *map, unsigned long flags) | 103 | static int __sq_remap(struct sq_mapping *map, pgprot_t prot) |
104 | { | 104 | { |
105 | #if defined(CONFIG_MMU) | 105 | #if defined(CONFIG_MMU) |
106 | struct vm_struct *vma; | 106 | struct vm_struct *vma; |
@@ -113,7 +113,7 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags) | |||
113 | 113 | ||
114 | if (ioremap_page_range((unsigned long)vma->addr, | 114 | if (ioremap_page_range((unsigned long)vma->addr, |
115 | (unsigned long)vma->addr + map->size, | 115 | (unsigned long)vma->addr + map->size, |
116 | vma->phys_addr, __pgprot(flags))) { | 116 | vma->phys_addr, prot)) { |
117 | vunmap(vma->addr); | 117 | vunmap(vma->addr); |
118 | return -EAGAIN; | 118 | return -EAGAIN; |
119 | } | 119 | } |
@@ -135,14 +135,14 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags) | |||
135 | * @phys: Physical address of mapping. | 135 | * @phys: Physical address of mapping. |
136 | * @size: Length of mapping. | 136 | * @size: Length of mapping. |
137 | * @name: User invoking mapping. | 137 | * @name: User invoking mapping. |
138 | * @flags: Protection flags. | 138 | * @prot: Protection bits. |
139 | * | 139 | * |
140 | * Remaps the physical address @phys through the next available store queue | 140 | * Remaps the physical address @phys through the next available store queue |
141 | * address of @size length. @name is logged at boot time as well as through | 141 | * address of @size length. @name is logged at boot time as well as through |
142 | * the sysfs interface. | 142 | * the sysfs interface. |
143 | */ | 143 | */ |
144 | unsigned long sq_remap(unsigned long phys, unsigned int size, | 144 | unsigned long sq_remap(unsigned long phys, unsigned int size, |
145 | const char *name, unsigned long flags) | 145 | const char *name, pgprot_t prot) |
146 | { | 146 | { |
147 | struct sq_mapping *map; | 147 | struct sq_mapping *map; |
148 | unsigned long end; | 148 | unsigned long end; |
@@ -177,7 +177,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size, | |||
177 | 177 | ||
178 | map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT); | 178 | map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT); |
179 | 179 | ||
180 | ret = __sq_remap(map, pgprot_val(PAGE_KERNEL_NOCACHE) | flags); | 180 | ret = __sq_remap(map, prot); |
181 | if (unlikely(ret != 0)) | 181 | if (unlikely(ret != 0)) |
182 | goto out; | 182 | goto out; |
183 | 183 | ||
@@ -309,8 +309,7 @@ static ssize_t mapping_store(const char *buf, size_t count) | |||
309 | return -EIO; | 309 | return -EIO; |
310 | 310 | ||
311 | if (likely(len)) { | 311 | if (likely(len)) { |
312 | int ret = sq_remap(base, len, "Userspace", | 312 | int ret = sq_remap(base, len, "Userspace", PAGE_SHARED); |
313 | pgprot_val(PAGE_SHARED)); | ||
314 | if (ret < 0) | 313 | if (ret < 0) |
315 | return ret; | 314 | return ret; |
316 | } else | 315 | } else |
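With sq_remap() now taking a pgprot_t, callers hand over a page protection value instead of raw flag bits. A hypothetical driver-side sketch of the new calling convention follows; the physical base, length and name are placeholders, and IS_ERR_VALUE() is used on the assumption that failures come back as negative error codes, as the mapping_store() hunk above implies:

```c
/* Hypothetical helper, not part of the patch. */
static int example_sq_setup(void)
{
	unsigned long sq;

	sq = sq_remap(0xfc000000UL, SZ_64K, "example-device",
		      PAGE_KERNEL_NOCACHE);
	if (IS_ERR_VALUE(sq))
		return (int)sq;

	/* ... write through the returned store queue window ... */
	sq_flush_range(sq, SZ_64K);

	sq_unmap(sq);
	return 0;
}
```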
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index 83f2b84b58da..fe0b743881b0 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -85,7 +85,7 @@ ENTRY(_stext) | |||
85 | ldc r0, r7_bank ! ... and initial thread_info | 85 | ldc r0, r7_bank ! ... and initial thread_info |
86 | #endif | 86 | #endif |
87 | 87 | ||
88 | #if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY) | 88 | #ifdef CONFIG_PMB |
89 | /* | 89 | /* |
90 | * Reconfigure the initial PMB mappings setup by the hardware. | 90 | * Reconfigure the initial PMB mappings setup by the hardware. |
91 | * | 91 | * |
@@ -139,7 +139,6 @@ ENTRY(_stext) | |||
139 | mov.l r0, @r1 | 139 | mov.l r0, @r1 |
140 | 140 | ||
141 | mov.l .LMEMORY_SIZE, r5 | 141 | mov.l .LMEMORY_SIZE, r5 |
142 | mov r5, r7 | ||
143 | 142 | ||
144 | mov #PMB_E_SHIFT, r0 | 143 | mov #PMB_E_SHIFT, r0 |
145 | mov #0x1, r4 | 144 | mov #0x1, r4 |
@@ -150,8 +149,43 @@ ENTRY(_stext) | |||
150 | mov.l .LFIRST_ADDR_ENTRY, r2 | 149 | mov.l .LFIRST_ADDR_ENTRY, r2 |
151 | mov.l .LPMB_ADDR, r3 | 150 | mov.l .LPMB_ADDR, r3 |
152 | 151 | ||
152 | /* | ||
153 | * First we need to walk the PMB and figure out if there are any | ||
154 | * existing mappings that match the initial mappings VPN/PPN. | ||
155 | * If these have already been established by the bootloader, we | ||
156 | * don't bother setting up new entries here, and let the late PMB | ||
157 | * initialization take care of things instead. | ||
158 | * | ||
159 | * Note that we may need to coalesce and merge entries in order | ||
160 | * to reclaim more available PMB slots, which is much more than | ||
161 | * we want to do at this early stage. | ||
162 | */ | ||
163 | mov #0, r10 | ||
164 | mov #NR_PMB_ENTRIES, r9 | ||
165 | |||
166 | mov r1, r7 /* temporary PMB_DATA iter */ | ||
167 | |||
168 | .Lvalidate_existing_mappings: | ||
169 | |||
170 | mov.l @r7, r8 | ||
171 | and r0, r8 | ||
172 | cmp/eq r0, r8 /* Check for valid __MEMORY_START mappings */ | ||
173 | bt .Lpmb_done | ||
174 | |||
175 | add #1, r10 /* Increment the loop counter */ | ||
176 | cmp/eq r9, r10 | ||
177 | bf/s .Lvalidate_existing_mappings | ||
178 | add r4, r7 /* Increment to the next PMB_DATA entry */ | ||
179 | |||
180 | /* | ||
181 | * If we've fallen through, continue with setting up the initial | ||
182 | * mappings. | ||
183 | */ | ||
184 | |||
185 | mov r5, r7 /* cached_to_uncached */ | ||
153 | mov #0, r10 | 186 | mov #0, r10 |
154 | 187 | ||
188 | #ifdef CONFIG_UNCACHED_MAPPING | ||
155 | /* | 189 | /* |
156 | * Uncached mapping | 190 | * Uncached mapping |
157 | */ | 191 | */ |
@@ -171,6 +205,7 @@ ENTRY(_stext) | |||
171 | add r4, r1 | 205 | add r4, r1 |
172 | add r4, r3 | 206 | add r4, r3 |
173 | add #1, r10 | 207 | add #1, r10 |
208 | #endif | ||
174 | 209 | ||
175 | /* | 210 | /* |
176 | * Iterate over all of the available sizes from largest to | 211 | * Iterate over all of the available sizes from largest to |
@@ -216,6 +251,7 @@ ENTRY(_stext) | |||
216 | __PMB_ITER_BY_SIZE(64) | 251 | __PMB_ITER_BY_SIZE(64) |
217 | __PMB_ITER_BY_SIZE(16) | 252 | __PMB_ITER_BY_SIZE(16) |
218 | 253 | ||
254 | #ifdef CONFIG_UNCACHED_MAPPING | ||
219 | /* | 255 | /* |
220 | * Now that we can access it, update cached_to_uncached and | 256 | * Now that we can access it, update cached_to_uncached and |
221 | * uncached_size. | 257 | * uncached_size. |
@@ -228,6 +264,7 @@ ENTRY(_stext) | |||
228 | shll16 r7 | 264 | shll16 r7 |
229 | shll8 r7 | 265 | shll8 r7 |
230 | mov.l r7, @r0 | 266 | mov.l r7, @r0 |
267 | #endif | ||
231 | 268 | ||
232 | /* | 269 | /* |
233 | * Clear the remaining PMB entries. | 270 | * Clear the remaining PMB entries. |
@@ -236,7 +273,7 @@ ENTRY(_stext) | |||
236 | * r10 = number of entries we've setup so far | 273 | * r10 = number of entries we've setup so far |
237 | */ | 274 | */ |
238 | mov #0, r1 | 275 | mov #0, r1 |
239 | mov #PMB_ENTRY_MAX, r0 | 276 | mov #NR_PMB_ENTRIES, r0 |
240 | 277 | ||
241 | .Lagain: | 278 | .Lagain: |
242 | mov.l r1, @r3 /* Clear PMB_ADDR entry */ | 279 | mov.l r1, @r3 /* Clear PMB_ADDR entry */ |
@@ -248,7 +285,8 @@ ENTRY(_stext) | |||
248 | mov.l 6f, r0 | 285 | mov.l 6f, r0 |
249 | icbi @r0 | 286 | icbi @r0 |
250 | 287 | ||
251 | #endif /* !CONFIG_PMB_LEGACY */ | 288 | .Lpmb_done: |
289 | #endif /* CONFIG_PMB */ | ||
252 | 290 | ||
253 | #ifndef CONFIG_SH_NO_BSS_INIT | 291 | #ifndef CONFIG_SH_NO_BSS_INIT |
254 | /* | 292 | /* |
@@ -300,13 +338,15 @@ ENTRY(stack_start) | |||
300 | 6: .long sh_cpu_init | 338 | 6: .long sh_cpu_init |
301 | 7: .long init_thread_union | 339 | 7: .long init_thread_union |
302 | 340 | ||
303 | #if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY) | 341 | #ifdef CONFIG_PMB |
304 | .LPMB_ADDR: .long PMB_ADDR | 342 | .LPMB_ADDR: .long PMB_ADDR |
305 | .LPMB_DATA: .long PMB_DATA | 343 | .LPMB_DATA: .long PMB_DATA |
306 | .LFIRST_ADDR_ENTRY: .long PAGE_OFFSET | PMB_V | 344 | .LFIRST_ADDR_ENTRY: .long PAGE_OFFSET | PMB_V |
307 | .LFIRST_DATA_ENTRY: .long __MEMORY_START | PMB_V | 345 | .LFIRST_DATA_ENTRY: .long __MEMORY_START | PMB_V |
308 | .LMMUCR: .long MMUCR | 346 | .LMMUCR: .long MMUCR |
347 | .LMEMORY_SIZE: .long __MEMORY_SIZE | ||
348 | #ifdef CONFIG_UNCACHED_MAPPING | ||
309 | .Lcached_to_uncached: .long cached_to_uncached | 349 | .Lcached_to_uncached: .long cached_to_uncached |
310 | .Luncached_size: .long uncached_size | 350 | .Luncached_size: .long uncached_size |
311 | .LMEMORY_SIZE: .long __MEMORY_SIZE | 351 | #endif |
312 | #endif | 352 | #endif |
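Register allocation aside, the new .Lvalidate_existing_mappings walk boils down to the logic below. This is only a rough C rendering for readability; the real code runs as early assembly, long before any C environment exists:

```c
/* Rough C equivalent of the early PMB validation walk -- illustration only. */
static int bootloader_established_mapping(void)
{
	unsigned long want = __MEMORY_START | PMB_V;	/* .LFIRST_DATA_ENTRY */
	int i;

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long data = __raw_readl(PMB_DATA + (i << PMB_E_SHIFT));

		/*
		 * A valid entry already covering __MEMORY_START means the
		 * boot loader set up the PMB for us; skip the early setup
		 * and leave the rest to pmb_synchronize()/pmb_coalesce().
		 */
		if ((data & want) == want)
			return 1;
	}

	return 0;	/* fall through to the initial mapping setup */
}
```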
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index e187750dd319..3459e70eed72 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -421,6 +421,8 @@ void __init setup_arch(char **cmdline_p) | |||
421 | 421 | ||
422 | parse_early_param(); | 422 | parse_early_param(); |
423 | 423 | ||
424 | uncached_init(); | ||
425 | |||
424 | plat_early_device_setup(); | 426 | plat_early_device_setup(); |
425 | 427 | ||
426 | /* Let earlyprintk output early console messages */ | 428 | /* Let earlyprintk output early console messages */ |
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 9c090cb68878..c3d86fa71ddf 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -30,7 +30,6 @@ | |||
30 | #include <asm/alignment.h> | 30 | #include <asm/alignment.h> |
31 | #include <asm/fpu.h> | 31 | #include <asm/fpu.h> |
32 | #include <asm/kprobes.h> | 32 | #include <asm/kprobes.h> |
33 | #include <asm/sh_bios.h> | ||
34 | 33 | ||
35 | #ifdef CONFIG_CPU_SH2 | 34 | #ifdef CONFIG_CPU_SH2 |
36 | # define TRAP_RESERVED_INST 4 | 35 | # define TRAP_RESERVED_INST 4 |
@@ -848,12 +847,6 @@ void __init trap_init(void) | |||
848 | #ifdef TRAP_UBC | 847 | #ifdef TRAP_UBC |
849 | set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler); | 848 | set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler); |
850 | #endif | 849 | #endif |
851 | |||
852 | /* Save off the BIOS VBR, if there is one */ | ||
853 | sh_bios_vbr_init(); | ||
854 | |||
855 | /* Setup VBR for boot cpu */ | ||
856 | per_cpu_trap_init(); | ||
857 | } | 850 | } |
858 | 851 | ||
859 | void show_stack(struct task_struct *tsk, unsigned long *sp) | 852 | void show_stack(struct task_struct *tsk, unsigned long *sp) |
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 0e66c7b30e0f..7f8a709c3ada 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -14,11 +14,10 @@ OUTPUT_ARCH(sh) | |||
14 | #include <asm/cache.h> | 14 | #include <asm/cache.h> |
15 | #include <asm/vmlinux.lds.h> | 15 | #include <asm/vmlinux.lds.h> |
16 | 16 | ||
17 | #if defined(CONFIG_29BIT) || defined(CONFIG_SUPERH64) || \ | 17 | #ifdef CONFIG_PMB |
18 | defined(CONFIG_PMB_LEGACY) | 18 | #define MEMORY_OFFSET 0 |
19 | #define MEMORY_OFFSET __MEMORY_START | ||
20 | #else | 19 | #else |
21 | #define MEMORY_OFFSET 0 | 20 | #define MEMORY_OFFSET __MEMORY_START |
22 | #endif | 21 | #endif |
23 | 22 | ||
24 | ENTRY(_start) | 23 | ENTRY(_start) |
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 65cb5b83e072..1445ca6257df 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -91,16 +91,6 @@ config PMB | |||
91 | 32-bits through the SH-4A PMB. If this is not set, legacy | 91 | 32-bits through the SH-4A PMB. If this is not set, legacy |
92 | 29-bit physical addressing will be used. | 92 | 29-bit physical addressing will be used. |
93 | 93 | ||
94 | config PMB_LEGACY | ||
95 | bool "Support legacy boot mappings for PMB" | ||
96 | depends on PMB | ||
97 | select 32BIT | ||
98 | help | ||
99 | If this option is enabled, fixed PMB mappings are inherited | ||
100 | from the boot loader, and the kernel does not attempt dynamic | ||
101 | management. This is the closest to legacy 29-bit physical mode, | ||
102 | and allows systems to support up to 512MiB of system memory. | ||
103 | |||
104 | config X2TLB | 94 | config X2TLB |
105 | def_bool y | 95 | def_bool y |
106 | depends on (CPU_SHX2 || CPU_SHX3) && MMU | 96 | depends on (CPU_SHX2 || CPU_SHX3) && MMU |
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index de714cbd961a..3dc8a8a63822 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | |||
36 | obj-$(CONFIG_PMB) += pmb.o | 36 | obj-$(CONFIG_PMB) += pmb.o |
37 | obj-$(CONFIG_NUMA) += numa.o | 37 | obj-$(CONFIG_NUMA) += numa.o |
38 | obj-$(CONFIG_IOREMAP_FIXED) += ioremap_fixed.o | 38 | obj-$(CONFIG_IOREMAP_FIXED) += ioremap_fixed.o |
39 | obj-$(CONFIG_UNCACHED_MAPPING) += uncached.o | ||
39 | 40 | ||
40 | # Special flags for fault_64.o. This puts restrictions on the number of | 41 | # Special flags for fault_64.o. This puts restrictions on the number of |
41 | # caller-save registers that the compiler can target when building this file. | 42 | # caller-save registers that the compiler can target when building this file. |
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 58012b6bbe76..68028e8f26ce 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -26,21 +26,6 @@ | |||
26 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | 26 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); |
27 | pgd_t swapper_pg_dir[PTRS_PER_PGD]; | 27 | pgd_t swapper_pg_dir[PTRS_PER_PGD]; |
28 | 28 | ||
29 | #ifdef CONFIG_UNCACHED_MAPPING | ||
30 | /* | ||
31 | * This is the offset of the uncached section from its cached alias. | ||
32 | * | ||
33 | * Legacy platforms handle trivial transitions between cached and | ||
34 | * uncached segments by making use of the 1:1 mapping relationship in | ||
35 | * 512MB lowmem, others via a special uncached mapping. | ||
36 | * | ||
37 | * Default value only valid in 29 bit mode, in 32bit mode this will be | ||
38 | * updated by the early PMB initialization code. | ||
39 | */ | ||
40 | unsigned long cached_to_uncached = 0x20000000; | ||
41 | unsigned long uncached_size = SZ_512M; | ||
42 | #endif | ||
43 | |||
44 | #ifdef CONFIG_MMU | 29 | #ifdef CONFIG_MMU |
45 | static pte_t *__get_pte_phys(unsigned long addr) | 30 | static pte_t *__get_pte_phys(unsigned long addr) |
46 | { | 31 | { |
@@ -260,7 +245,6 @@ void __init mem_init(void) | |||
260 | memset(empty_zero_page, 0, PAGE_SIZE); | 245 | memset(empty_zero_page, 0, PAGE_SIZE); |
261 | __flush_wback_region(empty_zero_page, PAGE_SIZE); | 246 | __flush_wback_region(empty_zero_page, PAGE_SIZE); |
262 | 247 | ||
263 | /* Initialize the vDSO */ | ||
264 | vsyscall_init(); | 248 | vsyscall_init(); |
265 | 249 | ||
266 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | 250 | codesize = (unsigned long) &_etext - (unsigned long) &_text; |
@@ -303,9 +287,7 @@ void __init mem_init(void) | |||
303 | ((unsigned long)high_memory - (unsigned long)memory_start) >> 20, | 287 | ((unsigned long)high_memory - (unsigned long)memory_start) >> 20, |
304 | 288 | ||
305 | #ifdef CONFIG_UNCACHED_MAPPING | 289 | #ifdef CONFIG_UNCACHED_MAPPING |
306 | (unsigned long)memory_start + cached_to_uncached, | 290 | uncached_start, uncached_end, uncached_size >> 20, |
307 | (unsigned long)memory_start + cached_to_uncached + uncached_size, | ||
308 | uncached_size >> 20, | ||
309 | #endif | 291 | #endif |
310 | 292 | ||
311 | (unsigned long)&__init_begin, (unsigned long)&__init_end, | 293 | (unsigned long)&__init_begin, (unsigned long)&__init_end, |
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index 94583c5da855..c68d2d7d00a9 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -80,7 +80,7 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size, | |||
80 | if (unlikely(phys_addr >= P1SEG)) { | 80 | if (unlikely(phys_addr >= P1SEG)) { |
81 | unsigned long mapped; | 81 | unsigned long mapped; |
82 | 82 | ||
83 | mapped = pmb_remap(addr, phys_addr, size, pgprot_val(pgprot)); | 83 | mapped = pmb_remap(addr, phys_addr, size, pgprot); |
84 | if (likely(mapped)) { | 84 | if (likely(mapped)) { |
85 | addr += mapped; | 85 | addr += mapped; |
86 | phys_addr += mapped; | 86 | phys_addr += mapped; |
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 3c9bf5b5c36f..198bcff5e96f 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -21,47 +21,67 @@ | |||
21 | #include <linux/fs.h> | 21 | #include <linux/fs.h> |
22 | #include <linux/seq_file.h> | 22 | #include <linux/seq_file.h> |
23 | #include <linux/err.h> | 23 | #include <linux/err.h> |
24 | #include <linux/io.h> | ||
25 | #include <linux/spinlock.h> | ||
26 | #include <linux/rwlock.h> | ||
27 | #include <asm/sizes.h> | ||
24 | #include <asm/system.h> | 28 | #include <asm/system.h> |
25 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
26 | #include <asm/pgtable.h> | 30 | #include <asm/pgtable.h> |
31 | #include <asm/page.h> | ||
27 | #include <asm/mmu.h> | 32 | #include <asm/mmu.h> |
28 | #include <asm/io.h> | ||
29 | #include <asm/mmu_context.h> | 33 | #include <asm/mmu_context.h> |
30 | 34 | ||
31 | #define NR_PMB_ENTRIES 16 | 35 | struct pmb_entry; |
32 | 36 | ||
33 | static void __pmb_unmap(struct pmb_entry *); | 37 | struct pmb_entry { |
38 | unsigned long vpn; | ||
39 | unsigned long ppn; | ||
40 | unsigned long flags; | ||
41 | unsigned long size; | ||
42 | |||
43 | spinlock_t lock; | ||
44 | |||
45 | /* | ||
46 | * 0 .. NR_PMB_ENTRIES for specific entry selection, or | ||
47 | * PMB_NO_ENTRY to search for a free one | ||
48 | */ | ||
49 | int entry; | ||
50 | |||
51 | /* Adjacent entry link for contiguous multi-entry mappings */ | ||
52 | struct pmb_entry *link; | ||
53 | }; | ||
54 | |||
55 | static void pmb_unmap_entry(struct pmb_entry *, int depth); | ||
34 | 56 | ||
57 | static DEFINE_RWLOCK(pmb_rwlock); | ||
35 | static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; | 58 | static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; |
36 | static unsigned long pmb_map; | 59 | static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES); |
37 | 60 | ||
38 | static inline unsigned long mk_pmb_entry(unsigned int entry) | 61 | static __always_inline unsigned long mk_pmb_entry(unsigned int entry) |
39 | { | 62 | { |
40 | return (entry & PMB_E_MASK) << PMB_E_SHIFT; | 63 | return (entry & PMB_E_MASK) << PMB_E_SHIFT; |
41 | } | 64 | } |
42 | 65 | ||
43 | static inline unsigned long mk_pmb_addr(unsigned int entry) | 66 | static __always_inline unsigned long mk_pmb_addr(unsigned int entry) |
44 | { | 67 | { |
45 | return mk_pmb_entry(entry) | PMB_ADDR; | 68 | return mk_pmb_entry(entry) | PMB_ADDR; |
46 | } | 69 | } |
47 | 70 | ||
48 | static inline unsigned long mk_pmb_data(unsigned int entry) | 71 | static __always_inline unsigned long mk_pmb_data(unsigned int entry) |
49 | { | 72 | { |
50 | return mk_pmb_entry(entry) | PMB_DATA; | 73 | return mk_pmb_entry(entry) | PMB_DATA; |
51 | } | 74 | } |
52 | 75 | ||
53 | static int pmb_alloc_entry(void) | 76 | static int pmb_alloc_entry(void) |
54 | { | 77 | { |
55 | unsigned int pos; | 78 | int pos; |
56 | |||
57 | repeat: | ||
58 | pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); | ||
59 | |||
60 | if (unlikely(pos > NR_PMB_ENTRIES)) | ||
61 | return -ENOSPC; | ||
62 | 79 | ||
63 | if (test_and_set_bit(pos, &pmb_map)) | 80 | pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES); |
64 | goto repeat; | 81 | if (pos >= 0 && pos < NR_PMB_ENTRIES) |
82 | __set_bit(pos, pmb_map); | ||
83 | else | ||
84 | pos = -ENOSPC; | ||
65 | 85 | ||
66 | return pos; | 86 | return pos; |
67 | } | 87 | } |
@@ -70,21 +90,34 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, | |||
70 | unsigned long flags, int entry) | 90 | unsigned long flags, int entry) |
71 | { | 91 | { |
72 | struct pmb_entry *pmbe; | 92 | struct pmb_entry *pmbe; |
93 | unsigned long irqflags; | ||
94 | void *ret = NULL; | ||
73 | int pos; | 95 | int pos; |
74 | 96 | ||
97 | write_lock_irqsave(&pmb_rwlock, irqflags); | ||
98 | |||
75 | if (entry == PMB_NO_ENTRY) { | 99 | if (entry == PMB_NO_ENTRY) { |
76 | pos = pmb_alloc_entry(); | 100 | pos = pmb_alloc_entry(); |
77 | if (pos < 0) | 101 | if (unlikely(pos < 0)) { |
78 | return ERR_PTR(pos); | 102 | ret = ERR_PTR(pos); |
103 | goto out; | ||
104 | } | ||
79 | } else { | 105 | } else { |
80 | if (test_bit(entry, &pmb_map)) | 106 | if (__test_and_set_bit(entry, pmb_map)) { |
81 | return ERR_PTR(-ENOSPC); | 107 | ret = ERR_PTR(-ENOSPC); |
108 | goto out; | ||
109 | } | ||
110 | |||
82 | pos = entry; | 111 | pos = entry; |
83 | } | 112 | } |
84 | 113 | ||
114 | write_unlock_irqrestore(&pmb_rwlock, irqflags); | ||
115 | |||
85 | pmbe = &pmb_entry_list[pos]; | 116 | pmbe = &pmb_entry_list[pos]; |
86 | if (!pmbe) | 117 | |
87 | return ERR_PTR(-ENOMEM); | 118 | memset(pmbe, 0, sizeof(struct pmb_entry)); |
119 | |||
120 | spin_lock_init(&pmbe->lock); | ||
88 | 121 | ||
89 | pmbe->vpn = vpn; | 122 | pmbe->vpn = vpn; |
90 | pmbe->ppn = ppn; | 123 | pmbe->ppn = ppn; |
@@ -92,101 +125,113 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, | |||
92 | pmbe->entry = pos; | 125 | pmbe->entry = pos; |
93 | 126 | ||
94 | return pmbe; | 127 | return pmbe; |
128 | |||
129 | out: | ||
130 | write_unlock_irqrestore(&pmb_rwlock, irqflags); | ||
131 | return ret; | ||
95 | } | 132 | } |
96 | 133 | ||
97 | static void pmb_free(struct pmb_entry *pmbe) | 134 | static void pmb_free(struct pmb_entry *pmbe) |
98 | { | 135 | { |
99 | int pos = pmbe->entry; | 136 | __clear_bit(pmbe->entry, pmb_map); |
100 | |||
101 | pmbe->vpn = 0; | ||
102 | pmbe->ppn = 0; | ||
103 | pmbe->flags = 0; | ||
104 | pmbe->entry = 0; | ||
105 | 137 | ||
106 | clear_bit(pos, &pmb_map); | 138 | pmbe->entry = PMB_NO_ENTRY; |
139 | pmbe->link = NULL; | ||
107 | } | 140 | } |
108 | 141 | ||
109 | /* | 142 | /* |
110 | * Must be in P2 for __set_pmb_entry() | 143 | * Ensure that the PMB entries match our cache configuration. |
144 | * | ||
145 | * When we are in 32-bit address extended mode, CCR.CB becomes | ||
146 | * invalid, so care must be taken to manually adjust cacheable | ||
147 | * translations. | ||
111 | */ | 148 | */ |
112 | static void __set_pmb_entry(unsigned long vpn, unsigned long ppn, | 149 | static __always_inline unsigned long pmb_cache_flags(void) |
113 | unsigned long flags, int pos) | ||
114 | { | 150 | { |
115 | __raw_writel(vpn | PMB_V, mk_pmb_addr(pos)); | 151 | unsigned long flags = 0; |
116 | 152 | ||
117 | #ifdef CONFIG_CACHE_WRITETHROUGH | 153 | #if defined(CONFIG_CACHE_WRITETHROUGH) |
118 | /* | 154 | flags |= PMB_C | PMB_WT | PMB_UB; |
119 | * When we are in 32-bit address extended mode, CCR.CB becomes | 155 | #elif defined(CONFIG_CACHE_WRITEBACK) |
120 | * invalid, so care must be taken to manually adjust cacheable | 156 | flags |= PMB_C; |
121 | * translations. | ||
122 | */ | ||
123 | if (likely(flags & PMB_C)) | ||
124 | flags |= PMB_WT; | ||
125 | #endif | 157 | #endif |
126 | 158 | ||
127 | __raw_writel(ppn | flags | PMB_V, mk_pmb_data(pos)); | 159 | return flags; |
128 | } | 160 | } |
129 | 161 | ||
130 | static void set_pmb_entry(struct pmb_entry *pmbe) | 162 | /* |
163 | * Must be run uncached. | ||
164 | */ | ||
165 | static void __set_pmb_entry(struct pmb_entry *pmbe) | ||
131 | { | 166 | { |
132 | jump_to_uncached(); | 167 | writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry)); |
133 | __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry); | 168 | writel_uncached(pmbe->ppn | pmbe->flags | PMB_V, |
134 | back_to_cached(); | 169 | mk_pmb_data(pmbe->entry)); |
135 | } | 170 | } |
136 | 171 | ||
137 | static void clear_pmb_entry(struct pmb_entry *pmbe) | 172 | static void __clear_pmb_entry(struct pmb_entry *pmbe) |
138 | { | 173 | { |
139 | unsigned int entry = pmbe->entry; | 174 | unsigned long addr, data; |
140 | unsigned long addr; | 175 | unsigned long addr_val, data_val; |
141 | 176 | ||
142 | if (unlikely(entry >= NR_PMB_ENTRIES)) | 177 | addr = mk_pmb_addr(pmbe->entry); |
143 | return; | 178 | data = mk_pmb_data(pmbe->entry); |
144 | 179 | ||
145 | jump_to_uncached(); | 180 | addr_val = __raw_readl(addr); |
181 | data_val = __raw_readl(data); | ||
146 | 182 | ||
147 | /* Clear V-bit */ | 183 | /* Clear V-bit */ |
148 | addr = mk_pmb_addr(entry); | 184 | writel_uncached(addr_val & ~PMB_V, addr); |
149 | __raw_writel(__raw_readl(addr) & ~PMB_V, addr); | 185 | writel_uncached(data_val & ~PMB_V, data); |
186 | } | ||
150 | 187 | ||
151 | addr = mk_pmb_data(entry); | 188 | static void set_pmb_entry(struct pmb_entry *pmbe) |
152 | __raw_writel(__raw_readl(addr) & ~PMB_V, addr); | 189 | { |
190 | unsigned long flags; | ||
153 | 191 | ||
154 | back_to_cached(); | 192 | spin_lock_irqsave(&pmbe->lock, flags); |
193 | __set_pmb_entry(pmbe); | ||
194 | spin_unlock_irqrestore(&pmbe->lock, flags); | ||
155 | } | 195 | } |
156 | 196 | ||
157 | |||
158 | static struct { | 197 | static struct { |
159 | unsigned long size; | 198 | unsigned long size; |
160 | int flag; | 199 | int flag; |
161 | } pmb_sizes[] = { | 200 | } pmb_sizes[] = { |
162 | { .size = 0x20000000, .flag = PMB_SZ_512M, }, | 201 | { .size = SZ_512M, .flag = PMB_SZ_512M, }, |
163 | { .size = 0x08000000, .flag = PMB_SZ_128M, }, | 202 | { .size = SZ_128M, .flag = PMB_SZ_128M, }, |
164 | { .size = 0x04000000, .flag = PMB_SZ_64M, }, | 203 | { .size = SZ_64M, .flag = PMB_SZ_64M, }, |
165 | { .size = 0x01000000, .flag = PMB_SZ_16M, }, | 204 | { .size = SZ_16M, .flag = PMB_SZ_16M, }, |
166 | }; | 205 | }; |
167 | 206 | ||
168 | long pmb_remap(unsigned long vaddr, unsigned long phys, | 207 | long pmb_remap(unsigned long vaddr, unsigned long phys, |
169 | unsigned long size, unsigned long flags) | 208 | unsigned long size, pgprot_t prot) |
170 | { | 209 | { |
171 | struct pmb_entry *pmbp, *pmbe; | 210 | struct pmb_entry *pmbp, *pmbe; |
172 | unsigned long wanted; | 211 | unsigned long wanted; |
173 | int pmb_flags, i; | 212 | int pmb_flags, i; |
174 | long err; | 213 | long err; |
214 | u64 flags; | ||
215 | |||
216 | flags = pgprot_val(prot); | ||
217 | |||
218 | pmb_flags = PMB_WT | PMB_UB; | ||
175 | 219 | ||
176 | /* Convert typical pgprot value to the PMB equivalent */ | 220 | /* Convert typical pgprot value to the PMB equivalent */ |
177 | if (flags & _PAGE_CACHABLE) { | 221 | if (flags & _PAGE_CACHABLE) { |
178 | if (flags & _PAGE_WT) | 222 | pmb_flags |= PMB_C; |
179 | pmb_flags = PMB_WT; | 223 | |
180 | else | 224 | if ((flags & _PAGE_WT) == 0) |
181 | pmb_flags = PMB_C; | 225 | pmb_flags &= ~(PMB_WT | PMB_UB); |
182 | } else | 226 | } |
183 | pmb_flags = PMB_WT | PMB_UB; | ||
184 | 227 | ||
185 | pmbp = NULL; | 228 | pmbp = NULL; |
186 | wanted = size; | 229 | wanted = size; |
187 | 230 | ||
188 | again: | 231 | again: |
189 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) { | 232 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) { |
233 | unsigned long flags; | ||
234 | |||
190 | if (size < pmb_sizes[i].size) | 235 | if (size < pmb_sizes[i].size) |
191 | continue; | 236 | continue; |
192 | 237 | ||
@@ -197,18 +242,25 @@ again: | |||
197 | goto out; | 242 | goto out; |
198 | } | 243 | } |
199 | 244 | ||
200 | set_pmb_entry(pmbe); | 245 | spin_lock_irqsave(&pmbe->lock, flags); |
246 | |||
247 | __set_pmb_entry(pmbe); | ||
201 | 248 | ||
202 | phys += pmb_sizes[i].size; | 249 | phys += pmb_sizes[i].size; |
203 | vaddr += pmb_sizes[i].size; | 250 | vaddr += pmb_sizes[i].size; |
204 | size -= pmb_sizes[i].size; | 251 | size -= pmb_sizes[i].size; |
205 | 252 | ||
253 | pmbe->size = pmb_sizes[i].size; | ||
254 | |||
206 | /* | 255 | /* |
207 | * Link adjacent entries that span multiple PMB entries | 256 | * Link adjacent entries that span multiple PMB entries |
208 | * for easier tear-down. | 257 | * for easier tear-down. |
209 | */ | 258 | */ |
210 | if (likely(pmbp)) | 259 | if (likely(pmbp)) { |
260 | spin_lock(&pmbp->lock); | ||
211 | pmbp->link = pmbe; | 261 | pmbp->link = pmbe; |
262 | spin_unlock(&pmbp->lock); | ||
263 | } | ||
212 | 264 | ||
213 | pmbp = pmbe; | 265 | pmbp = pmbe; |
214 | 266 | ||
@@ -218,16 +270,17 @@ again: | |||
218 | * pmb_sizes[i].size again. | 270 | * pmb_sizes[i].size again. |
219 | */ | 271 | */ |
220 | i--; | 272 | i--; |
273 | |||
274 | spin_unlock_irqrestore(&pmbe->lock, flags); | ||
221 | } | 275 | } |
222 | 276 | ||
223 | if (size >= 0x1000000) | 277 | if (size >= SZ_16M) |
224 | goto again; | 278 | goto again; |
225 | 279 | ||
226 | return wanted - size; | 280 | return wanted - size; |
227 | 281 | ||
228 | out: | 282 | out: |
229 | if (pmbp) | 283 | pmb_unmap_entry(pmbp, NR_PMB_ENTRIES); |
230 | __pmb_unmap(pmbp); | ||
231 | 284 | ||
232 | return err; | 285 | return err; |
233 | } | 286 | } |
@@ -237,24 +290,52 @@ void pmb_unmap(unsigned long addr) | |||
237 | struct pmb_entry *pmbe = NULL; | 290 | struct pmb_entry *pmbe = NULL; |
238 | int i; | 291 | int i; |
239 | 292 | ||
293 | read_lock(&pmb_rwlock); | ||
294 | |||
240 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { | 295 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { |
241 | if (test_bit(i, &pmb_map)) { | 296 | if (test_bit(i, pmb_map)) { |
242 | pmbe = &pmb_entry_list[i]; | 297 | pmbe = &pmb_entry_list[i]; |
243 | if (pmbe->vpn == addr) | 298 | if (pmbe->vpn == addr) |
244 | break; | 299 | break; |
245 | } | 300 | } |
246 | } | 301 | } |
247 | 302 | ||
248 | if (unlikely(!pmbe)) | 303 | read_unlock(&pmb_rwlock); |
249 | return; | 304 | |
305 | pmb_unmap_entry(pmbe, NR_PMB_ENTRIES); | ||
306 | } | ||
307 | |||
308 | static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b) | ||
309 | { | ||
310 | return (b->vpn == (a->vpn + a->size)) && | ||
311 | (b->ppn == (a->ppn + a->size)) && | ||
312 | (b->flags == a->flags); | ||
313 | } | ||
314 | |||
315 | static bool pmb_size_valid(unsigned long size) | ||
316 | { | ||
317 | int i; | ||
318 | |||
319 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) | ||
320 | if (pmb_sizes[i].size == size) | ||
321 | return true; | ||
250 | 322 | ||
251 | __pmb_unmap(pmbe); | 323 | return false; |
252 | } | 324 | } |
253 | 325 | ||
254 | static void __pmb_unmap(struct pmb_entry *pmbe) | 326 | static int pmb_size_to_flags(unsigned long size) |
255 | { | 327 | { |
256 | BUG_ON(!test_bit(pmbe->entry, &pmb_map)); | 328 | int i; |
257 | 329 | ||
330 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) | ||
331 | if (pmb_sizes[i].size == size) | ||
332 | return pmb_sizes[i].flag; | ||
333 | |||
334 | return 0; | ||
335 | } | ||
336 | |||
337 | static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth) | ||
338 | { | ||
258 | do { | 339 | do { |
259 | struct pmb_entry *pmblink = pmbe; | 340 | struct pmb_entry *pmblink = pmbe; |
260 | 341 | ||
@@ -265,52 +346,91 @@ static void __pmb_unmap(struct pmb_entry *pmbe) | |||
265 | * this entry in pmb_alloc() (even if we haven't filled | 346 | * this entry in pmb_alloc() (even if we haven't filled |
266 | * it yet). | 347 | * it yet). |
267 | * | 348 | * |
268 | * Therefore, calling clear_pmb_entry() is safe as no | 349 | * Therefore, calling __clear_pmb_entry() is safe as no |
269 | * other mapping can be using that slot. | 350 | * other mapping can be using that slot. |
270 | */ | 351 | */ |
271 | clear_pmb_entry(pmbe); | 352 | __clear_pmb_entry(pmbe); |
272 | 353 | ||
273 | pmbe = pmblink->link; | 354 | pmbe = pmblink->link; |
274 | 355 | ||
275 | pmb_free(pmblink); | 356 | pmb_free(pmblink); |
276 | } while (pmbe); | 357 | } while (pmbe && --depth); |
358 | } | ||
359 | |||
360 | static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth) | ||
361 | { | ||
362 | unsigned long flags; | ||
363 | |||
364 | if (unlikely(!pmbe)) | ||
365 | return; | ||
366 | |||
367 | write_lock_irqsave(&pmb_rwlock, flags); | ||
368 | __pmb_unmap_entry(pmbe, depth); | ||
369 | write_unlock_irqrestore(&pmb_rwlock, flags); | ||
277 | } | 370 | } |
278 | 371 | ||
279 | #ifdef CONFIG_PMB_LEGACY | 372 | static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn) |
280 | static inline unsigned int pmb_ppn_in_range(unsigned long ppn) | ||
281 | { | 373 | { |
282 | return ppn >= __MEMORY_START && ppn < __MEMORY_START + __MEMORY_SIZE; | 374 | return ppn >= __pa(memory_start) && ppn < __pa(memory_end); |
283 | } | 375 | } |
284 | 376 | ||
285 | static int pmb_apply_legacy_mappings(void) | 377 | static void __init pmb_notify(void) |
286 | { | 378 | { |
287 | unsigned int applied = 0; | ||
288 | int i; | 379 | int i; |
289 | 380 | ||
290 | pr_info("PMB: Preserving legacy mappings:\n"); | 381 | pr_info("PMB: boot mappings:\n"); |
382 | |||
383 | read_lock(&pmb_rwlock); | ||
384 | |||
385 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { | ||
386 | struct pmb_entry *pmbe; | ||
387 | |||
388 | if (!test_bit(i, pmb_map)) | ||
389 | continue; | ||
390 | |||
391 | pmbe = &pmb_entry_list[i]; | ||
392 | |||
393 | pr_info(" 0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n", | ||
394 | pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT, | ||
395 | pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un"); | ||
396 | } | ||
397 | |||
398 | read_unlock(&pmb_rwlock); | ||
399 | } | ||
400 | |||
401 | /* | ||
402 | * Sync our software copy of the PMB mappings with those in hardware. The | ||
403 | * mappings in the hardware PMB were either set up by the bootloader or | ||
404 | * very early on by the kernel. | ||
405 | */ | ||
406 | static void __init pmb_synchronize(void) | ||
407 | { | ||
408 | struct pmb_entry *pmbp = NULL; | ||
409 | int i, j; | ||
291 | 410 | ||
292 | /* | 411 | /* |
293 | * The following entries are setup by the bootloader. | 412 | * Run through the initial boot mappings, log the established |
413 | * ones, and blow away anything that falls outside of the valid | ||
414 | * PPN range. Specifically, we only care about existing mappings | ||
415 | * that impact the cached/uncached sections. | ||
294 | * | 416 | * |
295 | * Entry VPN PPN V SZ C UB | 417 | * Note that touching these can be a bit of a minefield; the boot |
296 | * -------------------------------------------------------- | 418 | * loader can establish multi-page mappings with the same caching |
297 | * 0 0xA0000000 0x00000000 1 64MB 0 0 | 419 | * attributes, so we need to ensure that we aren't modifying a |
298 | * 1 0xA4000000 0x04000000 1 16MB 0 0 | 420 | * mapping that we're presently executing from, or may execute |
299 | * 2 0xA6000000 0x08000000 1 16MB 0 0 | 421 | * from in the case of straddling page boundaries. |
300 | * 9 0x88000000 0x48000000 1 128MB 1 1 | ||
301 | * 10 0x90000000 0x50000000 1 128MB 1 1 | ||
302 | * 11 0x98000000 0x58000000 1 128MB 1 1 | ||
303 | * 13 0xA8000000 0x48000000 1 128MB 0 0 | ||
304 | * 14 0xB0000000 0x50000000 1 128MB 0 0 | ||
305 | * 15 0xB8000000 0x58000000 1 128MB 0 0 | ||
306 | * | 422 | * |
307 | * The only entries the we need are the ones that map the kernel | 423 | * In the future we will have to tidy up after the boot loader by |
308 | * at the cached and uncached addresses. | 424 | * jumping between the cached and uncached mappings and tearing |
425 | * down alternating mappings while executing from the other. | ||
309 | */ | 426 | */ |
310 | for (i = 0; i < PMB_ENTRY_MAX; i++) { | 427 | for (i = 0; i < NR_PMB_ENTRIES; i++) { |
311 | unsigned long addr, data; | 428 | unsigned long addr, data; |
312 | unsigned long addr_val, data_val; | 429 | unsigned long addr_val, data_val; |
313 | unsigned long ppn, vpn; | 430 | unsigned long ppn, vpn, flags; |
431 | unsigned long irqflags; | ||
432 | unsigned int size; | ||
433 | struct pmb_entry *pmbe; | ||
314 | 434 | ||
315 | addr = mk_pmb_addr(i); | 435 | addr = mk_pmb_addr(i); |
316 | data = mk_pmb_data(i); | 436 | data = mk_pmb_data(i); |
@@ -330,110 +450,202 @@ static int pmb_apply_legacy_mappings(void) | |||
330 | /* | 450 | /* |
331 | * Only preserve in-range mappings. | 451 | * Only preserve in-range mappings. |
332 | */ | 452 | */ |
333 | if (pmb_ppn_in_range(ppn)) { | 453 | if (!pmb_ppn_in_range(ppn)) { |
334 | unsigned int size; | 454 | /* |
335 | char *sz_str = NULL; | 455 | * Invalidate anything out of bounds. |
456 | */ | ||
457 | writel_uncached(addr_val & ~PMB_V, addr); | ||
458 | writel_uncached(data_val & ~PMB_V, data); | ||
459 | continue; | ||
460 | } | ||
461 | |||
462 | /* | ||
463 | * Update the caching attributes if necessary | ||
464 | */ | ||
465 | if (data_val & PMB_C) { | ||
466 | data_val &= ~PMB_CACHE_MASK; | ||
467 | data_val |= pmb_cache_flags(); | ||
468 | |||
469 | writel_uncached(data_val, data); | ||
470 | } | ||
471 | |||
472 | size = data_val & PMB_SZ_MASK; | ||
473 | flags = size | (data_val & PMB_CACHE_MASK); | ||
336 | 474 | ||
337 | size = data_val & PMB_SZ_MASK; | 475 | pmbe = pmb_alloc(vpn, ppn, flags, i); |
476 | if (IS_ERR(pmbe)) { | ||
477 | WARN_ON_ONCE(1); | ||
478 | continue; | ||
479 | } | ||
480 | |||
481 | spin_lock_irqsave(&pmbe->lock, irqflags); | ||
338 | 482 | ||
339 | sz_str = (size == PMB_SZ_16M) ? " 16MB": | 483 | for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++) |
340 | (size == PMB_SZ_64M) ? " 64MB": | 484 | if (pmb_sizes[j].flag == size) |
341 | (size == PMB_SZ_128M) ? "128MB": | 485 | pmbe->size = pmb_sizes[j].size; |
342 | "512MB"; | ||
343 | 486 | ||
344 | pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n", | 487 | if (pmbp) { |
345 | vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str, | 488 | spin_lock(&pmbp->lock); |
346 | (data_val & PMB_C) ? "" : "un"); | ||
347 | 489 | ||
348 | applied++; | ||
349 | } else { | ||
350 | /* | 490 | /* |
351 | * Invalidate anything out of bounds. | 491 | * Compare the previous entry against the current one to |
492 | * see if the entries span a contiguous mapping. If so, | ||
493 | * setup the entry links accordingly. Compound mappings | ||
494 | * are later coalesced. | ||
352 | */ | 495 | */ |
353 | __raw_writel(addr_val & ~PMB_V, addr); | 496 | if (pmb_can_merge(pmbp, pmbe)) |
354 | __raw_writel(data_val & ~PMB_V, data); | 497 | pmbp->link = pmbe; |
498 | |||
499 | spin_unlock(&pmbp->lock); | ||
355 | } | 500 | } |
356 | } | ||
357 | 501 | ||
358 | return (applied == 0); | 502 | pmbp = pmbe; |
503 | |||
504 | spin_unlock_irqrestore(&pmbe->lock, irqflags); | ||
505 | } | ||
359 | } | 506 | } |
360 | #else | 507 | |
361 | static inline int pmb_apply_legacy_mappings(void) | 508 | static void __init pmb_merge(struct pmb_entry *head) |
362 | { | 509 | { |
363 | return 1; | 510 | unsigned long span, newsize; |
511 | struct pmb_entry *tail; | ||
512 | int i = 1, depth = 0; | ||
513 | |||
514 | span = newsize = head->size; | ||
515 | |||
516 | tail = head->link; | ||
517 | while (tail) { | ||
518 | span += tail->size; | ||
519 | |||
520 | if (pmb_size_valid(span)) { | ||
521 | newsize = span; | ||
522 | depth = i; | ||
523 | } | ||
524 | |||
525 | /* This is the end of the line.. */ | ||
526 | if (!tail->link) | ||
527 | break; | ||
528 | |||
529 | tail = tail->link; | ||
530 | i++; | ||
531 | } | ||
532 | |||
533 | /* | ||
534 | * The merged page size must be valid. | ||
535 | */ | ||
536 | if (!pmb_size_valid(newsize)) | ||
537 | return; | ||
538 | |||
539 | head->flags &= ~PMB_SZ_MASK; | ||
540 | head->flags |= pmb_size_to_flags(newsize); | ||
541 | |||
542 | head->size = newsize; | ||
543 | |||
544 | __pmb_unmap_entry(head->link, depth); | ||
545 | __set_pmb_entry(head); | ||
364 | } | 546 | } |
365 | #endif | ||
366 | 547 | ||
367 | int pmb_init(void) | 548 | static void __init pmb_coalesce(void) |
368 | { | 549 | { |
550 | unsigned long flags; | ||
369 | int i; | 551 | int i; |
370 | unsigned long addr, data; | ||
371 | unsigned long ret; | ||
372 | 552 | ||
373 | jump_to_uncached(); | 553 | write_lock_irqsave(&pmb_rwlock, flags); |
374 | 554 | ||
375 | /* | 555 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { |
376 | * Attempt to apply the legacy boot mappings if configured. If | 556 | struct pmb_entry *pmbe; |
377 | * this is successful then we simply carry on with those and | 557 | |
378 | * don't bother establishing additional memory mappings. Dynamic | 558 | if (!test_bit(i, pmb_map)) |
379 | * device mappings through pmb_remap() can still be bolted on | 559 | continue; |
380 | * after this. | 560 | |
381 | */ | 561 | pmbe = &pmb_entry_list[i]; |
382 | ret = pmb_apply_legacy_mappings(); | 562 | |
383 | if (ret == 0) { | 563 | /* |
384 | back_to_cached(); | 564 | * We're only interested in compound mappings |
385 | return 0; | 565 | */ |
566 | if (!pmbe->link) | ||
567 | continue; | ||
568 | |||
569 | /* | ||
570 | * Nothing to do if it already uses the largest possible | ||
571 | * page size. | ||
572 | */ | ||
573 | if (pmbe->size == SZ_512M) | ||
574 | continue; | ||
575 | |||
576 | pmb_merge(pmbe); | ||
386 | } | 577 | } |
387 | 578 | ||
579 | write_unlock_irqrestore(&pmb_rwlock, flags); | ||
580 | } | ||
581 | |||
582 | #ifdef CONFIG_UNCACHED_MAPPING | ||
583 | static void __init pmb_resize(void) | ||
584 | { | ||
585 | int i; | ||
586 | |||
388 | /* | 587 | /* |
389 | * Sync our software copy of the PMB mappings with those in | 588 | * If the uncached mapping was constructed by the kernel, it will |
390 | * hardware. The mappings in the hardware PMB were either set up | 589 | * already be a reasonable size. |
391 | * by the bootloader or very early on by the kernel. | ||
392 | */ | 590 | */ |
393 | for (i = 0; i < PMB_ENTRY_MAX; i++) { | 591 | if (uncached_size == SZ_16M) |
592 | return; | ||
593 | |||
594 | read_lock(&pmb_rwlock); | ||
595 | |||
596 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { | ||
394 | struct pmb_entry *pmbe; | 597 | struct pmb_entry *pmbe; |
395 | unsigned long vpn, ppn, flags; | 598 | unsigned long flags; |
396 | 599 | ||
397 | addr = PMB_DATA + (i << PMB_E_SHIFT); | 600 | if (!test_bit(i, pmb_map)) |
398 | data = __raw_readl(addr); | ||
399 | if (!(data & PMB_V)) | ||
400 | continue; | 601 | continue; |
401 | 602 | ||
402 | if (data & PMB_C) { | 603 | pmbe = &pmb_entry_list[i]; |
403 | #if defined(CONFIG_CACHE_WRITETHROUGH) | ||
404 | data |= PMB_WT; | ||
405 | #elif defined(CONFIG_CACHE_WRITEBACK) | ||
406 | data &= ~PMB_WT; | ||
407 | #else | ||
408 | data &= ~(PMB_C | PMB_WT); | ||
409 | #endif | ||
410 | } | ||
411 | __raw_writel(data, addr); | ||
412 | 604 | ||
413 | ppn = data & PMB_PFN_MASK; | 605 | if (pmbe->vpn != uncached_start) |
606 | continue; | ||
414 | 607 | ||
415 | flags = data & (PMB_C | PMB_WT | PMB_UB); | 608 | /* |
416 | flags |= data & PMB_SZ_MASK; | 609 | * Found it, now resize it. |
610 | */ | ||
611 | spin_lock_irqsave(&pmbe->lock, flags); | ||
417 | 612 | ||
418 | addr = PMB_ADDR + (i << PMB_E_SHIFT); | 613 | pmbe->size = SZ_16M; |
419 | data = __raw_readl(addr); | 614 | pmbe->flags &= ~PMB_SZ_MASK; |
615 | pmbe->flags |= pmb_size_to_flags(pmbe->size); | ||
420 | 616 | ||
421 | vpn = data & PMB_PFN_MASK; | 617 | uncached_resize(pmbe->size); |
422 | 618 | ||
423 | pmbe = pmb_alloc(vpn, ppn, flags, i); | 619 | __set_pmb_entry(pmbe); |
424 | WARN_ON(IS_ERR(pmbe)); | 620 | |
621 | spin_unlock_irqrestore(&pmbe->lock, flags); | ||
425 | } | 622 | } |
426 | 623 | ||
427 | __raw_writel(0, PMB_IRMCR); | 624 | read_unlock(&pmb_rwlock);
625 | } | ||
626 | #endif | ||
428 | 627 | ||
429 | /* Flush out the TLB */ | 628 | void __init pmb_init(void) |
430 | i = __raw_readl(MMUCR); | 629 | { |
431 | i |= MMUCR_TI; | 630 | /* Synchronize software state */ |
432 | __raw_writel(i, MMUCR); | 631 | pmb_synchronize(); |
433 | 632 | ||
434 | back_to_cached(); | 633 | /* Attempt to combine compound mappings */ |
634 | pmb_coalesce(); | ||
435 | 635 | ||
436 | return 0; | 636 | #ifdef CONFIG_UNCACHED_MAPPING |
637 | /* Resize initial mappings, if necessary */ | ||
638 | pmb_resize(); | ||
639 | #endif | ||
640 | |||
641 | /* Log them */ | ||
642 | pmb_notify(); | ||
643 | |||
644 | writel_uncached(0, PMB_IRMCR); | ||
645 | |||
646 | /* Flush out the TLB */ | ||
647 | __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR); | ||
648 | ctrl_barrier(); | ||
437 | } | 649 | } |
438 | 650 | ||
439 | bool __in_29bit_mode(void) | 651 | bool __in_29bit_mode(void) |
@@ -513,14 +725,21 @@ static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state) | |||
513 | if (state.event == PM_EVENT_ON && | 725 | if (state.event == PM_EVENT_ON && |
514 | prev_state.event == PM_EVENT_FREEZE) { | 726 | prev_state.event == PM_EVENT_FREEZE) { |
515 | struct pmb_entry *pmbe; | 727 | struct pmb_entry *pmbe; |
728 | |||
729 | read_lock(&pmb_rwlock); | ||
730 | |||
516 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { | 731 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { |
517 | if (test_bit(i, &pmb_map)) { | 732 | if (test_bit(i, pmb_map)) { |
518 | pmbe = &pmb_entry_list[i]; | 733 | pmbe = &pmb_entry_list[i]; |
519 | set_pmb_entry(pmbe); | 734 | set_pmb_entry(pmbe); |
520 | } | 735 | } |
521 | } | 736 | } |
737 | |||
738 | read_unlock(&pmb_rwlock); | ||
522 | } | 739 | } |
740 | |||
523 | prev_state = state; | 741 | prev_state = state; |
742 | |||
524 | return 0; | 743 | return 0; |
525 | } | 744 | } |
526 | 745 | ||
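The size-selection loop in pmb_remap() is easier to follow in isolation: take the largest entry size that still fits, retry that size until it no longer does, and stop once less than 16MB of the request remains. Below is a stand-alone model of just that arithmetic (no locking, no entry allocation, no hardware programming; the 192MB request is an arbitrary example):

```c
#include <stdio.h>

static const unsigned long pmb_sizes[] = {
	512UL << 20, 128UL << 20, 64UL << 20, 16UL << 20,
};

int main(void)
{
	unsigned long size = 192UL << 20;	/* example request */
	unsigned long wanted = size;
	int i;

again:
	for (i = 0; i < (int)(sizeof(pmb_sizes) / sizeof(pmb_sizes[0])); i++) {
		if (size < pmb_sizes[i])
			continue;

		printf("PMB entry of %luMB\n", pmb_sizes[i] >> 20);
		size -= pmb_sizes[i];
		i--;			/* retry the same size on the next pass */
	}

	if (size >= (16UL << 20))
		goto again;

	printf("covered %luMB of a %luMB request\n",
	       (wanted - size) >> 20, wanted >> 20);

	return 0;
}
```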
diff --git a/arch/sh/mm/uncached.c b/arch/sh/mm/uncached.c
new file mode 100644
index 000000000000..cf20a5c5136a
--- /dev/null
+++ b/arch/sh/mm/uncached.c
@@ -0,0 +1,34 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <asm/sizes.h> | ||
3 | #include <asm/page.h> | ||
4 | |||
5 | /* | ||
6 | * This is the offset of the uncached section from its cached alias. | ||
7 | * | ||
8 | * Legacy platforms handle trivial transitions between cached and | ||
9 | * uncached segments by making use of the 1:1 mapping relationship in | ||
10 | * 512MB lowmem, others via a special uncached mapping. | ||
11 | * | ||
12 | * Default value only valid in 29 bit mode, in 32bit mode this will be | ||
13 | * updated by the early PMB initialization code. | ||
14 | */ | ||
15 | unsigned long cached_to_uncached = SZ_512M; | ||
16 | unsigned long uncached_size = SZ_512M; | ||
17 | unsigned long uncached_start, uncached_end; | ||
18 | |||
19 | int virt_addr_uncached(unsigned long kaddr) | ||
20 | { | ||
21 | return (kaddr >= uncached_start) && (kaddr < uncached_end); | ||
22 | } | ||
23 | |||
24 | void __init uncached_init(void) | ||
25 | { | ||
26 | uncached_start = memory_end; | ||
27 | uncached_end = uncached_start + uncached_size; | ||
28 | } | ||
29 | |||
30 | void __init uncached_resize(unsigned long size) | ||
31 | { | ||
32 | uncached_size = size; | ||
33 | uncached_end = uncached_start + uncached_size; | ||
34 | } | ||
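Finally, a stand-alone model of the bookkeeping in this new file: uncached_init() parks the uncached alias directly after lowmem, and the pmb_resize() path above later shrinks it to 16MB once the PMB is under dynamic control. The memory_end value here is a made-up example:

```c
#include <stdio.h>

static unsigned long memory_end = 0x90000000UL;		/* example value */
static unsigned long uncached_size = 512UL << 20;	/* 29-bit default */
static unsigned long uncached_start, uncached_end;

static void uncached_init(void)
{
	uncached_start = memory_end;
	uncached_end = uncached_start + uncached_size;
}

static void uncached_resize(unsigned long size)
{
	uncached_size = size;
	uncached_end = uncached_start + uncached_size;
}

static int virt_addr_uncached(unsigned long kaddr)
{
	return kaddr >= uncached_start && kaddr < uncached_end;
}

int main(void)
{
	uncached_init();
	printf("initial window: %#lx-%#lx\n", uncached_start, uncached_end);

	uncached_resize(16UL << 20);	/* what pmb_resize() asks for */
	printf("after resize:   %#lx-%#lx (start+0x1000 uncached? %d)\n",
	       uncached_start, uncached_end,
	       virt_addr_uncached(uncached_start + 0x1000));

	return 0;
}
```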