author     Paul Mundt <lethal@linux-sh.org>   2009-11-24 02:32:11 -0500
committer  Paul Mundt <lethal@linux-sh.org>   2009-11-24 02:32:11 -0500
commit     49fb2cd2571e0134e5a12c5abab227696e4940c7 (patch)
tree       9a77364e988ef3f3af24feee3f5bb91bd0c34129 /arch/sh/mm
parent     dfc349402de8e95f6a42e8341e9ea193b718eee3 (diff)
parent     260af56271f79da0e37faa5a99b1786b221297e5 (diff)
Merge branch 'master' into sh/st-integration
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--   arch/sh/mm/Kconfig          19
-rw-r--r--   arch/sh/mm/Makefile          3
-rw-r--r--   arch/sh/mm/cache-sh4.c     503
-rw-r--r--   arch/sh/mm/cache-sh5.c       2
-rw-r--r--   arch/sh/mm/cache-sh7705.c    2
-rw-r--r--   arch/sh/mm/cache.c          18
-rw-r--r--   arch/sh/mm/consistent.c     28
-rw-r--r--   arch/sh/mm/init.c           19
-rw-r--r--   arch/sh/mm/kmap.c            4
-rw-r--r--   arch/sh/mm/numa.c            2
-rw-r--r--   arch/sh/mm/pmb-fixed.c      45
-rw-r--r--   arch/sh/mm/pmb.c           268
12 files changed, 277 insertions, 636 deletions
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 7f7b52f9beba..0e7ba8e891cf 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -82,8 +82,7 @@ config 32BIT | |||
82 | 82 | ||
83 | config PMB_ENABLE | 83 | config PMB_ENABLE |
84 | bool "Support 32-bit physical addressing through PMB" | 84 | bool "Support 32-bit physical addressing through PMB" |
85 | depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785) | 85 | depends on MMU && EXPERIMENTAL && CPU_SH4A |
86 | select 32BIT | ||
87 | default y | 86 | default y |
88 | help | 87 | help |
89 | If you say Y here, physical addressing will be extended to | 88 | If you say Y here, physical addressing will be extended to |
@@ -97,8 +96,7 @@ choice | |||
97 | 96 | ||
98 | config PMB | 97 | config PMB |
99 | bool "PMB" | 98 | bool "PMB" |
100 | depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785) | 99 | depends on MMU && EXPERIMENTAL && CPU_SH4A |
101 | select 32BIT | ||
102 | help | 100 | help |
103 | If you say Y here, physical addressing will be extended to | 101 | If you say Y here, physical addressing will be extended to |
104 | 32-bits through the SH-4A PMB. If this is not set, legacy | 102 | 32-bits through the SH-4A PMB. If this is not set, legacy |
@@ -106,9 +104,7 @@ config PMB | |||
106 | 104 | ||
107 | config PMB_FIXED | 105 | config PMB_FIXED |
108 | bool "fixed PMB" | 106 | bool "fixed PMB" |
109 | depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || \ | 107 | depends on MMU && EXPERIMENTAL && CPU_SH4A |
110 | CPU_SUBTYPE_SH7780 || \ | ||
111 | CPU_SUBTYPE_SH7785) | ||
112 | select 32BIT | 108 | select 32BIT |
113 | help | 109 | help |
114 | If this option is enabled, fixed PMB mappings are inherited | 110 | If this option is enabled, fixed PMB mappings are inherited |
@@ -258,6 +254,15 @@ endchoice | |||
258 | 254 | ||
259 | source "mm/Kconfig" | 255 | source "mm/Kconfig" |
260 | 256 | ||
257 | config SCHED_MC | ||
258 | bool "Multi-core scheduler support" | ||
259 | depends on SMP | ||
260 | default y | ||
261 | help | ||
262 | Multi-core scheduler support improves the CPU scheduler's decision | ||
263 | making when dealing with multi-core CPU chips at a cost of slightly | ||
264 | increased overhead in some places. If unsure say N here. | ||
265 | |||
261 | endmenu | 266 | endmenu |
262 | 267 | ||
263 | menu "Cache configuration" | 268 | menu "Cache configuration" |
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 3759bf853293..8a70535fa7ce 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -33,8 +33,7 @@ obj-y += $(tlb-y) | |||
33 | endif | 33 | endif |
34 | 34 | ||
35 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | 35 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o |
36 | obj-$(CONFIG_PMB) += pmb.o | 36 | obj-$(CONFIG_PMB_ENABLE) += pmb.o |
37 | obj-$(CONFIG_PMB_FIXED) += pmb-fixed.o | ||
38 | obj-$(CONFIG_NUMA) += numa.o | 37 | obj-$(CONFIG_NUMA) += numa.o |
39 | 38 | ||
40 | # Special flags for fault_64.o. This puts restrictions on the number of | 39 | # Special flags for fault_64.o. This puts restrictions on the number of |
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index b7f235c74d66..6bfd08d5fb81 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -2,7 +2,7 @@ | |||
2 | * arch/sh/mm/cache-sh4.c | 2 | * arch/sh/mm/cache-sh4.c |
3 | * | 3 | * |
4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | 4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka |
5 | * Copyright (C) 2001 - 2007 Paul Mundt | 5 | * Copyright (C) 2001 - 2009 Paul Mundt |
6 | * Copyright (C) 2003 Richard Curnow | 6 | * Copyright (C) 2003 Richard Curnow |
7 | * Copyright (c) 2007 STMicroelectronics (R&D) Ltd. | 7 | * Copyright (c) 2007 STMicroelectronics (R&D) Ltd. |
8 | * | 8 | * |
@@ -15,6 +15,8 @@ | |||
15 | #include <linux/io.h> | 15 | #include <linux/io.h> |
16 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
18 | #include <linux/highmem.h> | ||
19 | #include <asm/pgtable.h> | ||
18 | #include <asm/mmu_context.h> | 20 | #include <asm/mmu_context.h> |
19 | #include <asm/cacheflush.h> | 21 | #include <asm/cacheflush.h> |
20 | 22 | ||
@@ -23,21 +25,12 @@ | |||
23 | * flushing. Anything exceeding this will simply flush the dcache in its | 25 | * flushing. Anything exceeding this will simply flush the dcache in its |
24 | * entirety. | 26 | * entirety. |
25 | */ | 27 | */ |
26 | #define MAX_DCACHE_PAGES 64 /* XXX: Tune for ways */ | ||
27 | #define MAX_ICACHE_PAGES 32 | 28 | #define MAX_ICACHE_PAGES 32 |
28 | 29 | ||
29 | static void __flush_cache_one(unsigned long addr, unsigned long phys, | 30 | static void __flush_cache_one(unsigned long addr, unsigned long phys, |
30 | unsigned long exec_offset); | 31 | unsigned long exec_offset); |
31 | 32 | ||
32 | /* | 33 | /* |
33 | * This is initialised here to ensure that it is not placed in the BSS. If | ||
34 | * that were to happen, note that cache_init gets called before the BSS is | ||
35 | * cleared, so this would get nulled out which would be hopeless. | ||
36 | */ | ||
37 | static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) = | ||
38 | (void (*)(unsigned long, unsigned long))0xdeadbeef; | ||
39 | |||
40 | /* | ||
41 | * Write back the range of D-cache, and purge the I-cache. | 34 | * Write back the range of D-cache, and purge the I-cache. |
42 | * | 35 | * |
43 | * Called from kernel/module.c:sys_init_module and routine for a.out format, | 36 | * Called from kernel/module.c:sys_init_module and routine for a.out format, |
@@ -97,15 +90,15 @@ static inline void flush_cache_one(unsigned long start, unsigned long phys) | |||
97 | unsigned long flags, exec_offset = 0; | 90 | unsigned long flags, exec_offset = 0; |
98 | 91 | ||
99 | /* | 92 | /* |
100 | * All types of SH-4 require PC to be in P2 to operate on the I-cache. | 93 | * All types of SH-4 require PC to be uncached to operate on the I-cache. |
101 | * Some types of SH-4 require PC to be in P2 to operate on the D-cache. | 94 | * Some types of SH-4 require PC to be uncached to operate on the D-cache. |
102 | */ | 95 | */ |
103 | if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) || | 96 | if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) || |
104 | (start < CACHE_OC_ADDRESS_ARRAY)) | 97 | (start < CACHE_OC_ADDRESS_ARRAY)) |
105 | exec_offset = 0x20000000; | 98 | exec_offset = cached_to_uncached; |
106 | 99 | ||
107 | local_irq_save(flags); | 100 | local_irq_save(flags); |
108 | __flush_cache_one(start | SH_CACHE_ASSOC, P1SEGADDR(phys), exec_offset); | 101 | __flush_cache_one(start | SH_CACHE_ASSOC, phys, exec_offset); |
109 | local_irq_restore(flags); | 102 | local_irq_restore(flags); |
110 | } | 103 | } |
111 | 104 | ||
@@ -124,13 +117,13 @@ static void sh4_flush_dcache_page(void *arg) | |||
124 | else | 117 | else |
125 | #endif | 118 | #endif |
126 | { | 119 | { |
127 | unsigned long phys = PHYSADDR(page_address(page)); | 120 | unsigned long phys = page_to_phys(page); |
128 | unsigned long addr = CACHE_OC_ADDRESS_ARRAY; | 121 | unsigned long addr = CACHE_OC_ADDRESS_ARRAY; |
129 | int i, n; | 122 | int i, n; |
130 | 123 | ||
131 | /* Loop all the D-cache */ | 124 | /* Loop all the D-cache */ |
132 | n = boot_cpu_data.dcache.n_aliases; | 125 | n = boot_cpu_data.dcache.n_aliases; |
133 | for (i = 0; i < n; i++, addr += PAGE_SIZE) | 126 | for (i = 0; i <= n; i++, addr += PAGE_SIZE) |
134 | flush_cache_one(addr, phys); | 127 | flush_cache_one(addr, phys); |
135 | } | 128 | } |
136 | 129 | ||
@@ -159,10 +152,27 @@ static void __uses_jump_to_uncached flush_icache_all(void) | |||
159 | local_irq_restore(flags); | 152 | local_irq_restore(flags); |
160 | } | 153 | } |
161 | 154 | ||
162 | static inline void flush_dcache_all(void) | 155 | static void flush_dcache_all(void) |
163 | { | 156 | { |
164 | (*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size); | 157 | unsigned long addr, end_addr, entry_offset; |
165 | wmb(); | 158 | |
159 | end_addr = CACHE_OC_ADDRESS_ARRAY + | ||
160 | (current_cpu_data.dcache.sets << | ||
161 | current_cpu_data.dcache.entry_shift) * | ||
162 | current_cpu_data.dcache.ways; | ||
163 | |||
164 | entry_offset = 1 << current_cpu_data.dcache.entry_shift; | ||
165 | |||
166 | for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; ) { | ||
167 | __raw_writel(0, addr); addr += entry_offset; | ||
168 | __raw_writel(0, addr); addr += entry_offset; | ||
169 | __raw_writel(0, addr); addr += entry_offset; | ||
170 | __raw_writel(0, addr); addr += entry_offset; | ||
171 | __raw_writel(0, addr); addr += entry_offset; | ||
172 | __raw_writel(0, addr); addr += entry_offset; | ||
173 | __raw_writel(0, addr); addr += entry_offset; | ||
174 | __raw_writel(0, addr); addr += entry_offset; | ||
175 | } | ||
166 | } | 176 | } |
167 | 177 | ||
168 | static void sh4_flush_cache_all(void *unused) | 178 | static void sh4_flush_cache_all(void *unused) |
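[Editor's note] The replacement flush_dcache_all() above walks the operand-cache address array directly instead of dispatching through the old per-way function pointer. Below is a minimal user-space sketch of the same address arithmetic, with made-up SH-4A-style geometry (512 sets, 4 ways, 32-byte lines) standing in for current_cpu_data:

```c
#include <stdio.h>

/* Illustrative cache parameters; the kernel reads the real ones from
 * current_cpu_data.dcache at run time. */
#define CACHE_OC_ADDRESS_ARRAY 0xf4000000UL
#define DCACHE_SETS            512
#define DCACHE_WAYS            4
#define DCACHE_ENTRY_SHIFT     5        /* 32-byte cache lines */

int main(void)
{
        unsigned long addr, end_addr, entry_offset;
        unsigned long entries = 0;

        /* Same computation as the new flush_dcache_all(): one
         * address-array slot per line, sets * ways slots in total. */
        end_addr = CACHE_OC_ADDRESS_ARRAY +
                   ((unsigned long)DCACHE_SETS << DCACHE_ENTRY_SHIFT) *
                   DCACHE_WAYS;
        entry_offset = 1UL << DCACHE_ENTRY_SHIFT;

        for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr;
             addr += entry_offset)
                entries++;      /* the kernel does __raw_writel(0, addr) here */

        printf("address-array slots touched: %lu (expected %d)\n",
               entries, DCACHE_SETS * DCACHE_WAYS);
        return 0;
}
```

Writing zero to every slot invalidates the whole cache regardless of associativity, which is what lets the dedicated 1-, 2- and 4-way helpers later in this file be removed.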
@@ -171,89 +181,13 @@ static void sh4_flush_cache_all(void *unused) | |||
171 | flush_icache_all(); | 181 | flush_icache_all(); |
172 | } | 182 | } |
173 | 183 | ||
174 | static void __flush_cache_mm(struct mm_struct *mm, unsigned long start, | ||
175 | unsigned long end) | ||
176 | { | ||
177 | unsigned long d = 0, p = start & PAGE_MASK; | ||
178 | unsigned long alias_mask = boot_cpu_data.dcache.alias_mask; | ||
179 | unsigned long n_aliases = boot_cpu_data.dcache.n_aliases; | ||
180 | unsigned long select_bit; | ||
181 | unsigned long all_aliases_mask; | ||
182 | unsigned long addr_offset; | ||
183 | pgd_t *dir; | ||
184 | pmd_t *pmd; | ||
185 | pud_t *pud; | ||
186 | pte_t *pte; | ||
187 | int i; | ||
188 | |||
189 | dir = pgd_offset(mm, p); | ||
190 | pud = pud_offset(dir, p); | ||
191 | pmd = pmd_offset(pud, p); | ||
192 | end = PAGE_ALIGN(end); | ||
193 | |||
194 | all_aliases_mask = (1 << n_aliases) - 1; | ||
195 | |||
196 | do { | ||
197 | if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) { | ||
198 | p &= PMD_MASK; | ||
199 | p += PMD_SIZE; | ||
200 | pmd++; | ||
201 | |||
202 | continue; | ||
203 | } | ||
204 | |||
205 | pte = pte_offset_kernel(pmd, p); | ||
206 | |||
207 | do { | ||
208 | unsigned long phys; | ||
209 | pte_t entry = *pte; | ||
210 | |||
211 | if (!(pte_val(entry) & _PAGE_PRESENT)) { | ||
212 | pte++; | ||
213 | p += PAGE_SIZE; | ||
214 | continue; | ||
215 | } | ||
216 | |||
217 | phys = pte_val(entry) & PTE_PHYS_MASK; | ||
218 | |||
219 | if ((p ^ phys) & alias_mask) { | ||
220 | d |= 1 << ((p & alias_mask) >> PAGE_SHIFT); | ||
221 | d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT); | ||
222 | |||
223 | if (d == all_aliases_mask) | ||
224 | goto loop_exit; | ||
225 | } | ||
226 | |||
227 | pte++; | ||
228 | p += PAGE_SIZE; | ||
229 | } while (p < end && ((unsigned long)pte & ~PAGE_MASK)); | ||
230 | pmd++; | ||
231 | } while (p < end); | ||
232 | |||
233 | loop_exit: | ||
234 | addr_offset = 0; | ||
235 | select_bit = 1; | ||
236 | |||
237 | for (i = 0; i < n_aliases; i++) { | ||
238 | if (d & select_bit) { | ||
239 | (*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE); | ||
240 | wmb(); | ||
241 | } | ||
242 | |||
243 | select_bit <<= 1; | ||
244 | addr_offset += PAGE_SIZE; | ||
245 | } | ||
246 | } | ||
247 | |||
248 | /* | 184 | /* |
249 | * Note : (RPC) since the caches are physically tagged, the only point | 185 | * Note : (RPC) since the caches are physically tagged, the only point |
250 | * of flush_cache_mm for SH-4 is to get rid of aliases from the | 186 | * of flush_cache_mm for SH-4 is to get rid of aliases from the |
251 | * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that | 187 | * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that |
252 | * lines can stay resident so long as the virtual address they were | 188 | * lines can stay resident so long as the virtual address they were |
253 | * accessed with (hence cache set) is in accord with the physical | 189 | * accessed with (hence cache set) is in accord with the physical |
254 | * address (i.e. tag). It's no different here. So I reckon we don't | 190 | * address (i.e. tag). It's no different here. |
255 | * need to flush the I-cache, since aliases don't matter for that. We | ||
256 | * should try that. | ||
257 | * | 191 | * |
258 | * Caller takes mm->mmap_sem. | 192 | * Caller takes mm->mmap_sem. |
259 | */ | 193 | */ |
@@ -264,33 +198,7 @@ static void sh4_flush_cache_mm(void *arg) | |||
264 | if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT) | 198 | if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT) |
265 | return; | 199 | return; |
266 | 200 | ||
267 | /* | 201 | flush_dcache_all(); |
268 | * If cache is only 4k-per-way, there are never any 'aliases'. Since | ||
269 | * the cache is physically tagged, the data can just be left in there. | ||
270 | */ | ||
271 | if (boot_cpu_data.dcache.n_aliases == 0) | ||
272 | return; | ||
273 | |||
274 | /* | ||
275 | * Don't bother groveling around the dcache for the VMA ranges | ||
276 | * if there are too many PTEs to make it worthwhile. | ||
277 | */ | ||
278 | if (mm->nr_ptes >= MAX_DCACHE_PAGES) | ||
279 | flush_dcache_all(); | ||
280 | else { | ||
281 | struct vm_area_struct *vma; | ||
282 | |||
283 | /* | ||
284 | * In this case there are reasonably sized ranges to flush, | ||
285 | * iterate through the VMA list and take care of any aliases. | ||
286 | */ | ||
287 | for (vma = mm->mmap; vma; vma = vma->vm_next) | ||
288 | __flush_cache_mm(mm, vma->vm_start, vma->vm_end); | ||
289 | } | ||
290 | |||
291 | /* Only touch the icache if one of the VMAs has VM_EXEC set. */ | ||
292 | if (mm->exec_vm) | ||
293 | flush_icache_all(); | ||
294 | } | 202 | } |
295 | 203 | ||
296 | /* | 204 | /* |
@@ -303,44 +211,63 @@ static void sh4_flush_cache_page(void *args) | |||
303 | { | 211 | { |
304 | struct flusher_data *data = args; | 212 | struct flusher_data *data = args; |
305 | struct vm_area_struct *vma; | 213 | struct vm_area_struct *vma; |
214 | struct page *page; | ||
306 | unsigned long address, pfn, phys; | 215 | unsigned long address, pfn, phys; |
307 | unsigned int alias_mask; | 216 | int map_coherent = 0; |
217 | pgd_t *pgd; | ||
218 | pud_t *pud; | ||
219 | pmd_t *pmd; | ||
220 | pte_t *pte; | ||
221 | void *vaddr; | ||
308 | 222 | ||
309 | vma = data->vma; | 223 | vma = data->vma; |
310 | address = data->addr1; | 224 | address = data->addr1 & PAGE_MASK; |
311 | pfn = data->addr2; | 225 | pfn = data->addr2; |
312 | phys = pfn << PAGE_SHIFT; | 226 | phys = pfn << PAGE_SHIFT; |
227 | page = pfn_to_page(pfn); | ||
313 | 228 | ||
314 | if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) | 229 | if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) |
315 | return; | 230 | return; |
316 | 231 | ||
317 | alias_mask = boot_cpu_data.dcache.alias_mask; | 232 | pgd = pgd_offset(vma->vm_mm, address); |
318 | 233 | pud = pud_offset(pgd, address); | |
319 | /* We only need to flush D-cache when we have alias */ | 234 | pmd = pmd_offset(pud, address); |
320 | if ((address^phys) & alias_mask) { | 235 | pte = pte_offset_kernel(pmd, address); |
321 | /* Loop 4K of the D-cache */ | 236 | |
322 | flush_cache_one( | 237 | /* If the page isn't present, there is nothing to do here. */ |
323 | CACHE_OC_ADDRESS_ARRAY | (address & alias_mask), | 238 | if (!(pte_val(*pte) & _PAGE_PRESENT)) |
324 | phys); | 239 | return; |
325 | /* Loop another 4K of the D-cache */ | ||
326 | flush_cache_one( | ||
327 | CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask), | ||
328 | phys); | ||
329 | } | ||
330 | 240 | ||
331 | alias_mask = boot_cpu_data.icache.alias_mask; | 241 | if ((vma->vm_mm == current->active_mm)) |
332 | if (vma->vm_flags & VM_EXEC) { | 242 | vaddr = NULL; |
243 | else { | ||
333 | /* | 244 | /* |
334 | * Evict entries from the portion of the cache from which code | 245 | * Use kmap_coherent or kmap_atomic to do flushes for |
335 | * may have been executed at this address (virtual). There's | 246 | * another ASID than the current one. |
336 | * no need to evict from the portion corresponding to the | ||
337 | * physical address as for the D-cache, because we know the | ||
338 | * kernel has never executed the code through its identity | ||
339 | * translation. | ||
340 | */ | 247 | */ |
341 | flush_cache_one( | 248 | map_coherent = (current_cpu_data.dcache.n_aliases && |
342 | CACHE_IC_ADDRESS_ARRAY | (address & alias_mask), | 249 | !test_bit(PG_dcache_dirty, &page->flags) && |
343 | phys); | 250 | page_mapped(page)); |
251 | if (map_coherent) | ||
252 | vaddr = kmap_coherent(page, address); | ||
253 | else | ||
254 | vaddr = kmap_atomic(page, KM_USER0); | ||
255 | |||
256 | address = (unsigned long)vaddr; | ||
257 | } | ||
258 | |||
259 | if (pages_do_alias(address, phys)) | ||
260 | flush_cache_one(CACHE_OC_ADDRESS_ARRAY | | ||
261 | (address & shm_align_mask), phys); | ||
262 | |||
263 | if (vma->vm_flags & VM_EXEC) | ||
264 | flush_icache_all(); | ||
265 | |||
266 | if (vaddr) { | ||
267 | if (map_coherent) | ||
268 | kunmap_coherent(vaddr); | ||
269 | else | ||
270 | kunmap_atomic(vaddr, KM_USER0); | ||
344 | } | 271 | } |
345 | } | 272 | } |
346 | 273 | ||
@@ -373,24 +300,10 @@ static void sh4_flush_cache_range(void *args) | |||
373 | if (boot_cpu_data.dcache.n_aliases == 0) | 300 | if (boot_cpu_data.dcache.n_aliases == 0) |
374 | return; | 301 | return; |
375 | 302 | ||
376 | /* | 303 | flush_dcache_all(); |
377 | * Don't bother with the lookup and alias check if we have a | ||
378 | * wide range to cover, just blow away the dcache in its | ||
379 | * entirety instead. -- PFM. | ||
380 | */ | ||
381 | if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES) | ||
382 | flush_dcache_all(); | ||
383 | else | ||
384 | __flush_cache_mm(vma->vm_mm, start, end); | ||
385 | 304 | ||
386 | if (vma->vm_flags & VM_EXEC) { | 305 | if (vma->vm_flags & VM_EXEC) |
387 | /* | ||
388 | * TODO: Is this required??? Need to look at how I-cache | ||
389 | * coherency is assured when new programs are loaded to see if | ||
390 | * this matters. | ||
391 | */ | ||
392 | flush_icache_all(); | 306 | flush_icache_all(); |
393 | } | ||
394 | } | 307 | } |
395 | 308 | ||
396 | /** | 309 | /** |
@@ -464,245 +377,6 @@ static void __flush_cache_one(unsigned long addr, unsigned long phys, | |||
464 | } while (--way_count != 0); | 377 | } while (--way_count != 0); |
465 | } | 378 | } |
466 | 379 | ||
467 | /* | ||
468 | * Break the 1, 2 and 4 way variants of this out into separate functions to | ||
469 | * avoid nearly all the overhead of having the conditional stuff in the function | ||
470 | * bodies (+ the 1 and 2 way cases avoid saving any registers too). | ||
471 | * | ||
472 | * We want to eliminate unnecessary bus transactions, so this code uses | ||
473 | * a non-obvious technique. | ||
474 | * | ||
475 | * Loop over a cache way sized block of, one cache line at a time. For each | ||
476 | * line, use movca.a to cause the current cache line contents to be written | ||
477 | * back, but without reading anything from main memory. However this has the | ||
478 | * side effect that the cache is now caching that memory location. So follow | ||
479 | * this with a cache invalidate to mark the cache line invalid. And do all | ||
480 | * this with interrupts disabled, to avoid the cache line being accidently | ||
481 | * evicted while it is holding garbage. | ||
482 | * | ||
483 | * This also breaks in a number of circumstances: | ||
484 | * - if there are modifications to the region of memory just above | ||
485 | * empty_zero_page (for example because a breakpoint has been placed | ||
486 | * there), then these can be lost. | ||
487 | * | ||
488 | * This is because the the memory address which the cache temporarily | ||
489 | * caches in the above description is empty_zero_page. So the | ||
490 | * movca.l hits the cache (it is assumed that it misses, or at least | ||
491 | * isn't dirty), modifies the line and then invalidates it, losing the | ||
492 | * required change. | ||
493 | * | ||
494 | * - If caches are disabled or configured in write-through mode, then | ||
495 | * the movca.l writes garbage directly into memory. | ||
496 | */ | ||
497 | static void __flush_dcache_segment_writethrough(unsigned long start, | ||
498 | unsigned long extent_per_way) | ||
499 | { | ||
500 | unsigned long addr; | ||
501 | int i; | ||
502 | |||
503 | addr = CACHE_OC_ADDRESS_ARRAY | (start & cpu_data->dcache.entry_mask); | ||
504 | |||
505 | while (extent_per_way) { | ||
506 | for (i = 0; i < cpu_data->dcache.ways; i++) | ||
507 | __raw_writel(0, addr + cpu_data->dcache.way_incr * i); | ||
508 | |||
509 | addr += cpu_data->dcache.linesz; | ||
510 | extent_per_way -= cpu_data->dcache.linesz; | ||
511 | } | ||
512 | } | ||
513 | |||
514 | static void __flush_dcache_segment_1way(unsigned long start, | ||
515 | unsigned long extent_per_way) | ||
516 | { | ||
517 | unsigned long orig_sr, sr_with_bl; | ||
518 | unsigned long base_addr; | ||
519 | unsigned long way_incr, linesz, way_size; | ||
520 | struct cache_info *dcache; | ||
521 | register unsigned long a0, a0e; | ||
522 | |||
523 | asm volatile("stc sr, %0" : "=r" (orig_sr)); | ||
524 | sr_with_bl = orig_sr | (1<<28); | ||
525 | base_addr = ((unsigned long)&empty_zero_page[0]); | ||
526 | |||
527 | /* | ||
528 | * The previous code aligned base_addr to 16k, i.e. the way_size of all | ||
529 | * existing SH-4 D-caches. Whilst I don't see a need to have this | ||
530 | * aligned to any better than the cache line size (which it will be | ||
531 | * anyway by construction), let's align it to at least the way_size of | ||
532 | * any existing or conceivable SH-4 D-cache. -- RPC | ||
533 | */ | ||
534 | base_addr = ((base_addr >> 16) << 16); | ||
535 | base_addr |= start; | ||
536 | |||
537 | dcache = &boot_cpu_data.dcache; | ||
538 | linesz = dcache->linesz; | ||
539 | way_incr = dcache->way_incr; | ||
540 | way_size = dcache->way_size; | ||
541 | |||
542 | a0 = base_addr; | ||
543 | a0e = base_addr + extent_per_way; | ||
544 | do { | ||
545 | asm volatile("ldc %0, sr" : : "r" (sr_with_bl)); | ||
546 | asm volatile("movca.l r0, @%0\n\t" | ||
547 | "ocbi @%0" : : "r" (a0)); | ||
548 | a0 += linesz; | ||
549 | asm volatile("movca.l r0, @%0\n\t" | ||
550 | "ocbi @%0" : : "r" (a0)); | ||
551 | a0 += linesz; | ||
552 | asm volatile("movca.l r0, @%0\n\t" | ||
553 | "ocbi @%0" : : "r" (a0)); | ||
554 | a0 += linesz; | ||
555 | asm volatile("movca.l r0, @%0\n\t" | ||
556 | "ocbi @%0" : : "r" (a0)); | ||
557 | asm volatile("ldc %0, sr" : : "r" (orig_sr)); | ||
558 | a0 += linesz; | ||
559 | } while (a0 < a0e); | ||
560 | } | ||
561 | |||
562 | static void __flush_dcache_segment_2way(unsigned long start, | ||
563 | unsigned long extent_per_way) | ||
564 | { | ||
565 | unsigned long orig_sr, sr_with_bl; | ||
566 | unsigned long base_addr; | ||
567 | unsigned long way_incr, linesz, way_size; | ||
568 | struct cache_info *dcache; | ||
569 | register unsigned long a0, a1, a0e; | ||
570 | |||
571 | asm volatile("stc sr, %0" : "=r" (orig_sr)); | ||
572 | sr_with_bl = orig_sr | (1<<28); | ||
573 | base_addr = ((unsigned long)&empty_zero_page[0]); | ||
574 | |||
575 | /* See comment under 1-way above */ | ||
576 | base_addr = ((base_addr >> 16) << 16); | ||
577 | base_addr |= start; | ||
578 | |||
579 | dcache = &boot_cpu_data.dcache; | ||
580 | linesz = dcache->linesz; | ||
581 | way_incr = dcache->way_incr; | ||
582 | way_size = dcache->way_size; | ||
583 | |||
584 | a0 = base_addr; | ||
585 | a1 = a0 + way_incr; | ||
586 | a0e = base_addr + extent_per_way; | ||
587 | do { | ||
588 | asm volatile("ldc %0, sr" : : "r" (sr_with_bl)); | ||
589 | asm volatile("movca.l r0, @%0\n\t" | ||
590 | "movca.l r0, @%1\n\t" | ||
591 | "ocbi @%0\n\t" | ||
592 | "ocbi @%1" : : | ||
593 | "r" (a0), "r" (a1)); | ||
594 | a0 += linesz; | ||
595 | a1 += linesz; | ||
596 | asm volatile("movca.l r0, @%0\n\t" | ||
597 | "movca.l r0, @%1\n\t" | ||
598 | "ocbi @%0\n\t" | ||
599 | "ocbi @%1" : : | ||
600 | "r" (a0), "r" (a1)); | ||
601 | a0 += linesz; | ||
602 | a1 += linesz; | ||
603 | asm volatile("movca.l r0, @%0\n\t" | ||
604 | "movca.l r0, @%1\n\t" | ||
605 | "ocbi @%0\n\t" | ||
606 | "ocbi @%1" : : | ||
607 | "r" (a0), "r" (a1)); | ||
608 | a0 += linesz; | ||
609 | a1 += linesz; | ||
610 | asm volatile("movca.l r0, @%0\n\t" | ||
611 | "movca.l r0, @%1\n\t" | ||
612 | "ocbi @%0\n\t" | ||
613 | "ocbi @%1" : : | ||
614 | "r" (a0), "r" (a1)); | ||
615 | asm volatile("ldc %0, sr" : : "r" (orig_sr)); | ||
616 | a0 += linesz; | ||
617 | a1 += linesz; | ||
618 | } while (a0 < a0e); | ||
619 | } | ||
620 | |||
621 | static void __flush_dcache_segment_4way(unsigned long start, | ||
622 | unsigned long extent_per_way) | ||
623 | { | ||
624 | unsigned long orig_sr, sr_with_bl; | ||
625 | unsigned long base_addr; | ||
626 | unsigned long way_incr, linesz, way_size; | ||
627 | struct cache_info *dcache; | ||
628 | register unsigned long a0, a1, a2, a3, a0e; | ||
629 | |||
630 | asm volatile("stc sr, %0" : "=r" (orig_sr)); | ||
631 | sr_with_bl = orig_sr | (1<<28); | ||
632 | base_addr = ((unsigned long)&empty_zero_page[0]); | ||
633 | |||
634 | /* See comment under 1-way above */ | ||
635 | base_addr = ((base_addr >> 16) << 16); | ||
636 | base_addr |= start; | ||
637 | |||
638 | dcache = &boot_cpu_data.dcache; | ||
639 | linesz = dcache->linesz; | ||
640 | way_incr = dcache->way_incr; | ||
641 | way_size = dcache->way_size; | ||
642 | |||
643 | a0 = base_addr; | ||
644 | a1 = a0 + way_incr; | ||
645 | a2 = a1 + way_incr; | ||
646 | a3 = a2 + way_incr; | ||
647 | a0e = base_addr + extent_per_way; | ||
648 | do { | ||
649 | asm volatile("ldc %0, sr" : : "r" (sr_with_bl)); | ||
650 | asm volatile("movca.l r0, @%0\n\t" | ||
651 | "movca.l r0, @%1\n\t" | ||
652 | "movca.l r0, @%2\n\t" | ||
653 | "movca.l r0, @%3\n\t" | ||
654 | "ocbi @%0\n\t" | ||
655 | "ocbi @%1\n\t" | ||
656 | "ocbi @%2\n\t" | ||
657 | "ocbi @%3\n\t" : : | ||
658 | "r" (a0), "r" (a1), "r" (a2), "r" (a3)); | ||
659 | a0 += linesz; | ||
660 | a1 += linesz; | ||
661 | a2 += linesz; | ||
662 | a3 += linesz; | ||
663 | asm volatile("movca.l r0, @%0\n\t" | ||
664 | "movca.l r0, @%1\n\t" | ||
665 | "movca.l r0, @%2\n\t" | ||
666 | "movca.l r0, @%3\n\t" | ||
667 | "ocbi @%0\n\t" | ||
668 | "ocbi @%1\n\t" | ||
669 | "ocbi @%2\n\t" | ||
670 | "ocbi @%3\n\t" : : | ||
671 | "r" (a0), "r" (a1), "r" (a2), "r" (a3)); | ||
672 | a0 += linesz; | ||
673 | a1 += linesz; | ||
674 | a2 += linesz; | ||
675 | a3 += linesz; | ||
676 | asm volatile("movca.l r0, @%0\n\t" | ||
677 | "movca.l r0, @%1\n\t" | ||
678 | "movca.l r0, @%2\n\t" | ||
679 | "movca.l r0, @%3\n\t" | ||
680 | "ocbi @%0\n\t" | ||
681 | "ocbi @%1\n\t" | ||
682 | "ocbi @%2\n\t" | ||
683 | "ocbi @%3\n\t" : : | ||
684 | "r" (a0), "r" (a1), "r" (a2), "r" (a3)); | ||
685 | a0 += linesz; | ||
686 | a1 += linesz; | ||
687 | a2 += linesz; | ||
688 | a3 += linesz; | ||
689 | asm volatile("movca.l r0, @%0\n\t" | ||
690 | "movca.l r0, @%1\n\t" | ||
691 | "movca.l r0, @%2\n\t" | ||
692 | "movca.l r0, @%3\n\t" | ||
693 | "ocbi @%0\n\t" | ||
694 | "ocbi @%1\n\t" | ||
695 | "ocbi @%2\n\t" | ||
696 | "ocbi @%3\n\t" : : | ||
697 | "r" (a0), "r" (a1), "r" (a2), "r" (a3)); | ||
698 | asm volatile("ldc %0, sr" : : "r" (orig_sr)); | ||
699 | a0 += linesz; | ||
700 | a1 += linesz; | ||
701 | a2 += linesz; | ||
702 | a3 += linesz; | ||
703 | } while (a0 < a0e); | ||
704 | } | ||
705 | |||
706 | extern void __weak sh4__flush_region_init(void); | 380 | extern void __weak sh4__flush_region_init(void); |
707 | 381 | ||
708 | /* | 382 | /* |
@@ -710,32 +384,11 @@ extern void __weak sh4__flush_region_init(void); | |||
710 | */ | 384 | */ |
711 | void __init sh4_cache_init(void) | 385 | void __init sh4_cache_init(void) |
712 | { | 386 | { |
713 | unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT); | ||
714 | |||
715 | printk("PVR=%08x CVR=%08x PRR=%08x\n", | 387 | printk("PVR=%08x CVR=%08x PRR=%08x\n", |
716 | ctrl_inl(CCN_PVR), | 388 | ctrl_inl(CCN_PVR), |
717 | ctrl_inl(CCN_CVR), | 389 | ctrl_inl(CCN_CVR), |
718 | ctrl_inl(CCN_PRR)); | 390 | ctrl_inl(CCN_PRR)); |
719 | 391 | ||
720 | if (wt_enabled) | ||
721 | __flush_dcache_segment_fn = __flush_dcache_segment_writethrough; | ||
722 | else { | ||
723 | switch (boot_cpu_data.dcache.ways) { | ||
724 | case 1: | ||
725 | __flush_dcache_segment_fn = __flush_dcache_segment_1way; | ||
726 | break; | ||
727 | case 2: | ||
728 | __flush_dcache_segment_fn = __flush_dcache_segment_2way; | ||
729 | break; | ||
730 | case 4: | ||
731 | __flush_dcache_segment_fn = __flush_dcache_segment_4way; | ||
732 | break; | ||
733 | default: | ||
734 | panic("unknown number of cache ways\n"); | ||
735 | break; | ||
736 | } | ||
737 | } | ||
738 | |||
739 | local_flush_icache_range = sh4_flush_icache_range; | 392 | local_flush_icache_range = sh4_flush_icache_range; |
740 | local_flush_dcache_page = sh4_flush_dcache_page; | 393 | local_flush_dcache_page = sh4_flush_dcache_page; |
741 | local_flush_cache_all = sh4_flush_cache_all; | 394 | local_flush_cache_all = sh4_flush_cache_all; |
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index 467ff8e260f7..eb4cc4ec7952 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -563,7 +563,7 @@ static void sh5_flush_cache_page(void *args) | |||
563 | 563 | ||
564 | static void sh5_flush_dcache_page(void *page) | 564 | static void sh5_flush_dcache_page(void *page) |
565 | { | 565 | { |
566 | sh64_dcache_purge_phy_page(page_to_phys(page)); | 566 | sh64_dcache_purge_phy_page(page_to_phys((struct page *)page)); |
567 | wmb(); | 567 | wmb(); |
568 | } | 568 | } |
569 | 569 | ||
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 2601935eb589..f527fb70fce6 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -141,7 +141,7 @@ static void sh7705_flush_dcache_page(void *arg) | |||
141 | if (mapping && !mapping_mapped(mapping)) | 141 | if (mapping && !mapping_mapped(mapping)) |
142 | set_bit(PG_dcache_dirty, &page->flags); | 142 | set_bit(PG_dcache_dirty, &page->flags); |
143 | else | 143 | else |
144 | __flush_dcache_page(PHYSADDR(page_address(page))); | 144 | __flush_dcache_page(__pa(page_address(page))); |
145 | } | 145 | } |
146 | 146 | ||
147 | static void __uses_jump_to_uncached sh7705_flush_cache_all(void *args) | 147 | static void __uses_jump_to_uncached sh7705_flush_cache_all(void *args) |
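[Editor's note] The PHYSADDR() to __pa() switch is behaviour-preserving for P1 addresses. The toy program below uses the conventional SH constants (P1 at 0x80000000, 29-bit physical mask) purely to show the two forms agreeing; it is an illustration, not the kernel's definitions:

```c
#include <stdio.h>

#define PAGE_OFFSET  0x80000000UL            /* start of P1 on SH      */
#define PHYSADDR(a)  ((a) & 0x1fffffffUL)    /* old 29-bit-mode helper */
#define __pa(a)      ((a) - PAGE_OFFSET)     /* generic replacement    */

int main(void)
{
        unsigned long vaddr = 0x8c400000UL;  /* some P1 kernel address */

        printf("PHYSADDR: 0x%08lx\n", PHYSADDR(vaddr));
        printf("__pa:     0x%08lx\n", __pa(vaddr));
        return 0;
}
```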
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index a2dc7f9ecc51..e9415d3ea94a 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -27,8 +27,11 @@ void (*local_flush_icache_page)(void *args) = cache_noop; | |||
27 | void (*local_flush_cache_sigtramp)(void *args) = cache_noop; | 27 | void (*local_flush_cache_sigtramp)(void *args) = cache_noop; |
28 | 28 | ||
29 | void (*__flush_wback_region)(void *start, int size); | 29 | void (*__flush_wback_region)(void *start, int size); |
30 | EXPORT_SYMBOL(__flush_wback_region); | ||
30 | void (*__flush_purge_region)(void *start, int size); | 31 | void (*__flush_purge_region)(void *start, int size); |
32 | EXPORT_SYMBOL(__flush_purge_region); | ||
31 | void (*__flush_invalidate_region)(void *start, int size); | 33 | void (*__flush_invalidate_region)(void *start, int size); |
34 | EXPORT_SYMBOL(__flush_invalidate_region); | ||
32 | 35 | ||
33 | static inline void noop__flush_region(void *start, int size) | 36 | static inline void noop__flush_region(void *start, int size) |
34 | { | 37 | { |
@@ -161,14 +164,21 @@ void flush_cache_all(void) | |||
161 | { | 164 | { |
162 | cacheop_on_each_cpu(local_flush_cache_all, NULL, 1); | 165 | cacheop_on_each_cpu(local_flush_cache_all, NULL, 1); |
163 | } | 166 | } |
167 | EXPORT_SYMBOL(flush_cache_all); | ||
164 | 168 | ||
165 | void flush_cache_mm(struct mm_struct *mm) | 169 | void flush_cache_mm(struct mm_struct *mm) |
166 | { | 170 | { |
171 | if (boot_cpu_data.dcache.n_aliases == 0) | ||
172 | return; | ||
173 | |||
167 | cacheop_on_each_cpu(local_flush_cache_mm, mm, 1); | 174 | cacheop_on_each_cpu(local_flush_cache_mm, mm, 1); |
168 | } | 175 | } |
169 | 176 | ||
170 | void flush_cache_dup_mm(struct mm_struct *mm) | 177 | void flush_cache_dup_mm(struct mm_struct *mm) |
171 | { | 178 | { |
179 | if (boot_cpu_data.dcache.n_aliases == 0) | ||
180 | return; | ||
181 | |||
172 | cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1); | 182 | cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1); |
173 | } | 183 | } |
174 | 184 | ||
@@ -195,11 +205,13 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, | |||
195 | 205 | ||
196 | cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1); | 206 | cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1); |
197 | } | 207 | } |
208 | EXPORT_SYMBOL(flush_cache_range); | ||
198 | 209 | ||
199 | void flush_dcache_page(struct page *page) | 210 | void flush_dcache_page(struct page *page) |
200 | { | 211 | { |
201 | cacheop_on_each_cpu(local_flush_dcache_page, page, 1); | 212 | cacheop_on_each_cpu(local_flush_dcache_page, page, 1); |
202 | } | 213 | } |
214 | EXPORT_SYMBOL(flush_dcache_page); | ||
203 | 215 | ||
204 | void flush_icache_range(unsigned long start, unsigned long end) | 216 | void flush_icache_range(unsigned long start, unsigned long end) |
205 | { | 217 | { |
@@ -265,7 +277,11 @@ static void __init emit_cache_params(void) | |||
265 | 277 | ||
266 | void __init cpu_cache_init(void) | 278 | void __init cpu_cache_init(void) |
267 | { | 279 | { |
268 | unsigned int cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE); | 280 | unsigned int cache_disabled = 0; |
281 | |||
282 | #ifdef CCR | ||
283 | cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE); | ||
284 | #endif | ||
269 | 285 | ||
270 | compute_alias(&boot_cpu_data.icache); | 286 | compute_alias(&boot_cpu_data.icache); |
271 | compute_alias(&boot_cpu_data.dcache); | 287 | compute_alias(&boot_cpu_data.dcache); |
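[Editor's note] The new early returns in flush_cache_mm() and flush_cache_dup_mm() key off dcache.n_aliases, which compute_alias() (called just above) derives from the cache geometry. A small illustrative calculation, using example sizes rather than values read from a real cache version register:

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Sketch of the alias count: a way larger than one page can hold the
 * same physical page at several virtual colours, and only then does
 * flush_cache_mm() have any work to do on a physically tagged cache. */
static unsigned long n_aliases(unsigned long cache_size, unsigned long ways)
{
        unsigned long way_size = cache_size / ways;

        return way_size > PAGE_SIZE ? way_size / PAGE_SIZE : 0;
}

int main(void)
{
        printf("32KiB, 4-way: %lu aliases\n", n_aliases(32 << 10, 4)); /* 2 */
        printf("16KiB, 4-way: %lu aliases\n", n_aliases(16 << 10, 4)); /* 0 */
        return 0;
}
```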
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index e098ec158ddb..902967e3f841 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -15,11 +15,15 @@ | |||
15 | #include <linux/dma-mapping.h> | 15 | #include <linux/dma-mapping.h> |
16 | #include <linux/dma-debug.h> | 16 | #include <linux/dma-debug.h> |
17 | #include <linux/io.h> | 17 | #include <linux/io.h> |
18 | #include <linux/module.h> | ||
18 | #include <asm/cacheflush.h> | 19 | #include <asm/cacheflush.h> |
19 | #include <asm/addrspace.h> | 20 | #include <asm/addrspace.h> |
20 | 21 | ||
21 | #define PREALLOC_DMA_DEBUG_ENTRIES 4096 | 22 | #define PREALLOC_DMA_DEBUG_ENTRIES 4096 |
22 | 23 | ||
24 | struct dma_map_ops *dma_ops; | ||
25 | EXPORT_SYMBOL(dma_ops); | ||
26 | |||
23 | static int __init dma_init(void) | 27 | static int __init dma_init(void) |
24 | { | 28 | { |
25 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | 29 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); |
@@ -27,15 +31,12 @@ static int __init dma_init(void) | |||
27 | } | 31 | } |
28 | fs_initcall(dma_init); | 32 | fs_initcall(dma_init); |
29 | 33 | ||
30 | void *dma_alloc_coherent(struct device *dev, size_t size, | 34 | void *dma_generic_alloc_coherent(struct device *dev, size_t size, |
31 | dma_addr_t *dma_handle, gfp_t gfp) | 35 | dma_addr_t *dma_handle, gfp_t gfp) |
32 | { | 36 | { |
33 | void *ret, *ret_nocache; | 37 | void *ret, *ret_nocache; |
34 | int order = get_order(size); | 38 | int order = get_order(size); |
35 | 39 | ||
36 | if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) | ||
37 | return ret; | ||
38 | |||
39 | ret = (void *)__get_free_pages(gfp, order); | 40 | ret = (void *)__get_free_pages(gfp, order); |
40 | if (!ret) | 41 | if (!ret) |
41 | return NULL; | 42 | return NULL; |
@@ -57,35 +58,26 @@ void *dma_alloc_coherent(struct device *dev, size_t size, | |||
57 | 58 | ||
58 | *dma_handle = virt_to_phys(ret); | 59 | *dma_handle = virt_to_phys(ret); |
59 | 60 | ||
60 | debug_dma_alloc_coherent(dev, size, *dma_handle, ret_nocache); | ||
61 | |||
62 | return ret_nocache; | 61 | return ret_nocache; |
63 | } | 62 | } |
64 | EXPORT_SYMBOL(dma_alloc_coherent); | ||
65 | 63 | ||
66 | void dma_free_coherent(struct device *dev, size_t size, | 64 | void dma_generic_free_coherent(struct device *dev, size_t size, |
67 | void *vaddr, dma_addr_t dma_handle) | 65 | void *vaddr, dma_addr_t dma_handle) |
68 | { | 66 | { |
69 | int order = get_order(size); | 67 | int order = get_order(size); |
70 | unsigned long pfn = dma_handle >> PAGE_SHIFT; | 68 | unsigned long pfn = dma_handle >> PAGE_SHIFT; |
71 | int k; | 69 | int k; |
72 | 70 | ||
73 | WARN_ON(irqs_disabled()); /* for portability */ | ||
74 | |||
75 | if (dma_release_from_coherent(dev, order, vaddr)) | ||
76 | return; | ||
77 | |||
78 | debug_dma_free_coherent(dev, size, vaddr, dma_handle); | ||
79 | for (k = 0; k < (1 << order); k++) | 71 | for (k = 0; k < (1 << order); k++) |
80 | __free_pages(pfn_to_page(pfn + k), 0); | 72 | __free_pages(pfn_to_page(pfn + k), 0); |
73 | |||
81 | iounmap(vaddr); | 74 | iounmap(vaddr); |
82 | } | 75 | } |
83 | EXPORT_SYMBOL(dma_free_coherent); | ||
84 | 76 | ||
85 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 77 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
86 | enum dma_data_direction direction) | 78 | enum dma_data_direction direction) |
87 | { | 79 | { |
88 | #ifdef CONFIG_CPU_SH5 | 80 | #if defined(CONFIG_CPU_SH5) || defined(CONFIG_PMB) |
89 | void *p1addr = vaddr; | 81 | void *p1addr = vaddr; |
90 | #else | 82 | #else |
91 | void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr); | 83 | void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr); |
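[Editor's note] dma_cache_sync() now leaves the passed-in address alone when PMB is active instead of folding it into P1. As a reminder of the 29-bit windows the old P1SEGADDR() relied on, here is a toy sketch with invented addresses; P1 is the cached identity window and P2 the uncached one, which is also the distinction dma_generic_alloc_coherent() exploits:

```c
#include <stdio.h>

#define P1SEG 0x80000000UL   /* cached identity window (29-bit mode)   */
#define P2SEG 0xa0000000UL   /* uncached identity window (29-bit mode) */

int main(void)
{
        /* Pretend physical address of a freshly allocated DMA buffer. */
        unsigned long phys = 0x0c400000UL;

        unsigned long cached   = P1SEG | phys;  /* flushed before use     */
        unsigned long uncached = P2SEG | phys;  /* handed back to drivers */

        printf("dma_handle = 0x%08lx\n", phys);
        printf("cached     = 0x%08lx\n", cached);
        printf("uncached   = 0x%08lx\n", uncached);
        return 0;
}
```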
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 8173e38afd38..432acd07e76a 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/pagemap.h> | 15 | #include <linux/pagemap.h> |
16 | #include <linux/percpu.h> | 16 | #include <linux/percpu.h> |
17 | #include <linux/io.h> | 17 | #include <linux/io.h> |
18 | #include <linux/dma-mapping.h> | ||
18 | #include <asm/mmu_context.h> | 19 | #include <asm/mmu_context.h> |
19 | #include <asm/tlb.h> | 20 | #include <asm/tlb.h> |
20 | #include <asm/cacheflush.h> | 21 | #include <asm/cacheflush.h> |
@@ -186,11 +187,21 @@ void __init paging_init(void) | |||
186 | set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start)); | 187 | set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start)); |
187 | } | 188 | } |
188 | 189 | ||
190 | /* | ||
191 | * Early initialization for any I/O MMUs we might have. | ||
192 | */ | ||
193 | static void __init iommu_init(void) | ||
194 | { | ||
195 | no_iommu_init(); | ||
196 | } | ||
197 | |||
189 | void __init mem_init(void) | 198 | void __init mem_init(void) |
190 | { | 199 | { |
191 | int codesize, datasize, initsize; | 200 | int codesize, datasize, initsize; |
192 | int nid; | 201 | int nid; |
193 | 202 | ||
203 | iommu_init(); | ||
204 | |||
194 | num_physpages = 0; | 205 | num_physpages = 0; |
195 | high_memory = NULL; | 206 | high_memory = NULL; |
196 | 207 | ||
@@ -323,4 +334,12 @@ int memory_add_physaddr_to_nid(u64 addr) | |||
323 | } | 334 | } |
324 | EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); | 335 | EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); |
325 | #endif | 336 | #endif |
337 | |||
326 | #endif /* CONFIG_MEMORY_HOTPLUG */ | 338 | #endif /* CONFIG_MEMORY_HOTPLUG */ |
339 | |||
340 | #ifdef CONFIG_PMB | ||
341 | int __in_29bit_mode(void) | ||
342 | { | ||
343 | return !(ctrl_inl(PMB_PASCR) & PASCR_SE); | ||
344 | } | ||
345 | #endif /* CONFIG_PMB */ | ||
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
index 16e01b5fed04..15d74ea42094 100644
--- a/arch/sh/mm/kmap.c
+++ b/arch/sh/mm/kmap.c
@@ -39,7 +39,9 @@ void *kmap_coherent(struct page *page, unsigned long addr) | |||
39 | pagefault_disable(); | 39 | pagefault_disable(); |
40 | 40 | ||
41 | idx = FIX_CMAP_END - | 41 | idx = FIX_CMAP_END - |
42 | ((addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT); | 42 | (((addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1)) + |
43 | (FIX_N_COLOURS * smp_processor_id())); | ||
44 | |||
43 | vaddr = __fix_to_virt(idx); | 45 | vaddr = __fix_to_virt(idx); |
44 | 46 | ||
45 | BUG_ON(!pte_none(*(kmap_coherent_pte - idx))); | 47 | BUG_ON(!pte_none(*(kmap_coherent_pte - idx))); |
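[Editor's note] The new kmap_coherent() indexing gives each CPU its own block of FIX_N_COLOURS fixmap slots and picks a slot within the block by the page colour of the user address, instead of masking with dcache.alias_mask. A sketch of the arithmetic with made-up constants (FIX_N_COLOURS of 8, two CPUs, and a small stand-in for FIX_CMAP_END):

```c
#include <stdio.h>

#define PAGE_SHIFT     12
#define FIX_N_COLOURS  8                         /* illustrative value  */
#define NR_CPUS        2
#define FIX_CMAP_END   (FIX_N_COLOURS * NR_CPUS) /* stand-in fixmap idx */

/* Each CPU owns FIX_N_COLOURS consecutive slots; the colour of the
 * user address selects the slot inside that block. */
static unsigned int cmap_idx(unsigned long addr, unsigned int cpu)
{
        return FIX_CMAP_END -
               (((addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1)) +
                FIX_N_COLOURS * cpu);
}

int main(void)
{
        printf("cpu0, addr 0x3000 -> idx %u\n", cmap_idx(0x3000, 0));
        printf("cpu1, addr 0x3000 -> idx %u\n", cmap_idx(0x3000, 1));
        return 0;
}
```

Keeping the blocks per CPU means two processors can hold coherent mappings of differently coloured pages at the same time without stepping on each other's fixmap slots.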
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index 9b784fdb947c..6c524446c0f6 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -60,7 +60,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) | |||
60 | unsigned long bootmem_paddr; | 60 | unsigned long bootmem_paddr; |
61 | 61 | ||
62 | /* Don't allow bogus node assignment */ | 62 | /* Don't allow bogus node assignment */ |
63 | BUG_ON(nid > MAX_NUMNODES || nid == 0); | 63 | BUG_ON(nid > MAX_NUMNODES || nid <= 0); |
64 | 64 | ||
65 | start_pfn = start >> PAGE_SHIFT; | 65 | start_pfn = start >> PAGE_SHIFT; |
66 | end_pfn = end >> PAGE_SHIFT; | 66 | end_pfn = end >> PAGE_SHIFT; |
diff --git a/arch/sh/mm/pmb-fixed.c b/arch/sh/mm/pmb-fixed.c
deleted file mode 100644
index 43c8eac4d8a1..000000000000
--- a/arch/sh/mm/pmb-fixed.c
+++ /dev/null
@@ -1,45 +0,0 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/fixed_pmb.c | ||
3 | * | ||
4 | * Copyright (C) 2009 Renesas Solutions Corp. | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/mm.h> | ||
12 | #include <linux/io.h> | ||
13 | #include <asm/mmu.h> | ||
14 | #include <asm/mmu_context.h> | ||
15 | |||
16 | static int __uses_jump_to_uncached fixed_pmb_init(void) | ||
17 | { | ||
18 | int i; | ||
19 | unsigned long addr, data; | ||
20 | |||
21 | jump_to_uncached(); | ||
22 | |||
23 | for (i = 0; i < PMB_ENTRY_MAX; i++) { | ||
24 | addr = PMB_DATA + (i << PMB_E_SHIFT); | ||
25 | data = ctrl_inl(addr); | ||
26 | if (!(data & PMB_V)) | ||
27 | continue; | ||
28 | |||
29 | if (data & PMB_C) { | ||
30 | #if defined(CONFIG_CACHE_WRITETHROUGH) | ||
31 | data |= PMB_WT; | ||
32 | #elif defined(CONFIG_CACHE_WRITEBACK) | ||
33 | data &= ~PMB_WT; | ||
34 | #else | ||
35 | data &= ~(PMB_C | PMB_WT); | ||
36 | #endif | ||
37 | } | ||
38 | ctrl_outl(data, addr); | ||
39 | } | ||
40 | |||
41 | back_to_cached(); | ||
42 | |||
43 | return 0; | ||
44 | } | ||
45 | arch_initcall(fixed_pmb_init); | ||
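[Editor's note] The logic from the deleted pmb-fixed.c resurfaces below as the !CONFIG_PMB branch of pmb_init() in pmb.c. Its core is the cacheability fix-up applied to PMB entries left behind by the boot loader; the sketch uses illustrative flag bits only (the real PMB_V/PMB_C/PMB_WT values live in the SH mmu headers):

```c
#include <stdio.h>

/* Made-up bit positions, for illustration only. */
#define PMB_V  (1u << 8)
#define PMB_C  (1u << 3)
#define PMB_WT (1u << 0)

/* For every valid, cacheable entry, force the write policy to match
 * the kernel configuration (write-through vs. write-back). */
static unsigned int pmb_fixup_data(unsigned int data, int writethrough)
{
        if (!(data & PMB_V) || !(data & PMB_C))
                return data;            /* invalid or already uncached */

        if (writethrough)
                data |= PMB_WT;         /* CONFIG_CACHE_WRITETHROUGH */
        else
                data &= ~PMB_WT;        /* CONFIG_CACHE_WRITEBACK    */

        return data;
}

int main(void)
{
        printf("0x%03x\n", pmb_fixup_data(PMB_V | PMB_C, 1));
        printf("0x%03x\n", pmb_fixup_data(PMB_V | PMB_C | PMB_WT, 0));
        return 0;
}
```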
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index aade31102112..280f6a166035 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -35,29 +35,9 @@ | |||
35 | 35 | ||
36 | static void __pmb_unmap(struct pmb_entry *); | 36 | static void __pmb_unmap(struct pmb_entry *); |
37 | 37 | ||
38 | static struct kmem_cache *pmb_cache; | 38 | static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; |
39 | static unsigned long pmb_map; | 39 | static unsigned long pmb_map; |
40 | 40 | ||
41 | static struct pmb_entry pmb_init_map[] = { | ||
42 | /* vpn ppn flags (ub/sz/c/wt) */ | ||
43 | |||
44 | /* P1 Section Mappings */ | ||
45 | { 0x80000000, 0x00000000, PMB_SZ_64M | PMB_C, }, | ||
46 | { 0x84000000, 0x04000000, PMB_SZ_64M | PMB_C, }, | ||
47 | { 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C, }, | ||
48 | { 0x90000000, 0x10000000, PMB_SZ_64M | PMB_C, }, | ||
49 | { 0x94000000, 0x14000000, PMB_SZ_64M | PMB_C, }, | ||
50 | { 0x98000000, 0x18000000, PMB_SZ_64M | PMB_C, }, | ||
51 | |||
52 | /* P2 Section Mappings */ | ||
53 | { 0xa0000000, 0x00000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, | ||
54 | { 0xa4000000, 0x04000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, | ||
55 | { 0xa8000000, 0x08000000, PMB_UB | PMB_SZ_128M | PMB_WT, }, | ||
56 | { 0xb0000000, 0x10000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, | ||
57 | { 0xb4000000, 0x14000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, | ||
58 | { 0xb8000000, 0x18000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, | ||
59 | }; | ||
60 | |||
61 | static inline unsigned long mk_pmb_entry(unsigned int entry) | 41 | static inline unsigned long mk_pmb_entry(unsigned int entry) |
62 | { | 42 | { |
63 | return (entry & PMB_E_MASK) << PMB_E_SHIFT; | 43 | return (entry & PMB_E_MASK) << PMB_E_SHIFT; |
@@ -73,81 +53,68 @@ static inline unsigned long mk_pmb_data(unsigned int entry) | |||
73 | return mk_pmb_entry(entry) | PMB_DATA; | 53 | return mk_pmb_entry(entry) | PMB_DATA; |
74 | } | 54 | } |
75 | 55 | ||
76 | static DEFINE_SPINLOCK(pmb_list_lock); | 56 | static int pmb_alloc_entry(void) |
77 | static struct pmb_entry *pmb_list; | ||
78 | |||
79 | static inline void pmb_list_add(struct pmb_entry *pmbe) | ||
80 | { | 57 | { |
81 | struct pmb_entry **p, *tmp; | 58 | unsigned int pos; |
82 | 59 | ||
83 | p = &pmb_list; | 60 | repeat: |
84 | while ((tmp = *p) != NULL) | 61 | pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); |
85 | p = &tmp->next; | ||
86 | 62 | ||
87 | pmbe->next = tmp; | 63 | if (unlikely(pos > NR_PMB_ENTRIES)) |
88 | *p = pmbe; | 64 | return -ENOSPC; |
89 | } | ||
90 | 65 | ||
91 | static inline void pmb_list_del(struct pmb_entry *pmbe) | 66 | if (test_and_set_bit(pos, &pmb_map)) |
92 | { | 67 | goto repeat; |
93 | struct pmb_entry **p, *tmp; | ||
94 | 68 | ||
95 | for (p = &pmb_list; (tmp = *p); p = &tmp->next) | 69 | return pos; |
96 | if (tmp == pmbe) { | ||
97 | *p = tmp->next; | ||
98 | return; | ||
99 | } | ||
100 | } | 70 | } |
101 | 71 | ||
102 | struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, | 72 | static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, |
103 | unsigned long flags) | 73 | unsigned long flags, int entry) |
104 | { | 74 | { |
105 | struct pmb_entry *pmbe; | 75 | struct pmb_entry *pmbe; |
76 | int pos; | ||
77 | |||
78 | if (entry == PMB_NO_ENTRY) { | ||
79 | pos = pmb_alloc_entry(); | ||
80 | if (pos < 0) | ||
81 | return ERR_PTR(pos); | ||
82 | } else { | ||
83 | if (test_bit(entry, &pmb_map)) | ||
84 | return ERR_PTR(-ENOSPC); | ||
85 | pos = entry; | ||
86 | } | ||
106 | 87 | ||
107 | pmbe = kmem_cache_alloc(pmb_cache, GFP_KERNEL); | 88 | pmbe = &pmb_entry_list[pos]; |
108 | if (!pmbe) | 89 | if (!pmbe) |
109 | return ERR_PTR(-ENOMEM); | 90 | return ERR_PTR(-ENOMEM); |
110 | 91 | ||
111 | pmbe->vpn = vpn; | 92 | pmbe->vpn = vpn; |
112 | pmbe->ppn = ppn; | 93 | pmbe->ppn = ppn; |
113 | pmbe->flags = flags; | 94 | pmbe->flags = flags; |
114 | 95 | pmbe->entry = pos; | |
115 | spin_lock_irq(&pmb_list_lock); | ||
116 | pmb_list_add(pmbe); | ||
117 | spin_unlock_irq(&pmb_list_lock); | ||
118 | 96 | ||
119 | return pmbe; | 97 | return pmbe; |
120 | } | 98 | } |
121 | 99 | ||
122 | void pmb_free(struct pmb_entry *pmbe) | 100 | static void pmb_free(struct pmb_entry *pmbe) |
123 | { | 101 | { |
124 | spin_lock_irq(&pmb_list_lock); | 102 | int pos = pmbe->entry; |
125 | pmb_list_del(pmbe); | ||
126 | spin_unlock_irq(&pmb_list_lock); | ||
127 | 103 | ||
128 | kmem_cache_free(pmb_cache, pmbe); | 104 | pmbe->vpn = 0; |
105 | pmbe->ppn = 0; | ||
106 | pmbe->flags = 0; | ||
107 | pmbe->entry = 0; | ||
108 | |||
109 | clear_bit(pos, &pmb_map); | ||
129 | } | 110 | } |
130 | 111 | ||
131 | /* | 112 | /* |
132 | * Must be in P2 for __set_pmb_entry() | 113 | * Must be in P2 for __set_pmb_entry() |
133 | */ | 114 | */ |
134 | int __set_pmb_entry(unsigned long vpn, unsigned long ppn, | 115 | static void __set_pmb_entry(unsigned long vpn, unsigned long ppn, |
135 | unsigned long flags, int *entry) | 116 | unsigned long flags, int pos) |
136 | { | 117 | { |
137 | unsigned int pos = *entry; | ||
138 | |||
139 | if (unlikely(pos == PMB_NO_ENTRY)) | ||
140 | pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); | ||
141 | |||
142 | repeat: | ||
143 | if (unlikely(pos > NR_PMB_ENTRIES)) | ||
144 | return -ENOSPC; | ||
145 | |||
146 | if (test_and_set_bit(pos, &pmb_map)) { | ||
147 | pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); | ||
148 | goto repeat; | ||
149 | } | ||
150 | |||
151 | ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos)); | 118 | ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos)); |
152 | 119 | ||
153 | #ifdef CONFIG_CACHE_WRITETHROUGH | 120 | #ifdef CONFIG_CACHE_WRITETHROUGH |
@@ -161,35 +128,21 @@ repeat: | |||
161 | #endif | 128 | #endif |
162 | 129 | ||
163 | ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos)); | 130 | ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos)); |
164 | |||
165 | *entry = pos; | ||
166 | |||
167 | return 0; | ||
168 | } | 131 | } |
169 | 132 | ||
170 | int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe) | 133 | static void __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe) |
171 | { | 134 | { |
172 | int ret; | ||
173 | |||
174 | jump_to_uncached(); | 135 | jump_to_uncached(); |
175 | ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry); | 136 | __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry); |
176 | back_to_cached(); | 137 | back_to_cached(); |
177 | |||
178 | return ret; | ||
179 | } | 138 | } |
180 | 139 | ||
181 | void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe) | 140 | static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe) |
182 | { | 141 | { |
183 | unsigned int entry = pmbe->entry; | 142 | unsigned int entry = pmbe->entry; |
184 | unsigned long addr; | 143 | unsigned long addr; |
185 | 144 | ||
186 | /* | 145 | if (unlikely(entry >= NR_PMB_ENTRIES)) |
187 | * Don't allow clearing of wired init entries, P1 or P2 access | ||
188 | * without a corresponding mapping in the PMB will lead to reset | ||
189 | * by the TLB. | ||
190 | */ | ||
191 | if (unlikely(entry < ARRAY_SIZE(pmb_init_map) || | ||
192 | entry >= NR_PMB_ENTRIES)) | ||
193 | return; | 146 | return; |
194 | 147 | ||
195 | jump_to_uncached(); | 148 | jump_to_uncached(); |
@@ -202,8 +155,6 @@ void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe) | |||
202 | ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr); | 155 | ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr); |
203 | 156 | ||
204 | back_to_cached(); | 157 | back_to_cached(); |
205 | |||
206 | clear_bit(entry, &pmb_map); | ||
207 | } | 158 | } |
208 | 159 | ||
209 | 160 | ||
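[Editor's note] The kmem_cache and hand-rolled linked list are replaced by a fixed pmb_entry_list[] array indexed by PMB slot plus a pmb_map bitmap. A single-threaded user-space sketch of that allocation scheme follows; the kernel version uses find_first_zero_bit() and test_and_set_bit() in a retry loop so it stays safe against concurrent allocators:

```c
#include <stdio.h>
#include <errno.h>

#define NR_PMB_ENTRIES 16

struct pmb_entry {
        unsigned long vpn, ppn, flags;
        int entry;
};

static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static unsigned long pmb_map;

/* Find a free slot, mark it used, and hand back its index. */
static int pmb_alloc_entry(void)
{
        int pos;

        for (pos = 0; pos < NR_PMB_ENTRIES; pos++)
                if (!(pmb_map & (1UL << pos))) {
                        pmb_map |= 1UL << pos;
                        return pos;
                }
        return -ENOSPC;
}

/* Freeing is just clearing the entry and its bit; no list surgery. */
static void pmb_free_entry(int pos)
{
        pmb_entry_list[pos] = (struct pmb_entry){ 0 };
        pmb_map &= ~(1UL << pos);
}

int main(void)
{
        int a = pmb_alloc_entry();
        int b = pmb_alloc_entry();

        printf("allocated slots %d and %d\n", a, b);
        pmb_free_entry(a);
        printf("slot %d is reused next: %d\n", a, pmb_alloc_entry());
        return 0;
}
```

Because the array index is the hardware slot number, pmb_unmap() and the resume path can simply scan pmb_map instead of walking a list.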
@@ -239,23 +190,17 @@ long pmb_remap(unsigned long vaddr, unsigned long phys, | |||
239 | 190 | ||
240 | again: | 191 | again: |
241 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) { | 192 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) { |
242 | int ret; | ||
243 | |||
244 | if (size < pmb_sizes[i].size) | 193 | if (size < pmb_sizes[i].size) |
245 | continue; | 194 | continue; |
246 | 195 | ||
247 | pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag); | 196 | pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag, |
197 | PMB_NO_ENTRY); | ||
248 | if (IS_ERR(pmbe)) { | 198 | if (IS_ERR(pmbe)) { |
249 | err = PTR_ERR(pmbe); | 199 | err = PTR_ERR(pmbe); |
250 | goto out; | 200 | goto out; |
251 | } | 201 | } |
252 | 202 | ||
253 | ret = set_pmb_entry(pmbe); | 203 | set_pmb_entry(pmbe); |
254 | if (ret != 0) { | ||
255 | pmb_free(pmbe); | ||
256 | err = -EBUSY; | ||
257 | goto out; | ||
258 | } | ||
259 | 204 | ||
260 | phys += pmb_sizes[i].size; | 205 | phys += pmb_sizes[i].size; |
261 | vaddr += pmb_sizes[i].size; | 206 | vaddr += pmb_sizes[i].size; |
@@ -292,11 +237,16 @@ out: | |||
292 | 237 | ||
293 | void pmb_unmap(unsigned long addr) | 238 | void pmb_unmap(unsigned long addr) |
294 | { | 239 | { |
295 | struct pmb_entry **p, *pmbe; | 240 | struct pmb_entry *pmbe = NULL; |
241 | int i; | ||
296 | 242 | ||
297 | for (p = &pmb_list; (pmbe = *p); p = &pmbe->next) | 243 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { |
298 | if (pmbe->vpn == addr) | 244 | if (test_bit(i, &pmb_map)) { |
299 | break; | 245 | pmbe = &pmb_entry_list[i]; |
246 | if (pmbe->vpn == addr) | ||
247 | break; | ||
248 | } | ||
249 | } | ||
300 | 250 | ||
301 | if (unlikely(!pmbe)) | 251 | if (unlikely(!pmbe)) |
302 | return; | 252 | return; |
@@ -306,13 +256,22 @@ void pmb_unmap(unsigned long addr) | |||
306 | 256 | ||
307 | static void __pmb_unmap(struct pmb_entry *pmbe) | 257 | static void __pmb_unmap(struct pmb_entry *pmbe) |
308 | { | 258 | { |
309 | WARN_ON(!test_bit(pmbe->entry, &pmb_map)); | 259 | BUG_ON(!test_bit(pmbe->entry, &pmb_map)); |
310 | 260 | ||
311 | do { | 261 | do { |
312 | struct pmb_entry *pmblink = pmbe; | 262 | struct pmb_entry *pmblink = pmbe; |
313 | 263 | ||
314 | if (pmbe->entry != PMB_NO_ENTRY) | 264 | /* |
315 | clear_pmb_entry(pmbe); | 265 | * We may be called before this pmb_entry has been |
266 | * entered into the PMB table via set_pmb_entry(), but | ||
267 | * that's OK because we've allocated a unique slot for | ||
268 | * this entry in pmb_alloc() (even if we haven't filled | ||
269 | * it yet). | ||
270 | * | ||
271 | * Therefore, calling clear_pmb_entry() is safe as no | ||
272 | * other mapping can be using that slot. | ||
273 | */ | ||
274 | clear_pmb_entry(pmbe); | ||
316 | 275 | ||
317 | pmbe = pmblink->link; | 276 | pmbe = pmblink->link; |
318 | 277 | ||
@@ -320,42 +279,34 @@ static void __pmb_unmap(struct pmb_entry *pmbe) | |||
320 | } while (pmbe); | 279 | } while (pmbe); |
321 | } | 280 | } |
322 | 281 | ||
323 | static void pmb_cache_ctor(void *pmb) | 282 | #ifdef CONFIG_PMB |
283 | int __uses_jump_to_uncached pmb_init(void) | ||
324 | { | 284 | { |
325 | struct pmb_entry *pmbe = pmb; | 285 | unsigned int i; |
326 | 286 | long size, ret; | |
327 | memset(pmb, 0, sizeof(struct pmb_entry)); | ||
328 | |||
329 | pmbe->entry = PMB_NO_ENTRY; | ||
330 | } | ||
331 | |||
332 | static int __uses_jump_to_uncached pmb_init(void) | ||
333 | { | ||
334 | unsigned int nr_entries = ARRAY_SIZE(pmb_init_map); | ||
335 | unsigned int entry, i; | ||
336 | |||
337 | BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES)); | ||
338 | |||
339 | pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0, | ||
340 | SLAB_PANIC, pmb_cache_ctor); | ||
341 | 287 | ||
342 | jump_to_uncached(); | 288 | jump_to_uncached(); |
343 | 289 | ||
344 | /* | 290 | /* |
345 | * Ordering is important, P2 must be mapped in the PMB before we | 291 | * Insert PMB entries for the P1 and P2 areas so that, after |
346 | * can set PMB.SE, and P1 must be mapped before we jump back to | 292 | * we've switched the MMU to 32-bit mode, the semantics of P1 |
347 | * P1 space. | 293 | * and P2 are the same as in 29-bit mode, e.g. |
294 | * | ||
295 | * P1 - provides a cached window onto physical memory | ||
296 | * P2 - provides an uncached window onto physical memory | ||
348 | */ | 297 | */ |
349 | for (entry = 0; entry < nr_entries; entry++) { | 298 | size = __MEMORY_START + __MEMORY_SIZE; |
350 | struct pmb_entry *pmbe = pmb_init_map + entry; | ||
351 | 299 | ||
352 | __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &entry); | 300 | ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C); |
353 | } | 301 | BUG_ON(ret != size); |
302 | |||
303 | ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB); | ||
304 | BUG_ON(ret != size); | ||
354 | 305 | ||
355 | ctrl_outl(0, PMB_IRMCR); | 306 | ctrl_outl(0, PMB_IRMCR); |
356 | 307 | ||
357 | /* PMB.SE and UB[7] */ | 308 | /* PMB.SE and UB[7] */ |
358 | ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR); | 309 | ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR); |
359 | 310 | ||
360 | /* Flush out the TLB */ | 311 | /* Flush out the TLB */ |
361 | i = ctrl_inl(MMUCR); | 312 | i = ctrl_inl(MMUCR); |
@@ -366,7 +317,53 @@ static int __uses_jump_to_uncached pmb_init(void) | |||
366 | 317 | ||
367 | return 0; | 318 | return 0; |
368 | } | 319 | } |
369 | arch_initcall(pmb_init); | 320 | #else |
321 | int __uses_jump_to_uncached pmb_init(void) | ||
322 | { | ||
323 | int i; | ||
324 | unsigned long addr, data; | ||
325 | |||
326 | jump_to_uncached(); | ||
327 | |||
328 | for (i = 0; i < PMB_ENTRY_MAX; i++) { | ||
329 | struct pmb_entry *pmbe; | ||
330 | unsigned long vpn, ppn, flags; | ||
331 | |||
332 | addr = PMB_DATA + (i << PMB_E_SHIFT); | ||
333 | data = ctrl_inl(addr); | ||
334 | if (!(data & PMB_V)) | ||
335 | continue; | ||
336 | |||
337 | if (data & PMB_C) { | ||
338 | #if defined(CONFIG_CACHE_WRITETHROUGH) | ||
339 | data |= PMB_WT; | ||
340 | #elif defined(CONFIG_CACHE_WRITEBACK) | ||
341 | data &= ~PMB_WT; | ||
342 | #else | ||
343 | data &= ~(PMB_C | PMB_WT); | ||
344 | #endif | ||
345 | } | ||
346 | ctrl_outl(data, addr); | ||
347 | |||
348 | ppn = data & PMB_PFN_MASK; | ||
349 | |||
350 | flags = data & (PMB_C | PMB_WT | PMB_UB); | ||
351 | flags |= data & PMB_SZ_MASK; | ||
352 | |||
353 | addr = PMB_ADDR + (i << PMB_E_SHIFT); | ||
354 | data = ctrl_inl(addr); | ||
355 | |||
356 | vpn = data & PMB_PFN_MASK; | ||
357 | |||
358 | pmbe = pmb_alloc(vpn, ppn, flags, i); | ||
359 | WARN_ON(IS_ERR(pmbe)); | ||
360 | } | ||
361 | |||
362 | back_to_cached(); | ||
363 | |||
364 | return 0; | ||
365 | } | ||
366 | #endif /* CONFIG_PMB */ | ||
370 | 367 | ||
371 | static int pmb_seq_show(struct seq_file *file, void *iter) | 368 | static int pmb_seq_show(struct seq_file *file, void *iter) |
372 | { | 369 | { |
@@ -434,15 +431,18 @@ postcore_initcall(pmb_debugfs_init); | |||
434 | static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state) | 431 | static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state) |
435 | { | 432 | { |
436 | static pm_message_t prev_state; | 433 | static pm_message_t prev_state; |
434 | int i; | ||
437 | 435 | ||
438 | /* Restore the PMB after a resume from hibernation */ | 436 | /* Restore the PMB after a resume from hibernation */ |
439 | if (state.event == PM_EVENT_ON && | 437 | if (state.event == PM_EVENT_ON && |
440 | prev_state.event == PM_EVENT_FREEZE) { | 438 | prev_state.event == PM_EVENT_FREEZE) { |
441 | struct pmb_entry *pmbe; | 439 | struct pmb_entry *pmbe; |
442 | spin_lock_irq(&pmb_list_lock); | 440 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { |
443 | for (pmbe = pmb_list; pmbe; pmbe = pmbe->next) | 441 | if (test_bit(i, &pmb_map)) { |
444 | set_pmb_entry(pmbe); | 442 | pmbe = &pmb_entry_list[i]; |
445 | spin_unlock_irq(&pmb_list_lock); | 443 | set_pmb_entry(pmbe); |
444 | } | ||
445 | } | ||
446 | } | 446 | } |
447 | prev_state = state; | 447 | prev_state = state; |
448 | return 0; | 448 | return 0; |