Diffstat (limited to 'arch/unicore32/mm/mmu.c')
-rw-r--r--  arch/unicore32/mm/mmu.c  533
1 files changed, 533 insertions, 0 deletions
diff --git a/arch/unicore32/mm/mmu.c b/arch/unicore32/mm/mmu.c
new file mode 100644
index 000000000000..7bf3d588631f
--- /dev/null
+++ b/arch/unicore32/mm/mmu.c
@@ -0,0 +1,533 @@
/*
 * linux/arch/unicore32/mm/mmu.c
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>

#include <mach/map.h>

#include "mm.h"

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

pgprot_t pgprot_user;
EXPORT_SYMBOL(pgprot_user);

pgprot_t pgprot_kernel;
EXPORT_SYMBOL(pgprot_kernel);

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}

struct map_desc {
	unsigned long virtual;
	unsigned long pfn;
	unsigned long length;
	unsigned int type;
};

#define PROT_PTE_DEVICE (PTE_PRESENT | PTE_YOUNG | \
			 PTE_DIRTY | PTE_READ | PTE_WRITE)
#define PROT_SECT_DEVICE (PMD_TYPE_SECT | PMD_PRESENT | \
			  PMD_SECT_READ | PMD_SECT_WRITE)

static struct mem_type mem_types[] = {
	[MT_DEVICE] = { /* Strongly ordered */
		.prot_pte = PROT_PTE_DEVICE,
		.prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT,
		.prot_sect = PROT_SECT_DEVICE,
	},
	/*
	 * MT_KUSER: pte for vecpage -- cacheable,
	 * and sect for unigfx mmap -- noncacheable
	 */
	[MT_KUSER] = {
		.prot_pte = PTE_PRESENT | PTE_YOUNG | PTE_DIRTY |
			    PTE_CACHEABLE | PTE_READ | PTE_EXEC,
		.prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT,
		.prot_sect = PROT_SECT_DEVICE,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte = PTE_PRESENT | PTE_YOUNG | PTE_DIRTY |
			    PTE_CACHEABLE | PTE_READ | PTE_WRITE |
			    PTE_EXEC,
		.prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT,
	},
	[MT_MEMORY] = {
		.prot_pte = PTE_PRESENT | PTE_YOUNG | PTE_DIRTY |
			    PTE_WRITE | PTE_EXEC,
		.prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT,
		.prot_sect = PMD_TYPE_SECT | PMD_PRESENT | PMD_SECT_CACHEABLE |
			     PMD_SECT_READ | PMD_SECT_WRITE | PMD_SECT_EXEC,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT | PMD_PRESENT | PMD_SECT_CACHEABLE |
			     PMD_SECT_READ,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	pgprot_user = __pgprot(PTE_PRESENT | PTE_YOUNG | PTE_CACHEABLE);
	pgprot_kernel = __pgprot(PTE_PRESENT | PTE_YOUNG |
				 PTE_DIRTY | PTE_READ | PTE_WRITE |
				 PTE_EXEC | PTE_CACHEABLE);
}

#define vectors_base() (vectors_high() ? 0xffff0000 : 0)

static void __init *early_alloc(unsigned long sz)
{
	void *ptr = __va(memblock_alloc(sz, sz));
	memset(ptr, 0, sz);
	return ptr;
}

static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
				      unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte) | prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
	do {
		set_pte(pte, pfn_pte(pfn, __pgprot(type->prot_pte)));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
				      unsigned long end, unsigned long phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset((pud_t *)pgd, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.
	 */
	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
		pmd_t *p = pmd;

		do {
			set_pmd(pmd, __pmd(phys | type->prot_sect));
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; pte's aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'. We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long phys, addr, length, end;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%08llx at 0x%08lx in user region\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

	addr = md->virtual & PAGE_MASK;
	phys = (unsigned long)__pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       __pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_section(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 128m.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
		       "vmalloc area too small, limiting to %luMB\n",
		       vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
		       "vmalloc area is too big, limiting to %luMB\n",
		       vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);

static phys_addr_t lowmem_limit __initdata = SZ_1G;

static void __init sanity_check_meminfo(void)
{
	int i, j;

	lowmem_limit = __pa(vmalloc_min - 1) + 1;
	memblock_set_current_limit(lowmem_limit);

	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		*bank = meminfo.bank[i];
		j++;
	}
	meminfo.nr_banks = j;
}

static inline void prepare_page_table(void)
{
	unsigned long addr;
	phys_addr_t end;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Find the end of the first block of lowmem.
	 */
	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
	if (end >= lowmem_limit)
		end = lowmem_limit;

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the end of the vmalloc region.
	 */
	for (addr = __phys_to_virt(end);
	     addr < VMALLOC_END; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));
}

/*
 * Reserve the special regions of memory
 */
void __init uc32_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables. These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_PUV3_UNIGFX
	/*
	 * These should likewise go elsewhere. They pre-reserve the
	 * screen/video memory region at the 48M~64M of main system memory.
	 */
	memblock_reserve(PKUNITY_UNIGFX_MMAP_BASE, PKUNITY_UNIGFX_MMAP_SIZE);
	memblock_reserve(PKUNITY_UVC_MMAP_BASE, PKUNITY_UVC_MMAP_SIZE);
#endif
}

/*
 * Set up the device mappings. Since we clear out the page tables for all
 * mappings above VMALLOC_END, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function: you can't use any function or debugging method
 * which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(void)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = early_alloc(PAGE_SIZE);

	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Create a mapping for UniGFX VRAM
	 */
#ifdef CONFIG_PUV3_UNIGFX
	map.pfn = __phys_to_pfn(PKUNITY_UNIGFX_MMAP_BASE);
	map.virtual = KUSER_UNIGFX_BASE;
	map.length = PKUNITY_UNIGFX_MMAP_SIZE;
	map.type = MT_KUSER;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000). If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = VECTORS_BASE;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	/*
	 * Create a mapping for the kuser page at the special
	 * location (0xbfff0000) to the same vectors location.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = KUSER_VECPAGE_BASE;
	map.length = PAGE_SIZE;
	map.type = MT_KUSER;
	create_mapping(&map);

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer. This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back. After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

static void __init map_lowmem(void)
{
	struct memblock_region *reg;

	/* Map all the lowmem memory banks. */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;
		struct map_desc map;

		if (end > lowmem_limit)
			end = lowmem_limit;
		if (start >= end)
			break;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY;

		create_mapping(&map);
	}
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(void)
{
	void *zero_page;

	build_mem_type_table();
	sanity_check_meminfo();
	prepare_page_table();
	map_lowmem();
	devicemaps_init();

	top_pmd = pmd_off_k(0xffff0000);

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages. This will then ensure that we have predictable
 * results when turning the MMU off.
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	/*
	 * We need access to the user-mode page tables here. For kernel
	 * threads we don't have any user-mode mappings so we use the
	 * context that we "borrowed".
	 */
	pgd = current->active_mm->pgd;

	base_pmdval = PMD_SECT_WRITE | PMD_SECT_READ | PMD_TYPE_SECT;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		set_pmd(pmd, __pmd(pmdval));
		flush_pmd_entry(pmd);
	}

	local_flush_tlb_all();
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE. Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_clean is not set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct address_space *mapping;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);
	if (mapping)
		if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
}