Diffstat (limited to 'arch/xtensa/mm/init.c')

 arch/xtensa/mm/init.c | 551 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 551 insertions(+), 0 deletions(-)
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
new file mode 100644
index 000000000000..56aace84aaeb
--- /dev/null
+++ b/arch/xtensa/mm/init.c
@@ -0,0 +1,551 @@
/*
 * arch/xtensa/mm/init.c
 *
 * Derived from MIPS, PPC.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Chris Zankel	<chris@zankel.net>
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 * Marc Gauthier
 * Kevin Chea
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/bootmem.h>
#include <linux/swap.h>

#include <asm/pgtable.h>
#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>


#define DEBUG 0

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* Serializes the temporary DTLB mappings used for cache-alias handling. */
static DEFINE_SPINLOCK(tlb_lock);

/*
 * This flag is used to indicate that the page was mapped and modified in
 * kernel space, so the cache is probably dirty at that address.
 * If cache aliasing is enabled and the page color mismatches, update_mmu_cache
 * synchronizes the caches if this bit is set.
 */

#define PG_cache_clean PG_arch_1

/* References to section boundaries */

extern char _ftext, _etext, _fdata, _edata, _rodata_end;
extern char __init_begin, __init_end;

/*
 * mem_reserve(start, end, must_exist)
 *
 * Reserve some memory from the memory pool.
 *
 * Parameters:
 *  start	Start of region,
 *  end		End of region,
 *  must_exist	Must exist in memory pool.
 *
 * Returns:
 *  0	(region was not in the memory pool; nothing was reserved)
 *  -1	(region was found and carved out of the pool)
 */
73 | |||
74 | int __init mem_reserve(unsigned long start, unsigned long end, int must_exist) | ||
75 | { | ||
76 | int i; | ||
77 | |||
78 | if (start == end) | ||
79 | return 0; | ||
80 | |||
81 | start = start & PAGE_MASK; | ||
82 | end = PAGE_ALIGN(end); | ||
83 | |||
84 | for (i = 0; i < sysmem.nr_banks; i++) | ||
85 | if (start < sysmem.bank[i].end | ||
86 | && end >= sysmem.bank[i].start) | ||
87 | break; | ||
88 | |||
89 | if (i == sysmem.nr_banks) { | ||
90 | if (must_exist) | ||
91 | printk (KERN_WARNING "mem_reserve: [0x%0lx, 0x%0lx) " | ||
92 | "not in any region!\n", start, end); | ||
93 | return 0; | ||
94 | } | ||
95 | |||
96 | if (start > sysmem.bank[i].start) { | ||
97 | if (end < sysmem.bank[i].end) { | ||
98 | /* split entry */ | ||
99 | if (sysmem.nr_banks >= SYSMEM_BANKS_MAX) | ||
100 | panic("meminfo overflow\n"); | ||
101 | sysmem.bank[sysmem.nr_banks].start = end; | ||
102 | sysmem.bank[sysmem.nr_banks].end = sysmem.bank[i].end; | ||
103 | sysmem.nr_banks++; | ||
104 | } | ||
105 | sysmem.bank[i].end = start; | ||
106 | } else { | ||
107 | if (end < sysmem.bank[i].end) | ||
108 | sysmem.bank[i].start = end; | ||
109 | else { | ||
110 | /* remove entry */ | ||
111 | sysmem.nr_banks--; | ||
112 | sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start; | ||
113 | sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end; | ||
114 | } | ||
115 | } | ||
116 | return -1; | ||
117 | } | ||
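
/*
 * Illustrative only (the real callers live in the platform setup code, not
 * in this file): reserving the kernel image and an initrd might look like
 *
 *	mem_reserve(__pa(&_ftext), __pa(&_edata), 0);
 *	mem_reserve(initrd_start, initrd_end, 1);
 *
 * so that bootmem_init() below never hands those pages to the allocator.
 */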
118 | |||
119 | |||
120 | /* | ||
121 | * Initialize the bootmem system and give it all the memory we have available. | ||
122 | */ | ||
123 | |||
124 | void __init bootmem_init(void) | ||
125 | { | ||
126 | unsigned long pfn; | ||
127 | unsigned long bootmap_start, bootmap_size; | ||
128 | int i; | ||
129 | |||
130 | max_low_pfn = max_pfn = 0; | ||
131 | min_low_pfn = ~0; | ||
132 | |||
133 | for (i=0; i < sysmem.nr_banks; i++) { | ||
134 | pfn = PAGE_ALIGN(sysmem.bank[i].start) >> PAGE_SHIFT; | ||
135 | if (pfn < min_low_pfn) | ||
136 | min_low_pfn = pfn; | ||
137 | pfn = PAGE_ALIGN(sysmem.bank[i].end - 1) >> PAGE_SHIFT; | ||
138 | if (pfn > max_pfn) | ||
139 | max_pfn = pfn; | ||
140 | } | ||
141 | |||
142 | if (min_low_pfn > max_pfn) | ||
143 | panic("No memory found!\n"); | ||
144 | |||
145 | max_low_pfn = max_pfn < MAX_LOW_MEMORY >> PAGE_SHIFT ? | ||
146 | max_pfn : MAX_LOW_MEMORY >> PAGE_SHIFT; | ||
147 | |||
148 | /* Find an area to use for the bootmem bitmap. */ | ||
149 | |||
150 | bootmap_size = bootmem_bootmap_pages(max_low_pfn) << PAGE_SHIFT; | ||
151 | bootmap_start = ~0; | ||
152 | |||
153 | for (i=0; i<sysmem.nr_banks; i++) | ||
154 | if (sysmem.bank[i].end - sysmem.bank[i].start >= bootmap_size) { | ||
155 | bootmap_start = sysmem.bank[i].start; | ||
156 | break; | ||
157 | } | ||
158 | |||
159 | if (bootmap_start == ~0UL) | ||
160 | panic("Cannot find %ld bytes for bootmap\n", bootmap_size); | ||
161 | |||
162 | /* Reserve the bootmem bitmap area */ | ||
163 | |||
164 | mem_reserve(bootmap_start, bootmap_start + bootmap_size, 1); | ||
165 | bootmap_size = init_bootmem_node(NODE_DATA(0), min_low_pfn, | ||
166 | bootmap_start >> PAGE_SHIFT, | ||
167 | max_low_pfn); | ||
168 | |||
169 | /* Add all remaining memory pieces into the bootmem map */ | ||
170 | |||
171 | for (i=0; i<sysmem.nr_banks; i++) | ||
172 | free_bootmem(sysmem.bank[i].start, | ||
173 | sysmem.bank[i].end - sysmem.bank[i].start); | ||
174 | |||
175 | } | ||
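
/*
 * Worked example of the bounds calculation above, assuming 4 KiB pages
 * (PAGE_SHIFT == 12): a single bank covering [0x02000000, 0x04000000)
 * gives min_low_pfn == 0x2000 and max_pfn == 0x4000, i.e. 32 MiB of
 * usable pages starting at pfn 0x2000.
 */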
176 | |||
177 | |||
178 | void __init paging_init(void) | ||
179 | { | ||
180 | unsigned long zones_size[MAX_NR_ZONES]; | ||
181 | int i; | ||
182 | |||
183 | /* All pages are DMA-able, so we put them all in the DMA zone. */ | ||
184 | |||
185 | zones_size[ZONE_DMA] = max_low_pfn; | ||
186 | for (i = 1; i < MAX_NR_ZONES; i++) | ||
187 | zones_size[i] = 0; | ||
188 | |||
189 | #ifdef CONFIG_HIGHMEM | ||
190 | zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn; | ||
191 | #endif | ||
192 | |||
193 | /* Initialize the kernel's page tables. */ | ||
194 | |||
195 | memset(swapper_pg_dir, 0, PAGE_SIZE); | ||
196 | |||
197 | free_area_init(zones_size); | ||
198 | } | ||
199 | |||
200 | /* | ||
201 | * Flush the mmu and reset associated register to default values. | ||
202 | */ | ||
203 | |||
204 | void __init init_mmu (void) | ||
205 | { | ||
206 | /* Writing zeros to the <t>TLBCFG special registers ensure | ||
207 | * that valid values exist in the register. For existing | ||
208 | * PGSZID<w> fields, zero selects the first element of the | ||
209 | * page-size array. For nonexistant PGSZID<w> fields, zero is | ||
210 | * the best value to write. Also, when changing PGSZID<w> | ||
211 | * fields, the corresponding TLB must be flushed. | ||
212 | */ | ||
213 | set_itlbcfg_register (0); | ||
214 | set_dtlbcfg_register (0); | ||
215 | flush_tlb_all (); | ||
216 | |||
217 | /* Set rasid register to a known value. */ | ||
218 | |||
219 | set_rasid_register (ASID_ALL_RESERVED); | ||
220 | |||
221 | /* Set PTEVADDR special register to the start of the page | ||
222 | * table, which is in kernel mappable space (ie. not | ||
223 | * statically mapped). This register's value is undefined on | ||
224 | * reset. | ||
225 | */ | ||
226 | set_ptevaddr_register (PGTABLE_START); | ||
227 | } | ||
228 | |||
229 | /* | ||
230 | * Initialize memory pages. | ||
231 | */ | ||
232 | |||
233 | void __init mem_init(void) | ||
234 | { | ||
235 | unsigned long codesize, reservedpages, datasize, initsize; | ||
236 | unsigned long highmemsize, tmp, ram; | ||
237 | |||
238 | max_mapnr = num_physpages = max_low_pfn; | ||
239 | high_memory = (void *) __va(max_mapnr << PAGE_SHIFT); | ||
240 | highmemsize = 0; | ||
241 | |||
242 | #if CONFIG_HIGHMEM | ||
243 | #error HIGHGMEM not implemented in init.c | ||
244 | #endif | ||
245 | |||
246 | totalram_pages += free_all_bootmem(); | ||
247 | |||
248 | reservedpages = ram = 0; | ||
249 | for (tmp = 0; tmp < max_low_pfn; tmp++) { | ||
250 | ram++; | ||
251 | if (PageReserved(mem_map+tmp)) | ||
252 | reservedpages++; | ||
253 | } | ||
254 | |||
255 | codesize = (unsigned long) &_etext - (unsigned long) &_ftext; | ||
256 | datasize = (unsigned long) &_edata - (unsigned long) &_fdata; | ||
257 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | ||
258 | |||
259 | printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, " | ||
260 | "%ldk data, %ldk init %ldk highmem)\n", | ||
261 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), | ||
262 | ram << (PAGE_SHIFT-10), | ||
263 | codesize >> 10, | ||
264 | reservedpages << (PAGE_SHIFT-10), | ||
265 | datasize >> 10, | ||
266 | initsize >> 10, | ||
267 | highmemsize >> 10); | ||
268 | } | ||
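
/*
 * With the hypothetical 32 MiB bank from the bootmem_init() example above,
 * the resulting boot message would look something like (all numbers
 * illustrative, not measured):
 *
 *	Memory: 31620k/32768k available (1024k kernel code, 1148k reserved,
 *	256k data, 128k init, 0k highmem)
 */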
269 | |||
270 | void | ||
271 | free_reserved_mem(void *start, void *end) | ||
272 | { | ||
273 | for (; start < end; start += PAGE_SIZE) { | ||
274 | ClearPageReserved(virt_to_page(start)); | ||
275 | set_page_count(virt_to_page(start), 1); | ||
276 | free_page((unsigned long)start); | ||
277 | totalram_pages++; | ||
278 | } | ||
279 | } | ||
280 | |||
281 | #ifdef CONFIG_BLK_DEV_INITRD | ||
282 | extern int initrd_is_mapped; | ||
283 | |||
284 | void free_initrd_mem(unsigned long start, unsigned long end) | ||
285 | { | ||
286 | if (initrd_is_mapped) { | ||
287 | free_reserved_mem((void*)start, (void*)end); | ||
288 | printk ("Freeing initrd memory: %ldk freed\n",(end-start)>>10); | ||
289 | } | ||
290 | } | ||
291 | #endif | ||
292 | |||
293 | void free_initmem(void) | ||
294 | { | ||
295 | free_reserved_mem(&__init_begin, &__init_end); | ||
296 | printk("Freeing unused kernel memory: %dk freed\n", | ||
297 | (&__init_end - &__init_begin) >> 10); | ||
298 | } | ||
299 | |||
300 | void show_mem(void) | ||
301 | { | ||
302 | int i, free = 0, total = 0, reserved = 0; | ||
303 | int shared = 0, cached = 0; | ||
304 | |||
305 | printk("Mem-info:\n"); | ||
306 | show_free_areas(); | ||
307 | printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); | ||
308 | i = max_mapnr; | ||
309 | while (i-- > 0) { | ||
310 | total++; | ||
311 | if (PageReserved(mem_map+i)) | ||
312 | reserved++; | ||
313 | else if (PageSwapCache(mem_map+i)) | ||
314 | cached++; | ||
315 | else if (!page_count(mem_map + i)) | ||
316 | free++; | ||
317 | else | ||
318 | shared += page_count(mem_map + i) - 1; | ||
319 | } | ||
320 | printk("%d pages of RAM\n", total); | ||
321 | printk("%d reserved pages\n", reserved); | ||
322 | printk("%d pages shared\n", shared); | ||
323 | printk("%d pages swap cached\n",cached); | ||
324 | printk("%d free pages\n", free); | ||
325 | } | ||
326 | |||
327 | /* ------------------------------------------------------------------------- */ | ||
328 | |||
329 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) | ||
330 | |||
331 | /* | ||
332 | * With cache aliasing, the page color of the page in kernel space and user | ||
333 | * space might mismatch. We temporarily map the page to a different virtual | ||
334 | * address with the same color and clear the page there. | ||
335 | */ | ||
336 | |||
337 | void clear_user_page(void *kaddr, unsigned long vaddr, struct page* page) | ||
338 | { | ||
339 | |||
340 | /* There shouldn't be any entries for this page. */ | ||
341 | |||
342 | __flush_invalidate_dcache_page_phys(__pa(page_address(page))); | ||
343 | |||
344 | if (!PAGE_COLOR_EQ(vaddr, kaddr)) { | ||
345 | unsigned long v, p; | ||
346 | |||
347 | /* Temporarily map page to DTLB_WAY_DCACHE_ALIAS0. */ | ||
348 | |||
349 | spin_lock(&tlb_lock); | ||
350 | |||
351 | p = (unsigned long)pte_val((mk_pte(page,PAGE_KERNEL))); | ||
352 | kaddr = (void*)PAGE_COLOR_MAP0(vaddr); | ||
353 | v = (unsigned long)kaddr | DTLB_WAY_DCACHE_ALIAS0; | ||
354 | __asm__ __volatile__("wdtlb %0,%1; dsync" : :"a" (p), "a" (v)); | ||
355 | |||
356 | clear_page(kaddr); | ||
357 | |||
358 | spin_unlock(&tlb_lock); | ||
359 | } else { | ||
360 | clear_page(kaddr); | ||
361 | } | ||
362 | |||
363 | /* We need to make sure that i$ and d$ are coherent. */ | ||
364 | |||
365 | clear_bit(PG_cache_clean, &page->flags); | ||
366 | } | ||
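
/*
 * Page colors, concretely: assuming 4 KiB pages and a 16 KiB cache way
 * (illustrative numbers, not necessarily this core's geometry), there are
 * four page colors, and two virtual addresses v1 and v2 land on the same
 * cache lines exactly when
 *
 *	((v1 ^ v2) & (DCACHE_WAY_SIZE - 1) & PAGE_MASK) == 0
 *
 * which is the property PAGE_COLOR_EQ() tests above.
 */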
367 | |||
368 | /* | ||
369 | * With cache aliasing, we have to make sure that the page color of the page | ||
370 | * in kernel space matches that of the virtual user address before we read | ||
371 | * the page. If the page color differ, we create a temporary DTLB entry with | ||
372 | * the corrent page color and use this 'temporary' address as the source. | ||
373 | * We then use the same approach as in clear_user_page and copy the data | ||
374 | * to the kernel space and clear the PG_cache_clean bit to synchronize caches | ||
375 | * later. | ||
376 | * | ||
377 | * Note: | ||
378 | * Instead of using another 'way' for the temporary DTLB entry, we could | ||
379 | * probably use the same entry that points to the kernel address (after | ||
380 | * saving the original value and restoring it when we are done). | ||
381 | */ | ||
382 | |||
383 | void copy_user_page(void* to, void* from, unsigned long vaddr, | ||
384 | struct page* to_page) | ||
385 | { | ||
386 | /* There shouldn't be any entries for the new page. */ | ||
387 | |||
388 | __flush_invalidate_dcache_page_phys(__pa(page_address(to_page))); | ||
389 | |||
390 | spin_lock(&tlb_lock); | ||
391 | |||
392 | if (!PAGE_COLOR_EQ(vaddr, from)) { | ||
393 | unsigned long v, p, t; | ||
394 | |||
395 | __asm__ __volatile__ ("pdtlb %1,%2; rdtlb1 %0,%1" | ||
396 | : "=a"(p), "=a"(t) : "a"(from)); | ||
397 | from = (void*)PAGE_COLOR_MAP0(vaddr); | ||
398 | v = (unsigned long)from | DTLB_WAY_DCACHE_ALIAS0; | ||
399 | __asm__ __volatile__ ("wdtlb %0,%1; dsync" ::"a" (p), "a" (v)); | ||
400 | } | ||
401 | |||
402 | if (!PAGE_COLOR_EQ(vaddr, to)) { | ||
403 | unsigned long v, p; | ||
404 | |||
405 | p = (unsigned long)pte_val((mk_pte(to_page,PAGE_KERNEL))); | ||
406 | to = (void*)PAGE_COLOR_MAP1(vaddr); | ||
407 | v = (unsigned long)to | DTLB_WAY_DCACHE_ALIAS1; | ||
408 | __asm__ __volatile__ ("wdtlb %0,%1; dsync" ::"a" (p), "a" (v)); | ||
409 | } | ||
410 | copy_page(to, from); | ||
411 | |||
412 | spin_unlock(&tlb_lock); | ||
413 | |||
414 | /* We need to make sure that i$ and d$ are coherent. */ | ||
415 | |||
416 | clear_bit(PG_cache_clean, &to_page->flags); | ||
417 | } | ||
418 | |||
419 | |||
420 | |||
421 | /* | ||
422 | * Any time the kernel writes to a user page cache page, or it is about to | ||
423 | * read from a page cache page this routine is called. | ||
424 | * | ||
425 | * Note: | ||
426 | * The kernel currently only provides one architecture bit in the page | ||
427 | * flags that we use for I$/D$ coherency. Maybe, in future, we can | ||
428 | * use a sepearte bit for deferred dcache aliasing: | ||
429 | * If the page is not mapped yet, we only need to set a flag, | ||
430 | * if mapped, we need to invalidate the page. | ||
431 | */ | ||
432 | // FIXME: we probably need this for WB caches not only for Page Coloring.. | ||
433 | |||

void flush_dcache_page(struct page *page)
{
	unsigned long addr = __pa(page_address(page));
	struct address_space *mapping = page_mapping(page);

	__flush_invalidate_dcache_page_phys(addr);

	if (!test_bit(PG_cache_clean, &page->flags))
		return;

	/* If this page hasn't been mapped yet, handle I$/D$ coherency later. */
#if 0
	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_cache_clean, &page->flags);
	else
#endif
	__invalidate_icache_page_phys(addr);
}
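
/*
 * Typical call pattern (this is the generic page-cache contract, shown here
 * for orientation rather than defined by this file): after the kernel writes
 * to a page that may also be mapped into user space, e.g.
 *
 *	memcpy(page_address(page), buf, len);
 *	flush_dcache_page(page);
 *
 * the invalidate above keeps user space from reading stale cache lines.
 */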
452 | |||
453 | void flush_cache_range(struct vm_area_struct* vma, unsigned long s, | ||
454 | unsigned long e) | ||
455 | { | ||
456 | __flush_invalidate_cache_all(); | ||
457 | } | ||
458 | |||
459 | void flush_cache_page(struct vm_area_struct* vma, unsigned long address, | ||
460 | unsigned long pfn) | ||
461 | { | ||
462 | struct page *page = pfn_to_page(pfn); | ||
463 | |||
464 | /* Remove any entry for the old mapping. */ | ||
465 | |||
466 | if (current->active_mm == vma->vm_mm) { | ||
467 | unsigned long addr = __pa(page_address(page)); | ||
468 | __flush_invalidate_dcache_page_phys(addr); | ||
469 | if ((vma->vm_flags & VM_EXEC) != 0) | ||
470 | __invalidate_icache_page_phys(addr); | ||
471 | } else { | ||
472 | BUG(); | ||
473 | } | ||
474 | } | ||
475 | |||
476 | #endif /* (DCACHE_WAY_SIZE > PAGE_SIZE) */ | ||
477 | |||
478 | |||
pte_t* pte_alloc_one_kernel(struct mm_struct* mm, unsigned long addr)
{
	pte_t* pte = (pte_t*)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 0);
	if (likely(pte)) {
		/* Clear all 1024 entries of the new page table. */
		pte_t* ptep = pte;
		int i;
		for (i = 0; i < 1024; i++, ptep++)
			pte_clear(mm, addr, ptep);
	}
	return pte;
}
490 | |||
491 | struct page* pte_alloc_one(struct mm_struct *mm, unsigned long addr) | ||
492 | { | ||
493 | struct page *page; | ||
494 | |||
495 | page = alloc_pages(GFP_KERNEL | __GFP_REPEAT, 0); | ||
496 | |||
497 | if (likely(page)) { | ||
498 | pte_t* ptep = kmap_atomic(page, KM_USER0); | ||
499 | int i; | ||
500 | |||
501 | for (i = 0; i < 1024; i++, ptep++) | ||
502 | pte_clear(mm, addr, ptep); | ||
503 | |||
504 | kunmap_atomic(ptep, KM_USER0); | ||
505 | } | ||
506 | return page; | ||
507 | } | ||
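
/*
 * Calling convention, roughly (generic mm behavior of this kernel
 * generation, not code from this file): the fault path allocates page
 * tables with
 *
 *	struct page *ptp = pte_alloc_one(mm, address);		(user mms)
 *	pte_t *pte = pte_alloc_one_kernel(&init_mm, address);	(kernel)
 *
 * and expects every entry to come back cleared, which is why both
 * allocators run pte_clear() over the whole page.
 */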
508 | |||
509 | |||
510 | /* | ||
511 | * Handle D$/I$ coherency. | ||
512 | * | ||
513 | * Note: | ||
514 | * We only have one architecture bit for the page flags, so we cannot handle | ||
515 | * cache aliasing, yet. | ||
516 | */ | ||
517 | |||
518 | void | ||
519 | update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t pte) | ||
520 | { | ||
521 | unsigned long pfn = pte_pfn(pte); | ||
522 | struct page *page; | ||
523 | unsigned long vaddr = addr & PAGE_MASK; | ||
524 | |||
525 | if (!pfn_valid(pfn)) | ||
526 | return; | ||
527 | |||
528 | page = pfn_to_page(pfn); | ||
529 | |||
530 | invalidate_itlb_mapping(addr); | ||
531 | invalidate_dtlb_mapping(addr); | ||
532 | |||
533 | /* We have a new mapping. Use it. */ | ||
534 | |||
535 | write_dtlb_entry(pte, dtlb_probe(addr)); | ||
536 | |||
537 | /* If the processor can execute from this page, synchronize D$/I$. */ | ||
538 | |||
539 | if ((vma->vm_flags & VM_EXEC) != 0) { | ||
540 | |||
541 | write_itlb_entry(pte, itlb_probe(addr)); | ||
542 | |||
543 | /* Synchronize caches, if not clean. */ | ||
544 | |||
545 | if (!test_and_set_bit(PG_cache_clean, &page->flags)) { | ||
546 | __flush_dcache_page(vaddr); | ||
547 | __invalidate_icache_page(vaddr); | ||
548 | } | ||
549 | } | ||
550 | } | ||
551 | |||