Diffstat (limited to 'arch/sparc64/mm/init.c')
-rw-r--r--  arch/sparc64/mm/init.c | 664
1 file changed, 308 insertions(+), 356 deletions(-)
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index fdb1ebb308c9..9f6ca624892d 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -20,6 +20,7 @@
 #include <linux/fs.h>
 #include <linux/seq_file.h>
 #include <linux/kprobes.h>
+#include <linux/cache.h>
 
 #include <asm/head.h>
 #include <asm/system.h>
@@ -42,22 +43,13 @@ extern void device_scan(void);
 
 struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
 
-unsigned long *sparc64_valid_addr_bitmap;
+unsigned long *sparc64_valid_addr_bitmap __read_mostly;
 
 /* Ugly, but necessary... -DaveM */
-unsigned long phys_base;
-unsigned long kern_base;
-unsigned long kern_size;
-unsigned long pfn_base;
-
-/* This is even uglier. We have a problem where the kernel may not be
- * located at phys_base. However, initial __alloc_bootmem() calls need to
- * be adjusted to be within the 4-8Megs that the kernel is mapped to, else
- * those page mappings wont work. Things are ok after inherit_prom_mappings
- * is called though. Dave says he'll clean this up some other time.
- * -- BenC
- */
-static unsigned long bootmap_base;
+unsigned long phys_base __read_mostly;
+unsigned long kern_base __read_mostly;
+unsigned long kern_size __read_mostly;
+unsigned long pfn_base __read_mostly;
 
 /* get_new_mmu_context() uses "cache + 1". */
 DEFINE_SPINLOCK(ctx_alloc_lock);
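For context on the annotation this hunk introduces: __read_mostly comes from the <linux/cache.h> include added above. A minimal sketch of the idea, assuming the usual section-attribute definition of that era (the exact section name is an assumption, not taken from this diff):

/* Hedged sketch of what __read_mostly typically expands to. */
#define __read_mostly __attribute__((__section__(".data.read_mostly")))

/* Boot-time-initialized, rarely written variables such as phys_base get
 * grouped into one section, so hot written data does not share their
 * cache lines.
 */
unsigned long example_base __read_mostly;	/* hypothetical variable */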
@@ -73,7 +65,7 @@ extern unsigned long sparc_ramdisk_image64;
 extern unsigned int sparc_ramdisk_image;
 extern unsigned int sparc_ramdisk_size;
 
-struct page *mem_map_zero;
+struct page *mem_map_zero __read_mostly;
 
 int bigkernel = 0;
 
@@ -179,8 +171,6 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
        : "g1", "g7");
 }
 
-extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long address, pte_t pte, int code);
-
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
        struct page *page;
@@ -207,10 +197,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 
                put_cpu();
        }
-
-       if (get_thread_fault_code())
-               __update_mmu_cache(CTX_NRBITS(vma->vm_mm->context),
-                                  address, pte, get_thread_fault_code());
 }
 
 void flush_dcache_page(struct page *page)
@@ -309,6 +295,7 @@ struct linux_prom_translation {
        unsigned long size;
        unsigned long data;
 };
+static struct linux_prom_translation prom_trans[512] __initdata;
 
 extern unsigned long prom_boot_page;
 extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
@@ -318,14 +305,63 @@ extern void register_prom_callbacks(void);
 /* Exported for SMP bootup purposes. */
 unsigned long kern_locked_tte_data;
 
-void __init early_pgtable_allocfail(char *type)
-{
-       prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
-       prom_halt();
-}
+/* Exported for kernel TLB miss handling in ktlb.S */
+unsigned long prom_pmd_phys __read_mostly;
+unsigned int swapper_pgd_zero __read_mostly;
+
+/* Allocate power-of-2 aligned chunks from the end of the
+ * kernel image. Return physical address.
+ */
+static inline unsigned long early_alloc_phys(unsigned long size)
+{
+       unsigned long base;
+
+       BUILD_BUG_ON(size & (size - 1));
+
+       kern_size = (kern_size + (size - 1)) & ~(size - 1);
+       base = kern_base + kern_size;
+       kern_size += size;
+
+       return base;
+}
+
+static inline unsigned long load_phys32(unsigned long pa)
+{
+       unsigned long val;
+
+       __asm__ __volatile__("lduwa     [%1] %2, %0"
+                            : "=&r" (val)
+                            : "r" (pa), "i" (ASI_PHYS_USE_EC));
+
+       return val;
+}
+
+static inline unsigned long load_phys64(unsigned long pa)
+{
+       unsigned long val;
+
+       __asm__ __volatile__("ldxa      [%1] %2, %0"
+                            : "=&r" (val)
+                            : "r" (pa), "i" (ASI_PHYS_USE_EC));
+
+       return val;
+}
+
+static inline void store_phys32(unsigned long pa, unsigned long val)
+{
+       __asm__ __volatile__("stwa      %0, [%1] %2"
+                            : /* no outputs */
+                            : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
+}
+
+static inline void store_phys64(unsigned long pa, unsigned long val)
+{
+       __asm__ __volatile__("stxa      %0, [%1] %2"
+                            : /* no outputs */
+                            : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
+}
 
 #define BASE_PAGE_SIZE 8192
-static pmd_t *prompmd;
 
 /*
  * Translate PROM's mapping we capture at boot time into physical address.
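The helpers added here access physical memory directly through ASI_PHYS_USE_EC, bypassing the MMU, and early_alloc_phys() carves aligned chunks off the end of the kernel image with the usual power-of-two round-up. A stand-alone sketch of that round-up, checkable in user space (values are hypothetical):

#include <assert.h>

static unsigned long align_up(unsigned long off, unsigned long size)
{
	/* Same arithmetic as the kern_size adjustment in early_alloc_phys();
	 * 'size' must be a power of two, as the BUILD_BUG_ON above enforces.
	 */
	return (off + (size - 1)) & ~(size - 1);
}

int main(void)
{
	assert(align_up(0x1234, 0x2000) == 0x2000);	/* round up to 8K */
	assert(align_up(0x2000, 0x2000) == 0x2000);	/* already aligned */
	return 0;
}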
@@ -333,278 +369,172 @@ static pmd_t *prompmd;
  */
 unsigned long prom_virt_to_phys(unsigned long promva, int *error)
 {
-       pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff);
-       pte_t *ptep;
+       unsigned long pmd_phys = (prom_pmd_phys +
+                                 ((promva >> 23) & 0x7ff) * sizeof(pmd_t));
+       unsigned long pte_phys;
+       pmd_t pmd_ent;
+       pte_t pte_ent;
        unsigned long base;
 
-       if (pmd_none(*pmdp)) {
+       pmd_val(pmd_ent) = load_phys32(pmd_phys);
+       if (pmd_none(pmd_ent)) {
                if (error)
                        *error = 1;
-               return(0);
+               return 0;
        }
-       ptep = (pte_t *)__pmd_page(*pmdp) + ((promva >> 13) & 0x3ff);
-       if (!pte_present(*ptep)) {
+
+       pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
+       pte_phys += ((promva >> 13) & 0x3ff) * sizeof(pte_t);
+       pte_val(pte_ent) = load_phys64(pte_phys);
+       if (!pte_present(pte_ent)) {
                if (error)
                        *error = 1;
-               return(0);
+               return 0;
        }
        if (error) {
                *error = 0;
-               return(pte_val(*ptep));
+               return pte_val(pte_ent);
        }
-       base = pte_val(*ptep) & _PAGE_PADDR;
-       return(base + (promva & (BASE_PAGE_SIZE - 1)));
+       base = pte_val(pte_ent) & _PAGE_PADDR;
+       return (base + (promva & (BASE_PAGE_SIZE - 1)));
 }
 
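The rewritten walk indexes a two-level table by pure arithmetic: bits 23 and up select a 32-bit pmd slot, bits 13-22 select one of 1024 8K ptes, and the low 13 bits are the page offset. A small stand-alone illustration of the index math used above (the sample address is hypothetical):

#include <stdio.h>

#define PMD_IDX(va)	(((va) >> 23) & 0x7ff)	/* one slot per 8MB region */
#define PTE_IDX(va)	(((va) >> 13) & 0x3ff)	/* 1024 ptes of 8K each */

int main(void)
{
	unsigned long promva = 0xf0402000UL;

	printf("pmd slot %lu, pte slot %lu, page offset 0x%lx\n",
	       PMD_IDX(promva), PTE_IDX(promva), promva & 0x1fffUL);
	return 0;
}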
-static void inherit_prom_mappings(void)
+/* The obp translations are saved based on 8k pagesize, since obp can
+ * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
+ * HI_OBP_ADDRESS range are handled in entry.S and do not use the vpte
+ * scheme (also, see rant in inherit_locked_prom_mappings()).
+ */
+static void __init build_obp_range(unsigned long start, unsigned long end, unsigned long data)
 {
-       struct linux_prom_translation *trans;
-       unsigned long phys_page, tte_vaddr, tte_data;
-       void (*remap_func)(unsigned long, unsigned long, int);
-       pmd_t *pmdp;
-       pte_t *ptep;
-       int node, n, i, tsz;
-       extern unsigned int obp_iaddr_patch[2], obp_daddr_patch[2];
+       unsigned long vaddr;
 
-       node = prom_finddevice("/virtual-memory");
-       n = prom_getproplen(node, "translations");
-       if (n == 0 || n == -1) {
-               prom_printf("Couldn't get translation property\n");
-               prom_halt();
-       }
-       n += 5 * sizeof(struct linux_prom_translation);
-       for (tsz = 1; tsz < n; tsz <<= 1)
-               /* empty */;
-       trans = __alloc_bootmem(tsz, SMP_CACHE_BYTES, bootmap_base);
-       if (trans == NULL) {
-               prom_printf("inherit_prom_mappings: Cannot alloc translations.\n");
-               prom_halt();
-       }
-       memset(trans, 0, tsz);
+       for (vaddr = start; vaddr < end; vaddr += BASE_PAGE_SIZE) {
+               unsigned long val, pte_phys, pmd_phys;
+               pmd_t pmd_ent;
+               int i;
 
-       if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
-               prom_printf("Couldn't get translation property\n");
-               prom_halt();
-       }
-       n = n / sizeof(*trans);
+               pmd_phys = (prom_pmd_phys +
+                           (((vaddr >> 23) & 0x7ff) * sizeof(pmd_t)));
+               pmd_val(pmd_ent) = load_phys32(pmd_phys);
+               if (pmd_none(pmd_ent)) {
+                       pte_phys = early_alloc_phys(BASE_PAGE_SIZE);
 
-       /*
-        * The obp translations are saved based on 8k pagesize, since obp can
-        * use a mixture of pagesizes. Misses to the 0xf0000000 - 0x100000000,
-        * ie obp range, are handled in entry.S and do not use the vpte scheme
-        * (see rant in inherit_locked_prom_mappings()).
-        */
-#define OBP_PMD_SIZE 2048
-       prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, bootmap_base);
-       if (prompmd == NULL)
-               early_pgtable_allocfail("pmd");
-       memset(prompmd, 0, OBP_PMD_SIZE);
-       for (i = 0; i < n; i++) {
-               unsigned long vaddr;
-
-               if (trans[i].virt >= LOW_OBP_ADDRESS && trans[i].virt < HI_OBP_ADDRESS) {
-                       for (vaddr = trans[i].virt;
-                            ((vaddr < trans[i].virt + trans[i].size) &&
-                             (vaddr < HI_OBP_ADDRESS));
-                            vaddr += BASE_PAGE_SIZE) {
-                               unsigned long val;
-
-                               pmdp = prompmd + ((vaddr >> 23) & 0x7ff);
-                               if (pmd_none(*pmdp)) {
-                                       ptep = __alloc_bootmem(BASE_PAGE_SIZE,
-                                                              BASE_PAGE_SIZE,
-                                                              bootmap_base);
-                                       if (ptep == NULL)
-                                               early_pgtable_allocfail("pte");
-                                       memset(ptep, 0, BASE_PAGE_SIZE);
-                                       pmd_set(pmdp, ptep);
-                               }
-                               ptep = (pte_t *)__pmd_page(*pmdp) +
-                                               ((vaddr >> 13) & 0x3ff);
+                       for (i = 0; i < BASE_PAGE_SIZE / sizeof(pte_t); i++)
+                               store_phys64(pte_phys+i*sizeof(pte_t),0);
 
-                               val = trans[i].data;
+                       pmd_val(pmd_ent) = pte_phys >> 11UL;
+                       store_phys32(pmd_phys, pmd_val(pmd_ent));
+               }
 
-                               /* Clear diag TTE bits. */
-                               if (tlb_type == spitfire)
-                                       val &= ~0x0003fe0000000000UL;
+               pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
+               pte_phys += (((vaddr >> 13) & 0x3ff) * sizeof(pte_t));
 
-                               set_pte_at(&init_mm, vaddr,
-                                          ptep, __pte(val | _PAGE_MODIFIED));
-                               trans[i].data += BASE_PAGE_SIZE;
-                       }
-               }
-       }
-       phys_page = __pa(prompmd);
-       obp_iaddr_patch[0] |= (phys_page >> 10);
-       obp_iaddr_patch[1] |= (phys_page & 0x3ff);
-       flushi((long)&obp_iaddr_patch[0]);
-       obp_daddr_patch[0] |= (phys_page >> 10);
-       obp_daddr_patch[1] |= (phys_page & 0x3ff);
-       flushi((long)&obp_daddr_patch[0]);
+               val = data;
 
-       /* Now fixup OBP's idea about where we really are mapped. */
-       prom_printf("Remapping the kernel... ");
+               /* Clear diag TTE bits. */
+               if (tlb_type == spitfire)
+                       val &= ~0x0003fe0000000000UL;
 
-       /* Spitfire Errata #32 workaround */
-       /* NOTE: Using plain zero for the context value is
-        * correct here, we are not using the Linux trap
-        * tables yet so we should not use the special
-        * UltraSPARC-III+ page size encodings yet.
-        */
-       __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
-                            "flush     %%g6"
-                            : /* No outputs */
-                            : "r" (0), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
-       switch (tlb_type) {
-       default:
-       case spitfire:
-               phys_page = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
-               break;
-
-       case cheetah:
-       case cheetah_plus:
-               phys_page = cheetah_get_litlb_data(sparc64_highest_locked_tlbent());
-               break;
-       };
-
-       phys_page &= _PAGE_PADDR;
-       phys_page += ((unsigned long)&prom_boot_page -
-                     (unsigned long)KERNBASE);
+               store_phys64(pte_phys, val | _PAGE_MODIFIED);
 
-       if (tlb_type == spitfire) {
-               /* Lock this into i/d tlb entry 59 */
-               __asm__ __volatile__(
-                       "stxa   %%g0, [%2] %3\n\t"
-                       "stxa   %0, [%1] %4\n\t"
-                       "membar #Sync\n\t"
-                       "flush  %%g6\n\t"
-                       "stxa   %%g0, [%2] %5\n\t"
-                       "stxa   %0, [%1] %6\n\t"
-                       "membar #Sync\n\t"
-                       "flush  %%g6"
-                       : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
-                                _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
-                       "r" (59 << 3), "r" (TLB_TAG_ACCESS),
-                       "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
-                       "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
-                       : "memory");
-       } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-               /* Lock this into i/d tlb-0 entry 11 */
-               __asm__ __volatile__(
-                       "stxa   %%g0, [%2] %3\n\t"
-                       "stxa   %0, [%1] %4\n\t"
-                       "membar #Sync\n\t"
-                       "flush  %%g6\n\t"
-                       "stxa   %%g0, [%2] %5\n\t"
-                       "stxa   %0, [%1] %6\n\t"
-                       "membar #Sync\n\t"
-                       "flush  %%g6"
-                       : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
-                                _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
-                       "r" ((0 << 16) | (11 << 3)), "r" (TLB_TAG_ACCESS),
-                       "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
-                       "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
-                       : "memory");
-       } else {
-               /* Implement me :-) */
-               BUG();
-       }
+               data += BASE_PAGE_SIZE;
+       }
+}
 
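build_obp_range() packs the 8K-aligned physical address of each pte page into a 32-bit pmd slot by shifting right 11, and the lookups above undo it with << 11UL. A quick round-trip check of that encoding (the sample address is hypothetical and assumes the physical address fits the 32+11 bit budget):

#include <assert.h>

int main(void)
{
	unsigned long pte_phys = 0x40008000UL;	/* 8K aligned */
	unsigned int pmd_slot = (unsigned int)(pte_phys >> 11UL);

	assert(((unsigned long)pmd_slot << 11UL) == pte_phys);
	return 0;
}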
-       tte_vaddr = (unsigned long) KERNBASE;
+static inline int in_obp_range(unsigned long vaddr)
+{
+       return (vaddr >= LOW_OBP_ADDRESS &&
+               vaddr < HI_OBP_ADDRESS);
+}
 
-       /* Spitfire Errata #32 workaround */
-       /* NOTE: Using plain zero for the context value is
-        * correct here, we are not using the Linux trap
-        * tables yet so we should not use the special
-        * UltraSPARC-III+ page size encodings yet.
-        */
-       __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
-                            "flush     %%g6"
-                            : /* No outputs */
-                            : "r" (0),
-                            "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
-       if (tlb_type == spitfire)
-               tte_data = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
-       else
-               tte_data = cheetah_get_ldtlb_data(sparc64_highest_locked_tlbent());
+#define OBP_PMD_SIZE 2048
+static void __init build_obp_pgtable(int prom_trans_ents)
+{
+       unsigned long i;
 
-       kern_locked_tte_data = tte_data;
+       prom_pmd_phys = early_alloc_phys(OBP_PMD_SIZE);
+       for (i = 0; i < OBP_PMD_SIZE; i += 4)
+               store_phys32(prom_pmd_phys + i, 0);
+
+       for (i = 0; i < prom_trans_ents; i++) {
+               unsigned long start, end;
 
-       remap_func = (void *) ((unsigned long) &prom_remap -
-                              (unsigned long) &prom_boot_page);
+               if (!in_obp_range(prom_trans[i].virt))
+                       continue;
 
+               start = prom_trans[i].virt;
+               end = start + prom_trans[i].size;
+               if (end > HI_OBP_ADDRESS)
+                       end = HI_OBP_ADDRESS;
 
-       /* Spitfire Errata #32 workaround */
-       /* NOTE: Using plain zero for the context value is
-        * correct here, we are not using the Linux trap
-        * tables yet so we should not use the special
-        * UltraSPARC-III+ page size encodings yet.
-        */
-       __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
-                            "flush     %%g6"
-                            : /* No outputs */
-                            : "r" (0),
-                            "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
-       remap_func((tlb_type == spitfire ?
-                   (spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR) :
-                   (cheetah_get_litlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR)),
-                  (unsigned long) KERNBASE,
-                  prom_get_mmu_ihandle());
-
-       if (bigkernel)
-               remap_func(((tte_data + 0x400000) & _PAGE_PADDR),
-                          (unsigned long) KERNBASE + 0x400000, prom_get_mmu_ihandle());
-
-       /* Flush out that temporary mapping. */
-       spitfire_flush_dtlb_nucleus_page(0x0);
-       spitfire_flush_itlb_nucleus_page(0x0);
-
-       /* Now lock us back into the TLBs via OBP. */
-       prom_dtlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
-       prom_itlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
-       if (bigkernel) {
-               prom_dtlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000,
-                              tte_vaddr + 0x400000);
-               prom_itlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000,
-                              tte_vaddr + 0x400000);
-       }
+               build_obp_range(start, end, prom_trans[i].data);
+       }
+}
 
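build_obp_pgtable() clears OBP_PMD_SIZE (2048) bytes in 4-byte steps because each pmd slot is a 32-bit word, giving 512 slots. That is enough: the OBP window quoted in the old comment ends at 0x100000000, and any address below 4GB indexes a slot smaller than 512. A stand-alone check of that bound:

#include <assert.h>

int main(void)
{
	unsigned long hi_obp = 0x100000000UL;	/* HI_OBP_ADDRESS per the old comment */

	assert(((hi_obp - 1) >> 23) < 2048 / 4);	/* 511 < 512 */
	return 0;
}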
-       /* Re-read translations property. */
-       if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
-               prom_printf("Couldn't get translation property\n");
+/* Read OBP translations property into 'prom_trans[]'.
+ * Return the number of entries.
+ */
+static int __init read_obp_translations(void)
+{
+       int n, node;
+
+       node = prom_finddevice("/virtual-memory");
+       n = prom_getproplen(node, "translations");
+       if (unlikely(n == 0 || n == -1)) {
+               prom_printf("prom_mappings: Couldn't get size.\n");
+               prom_halt();
+       }
+       if (unlikely(n > sizeof(prom_trans))) {
+               prom_printf("prom_mappings: Size %Zd is too big.\n", n);
                prom_halt();
        }
-       n = n / sizeof(*trans);
 
-       for (i = 0; i < n; i++) {
-               unsigned long vaddr = trans[i].virt;
-               unsigned long size = trans[i].size;
+       if ((n = prom_getproperty(node, "translations",
+                                 (char *)&prom_trans[0],
+                                 sizeof(prom_trans))) == -1) {
+               prom_printf("prom_mappings: Couldn't get property.\n");
+               prom_halt();
+       }
+       n = n / sizeof(struct linux_prom_translation);
+       return n;
+}
 
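read_obp_translations() sizes the property in bytes, bounds it against the static prom_trans[] array, then converts bytes to entries. Assuming the struct holds just the virt/size/data fields shown and referenced in this file, that is 24 bytes per entry on sparc64; a sketch of the same conversion (the property length is hypothetical):

#include <stdio.h>

struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};

static struct linux_prom_translation prom_trans[512];

int main(void)
{
	int n = 96;	/* hypothetical 'translations' property length */

	if (n > (int)sizeof(prom_trans))
		return 1;	/* the kernel would prom_halt() here */
	printf("%zu entries\n", n / sizeof(struct linux_prom_translation));
	return 0;
}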
-               if (vaddr < 0xf0000000UL) {
-                       unsigned long avoid_start = (unsigned long) KERNBASE;
-                       unsigned long avoid_end = avoid_start + (4 * 1024 * 1024);
+static void __init remap_kernel(void)
+{
+       unsigned long phys_page, tte_vaddr, tte_data;
+       int tlb_ent = sparc64_highest_locked_tlbent();
 
-                       if (bigkernel)
-                               avoid_end += (4 * 1024 * 1024);
-                       if (vaddr < avoid_start) {
-                               unsigned long top = vaddr + size;
+       tte_vaddr = (unsigned long) KERNBASE;
+       phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+       tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB |
+                                _PAGE_CP | _PAGE_CV | _PAGE_P |
+                                _PAGE_L | _PAGE_W));
 
-                               if (top > avoid_start)
-                                       top = avoid_start;
-                               prom_unmap(top - vaddr, vaddr);
-                       }
-                       if ((vaddr + size) > avoid_end) {
-                               unsigned long bottom = vaddr;
+       kern_locked_tte_data = tte_data;
 
-                               if (bottom < avoid_end)
-                                       bottom = avoid_end;
-                               prom_unmap((vaddr + size) - bottom, bottom);
-                       }
-               }
-       }
+       /* Now lock us into the TLBs via OBP. */
+       prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
+       prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
+       if (bigkernel) {
+               prom_dtlb_load(tlb_ent - 1,
+                              tte_data + 0x400000,
+                              tte_vaddr + 0x400000);
+               prom_itlb_load(tlb_ent - 1,
+                              tte_data + 0x400000,
+                              tte_vaddr + 0x400000);
+       }
+}
+
+static void __init inherit_prom_mappings(void)
+{
+       int n;
+
+       n = read_obp_translations();
+       build_obp_pgtable(n);
+
+       /* Now fixup OBP's idea about where we really are mapped. */
+       prom_printf("Remapping the kernel... ");
+       remap_kernel();
 
        prom_printf("done.\n");
 
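remap_kernel() rounds the PROM-reported physical load address down to a 4MB boundary before building the locked 4MB TTE (_PAGE_SZ4MB). The (x >> 22) << 22 trick is just masking off the low 22 bits; a quick check (sample addresses are hypothetical):

#include <assert.h>

static unsigned long round_down_4mb(unsigned long pa)
{
	return (pa >> 22UL) << 22UL;	/* same as pa & ~((1UL << 22) - 1) */
}

int main(void)
{
	assert(round_down_4mb(0x00404000UL) == 0x00400000UL);
	assert(round_down_4mb(0x00400000UL) == 0x00400000UL);
	return 0;
}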
@@ -1347,8 +1277,6 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 #endif
        bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn);
 
-       bootmap_base = bootmap_pfn << PAGE_SHIFT;
-
        /* Now register the available physical memory with the
         * allocator.
         */
@@ -1398,120 +1326,142 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
        return end_pfn;
 }
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
+{
+       unsigned long vstart = PAGE_OFFSET + pstart;
+       unsigned long vend = PAGE_OFFSET + pend;
+       unsigned long alloc_bytes = 0UL;
+
+       if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
+               prom_printf("kernel_map: Unaligned sp_banks[%lx:%lx]\n",
+                           vstart, vend);
+               prom_halt();
+       }
+
+       while (vstart < vend) {
+               unsigned long this_end, paddr = __pa(vstart);
+               pgd_t *pgd = pgd_offset_k(vstart);
+               pud_t *pud;
+               pmd_t *pmd;
+               pte_t *pte;
+
+               pud = pud_offset(pgd, vstart);
+               if (pud_none(*pud)) {
+                       pmd_t *new;
+
+                       new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+                       alloc_bytes += PAGE_SIZE;
+                       pud_populate(&init_mm, pud, new);
+               }
+
+               pmd = pmd_offset(pud, vstart);
+               if (!pmd_present(*pmd)) {
+                       pte_t *new;
+
+                       new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+                       alloc_bytes += PAGE_SIZE;
+                       pmd_populate_kernel(&init_mm, pmd, new);
+               }
+
+               pte = pte_offset_kernel(pmd, vstart);
+               this_end = (vstart + PMD_SIZE) & PMD_MASK;
+               if (this_end > vend)
+                       this_end = vend;
+
+               while (vstart < this_end) {
+                       pte_val(*pte) = (paddr | pgprot_val(prot));
+
+                       vstart += PAGE_SIZE;
+                       paddr += PAGE_SIZE;
+                       pte++;
+               }
+       }
+
+       return alloc_bytes;
+}
+
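kernel_map_range() fills ptes one PMD worth at a time: each inner pass is clamped at the next PMD boundary or at vend, whichever comes first. The clamp arithmetic stand-alone (the 8MB PMD span matches the >> 23 indexing used elsewhere in this file, but the constant here is still an assumption):

#include <assert.h>

#define PMD_SHIFT	23
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

int main(void)
{
	unsigned long vstart = 0x100402000UL, vend = 0x100500000UL;
	unsigned long this_end = (vstart + PMD_SIZE) & PMD_MASK;

	if (this_end > vend)
		this_end = vend;
	assert(this_end == vend);	/* range ends before the next 8MB boundary */
	return 0;
}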
+extern struct linux_mlist_p1275 *prom_ptot_ptr;
+extern unsigned int kvmap_linear_patch[1];
+
+static void __init kernel_physical_mapping_init(void)
+{
+       struct linux_mlist_p1275 *p = prom_ptot_ptr;
+       unsigned long mem_alloced = 0UL;
+
+       while (p) {
+               unsigned long phys_start, phys_end;
+
+               phys_start = p->start_adr;
+               phys_end = phys_start + p->num_bytes;
+               mem_alloced += kernel_map_range(phys_start, phys_end,
+                                               PAGE_KERNEL);
+
+               p = p->theres_more;
+       }
+
+       printk("Allocated %ld bytes for kernel page tables.\n",
+              mem_alloced);
+
+       kvmap_linear_patch[0] = 0x01000000; /* nop */
+       flushi(&kvmap_linear_patch[0]);
+
+       __flush_tlb_all();
+}
+
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+       unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
+       unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
+
+       kernel_map_range(phys_start, phys_end,
+                        (enable ? PAGE_KERNEL : __pgprot(0)));
+
+       /* we should perform an IPI and flush all tlbs,
+        * but that can deadlock->flush only current cpu.
+        */
+       __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
+                                PAGE_OFFSET + phys_end);
+}
+#endif
+
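kernel_map_pages() turns a (page, numpages) pair into the physical byte range handed to kernel_map_range(), installing PAGE_KERNEL on allocation and an empty pgprot (no valid bit) on free so stray touches fault. The range arithmetic with a raw pfn standing in for page_to_pfn() (values hypothetical; PAGE_SHIFT is 13 for sparc64's 8K pages):

#include <stdio.h>

#define PAGE_SHIFT	13
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long pfn = 0x4000, numpages = 3;
	unsigned long phys_start = pfn << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	printf("phys range [0x%lx, 0x%lx)\n", phys_start, phys_end);
	return 0;
}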
 /* paging_init() sets up the page tables */
 
 extern void cheetah_ecache_flush_init(void);
 
 static unsigned long last_valid_pfn;
+pgd_t swapper_pg_dir[2048];
 
 void __init paging_init(void)
 {
-       extern pmd_t swapper_pmd_dir[1024];
-       extern unsigned int sparc64_vpte_patchme1[1];
-       extern unsigned int sparc64_vpte_patchme2[1];
-       unsigned long alias_base = kern_base + PAGE_OFFSET;
-       unsigned long second_alias_page = 0;
-       unsigned long pt, flags, end_pfn, pages_avail;
-       unsigned long shift = alias_base - ((unsigned long)KERNBASE);
+       unsigned long end_pfn, pages_avail, shift;
        unsigned long real_end;
 
        set_bit(0, mmu_context_bmap);
 
+       shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
+
        real_end = (unsigned long)_end;
        if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
                bigkernel = 1;
-#ifdef CONFIG_BLK_DEV_INITRD
-       if (sparc_ramdisk_image || sparc_ramdisk_image64)
-               real_end = (PAGE_ALIGN(real_end) + PAGE_ALIGN(sparc_ramdisk_size));
-#endif
-
-       /* We assume physical memory starts at some 4mb multiple,
-        * if this were not true we wouldn't boot up to this point
-        * anyways.
-        */
-       pt = kern_base | _PAGE_VALID | _PAGE_SZ4MB;
-       pt |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W;
-       local_irq_save(flags);
-       if (tlb_type == spitfire) {
-               __asm__ __volatile__(
-               "       stxa    %1, [%0] %3\n"
-               "       stxa    %2, [%5] %4\n"
-               "       membar  #Sync\n"
-               "       flush   %%g6\n"
-               "       nop\n"
-               "       nop\n"
-               "       nop\n"
-               : /* No outputs */
-               : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
-                 "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3)
-               : "memory");
-               if (real_end >= KERNBASE + 0x340000) {
-                       second_alias_page = alias_base + 0x400000;
-                       __asm__ __volatile__(
-                       "       stxa    %1, [%0] %3\n"
-                       "       stxa    %2, [%5] %4\n"
-                       "       membar  #Sync\n"
-                       "       flush   %%g6\n"
-                       "       nop\n"
-                       "       nop\n"
-                       "       nop\n"
-                       : /* No outputs */
-                       : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
-                         "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (60 << 3)
-                       : "memory");
-               }
-       } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-               __asm__ __volatile__(
-               "       stxa    %1, [%0] %3\n"
-               "       stxa    %2, [%5] %4\n"
-               "       membar  #Sync\n"
-               "       flush   %%g6\n"
-               "       nop\n"
-               "       nop\n"
-               "       nop\n"
-               : /* No outputs */
-               : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
-                 "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (13<<3))
-               : "memory");
-               if (real_end >= KERNBASE + 0x340000) {
-                       second_alias_page = alias_base + 0x400000;
-                       __asm__ __volatile__(
-                       "       stxa    %1, [%0] %3\n"
-                       "       stxa    %2, [%5] %4\n"
-                       "       membar  #Sync\n"
-                       "       flush   %%g6\n"
-                       "       nop\n"
-                       "       nop\n"
-                       "       nop\n"
-                       : /* No outputs */
-                       : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
-                         "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (12<<3))
-                       : "memory");
-               }
+       if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
+               prom_printf("paging_init: Kernel > 8MB, too large.\n");
+               prom_halt();
        }
-       local_irq_restore(flags);
-
-       /* Now set kernel pgd to upper alias so physical page computations
+
+       /* Set kernel pgd to upper alias so physical page computations
         * work.
         */
        init_mm.pgd += ((shift) / (sizeof(pgd_t)));
 
-       memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir));
+       memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
 
        /* Now can init the kernel/bad page tables. */
        pud_set(pud_offset(&swapper_pg_dir[0], 0),
-               swapper_pmd_dir + (shift / sizeof(pgd_t)));
+               swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
 
-       sparc64_vpte_patchme1[0] |=
-               (((unsigned long)pgd_val(init_mm.pgd[0])) >> 10);
-       sparc64_vpte_patchme2[0] |=
-               (((unsigned long)pgd_val(init_mm.pgd[0])) & 0x3ff);
-       flushi((long)&sparc64_vpte_patchme1[0]);
+       swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);
 
-       /* Setup bootmem... */
-       pages_avail = 0;
-       last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
-
        /* Inherit non-locked OBP mappings. */
        inherit_prom_mappings();
 
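The shift computed at the top of the new paging_init() is the byte distance between the kernel's link-time address (KERNBASE) and its linear-alias address (kern_base + PAGE_OFFSET); biasing init_mm.pgd by shift/sizeof(pgd_t) makes early physical computations land in the alias. A sketch with made-up constants (the real KERNBASE/PAGE_OFFSET/kern_base values are not taken from this diff):

#include <assert.h>

int main(void)
{
	unsigned long page_offset = 0xfffff80000000000UL;	/* assumed */
	unsigned long kernbase    = 0x0000000000400000UL;	/* assumed */
	unsigned long kern_base   = 0x0000000004000000UL;	/* assumed phys base */
	unsigned long shift = kern_base + page_offset - kernbase;

	assert(shift % sizeof(unsigned long) == 0);	/* whole pgd_t steps */
	return 0;
}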
@@ -1527,13 +1477,16 @@ void __init paging_init(void)
 
        inherit_locked_prom_mappings(1);
 
-       /* We only created DTLB mapping of this stuff. */
-       spitfire_flush_dtlb_nucleus_page(alias_base);
-       if (second_alias_page)
-               spitfire_flush_dtlb_nucleus_page(second_alias_page);
-
        __flush_tlb_all();
 
+       /* Setup bootmem... */
+       pages_avail = 0;
+       last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+       kernel_physical_mapping_init();
+#endif
+
        {
                unsigned long zones_size[MAX_NR_ZONES];
                unsigned long zholes_size[MAX_NR_ZONES];
@@ -1695,8 +1648,7 @@ void __init mem_init(void)
 
        i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
        i += 1;
-       sparc64_valid_addr_bitmap = (unsigned long *)
-               __alloc_bootmem(i << 3, SMP_CACHE_BYTES, bootmap_base);
+       sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
        if (sparc64_valid_addr_bitmap == NULL) {
                prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
                prom_halt();
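The sizing above allots sparc64_valid_addr_bitmap one bit per 4MB chunk: last_valid_pfn >> (22 - PAGE_SHIFT) counts 4MB chunks, the extra >> 6 packs 64 of them per long, and i << 3 converts longs to bytes. Checked stand-alone with PAGE_SHIFT = 13 (the pfn value is hypothetical):

#include <assert.h>

int main(void)
{
	unsigned long last_valid_pfn = 0x80000;	/* hypothetical: 4GB of 8K pages */
	unsigned long i = (last_valid_pfn >> ((22 - 13) + 6)) + 1;

	assert(i == 17);	/* 17 longs, i << 3 == 136 bytes */
	return 0;
}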
@@ -1749,7 +1701,7 @@ void __init mem_init(void)
                cheetah_ecache_flush_init();
 }
 
-void free_initmem (void)
+void free_initmem(void)
 {
        unsigned long addr, initend;
 