diff options
| author | Tony Luck <tony.luck@intel.com> | 2005-10-20 13:41:44 -0400 |
|---|---|---|
| committer | Tony Luck <tony.luck@intel.com> | 2005-10-20 13:41:44 -0400 |
| commit | 9cec58dc138d6fcad9f447a19c8ff69f6540e667 (patch) | |
| tree | 4fe1cca94fdba8b705c87615bee06d3346f687ce /arch/sparc64/mm/init.c | |
| parent | 17e5ad6c0ce5a970e2830d0de8bdd60a2f077d38 (diff) | |
| parent | ac9b9c667c2e1194e22ebe0a441ae1c37aaa9b90 (diff) | |
Update from upstream with manual merge of Yasunori Goto's
changes to swiotlb.c made in commit 281dd25cdc0d6903929b79183816d151ea626341
since this file has been moved from arch/ia64/lib/swiotlb.c to
lib/swiotlb.c
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/sparc64/mm/init.c')
| -rw-r--r-- | arch/sparc64/mm/init.c | 868 |
1 file changed, 376 insertions, 492 deletions
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index fdb1ebb308c9..1e44ee26cee8 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c | |||
| @@ -20,6 +20,8 @@ | |||
| 20 | #include <linux/fs.h> | 20 | #include <linux/fs.h> |
| 21 | #include <linux/seq_file.h> | 21 | #include <linux/seq_file.h> |
| 22 | #include <linux/kprobes.h> | 22 | #include <linux/kprobes.h> |
| 23 | #include <linux/cache.h> | ||
| 24 | #include <linux/sort.h> | ||
| 23 | 25 | ||
| 24 | #include <asm/head.h> | 26 | #include <asm/head.h> |
| 25 | #include <asm/system.h> | 27 | #include <asm/system.h> |
| @@ -40,24 +42,80 @@ | |||
| 40 | 42 | ||
| 41 | extern void device_scan(void); | 43 | extern void device_scan(void); |
| 42 | 44 | ||
| 43 | struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS]; | 45 | #define MAX_BANKS 32 |
| 44 | 46 | ||
| 45 | unsigned long *sparc64_valid_addr_bitmap; | 47 | static struct linux_prom64_registers pavail[MAX_BANKS] __initdata; |
| 48 | static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata; | ||
| 49 | static int pavail_ents __initdata; | ||
| 50 | static int pavail_rescan_ents __initdata; | ||
| 51 | |||
| 52 | static int cmp_p64(const void *a, const void *b) | ||
| 53 | { | ||
| 54 | const struct linux_prom64_registers *x = a, *y = b; | ||
| 55 | |||
| 56 | if (x->phys_addr > y->phys_addr) | ||
| 57 | return 1; | ||
| 58 | if (x->phys_addr < y->phys_addr) | ||
| 59 | return -1; | ||
| 60 | return 0; | ||
| 61 | } | ||
| 62 | |||
| 63 | static void __init read_obp_memory(const char *property, | ||
| 64 | struct linux_prom64_registers *regs, | ||
| 65 | int *num_ents) | ||
| 66 | { | ||
| 67 | int node = prom_finddevice("/memory"); | ||
| 68 | int prop_size = prom_getproplen(node, property); | ||
| 69 | int ents, ret, i; | ||
| 70 | |||
| 71 | ents = prop_size / sizeof(struct linux_prom64_registers); | ||
| 72 | if (ents > MAX_BANKS) { | ||
| 73 | prom_printf("The machine has more %s property entries than " | ||
| 74 | "this kernel can support (%d).\n", | ||
| 75 | property, MAX_BANKS); | ||
| 76 | prom_halt(); | ||
| 77 | } | ||
| 78 | |||
| 79 | ret = prom_getproperty(node, property, (char *) regs, prop_size); | ||
| 80 | if (ret == -1) { | ||
| 81 | prom_printf("Couldn't get %s property from /memory.\n"); | ||
| 82 | prom_halt(); | ||
| 83 | } | ||
| 84 | |||
| 85 | *num_ents = ents; | ||
| 86 | |||
| 87 | /* Sanitize what we got from the firmware, by page aligning | ||
| 88 | * everything. | ||
| 89 | */ | ||
| 90 | for (i = 0; i < ents; i++) { | ||
| 91 | unsigned long base, size; | ||
| 92 | |||
| 93 | base = regs[i].phys_addr; | ||
| 94 | size = regs[i].reg_size; | ||
| 95 | |||
| 96 | size &= PAGE_MASK; | ||
| 97 | if (base & ~PAGE_MASK) { | ||
| 98 | unsigned long new_base = PAGE_ALIGN(base); | ||
| 99 | |||
| 100 | size -= new_base - base; | ||
| 101 | if ((long) size < 0L) | ||
| 102 | size = 0UL; | ||
| 103 | base = new_base; | ||
| 104 | } | ||
| 105 | regs[i].phys_addr = base; | ||
| 106 | regs[i].reg_size = size; | ||
| 107 | } | ||
| 108 | sort(regs, ents, sizeof(struct linux_prom64_registers), | ||
| 109 | cmp_p64, NULL); | ||
| 110 | } | ||
| 111 | |||
| 112 | unsigned long *sparc64_valid_addr_bitmap __read_mostly; | ||
| 46 | 113 | ||
| 47 | /* Ugly, but necessary... -DaveM */ | 114 | /* Ugly, but necessary... -DaveM */ |
| 48 | unsigned long phys_base; | 115 | unsigned long phys_base __read_mostly; |
| 49 | unsigned long kern_base; | 116 | unsigned long kern_base __read_mostly; |
| 50 | unsigned long kern_size; | 117 | unsigned long kern_size __read_mostly; |
| 51 | unsigned long pfn_base; | 118 | unsigned long pfn_base __read_mostly; |
| 52 | |||
| 53 | /* This is even uglier. We have a problem where the kernel may not be | ||
| 54 | * located at phys_base. However, initial __alloc_bootmem() calls need to | ||
| 55 | * be adjusted to be within the 4-8Megs that the kernel is mapped to, else | ||
| 56 | * those page mappings wont work. Things are ok after inherit_prom_mappings | ||
| 57 | * is called though. Dave says he'll clean this up some other time. | ||
| 58 | * -- BenC | ||
| 59 | */ | ||
| 60 | static unsigned long bootmap_base; | ||
| 61 | 119 | ||
| 62 | /* get_new_mmu_context() uses "cache + 1". */ | 120 | /* get_new_mmu_context() uses "cache + 1". */ |
| 63 | DEFINE_SPINLOCK(ctx_alloc_lock); | 121 | DEFINE_SPINLOCK(ctx_alloc_lock); |
| @@ -73,7 +131,13 @@ extern unsigned long sparc_ramdisk_image64; | |||
| 73 | extern unsigned int sparc_ramdisk_image; | 131 | extern unsigned int sparc_ramdisk_image; |
| 74 | extern unsigned int sparc_ramdisk_size; | 132 | extern unsigned int sparc_ramdisk_size; |
| 75 | 133 | ||
| 76 | struct page *mem_map_zero; | 134 | struct page *mem_map_zero __read_mostly; |
| 135 | |||
| 136 | unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly; | ||
| 137 | |||
| 138 | unsigned long sparc64_kern_pri_context __read_mostly; | ||
| 139 | unsigned long sparc64_kern_pri_nuc_bits __read_mostly; | ||
| 140 | unsigned long sparc64_kern_sec_context __read_mostly; | ||
| 77 | 141 | ||
| 78 | int bigkernel = 0; | 142 | int bigkernel = 0; |
| 79 | 143 | ||
| @@ -179,8 +243,6 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c | |||
| 179 | : "g1", "g7"); | 243 | : "g1", "g7"); |
| 180 | } | 244 | } |
| 181 | 245 | ||
| 182 | extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long address, pte_t pte, int code); | ||
| 183 | |||
| 184 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) | 246 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) |
| 185 | { | 247 | { |
| 186 | struct page *page; | 248 | struct page *page; |
| @@ -207,10 +269,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p | |||
| 207 | 269 | ||
| 208 | put_cpu(); | 270 | put_cpu(); |
| 209 | } | 271 | } |
| 210 | |||
| 211 | if (get_thread_fault_code()) | ||
| 212 | __update_mmu_cache(CTX_NRBITS(vma->vm_mm->context), | ||
| 213 | address, pte, get_thread_fault_code()); | ||
| 214 | } | 272 | } |
| 215 | 273 | ||
| 216 | void flush_dcache_page(struct page *page) | 274 | void flush_dcache_page(struct page *page) |
| @@ -310,6 +368,11 @@ struct linux_prom_translation { | |||
| 310 | unsigned long data; | 368 | unsigned long data; |
| 311 | }; | 369 | }; |
| 312 | 370 | ||
| 371 | /* Exported for kernel TLB miss handling in ktlb.S */ | ||
| 372 | struct linux_prom_translation prom_trans[512] __read_mostly; | ||
| 373 | unsigned int prom_trans_ents __read_mostly; | ||
| 374 | unsigned int swapper_pgd_zero __read_mostly; | ||
| 375 | |||
| 313 | extern unsigned long prom_boot_page; | 376 | extern unsigned long prom_boot_page; |
| 314 | extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle); | 377 | extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle); |
| 315 | extern int prom_get_mmu_ihandle(void); | 378 | extern int prom_get_mmu_ihandle(void); |
| @@ -318,297 +381,162 @@ extern void register_prom_callbacks(void); | |||
| 318 | /* Exported for SMP bootup purposes. */ | 381 | /* Exported for SMP bootup purposes. */ |
| 319 | unsigned long kern_locked_tte_data; | 382 | unsigned long kern_locked_tte_data; |
| 320 | 383 | ||
| 321 | void __init early_pgtable_allocfail(char *type) | ||
| 322 | { | ||
| 323 | prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type); | ||
| 324 | prom_halt(); | ||
| 325 | } | ||
| 326 | |||
| 327 | #define BASE_PAGE_SIZE 8192 | ||
| 328 | static pmd_t *prompmd; | ||
| 329 | |||
| 330 | /* | 384 | /* |
| 331 | * Translate PROM's mapping we capture at boot time into physical address. | 385 | * Translate PROM's mapping we capture at boot time into physical address. |
| 332 | * The second parameter is only set from prom_callback() invocations. | 386 | * The second parameter is only set from prom_callback() invocations. |
| 333 | */ | 387 | */ |
| 334 | unsigned long prom_virt_to_phys(unsigned long promva, int *error) | 388 | unsigned long prom_virt_to_phys(unsigned long promva, int *error) |
| 335 | { | 389 | { |
| 336 | pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff); | 390 | int i; |
| 337 | pte_t *ptep; | 391 | |
| 338 | unsigned long base; | 392 | for (i = 0; i < prom_trans_ents; i++) { |
| 339 | 393 | struct linux_prom_translation *p = &prom_trans[i]; | |
| 340 | if (pmd_none(*pmdp)) { | 394 | |
| 341 | if (error) | 395 | if (promva >= p->virt && |
| 342 | *error = 1; | 396 | promva < (p->virt + p->size)) { |
| 343 | return(0); | 397 | unsigned long base = p->data & _PAGE_PADDR; |
| 344 | } | 398 | |
| 345 | ptep = (pte_t *)__pmd_page(*pmdp) + ((promva >> 13) & 0x3ff); | 399 | if (error) |
| 346 | if (!pte_present(*ptep)) { | 400 | *error = 0; |
| 347 | if (error) | 401 | return base + (promva & (8192 - 1)); |
| 348 | *error = 1; | 402 | } |
| 349 | return(0); | ||
| 350 | } | ||
| 351 | if (error) { | ||
| 352 | *error = 0; | ||
| 353 | return(pte_val(*ptep)); | ||
| 354 | } | 403 | } |
| 355 | base = pte_val(*ptep) & _PAGE_PADDR; | 404 | if (error) |
| 356 | return(base + (promva & (BASE_PAGE_SIZE - 1))); | 405 | *error = 1; |
| 406 | return 0UL; | ||
| 357 | } | 407 | } |
| 358 | 408 | ||
| 359 | static void inherit_prom_mappings(void) | 409 | /* The obp translations are saved based on 8k pagesize, since obp can |
| 410 | * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS -> | ||
| 411 | * HI_OBP_ADDRESS range are handled in ktlb.S and do not use the vpte | ||
| 412 | * scheme (also, see rant in inherit_locked_prom_mappings()). | ||
| 413 | */ | ||
| 414 | static inline int in_obp_range(unsigned long vaddr) | ||
| 360 | { | 415 | { |
| 361 | struct linux_prom_translation *trans; | 416 | return (vaddr >= LOW_OBP_ADDRESS && |
| 362 | unsigned long phys_page, tte_vaddr, tte_data; | 417 | vaddr < HI_OBP_ADDRESS); |
| 363 | void (*remap_func)(unsigned long, unsigned long, int); | 418 | } |
| 364 | pmd_t *pmdp; | 419 | |
| 365 | pte_t *ptep; | 420 | static int cmp_ptrans(const void *a, const void *b) |
| 366 | int node, n, i, tsz; | 421 | { |
| 367 | extern unsigned int obp_iaddr_patch[2], obp_daddr_patch[2]; | 422 | const struct linux_prom_translation *x = a, *y = b; |
| 423 | |||
| 424 | if (x->virt > y->virt) | ||
| 425 | return 1; | ||
| 426 | if (x->virt < y->virt) | ||
| 427 | return -1; | ||
| 428 | return 0; | ||
| 429 | } | ||
| 430 | |||
| 431 | /* Read OBP translations property into 'prom_trans[]'. */ | ||
| 432 | static void __init read_obp_translations(void) | ||
| 433 | { | ||
| 434 | int n, node, ents, first, last, i; | ||
| 368 | 435 | ||
| 369 | node = prom_finddevice("/virtual-memory"); | 436 | node = prom_finddevice("/virtual-memory"); |
| 370 | n = prom_getproplen(node, "translations"); | 437 | n = prom_getproplen(node, "translations"); |
| 371 | if (n == 0 || n == -1) { | 438 | if (unlikely(n == 0 || n == -1)) { |
| 372 | prom_printf("Couldn't get translation property\n"); | 439 | prom_printf("prom_mappings: Couldn't get size.\n"); |
| 373 | prom_halt(); | 440 | prom_halt(); |
| 374 | } | 441 | } |
| 375 | n += 5 * sizeof(struct linux_prom_translation); | 442 | if (unlikely(n > sizeof(prom_trans))) { |
| 376 | for (tsz = 1; tsz < n; tsz <<= 1) | 443 | prom_printf("prom_mappings: Size %Zd is too big.\n", n); |
| 377 | /* empty */; | ||
| 378 | trans = __alloc_bootmem(tsz, SMP_CACHE_BYTES, bootmap_base); | ||
| 379 | if (trans == NULL) { | ||
| 380 | prom_printf("inherit_prom_mappings: Cannot alloc translations.\n"); | ||
| 381 | prom_halt(); | 444 | prom_halt(); |
| 382 | } | 445 | } |
| 383 | memset(trans, 0, tsz); | ||
| 384 | 446 | ||
| 385 | if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) { | 447 | if ((n = prom_getproperty(node, "translations", |
| 386 | prom_printf("Couldn't get translation property\n"); | 448 | (char *)&prom_trans[0], |
| 449 | sizeof(prom_trans))) == -1) { | ||
| 450 | prom_printf("prom_mappings: Couldn't get property.\n"); | ||
| 387 | prom_halt(); | 451 | prom_halt(); |
| 388 | } | 452 | } |
| 389 | n = n / sizeof(*trans); | ||
| 390 | 453 | ||
| 391 | /* | 454 | n = n / sizeof(struct linux_prom_translation); |
| 392 | * The obp translations are saved based on 8k pagesize, since obp can | ||
| 393 | * use a mixture of pagesizes. Misses to the 0xf0000000 - 0x100000000, | ||
| 394 | * ie obp range, are handled in entry.S and do not use the vpte scheme | ||
| 395 | * (see rant in inherit_locked_prom_mappings()). | ||
| 396 | */ | ||
| 397 | #define OBP_PMD_SIZE 2048 | ||
| 398 | prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, bootmap_base); | ||
| 399 | if (prompmd == NULL) | ||
| 400 | early_pgtable_allocfail("pmd"); | ||
| 401 | memset(prompmd, 0, OBP_PMD_SIZE); | ||
| 402 | for (i = 0; i < n; i++) { | ||
| 403 | unsigned long vaddr; | ||
| 404 | |||
| 405 | if (trans[i].virt >= LOW_OBP_ADDRESS && trans[i].virt < HI_OBP_ADDRESS) { | ||
| 406 | for (vaddr = trans[i].virt; | ||
| 407 | ((vaddr < trans[i].virt + trans[i].size) && | ||
| 408 | (vaddr < HI_OBP_ADDRESS)); | ||
| 409 | vaddr += BASE_PAGE_SIZE) { | ||
| 410 | unsigned long val; | ||
| 411 | |||
| 412 | pmdp = prompmd + ((vaddr >> 23) & 0x7ff); | ||
| 413 | if (pmd_none(*pmdp)) { | ||
| 414 | ptep = __alloc_bootmem(BASE_PAGE_SIZE, | ||
| 415 | BASE_PAGE_SIZE, | ||
| 416 | bootmap_base); | ||
| 417 | if (ptep == NULL) | ||
| 418 | early_pgtable_allocfail("pte"); | ||
| 419 | memset(ptep, 0, BASE_PAGE_SIZE); | ||
| 420 | pmd_set(pmdp, ptep); | ||
| 421 | } | ||
| 422 | ptep = (pte_t *)__pmd_page(*pmdp) + | ||
| 423 | ((vaddr >> 13) & 0x3ff); | ||
| 424 | 455 | ||
| 425 | val = trans[i].data; | 456 | ents = n; |
| 426 | 457 | ||
| 427 | /* Clear diag TTE bits. */ | 458 | sort(prom_trans, ents, sizeof(struct linux_prom_translation), |
| 428 | if (tlb_type == spitfire) | 459 | cmp_ptrans, NULL); |
| 429 | val &= ~0x0003fe0000000000UL; | ||
| 430 | 460 | ||
| 431 | set_pte_at(&init_mm, vaddr, | 461 | /* Now kick out all the non-OBP entries. */ |
| 432 | ptep, __pte(val | _PAGE_MODIFIED)); | 462 | for (i = 0; i < ents; i++) { |
| 433 | trans[i].data += BASE_PAGE_SIZE; | 463 | if (in_obp_range(prom_trans[i].virt)) |
| 434 | } | 464 | break; |
| 435 | } | 465 | } |
| 466 | first = i; | ||
| 467 | for (; i < ents; i++) { | ||
| 468 | if (!in_obp_range(prom_trans[i].virt)) | ||
| 469 | break; | ||
| 436 | } | 470 | } |
| 437 | phys_page = __pa(prompmd); | 471 | last = i; |
| 438 | obp_iaddr_patch[0] |= (phys_page >> 10); | ||
| 439 | obp_iaddr_patch[1] |= (phys_page & 0x3ff); | ||
| 440 | flushi((long)&obp_iaddr_patch[0]); | ||
| 441 | obp_daddr_patch[0] |= (phys_page >> 10); | ||
| 442 | obp_daddr_patch[1] |= (phys_page & 0x3ff); | ||
| 443 | flushi((long)&obp_daddr_patch[0]); | ||
| 444 | 472 | ||
| 445 | /* Now fixup OBP's idea about where we really are mapped. */ | 473 | for (i = 0; i < (last - first); i++) { |
| 446 | prom_printf("Remapping the kernel... "); | 474 | struct linux_prom_translation *src = &prom_trans[i + first]; |
| 475 | struct linux_prom_translation *dest = &prom_trans[i]; | ||
| 447 | 476 | ||
| 448 | /* Spitfire Errata #32 workaround */ | 477 | *dest = *src; |
| 449 | /* NOTE: Using plain zero for the context value is | 478 | } |
| 450 | * correct here, we are not using the Linux trap | 479 | for (; i < ents; i++) { |
| 451 | * tables yet so we should not use the special | 480 | struct linux_prom_translation *dest = &prom_trans[i]; |
| 452 | * UltraSPARC-III+ page size encodings yet. | 481 | dest->virt = dest->size = dest->data = 0x0UL; |
| 453 | */ | 482 | } |
| 454 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | 483 | |
| 455 | "flush %%g6" | 484 | prom_trans_ents = last - first; |
| 456 | : /* No outputs */ | ||
| 457 | : "r" (0), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
| 458 | |||
| 459 | switch (tlb_type) { | ||
| 460 | default: | ||
| 461 | case spitfire: | ||
| 462 | phys_page = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()); | ||
| 463 | break; | ||
| 464 | |||
| 465 | case cheetah: | ||
| 466 | case cheetah_plus: | ||
| 467 | phys_page = cheetah_get_litlb_data(sparc64_highest_locked_tlbent()); | ||
| 468 | break; | ||
| 469 | }; | ||
| 470 | |||
| 471 | phys_page &= _PAGE_PADDR; | ||
| 472 | phys_page += ((unsigned long)&prom_boot_page - | ||
| 473 | (unsigned long)KERNBASE); | ||
| 474 | 485 | ||
| 475 | if (tlb_type == spitfire) { | 486 | if (tlb_type == spitfire) { |
| 476 | /* Lock this into i/d tlb entry 59 */ | 487 | /* Clear diag TTE bits. */ |
| 477 | __asm__ __volatile__( | 488 | for (i = 0; i < prom_trans_ents; i++) |
| 478 | "stxa %%g0, [%2] %3\n\t" | 489 | prom_trans[i].data &= ~0x0003fe0000000000UL; |
| 479 | "stxa %0, [%1] %4\n\t" | ||
| 480 | "membar #Sync\n\t" | ||
| 481 | "flush %%g6\n\t" | ||
| 482 | "stxa %%g0, [%2] %5\n\t" | ||
| 483 | "stxa %0, [%1] %6\n\t" | ||
| 484 | "membar #Sync\n\t" | ||
| 485 | "flush %%g6" | ||
| 486 | : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP | | ||
| 487 | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W), | ||
| 488 | "r" (59 << 3), "r" (TLB_TAG_ACCESS), | ||
| 489 | "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), | ||
| 490 | "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS) | ||
| 491 | : "memory"); | ||
| 492 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | ||
| 493 | /* Lock this into i/d tlb-0 entry 11 */ | ||
| 494 | __asm__ __volatile__( | ||
| 495 | "stxa %%g0, [%2] %3\n\t" | ||
| 496 | "stxa %0, [%1] %4\n\t" | ||
| 497 | "membar #Sync\n\t" | ||
| 498 | "flush %%g6\n\t" | ||
| 499 | "stxa %%g0, [%2] %5\n\t" | ||
| 500 | "stxa %0, [%1] %6\n\t" | ||
| 501 | "membar #Sync\n\t" | ||
| 502 | "flush %%g6" | ||
| 503 | : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP | | ||
| 504 | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W), | ||
| 505 | "r" ((0 << 16) | (11 << 3)), "r" (TLB_TAG_ACCESS), | ||
| 506 | "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), | ||
| 507 | "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS) | ||
| 508 | : "memory"); | ||
| 509 | } else { | ||
| 510 | /* Implement me :-) */ | ||
| 511 | BUG(); | ||
| 512 | } | 490 | } |
| 491 | } | ||
| 513 | 492 | ||
| 514 | tte_vaddr = (unsigned long) KERNBASE; | 493 | static void __init remap_kernel(void) |
| 494 | { | ||
| 495 | unsigned long phys_page, tte_vaddr, tte_data; | ||
| 496 | int tlb_ent = sparc64_highest_locked_tlbent(); | ||
| 515 | 497 | ||
| 516 | /* Spitfire Errata #32 workaround */ | 498 | tte_vaddr = (unsigned long) KERNBASE; |
| 517 | /* NOTE: Using plain zero for the context value is | 499 | phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; |
| 518 | * correct here, we are not using the Linux trap | 500 | tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB | |
| 519 | * tables yet so we should not use the special | 501 | _PAGE_CP | _PAGE_CV | _PAGE_P | |
| 520 | * UltraSPARC-III+ page size encodings yet. | 502 | _PAGE_L | _PAGE_W)); |
| 521 | */ | ||
| 522 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
| 523 | "flush %%g6" | ||
| 524 | : /* No outputs */ | ||
| 525 | : "r" (0), | ||
| 526 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
| 527 | |||
| 528 | if (tlb_type == spitfire) | ||
| 529 | tte_data = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()); | ||
| 530 | else | ||
| 531 | tte_data = cheetah_get_ldtlb_data(sparc64_highest_locked_tlbent()); | ||
| 532 | 503 | ||
| 533 | kern_locked_tte_data = tte_data; | 504 | kern_locked_tte_data = tte_data; |
| 534 | 505 | ||
| 535 | remap_func = (void *) ((unsigned long) &prom_remap - | 506 | /* Now lock us into the TLBs via OBP. */ |
| 536 | (unsigned long) &prom_boot_page); | 507 | prom_dtlb_load(tlb_ent, tte_data, tte_vaddr); |
| 537 | 508 | prom_itlb_load(tlb_ent, tte_data, tte_vaddr); | |
| 538 | |||
| 539 | /* Spitfire Errata #32 workaround */ | ||
| 540 | /* NOTE: Using plain zero for the context value is | ||
| 541 | * correct here, we are not using the Linux trap | ||
| 542 | * tables yet so we should not use the special | ||
| 543 | * UltraSPARC-III+ page size encodings yet. | ||
| 544 | */ | ||
| 545 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
| 546 | "flush %%g6" | ||
| 547 | : /* No outputs */ | ||
| 548 | : "r" (0), | ||
| 549 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
| 550 | |||
| 551 | remap_func((tlb_type == spitfire ? | ||
| 552 | (spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR) : | ||
| 553 | (cheetah_get_litlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR)), | ||
| 554 | (unsigned long) KERNBASE, | ||
| 555 | prom_get_mmu_ihandle()); | ||
| 556 | |||
| 557 | if (bigkernel) | ||
| 558 | remap_func(((tte_data + 0x400000) & _PAGE_PADDR), | ||
| 559 | (unsigned long) KERNBASE + 0x400000, prom_get_mmu_ihandle()); | ||
| 560 | |||
| 561 | /* Flush out that temporary mapping. */ | ||
| 562 | spitfire_flush_dtlb_nucleus_page(0x0); | ||
| 563 | spitfire_flush_itlb_nucleus_page(0x0); | ||
| 564 | |||
| 565 | /* Now lock us back into the TLBs via OBP. */ | ||
| 566 | prom_dtlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr); | ||
| 567 | prom_itlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr); | ||
| 568 | if (bigkernel) { | 509 | if (bigkernel) { |
| 569 | prom_dtlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000, | 510 | tlb_ent -= 1; |
| 570 | tte_vaddr + 0x400000); | 511 | prom_dtlb_load(tlb_ent, |
| 571 | prom_itlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000, | 512 | tte_data + 0x400000, |
| 572 | tte_vaddr + 0x400000); | 513 | tte_vaddr + 0x400000); |
| 514 | prom_itlb_load(tlb_ent, | ||
| 515 | tte_data + 0x400000, | ||
| 516 | tte_vaddr + 0x400000); | ||
| 573 | } | 517 | } |
| 574 | 518 | sparc64_highest_unlocked_tlb_ent = tlb_ent - 1; | |
| 575 | /* Re-read translations property. */ | 519 | if (tlb_type == cheetah_plus) { |
| 576 | if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) { | 520 | sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | |
| 577 | prom_printf("Couldn't get translation property\n"); | 521 | CTX_CHEETAH_PLUS_NUC); |
| 578 | prom_halt(); | 522 | sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC; |
| 523 | sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0; | ||
| 579 | } | 524 | } |
| 580 | n = n / sizeof(*trans); | 525 | } |
| 581 | |||
| 582 | for (i = 0; i < n; i++) { | ||
| 583 | unsigned long vaddr = trans[i].virt; | ||
| 584 | unsigned long size = trans[i].size; | ||
| 585 | |||
| 586 | if (vaddr < 0xf0000000UL) { | ||
| 587 | unsigned long avoid_start = (unsigned long) KERNBASE; | ||
| 588 | unsigned long avoid_end = avoid_start + (4 * 1024 * 1024); | ||
| 589 | |||
| 590 | if (bigkernel) | ||
| 591 | avoid_end += (4 * 1024 * 1024); | ||
| 592 | if (vaddr < avoid_start) { | ||
| 593 | unsigned long top = vaddr + size; | ||
| 594 | 526 | ||
| 595 | if (top > avoid_start) | ||
| 596 | top = avoid_start; | ||
| 597 | prom_unmap(top - vaddr, vaddr); | ||
| 598 | } | ||
| 599 | if ((vaddr + size) > avoid_end) { | ||
| 600 | unsigned long bottom = vaddr; | ||
| 601 | 527 | ||
| 602 | if (bottom < avoid_end) | 528 | static void __init inherit_prom_mappings(void) |
| 603 | bottom = avoid_end; | 529 | { |
| 604 | prom_unmap((vaddr + size) - bottom, bottom); | 530 | read_obp_translations(); |
| 605 | } | ||
| 606 | } | ||
| 607 | } | ||
| 608 | 531 | ||
| 532 | /* Now fixup OBP's idea about where we really are mapped. */ | ||
| 533 | prom_printf("Remapping the kernel... "); | ||
| 534 | remap_kernel(); | ||
| 609 | prom_printf("done.\n"); | 535 | prom_printf("done.\n"); |
| 610 | 536 | ||
| 537 | prom_printf("Registering callbacks... "); | ||
| 611 | register_prom_callbacks(); | 538 | register_prom_callbacks(); |
| 539 | prom_printf("done.\n"); | ||
| 612 | } | 540 | } |
| 613 | 541 | ||
| 614 | /* The OBP specifications for sun4u mark 0xfffffffc00000000 and | 542 | /* The OBP specifications for sun4u mark 0xfffffffc00000000 and |
| @@ -792,8 +720,8 @@ void inherit_locked_prom_mappings(int save_p) | |||
| 792 | } | 720 | } |
| 793 | } | 721 | } |
| 794 | if (tlb_type == spitfire) { | 722 | if (tlb_type == spitfire) { |
| 795 | int high = SPITFIRE_HIGHEST_LOCKED_TLBENT - bigkernel; | 723 | int high = sparc64_highest_unlocked_tlb_ent; |
| 796 | for (i = 0; i < high; i++) { | 724 | for (i = 0; i <= high; i++) { |
| 797 | unsigned long data; | 725 | unsigned long data; |
| 798 | 726 | ||
| 799 | /* Spitfire Errata #32 workaround */ | 727 | /* Spitfire Errata #32 workaround */ |
| @@ -881,9 +809,9 @@ void inherit_locked_prom_mappings(int save_p) | |||
| 881 | } | 809 | } |
| 882 | } | 810 | } |
| 883 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | 811 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { |
| 884 | int high = CHEETAH_HIGHEST_LOCKED_TLBENT - bigkernel; | 812 | int high = sparc64_highest_unlocked_tlb_ent; |
| 885 | 813 | ||
| 886 | for (i = 0; i < high; i++) { | 814 | for (i = 0; i <= high; i++) { |
| 887 | unsigned long data; | 815 | unsigned long data; |
| 888 | 816 | ||
| 889 | data = cheetah_get_ldtlb_data(i); | 817 | data = cheetah_get_ldtlb_data(i); |
| @@ -1276,14 +1204,14 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) | |||
| 1276 | int i; | 1204 | int i; |
| 1277 | 1205 | ||
| 1278 | #ifdef CONFIG_DEBUG_BOOTMEM | 1206 | #ifdef CONFIG_DEBUG_BOOTMEM |
| 1279 | prom_printf("bootmem_init: Scan sp_banks, "); | 1207 | prom_printf("bootmem_init: Scan pavail, "); |
| 1280 | #endif | 1208 | #endif |
| 1281 | 1209 | ||
| 1282 | bytes_avail = 0UL; | 1210 | bytes_avail = 0UL; |
| 1283 | for (i = 0; sp_banks[i].num_bytes != 0; i++) { | 1211 | for (i = 0; i < pavail_ents; i++) { |
| 1284 | end_of_phys_memory = sp_banks[i].base_addr + | 1212 | end_of_phys_memory = pavail[i].phys_addr + |
| 1285 | sp_banks[i].num_bytes; | 1213 | pavail[i].reg_size; |
| 1286 | bytes_avail += sp_banks[i].num_bytes; | 1214 | bytes_avail += pavail[i].reg_size; |
| 1287 | if (cmdline_memory_size) { | 1215 | if (cmdline_memory_size) { |
| 1288 | if (bytes_avail > cmdline_memory_size) { | 1216 | if (bytes_avail > cmdline_memory_size) { |
| 1289 | unsigned long slack = bytes_avail - cmdline_memory_size; | 1217 | unsigned long slack = bytes_avail - cmdline_memory_size; |
| @@ -1291,12 +1219,15 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) | |||
| 1291 | bytes_avail -= slack; | 1219 | bytes_avail -= slack; |
| 1292 | end_of_phys_memory -= slack; | 1220 | end_of_phys_memory -= slack; |
| 1293 | 1221 | ||
| 1294 | sp_banks[i].num_bytes -= slack; | 1222 | pavail[i].reg_size -= slack; |
| 1295 | if (sp_banks[i].num_bytes == 0) { | 1223 | if ((long)pavail[i].reg_size <= 0L) { |
| 1296 | sp_banks[i].base_addr = 0xdeadbeef; | 1224 | pavail[i].phys_addr = 0xdeadbeefUL; |
| 1225 | pavail[i].reg_size = 0UL; | ||
| 1226 | pavail_ents = i; | ||
| 1297 | } else { | 1227 | } else { |
| 1298 | sp_banks[i+1].num_bytes = 0; | 1228 | pavail[i+1].reg_size = 0Ul; |
| 1299 | sp_banks[i+1].base_addr = 0xdeadbeef; | 1229 | pavail[i+1].phys_addr = 0xdeadbeefUL; |
| 1230 | pavail_ents = i + 1; | ||
| 1300 | } | 1231 | } |
| 1301 | break; | 1232 | break; |
| 1302 | } | 1233 | } |
| @@ -1347,17 +1278,15 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) | |||
| 1347 | #endif | 1278 | #endif |
| 1348 | bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn); | 1279 | bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn); |
| 1349 | 1280 | ||
| 1350 | bootmap_base = bootmap_pfn << PAGE_SHIFT; | ||
| 1351 | |||
| 1352 | /* Now register the available physical memory with the | 1281 | /* Now register the available physical memory with the |
| 1353 | * allocator. | 1282 | * allocator. |
| 1354 | */ | 1283 | */ |
| 1355 | for (i = 0; sp_banks[i].num_bytes != 0; i++) { | 1284 | for (i = 0; i < pavail_ents; i++) { |
| 1356 | #ifdef CONFIG_DEBUG_BOOTMEM | 1285 | #ifdef CONFIG_DEBUG_BOOTMEM |
| 1357 | prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n", | 1286 | prom_printf("free_bootmem(pavail:%d): base[%lx] size[%lx]\n", |
| 1358 | i, sp_banks[i].base_addr, sp_banks[i].num_bytes); | 1287 | i, pavail[i].phys_addr, pavail[i].reg_size); |
| 1359 | #endif | 1288 | #endif |
| 1360 | free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes); | 1289 | free_bootmem(pavail[i].phys_addr, pavail[i].reg_size); |
| 1361 | } | 1290 | } |
| 1362 | 1291 | ||
| 1363 | #ifdef CONFIG_BLK_DEV_INITRD | 1292 | #ifdef CONFIG_BLK_DEV_INITRD |
| @@ -1398,121 +1327,167 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) | |||
| 1398 | return end_pfn; | 1327 | return end_pfn; |
| 1399 | } | 1328 | } |
| 1400 | 1329 | ||
| 1330 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
| 1331 | static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot) | ||
| 1332 | { | ||
| 1333 | unsigned long vstart = PAGE_OFFSET + pstart; | ||
| 1334 | unsigned long vend = PAGE_OFFSET + pend; | ||
| 1335 | unsigned long alloc_bytes = 0UL; | ||
| 1336 | |||
| 1337 | if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) { | ||
| 1338 | prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n", | ||
| 1339 | vstart, vend); | ||
| 1340 | prom_halt(); | ||
| 1341 | } | ||
| 1342 | |||
| 1343 | while (vstart < vend) { | ||
| 1344 | unsigned long this_end, paddr = __pa(vstart); | ||
| 1345 | pgd_t *pgd = pgd_offset_k(vstart); | ||
| 1346 | pud_t *pud; | ||
| 1347 | pmd_t *pmd; | ||
| 1348 | pte_t *pte; | ||
| 1349 | |||
| 1350 | pud = pud_offset(pgd, vstart); | ||
| 1351 | if (pud_none(*pud)) { | ||
| 1352 | pmd_t *new; | ||
| 1353 | |||
| 1354 | new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); | ||
| 1355 | alloc_bytes += PAGE_SIZE; | ||
| 1356 | pud_populate(&init_mm, pud, new); | ||
| 1357 | } | ||
| 1358 | |||
| 1359 | pmd = pmd_offset(pud, vstart); | ||
| 1360 | if (!pmd_present(*pmd)) { | ||
| 1361 | pte_t *new; | ||
| 1362 | |||
| 1363 | new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); | ||
| 1364 | alloc_bytes += PAGE_SIZE; | ||
| 1365 | pmd_populate_kernel(&init_mm, pmd, new); | ||
| 1366 | } | ||
| 1367 | |||
| 1368 | pte = pte_offset_kernel(pmd, vstart); | ||
| 1369 | this_end = (vstart + PMD_SIZE) & PMD_MASK; | ||
| 1370 | if (this_end > vend) | ||
| 1371 | this_end = vend; | ||
| 1372 | |||
| 1373 | while (vstart < this_end) { | ||
| 1374 | pte_val(*pte) = (paddr | pgprot_val(prot)); | ||
| 1375 | |||
| 1376 | vstart += PAGE_SIZE; | ||
| 1377 | paddr += PAGE_SIZE; | ||
| 1378 | pte++; | ||
| 1379 | } | ||
| 1380 | } | ||
| 1381 | |||
| 1382 | return alloc_bytes; | ||
| 1383 | } | ||
| 1384 | |||
| 1385 | static struct linux_prom64_registers pall[MAX_BANKS] __initdata; | ||
| 1386 | static int pall_ents __initdata; | ||
| 1387 | |||
| 1388 | extern unsigned int kvmap_linear_patch[1]; | ||
| 1389 | |||
| 1390 | static void __init kernel_physical_mapping_init(void) | ||
| 1391 | { | ||
| 1392 | unsigned long i, mem_alloced = 0UL; | ||
| 1393 | |||
| 1394 | read_obp_memory("reg", &pall[0], &pall_ents); | ||
| 1395 | |||
| 1396 | for (i = 0; i < pall_ents; i++) { | ||
| 1397 | unsigned long phys_start, phys_end; | ||
| 1398 | |||
| 1399 | phys_start = pall[i].phys_addr; | ||
| 1400 | phys_end = phys_start + pall[i].reg_size; | ||
| 1401 | mem_alloced += kernel_map_range(phys_start, phys_end, | ||
| 1402 | PAGE_KERNEL); | ||
| 1403 | } | ||
| 1404 | |||
| 1405 | printk("Allocated %ld bytes for kernel page tables.\n", | ||
| 1406 | mem_alloced); | ||
| 1407 | |||
| 1408 | kvmap_linear_patch[0] = 0x01000000; /* nop */ | ||
| 1409 | flushi(&kvmap_linear_patch[0]); | ||
| 1410 | |||
| 1411 | __flush_tlb_all(); | ||
| 1412 | } | ||
| 1413 | |||
| 1414 | void kernel_map_pages(struct page *page, int numpages, int enable) | ||
| 1415 | { | ||
| 1416 | unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; | ||
| 1417 | unsigned long phys_end = phys_start + (numpages * PAGE_SIZE); | ||
| 1418 | |||
| 1419 | kernel_map_range(phys_start, phys_end, | ||
| 1420 | (enable ? PAGE_KERNEL : __pgprot(0))); | ||
| 1421 | |||
| 1422 | /* we should perform an IPI and flush all tlbs, | ||
| 1423 | * but that can deadlock->flush only current cpu. | ||
| 1424 | */ | ||
| 1425 | __flush_tlb_kernel_range(PAGE_OFFSET + phys_start, | ||
| 1426 | PAGE_OFFSET + phys_end); | ||
| 1427 | } | ||
| 1428 | #endif | ||
| 1429 | |||
| 1430 | unsigned long __init find_ecache_flush_span(unsigned long size) | ||
| 1431 | { | ||
| 1432 | int i; | ||
| 1433 | |||
| 1434 | for (i = 0; i < pavail_ents; i++) { | ||
| 1435 | if (pavail[i].reg_size >= size) | ||
| 1436 | return pavail[i].phys_addr; | ||
| 1437 | } | ||
| 1438 | |||
| 1439 | return ~0UL; | ||
| 1440 | } | ||
| 1441 | |||
| 1401 | /* paging_init() sets up the page tables */ | 1442 | /* paging_init() sets up the page tables */ |
| 1402 | 1443 | ||
| 1403 | extern void cheetah_ecache_flush_init(void); | 1444 | extern void cheetah_ecache_flush_init(void); |
| 1404 | 1445 | ||
| 1405 | static unsigned long last_valid_pfn; | 1446 | static unsigned long last_valid_pfn; |
| 1447 | pgd_t swapper_pg_dir[2048]; | ||
| 1406 | 1448 | ||
| 1407 | void __init paging_init(void) | 1449 | void __init paging_init(void) |
| 1408 | { | 1450 | { |
| 1409 | extern pmd_t swapper_pmd_dir[1024]; | 1451 | unsigned long end_pfn, pages_avail, shift; |
| 1410 | extern unsigned int sparc64_vpte_patchme1[1]; | 1452 | unsigned long real_end, i; |
| 1411 | extern unsigned int sparc64_vpte_patchme2[1]; | 1453 | |
| 1412 | unsigned long alias_base = kern_base + PAGE_OFFSET; | 1454 | /* Find available physical memory... */ |
| 1413 | unsigned long second_alias_page = 0; | 1455 | read_obp_memory("available", &pavail[0], &pavail_ents); |
| 1414 | unsigned long pt, flags, end_pfn, pages_avail; | 1456 | |
| 1415 | unsigned long shift = alias_base - ((unsigned long)KERNBASE); | 1457 | phys_base = 0xffffffffffffffffUL; |
| 1416 | unsigned long real_end; | 1458 | for (i = 0; i < pavail_ents; i++) |
| 1459 | phys_base = min(phys_base, pavail[i].phys_addr); | ||
| 1460 | |||
| 1461 | pfn_base = phys_base >> PAGE_SHIFT; | ||
| 1462 | |||
| 1463 | kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; | ||
| 1464 | kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; | ||
| 1417 | 1465 | ||
| 1418 | set_bit(0, mmu_context_bmap); | 1466 | set_bit(0, mmu_context_bmap); |
| 1419 | 1467 | ||
| 1468 | shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); | ||
| 1469 | |||
| 1420 | real_end = (unsigned long)_end; | 1470 | real_end = (unsigned long)_end; |
| 1421 | if ((real_end > ((unsigned long)KERNBASE + 0x400000))) | 1471 | if ((real_end > ((unsigned long)KERNBASE + 0x400000))) |
| 1422 | bigkernel = 1; | 1472 | bigkernel = 1; |
| 1423 | #ifdef CONFIG_BLK_DEV_INITRD | 1473 | if ((real_end > ((unsigned long)KERNBASE + 0x800000))) { |
| 1424 | if (sparc_ramdisk_image || sparc_ramdisk_image64) | 1474 | prom_printf("paging_init: Kernel > 8MB, too large.\n"); |
| 1425 | real_end = (PAGE_ALIGN(real_end) + PAGE_ALIGN(sparc_ramdisk_size)); | 1475 | prom_halt(); |
| 1426 | #endif | ||
| 1427 | |||
| 1428 | /* We assume physical memory starts at some 4mb multiple, | ||
| 1429 | * if this were not true we wouldn't boot up to this point | ||
| 1430 | * anyways. | ||
| 1431 | */ | ||
| 1432 | pt = kern_base | _PAGE_VALID | _PAGE_SZ4MB; | ||
| 1433 | pt |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W; | ||
| 1434 | local_irq_save(flags); | ||
| 1435 | if (tlb_type == spitfire) { | ||
| 1436 | __asm__ __volatile__( | ||
| 1437 | " stxa %1, [%0] %3\n" | ||
| 1438 | " stxa %2, [%5] %4\n" | ||
| 1439 | " membar #Sync\n" | ||
| 1440 | " flush %%g6\n" | ||
| 1441 | " nop\n" | ||
| 1442 | " nop\n" | ||
| 1443 | " nop\n" | ||
| 1444 | : /* No outputs */ | ||
| 1445 | : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt), | ||
| 1446 | "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3) | ||
| 1447 | : "memory"); | ||
| 1448 | if (real_end >= KERNBASE + 0x340000) { | ||
| 1449 | second_alias_page = alias_base + 0x400000; | ||
| 1450 | __asm__ __volatile__( | ||
| 1451 | " stxa %1, [%0] %3\n" | ||
| 1452 | " stxa %2, [%5] %4\n" | ||
| 1453 | " membar #Sync\n" | ||
| 1454 | " flush %%g6\n" | ||
| 1455 | " nop\n" | ||
| 1456 | " nop\n" | ||
| 1457 | " nop\n" | ||
| 1458 | : /* No outputs */ | ||
| 1459 | : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000), | ||
| 1460 | "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (60 << 3) | ||
| 1461 | : "memory"); | ||
| 1462 | } | ||
| 1463 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | ||
| 1464 | __asm__ __volatile__( | ||
| 1465 | " stxa %1, [%0] %3\n" | ||
| 1466 | " stxa %2, [%5] %4\n" | ||
| 1467 | " membar #Sync\n" | ||
| 1468 | " flush %%g6\n" | ||
| 1469 | " nop\n" | ||
| 1470 | " nop\n" | ||
| 1471 | " nop\n" | ||
| 1472 | : /* No outputs */ | ||
| 1473 | : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt), | ||
| 1474 | "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (13<<3)) | ||
| 1475 | : "memory"); | ||
| 1476 | if (real_end >= KERNBASE + 0x340000) { | ||
| 1477 | second_alias_page = alias_base + 0x400000; | ||
| 1478 | __asm__ __volatile__( | ||
| 1479 | " stxa %1, [%0] %3\n" | ||
| 1480 | " stxa %2, [%5] %4\n" | ||
| 1481 | " membar #Sync\n" | ||
| 1482 | " flush %%g6\n" | ||
| 1483 | " nop\n" | ||
| 1484 | " nop\n" | ||
| 1485 | " nop\n" | ||
| 1486 | : /* No outputs */ | ||
| 1487 | : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000), | ||
| 1488 | "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (12<<3)) | ||
| 1489 | : "memory"); | ||
| 1490 | } | ||
| 1491 | } | 1476 | } |
| 1492 | local_irq_restore(flags); | 1477 | |
| 1493 | 1478 | /* Set kernel pgd to upper alias so physical page computations | |
| 1494 | /* Now set kernel pgd to upper alias so physical page computations | ||
| 1495 | * work. | 1479 | * work. |
| 1496 | */ | 1480 | */ |
| 1497 | init_mm.pgd += ((shift) / (sizeof(pgd_t))); | 1481 | init_mm.pgd += ((shift) / (sizeof(pgd_t))); |
| 1498 | 1482 | ||
| 1499 | memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir)); | 1483 | memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir)); |
| 1500 | 1484 | ||
| 1501 | /* Now can init the kernel/bad page tables. */ | 1485 | /* Now can init the kernel/bad page tables. */ |
| 1502 | pud_set(pud_offset(&swapper_pg_dir[0], 0), | 1486 | pud_set(pud_offset(&swapper_pg_dir[0], 0), |
| 1503 | swapper_pmd_dir + (shift / sizeof(pgd_t))); | 1487 | swapper_low_pmd_dir + (shift / sizeof(pgd_t))); |
| 1504 | 1488 | ||
| 1505 | sparc64_vpte_patchme1[0] |= | 1489 | swapper_pgd_zero = pgd_val(swapper_pg_dir[0]); |
| 1506 | (((unsigned long)pgd_val(init_mm.pgd[0])) >> 10); | ||
| 1507 | sparc64_vpte_patchme2[0] |= | ||
| 1508 | (((unsigned long)pgd_val(init_mm.pgd[0])) & 0x3ff); | ||
| 1509 | flushi((long)&sparc64_vpte_patchme1[0]); | ||
| 1510 | 1490 | ||
| 1511 | /* Setup bootmem... */ | ||
| 1512 | pages_avail = 0; | ||
| 1513 | last_valid_pfn = end_pfn = bootmem_init(&pages_avail); | ||
| 1514 | |||
| 1515 | /* Inherit non-locked OBP mappings. */ | ||
| 1516 | inherit_prom_mappings(); | 1491 | inherit_prom_mappings(); |
| 1517 | 1492 | ||
| 1518 | /* Ok, we can use our TLB miss and window trap handlers safely. | 1493 | /* Ok, we can use our TLB miss and window trap handlers safely. |
| @@ -1527,13 +1502,16 @@ void __init paging_init(void) | |||
| 1527 | 1502 | ||
| 1528 | inherit_locked_prom_mappings(1); | 1503 | inherit_locked_prom_mappings(1); |
| 1529 | 1504 | ||
| 1530 | /* We only created DTLB mapping of this stuff. */ | ||
| 1531 | spitfire_flush_dtlb_nucleus_page(alias_base); | ||
| 1532 | if (second_alias_page) | ||
| 1533 | spitfire_flush_dtlb_nucleus_page(second_alias_page); | ||
| 1534 | |||
| 1535 | __flush_tlb_all(); | 1505 | __flush_tlb_all(); |
| 1536 | 1506 | ||
| 1507 | /* Setup bootmem... */ | ||
| 1508 | pages_avail = 0; | ||
| 1509 | last_valid_pfn = end_pfn = bootmem_init(&pages_avail); | ||
| 1510 | |||
| 1511 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
| 1512 | kernel_physical_mapping_init(); | ||
| 1513 | #endif | ||
| 1514 | |||
| 1537 | { | 1515 | { |
| 1538 | unsigned long zones_size[MAX_NR_ZONES]; | 1516 | unsigned long zones_size[MAX_NR_ZONES]; |
| 1539 | unsigned long zholes_size[MAX_NR_ZONES]; | 1517 | unsigned long zholes_size[MAX_NR_ZONES]; |
| @@ -1554,128 +1532,35 @@ void __init paging_init(void) | |||
| 1554 | device_scan(); | 1532 | device_scan(); |
| 1555 | } | 1533 | } |
| 1556 | 1534 | ||
| 1557 | /* Ok, it seems that the prom can allocate some more memory chunks | ||
| 1558 | * as a side effect of some prom calls we perform during the | ||
| 1559 | * boot sequence. My most likely theory is that it is from the | ||
| 1560 | * prom_set_traptable() call, and OBP is allocating a scratchpad | ||
| 1561 | * for saving client program register state etc. | ||
| 1562 | */ | ||
| 1563 | static void __init sort_memlist(struct linux_mlist_p1275 *thislist) | ||
| 1564 | { | ||
| 1565 | int swapi = 0; | ||
| 1566 | int i, mitr; | ||
| 1567 | unsigned long tmpaddr, tmpsize; | ||
| 1568 | unsigned long lowest; | ||
| 1569 | |||
| 1570 | for (i = 0; thislist[i].theres_more != 0; i++) { | ||
| 1571 | lowest = thislist[i].start_adr; | ||
| 1572 | for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++) | ||
| 1573 | if (thislist[mitr].start_adr < lowest) { | ||
| 1574 | lowest = thislist[mitr].start_adr; | ||
| 1575 | swapi = mitr; | ||
| 1576 | } | ||
| 1577 | if (lowest == thislist[i].start_adr) | ||
| 1578 | continue; | ||
| 1579 | tmpaddr = thislist[swapi].start_adr; | ||
| 1580 | tmpsize = thislist[swapi].num_bytes; | ||
| 1581 | for (mitr = swapi; mitr > i; mitr--) { | ||
| 1582 | thislist[mitr].start_adr = thislist[mitr-1].start_adr; | ||
| 1583 | thislist[mitr].num_bytes = thislist[mitr-1].num_bytes; | ||
| 1584 | } | ||
| 1585 | thislist[i].start_adr = tmpaddr; | ||
| 1586 | thislist[i].num_bytes = tmpsize; | ||
| 1587 | } | ||
| 1588 | } | ||
| 1589 | |||
| 1590 | void __init rescan_sp_banks(void) | ||
| 1591 | { | ||
| 1592 | struct linux_prom64_registers memlist[64]; | ||
| 1593 | struct linux_mlist_p1275 avail[64], *mlist; | ||
| 1594 | unsigned long bytes, base_paddr; | ||
| 1595 | int num_regs, node = prom_finddevice("/memory"); | ||
| 1596 | int i; | ||
| 1597 | |||
| 1598 | num_regs = prom_getproperty(node, "available", | ||
| 1599 | (char *) memlist, sizeof(memlist)); | ||
| 1600 | num_regs = (num_regs / sizeof(struct linux_prom64_registers)); | ||
| 1601 | for (i = 0; i < num_regs; i++) { | ||
| 1602 | avail[i].start_adr = memlist[i].phys_addr; | ||
| 1603 | avail[i].num_bytes = memlist[i].reg_size; | ||
| 1604 | avail[i].theres_more = &avail[i + 1]; | ||
| 1605 | } | ||
| 1606 | avail[i - 1].theres_more = NULL; | ||
| 1607 | sort_memlist(avail); | ||
| 1608 | |||
| 1609 | mlist = &avail[0]; | ||
| 1610 | i = 0; | ||
| 1611 | bytes = mlist->num_bytes; | ||
| 1612 | base_paddr = mlist->start_adr; | ||
| 1613 | |||
| 1614 | sp_banks[0].base_addr = base_paddr; | ||
| 1615 | sp_banks[0].num_bytes = bytes; | ||
| 1616 | |||
| 1617 | while (mlist->theres_more != NULL){ | ||
| 1618 | i++; | ||
| 1619 | mlist = mlist->theres_more; | ||
| 1620 | bytes = mlist->num_bytes; | ||
| 1621 | if (i >= SPARC_PHYS_BANKS-1) { | ||
| 1622 | printk ("The machine has more banks than " | ||
| 1623 | "this kernel can support\n" | ||
| 1624 | "Increase the SPARC_PHYS_BANKS " | ||
| 1625 | "setting (currently %d)\n", | ||
| 1626 | SPARC_PHYS_BANKS); | ||
| 1627 | i = SPARC_PHYS_BANKS-1; | ||
| 1628 | break; | ||
| 1629 | } | ||
| 1630 | |||
| 1631 | sp_banks[i].base_addr = mlist->start_adr; | ||
| 1632 | sp_banks[i].num_bytes = mlist->num_bytes; | ||
| 1633 | } | ||
| 1634 | |||
| 1635 | i++; | ||
| 1636 | sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL; | ||
| 1637 | sp_banks[i].num_bytes = 0; | ||
| 1638 | |||
| 1639 | for (i = 0; sp_banks[i].num_bytes != 0; i++) | ||
| 1640 | sp_banks[i].num_bytes &= PAGE_MASK; | ||
| 1641 | } | ||
| 1642 | |||
| 1643 | static void __init taint_real_pages(void) | 1535 | static void __init taint_real_pages(void) |
| 1644 | { | 1536 | { |
| 1645 | struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS]; | ||
| 1646 | int i; | 1537 | int i; |
| 1647 | 1538 | ||
| 1648 | for (i = 0; i < SPARC_PHYS_BANKS; i++) { | 1539 | read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents); |
| 1649 | saved_sp_banks[i].base_addr = | ||
| 1650 | sp_banks[i].base_addr; | ||
| 1651 | saved_sp_banks[i].num_bytes = | ||
| 1652 | sp_banks[i].num_bytes; | ||
| 1653 | } | ||
| 1654 | |||
| 1655 | rescan_sp_banks(); | ||
| 1656 | 1540 | ||
| 1657 | /* Find changes discovered in the sp_bank rescan and | 1541 | /* Find changes discovered in the physmem available rescan and |
| 1658 | * reserve the lost portions in the bootmem maps. | 1542 | * reserve the lost portions in the bootmem maps. |
| 1659 | */ | 1543 | */ |
| 1660 | for (i = 0; saved_sp_banks[i].num_bytes; i++) { | 1544 | for (i = 0; i < pavail_ents; i++) { |
| 1661 | unsigned long old_start, old_end; | 1545 | unsigned long old_start, old_end; |
| 1662 | 1546 | ||
| 1663 | old_start = saved_sp_banks[i].base_addr; | 1547 | old_start = pavail[i].phys_addr; |
| 1664 | old_end = old_start + | 1548 | old_end = old_start + |
| 1665 | saved_sp_banks[i].num_bytes; | 1549 | pavail[i].reg_size; |
| 1666 | while (old_start < old_end) { | 1550 | while (old_start < old_end) { |
| 1667 | int n; | 1551 | int n; |
| 1668 | 1552 | ||
| 1669 | for (n = 0; sp_banks[n].num_bytes; n++) { | 1553 | for (n = 0; pavail_rescan_ents; n++) { |
| 1670 | unsigned long new_start, new_end; | 1554 | unsigned long new_start, new_end; |
| 1671 | 1555 | ||
| 1672 | new_start = sp_banks[n].base_addr; | 1556 | new_start = pavail_rescan[n].phys_addr; |
| 1673 | new_end = new_start + sp_banks[n].num_bytes; | 1557 | new_end = new_start + |
| 1558 | pavail_rescan[n].reg_size; | ||
| 1674 | 1559 | ||
| 1675 | if (new_start <= old_start && | 1560 | if (new_start <= old_start && |
| 1676 | new_end >= (old_start + PAGE_SIZE)) { | 1561 | new_end >= (old_start + PAGE_SIZE)) { |
| 1677 | set_bit (old_start >> 22, | 1562 | set_bit(old_start >> 22, |
| 1678 | sparc64_valid_addr_bitmap); | 1563 | sparc64_valid_addr_bitmap); |
| 1679 | goto do_next_page; | 1564 | goto do_next_page; |
| 1680 | } | 1565 | } |
| 1681 | } | 1566 | } |
| @@ -1695,8 +1580,7 @@ void __init mem_init(void) | |||
| 1695 | 1580 | ||
| 1696 | i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6); | 1581 | i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6); |
| 1697 | i += 1; | 1582 | i += 1; |
| 1698 | sparc64_valid_addr_bitmap = (unsigned long *) | 1583 | sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3); |
| 1699 | __alloc_bootmem(i << 3, SMP_CACHE_BYTES, bootmap_base); | ||
| 1700 | if (sparc64_valid_addr_bitmap == NULL) { | 1584 | if (sparc64_valid_addr_bitmap == NULL) { |
| 1701 | prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n"); | 1585 | prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n"); |
| 1702 | prom_halt(); | 1586 | prom_halt(); |
| @@ -1749,7 +1633,7 @@ void __init mem_init(void) | |||
| 1749 | cheetah_ecache_flush_init(); | 1633 | cheetah_ecache_flush_init(); |
| 1750 | } | 1634 | } |
| 1751 | 1635 | ||
| 1752 | void free_initmem (void) | 1636 | void free_initmem(void) |
| 1753 | { | 1637 | { |
| 1754 | unsigned long addr, initend; | 1638 | unsigned long addr, initend; |
| 1755 | 1639 | ||
