Diffstat (limited to 'arch/sparc64/mm/init.c')
 -rw-r--r--  arch/sparc64/mm/init.c | 280
 1 file changed, 100 insertions(+), 180 deletions(-)
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index f146071a4b2a..cafadcbcdf38 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -122,24 +122,19 @@ static void __init read_obp_memory(const char *property,
 			size = 0UL;
 			base = new_base;
 		}
-		regs[i].phys_addr = base;
-		regs[i].reg_size = size;
-	}
-
-	for (i = 0; i < ents; i++) {
-		if (regs[i].reg_size == 0UL) {
-			int j;
-
-			for (j = i; j < ents - 1; j++) {
-				regs[j].phys_addr =
-					regs[j+1].phys_addr;
-				regs[j].reg_size =
-					regs[j+1].reg_size;
-			}
-
-			ents--;
+		if (size == 0UL) {
+			/* If it is empty, simply get rid of it.
+			 * This simplifies the logic of the other
+			 * functions that process these arrays.
+			 */
+			memmove(&regs[i], &regs[i + 1],
+				(ents - i - 1) * sizeof(regs[0]));
 			i--;
+			ents--;
+			continue;
 		}
+		regs[i].phys_addr = base;
+		regs[i].reg_size = size;
 	}
 
 	*num_ents = ents;
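
The new code replaces the hand-rolled element-by-element shift loop with a single memmove(). A minimal standalone sketch of that compaction idiom, using a simplified regs array (struct layout and test values invented for illustration):

	#include <stdio.h>
	#include <string.h>

	struct reg_entry {
		unsigned long phys_addr;
		unsigned long reg_size;
	};

	/* Drop every zero-size entry by sliding the array tail down
	 * over it, exactly as the memmove() in the hunk above does.
	 */
	static int compact_regs(struct reg_entry *regs, int ents)
	{
		for (int i = 0; i < ents; i++) {
			if (regs[i].reg_size == 0UL) {
				memmove(&regs[i], &regs[i + 1],
					(ents - i - 1) * sizeof(regs[0]));
				i--;	/* re-examine the slot just filled */
				ents--;
			}
		}
		return ents;
	}

	int main(void)
	{
		struct reg_entry regs[] = {
			{ 0x1000, 0x100 }, { 0x2000, 0 }, { 0x3000, 0x300 },
		};
		int n = compact_regs(regs, 3);

		for (int i = 0; i < n; i++)
			printf("%#lx/%#lx\n", regs[i].phys_addr, regs[i].reg_size);
		return 0;
	}

Note the i--/ents-- pair: after the move, slot i holds a not-yet-inspected entry, so the loop must revisit it.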
@@ -154,15 +149,6 @@ unsigned long *sparc64_valid_addr_bitmap __read_mostly;
 unsigned long kern_base __read_mostly;
 unsigned long kern_size __read_mostly;
 
-/* get_new_mmu_context() uses "cache + 1". */
-DEFINE_SPINLOCK(ctx_alloc_lock);
-unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
-#define CTX_BMAP_SLOTS (1UL << (CTX_NR_BITS - 6))
-unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
-
-/* References to special section boundaries */
-extern char _start[], _end[];
-
 /* Initial ramdisk setup */
 extern unsigned long sparc_ramdisk_image64;
 extern unsigned int sparc_ramdisk_image;
@@ -406,19 +392,70 @@ void __kprobes flush_icache_range(unsigned long start, unsigned long end)
 	if (tlb_type == spitfire) {
 		unsigned long kaddr;
 
-		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
-			__flush_icache_page(__get_phys(kaddr));
+		/* This code only runs on Spitfire cpus so this is
+		 * why we can assume _PAGE_PADDR_4U.
+		 */
+		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
+			unsigned long paddr, mask = _PAGE_PADDR_4U;
+
+			if (kaddr >= PAGE_OFFSET)
+				paddr = kaddr & mask;
+			else {
+				pgd_t *pgdp = pgd_offset_k(kaddr);
+				pud_t *pudp = pud_offset(pgdp, kaddr);
+				pmd_t *pmdp = pmd_offset(pudp, kaddr);
+				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);
+
+				paddr = pte_val(*ptep) & mask;
+			}
+			__flush_icache_page(paddr);
+		}
 	}
 }
 
 void show_mem(void)
 {
-	printk("Mem-info:\n");
+	unsigned long total = 0, reserved = 0;
+	unsigned long shared = 0, cached = 0;
+	pg_data_t *pgdat;
+
+	printk(KERN_INFO "Mem-info:\n");
 	show_free_areas();
-	printk("Free swap: %6ldkB\n",
+	printk(KERN_INFO "Free swap: %6ldkB\n",
 	       nr_swap_pages << (PAGE_SHIFT-10));
-	printk("%ld pages of RAM\n", num_physpages);
-	printk("%lu free pages\n", nr_free_pages());
+	for_each_online_pgdat(pgdat) {
+		unsigned long i, flags;
+
+		pgdat_resize_lock(pgdat, &flags);
+		for (i = 0; i < pgdat->node_spanned_pages; i++) {
+			struct page *page = pgdat_page_nr(pgdat, i);
+			total++;
+			if (PageReserved(page))
+				reserved++;
+			else if (PageSwapCache(page))
+				cached++;
+			else if (page_count(page))
+				shared += page_count(page) - 1;
+		}
+		pgdat_resize_unlock(pgdat, &flags);
+	}
+
+	printk(KERN_INFO "%lu pages of RAM\n", total);
+	printk(KERN_INFO "%lu reserved pages\n", reserved);
+	printk(KERN_INFO "%lu pages shared\n", shared);
+	printk(KERN_INFO "%lu pages swap cached\n", cached);
+
+	printk(KERN_INFO "%lu pages dirty\n",
+	       global_page_state(NR_FILE_DIRTY));
+	printk(KERN_INFO "%lu pages writeback\n",
+	       global_page_state(NR_WRITEBACK));
+	printk(KERN_INFO "%lu pages mapped\n",
+	       global_page_state(NR_FILE_MAPPED));
+	printk(KERN_INFO "%lu pages slab\n",
+	       global_page_state(NR_SLAB_RECLAIMABLE) +
+	       global_page_state(NR_SLAB_UNRECLAIMABLE));
+	printk(KERN_INFO "%lu pages pagetables\n",
+	       global_page_state(NR_PAGETABLE));
 }
 
 void mmu_info(struct seq_file *m)
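
The rewritten flush_icache_range() splits kernel virtual addresses into two cases: addresses at or above PAGE_OFFSET sit in the linear mapping, where the physical address is just the virtual address with the PA bits masked out, while anything else needs a pgd/pud/pmd/pte walk. A standalone model of that decision, with illustrative constants standing in for PAGE_OFFSET and _PAGE_PADDR_4U (not the kernel's exact values):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_OFFSET	0xfffff80000000000ULL	/* linear-map base, illustrative */
	#define PADDR_MASK	0x000001ffffffe000ULL	/* stand-in for _PAGE_PADDR_4U   */

	static uint64_t walk_page_tables(uint64_t va)
	{
		/* Placeholder for the pgd/pud/pmd/pte walk the real
		 * code performs for non-linear-mapped addresses.
		 */
		(void)va;
		return 0;
	}

	static uint64_t kernel_va_to_pa(uint64_t va)
	{
		if (va >= PAGE_OFFSET)
			return va & PADDR_MASK;	/* linear mapping: no walk needed */
		return walk_page_tables(va);	/* vmalloc etc.: walk the tables  */
	}

	int main(void)
	{
		uint64_t va = PAGE_OFFSET + 0x2000ULL;

		printf("%#llx -> %#llx\n",
		       (unsigned long long)va,
		       (unsigned long long)kernel_va_to_pa(va));
		return 0;
	}

The fast path is why the old __get_phys() helper could be dropped here: on the linear map a mask is all the translation there is.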
@@ -658,6 +695,13 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
 }
 #endif /* DCACHE_ALIASING_POSSIBLE */
 
+/* get_new_mmu_context() uses "cache + 1". */
+DEFINE_SPINLOCK(ctx_alloc_lock);
+unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
+#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
+#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
+DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
+
 /* Caller does TLB context flushing on local CPU if necessary.
  * The caller also ensures that CTX_VALID(mm->context) is false.
  *
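
The bitmap declaration now uses the generic DECLARE_BITMAP/BITS_TO_LONGS helpers instead of open-coded shift math. A quick check that BITS_TO_LONGS yields the same slot count the old `1UL << (CTX_NR_BITS - 6)` expression did on a 64-bit build (CTX_NR_BITS of 13 assumed here purely for illustration):

	#include <stdio.h>

	#define CTX_NR_BITS	13
	#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
	#define BITS_PER_LONG	64
	#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

	int main(void)
	{
		unsigned long old_slots = 1UL << (CTX_NR_BITS - 6); /* 2^13 / 2^6 */
		unsigned long new_slots = BITS_TO_LONGS(MAX_CTX_NR);

		printf("old=%lu new=%lu\n", old_slots, new_slots); /* both 128 */
		return 0;
	}

The helper also rounds up rather than truncating, so the declaration stays correct even if the bit count is not a multiple of BITS_PER_LONG.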
@@ -717,95 +761,6 @@ out:
 	smp_new_mmu_context_version();
 }
 
-void sparc_ultra_dump_itlb(void)
-{
-	int slot;
-
-	if (tlb_type == spitfire) {
-		printk ("Contents of itlb: ");
-		for (slot = 0; slot < 14; slot++) printk (" ");
-		printk ("%2x:%016lx,%016lx\n",
-			0,
-			spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
-		for (slot = 1; slot < 64; slot+=3) {
-			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
-				slot,
-				spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
-				slot+1,
-				spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
-				slot+2,
-				spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
-		}
-	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-		printk ("Contents of itlb0:\n");
-		for (slot = 0; slot < 16; slot+=2) {
-			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
-				slot,
-				cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot),
-				slot+1,
-				cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1));
-		}
-		printk ("Contents of itlb2:\n");
-		for (slot = 0; slot < 128; slot+=2) {
-			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
-				slot,
-				cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot),
-				slot+1,
-				cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1));
-		}
-	}
-}
-
-void sparc_ultra_dump_dtlb(void)
-{
-	int slot;
-
-	if (tlb_type == spitfire) {
-		printk ("Contents of dtlb: ");
-		for (slot = 0; slot < 14; slot++) printk (" ");
-		printk ("%2x:%016lx,%016lx\n", 0,
-			spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
-		for (slot = 1; slot < 64; slot+=3) {
-			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
-				slot,
-				spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
-				slot+1,
-				spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
-				slot+2,
-				spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
-		}
-	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-		printk ("Contents of dtlb0:\n");
-		for (slot = 0; slot < 16; slot+=2) {
-			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
-				slot,
-				cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot),
-				slot+1,
-				cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1));
-		}
-		printk ("Contents of dtlb2:\n");
-		for (slot = 0; slot < 512; slot+=2) {
-			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
-				slot,
-				cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2),
-				slot+1,
-				cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2));
-		}
-		if (tlb_type == cheetah_plus) {
-			printk ("Contents of dtlb3:\n");
-			for (slot = 0; slot < 512; slot+=2) {
-				printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
-					slot,
-					cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3),
-					slot+1,
-					cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3));
-			}
-		}
-	}
-}
-
-extern unsigned long cmdline_memory_size;
-
 /* Find a free area for the bootmem map, avoiding the kernel image
  * and the initial ramdisk.
  */
@@ -815,8 +770,8 @@ static unsigned long __init choose_bootmap_pfn(unsigned long start_pfn,
 	unsigned long avoid_start, avoid_end, bootmap_size;
 	int i;
 
-	bootmap_size = ((end_pfn - start_pfn) + 7) / 8;
-	bootmap_size = ALIGN(bootmap_size, sizeof(long));
+	bootmap_size = bootmem_bootmap_pages(end_pfn - start_pfn);
+	bootmap_size <<= PAGE_SHIFT;
 
 	avoid_start = avoid_end = 0;
 #ifdef CONFIG_BLK_DEV_INITRD
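
The old two lines sized the map in bytes and aligned only to sizeof(long); bootmem_bootmap_pages() sizes it in whole pages. A sketch of the difference, with bootmem_bootmap_pages() written out as its usual one-bit-per-page round-up (an assumption of this sketch, along with the 8K page size used on sparc64):

	#include <stdio.h>

	#define PAGE_SHIFT 13
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)
	#define ALIGN(x,a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

	static unsigned long bootmem_bootmap_pages(unsigned long pages)
	{
		unsigned long bytes = (pages + 7) / 8;		/* one bit per page     */
		return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;	/* round up to pages    */
	}

	int main(void)
	{
		unsigned long pages = 100000;
		unsigned long old_bytes = ALIGN((pages + 7) / 8, sizeof(long));
		unsigned long new_bytes = bootmem_bootmap_pages(pages) << PAGE_SHIFT;

		/* The old size could end mid-page; the new one is page-granular,
		 * which is what the later reserve_bootmem() of the map expects.
		 */
		printf("old=%lu bytes, new=%lu bytes\n", old_bytes, new_bytes);
		return 0;
	}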
@@ -983,6 +938,20 @@ static void __init trim_pavail(unsigned long *cur_size_p,
 	}
 }
 
+/* About pages_avail, this is the value we will use to calculate
+ * the zholes_size[] argument given to free_area_init_node().  The
+ * page allocator uses this to calculate nr_kernel_pages,
+ * nr_all_pages and zone->present_pages.  On NUMA it is used
+ * to calculate zone->min_unmapped_pages and zone->min_slab_pages.
+ *
+ * So this number should really be set to what the page allocator
+ * actually ends up with.  This means:
+ * 1) It should include bootmem map pages, we'll release those.
+ * 2) It should not include the kernel image, except for the
+ *    __init sections which we will also release.
+ * 3) It should include the initrd image, since we'll release
+ *    that too.
+ */
 static unsigned long __init bootmem_init(unsigned long *pages_avail,
 					 unsigned long phys_base)
 {
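
The comment above is the heart of the patch: pages_avail must match what the page allocator really receives, because the difference from the spanned range becomes zholes_size[]. A toy single-zone version of that accounting (all numbers invented, structure simplified from the real multi-zone code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long spanned      = 262144; /* pages the zone spans       */
		unsigned long kernel_pages = 1024;   /* kernel image, stays locked */
		unsigned long initmem      = 64;     /* __init part, freed later   */

		/* Per the comment above: the kernel image is the only permanent
		 * reservation; the bootmem map and initrd are released later and
		 * the __init pages come back, so only (kernel - initmem) counts
		 * as a hole.
		 */
		unsigned long pages_avail = spanned - kernel_pages + initmem;
		unsigned long zholes      = spanned - pages_avail;

		printf("pages_avail=%lu zholes=%lu\n", pages_avail, zholes);
		return 0;
	}

This is why the hunks below delete the pages_avail decrements for the initrd and the bootmem map: subtracting memory that is later freed back would overstate the holes.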
@@ -1069,7 +1038,6 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail,
 			    initrd_start, initrd_end);
 #endif
 		reserve_bootmem(initrd_start, size);
-		*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
 
 		initrd_start += PAGE_OFFSET;
 		initrd_end += PAGE_OFFSET;
@@ -1082,6 +1050,11 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail,
 	reserve_bootmem(kern_base, kern_size);
 	*pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;
 
+	/* Add back in the initmem pages. */
+	size = ((unsigned long)(__init_end) & PAGE_MASK) -
+		PAGE_ALIGN((unsigned long)__init_begin);
+	*pages_avail += size >> PAGE_SHIFT;
+
 	/* Reserve the bootmem map.   We do not account for it
 	 * in pages_avail because we will release that memory
 	 * in free_all_bootmem.
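
The size arithmetic just added rounds the __init section inward to whole pages before adding it back. A standalone check of that computation (8K pages; the section addresses are hypothetical):

	#include <stdio.h>

	#define PAGE_SHIFT 13
	#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))
	#define PAGE_ALIGN(x) (((x) + (1UL << PAGE_SHIFT) - 1) & PAGE_MASK)

	int main(void)
	{
		unsigned long __init_begin = 0x600000UL;  /* hypothetical */
		unsigned long __init_end   = 0x680000UL;  /* hypothetical */

		/* Round inward so only pages lying fully inside the section
		 * are counted; partial edge pages stay reserved.
		 */
		unsigned long size = (__init_end & PAGE_MASK) -
				     PAGE_ALIGN(__init_begin);

		printf("initmem pages added back: %lu\n", size >> PAGE_SHIFT);
		return 0;
	}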
@@ -1092,7 +1065,6 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail,
 		    (bootmap_pfn << PAGE_SHIFT), size);
 #endif
 	reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
-	*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
 
 	for (i = 0; i < pavail_ents; i++) {
 		unsigned long start_pfn, end_pfn;
@@ -1584,6 +1556,10 @@ void __init mem_init(void)
 #ifdef CONFIG_DEBUG_BOOTMEM
 	prom_printf("mem_init: Calling free_all_bootmem().\n");
 #endif
+
+	/* We subtract one to account for the mem_map_zero page
+	 * allocated below.
+	 */
 	totalram_pages = num_physpages = free_all_bootmem() - 1;
 
 	/*
@@ -1883,62 +1859,6 @@ static unsigned long kern_large_tte(unsigned long paddr)
 	return val | paddr;
 }
 
-/*
- * Translate PROM's mapping we capture at boot time into physical address.
- * The second parameter is only set from prom_callback() invocations.
- */
-unsigned long prom_virt_to_phys(unsigned long promva, int *error)
-{
-	unsigned long mask;
-	int i;
-
-	mask = _PAGE_PADDR_4U;
-	if (tlb_type == hypervisor)
-		mask = _PAGE_PADDR_4V;
-
-	for (i = 0; i < prom_trans_ents; i++) {
-		struct linux_prom_translation *p = &prom_trans[i];
-
-		if (promva >= p->virt &&
-		    promva < (p->virt + p->size)) {
-			unsigned long base = p->data & mask;
-
-			if (error)
-				*error = 0;
-			return base + (promva & (8192 - 1));
-		}
-	}
-	if (error)
-		*error = 1;
-	return 0UL;
-}
-
-/* XXX We should kill off this ugly thing at some point. XXX */
-unsigned long sun4u_get_pte(unsigned long addr)
-{
-	pgd_t *pgdp;
-	pud_t *pudp;
-	pmd_t *pmdp;
-	pte_t *ptep;
-	unsigned long mask = _PAGE_PADDR_4U;
-
-	if (tlb_type == hypervisor)
-		mask = _PAGE_PADDR_4V;
-
-	if (addr >= PAGE_OFFSET)
-		return addr & mask;
-
-	if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
-		return prom_virt_to_phys(addr, NULL);
-
-	pgdp = pgd_offset_k(addr);
-	pudp = pud_offset(pgdp, addr);
-	pmdp = pmd_offset(pudp, addr);
-	ptep = pte_offset_kernel(pmdp, addr);
-
-	return pte_val(*ptep) & mask;
-}
-
 /* If not locked, zap it. */
 void __flush_tlb_all(void)
 {