about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorDavid S. Miller <davem@sunset.davemloft.net>2006-03-08 05:16:07 -0500
committerDavid S. Miller <davem@sunset.davemloft.net>2006-03-20 04:14:22 -0500
commitd1112018b4bc82adf5c8a9c15a08954328f023ae (patch)
tree4d94ef6c153f028cfaaff711cf7d4f07aa90e9b4 /arch
parentee29074d3bd23848905f52c515974e0cd0219faa (diff)
[SPARC64]: Move over to sparsemem.
This has been pending for a long time, and the fact that we waste a ton of RAM on some configurations kind of pushed things over the edge.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r--arch/sparc64/Kconfig6
-rw-r--r--arch/sparc64/kernel/sparc64_ksyms.c7
-rw-r--r--arch/sparc64/mm/init.c134
3 files changed, 104 insertions(+), 43 deletions(-)
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 4c0a50a76554..a253a39c3ff6 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -186,6 +186,12 @@ endchoice
186 186
187endmenu 187endmenu
188 188
189config ARCH_SPARSEMEM_ENABLE
190 def_bool y
191
192config ARCH_SPARSEMEM_DEFAULT
193 def_bool y
194
189source "mm/Kconfig" 195source "mm/Kconfig"
190 196
191config GENERIC_ISA_DMA 197config GENERIC_ISA_DMA
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index e87fe7dfc7de..9914a17651b4 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -95,9 +95,6 @@ extern int __ashrdi3(int, int);
95 95
96extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs); 96extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs);
97 97
98extern unsigned long phys_base;
99extern unsigned long pfn_base;
100
101extern unsigned int sys_call_table[]; 98extern unsigned int sys_call_table[];
102 99
103extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); 100extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
@@ -346,11 +343,7 @@ EXPORT_SYMBOL(__strncpy_from_user);
346EXPORT_SYMBOL(__clear_user); 343EXPORT_SYMBOL(__clear_user);
347 344
348/* Various address conversion macros use this. */ 345/* Various address conversion macros use this. */
349EXPORT_SYMBOL(phys_base);
350EXPORT_SYMBOL(pfn_base);
351EXPORT_SYMBOL(sparc64_valid_addr_bitmap); 346EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
352EXPORT_SYMBOL(page_to_pfn);
353EXPORT_SYMBOL(pfn_to_page);
354 347
355/* No version information on this, heavily used in inline asm, 348/* No version information on this, heavily used in inline asm,
356 * and will always be 'void __ret_efault(void)'. 349 * and will always be 'void __ret_efault(void)'.
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index a63939347b3d..5f67b53b3a5b 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -130,11 +130,9 @@ static void __init read_obp_memory(const char *property,
130 130
131unsigned long *sparc64_valid_addr_bitmap __read_mostly; 131unsigned long *sparc64_valid_addr_bitmap __read_mostly;
132 132
133/* Ugly, but necessary... -DaveM */ 133/* Kernel physical address base and size in bytes. */
134unsigned long phys_base __read_mostly;
135unsigned long kern_base __read_mostly; 134unsigned long kern_base __read_mostly;
136unsigned long kern_size __read_mostly; 135unsigned long kern_size __read_mostly;
137unsigned long pfn_base __read_mostly;
138 136
139/* get_new_mmu_context() uses "cache + 1". */ 137/* get_new_mmu_context() uses "cache + 1". */
140DEFINE_SPINLOCK(ctx_alloc_lock); 138DEFINE_SPINLOCK(ctx_alloc_lock);
@@ -368,16 +366,6 @@ void __kprobes flush_icache_range(unsigned long start, unsigned long end)
368 } 366 }
369} 367}
370 368
371unsigned long page_to_pfn(struct page *page)
372{
373 return (unsigned long) ((page - mem_map) + pfn_base);
374}
375
376struct page *pfn_to_page(unsigned long pfn)
377{
378 return (mem_map + (pfn - pfn_base));
379}
380
381void show_mem(void) 369void show_mem(void)
382{ 370{
383 printk("Mem-info:\n"); 371 printk("Mem-info:\n");
@@ -773,9 +761,78 @@ void sparc_ultra_dump_dtlb(void)
773 761
774extern unsigned long cmdline_memory_size; 762extern unsigned long cmdline_memory_size;
775 763
776unsigned long __init bootmem_init(unsigned long *pages_avail) 764/* Find a free area for the bootmem map, avoiding the kernel image
765 * and the initial ramdisk.
766 */
767static unsigned long __init choose_bootmap_pfn(unsigned long start_pfn,
768 unsigned long end_pfn)
777{ 769{
778 unsigned long bootmap_size, start_pfn, end_pfn; 770 unsigned long avoid_start, avoid_end, bootmap_size;
771 int i;
772
773 bootmap_size = ((end_pfn - start_pfn) + 7) / 8;
774 bootmap_size = ALIGN(bootmap_size, sizeof(long));
775
776 avoid_start = avoid_end = 0;
777#ifdef CONFIG_BLK_DEV_INITRD
778 avoid_start = initrd_start;
779 avoid_end = PAGE_ALIGN(initrd_end);
780#endif
781
782#ifdef CONFIG_DEBUG_BOOTMEM
783 prom_printf("choose_bootmap_pfn: kern[%lx:%lx] avoid[%lx:%lx]\n",
784 kern_base, PAGE_ALIGN(kern_base + kern_size),
785 avoid_start, avoid_end);
786#endif
787 for (i = 0; i < pavail_ents; i++) {
788 unsigned long start, end;
789
790 start = pavail[i].phys_addr;
791 end = start + pavail[i].reg_size;
792
793 while (start < end) {
794 if (start >= kern_base &&
795 start < PAGE_ALIGN(kern_base + kern_size)) {
796 start = PAGE_ALIGN(kern_base + kern_size);
797 continue;
798 }
799 if (start >= avoid_start && start < avoid_end) {
800 start = avoid_end;
801 continue;
802 }
803
804 if ((end - start) < bootmap_size)
805 break;
806
807 if (start < kern_base &&
808 (start + bootmap_size) > kern_base) {
809 start = PAGE_ALIGN(kern_base + kern_size);
810 continue;
811 }
812
813 if (start < avoid_start &&
814 (start + bootmap_size) > avoid_start) {
815 start = avoid_end;
816 continue;
817 }
818
819 /* OK, it doesn't overlap anything, use it. */
820#ifdef CONFIG_DEBUG_BOOTMEM
821 prom_printf("choose_bootmap_pfn: Using %lx [%lx]\n",
822 start >> PAGE_SHIFT, start);
823#endif
824 return start >> PAGE_SHIFT;
825 }
826 }
827
828 prom_printf("Cannot find free area for bootmap, aborting.\n");
829 prom_halt();
830}
831
832static unsigned long __init bootmem_init(unsigned long *pages_avail,
833 unsigned long phys_base)
834{
835 unsigned long bootmap_size, end_pfn;
779 unsigned long end_of_phys_memory = 0UL; 836 unsigned long end_of_phys_memory = 0UL;
780 unsigned long bootmap_pfn, bytes_avail, size; 837 unsigned long bootmap_pfn, bytes_avail, size;
781 int i; 838 int i;
@@ -813,14 +870,6 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
813 870
814 *pages_avail = bytes_avail >> PAGE_SHIFT; 871 *pages_avail = bytes_avail >> PAGE_SHIFT;
815 872
816 /* Start with page aligned address of last symbol in kernel
817 * image. The kernel is hard mapped below PAGE_OFFSET in a
818 * 4MB locked TLB translation.
819 */
820 start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT;
821
822 bootmap_pfn = start_pfn;
823
824 end_pfn = end_of_phys_memory >> PAGE_SHIFT; 873 end_pfn = end_of_phys_memory >> PAGE_SHIFT;
825 874
826#ifdef CONFIG_BLK_DEV_INITRD 875#ifdef CONFIG_BLK_DEV_INITRD
@@ -837,23 +886,23 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
837 "(0x%016lx > 0x%016lx)\ndisabling initrd\n", 886 "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
838 initrd_end, end_of_phys_memory); 887 initrd_end, end_of_phys_memory);
839 initrd_start = 0; 888 initrd_start = 0;
840 } 889 initrd_end = 0;
841 if (initrd_start) {
842 if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
843 initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
844 bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
845 } 890 }
846 } 891 }
847#endif 892#endif
848 /* Initialize the boot-time allocator. */ 893 /* Initialize the boot-time allocator. */
849 max_pfn = max_low_pfn = end_pfn; 894 max_pfn = max_low_pfn = end_pfn;
850 min_low_pfn = pfn_base; 895 min_low_pfn = (phys_base >> PAGE_SHIFT);
896
897 bootmap_pfn = choose_bootmap_pfn(min_low_pfn, end_pfn);
851 898
852#ifdef CONFIG_DEBUG_BOOTMEM 899#ifdef CONFIG_DEBUG_BOOTMEM
853 prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n", 900 prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
854 min_low_pfn, bootmap_pfn, max_low_pfn); 901 min_low_pfn, bootmap_pfn, max_low_pfn);
855#endif 902#endif
856 bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn); 903 bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn,
904 (phys_base >> PAGE_SHIFT),
905 end_pfn);
857 906
858 /* Now register the available physical memory with the 907 /* Now register the available physical memory with the
859 * allocator. 908 * allocator.
@@ -901,6 +950,20 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
901 reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size); 950 reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
902 *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; 951 *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
903 952
953 for (i = 0; i < pavail_ents; i++) {
954 unsigned long start_pfn, end_pfn;
955
956 start_pfn = pavail[i].phys_addr >> PAGE_SHIFT;
957 end_pfn = (start_pfn + (pavail[i].reg_size >> PAGE_SHIFT));
958#ifdef CONFIG_DEBUG_BOOTMEM
959 prom_printf("memory_present(0, %lx, %lx)\n",
960 start_pfn, end_pfn);
961#endif
962 memory_present(0, start_pfn, end_pfn);
963 }
964
965 sparse_init();
966
904 return end_pfn; 967 return end_pfn;
905} 968}
906 969
@@ -1180,7 +1243,7 @@ static void sun4v_pgprot_init(void);
1180 1243
1181void __init paging_init(void) 1244void __init paging_init(void)
1182{ 1245{
1183 unsigned long end_pfn, pages_avail, shift; 1246 unsigned long end_pfn, pages_avail, shift, phys_base;
1184 unsigned long real_end, i; 1247 unsigned long real_end, i;
1185 1248
1186 kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; 1249 kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
@@ -1211,8 +1274,6 @@ void __init paging_init(void)
1211 for (i = 0; i < pavail_ents; i++) 1274 for (i = 0; i < pavail_ents; i++)
1212 phys_base = min(phys_base, pavail[i].phys_addr); 1275 phys_base = min(phys_base, pavail[i].phys_addr);
1213 1276
1214 pfn_base = phys_base >> PAGE_SHIFT;
1215
1216 set_bit(0, mmu_context_bmap); 1277 set_bit(0, mmu_context_bmap);
1217 1278
1218 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); 1279 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
@@ -1248,7 +1309,9 @@ void __init paging_init(void)
1248 1309
1249 /* Setup bootmem... */ 1310 /* Setup bootmem... */
1250 pages_avail = 0; 1311 pages_avail = 0;
1251 last_valid_pfn = end_pfn = bootmem_init(&pages_avail); 1312 last_valid_pfn = end_pfn = bootmem_init(&pages_avail, phys_base);
1313
1314 max_mapnr = last_valid_pfn - (phys_base >> PAGE_SHIFT);
1252 1315
1253 kernel_physical_mapping_init(); 1316 kernel_physical_mapping_init();
1254 1317
@@ -1261,7 +1324,7 @@ void __init paging_init(void)
1261 for (znum = 0; znum < MAX_NR_ZONES; znum++) 1324 for (znum = 0; znum < MAX_NR_ZONES; znum++)
1262 zones_size[znum] = zholes_size[znum] = 0; 1325 zones_size[znum] = zholes_size[znum] = 0;
1263 1326
1264 npages = end_pfn - pfn_base; 1327 npages = end_pfn - (phys_base >> PAGE_SHIFT);
1265 zones_size[ZONE_DMA] = npages; 1328 zones_size[ZONE_DMA] = npages;
1266 zholes_size[ZONE_DMA] = npages - pages_avail; 1329 zholes_size[ZONE_DMA] = npages - pages_avail;
1267 1330
@@ -1336,7 +1399,6 @@ void __init mem_init(void)
1336 1399
1337 taint_real_pages(); 1400 taint_real_pages();
1338 1401
1339 max_mapnr = last_valid_pfn - pfn_base;
1340 high_memory = __va(last_valid_pfn << PAGE_SHIFT); 1402 high_memory = __va(last_valid_pfn << PAGE_SHIFT);
1341 1403
1342#ifdef CONFIG_DEBUG_BOOTMEM 1404#ifdef CONFIG_DEBUG_BOOTMEM