Diffstat (limited to 'arch/sparc64/mm')
-rw-r--r--  arch/sparc64/mm/init.c  100
1 file changed, 18 insertions, 82 deletions
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 3e6c4ecb8fee..90e644a0e933 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -722,57 +722,12 @@ out:
 static unsigned long __init choose_bootmap_pfn(unsigned long start_pfn,
                                                unsigned long end_pfn)
 {
-	unsigned long avoid_start, avoid_end, bootmap_size;
-	int i;
+	unsigned long bootmap_size;
 
 	bootmap_size = bootmem_bootmap_pages(end_pfn - start_pfn);
 	bootmap_size <<= PAGE_SHIFT;
 
-	avoid_start = avoid_end = 0;
-#ifdef CONFIG_BLK_DEV_INITRD
-	avoid_start = initrd_start;
-	avoid_end = PAGE_ALIGN(initrd_end);
-#endif
-
-	for (i = 0; i < pavail_ents; i++) {
-		unsigned long start, end;
-
-		start = pavail[i].phys_addr;
-		end = start + pavail[i].reg_size;
-
-		while (start < end) {
-			if (start >= kern_base &&
-			    start < PAGE_ALIGN(kern_base + kern_size)) {
-				start = PAGE_ALIGN(kern_base + kern_size);
-				continue;
-			}
-			if (start >= avoid_start && start < avoid_end) {
-				start = avoid_end;
-				continue;
-			}
-
-			if ((end - start) < bootmap_size)
-				break;
-
-			if (start < kern_base &&
-			    (start + bootmap_size) > kern_base) {
-				start = PAGE_ALIGN(kern_base + kern_size);
-				continue;
-			}
-
-			if (start < avoid_start &&
-			    (start + bootmap_size) > avoid_start) {
-				start = avoid_end;
-				continue;
-			}
-
-			/* OK, it doesn't overlap anything, use it. */
-			return start >> PAGE_SHIFT;
-		}
-	}
-
-	prom_printf("Cannot find free area for bootmap, aborting.\n");
-	prom_halt();
+	return lmb_alloc(bootmap_size, PAGE_SIZE) >> PAGE_SHIFT;
 }
 
 static void __init find_ramdisk(unsigned long phys_base)
@@ -825,8 +780,7 @@ static void __init find_ramdisk(unsigned long phys_base)
 static unsigned long __init bootmem_init(unsigned long *pages_avail,
                                          unsigned long phys_base)
 {
-	unsigned long bootmap_size, end_pfn;
-	unsigned long bootmap_pfn, size;
+	unsigned long end_pfn;
 	int i;
 
 	*pages_avail = lmb_phys_mem_size() >> PAGE_SHIFT;
@@ -836,49 +790,31 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail,
 	max_pfn = max_low_pfn = end_pfn;
 	min_low_pfn = (phys_base >> PAGE_SHIFT);
 
-	bootmap_pfn = choose_bootmap_pfn(min_low_pfn, end_pfn);
-
-	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn,
-					 min_low_pfn, end_pfn);
+	init_bootmem_node(NODE_DATA(0),
+			  choose_bootmap_pfn(min_low_pfn, end_pfn),
+			  min_low_pfn, end_pfn);
 
 	/* Now register the available physical memory with the
 	 * allocator.
 	 */
-	for (i = 0; i < pavail_ents; i++)
-		free_bootmem(pavail[i].phys_addr, pavail[i].reg_size);
+	for (i = 0; i < lmb.memory.cnt; i++)
+		free_bootmem(lmb.memory.region[i].base,
+			     lmb_size_bytes(&lmb.memory, i));
 
-#ifdef CONFIG_BLK_DEV_INITRD
-	if (initrd_start) {
-		size = initrd_end - initrd_start;
-
-		/* Reserve the initrd image area. */
-		reserve_bootmem(initrd_start, size, BOOTMEM_DEFAULT);
+	for (i = 0; i < lmb.reserved.cnt; i++)
+		reserve_bootmem(lmb.reserved.region[i].base,
+				lmb_size_bytes(&lmb.reserved, i),
+				BOOTMEM_DEFAULT);
 
-		initrd_start += PAGE_OFFSET;
-		initrd_end += PAGE_OFFSET;
-	}
-#endif
-	/* Reserve the kernel text/data/bss. */
-	reserve_bootmem(kern_base, kern_size, BOOTMEM_DEFAULT);
 	*pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;
 
-	/* Add back in the initmem pages. */
-	size = ((unsigned long)(__init_end) & PAGE_MASK) -
-		PAGE_ALIGN((unsigned long)__init_begin);
-	*pages_avail += size >> PAGE_SHIFT;
-
-	/* Reserve the bootmem map. We do not account for it
-	 * in pages_avail because we will release that memory
-	 * in free_all_bootmem.
-	 */
-	size = bootmap_size;
-	reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size, BOOTMEM_DEFAULT);
-
-	for (i = 0; i < pavail_ents; i++) {
-		unsigned long start_pfn, end_pfn;
+	for (i = 0; i < lmb.memory.cnt; ++i) {
+		unsigned long start_pfn, end_pfn, pages;
+
+		pages = lmb_size_pages(&lmb.memory, i);
+		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
+		end_pfn = start_pfn + pages;
 
-		start_pfn = pavail[i].phys_addr >> PAGE_SHIFT;
-		end_pfn = (start_pfn + (pavail[i].reg_size >> PAGE_SHIFT));
 		memory_present(0, start_pfn, end_pfn);
 	}
 
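
For orientation (an illustrative note, not part of the patch): the rewritten bootmem_init() no longer hand-tracks the kernel image, initrd and bootmap. It frees every range in lmb.memory into bootmem and then re-reserves every range in lmb.reserved, since LMB already recorded those reservations. A minimal userspace sketch of that ordering follows; struct region, free_range() and reserve_range() are stand-ins rather than the kernel's lmb/bootmem API, and the addresses are made up.

/* Illustrative sketch only -- models the "free everything, then re-reserve"
 * ordering used by the new bootmem_init(). */
#include <stdio.h>

struct region { unsigned long base, size; };

static void free_range(struct region r)
{
	printf("free    %#010lx - %#010lx\n", r.base, r.base + r.size);
}

static void reserve_range(struct region r)
{
	printf("reserve %#010lx - %#010lx\n", r.base, r.base + r.size);
}

int main(void)
{
	/* Hypothetical layout: all RAM, plus the ranges the early allocator
	 * has already handed out (kernel image, initrd, bootmap, ...). */
	struct region memory[]   = { { 0x00000000UL, 0x40000000UL } };
	struct region reserved[] = { { 0x00400000UL, 0x00800000UL },
				     { 0x01000000UL, 0x00200000UL } };
	size_t i;

	/* 1. Register every physical range with the boot-time allocator. */
	for (i = 0; i < sizeof(memory) / sizeof(memory[0]); i++)
		free_range(memory[i]);

	/* 2. Carve the already-reserved ranges back out; one generic loop
	 * replaces the old per-case kernel/initrd/bootmap bookkeeping. */
	for (i = 0; i < sizeof(reserved) / sizeof(reserved[0]); i++)
		reserve_range(reserved[i]);

	return 0;
}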