Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/mm/hash_utils_64.c    8
-rw-r--r--   arch/powerpc/mm/mem.c             92
-rw-r--r--   arch/powerpc/mm/numa.c            17
3 files changed, 46 insertions, 71 deletions
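
Note on the pattern being adopted: this patch replaces open-coded index loops over memblock.memory / memblock.reserved (and direct use of the cnt and regions fields) with the for_each_memblock() iterator and the memblock_region_base_pfn() / memblock_region_end_pfn() helpers, so callers no longer reach into memblock internals. The standalone C sketch below only models the shape of that iteration; the demo_* types, the demo_for_each_region() macro and the sample data are simplified stand-ins invented for illustration, not the kernel's actual definitions.

/*
 * Minimal userspace sketch of the iteration pattern this patch adopts.
 * Everything named demo_* is a simplified stand-in for the kernel's
 * struct memblock_region / for_each_memblock(); only the loop shape is
 * meant to match the converted code.
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12

struct demo_region {
	unsigned long base;	/* physical base address */
	unsigned long size;	/* size in bytes */
};

struct demo_type {
	unsigned long cnt;
	struct demo_region *regions;
};

/* Walk every region of a type, like for_each_memblock(memory, reg). */
#define demo_for_each_region(type, reg)					\
	for ((reg) = (type)->regions;					\
	     (reg) < (type)->regions + (type)->cnt; (reg)++)

static unsigned long demo_region_base_pfn(const struct demo_region *reg)
{
	return reg->base >> DEMO_PAGE_SHIFT;
}

static unsigned long demo_region_end_pfn(const struct demo_region *reg)
{
	return (reg->base + reg->size) >> DEMO_PAGE_SHIFT;
}

int main(void)
{
	struct demo_region ram[] = {
		{ .base = 0x00000000, .size = 0x10000000 },	/* 256 MB */
		{ .base = 0x20000000, .size = 0x08000000 },	/* 128 MB, after a hole */
	};
	struct demo_type memory = { .cnt = 2, .regions = ram };
	struct demo_region *reg;

	/* Same shape as the converted loops: no index, no ->cnt in callers. */
	demo_for_each_region(&memory, reg)
		printf("region: pfn %lu..%lu\n",
		       demo_region_base_pfn(reg), demo_region_end_pfn(reg));
	return 0;
}

Compiled with any C compiler (e.g. cc demo.c, a hypothetical file name), it prints the two sample regions; the converted kernel sites below use the same loop shape against the real memblock data.
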
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index b1a3784744db..4072b871497d 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -588,7 +588,7 @@ static void __init htab_initialize(void)
 	unsigned long pteg_count;
 	unsigned long prot;
 	unsigned long base = 0, size = 0, limit;
-	int i;
+	struct memblock_region *reg;
 
 	DBG(" -> htab_initialize()\n");
 
@@ -659,9 +659,9 @@ static void __init htab_initialize(void)
 	 */
 
 	/* create bolted the linear mapping in the hash table */
-	for (i=0; i < memblock.memory.cnt; i++) {
-		base = (unsigned long)__va(memblock.memory.regions[i].base);
-		size = memblock.memory.region[i].size;
+	for_each_memblock(memory, reg) {
+		base = (unsigned long)__va(reg->base);
+		size = reg->size;
 
 		DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
 		    base, size, prot);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index a33f5c186fb7..52df5428ece4 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -82,18 +82,11 @@ int page_is_ram(unsigned long pfn)
 	return pfn < max_pfn;
 #else
 	unsigned long paddr = (pfn << PAGE_SHIFT);
-	int i;
-	for (i=0; i < memblock.memory.cnt; i++) {
-		unsigned long base;
+	struct memblock_region *reg;
 
-		base = memblock.memory.regions[i].base;
-
-		if ((paddr >= base) &&
-		    (paddr < (base + memblock.memory.regions[i].size))) {
+	for_each_memblock(memory, reg)
+		if (paddr >= reg->base && paddr < (reg->base + reg->size))
 			return 1;
-		}
-	}
-
 	return 0;
 #endif
 }
@@ -149,23 +142,19 @@ int
 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 		void *arg, int (*func)(unsigned long, unsigned long, void *))
 {
-	struct memblock_region res;
-	unsigned long pfn, len;
-	u64 end;
+	struct memblock_region *reg;
+	unsigned long end_pfn = start_pfn + nr_pages;
+	unsigned long tstart, tend;
 	int ret = -1;
 
-	res.base = (u64) start_pfn << PAGE_SHIFT;
-	res.size = (u64) nr_pages << PAGE_SHIFT;
-
-	end = res.base + res.size - 1;
-	while ((res.base < end) && (memblock_find(&res) >= 0)) {
-		pfn = (unsigned long)(res.base >> PAGE_SHIFT);
-		len = (unsigned long)(res.size >> PAGE_SHIFT);
-		ret = (*func)(pfn, len, arg);
+	for_each_memblock(memory, reg) {
+		tstart = max(start_pfn, memblock_region_base_pfn(reg));
+		tend = min(end_pfn, memblock_region_end_pfn(reg));
+		if (tstart >= tend)
+			continue;
+		ret = (*func)(tstart, tend - tstart, arg);
 		if (ret)
 			break;
-		res.base += (res.size + 1);
-		res.size = (end - res.base + 1);
 	}
 	return ret;
 }
@@ -179,9 +168,9 @@ EXPORT_SYMBOL_GPL(walk_system_ram_range);
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 void __init do_init_bootmem(void)
 {
-	unsigned long i;
 	unsigned long start, bootmap_pages;
 	unsigned long total_pages;
+	struct memblock_region *reg;
 	int boot_mapsize;
 
 	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
@@ -204,10 +193,10 @@ void __init do_init_bootmem(void)
 	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);
 
 	/* Add active regions with valid PFNs */
-	for (i = 0; i < memblock.memory.cnt; i++) {
+	for_each_memblock(memory, reg) {
 		unsigned long start_pfn, end_pfn;
-		start_pfn = memblock.memory.regions[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
+		start_pfn = memblock_region_base_pfn(reg);
+		end_pfn = memblock_region_end_pfn(reg);
 		add_active_range(0, start_pfn, end_pfn);
 	}
 
@@ -218,29 +207,21 @@ void __init do_init_bootmem(void)
 	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);
 
 	/* reserve the sections we're already using */
-	for (i = 0; i < memblock.reserved.cnt; i++) {
-		unsigned long addr = memblock.reserved.regions[i].base +
-				memblock_size_bytes(&memblock.reserved, i) - 1;
-		if (addr < lowmem_end_addr)
-			reserve_bootmem(memblock.reserved.regions[i].base,
-					memblock_size_bytes(&memblock.reserved, i),
-					BOOTMEM_DEFAULT);
-		else if (memblock.reserved.regions[i].base < lowmem_end_addr) {
-			unsigned long adjusted_size = lowmem_end_addr -
-				memblock.reserved.regions[i].base;
-			reserve_bootmem(memblock.reserved.regions[i].base,
-					adjusted_size, BOOTMEM_DEFAULT);
+	for_each_memblock(reserved, reg) {
+		unsigned long top = reg->base + reg->size - 1;
+		if (top < lowmem_end_addr)
+			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+		else if (reg->base < lowmem_end_addr) {
+			unsigned long trunc_size = lowmem_end_addr - reg->base;
+			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
 		}
 	}
 #else
 	free_bootmem_with_active_regions(0, max_pfn);
 
 	/* reserve the sections we're already using */
-	for (i = 0; i < memblock.reserved.cnt; i++)
-		reserve_bootmem(memblock.reserved.regions[i].base,
-				memblock_size_bytes(&memblock.reserved, i),
-				BOOTMEM_DEFAULT);
-
+	for_each_memblock(reserved, reg)
+		reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
 #endif
 	/* XXX need to clip this if using highmem? */
 	sparse_memory_present_with_active_regions(0);
@@ -251,22 +232,15 @@ void __init do_init_bootmem(void)
 /* mark pages that don't exist as nosave */
 static int __init mark_nonram_nosave(void)
 {
-	unsigned long memblock_next_region_start_pfn,
-		memblock_region_max_pfn;
-	int i;
-
-	for (i = 0; i < memblock.memory.cnt - 1; i++) {
-		memblock_region_max_pfn =
-			(memblock.memory.regions[i].base >> PAGE_SHIFT) +
-			(memblock.memory.regions[i].size >> PAGE_SHIFT);
-		memblock_next_region_start_pfn =
-			memblock.memory.regions[i+1].base >> PAGE_SHIFT;
-
-		if (memblock_region_max_pfn < memblock_next_region_start_pfn)
-			register_nosave_region(memblock_region_max_pfn,
-					       memblock_next_region_start_pfn);
+	struct memblock_region *reg, *prev = NULL;
+
+	for_each_memblock(memory, reg) {
+		if (prev &&
+		    memblock_region_end_pfn(prev) < memblock_region_base_pfn(reg))
+			register_nosave_region(memblock_region_end_pfn(prev),
+					       memblock_region_base_pfn(reg));
+		prev = reg;
 	}
-
 	return 0;
 }
 
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index aa731af720c0..9ba9ba1a430d 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -746,16 +746,17 @@ static void __init setup_nonnuma(void)
 	unsigned long top_of_ram = memblock_end_of_DRAM();
 	unsigned long total_ram = memblock_phys_mem_size();
 	unsigned long start_pfn, end_pfn;
-	unsigned int i, nid = 0;
+	unsigned int nid = 0;
+	struct memblock_region *reg;
 
 	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
 	       top_of_ram, total_ram);
 	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
 	       (top_of_ram - total_ram) >> 20);
 
-	for (i = 0; i < memblock.memory.cnt; ++i) {
-		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
+	for_each_memblock(memory, reg) {
+		start_pfn = memblock_region_base_pfn(reg);
+		end_pfn = memblock_region_end_pfn(reg);
 
 		fake_numa_create_new_node(end_pfn, &nid);
 		add_active_range(nid, start_pfn, end_pfn);
@@ -891,11 +892,11 @@ static struct notifier_block __cpuinitdata ppc64_numa_nb = {
 static void mark_reserved_regions_for_nid(int nid)
 {
 	struct pglist_data *node = NODE_DATA(nid);
-	int i;
+	struct memblock_region *reg;
 
-	for (i = 0; i < memblock.reserved.cnt; i++) {
-		unsigned long physbase = memblock.reserved.region[i].base;
-		unsigned long size = memblock.reserved.region[i].size;
+	for_each_memblock(reserved, reg) {
+		unsigned long physbase = reg->base;
+		unsigned long size = reg->size;
 		unsigned long start_pfn = physbase >> PAGE_SHIFT;
 		unsigned long end_pfn = PFN_UP(physbase + size);
 		struct node_active_region node_ar;
