diff options
Diffstat (limited to 'mm/sparse.c')
| -rw-r--r-- | mm/sparse.c | 53 |
1 file changed, 32 insertions, 21 deletions
diff --git a/mm/sparse.c b/mm/sparse.c index 1302f8348d51..e03b39f3540f 100644 --- a/mm/sparse.c +++ b/mm/sparse.c | |||
| @@ -209,6 +209,12 @@ static int __meminit sparse_init_one_section(struct mem_section *ms, | |||
| 209 | return 1; | 209 | return 1; |
| 210 | } | 210 | } |
| 211 | 211 | ||
| 212 | __attribute__((weak)) | ||
| 213 | void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size) | ||
| 214 | { | ||
| 215 | return NULL; | ||
| 216 | } | ||
| 217 | |||
| 212 | static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum) | 218 | static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum) |
| 213 | { | 219 | { |
| 214 | struct page *map; | 220 | struct page *map; |
| @@ -219,6 +225,11 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum) | |||
| 219 | if (map) | 225 | if (map) |
| 220 | return map; | 226 | return map; |
| 221 | 227 | ||
| 228 | map = alloc_bootmem_high_node(NODE_DATA(nid), | ||
| 229 | sizeof(struct page) * PAGES_PER_SECTION); | ||
| 230 | if (map) | ||
| 231 | return map; | ||
| 232 | |||
| 222 | map = alloc_bootmem_node(NODE_DATA(nid), | 233 | map = alloc_bootmem_node(NODE_DATA(nid), |
| 223 | sizeof(struct page) * PAGES_PER_SECTION); | 234 | sizeof(struct page) * PAGES_PER_SECTION); |
| 224 | if (map) | 235 | if (map) |
| @@ -229,6 +240,27 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum) | |||
| 229 | return NULL; | 240 | return NULL; |
| 230 | } | 241 | } |
| 231 | 242 | ||
| 243 | /* | ||
| 244 | * Allocate the accumulated non-linear sections, allocate a mem_map | ||
| 245 | * for each and record the physical to section mapping. | ||
| 246 | */ | ||
| 247 | void __init sparse_init(void) | ||
| 248 | { | ||
| 249 | unsigned long pnum; | ||
| 250 | struct page *map; | ||
| 251 | |||
| 252 | for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) { | ||
| 253 | if (!valid_section_nr(pnum)) | ||
| 254 | continue; | ||
| 255 | |||
| 256 | map = sparse_early_mem_map_alloc(pnum); | ||
| 257 | if (!map) | ||
| 258 | continue; | ||
| 259 | sparse_init_one_section(__nr_to_section(pnum), pnum, map); | ||
| 260 | } | ||
| 261 | } | ||
| 262 | |||
| 263 | #ifdef CONFIG_MEMORY_HOTPLUG | ||
| 232 | static struct page *__kmalloc_section_memmap(unsigned long nr_pages) | 264 | static struct page *__kmalloc_section_memmap(unsigned long nr_pages) |
| 233 | { | 265 | { |
| 234 | struct page *page, *ret; | 266 | struct page *page, *ret; |
| @@ -269,27 +301,6 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages) | |||
| 269 | } | 301 | } |
| 270 | 302 | ||
| 271 | /* | 303 | /* |
| 272 | * Allocate the accumulated non-linear sections, allocate a mem_map | ||
| 273 | * for each and record the physical to section mapping. | ||
| 274 | */ | ||
| 275 | void __init sparse_init(void) | ||
| 276 | { | ||
| 277 | unsigned long pnum; | ||
| 278 | struct page *map; | ||
| 279 | |||
| 280 | for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) { | ||
| 281 | if (!valid_section_nr(pnum)) | ||
| 282 | continue; | ||
| 283 | |||
| 284 | map = sparse_early_mem_map_alloc(pnum); | ||
| 285 | if (!map) | ||
| 286 | continue; | ||
| 287 | sparse_init_one_section(__nr_to_section(pnum), pnum, map); | ||
| 288 | } | ||
| 289 | } | ||
| 290 | |||
| 291 | #ifdef CONFIG_MEMORY_HOTPLUG | ||
| 292 | /* | ||
| 293 | * returns the number of sections whose mem_maps were properly | 304 | * returns the number of sections whose mem_maps were properly |
| 294 | * set. If this is <=0, then that means that the passed-in | 305 | * set. If this is <=0, then that means that the passed-in |
| 295 | * map was not consumed and must be freed. | 306 | * map was not consumed and must be freed. |
