Diffstat (limited to 'mm/sparse.c')
 mm/sparse.c | 197 ++++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 179 insertions(+), 18 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index 6ce4aab69e99..dc0cc4d43ff3 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -2,6 +2,7 @@
  * sparse memory mappings.
  */
 #include <linux/mm.h>
+#include <linux/slab.h>
 #include <linux/mmzone.h>
 #include <linux/bootmem.h>
 #include <linux/highmem.h>
@@ -271,7 +272,8 @@ static unsigned long *__kmalloc_section_usemap(void)
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
 static unsigned long * __init
-sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
+sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
+					 unsigned long count)
 {
 	unsigned long section_nr;
 
@@ -286,7 +288,7 @@ sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
 	 * this problem.
 	 */
 	section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
-	return alloc_bootmem_section(usemap_size(), section_nr);
+	return alloc_bootmem_section(usemap_size() * count, section_nr);
 }
 
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -329,7 +331,8 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
 }
 #else
 static unsigned long * __init
-sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
+sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
+					 unsigned long count)
 {
 	return NULL;
 }
@@ -339,27 +342,40 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 
-static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
+static void __init sparse_early_usemaps_alloc_node(unsigned long **usemap_map,
+				 unsigned long pnum_begin,
+				 unsigned long pnum_end,
+				 unsigned long usemap_count, int nodeid)
 {
-	unsigned long *usemap;
-	struct mem_section *ms = __nr_to_section(pnum);
-	int nid = sparse_early_nid(ms);
-
-	usemap = sparse_early_usemap_alloc_pgdat_section(NODE_DATA(nid));
-	if (usemap)
-		return usemap;
+	void *usemap;
+	unsigned long pnum;
+	int size = usemap_size();
 
-	usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
+	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
+							  usemap_count);
 	if (usemap) {
-		check_usemap_section_nr(nid, usemap);
-		return usemap;
+		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+			if (!present_section_nr(pnum))
+				continue;
+			usemap_map[pnum] = usemap;
+			usemap += size;
+		}
+		return;
 	}
 
-	/* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
-	nid = 0;
+	usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
+	if (usemap) {
+		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+			if (!present_section_nr(pnum))
+				continue;
+			usemap_map[pnum] = usemap;
+			usemap += size;
+			check_usemap_section_nr(nodeid, usemap_map[pnum]);
+		}
+		return;
+	}
 
 	printk(KERN_WARNING "%s: allocation failed\n", __func__);
-	return NULL;
 }
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
@@ -375,8 +391,65 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
 	       PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
 	return map;
 }
+void __init sparse_mem_maps_populate_node(struct page **map_map,
+					  unsigned long pnum_begin,
+					  unsigned long pnum_end,
+					  unsigned long map_count, int nodeid)
+{
+	void *map;
+	unsigned long pnum;
+	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
+
+	map = alloc_remap(nodeid, size * map_count);
+	if (map) {
+		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+			if (!present_section_nr(pnum))
+				continue;
+			map_map[pnum] = map;
+			map += size;
+		}
+		return;
+	}
+
+	size = PAGE_ALIGN(size);
+	map = alloc_bootmem_pages_node(NODE_DATA(nodeid), size * map_count);
+	if (map) {
+		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+			if (!present_section_nr(pnum))
+				continue;
+			map_map[pnum] = map;
+			map += size;
+		}
+		return;
+	}
+
+	/* fallback */
+	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+		struct mem_section *ms;
+
+		if (!present_section_nr(pnum))
+			continue;
+		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
+		if (map_map[pnum])
+			continue;
+		ms = __nr_to_section(pnum);
+		printk(KERN_ERR "%s: sparsemem memory map backing failed "
+			"some memory will not be available.\n", __func__);
+		ms->section_mem_map = 0;
+	}
+}
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
+						    unsigned long pnum_begin,
+						    unsigned long pnum_end,
+						    unsigned long map_count, int nodeid)
+{
+	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
+				      map_count, nodeid);
+}
+#else
 static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 {
 	struct page *map;
@@ -392,10 +465,12 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 	ms->section_mem_map = 0;
 	return NULL;
 }
+#endif
 
 void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
 {
 }
+
 /*
  * Allocate the accumulated non-linear sections, allocate a mem_map
  * for each and record the physical to section mapping.
@@ -407,6 +482,14 @@ void __init sparse_init(void)
 	unsigned long *usemap;
 	unsigned long **usemap_map;
 	int size;
+	int nodeid_begin = 0;
+	unsigned long pnum_begin = 0;
+	unsigned long usemap_count;
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+	unsigned long map_count;
+	int size2;
+	struct page **map_map;
+#endif
 
 	/*
 	 * map is using big page (aka 2M in x86 64 bit)
@@ -425,10 +508,81 @@ void __init sparse_init(void)
 		panic("can not allocate usemap_map\n");
 
 	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+		struct mem_section *ms;
+
 		if (!present_section_nr(pnum))
 			continue;
-		usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
+		ms = __nr_to_section(pnum);
+		nodeid_begin = sparse_early_nid(ms);
+		pnum_begin = pnum;
+		break;
 	}
+	usemap_count = 1;
+	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
+		struct mem_section *ms;
+		int nodeid;
+
+		if (!present_section_nr(pnum))
+			continue;
+		ms = __nr_to_section(pnum);
+		nodeid = sparse_early_nid(ms);
+		if (nodeid == nodeid_begin) {
+			usemap_count++;
+			continue;
+		}
+		/* ok, we need to take care of pnum_begin to pnum - 1 */
+		sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
+						usemap_count, nodeid_begin);
+		/* new start, update count etc. */
+		nodeid_begin = nodeid;
+		pnum_begin = pnum;
+		usemap_count = 1;
+	}
+	/* ok, last chunk */
+	sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
+					usemap_count, nodeid_begin);
+
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
+	map_map = alloc_bootmem(size2);
+	if (!map_map)
+		panic("can not allocate map_map\n");
+
+	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+		struct mem_section *ms;
+
+		if (!present_section_nr(pnum))
+			continue;
+		ms = __nr_to_section(pnum);
+		nodeid_begin = sparse_early_nid(ms);
+		pnum_begin = pnum;
+		break;
+	}
+	map_count = 1;
+	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
+		struct mem_section *ms;
+		int nodeid;
+
+		if (!present_section_nr(pnum))
+			continue;
+		ms = __nr_to_section(pnum);
+		nodeid = sparse_early_nid(ms);
+		if (nodeid == nodeid_begin) {
+			map_count++;
+			continue;
+		}
+		/* ok, we need to take care of pnum_begin to pnum - 1 */
+		sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
+						 map_count, nodeid_begin);
+		/* new start, update count etc. */
+		nodeid_begin = nodeid;
+		pnum_begin = pnum;
+		map_count = 1;
+	}
+	/* ok, last chunk */
+	sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
+					 map_count, nodeid_begin);
+#endif
 
 	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
 		if (!present_section_nr(pnum))
@@ -438,7 +592,11 @@ void __init sparse_init(void)
 		if (!usemap)
 			continue;
 
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+		map = map_map[pnum];
+#else
 		map = sparse_early_mem_map_alloc(pnum);
+#endif
 		if (!map)
 			continue;
 
@@ -448,6 +606,9 @@ void __init sparse_init(void)
 
 	vmemmap_populate_print_last();
 
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+	free_bootmem(__pa(map_map), size2);
+#endif
 	free_bootmem(__pa(usemap_map), size);
 }
 
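
For readers skimming the patch, below is a small, self-contained userspace sketch of the grouping pattern the new sparse_init() loops implement: walk the present sections once, count how many consecutive present sections belong to the same node, allocate one block per node, and hand each present section an equal-sized slice of that block. All names in it are hypothetical stand-ins (NR_SECTIONS for NR_MEM_SECTIONS, SLICE_SIZE for usemap_size(), section_nid[] for sparse_early_nid(), alloc_node() for sparse_early_usemaps_alloc_node()); it illustrates the idea and is not kernel code.

/*
 * Illustrative userspace sketch (not kernel code). Blocks are deliberately
 * never freed, mirroring boot-time allocations that stay around.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_SECTIONS 16
#define SLICE_SIZE  24			/* stand-in for usemap_size() */

/* hypothetical memory layout: -1 means "section not present" */
static const int section_nid[NR_SECTIONS] = {
	0, 0, -1, 0, 1, 1, -1, 1, 1, 2, -1, -1, 2, 2, 2, 2
};

static int present(unsigned long pnum)
{
	return section_nid[pnum] >= 0;
}

/* one allocation for all present sections in [pnum_begin, pnum_end) */
static void alloc_node(void **map, unsigned long pnum_begin,
		       unsigned long pnum_end, unsigned long count, int nid)
{
	char *block = calloc(count, SLICE_SIZE);
	unsigned long pnum;

	if (!block)
		return;
	printf("node %d: one block of %lu bytes for %lu sections\n",
	       nid, count * SLICE_SIZE, count);
	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		if (!present(pnum))
			continue;
		map[pnum] = block;	/* each present section gets a slice */
		block += SLICE_SIZE;
	}
}

int main(void)
{
	void *map[NR_SECTIONS] = { NULL };
	unsigned long pnum, pnum_begin = 0, count;
	int nid_begin = -1;

	/* find the first present section, as the new sparse_init() does */
	for (pnum = 0; pnum < NR_SECTIONS; pnum++) {
		if (!present(pnum))
			continue;
		nid_begin = section_nid[pnum];
		pnum_begin = pnum;
		break;
	}
	if (nid_begin < 0)
		return 0;

	count = 1;
	for (pnum = pnum_begin + 1; pnum < NR_SECTIONS; pnum++) {
		if (!present(pnum))
			continue;
		if (section_nid[pnum] == nid_begin) {
			count++;
			continue;
		}
		/* node changed: flush the chunk [pnum_begin, pnum) */
		alloc_node(map, pnum_begin, pnum, count, nid_begin);
		nid_begin = section_nid[pnum];
		pnum_begin = pnum;
		count = 1;
	}
	/* last chunk */
	alloc_node(map, pnum_begin, NR_SECTIONS, count, nid_begin);
	return 0;
}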