author		Yasunori Goto <y-goto@jp.fujitsu.com>	2006-06-27 05:53:30 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-27 20:32:35 -0400
commit		bc02af93dd2bbddce1b55e0a493f833a1b7cf140 (patch)
tree		bda7998531b9c612c5597be9158f082d1cb5ab26 /arch
parent		dd56a8e36f91f63c0a31e8a118d87b7cf01526b8 (diff)
[PATCH] pgdat allocation for new node add (specify node id)
Rename the old add_memory() to arch_add_memory(), and use the node id to look
up the node's pgdat via NODE_DATA().

Note: powerpc's old add_memory() is defined as __devinit, even though
add_memory() is usually called only after bootup, so the annotation may be
redundant. Since I am not familiar with powerpc, I have kept it as-is.
(At the very least, __meminit would be more appropriate.)
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: Dave Hansen <haveblue@us.ibm.com>
Cc: "Brown, Len" <len.brown@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
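
For context, the sketch below illustrates how an architecture-independent
add_memory() wrapper could pass a node id down to the new arch_add_memory()
hook. It is not taken from this patch (which is limited to arch/); the
hotadd_new_pgdat() helper name and the error handling are assumptions made
only for illustration.

/*
 * Illustrative sketch only -- not part of this diff.  The generic wrapper
 * receives the node id from its caller and hands it to the architecture
 * hook, which can then use NODE_DATA(nid) instead of NODE_DATA(0).
 * hotadd_new_pgdat() is a hypothetical helper name; types come from
 * <linux/mmzone.h> and <linux/memory_hotplug.h>.
 */
int add_memory(int nid, u64 start, u64 size)
{
	if (!node_online(nid)) {
		/* a new node needs its pgdat set up before it can be used */
		if (!hotadd_new_pgdat(nid, start))	/* hypothetical helper */
			return -ENOMEM;
	}

	/* the per-architecture hook registers the range with NODE_DATA(nid) */
	return arch_add_memory(nid, start, size);
}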
Diffstat (limited to 'arch')

 -rw-r--r--  arch/i386/mm/init.c    |  2
 -rw-r--r--  arch/ia64/mm/init.c    |  5
 -rw-r--r--  arch/powerpc/mm/mem.c  | 11
 -rw-r--r--  arch/x86_64/mm/init.c  | 66

 4 files changed, 49 insertions, 35 deletions
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index bf19513f0cea..468592531793 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -654,7 +654,7 @@ void __init mem_init(void)
  */
 #ifdef CONFIG_MEMORY_HOTPLUG
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-int add_memory(u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size)
 {
 	struct pglist_data *pgdata = &contig_page_data;
 	struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 11f08001f8c2..38306e98f04b 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -652,7 +652,7 @@ void online_page(struct page *page)
 	num_physpages++;
 }
 
-int add_memory(u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size)
 {
 	pg_data_t *pgdat;
 	struct zone *zone;
@@ -660,7 +660,7 @@ int add_memory(u64 start, u64 size)
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
-	pgdat = NODE_DATA(0);
+	pgdat = NODE_DATA(nid);
 
 	zone = pgdat->node_zones + ZONE_NORMAL;
 	ret = __add_pages(zone, start_pfn, nr_pages);
@@ -671,7 +671,6 @@ int add_memory(u64 start, u64 size)
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(add_memory);
 
 int remove_memory(u64 start, u64 size)
 {
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 69f3b9a20beb..089d939a0b3e 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -114,15 +114,20 @@ void online_page(struct page *page)
 	num_physpages++;
 }
 
-int __devinit add_memory(u64 start, u64 size)
+#ifdef CONFIG_NUMA
+int memory_add_physaddr_to_nid(u64 start)
+{
+	return hot_add_scn_to_nid(start);
+}
+#endif
+
+int __devinit arch_add_memory(int nid, u64 start, u64 size)
 {
 	struct pglist_data *pgdata;
 	struct zone *zone;
-	int nid;
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 
-	nid = hot_add_scn_to_nid(start);
 	pgdata = NODE_DATA(nid);
 
 	start = (unsigned long)__va(start);
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 02add1d1dfa8..51fbf3eddf13 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -506,8 +506,6 @@ void __init clear_kernel_mapping(unsigned long address, unsigned long size)
 /*
  * Memory hotplug specific functions
  */
-#if defined(CONFIG_ACPI_HOTPLUG_MEMORY) || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)
-
 void online_page(struct page *page)
 {
 	ClearPageReserved(page);
@@ -517,31 +515,17 @@ void online_page(struct page *page)
 	num_physpages++;
 }
 
-#ifndef CONFIG_MEMORY_HOTPLUG
+#ifdef CONFIG_MEMORY_HOTPLUG
 /*
- * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
- * just online the pages.
+ * XXX: memory_add_physaddr_to_nid() is to find node id from physical address
+ *	via probe interface of sysfs. If acpi notifies hot-add event, then it
+ *	can tell node id by searching dsdt. But, probe interface doesn't have
+ *	node id. So, return 0 as node id at this time.
  */
-int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
+#ifdef CONFIG_NUMA
+int memory_add_physaddr_to_nid(u64 start)
 {
-	int err = -EIO;
-	unsigned long pfn;
-	unsigned long total = 0, mem = 0;
-	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
-		if (pfn_valid(pfn)) {
-			online_page(pfn_to_page(pfn));
-			err = 0;
-			mem++;
-		}
-		total++;
-	}
-	if (!err) {
-		z->spanned_pages += total;
-		z->present_pages += mem;
-		z->zone_pgdat->node_spanned_pages += total;
-		z->zone_pgdat->node_present_pages += mem;
-	}
-	return err;
+	return 0;
 }
 #endif
 
@@ -549,9 +533,9 @@ int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
  * Memory is added always to NORMAL zone. This means you will never get
  * additional DMA/DMA32 memory.
  */
-int add_memory(u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size)
 {
-	struct pglist_data *pgdat = NODE_DATA(0);
+	struct pglist_data *pgdat = NODE_DATA(nid);
 	struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -568,7 +552,7 @@ error:
 	printk("%s: Problem encountered in __add_pages!\n", __func__);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(add_memory);
+EXPORT_SYMBOL_GPL(arch_add_memory);
 
 int remove_memory(u64 start, u64 size)
 {
@@ -576,7 +560,33 @@ int remove_memory(u64 start, u64 size)
 }
 EXPORT_SYMBOL_GPL(remove_memory);
 
-#endif
+#else /* CONFIG_MEMORY_HOTPLUG */
+/*
+ * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
+ * just online the pages.
+ */
+int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
+{
+	int err = -EIO;
+	unsigned long pfn;
+	unsigned long total = 0, mem = 0;
+	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
+		if (pfn_valid(pfn)) {
+			online_page(pfn_to_page(pfn));
+			err = 0;
+			mem++;
+		}
+		total++;
+	}
+	if (!err) {
+		z->spanned_pages += total;
+		z->present_pages += mem;
+		z->zone_pgdat->node_spanned_pages += total;
+		z->zone_pgdat->node_present_pages += mem;
+	}
+	return err;
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
 
 static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
 		kcore_vsyscall;
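
As a usage note, the sketch below shows how the memory_add_physaddr_to_nid()
helper added for powerpc and x86_64 might be consumed by a hot-add path. The
handler name and the call into a generic add_memory(nid, ...) wrapper (as
sketched above) are assumptions for illustration, not code from this patch.

/*
 * Illustrative sketch only -- not part of this diff.  A hot-add handler
 * (hypothetical name) translates the physical address of the new memory
 * into a node id and passes it on; on !CONFIG_NUMA configurations a
 * fallback of node 0 would be assumed instead.
 */
static int handle_memory_hot_add(u64 start, u64 size)
{
	int nid = memory_add_physaddr_to_nid(start);

	return add_memory(nid, start, size);
}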