author	Zhang Yanfei <zhangyanfei@cn.fujitsu.com>	2013-11-12 18:07:42 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-12 22:09:06 -0500
commit	85b35feaecd4d2284505b22708795bc1f03fc897 (patch)
tree	0160ae1452e94c2fcaf5f20b84c0f4b39a6882d2
parent	071aee138410210e3764f3ae8d37ef46dc6d3b42 (diff)
mm/sparsemem: use PAGES_PER_SECTION to remove redundant nr_pages parameter
The following functions

	- sparse_add_one_section()
	- kmalloc_section_memmap()
	- __kmalloc_section_memmap()
	- __kfree_section_memmap()

always operate on a single memory section, so it is redundant to pass them
a nr_pages parameter that is always the number of pages in one section.
Use the predefined macro PAGES_PER_SECTION directly instead of passing the
parameter.

Signed-off-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Cc: Tang Chen <tangchen@cn.fujitsu.com>
Cc: Toshi Kani <toshi.kani@hp.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
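A minimal standalone C sketch of this refactoring pattern: a parameter that every caller passes as the same compile-time constant is dropped and the constant is used directly in the callee. The clear_memmap_* helpers and the macro value below are illustrative only; just the PAGES_PER_SECTION name mirrors the kernel macro this patch relies on.

#include <stdio.h>
#include <string.h>

#define PAGES_PER_SECTION	(1UL << 15)	/* illustrative value only */

struct page { unsigned long flags; };

/* Before: nr_pages is redundant, it is always PAGES_PER_SECTION. */
static void clear_memmap_old(struct page *memmap, unsigned long nr_pages)
{
	memset(memmap, 0, sizeof(struct page) * nr_pages);
}

/* After: the callee uses the macro, so callers cannot pass a wrong size. */
static void clear_memmap_new(struct page *memmap)
{
	memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);
}

int main(void)
{
	static struct page memmap[PAGES_PER_SECTION];

	clear_memmap_old(memmap, PAGES_PER_SECTION);
	clear_memmap_new(memmap);
	printf("cleared %lu page structs\n", PAGES_PER_SECTION);
	return 0;
}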
-rw-r--r--	include/linux/memory_hotplug.h	3
-rw-r--r--	mm/memory_hotplug.c	3
-rw-r--r--	mm/sparse.c	33
3 files changed, 17 insertions, 22 deletions
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 22203c293f07..4ca3d951fe91 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -268,8 +268,7 @@ extern int arch_add_memory(int nid, u64 start, u64 size);
 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
 extern bool is_memblock_offlined(struct memory_block *mem);
 extern void remove_memory(int nid, u64 start, u64 size);
-extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
-				  int nr_pages);
+extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn);
 extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
 extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
 					  unsigned long pnum);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 8285346be663..1b6fe8ca71e6 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -401,13 +401,12 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 static int __meminit __add_section(int nid, struct zone *zone,
 					unsigned long phys_start_pfn)
 {
-	int nr_pages = PAGES_PER_SECTION;
 	int ret;
 
 	if (pfn_valid(phys_start_pfn))
 		return -EEXIST;
 
-	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
+	ret = sparse_add_one_section(zone, phys_start_pfn);
 
 	if (ret < 0)
 		return ret;
diff --git a/mm/sparse.c b/mm/sparse.c
index 4ac1d7ef548f..fbb9dbc6aca9 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -590,16 +590,15 @@ void __init sparse_init(void)
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
-						  unsigned long nr_pages)
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
 {
 	/* This will make the necessary allocations eventually. */
 	return sparse_mem_map_populate(pnum, nid);
 }
-static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+static void __kfree_section_memmap(struct page *memmap)
 {
 	unsigned long start = (unsigned long)memmap;
-	unsigned long end = (unsigned long)(memmap + nr_pages);
+	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
 
 	vmemmap_free(start, end);
 }
@@ -613,10 +612,10 @@ static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 #else
-static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
+static struct page *__kmalloc_section_memmap(void)
 {
 	struct page *page, *ret;
-	unsigned long memmap_size = sizeof(struct page) * nr_pages;
+	unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;
 
 	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
 	if (page)
@@ -634,19 +633,18 @@ got_map_ptr:
 	return ret;
 }
 
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
-						  unsigned long nr_pages)
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
 {
-	return __kmalloc_section_memmap(nr_pages);
+	return __kmalloc_section_memmap();
 }
 
-static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+static void __kfree_section_memmap(struct page *memmap)
 {
 	if (is_vmalloc_addr(memmap))
 		vfree(memmap);
 	else
 		free_pages((unsigned long)memmap,
-			   get_order(sizeof(struct page) * nr_pages));
+			   get_order(sizeof(struct page) * PAGES_PER_SECTION));
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
@@ -684,8 +682,7 @@ static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
  * set.  If this is <=0, then that means that the passed-in
  * map was not consumed and must be freed.
  */
-int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
-							int nr_pages)
+int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn)
 {
 	unsigned long section_nr = pfn_to_section_nr(start_pfn);
 	struct pglist_data *pgdat = zone->zone_pgdat;
@@ -702,12 +699,12 @@ int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 	ret = sparse_index_init(section_nr, pgdat->node_id);
 	if (ret < 0 && ret != -EEXIST)
 		return ret;
-	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
+	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
 	if (!memmap)
 		return -ENOMEM;
 	usemap = __kmalloc_section_usemap();
 	if (!usemap) {
-		__kfree_section_memmap(memmap, nr_pages);
+		__kfree_section_memmap(memmap);
 		return -ENOMEM;
 	}
 
@@ -719,7 +716,7 @@ int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 		goto out;
 	}
 
-	memset(memmap, 0, sizeof(struct page) * nr_pages);
+	memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);
 
 	ms->section_mem_map |= SECTION_MARKED_PRESENT;
 
@@ -729,7 +726,7 @@ out:
 	pgdat_resize_unlock(pgdat, &flags);
 	if (ret <= 0) {
 		kfree(usemap);
-		__kfree_section_memmap(memmap, nr_pages);
+		__kfree_section_memmap(memmap);
 	}
 	return ret;
 }
@@ -771,7 +768,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
 		kfree(usemap);
 		if (memmap)
-			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
+			__kfree_section_memmap(memmap);
 		return;
 	}
 