author     Seth Jennings <sjenning@linux.vnet.ibm.com>      2013-08-20 13:13:03 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2013-08-21 14:49:47 -0400
commit     cb5e39b8038be913030a7b01d4396cfa5f9ded7b
tree       63b185655cf8f74ecdc3b97a51fe76f85a75fc73  /drivers/base/memory.c
parent     37171e3cb7a2f6fc594b524c940beb1ce85cc935
drivers: base: refactor add_memory_section() to add_memory_block()
Right now memory_dev_init() maintains the memory block pointer between
iterations of add_memory_section(). This is nasty.

This patch refactors add_memory_section() to become add_memory_block(). The
refactoring pulls the section scanning out of memory_dev_init() and simplifies
the signature.

Signed-off-by: Seth Jennings <sjenning@linux.vnet.ibm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
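As context for the change, here is a minimal userspace sketch (not the kernel
code itself) of the loop structure this patch introduces: memory_dev_init()
now steps through section numbers one block at a time, and add_memory_block()
counts the present sections inside that block, creating a block only when at
least one section is present. NR_MEM_SECTIONS, SECTIONS_PER_BLOCK, and the
present[] bitmap below are made-up stand-ins for the kernel's real state.

	/*
	 * Illustrative sketch only: NR_MEM_SECTIONS, SECTIONS_PER_BLOCK and the
	 * present[] bitmap are made-up stand-ins for the kernel's real state.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define NR_MEM_SECTIONS    32
	#define SECTIONS_PER_BLOCK 8

	/* stand-in for present_section_nr(): which sections exist on this system */
	static const bool present[NR_MEM_SECTIONS] = {
		[0] = true, [1] = true, [2] = true,   /* block 0: partially present */
		[8] = true, [9] = true, [10] = true, [11] = true,
		[12] = true, [13] = true, [14] = true, [15] = true, /* block 1: fully present */
		/* sections 16..23 absent: no block is created for that range */
		[24] = true,                          /* block 3: a single present section */
	};

	/* loosely mirrors the new add_memory_block(): scan one block's worth of sections */
	static int add_memory_block(int base_section_nr)
	{
		int i, section_count = 0, section_nr = -1;

		for (i = base_section_nr;
		     i < base_section_nr + SECTIONS_PER_BLOCK && i < NR_MEM_SECTIONS;
		     i++) {
			if (!present[i])
				continue;
			if (section_count == 0)
				section_nr = i;	/* remember the first present section */
			section_count++;
		}

		if (section_count == 0)
			return 0;	/* nothing present, no memory block to create */

		printf("block at section %d: first present section %d, %d section(s)\n",
		       base_section_nr, section_nr, section_count);
		return 0;
	}

	int main(void)
	{
		int i;

		/* mirrors the simplified memory_dev_init() loop: one call per block */
		for (i = 0; i < NR_MEM_SECTIONS; i += SECTIONS_PER_BLOCK)
			add_memory_block(i);
		return 0;
	}

Because each call handles exactly one block, the caller no longer has to carry
a memory_block pointer across loop iterations, which is the simplification the
patch is after.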
Diffstat (limited to 'drivers/base/memory.c')
-rw-r--r--  drivers/base/memory.c | 48
1 file changed, 21 insertions(+), 27 deletions(-)
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 9438d541b5c3..0187fe483d7c 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -602,32 +602,31 @@ static int init_memory_block(struct memory_block **memory,
 	return ret;
 }
 
-static int add_memory_section(struct mem_section *section,
-			struct memory_block **mem_p)
+static int add_memory_block(int base_section_nr)
 {
-	struct memory_block *mem = NULL;
-	int scn_nr = __section_nr(section);
-	int ret = 0;
-
-	if (mem_p && *mem_p) {
-		if (scn_nr >= (*mem_p)->start_section_nr &&
-		    scn_nr <= (*mem_p)->end_section_nr) {
-			mem = *mem_p;
-		}
-	}
+	struct memory_block *mem;
+	int i, ret, section_count = 0, section_nr;
 
-	if (mem)
-		mem->section_count++;
-	else {
-		ret = init_memory_block(&mem, section, MEM_ONLINE);
-		/* store memory_block pointer for next loop */
-		if (!ret && mem_p)
-			*mem_p = mem;
+	for (i = base_section_nr;
+	     (i < base_section_nr + sections_per_block) && i < NR_MEM_SECTIONS;
+	     i++) {
+		if (!present_section_nr(i))
+			continue;
+		if (section_count == 0)
+			section_nr = i;
+		section_count++;
 	}
 
-	return ret;
+	if (section_count == 0)
+		return 0;
+	ret = init_memory_block(&mem, __nr_to_section(section_nr), MEM_ONLINE);
+	if (ret)
+		return ret;
+	mem->section_count = section_count;
+	return 0;
 }
 
+
 /*
  * need an interface for the VM to add new memory regions,
  * but without onlining it.
@@ -733,7 +732,6 @@ int __init memory_dev_init(void)
 	int ret;
 	int err;
 	unsigned long block_sz;
-	struct memory_block *mem = NULL;
 
 	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
 	if (ret)
@@ -747,12 +745,8 @@ int __init memory_dev_init(void)
 	 * during boot and have been initialized
 	 */
 	mutex_lock(&mem_sysfs_mutex);
-	for (i = 0; i < NR_MEM_SECTIONS; i++) {
-		if (!present_section_nr(i))
-			continue;
-		/* don't need to reuse memory_block if only one per block */
-		err = add_memory_section(__nr_to_section(i),
-				(sections_per_block == 1) ? NULL : &mem);
+	for (i = 0; i < NR_MEM_SECTIONS; i += sections_per_block) {
+		err = add_memory_block(i);
 		if (!ret)
 			ret = err;
 	}