author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-08-03 20:26:03 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-08-03 20:26:03 -0400
commit	412a4ac5e9cf7fdeb6af562c25547a9b9da7674f (patch)
tree	a8ce13cbc9c47c99799e5e3e3ad26ba78274ee73 /arch/powerpc/mm/numa.c
parent	e8e5c2155b0035b6e04f29be67f6444bc914005b (diff)
parent	0c2daaafcdec726e89cbccca61d576de8429c537 (diff)
Merge commit 'gcl/next' into next
Diffstat (limited to 'arch/powerpc/mm/numa.c')
-rw-r--r--	arch/powerpc/mm/numa.c	84
1 file changed, 42 insertions, 42 deletions
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index f78f19e0a2a4..338c6f39eab2 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -17,7 +17,7 @@
 #include <linux/nodemask.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/of.h>
 #include <linux/pfn.h>
 #include <asm/sparsemem.h>
@@ -407,7 +407,7 @@ struct of_drconf_cell {
 #define DRCONF_MEM_RESERVED	0x00000080

 /*
- * Read the next lmb list entry from the ibm,dynamic-memory property
+ * Read the next memblock list entry from the ibm,dynamic-memory property
  * and return the information in the provided of_drconf_cell structure.
  */
 static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
@@ -428,8 +428,8 @@ static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
 /*
  * Retreive and validate the ibm,dynamic-memory property of the device tree.
  *
- * The layout of the ibm,dynamic-memory property is a number N of lmb
- * list entries followed by N lmb list entries.  Each lmb list entry
+ * The layout of the ibm,dynamic-memory property is a number N of memblock
+ * list entries followed by N memblock list entries.  Each memblock list entry
  * contains information as layed out in the of_drconf_cell struct above.
  */
 static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
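To make the layout described in that comment concrete, here is a minimal, hypothetical walk of the property using the two helpers from this file. It is illustrative only — not part of the patch — and error handling is elided:

	/* Hypothetical sketch: iterate the ibm,dynamic-memory property.
	 * of_get_drconf_memory() returns the leading count N and leaves
	 * dm pointing just past it; read_drconf_cell() consumes one
	 * fixed-size entry and advances dm. */
	const u32 *dm;
	unsigned int i, n;
	struct of_drconf_cell drmem;

	n = of_get_drconf_memory(memory, &dm);
	for (i = 0; i < n; i++) {
		read_drconf_cell(&drmem, &dm);
		/* drmem.base_addr, drmem.flags, ... describe one memblock */
	}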
@@ -454,15 +454,15 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
 }

 /*
- * Retreive and validate the ibm,lmb-size property for drconf memory
+ * Retreive and validate the ibm,memblock-size property for drconf memory
  * from the device tree.
  */
-static u64 of_get_lmb_size(struct device_node *memory)
+static u64 of_get_memblock_size(struct device_node *memory)
 {
 	const u32 *prop;
 	u32 len;

-	prop = of_get_property(memory, "ibm,lmb-size", &len);
+	prop = of_get_property(memory, "ibm,memblock-size", &len);
 	if (!prop || len < sizeof(unsigned int))
 		return 0;

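The hunk ends at the validation; for context, the unchanged remainder of this helper just decodes the size cells, presumably along these lines:

	return read_n_cells(n_mem_size_cells, &prop);
}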
@@ -596,19 +596,19 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
 					unsigned long size)
 {
 	/*
-	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
+	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
 	 * we've already adjusted it for the limit and it takes care of
 	 * having memory holes below the limit.  Also, in the case of
 	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
 	 */

-	if (start + size <= lmb_end_of_DRAM())
+	if (start + size <= memblock_end_of_DRAM())
 		return size;

-	if (start >= lmb_end_of_DRAM())
+	if (start >= memblock_end_of_DRAM())
 		return 0;

-	return lmb_end_of_DRAM() - start;
+	return memblock_end_of_DRAM() - start;
 }

 /*
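A quick worked example of the clamping above, with an assumed end of DRAM:

	/* Assuming memblock_end_of_DRAM() == 0x40000000 (1 GiB):
	 *   start 0x30000000, size 0x20000000 -> returns 0x10000000 (clamped)
	 *   start 0x50000000, any size        -> returns 0 (fully above limit)
	 *   start 0x00000000, size 0x10000000 -> returns size unchanged
	 */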
@@ -618,7 +618,7 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
 static inline int __init read_usm_ranges(const u32 **usm)
 {
 	/*
-	 * For each lmb in ibm,dynamic-memory a corresponding
+	 * For each memblock in ibm,dynamic-memory a corresponding
 	 * entry in linux,drconf-usable-memory property contains
 	 * a counter followed by that many (base, size) duple.
 	 * read the counter from linux,drconf-usable-memory
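A hedged sketch of how those counters and (base, size) duples are consumed on the kexec/kdump path, mirroring the loop in parse_drconf_memory() below (the exact control flow in the file may differ):

	/* sketch: one ibm,dynamic-memory entry with usable-memory ranges */
	if (is_kexec_kdump) {
		ranges = read_usm_ranges(&usm);
		if (!ranges)	/* no (base, size) duples: skip this entry */
			continue;
	}
	do {
		if (is_kexec_kdump) {
			base = read_n_cells(n_mem_addr_cells, &usm);
			size = read_n_cells(n_mem_size_cells, &usm);
		}
		/* hand [base, base + size) to the NUMA code */
	} while (--ranges);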
@@ -634,7 +634,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
 {
 	const u32 *dm, *usm;
 	unsigned int n, rc, ranges, is_kexec_kdump = 0;
-	unsigned long lmb_size, base, size, sz;
+	unsigned long memblock_size, base, size, sz;
 	int nid;
 	struct assoc_arrays aa;

@@ -642,8 +642,8 @@ static void __init parse_drconf_memory(struct device_node *memory)
 	if (!n)
 		return;

-	lmb_size = of_get_lmb_size(memory);
-	if (!lmb_size)
+	memblock_size = of_get_memblock_size(memory);
+	if (!memblock_size)
 		return;

 	rc = of_get_assoc_arrays(memory, &aa);
@@ -667,7 +667,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
 			continue;

 		base = drmem.base_addr;
-		size = lmb_size;
+		size = memblock_size;
 		ranges = 1;

 		if (is_kexec_kdump) {
@@ -787,7 +787,7 @@ new_range:
 	}

 	/*
-	 * Now do the same thing for each LMB listed in the ibm,dynamic-memory
+	 * Now do the same thing for each MEMBLOCK listed in the ibm,dynamic-memory
 	 * property in the ibm,dynamic-reconfiguration-memory node.
 	 */
 	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
@@ -799,8 +799,8 @@ new_range:

 static void __init setup_nonnuma(void)
 {
-	unsigned long top_of_ram = lmb_end_of_DRAM();
-	unsigned long total_ram = lmb_phys_mem_size();
+	unsigned long top_of_ram = memblock_end_of_DRAM();
+	unsigned long total_ram = memblock_phys_mem_size();
 	unsigned long start_pfn, end_pfn;
 	unsigned int i, nid = 0;

@@ -809,9 +809,9 @@ static void __init setup_nonnuma(void)
 	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
 	       (top_of_ram - total_ram) >> 20);

-	for (i = 0; i < lmb.memory.cnt; ++i) {
-		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+	for (i = 0; i < memblock.memory.cnt; ++i) {
+		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
+		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);

 		fake_numa_create_new_node(end_pfn, &nid);
 		add_active_range(nid, start_pfn, end_pfn);
@@ -869,7 +869,7 @@ static void __init dump_numa_memory_topology(void)

 		count = 0;

-		for (i = 0; i < lmb_end_of_DRAM();
+		for (i = 0; i < memblock_end_of_DRAM();
 		     i += (1 << SECTION_SIZE_BITS)) {
 			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
 				if (count == 0)
@@ -889,7 +889,7 @@ static void __init dump_numa_memory_topology(void)
 }

 /*
- * Allocate some memory, satisfying the lmb or bootmem allocator where
+ * Allocate some memory, satisfying the memblock or bootmem allocator where
  * required. nid is the preferred node and end is the physical address of
  * the highest address in the node.
  *
@@ -903,11 +903,11 @@ static void __init *careful_zallocation(int nid, unsigned long size,
 	int new_nid;
 	unsigned long ret_paddr;

-	ret_paddr = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
+	ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);

 	/* retry over all memory */
 	if (!ret_paddr)
-		ret_paddr = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
+		ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());

 	if (!ret_paddr)
 		panic("numa.c: cannot allocate %lu bytes for node %d",
@@ -917,14 +917,14 @@ static void __init *careful_zallocation(int nid, unsigned long size,

 	/*
 	 * We initialize the nodes in numeric order: 0, 1, 2...
-	 * and hand over control from the LMB allocator to the
+	 * and hand over control from the MEMBLOCK allocator to the
 	 * bootmem allocator.  If this function is called for
 	 * node 5, then we know that all nodes <5 are using the
-	 * bootmem allocator instead of the LMB allocator.
+	 * bootmem allocator instead of the MEMBLOCK allocator.
 	 *
 	 * So, check the nid from which this allocation came
 	 * and double check to see if we need to use bootmem
-	 * instead of the LMB.  We don't free the LMB memory
+	 * instead of the MEMBLOCK.  We don't free the MEMBLOCK memory
 	 * since it would be useless.
 	 */
 	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
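To make the handoff comment concrete, a sketch of the check that follows new_nid (the hunk cuts off here; the bootmem call shown is illustrative of the fallback, not quoted from the patch):

	if (new_nid < nid) {
		/* That node has already switched to bootmem: redo the
		 * allocation there so the active allocator tracks it. */
		ret = __alloc_bootmem_node(NODE_DATA(new_nid),
					   size, align, 0);
	}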
@@ -949,9 +949,9 @@ static void mark_reserved_regions_for_nid(int nid)
 	struct pglist_data *node = NODE_DATA(nid);
 	int i;

-	for (i = 0; i < lmb.reserved.cnt; i++) {
-		unsigned long physbase = lmb.reserved.region[i].base;
-		unsigned long size = lmb.reserved.region[i].size;
+	for (i = 0; i < memblock.reserved.cnt; i++) {
+		unsigned long physbase = memblock.reserved.region[i].base;
+		unsigned long size = memblock.reserved.region[i].size;
 		unsigned long start_pfn = physbase >> PAGE_SHIFT;
 		unsigned long end_pfn = PFN_UP(physbase + size);
 		struct node_active_region node_ar;
@@ -959,7 +959,7 @@ static void mark_reserved_regions_for_nid(int nid)
 			node->node_spanned_pages;

 		/*
-		 * Check to make sure that this lmb.reserved area is
+		 * Check to make sure that this memblock.reserved area is
 		 * within the bounds of the node that we care about.
 		 * Checking the nid of the start and end points is not
 		 * sufficient because the reserved area could span the
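A hypothetical example of why the endpoint nids alone are not enough:

	/* Assume node 0 = [0x00000000, 0x10000000) and
	 * node 1 = [0x10000000, ...).  A reserved range
	 * [0x0ff00000, 0x10100000) starts in node 0 and ends in node 1,
	 * so each node must clip the overlapping portion for itself. */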
@@ -1017,7 +1017,7 @@ void __init do_init_bootmem(void)
 	int nid;

 	min_low_pfn = 0;
-	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
 	max_pfn = max_low_pfn;

 	if (parse_numa_properties())
@@ -1094,7 +1094,7 @@ void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-	max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
+	max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
 	free_area_init_nodes(max_zone_pfns);
 }

@@ -1128,7 +1128,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 {
 	const u32 *dm;
 	unsigned int drconf_cell_cnt, rc;
-	unsigned long lmb_size;
+	unsigned long memblock_size;
 	struct assoc_arrays aa;
 	int nid = -1;

@@ -1136,8 +1136,8 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 	if (!drconf_cell_cnt)
 		return -1;

-	lmb_size = of_get_lmb_size(memory);
-	if (!lmb_size)
+	memblock_size = of_get_memblock_size(memory);
+	if (!memblock_size)
 		return -1;

 	rc = of_get_assoc_arrays(memory, &aa);
@@ -1156,7 +1156,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 			continue;

 		if ((scn_addr < drmem.base_addr)
-		    || (scn_addr >= (drmem.base_addr + lmb_size)))
+		    || (scn_addr >= (drmem.base_addr + memblock_size)))
 			continue;

 		nid = of_drconf_to_nid_single(&drmem, &aa);
@@ -1169,7 +1169,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 /*
  * Find the node associated with a hot added memory section for memory
  * represented in the device tree as a node (i.e. memory@XXXX) for
- * each lmb.
+ * each memblock.
  */
 int hot_add_node_scn_to_nid(unsigned long scn_addr)
 {
@@ -1210,8 +1210,8 @@ int hot_add_node_scn_to_nid(unsigned long scn_addr)

 /*
  * Find the node associated with a hot added memory section.  Section
- * corresponds to a SPARSEMEM section, not an LMB.  It is assumed that
- * sections are fully contained within a single LMB.
+ * corresponds to a SPARSEMEM section, not an MEMBLOCK.  It is assumed that
+ * sections are fully contained within a single MEMBLOCK.
  */
 int hot_add_scn_to_nid(unsigned long scn_addr)
 {
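For orientation, the body of hot_add_scn_to_nid() beyond this final hunk dispatches between the two lookup paths above. A rough sketch, with the early-exit and fallback details elided:

	struct device_node *memory;
	int nid;

	/* drconf memory is described under a single device-tree node ... */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
	} else {
		/* ... otherwise fall back to per-memory@XXXX node lookup */
		nid = hot_add_node_scn_to_nid(scn_addr);
	}
	return nid;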