| Field | Value | Date |
|---|---|---|
| author | Ingo Molnar <mingo@elte.hu> | 2010-08-02 02:29:56 -0400 |
| committer | Ingo Molnar <mingo@elte.hu> | 2010-08-02 02:31:54 -0400 |
| commit | 3772b734720e1a3f2dc1d95cfdfaa5332f4ccf01 (patch) | |
| tree | a1a8cc85948c086aa12a1d8014151a7ca7c04ea8 /arch/powerpc | |
| parent | 9fc3af467d0749989518a23f7289a6f44e5cb214 (diff) | |
| parent | 9fe6206f400646a2322096b56c59891d530e8d51 (diff) | |
Merge commit 'v2.6.35' into perf/core

Conflicts:
	tools/perf/Makefile
	tools/perf/util/hist.c

Merge reason: Resolve the conflicts and update to latest upstream.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/powerpc')
| Mode | Path | Lines |
|---|---|---|
| -rw-r--r-- | arch/powerpc/include/asm/kexec.h | 6 |
| -rw-r--r-- | arch/powerpc/include/asm/mmu-hash64.h | 4 |
| -rw-r--r-- | arch/powerpc/kernel/perf_event_fsl_emb.c | 6 |
| -rw-r--r-- | arch/powerpc/kernel/prom.c | 2 |
| -rw-r--r-- | arch/powerpc/mm/hash_low_64.S | 9 |
| -rw-r--r-- | arch/powerpc/mm/hash_utils_64.c | 53 |
| -rw-r--r-- | arch/powerpc/mm/hugetlbpage-hash64.c | 40 |
| -rw-r--r-- | arch/powerpc/mm/numa.c | 24 |
| -rw-r--r-- | arch/powerpc/platforms/pseries/hotplug-memory.c | 22 |

9 files changed, 95 insertions, 71 deletions
```diff
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 2a9cd74a841e..076327f2eff7 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -8,9 +8,9 @@
  * On FSL-BookE we setup a 1:1 mapping which covers the first 2GiB of memory
  * and therefore we can only deal with memory within this range
  */
-#define KEXEC_SOURCE_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL)
-#define KEXEC_DESTINATION_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL)
-#define KEXEC_CONTROL_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL)
+#define KEXEC_SOURCE_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL - 1)
+#define KEXEC_DESTINATION_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL - 1)
+#define KEXEC_CONTROL_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL - 1)
 
 #else
 
```
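The three limits change from the size of the mapping (2 GiB, i.e. the first address *outside* it) to the last byte it covers (0x7fffffff), making them inclusive upper bounds. A small user-space sketch of that arithmetic; the range check is purely illustrative and not the kexec core's actual validation code:

```c
#include <assert.h>

/* Same expression as the updated header: last byte of the first 2 GiB. */
#define FSL_BOOKE_MEM_LIMIT     (2 * 1024 * 1024 * 1024UL - 1) /* 0x7fffffff */

/* Illustrative inclusive-bound check: does [start, start + len) stay within 2 GiB? */
static int within_limit(unsigned long start, unsigned long len)
{
        unsigned long last = start + len - 1;   /* last byte touched */

        return last <= FSL_BOOKE_MEM_LIMIT;
}

int main(void)
{
        assert(FSL_BOOKE_MEM_LIMIT == 0x7fffffffUL);
        assert(within_limit(0x7ff00000UL, 0x00100000UL));      /* ends exactly at the limit */
        assert(!within_limit(0x7ff00000UL, 0x00100001UL));     /* one byte past 2 GiB */
        return 0;
}
```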
```diff
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 2102b214a87c..0e398cfee2c8 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -250,7 +250,9 @@ extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
                     pte_t *ptep, unsigned long trap, int local, int ssize,
                     unsigned int shift, unsigned int mmu_psize);
-
+extern void hash_failure_debug(unsigned long ea, unsigned long access,
+                              unsigned long vsid, unsigned long trap,
+                              int ssize, int psize, unsigned long pte);
 extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
                             unsigned long pstart, unsigned long prot,
                             int psize, int ssize);
```
```diff
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index 369872f6cf78..babcceecd2ea 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -566,9 +566,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
         * Finally record data if requested.
         */
        if (record) {
-               struct perf_sample_data data = {
-                       .period = event->hw.last_period,
-               };
+               struct perf_sample_data data;
+
+               perf_sample_data_init(&data, 0);
 
                if (perf_event_overflow(event, nmi, &data, regs)) {
                        /*
```
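The old code named only .period in a designated initializer and let C zero the remaining fields; perf_sample_data_init() centralizes which fields must be set up before perf_event_overflow() is called. Below is a compilable stand-in of the pattern only: the struct and helper are simplified placeholders, not the kernel's definitions, and the period assignment is shown merely as an example of a caller-specific field.

```c
#include <stdio.h>

/* Simplified stand-ins for the kernel types -- illustrative only. */
struct sample_data {
        unsigned long long addr;
        void *raw;
        unsigned long long period;
};

/*
 * Central init helper: when a new field becomes mandatory it is added here
 * once, instead of in every caller's designated initializer.
 */
static void sample_data_init(struct sample_data *data, unsigned long long addr)
{
        data->addr = addr;
        data->raw = NULL;
}

int main(void)
{
        struct sample_data data;

        sample_data_init(&data, 0);
        data.period = 1000;     /* caller-specific field, set explicitly when needed */
        printf("addr=%llu raw=%p period=%llu\n", data.addr, data.raw, data.period);
        return 0;
}
```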
```diff
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 9d3953983fb7..fed9bf6187d1 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -414,7 +414,7 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
        u64 base, size, memblock_size;
        unsigned int is_kexec_kdump = 0, rngs;
 
-       ls = of_get_flat_dt_prop(node, "ibm,memblock-size", &l);
+       ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
        if (ls == NULL || l < dt_root_size_cells * sizeof(__be32))
                return 0;
        memblock_size = dt_mem_next_cell(dt_root_size_cells, &ls);
```
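For reference, the property read here is an array of big-endian 32-bit cells whose count comes from dt_root_size_cells, and dt_mem_next_cell() folds them into a u64. A stand-alone sketch of that decoding (not the kernel helper itself; the property bytes are made up for the example):

```c
#include <assert.h>
#include <stdint.h>

/*
 * Device-tree property values are streams of big-endian 32-bit cells; a
 * quantity described with #size-cells = <2> occupies two of them.
 */
static uint64_t dt_read_cells(const uint8_t *p, int ncells)
{
        uint64_t val = 0;

        while (ncells--) {
                val = (val << 32) |
                      ((uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
                       (uint32_t)p[2] << 8  | (uint32_t)p[3]);
                p += 4;
        }
        return val;
}

int main(void)
{
        /* ibm,lmb-size = <0x0 0x10000000>, i.e. 256 MiB */
        static const uint8_t prop[8] = { 0, 0, 0, 0, 0x10, 0, 0, 0 };

        assert(dt_read_cells(prop, 2) == 0x10000000ULL);
        return 0;
}
```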
```diff
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index a719f53921a5..3079f6b44cf5 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -68,9 +68,6 @@ _GLOBAL(__hash_page_4K)
        std     r8,STK_PARM(r8)(r1)
        std     r9,STK_PARM(r9)(r1)
 
-       /* Add _PAGE_PRESENT to access */
-       ori     r4,r4,_PAGE_PRESENT
-
        /* Save non-volatile registers.
         * r31 will hold "old PTE"
         * r30 is "new PTE"
@@ -347,9 +344,6 @@ _GLOBAL(__hash_page_4K)
        std     r8,STK_PARM(r8)(r1)
        std     r9,STK_PARM(r9)(r1)
 
-       /* Add _PAGE_PRESENT to access */
-       ori     r4,r4,_PAGE_PRESENT
-
        /* Save non-volatile registers.
         * r31 will hold "old PTE"
         * r30 is "new PTE"
@@ -687,9 +681,6 @@ _GLOBAL(__hash_page_64K)
        std     r8,STK_PARM(r8)(r1)
        std     r9,STK_PARM(r9)(r1)
 
-       /* Add _PAGE_PRESENT to access */
-       ori     r4,r4,_PAGE_PRESENT
-
        /* Save non-volatile registers.
         * r31 will hold "old PTE"
         * r30 is "new PTE"
```
```diff
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 98f262de5585..09dffe6efa46 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -871,6 +871,18 @@ static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
 }
 #endif
 
+void hash_failure_debug(unsigned long ea, unsigned long access,
+                       unsigned long vsid, unsigned long trap,
+                       int ssize, int psize, unsigned long pte)
+{
+       if (!printk_ratelimit())
+               return;
+       pr_info("mm: Hashing failure ! EA=0x%lx access=0x%lx current=%s\n",
+               ea, access, current->comm);
+       pr_info("    trap=0x%lx vsid=0x%lx ssize=%d psize=%d pte=0x%lx\n",
+               trap, vsid, ssize, psize, pte);
+}
+
 /* Result code is:
  * 0 - handled
  * 1 - normal page fault
@@ -955,6 +967,17 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
                return 1;
        }
 
+       /* Add _PAGE_PRESENT to the required access perm */
+       access |= _PAGE_PRESENT;
+
+       /* Pre-check access permissions (will be re-checked atomically
+        * in __hash_page_XX but this pre-check is a fast path
+        */
+       if (access & ~pte_val(*ptep)) {
+               DBG_LOW(" no access !\n");
+               return 1;
+       }
+
 #ifdef CONFIG_HUGETLB_PAGE
        if (hugeshift)
                return __hash_page_huge(ea, access, vsid, ptep, trap, local,
@@ -967,14 +990,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
        DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
                pte_val(*(ptep + PTRS_PER_PTE)));
 #endif
-       /* Pre-check access permissions (will be re-checked atomically
-        * in __hash_page_XX but this pre-check is a fast path
-        */
-       if (access & ~pte_val(*ptep)) {
-               DBG_LOW(" no access !\n");
-               return 1;
-       }
-
        /* Do actual hashing */
 #ifdef CONFIG_PPC_64K_PAGES
        /* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
@@ -1033,6 +1048,12 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
                                        local, ssize, spp);
        }
 
+       /* Dump some info in case of hash insertion failure, they should
+        * never happen so it is really useful to know if/when they do
+        */
+       if (rc == -1)
+               hash_failure_debug(ea, access, vsid, trap, ssize, psize,
+                                  pte_val(*ptep));
 #ifndef CONFIG_PPC_64K_PAGES
        DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
 #else
@@ -1051,8 +1072,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
        void *pgdir;
        pte_t *ptep;
        unsigned long flags;
-       int local = 0;
-       int ssize;
+       int rc, ssize, local = 0;
 
        BUG_ON(REGION_ID(ea) != USER_REGION_ID);
 
@@ -1098,11 +1118,18 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
        /* Hash it in */
 #ifdef CONFIG_PPC_HAS_HASH_64K
        if (mm->context.user_psize == MMU_PAGE_64K)
-               __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
+               rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
        else
 #endif /* CONFIG_PPC_HAS_HASH_64K */
-               __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
-                              subpage_protection(pgdir, ea));
+               rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
+                                   subpage_protection(pgdir, ea));
+
+       /* Dump some info in case of hash insertion failure, they should
+        * never happen so it is really useful to know if/when they do
+        */
+       if (rc == -1)
+               hash_failure_debug(ea, access, vsid, trap, ssize,
+                                  mm->context.user_psize, pte_val(*ptep));
 
        local_irq_restore(flags);
 }
```
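Two things moved in hash_page(): _PAGE_PRESENT is now folded into the required access mask in C (which is why the assembly no longer ORs it in), and the permission pre-check runs before the hugepage and subpage paths, so a not-present PTE fails the same mask test as a protection mismatch. A compilable illustration of that mask test; the flag values are invented for the example and are not the real _PAGE_* definitions:

```c
#include <assert.h>

/* Invented flag values -- the real _PAGE_* bits depend on the MMU configuration. */
#define PAGE_PRESENT    0x001UL
#define PAGE_USER       0x002UL
#define PAGE_RW         0x004UL

/*
 * Same shape as the pre-check above: every bit required by 'access' must
 * already be set in the PTE, otherwise the fault is handed back as a
 * normal page fault.
 */
static int access_denied(unsigned long access, unsigned long pte)
{
        return (access & ~pte) != 0;
}

int main(void)
{
        unsigned long access = PAGE_PRESENT | PAGE_USER | PAGE_RW;     /* user write */

        assert(!access_denied(access, PAGE_PRESENT | PAGE_USER | PAGE_RW));
        assert(access_denied(access, PAGE_PRESENT | PAGE_USER));       /* read-only PTE */
        assert(access_denied(access, 0));                              /* not present */
        return 0;
}
```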
```diff
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index 199539882f92..cc5c273086cf 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -21,21 +21,13 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
        unsigned long old_pte, new_pte;
        unsigned long va, rflags, pa, sz;
        long slot;
-       int err = 1;
 
        BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);
 
        /* Search the Linux page table for a match with va */
        va = hpt_va(ea, vsid, ssize);
 
-       /*
-        * Check the user's access rights to the page. If access should be
-        * prevented then send the problem up to do_page_fault.
-        */
-       if (unlikely(access & ~pte_val(*ptep)))
-               goto out;
-       /*
-        * At this point, we have a pte (old_pte) which can be used to build
+       /* At this point, we have a pte (old_pte) which can be used to build
         * or update an HPTE. There are 2 cases:
         *
         * 1. There is a valid (present) pte with no associated HPTE (this is
@@ -49,9 +41,17 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 
        do {
                old_pte = pte_val(*ptep);
-               if (old_pte & _PAGE_BUSY)
-                       goto out;
+               /* If PTE busy, retry the access */
+               if (unlikely(old_pte & _PAGE_BUSY))
+                       return 0;
+               /* If PTE permissions don't match, take page fault */
+               if (unlikely(access & ~old_pte))
+                       return 1;
+               /* Try to lock the PTE, add ACCESSED and DIRTY if it was
+                * a write access */
                new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
+               if (access & _PAGE_RW)
+                       new_pte |= _PAGE_DIRTY;
        } while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
                                         old_pte, new_pte));
 
@@ -121,8 +121,16 @@ repeat:
                }
        }
 
-       if (unlikely(slot == -2))
-               panic("hash_huge_page: pte_insert failed\n");
+       /*
+        * Hypervisor failure. Restore old pte and return -1
+        * similar to __hash_page_*
+        */
+       if (unlikely(slot == -2)) {
+               *ptep = __pte(old_pte);
+               hash_failure_debug(ea, access, vsid, trap, ssize,
+                                  mmu_psize, old_pte);
+               return -1;
+       }
 
        new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
        }
@@ -131,9 +139,5 @@ repeat:
         * No need to use ldarx/stdcx here
         */
        *ptep = __pte(new_pte & ~_PAGE_BUSY);
-
-       err = 0;
-
- out:
-       return err;
+       return 0;
 }
```
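The rewritten loop folds the permission check and the dirty-bit handling into the lock-free PTE update: re-read the PTE, bail out if another CPU holds _PAGE_BUSY, reject a permission mismatch, then try to install the busy+accessed(+dirty) value and retry if the compare-and-swap loses a race. A simplified user-space analogue using C11 atomics (bit values are invented, the return convention is not the kernel's, and __cmpxchg_u64 is replaced by a standard CAS):

```c
#include <assert.h>
#include <stdatomic.h>

/* Invented bit values; the real _PAGE_* layout is MMU-specific. */
#define PTE_BUSY        0x1UL
#define PTE_ACCESSED    0x2UL
#define PTE_DIRTY       0x4UL
#define PTE_RW          0x8UL

/*
 * Returns 1 and stores the pre-update value if the PTE was "locked"
 * (busy+accessed, plus dirty for a write), or 0 if it was already busy.
 */
static int pte_try_lock(_Atomic unsigned long *ptep, unsigned long access,
                        unsigned long *old_out)
{
        unsigned long old_pte, new_pte;

        do {
                old_pte = atomic_load(ptep);
                if (old_pte & PTE_BUSY)
                        return 0;               /* another CPU is updating it */
                new_pte = old_pte | PTE_BUSY | PTE_ACCESSED;
                if (access & PTE_RW)
                        new_pte |= PTE_DIRTY;   /* a write access dirties the page */
        } while (!atomic_compare_exchange_weak(ptep, &old_pte, new_pte));

        *old_out = old_pte;
        return 1;
}

int main(void)
{
        _Atomic unsigned long pte = PTE_RW;
        unsigned long old;

        assert(pte_try_lock(&pte, PTE_RW, &old));
        assert(atomic_load(&pte) == (PTE_RW | PTE_BUSY | PTE_ACCESSED | PTE_DIRTY));
        assert(!pte_try_lock(&pte, PTE_RW, &old));      /* busy bit now set */
        return 0;
}
```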
```diff
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index f47364585ecd..aa731af720c0 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -398,15 +398,15 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
 }
 
 /*
- * Retreive and validate the ibm,memblock-size property for drconf memory
+ * Retreive and validate the ibm,lmb-size property for drconf memory
  * from the device tree.
  */
-static u64 of_get_memblock_size(struct device_node *memory)
+static u64 of_get_lmb_size(struct device_node *memory)
 {
        const u32 *prop;
        u32 len;
 
-       prop = of_get_property(memory, "ibm,memblock-size", &len);
+       prop = of_get_property(memory, "ibm,lmb-size", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;
 
@@ -562,7 +562,7 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
 static inline int __init read_usm_ranges(const u32 **usm)
 {
        /*
-        * For each memblock in ibm,dynamic-memory a corresponding
+        * For each lmb in ibm,dynamic-memory a corresponding
         * entry in linux,drconf-usable-memory property contains
         * a counter followed by that many (base, size) duple.
         * read the counter from linux,drconf-usable-memory
@@ -578,7 +578,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
 {
        const u32 *dm, *usm;
        unsigned int n, rc, ranges, is_kexec_kdump = 0;
-       unsigned long memblock_size, base, size, sz;
+       unsigned long lmb_size, base, size, sz;
        int nid;
        struct assoc_arrays aa;
 
@@ -586,8 +586,8 @@ static void __init parse_drconf_memory(struct device_node *memory)
        if (!n)
                return;
 
-       memblock_size = of_get_memblock_size(memory);
-       if (!memblock_size)
+       lmb_size = of_get_lmb_size(memory);
+       if (!lmb_size)
                return;
 
        rc = of_get_assoc_arrays(memory, &aa);
@@ -611,7 +611,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
                        continue;
 
                base = drmem.base_addr;
-               size = memblock_size;
+               size = lmb_size;
                ranges = 1;
 
                if (is_kexec_kdump) {
@@ -1072,7 +1072,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 {
        const u32 *dm;
        unsigned int drconf_cell_cnt, rc;
-       unsigned long memblock_size;
+       unsigned long lmb_size;
        struct assoc_arrays aa;
        int nid = -1;
 
@@ -1080,8 +1080,8 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
        if (!drconf_cell_cnt)
                return -1;
 
-       memblock_size = of_get_memblock_size(memory);
-       if (!memblock_size)
+       lmb_size = of_get_lmb_size(memory);
+       if (!lmb_size)
                return -1;
 
        rc = of_get_assoc_arrays(memory, &aa);
@@ -1100,7 +1100,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
                        continue;
 
                if ((scn_addr < drmem.base_addr)
-                   || (scn_addr >= (drmem.base_addr + memblock_size)))
+                   || (scn_addr >= (drmem.base_addr + lmb_size)))
                        continue;
 
                nid = of_drconf_to_nid_single(&drmem, &aa);
```
```diff
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index deab5f946090..bc8803664140 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -69,7 +69,7 @@ static int pseries_remove_memory(struct device_node *np)
        const char *type;
        const unsigned int *regs;
        unsigned long base;
-       unsigned int memblock_size;
+       unsigned int lmb_size;
        int ret = -EINVAL;
 
        /*
@@ -87,9 +87,9 @@ static int pseries_remove_memory(struct device_node *np)
                return ret;
 
        base = *(unsigned long *)regs;
-       memblock_size = regs[3];
+       lmb_size = regs[3];
 
-       ret = pseries_remove_memblock(base, memblock_size);
+       ret = pseries_remove_memblock(base, lmb_size);
        return ret;
 }
 
@@ -98,7 +98,7 @@ static int pseries_add_memory(struct device_node *np)
        const char *type;
        const unsigned int *regs;
        unsigned long base;
-       unsigned int memblock_size;
+       unsigned int lmb_size;
        int ret = -EINVAL;
 
        /*
@@ -116,36 +116,36 @@ static int pseries_add_memory(struct device_node *np)
                return ret;
 
        base = *(unsigned long *)regs;
-       memblock_size = regs[3];
+       lmb_size = regs[3];
 
        /*
         * Update memory region to represent the memory add
         */
-       ret = memblock_add(base, memblock_size);
+       ret = memblock_add(base, lmb_size);
        return (ret < 0) ? -EINVAL : 0;
 }
 
 static int pseries_drconf_memory(unsigned long *base, unsigned int action)
 {
        struct device_node *np;
-       const unsigned long *memblock_size;
+       const unsigned long *lmb_size;
        int rc;
 
        np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (!np)
                return -EINVAL;
 
-       memblock_size = of_get_property(np, "ibm,memblock-size", NULL);
-       if (!memblock_size) {
+       lmb_size = of_get_property(np, "ibm,lmb-size", NULL);
+       if (!lmb_size) {
                of_node_put(np);
                return -EINVAL;
        }
 
        if (action == PSERIES_DRCONF_MEM_ADD) {
-               rc = memblock_add(*base, *memblock_size);
+               rc = memblock_add(*base, *lmb_size);
                rc = (rc < 0) ? -EINVAL : 0;
        } else if (action == PSERIES_DRCONF_MEM_REMOVE) {
-               rc = pseries_remove_memblock(*base, *memblock_size);
+               rc = pseries_remove_memblock(*base, *lmb_size);
        } else {
                rc = -EINVAL;
        }
```
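The base/size extraction in these handlers relies on the pseries convention that memory nodes use two address cells and two size cells, so "reg" is four 32-bit cells and the 32-bit LMB size sits in regs[3]. A stand-alone sketch of that layout; cell byte order is ignored here, since on big-endian ppc64 the raw cells already read back as host-order values:

```c
#include <assert.h>
#include <stdint.h>

/*
 * reg = <base_hi base_lo size_hi size_lo>: the first pair forms a 64-bit
 * base address, and an LMB size small enough to fit in 32 bits is taken
 * from the low size cell (index 3).
 */
static void parse_reg(const uint32_t cells[4], uint64_t *base, uint32_t *size)
{
        *base = ((uint64_t)cells[0] << 32) | cells[1];
        *size = cells[3];
}

int main(void)
{
        /* reg = <0x1 0x20000000 0x0 0x10000000>: base 0x1_2000_0000, size 256 MiB */
        const uint32_t reg[4] = { 0x1, 0x20000000, 0x0, 0x10000000 };
        uint64_t base;
        uint32_t size;

        parse_reg(reg, &base, &size);
        assert(base == 0x120000000ULL);
        assert(size == 0x10000000U);
        return 0;
}
```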