author	Ingo Molnar <mingo@elte.hu>	2010-07-21 15:45:02 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-07-21 15:45:08 -0400
commit	dca45ad8af54963c005393a484ad117b8ba6150f (patch)
tree	7c9a6966283a6bb12b54e5680a67d203be292930 /arch/powerpc/kernel
parent	68c38fc3cb4e5a60f502ee9c45f3dfe70e5165ad (diff)
parent	cd5b8f8755a89a57fc8c408d284b8b613f090345 (diff)
Merge branch 'linus' into sched/core
Merge reason: Move from the -rc3 to the almost-rc6 base.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/btext.c                   |  2
-rw-r--r--  arch/powerpc/kernel/crash.c                   |  4
-rw-r--r--  arch/powerpc/kernel/crash_dump.c              |  4
-rw-r--r--  arch/powerpc/kernel/dma-swiotlb.c             |  2
-rw-r--r--  arch/powerpc/kernel/dma.c                     |  4
-rw-r--r--  arch/powerpc/kernel/fsl_booke_entry_mapping.S |  4
-rw-r--r--  arch/powerpc/kernel/irq.c                     | 17
-rw-r--r--  arch/powerpc/kernel/machine_kexec.c           | 12
-rw-r--r--  arch/powerpc/kernel/machine_kexec_64.c        | 18
-rw-r--r--  arch/powerpc/kernel/misc_32.S                 |  2
-rw-r--r--  arch/powerpc/kernel/misc_64.S                 |  2
-rw-r--r--  arch/powerpc/kernel/paca.c                    |  8
-rw-r--r--  arch/powerpc/kernel/perf_event.c              |  5
-rw-r--r--  arch/powerpc/kernel/process.c                 |  5
-rw-r--r--  arch/powerpc/kernel/prom.c                    | 62
-rw-r--r--  arch/powerpc/kernel/prom_init.c               |  2
-rw-r--r--  arch/powerpc/kernel/prom_init_check.sh        |  6
-rw-r--r--  arch/powerpc/kernel/rtas.c                    |  6
-rw-r--r--  arch/powerpc/kernel/rtas_flash.c              | 39
-rw-r--r--  arch/powerpc/kernel/setup-common.c            |  2
-rw-r--r--  arch/powerpc/kernel/setup_32.c                | 20
-rw-r--r--  arch/powerpc/kernel/setup_64.c                | 24
-rw-r--r--  arch/powerpc/kernel/vdso.c                    |  4
23 files changed, 111 insertions, 143 deletions
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 26e58630ed7b..625942ae5585 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -7,7 +7,7 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>

 #include <asm/sections.h>
 #include <asm/prom.h>
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index b46f2e09bd81..417f7b05a9ce 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -24,7 +24,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/types.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>

 #include <asm/processor.h>
 #include <asm/machdep.h>
@@ -447,7 +447,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 	crash_kexec_prepare_cpus(crashing_cpu);
 	cpu_set(crashing_cpu, cpus_in_crash);
 	crash_kexec_stop_spus();
-#ifdef CONFIG_PPC_STD_MMU_64
+#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
 	crash_kexec_wait_realmode(crashing_cpu);
 #endif
 	if (ppc_md.kexec_cpu_down)
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 5fb667a60894..40f524643ba6 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -13,7 +13,7 @@

 #include <linux/crash_dump.h>
 #include <linux/bootmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <asm/code-patching.h>
 #include <asm/kdump.h>
 #include <asm/prom.h>
@@ -33,7 +33,7 @@ unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
 #ifndef CONFIG_RELOCATABLE
 void __init reserve_kdump_trampoline(void)
 {
-	lmb_reserve(0, KDUMP_RESERVE_LIMIT);
+	memblock_reserve(0, KDUMP_RESERVE_LIMIT);
 }

 static void __init create_trampoline(unsigned long addr)
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index e7fe218b8697..02f724f36753 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -71,7 +71,7 @@ static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
 	sd->max_direct_dma_addr = 0;

 	/* May need to bounce if the device can't address all of DRAM */
-	if ((dma_get_mask(dev) + 1) < lmb_end_of_DRAM())
+	if ((dma_get_mask(dev) + 1) < memblock_end_of_DRAM())
 		set_dma_ops(dev, &swiotlb_dma_ops);

 	return NOTIFY_DONE;
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 8d1de6f31d5a..84d6367ec003 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -9,7 +9,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/dma-debug.h>
 #include <linux/gfp.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <asm/bug.h>
 #include <asm/abs_addr.h>

@@ -89,7 +89,7 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask)
 	/* Could be improved so platforms can set the limit in case
 	 * they have limited DMA windows
 	 */
-	return mask >= (lmb_end_of_DRAM() - 1);
+	return mask >= (memblock_end_of_DRAM() - 1);
 #else
 	return 1;
 #endif
diff --git a/arch/powerpc/kernel/fsl_booke_entry_mapping.S b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
index beb4d78a2304..a92c79be2728 100644
--- a/arch/powerpc/kernel/fsl_booke_entry_mapping.S
+++ b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
@@ -205,8 +205,7 @@ next_tlb_setup:
 	bdnz+	next_tlb_setup

 	/* 7. Jump to our 1:1 mapping */
-	li	r6, 0
-
+	mr	r6, r25
 #else
 #error You need to specify the mapping or not use this at all.
 #endif
@@ -217,7 +216,6 @@ next_tlb_setup:
 1:	mflr	r9
 	rlwimi	r6,r9,0,20,31
 	addi	r6,r6,(2f - 1b)
-	add	r6, r6, r25
 	mtspr	SPRN_SRR0,r6
 	mtspr	SPRN_SRR1,r7
 	rfi	/* start execution out of TLB1[0] entry */
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 30817d9b20cb..77be3d058a65 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -295,7 +295,10 @@ void fixup_irqs(const struct cpumask *map)

 	for_each_irq(irq) {
 		desc = irq_to_desc(irq);
-		if (desc && desc->status & IRQ_PER_CPU)
+		if (!desc)
+			continue;
+
+		if (desc->status & IRQ_PER_CPU)
 			continue;

 		cpumask_and(mask, desc->affinity, map);
@@ -317,7 +320,6 @@ void fixup_irqs(const struct cpumask *map)
 }
 #endif

-#ifdef CONFIG_IRQSTACKS
 static inline void handle_one_irq(unsigned int irq)
 {
 	struct thread_info *curtp, *irqtp;
@@ -358,12 +360,6 @@ static inline void handle_one_irq(unsigned int irq)
 	if (irqtp->flags)
 		set_bits(irqtp->flags, &curtp->flags);
 }
-#else
-static inline void handle_one_irq(unsigned int irq)
-{
-	generic_handle_irq(irq);
-}
-#endif

 static inline void check_stack_overflow(void)
 {
@@ -455,7 +451,6 @@ void exc_lvl_ctx_init(void)
 }
 #endif

-#ifdef CONFIG_IRQSTACKS
 struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
 struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

@@ -492,10 +487,6 @@ static inline void do_softirq_onstack(void)
 	irqtp->task = NULL;
 }

-#else
-#define do_softirq_onstack()	__do_softirq()
-#endif /* CONFIG_IRQSTACKS */
-
 void do_softirq(void)
 {
 	unsigned long flags;
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index bb3d893a8353..89f005116aac 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -12,7 +12,7 @@
 #include <linux/kexec.h>
 #include <linux/reboot.h>
 #include <linux/threads.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/of.h>
 #include <asm/machdep.h>
 #include <asm/prom.h>
@@ -66,11 +66,11 @@ void __init reserve_crashkernel(void)
 	unsigned long long crash_size, crash_base;
 	int ret;

-	/* this is necessary because of lmb_phys_mem_size() */
-	lmb_analyze();
+	/* this is necessary because of memblock_phys_mem_size() */
+	memblock_analyze();

 	/* use common parsing */
-	ret = parse_crashkernel(boot_command_line, lmb_phys_mem_size(),
+	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
			&crash_size, &crash_base);
 	if (ret == 0 && crash_size > 0) {
 		crashk_res.start = crash_base;
@@ -133,9 +133,9 @@ void __init reserve_crashkernel(void)
 			"for crashkernel (System RAM: %ldMB)\n",
 			(unsigned long)(crash_size >> 20),
 			(unsigned long)(crashk_res.start >> 20),
-			(unsigned long)(lmb_phys_mem_size() >> 20));
+			(unsigned long)(memblock_phys_mem_size() >> 20));

-	lmb_reserve(crashk_res.start, crash_size);
+	memblock_reserve(crashk_res.start, crash_size);
 }

 int overlaps_crashkernel(unsigned long start, unsigned long size)
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 26f9900f773c..ed31a29c4ff7 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -182,28 +182,12 @@ static void kexec_prepare_cpus_wait(int wait_state)

 	my_cpu = get_cpu();
 	/* Make sure each CPU has atleast made it to the state we need */
-	for (i=0; i < NR_CPUS; i++) {
+	for_each_online_cpu(i) {
 		if (i == my_cpu)
 			continue;

 		while (paca[i].kexec_state < wait_state) {
 			barrier();
-			if (!cpu_possible(i)) {
-				printk("kexec: cpu %d hw_cpu_id %d is not"
-					" possible, ignoring\n",
-					i, paca[i].hw_cpu_id);
-				break;
-			}
-			if (!cpu_online(i)) {
-				/* Fixme: this can be spinning in
-				 * pSeries_secondary_wait with a paca
-				 * waiting for it to go online.
-				 */
-				printk("kexec: cpu %d hw_cpu_id %d is not"
-					" online, ignoring\n",
-					i, paca[i].hw_cpu_id);
-				break;
-			}
 			if (i != notified) {
 				printk( "kexec: waiting for cpu %d (physical"
 					" %d) to enter %i state\n",
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index dc66d52dcff5..6bbd7a604d24 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -33,7 +33,6 @@

 	.text

-#ifdef CONFIG_IRQSTACKS
 _GLOBAL(call_do_softirq)
 	mflr	r0
 	stw	r0,4(r1)
@@ -56,7 +55,6 @@ _GLOBAL(call_handle_irq)
 	lwz	r0,4(r1)
 	mtlr	r0
 	blr
-#endif /* CONFIG_IRQSTACKS */

 /*
  * This returns the high 64 bits of the product of two 64-bit numbers.
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index a2b18dffa03e..e5144906a56d 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -28,7 +28,6 @@

 	.text

-#ifdef CONFIG_IRQSTACKS
_GLOBAL(call_do_softirq)
 	mflr	r0
 	std	r0,16(r1)
@@ -52,7 +51,6 @@ _GLOBAL(call_handle_irq)
 	ld	r0,16(r1)
 	mtlr	r0
 	blr
-#endif /* CONFIG_IRQSTACKS */

 	.section	".toc","aw"
 PPC64_CACHES:
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index f88acf0218db..139a773853f4 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -9,7 +9,7 @@

 #include <linux/threads.h>
 #include <linux/module.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>

 #include <asm/firmware.h>
 #include <asm/lppaca.h>
@@ -117,7 +117,7 @@ void __init allocate_pacas(void)
 	 * the first segment. On iSeries they must be within the area mapped
 	 * by the HV, which is HvPagesToMap * HVPAGESIZE bytes.
 	 */
-	limit = min(0x10000000ULL, lmb.rmo_size);
+	limit = min(0x10000000ULL, memblock.rmo_size);
 	if (firmware_has_feature(FW_FEATURE_ISERIES))
 		limit = min(limit, HvPagesToMap * HVPAGESIZE);

@@ -128,7 +128,7 @@ void __init allocate_pacas(void)

 	paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpus);

-	paca = __va(lmb_alloc_base(paca_size, PAGE_SIZE, limit));
+	paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit));
 	memset(paca, 0, paca_size);

 	printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n",
@@ -148,7 +148,7 @@ void __init free_unused_pacas(void)
 	if (new_size >= paca_size)
 		return;

-	lmb_free(__pa(paca) + new_size, paca_size - new_size);
+	memblock_free(__pa(paca) + new_size, paca_size - new_size);

 	printk(KERN_DEBUG "Freed %u bytes for unused pacas\n",
 			paca_size - new_size);
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 43b83c35cf54..5c14ffe51258 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -791,8 +791,11 @@ static void power_pmu_disable(struct perf_event *event)
 	cpuhw = &__get_cpu_var(cpu_hw_events);
 	for (i = 0; i < cpuhw->n_events; ++i) {
 		if (event == cpuhw->event[i]) {
-			while (++i < cpuhw->n_events)
+			while (++i < cpuhw->n_events) {
 				cpuhw->event[i-1] = cpuhw->event[i];
+				cpuhw->events[i-1] = cpuhw->events[i];
+				cpuhw->flags[i-1] = cpuhw->flags[i];
+			}
 			--cpuhw->n_events;
 			ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
 			if (event->hw.idx) {
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 22f08cb7e7d1..43855c9f84de 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1005,7 +1005,6 @@ out:
 	return error;
 }

-#ifdef CONFIG_IRQSTACKS
 static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
 				  unsigned long nbytes)
 {
@@ -1030,10 +1029,6 @@ static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
 	return 0;
 }

-#else
-#define valid_irq_stack(sp, p, nb)	0
-#endif /* CONFIG_IRQSTACKS */
-
 int validate_sp(unsigned long sp, struct task_struct *p,
 		unsigned long nbytes)
 {
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 05131d634e73..9d3953983fb7 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -31,7 +31,7 @@
 #include <linux/kexec.h>
 #include <linux/debugfs.h>
 #include <linux/irq.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>

 #include <asm/prom.h>
 #include <asm/rtas.h>
@@ -98,7 +98,7 @@ static void __init move_device_tree(void)

 	if ((memory_limit && (start + size) > memory_limit) ||
 			overlaps_crashkernel(start, size)) {
-		p = __va(lmb_alloc_base(size, PAGE_SIZE, lmb.rmo_size));
+		p = __va(memblock_alloc_base(size, PAGE_SIZE, memblock.rmo_size));
 		memcpy(p, initial_boot_params, size);
 		initial_boot_params = (struct boot_param_header *)p;
 		DBG("Moved device tree to 0x%p\n", p);
@@ -411,13 +411,13 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
 {
 	__be32 *dm, *ls, *usm;
 	unsigned long l, n, flags;
-	u64 base, size, lmb_size;
+	u64 base, size, memblock_size;
 	unsigned int is_kexec_kdump = 0, rngs;

-	ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
+	ls = of_get_flat_dt_prop(node, "ibm,memblock-size", &l);
 	if (ls == NULL || l < dt_root_size_cells * sizeof(__be32))
 		return 0;
-	lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls);
+	memblock_size = dt_mem_next_cell(dt_root_size_cells, &ls);

 	dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
 	if (dm == NULL || l < sizeof(__be32))
@@ -442,11 +442,11 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
 		   or if the block is not assigned to this partition (0x8) */
 		if ((flags & 0x80) || !(flags & 0x8))
 			continue;
-		size = lmb_size;
+		size = memblock_size;
 		rngs = 1;
 		if (is_kexec_kdump) {
 			/*
-			 * For each lmb in ibm,dynamic-memory, a corresponding
+			 * For each memblock in ibm,dynamic-memory, a corresponding
 			 * entry in linux,drconf-usable-memory property contains
 			 * a counter 'p' followed by 'p' (base, size) duple.
 			 * Now read the counter from
@@ -469,10 +469,10 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
 				if ((base + size) > 0x80000000ul)
 					size = 0x80000000ul - base;
 			}
-			lmb_add(base, size);
+			memblock_add(base, size);
 		} while (--rngs);
 	}
-	lmb_dump_all();
+	memblock_dump_all();
 	return 0;
 }
 #else
@@ -501,14 +501,14 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 	}
 #endif

-	lmb_add(base, size);
+	memblock_add(base, size);

 	memstart_addr = min((u64)memstart_addr, base);
 }

 u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 {
-	return lmb_alloc(size, align);
+	return memblock_alloc(size, align);
 }

 #ifdef CONFIG_BLK_DEV_INITRD
@@ -534,12 +534,12 @@ static void __init early_reserve_mem(void)
 	/* before we do anything, lets reserve the dt blob */
 	self_base = __pa((unsigned long)initial_boot_params);
 	self_size = initial_boot_params->totalsize;
-	lmb_reserve(self_base, self_size);
+	memblock_reserve(self_base, self_size);

 #ifdef CONFIG_BLK_DEV_INITRD
 	/* then reserve the initrd, if any */
 	if (initrd_start && (initrd_end > initrd_start))
-		lmb_reserve(__pa(initrd_start), initrd_end - initrd_start);
+		memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
 #endif /* CONFIG_BLK_DEV_INITRD */

 #ifdef CONFIG_PPC32
@@ -560,7 +560,7 @@ static void __init early_reserve_mem(void)
 			if (base_32 == self_base && size_32 == self_size)
 				continue;
 			DBG("reserving: %x -> %x\n", base_32, size_32);
-			lmb_reserve(base_32, size_32);
+			memblock_reserve(base_32, size_32);
 		}
 		return;
 	}
@@ -571,7 +571,7 @@ static void __init early_reserve_mem(void)
 		if (size == 0)
 			break;
 		DBG("reserving: %llx -> %llx\n", base, size);
-		lmb_reserve(base, size);
+		memblock_reserve(base, size);
 	}
 }

@@ -594,7 +594,7 @@ static inline unsigned long phyp_dump_calculate_reserve_size(void)
 		return phyp_dump_info->reserve_bootvar;

 	/* divide by 20 to get 5% of value */
-	tmp = lmb_end_of_DRAM();
+	tmp = memblock_end_of_DRAM();
 	do_div(tmp, 20);

 	/* round it down in multiples of 256 */
@@ -633,11 +633,11 @@ static void __init phyp_dump_reserve_mem(void)
 	if (phyp_dump_info->phyp_dump_is_active) {
 		/* Reserve *everything* above RMR.Area freed by userland tools*/
 		base = variable_reserve_size;
-		size = lmb_end_of_DRAM() - base;
+		size = memblock_end_of_DRAM() - base;

 		/* XXX crashed_ram_end is wrong, since it may be beyond
 		 * the memory_limit, it will need to be adjusted. */
-		lmb_reserve(base, size);
+		memblock_reserve(base, size);

 		phyp_dump_info->init_reserve_start = base;
 		phyp_dump_info->init_reserve_size = size;
@@ -645,8 +645,8 @@ static void __init phyp_dump_reserve_mem(void)
 		size = phyp_dump_info->cpu_state_size +
 			phyp_dump_info->hpte_region_size +
 			variable_reserve_size;
-		base = lmb_end_of_DRAM() - size;
-		lmb_reserve(base, size);
+		base = memblock_end_of_DRAM() - size;
+		memblock_reserve(base, size);
 		phyp_dump_info->init_reserve_start = base;
 		phyp_dump_info->init_reserve_size = size;
 	}
@@ -681,8 +681,8 @@ void __init early_init_devtree(void *params)
 	 */
 	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);

-	/* Scan memory nodes and rebuild LMBs */
-	lmb_init();
+	/* Scan memory nodes and rebuild MEMBLOCKs */
+	memblock_init();
 	of_scan_flat_dt(early_init_dt_scan_root, NULL);
 	of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);

@@ -690,11 +690,11 @@ void __init early_init_devtree(void *params)
 	strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
 	parse_early_param();

-	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
-	lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
+	/* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
+	memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
 	/* If relocatable, reserve first 32k for interrupt vectors etc. */
 	if (PHYSICAL_START > MEMORY_START)
-		lmb_reserve(MEMORY_START, 0x8000);
+		memblock_reserve(MEMORY_START, 0x8000);
 	reserve_kdump_trampoline();
 	reserve_crashkernel();
 	early_reserve_mem();
@@ -706,17 +706,17 @@ void __init early_init_devtree(void *params)

 		/* Ensure that total memory size is page-aligned, because
 		 * otherwise mark_bootmem() gets upset. */
-		lmb_analyze();
-		memsize = lmb_phys_mem_size();
+		memblock_analyze();
+		memsize = memblock_phys_mem_size();
 		if ((memsize & PAGE_MASK) != memsize)
 			limit = memsize & PAGE_MASK;
 	}
-	lmb_enforce_memory_limit(limit);
+	memblock_enforce_memory_limit(limit);

-	lmb_analyze();
-	lmb_dump_all();
+	memblock_analyze();
+	memblock_dump_all();

-	DBG("Phys. mem: %llx\n", lmb_phys_mem_size());
+	DBG("Phys. mem: %llx\n", memblock_phys_mem_size());

 	/* We may need to relocate the flat tree, do it now.
 	 * FIXME .. and the initrd too? */
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 97d4bd9442d3..3b6f8ae9b8cc 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -872,7 +872,7 @@ static void __init prom_send_capabilities(void)
 			   "ibm_architecture_vec structure inconsistent: 0x%x !\n",
 			   *cores);
 	} else {
-		*cores = NR_CPUS / prom_count_smt_threads();
+		*cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
 		prom_printf("Max number of cores passed to firmware: 0x%x\n",
 			    (unsigned long)*cores);
 	}
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
index 1ac136b128f0..9f82f4937892 100644
--- a/arch/powerpc/kernel/prom_init_check.sh
+++ b/arch/powerpc/kernel/prom_init_check.sh
@@ -52,12 +52,18 @@ do
 	if [ "${UNDEF:0:9}" = "_restgpr_" ]; then
 		OK=1
 	fi
+	if [ "${UNDEF:0:10}" = "_restgpr0_" ]; then
+		OK=1
+	fi
 	if [ "${UNDEF:0:11}" = "_rest32gpr_" ]; then
 		OK=1
 	fi
 	if [ "${UNDEF:0:9}" = "_savegpr_" ]; then
 		OK=1
 	fi
+	if [ "${UNDEF:0:10}" = "_savegpr0_" ]; then
+		OK=1
+	fi
 	if [ "${UNDEF:0:11}" = "_save32gpr_" ]; then
 		OK=1
 	fi
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 0e1ec6f746f6..d0516dbee762 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -22,7 +22,7 @@
 #include <linux/smp.h>
 #include <linux/completion.h>
 #include <linux/cpumask.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>

 #include <asm/prom.h>
@@ -934,11 +934,11 @@ void __init rtas_initialize(void)
 	 */
 #ifdef CONFIG_PPC64
 	if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) {
-		rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX);
+		rtas_region = min(memblock.rmo_size, RTAS_INSTANTIATE_MAX);
 		ibm_suspend_me_token = rtas_token("ibm,suspend-me");
 	}
 #endif
-	rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
+	rtas_rmo_buf = memblock_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);

 #ifdef CONFIG_RTAS_ERROR_LOGGING
 	rtas_last_error_token = rtas_token("rtas-last-error");
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index bfc2abafac44..67a84d8f118d 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -94,12 +94,8 @@ struct flash_block_list {
 	struct flash_block_list *next;
 	struct flash_block blocks[FLASH_BLOCKS_PER_NODE];
 };
-struct flash_block_list_header { /* just the header of flash_block_list */
-	unsigned long num_blocks;
-	struct flash_block_list *next;
-};

-static struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
+static struct flash_block_list *rtas_firmware_flash_list;

 /* Use slab cache to guarantee 4k alignment */
 static struct kmem_cache *flash_block_cache = NULL;
@@ -108,13 +104,14 @@ static struct kmem_cache *flash_block_cache = NULL;

 /* Local copy of the flash block list.
  * We only allow one open of the flash proc file and create this
- * list as we go. This list will be put in the
- * rtas_firmware_flash_list var once it is fully read.
+ * list as we go. The rtas_firmware_flash_list varable will be
+ * set once the data is fully read.
  *
  * For convenience as we build the list we use virtual addrs,
  * we do not fill in the version number, and the length field
  * is treated as the number of entries currently in the block
- * (i.e. not a byte count). This is all fixed on release.
+ * (i.e. not a byte count). This is all fixed when calling
+ * the flash routine.
  */

 /* Status int must be first member of struct */
@@ -201,16 +198,16 @@ static int rtas_flash_release(struct inode *inode, struct file *file)
 	if (uf->flist) {
 		/* File was opened in write mode for a new flash attempt */
 		/* Clear saved list */
-		if (rtas_firmware_flash_list.next) {
-			free_flash_list(rtas_firmware_flash_list.next);
-			rtas_firmware_flash_list.next = NULL;
+		if (rtas_firmware_flash_list) {
+			free_flash_list(rtas_firmware_flash_list);
+			rtas_firmware_flash_list = NULL;
 		}

 		if (uf->status != FLASH_AUTH)
 			uf->status = flash_list_valid(uf->flist);

 		if (uf->status == FLASH_IMG_READY)
-			rtas_firmware_flash_list.next = uf->flist;
+			rtas_firmware_flash_list = uf->flist;
 		else
 			free_flash_list(uf->flist);

@@ -593,7 +590,7 @@ static void rtas_flash_firmware(int reboot_type)
 	unsigned long rtas_block_list;
 	int i, status, update_token;

-	if (rtas_firmware_flash_list.next == NULL)
+	if (rtas_firmware_flash_list == NULL)
 		return;		/* nothing to do */

 	if (reboot_type != SYS_RESTART) {
@@ -610,20 +607,25 @@ static void rtas_flash_firmware(int reboot_type)
 		return;
 	}

-	/* NOTE: the "first" block list is a global var with no data
-	 * blocks in the kernel data segment. We do this because
-	 * we want to ensure this block_list addr is under 4GB.
+	/*
+	 * NOTE: the "first" block must be under 4GB, so we create
+	 * an entry with no data blocks in the reserved buffer in
+	 * the kernel data segment.
 	 */
-	rtas_firmware_flash_list.num_blocks = 0;
-	flist = (struct flash_block_list *)&rtas_firmware_flash_list;
+	spin_lock(&rtas_data_buf_lock);
+	flist = (struct flash_block_list *)&rtas_data_buf[0];
+	flist->num_blocks = 0;
+	flist->next = rtas_firmware_flash_list;
 	rtas_block_list = virt_to_abs(flist);
 	if (rtas_block_list >= 4UL*1024*1024*1024) {
 		printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n");
+		spin_unlock(&rtas_data_buf_lock);
 		return;
 	}

 	printk(KERN_ALERT "FLASH: preparing saved firmware image for flash\n");
 	/* Update the block_list in place. */
+	rtas_firmware_flash_list = NULL; /* too hard to backout on error */
 	image_size = 0;
 	for (f = flist; f; f = next) {
 		/* Translate data addrs to absolute */
@@ -664,6 +666,7 @@ static void rtas_flash_firmware(int reboot_type)
 		printk(KERN_ALERT "FLASH: unknown flash return code %d\n", status);
 		break;
 	}
+	spin_unlock(&rtas_data_buf_lock);
 }

 static void remove_flash_pde(struct proc_dir_entry *dp)
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 5e4d852f640c..b7e6c7e193ae 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -33,7 +33,7 @@
 #include <linux/serial_8250.h>
 #include <linux/debugfs.h>
 #include <linux/percpu.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/of_platform.h>
 #include <asm/io.h>
 #include <asm/paca.h>
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 8f58986c2ad9..a10ffc85ada7 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -16,7 +16,7 @@
 #include <linux/root_dev.h>
 #include <linux/cpu.h>
 #include <linux/console.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>

 #include <asm/io.h>
 #include <asm/prom.h>
@@ -241,23 +241,19 @@ int __init ppc_init(void)

 arch_initcall(ppc_init);

-#ifdef CONFIG_IRQSTACKS
 static void __init irqstack_early_init(void)
 {
 	unsigned int i;

 	/* interrupt stacks must be in lowmem, we get that for free on ppc32
-	 * as the lmb is limited to lowmem by LMB_REAL_LIMIT */
+	 * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
 	for_each_possible_cpu(i) {
 		softirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 		hardirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 	}
 }
-#else
-#define irqstack_early_init()
-#endif

 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
 static void __init exc_lvl_early_init(void)
@@ -265,15 +261,15 @@ static void __init exc_lvl_early_init(void)
 	unsigned int i;

 	/* interrupt stacks must be in lowmem, we get that for free on ppc32
-	 * as the lmb is limited to lowmem by LMB_REAL_LIMIT */
+	 * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
 	for_each_possible_cpu(i) {
 		critirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 #ifdef CONFIG_BOOKE
 		dbgirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 		mcheckirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 #endif
 	}
 }
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index f3fb5a79de52..d135f93cb0f6 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -34,7 +34,7 @@
 #include <linux/bootmem.h>
 #include <linux/pci.h>
 #include <linux/lockdep.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <asm/io.h>
 #include <asm/kdump.h>
 #include <asm/prom.h>
@@ -158,7 +158,7 @@ static void __init setup_paca(struct paca_struct *new_paca)
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally provided we
 * only toy with things in the RMO region. From here, we do
- * some early parsing of the device-tree to setup out LMB
+ * some early parsing of the device-tree to setup out MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * enabled.
@@ -404,7 +404,7 @@ void __init setup_system(void)

 	printk("-----------------------------------------------------\n");
 	printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
-	printk("physicalMemorySize = 0x%llx\n", lmb_phys_mem_size());
+	printk("physicalMemorySize = 0x%llx\n", memblock_phys_mem_size());
 	if (ppc64_caches.dline_size != 0x80)
 		printk("ppc64_caches.dcache_line_size = 0x%x\n",
 			ppc64_caches.dline_size);
@@ -432,7 +432,6 @@ static u64 slb0_limit(void)
 	return 1UL << SID_SHIFT;
 }

-#ifdef CONFIG_IRQSTACKS
 static void __init irqstack_early_init(void)
 {
 	u64 limit = slb0_limit();
@@ -444,16 +443,13 @@ static void __init irqstack_early_init(void)
 	 */
 	for_each_possible_cpu(i) {
 		softirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc_base(THREAD_SIZE,
+			__va(memblock_alloc_base(THREAD_SIZE,
 					    THREAD_SIZE, limit));
 		hardirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc_base(THREAD_SIZE,
+			__va(memblock_alloc_base(THREAD_SIZE,
 					    THREAD_SIZE, limit));
 	}
 }
-#else
-#define irqstack_early_init()
-#endif

 #ifdef CONFIG_PPC_BOOK3E
 static void __init exc_lvl_early_init(void)
@@ -462,11 +458,11 @@ static void __init exc_lvl_early_init(void)

 	for_each_possible_cpu(i) {
 		critirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 		dbgirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 		mcheckirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 	}
 }
 #else
@@ -491,11 +487,11 @@ static void __init emergency_stack_init(void)
 	 * bringup, we need to get at them in real mode. This means they
 	 * must also be within the RMO region.
 	 */
-	limit = min(slb0_limit(), lmb.rmo_size);
+	limit = min(slb0_limit(), memblock.rmo_size);

 	for_each_possible_cpu(i) {
 		unsigned long sp;
-		sp = lmb_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
+		sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
 		sp += THREAD_SIZE;
 		paca[i].emergency_sp = __va(sp);
 	}
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index d84d19224a95..13002fe206e7 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -22,7 +22,7 @@
 #include <linux/elf.h>
 #include <linux/security.h>
 #include <linux/bootmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>

 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -734,7 +734,7 @@ static int __init vdso_init(void)
 	vdso_data->platform = machine_is(iseries) ? 0x200 : 0x100;
 	if (firmware_has_feature(FW_FEATURE_LPAR))
 		vdso_data->platform |= 1;
-	vdso_data->physicalMemorySize = lmb_phys_mem_size();
+	vdso_data->physicalMemorySize = memblock_phys_mem_size();
 	vdso_data->dcache_size = ppc64_caches.dsize;
 	vdso_data->dcache_line_size = ppc64_caches.dline_size;
 	vdso_data->icache_size = ppc64_caches.isize;