author		Ingo Molnar <mingo@kernel.org>	2014-01-16 03:33:30 -0500
committer	Ingo Molnar <mingo@kernel.org>	2014-01-16 03:33:30 -0500
commit		860fc2f2640ec348b9520ca4649b1bfd23d91bc2 (patch)
tree		73d90d6ef86893c89bb70e78a2b63295d531f371 /arch/powerpc
parent		197749981e539c1eb5863f417de6dd4e2c02b76c (diff)
parent		bee09ed91cacdbffdbcd3b05de8409c77ec9fcd6 (diff)
Merge branch 'perf/urgent' into perf/core
Pick up the latest fixes, refresh the development tree.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/powerpc')
32 files changed, 253 insertions, 169 deletions
diff --git a/arch/powerpc/boot/dts/mpc5125twr.dts b/arch/powerpc/boot/dts/mpc5125twr.dts
index 4177b62240c2..a618dfc13e4c 100644
--- a/arch/powerpc/boot/dts/mpc5125twr.dts
+++ b/arch/powerpc/boot/dts/mpc5125twr.dts
@@ -58,7 +58,6 @@
 compatible = "fsl,mpc5121-immr";
 #address-cells = <1>;
 #size-cells = <1>;
-#interrupt-cells = <2>;
 ranges = <0x0 0x80000000 0x400000>;
 reg = <0x80000000 0x400000>;
 bus-frequency = <66000000>; // 66 MHz ips bus
@@ -189,6 +188,10 @@
 reg = <0xA000 0x1000>;
 };
 
+// disable USB1 port
+// TODO:
+// correct pinmux config and fix USB3320 ulpi dependency
+// before re-enabling it
 usb@3000 {
 compatible = "fsl,mpc5121-usb2-dr";
 reg = <0x3000 0x400>;
@@ -197,6 +200,7 @@
 interrupts = <43 0x8>;
 dr_mode = "host";
 phy_type = "ulpi";
+status = "disabled";
 };
 
 // 5125 PSCs are not 52xx or 5121 PSC compatible
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 894662a5d4d5..243ce69ad685 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -284,7 +284,7 @@ do_kvm_##n: \
 subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
 beq- 1f; \
 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
-1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
+1: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \
 blt+ cr1,3f; /* abort if it is */ \
 li r1,(n); /* will be reloaded later */ \
 sth r1,PACA_TRAP_SAVE(r13); \
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 4a594b76674d..bc23b1ba7980 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -192,6 +192,10 @@ extern void kvmppc_load_up_vsx(void);
 extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
 extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
 extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
+extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
+struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
+struct kvmppc_book3s_shadow_vcpu *svcpu);
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 0bd9348a4db9..192917d2239c 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -79,6 +79,7 @@ struct kvmppc_host_state {
 ulong vmhandler;
 ulong scratch0;
 ulong scratch1;
+ulong scratch2;
 u8 in_guest;
 u8 restore_hid5;
 u8 napping;
@@ -106,6 +107,7 @@ struct kvmppc_host_state {
 };
 
 struct kvmppc_book3s_shadow_vcpu {
+bool in_use;
 ulong gpr[14];
 u32 cr;
 u32 xer;
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 033c06be1d84..7bdcf340016c 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -720,13 +720,13 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
 int64_t opal_pci_poll(uint64_t phb_id);
 int64_t opal_return_cpu(void);
 
-int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, uint64_t *val);
+int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val);
 int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val);
 
 int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
 uint32_t addr, uint32_t data, uint32_t sz);
 int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
-uint32_t addr, uint32_t *data, uint32_t sz);
+uint32_t addr, __be32 *data, uint32_t sz);
 int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
 int64_t opal_manage_flash(uint8_t op);
 int64_t opal_update_flash(uint64_t blk_list);
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 9ee12610af02..aace90547614 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -35,7 +35,7 @@ extern void giveup_vsx(struct task_struct *);
 extern void enable_kernel_spe(void);
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
-extern void switch_booke_debug_regs(struct thread_struct *new_thread);
+extern void switch_booke_debug_regs(struct debug_reg *new_debug);
 
 #ifndef CONFIG_SMP
 extern void discard_lazy_cpu_state(void);
diff --git a/arch/powerpc/include/asm/unaligned.h b/arch/powerpc/include/asm/unaligned.h
index 5f1b1e3c2137..8296381ae432 100644
--- a/arch/powerpc/include/asm/unaligned.h
+++ b/arch/powerpc/include/asm/unaligned.h
@@ -4,13 +4,18 @@
 #ifdef __KERNEL__
 
 /*
-* The PowerPC can do unaligned accesses itself in big endian mode.
+* The PowerPC can do unaligned accesses itself based on its endian mode.
 */
 #include <linux/unaligned/access_ok.h>
 #include <linux/unaligned/generic.h>
 
+#ifdef __LITTLE_ENDIAN__
+#define get_unaligned __get_unaligned_le
+#define put_unaligned __put_unaligned_le
+#else
 #define get_unaligned __get_unaligned_be
 #define put_unaligned __put_unaligned_be
+#endif
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_UNALIGNED_H */
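The unaligned.h hunk above only flips which generic accessors back get_unaligned()/put_unaligned(), depending on the byte order the kernel is built for. As a rough, hypothetical C sketch (not the kernel's actual generic implementation; the sketch_* names are illustrative only), an endian-aware unaligned read boils down to:

#include <stdint.h>
#include <string.h>

/* Read a 32-bit value from a possibly unaligned pointer in the CPU's
 * native byte order. memcpy() avoids the unaligned dereference; compilers
 * usually turn it into a single load where the CPU allows that. */
static uint32_t sketch_get_unaligned32(const void *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}

/* Data that is defined as big-endian (firmware tables, on-disk formats)
 * still needs an explicit conversion on a little-endian CPU, which is why
 * the default accessor above must follow __LITTLE_ENDIAN__ rather than
 * always assume big-endian. */
static uint32_t sketch_get_unaligned_be32(const void *p)
{
	const uint8_t *b = p;

	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8) | (uint32_t)b[3];
}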
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 2ea5cc033ec8..d3de01066f7d 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -576,6 +576,7 @@ int main(void)
 HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
 HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
 HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
+HSTATE_FIELD(HSTATE_SCRATCH2, scratch2);
 HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
 HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
 HSTATE_FIELD(HSTATE_NAPPING, napping);
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 779a78c26435..11c1d069d920 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -124,15 +124,15 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
 {
 unsigned long addr;
-const u32 *basep, *sizep;
+const __be32 *basep, *sizep;
 unsigned int rtas_start = 0, rtas_end = 0;
 
 basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
 sizep = of_get_property(rtas.dev, "rtas-size", NULL);
 
 if (basep && sizep) {
-rtas_start = *basep;
-rtas_end = *basep + *sizep;
+rtas_start = be32_to_cpup(basep);
+rtas_end = rtas_start + be32_to_cpup(sizep);
 }
 
 for (addr = begin; addr < end; addr += PAGE_SIZE) {
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 2ae41aba4053..4f0946de2d5c 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -80,6 +80,7 @@ END_FTR_SECTION(0, 1)
 * of the function that the cpu should jump to to continue
 * initialization.
 */
+.balign 8
 .globl __secondary_hold_spinloop
 __secondary_hold_spinloop:
 .llong 0x0
@@ -470,6 +471,7 @@ _STATIC(__after_prom_start)
 mtctr r8
 bctr
 
+.balign 8
 p_end: .llong _end - _stext
 
 4: /* Now copy the rest of the kernel up to _end */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 3386d8ab7eb0..4a96556fd2d4 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -339,7 +339,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
 #endif
 }
 
-static void prime_debug_regs(struct thread_struct *thread)
+static void prime_debug_regs(struct debug_reg *debug)
 {
 /*
 * We could have inherited MSR_DE from userspace, since
@@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread)
 */
 mtmsr(mfmsr() & ~MSR_DE);
 
-mtspr(SPRN_IAC1, thread->debug.iac1);
-mtspr(SPRN_IAC2, thread->debug.iac2);
+mtspr(SPRN_IAC1, debug->iac1);
+mtspr(SPRN_IAC2, debug->iac2);
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
-mtspr(SPRN_IAC3, thread->debug.iac3);
-mtspr(SPRN_IAC4, thread->debug.iac4);
+mtspr(SPRN_IAC3, debug->iac3);
+mtspr(SPRN_IAC4, debug->iac4);
 #endif
-mtspr(SPRN_DAC1, thread->debug.dac1);
-mtspr(SPRN_DAC2, thread->debug.dac2);
+mtspr(SPRN_DAC1, debug->dac1);
+mtspr(SPRN_DAC2, debug->dac2);
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
-mtspr(SPRN_DVC1, thread->debug.dvc1);
-mtspr(SPRN_DVC2, thread->debug.dvc2);
+mtspr(SPRN_DVC1, debug->dvc1);
+mtspr(SPRN_DVC2, debug->dvc2);
 #endif
-mtspr(SPRN_DBCR0, thread->debug.dbcr0);
-mtspr(SPRN_DBCR1, thread->debug.dbcr1);
+mtspr(SPRN_DBCR0, debug->dbcr0);
+mtspr(SPRN_DBCR1, debug->dbcr1);
 #ifdef CONFIG_BOOKE
-mtspr(SPRN_DBCR2, thread->debug.dbcr2);
+mtspr(SPRN_DBCR2, debug->dbcr2);
 #endif
 }
 /*
@@ -371,11 +371,11 @@ static void prime_debug_regs(struct thread_struct *thread)
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
-void switch_booke_debug_regs(struct thread_struct *new_thread)
+void switch_booke_debug_regs(struct debug_reg *new_debug)
 {
 if ((current->thread.debug.dbcr0 & DBCR0_IDM)
-|| (new_thread->debug.dbcr0 & DBCR0_IDM))
-prime_debug_regs(new_thread);
+|| (new_debug->dbcr0 & DBCR0_IDM))
+prime_debug_regs(new_debug);
 }
 EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
@@ -683,7 +683,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-switch_booke_debug_regs(&new->thread);
+switch_booke_debug_regs(&new->thread.debug);
 #else
 /*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index cb64a6e1dc51..078145acf7fb 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1986,19 +1986,23 @@ static void __init prom_init_stdout(void)
 /* Get the full OF pathname of the stdout device */
 memset(path, 0, 256);
 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
-stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
-val = cpu_to_be32(stdout_node);
-prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
-&val, sizeof(val));
 prom_printf("OF stdout device is: %s\n", of_stdout_device);
 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
 path, strlen(path) + 1);
 
-/* If it's a display, note it */
-memset(type, 0, sizeof(type));
-prom_getprop(stdout_node, "device_type", type, sizeof(type));
-if (strcmp(type, "display") == 0)
-prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
+/* instance-to-package fails on PA-Semi */
+stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
+if (stdout_node != PROM_ERROR) {
+val = cpu_to_be32(stdout_node);
+prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
+&val, sizeof(val));
+
+/* If it's a display, note it */
+memset(type, 0, sizeof(type));
+prom_getprop(stdout_node, "device_type", type, sizeof(type));
+if (strcmp(type, "display") == 0)
+prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
+}
 }
 
 static int __init prom_find_machine_type(void)
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 75fb40498b41..2e3d2bf536c5 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1555,7 +1555,7 @@ long arch_ptrace(struct task_struct *child, long request,
 
 flush_fp_to_thread(child);
 if (fpidx < (PT_FPSCR - PT_FPR0))
-memcpy(&tmp, &child->thread.fp_state.fpr,
+memcpy(&tmp, &child->thread.TS_FPR(fpidx),
 sizeof(long));
 else
 tmp = child->thread.fp_state.fpscr;
@@ -1588,7 +1588,7 @@ long arch_ptrace(struct task_struct *child, long request,
 
 flush_fp_to_thread(child);
 if (fpidx < (PT_FPSCR - PT_FPR0))
-memcpy(&child->thread.fp_state.fpr, &data,
+memcpy(&child->thread.TS_FPR(fpidx), &data,
 sizeof(long));
 else
 child->thread.fp_state.fpscr = data;
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index febc80445d25..bc76cc6b419c 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -479,7 +479,7 @@ void __init smp_setup_cpu_maps(void)
 if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) &&
 (dn = of_find_node_by_path("/rtas"))) {
 int num_addr_cell, num_size_cell, maxcpus;
-const unsigned int *ireg;
+const __be32 *ireg;
 
 num_addr_cell = of_n_addr_cells(dn);
 num_size_cell = of_n_size_cells(dn);
@@ -489,7 +489,7 @@ void __init smp_setup_cpu_maps(void)
 if (!ireg)
 goto out;
 
-maxcpus = ireg[num_addr_cell + num_size_cell];
+maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell);
 
 /* Double maxcpus for processors which have SMT capability */
 if (cpu_has_feature(CPU_FTR_SMT))
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index a3b64f3bf9a2..c1cf4a1522d9 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -580,7 +580,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 int cpu_to_core_id(int cpu)
 {
 struct device_node *np;
-const int *reg;
+const __be32 *reg;
 int id = -1;
 
 np = of_get_cpu_node(cpu, NULL);
@@ -591,7 +591,7 @@ int cpu_to_core_id(int cpu)
 if (!reg)
 goto out;
 
-id = *reg;
+id = be32_to_cpup(reg);
 out:
 of_node_put(np);
 return id;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index f3ff587a8b7d..c5d148434c08 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -469,11 +469,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 slb_v = vcpu->kvm->arch.vrma_slb_v;
 }
 
+preempt_disable();
 /* Find the HPTE in the hash table */
 index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
 HPTE_V_VALID | HPTE_V_ABSENT);
-if (index < 0)
+if (index < 0) {
+preempt_enable();
 return -ENOENT;
+}
 hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
 v = hptep[0] & ~HPTE_V_HVLOCK;
 gr = kvm->arch.revmap[index].guest_rpte;
@@ -481,6 +484,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 /* Unlock the HPTE */
 asm volatile("lwsync" : : : "memory");
 hptep[0] = v;
+preempt_enable();
 
 gpte->eaddr = eaddr;
 gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
@@ -665,6 +669,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 return -EFAULT;
 } else {
 page = pages[0];
+pfn = page_to_pfn(page);
 if (PageHuge(page)) {
 page = compound_head(page);
 pte_size <<= compound_order(page);
@@ -689,7 +694,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 }
 rcu_read_unlock_sched();
 }
-pfn = page_to_pfn(page);
 }
 
 ret = -EFAULT;
@@ -707,8 +711,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
 }
 
-/* Set the HPTE to point to pfn */
-r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
+/*
+* Set the HPTE to point to pfn.
+* Since the pfn is at PAGE_SIZE granularity, make sure we
+* don't mask out lower-order bits if psize < PAGE_SIZE.
+*/
+if (psize < PAGE_SIZE)
+psize = PAGE_SIZE;
+r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1));
 if (hpte_is_writable(r) && !write_ok)
 r = hpte_make_readonly(r);
 ret = RESUME_GUEST;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 072287f1c3bc..b51d5db78068 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -131,8 +131,9 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 {
 struct kvmppc_vcore *vc = vcpu->arch.vcore;
+unsigned long flags;
 
-spin_lock(&vcpu->arch.tbacct_lock);
+spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
 vc->preempt_tb != TB_NIL) {
 vc->stolen_tb += mftb() - vc->preempt_tb;
@@ -143,19 +144,20 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
 vcpu->arch.busy_preempt = TB_NIL;
 }
-spin_unlock(&vcpu->arch.tbacct_lock);
+spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
 static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 {
 struct kvmppc_vcore *vc = vcpu->arch.vcore;
+unsigned long flags;
 
-spin_lock(&vcpu->arch.tbacct_lock);
+spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
 vc->preempt_tb = mftb();
 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
 vcpu->arch.busy_preempt = mftb();
-spin_unlock(&vcpu->arch.tbacct_lock);
+spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
@@ -486,11 +488,11 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
 */
 if (vc->vcore_state != VCORE_INACTIVE &&
 vc->runner->arch.run_task != current) {
-spin_lock(&vc->runner->arch.tbacct_lock);
+spin_lock_irq(&vc->runner->arch.tbacct_lock);
 p = vc->stolen_tb;
 if (vc->preempt_tb != TB_NIL)
 p += now - vc->preempt_tb;
-spin_unlock(&vc->runner->arch.tbacct_lock);
+spin_unlock_irq(&vc->runner->arch.tbacct_lock);
 } else {
 p = vc->stolen_tb;
 }
@@ -512,10 +514,10 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
 core_stolen = vcore_stolen_time(vc, now);
 stolen = core_stolen - vcpu->arch.stolen_logged;
 vcpu->arch.stolen_logged = core_stolen;
-spin_lock(&vcpu->arch.tbacct_lock);
+spin_lock_irq(&vcpu->arch.tbacct_lock);
 stolen += vcpu->arch.busy_stolen;
 vcpu->arch.busy_stolen = 0;
-spin_unlock(&vcpu->arch.tbacct_lock);
+spin_unlock_irq(&vcpu->arch.tbacct_lock);
 if (!dt || !vpa)
 return;
 memset(dt, 0, sizeof(struct dtl_entry));
@@ -589,7 +591,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 if (list_empty(&vcpu->kvm->arch.rtas_tokens))
 return RESUME_HOST;
 
+idx = srcu_read_lock(&vcpu->kvm->srcu);
 rc = kvmppc_rtas_hcall(vcpu);
+srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
 if (rc == -ENOENT)
 return RESUME_HOST;
@@ -1115,13 +1119,13 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
 
 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
 return;
-spin_lock(&vcpu->arch.tbacct_lock);
+spin_lock_irq(&vcpu->arch.tbacct_lock);
 now = mftb();
 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
 vcpu->arch.stolen_logged;
 vcpu->arch.busy_preempt = now;
 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
-spin_unlock(&vcpu->arch.tbacct_lock);
+spin_unlock_irq(&vcpu->arch.tbacct_lock);
 --vc->n_runnable;
 list_del(&vcpu->arch.run_list);
 }
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 9c515440ad1a..8689e2e30857 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -225,6 +225,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 is_io = pa & (HPTE_R_I | HPTE_R_W);
 pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
 pa &= PAGE_MASK;
+pa |= gpa & ~PAGE_MASK;
 } else {
 /* Translate to host virtual address */
 hva = __gfn_to_hva_memslot(memslot, gfn);
@@ -238,13 +239,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 ptel = hpte_make_readonly(ptel);
 is_io = hpte_cache_bits(pte_val(pte));
 pa = pte_pfn(pte) << PAGE_SHIFT;
+pa |= hva & (pte_size - 1);
+pa |= gpa & ~PAGE_MASK;
 }
 }
 
 if (pte_size < psize)
 return H_PARAMETER;
-if (pa && pte_size > psize)
-pa |= gpa & (pte_size - 1);
 
 ptel &= ~(HPTE_R_PP0 - psize);
 ptel |= pa;
@@ -749,6 +750,10 @@ static int slb_base_page_shift[4] = {
 20, /* 1M, unsupported */
 };
 
+/* When called from virtmode, this func should be protected by
+ * preempt_disable(), otherwise, the holding of HPTE_V_HVLOCK
+ * can trigger deadlock issue.
+ */
 long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 unsigned long valid)
 {
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bc8de75b1925..be4fa04a37c9 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -153,7 +153,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 13: b machine_check_fwnmi
 
-
 /*
 * We come in here when wakened from nap mode on a secondary hw thread.
 * Relocation is off and most register values are lost.
@@ -224,6 +223,11 @@ kvm_start_guest:
 /* Clear our vcpu pointer so we don't come back in early */
 li r0, 0
 std r0, HSTATE_KVM_VCPU(r13)
+/*
+ * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
+ * the nap_count, because once the increment to nap_count is
+ * visible we could be given another vcpu.
+ */
 lwsync
 /* Clear any pending IPI - we're an offline thread */
 ld r5, HSTATE_XICS_PHYS(r13)
@@ -241,7 +245,6 @@ kvm_start_guest:
 /* increment the nap count and then go to nap mode */
 ld r4, HSTATE_KVM_VCORE(r13)
 addi r4, r4, VCORE_NAP_COUNT
-lwsync /* make previous updates visible */
 51: lwarx r3, 0, r4
 addi r3, r3, 1
 stwcx. r3, 0, r4
@@ -751,15 +754,14 @@ kvmppc_interrupt_hv:
 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
 * guest R13 saved in SPRN_SCRATCH0
 */
-/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
-std r9, HSTATE_HOST_R2(r13)
+std r9, HSTATE_SCRATCH2(r13)
 
 lbz r9, HSTATE_IN_GUEST(r13)
 cmpwi r9, KVM_GUEST_MODE_HOST_HV
 beq kvmppc_bad_host_intr
 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 cmpwi r9, KVM_GUEST_MODE_GUEST
-ld r9, HSTATE_HOST_R2(r13)
+ld r9, HSTATE_SCRATCH2(r13)
 beq kvmppc_interrupt_pr
 #endif
 /* We're now back in the host but in guest MMU context */
@@ -779,7 +781,7 @@ kvmppc_interrupt_hv:
 std r6, VCPU_GPR(R6)(r9)
 std r7, VCPU_GPR(R7)(r9)
 std r8, VCPU_GPR(R8)(r9)
-ld r0, HSTATE_HOST_R2(r13)
+ld r0, HSTATE_SCRATCH2(r13)
 std r0, VCPU_GPR(R9)(r9)
 std r10, VCPU_GPR(R10)(r9)
 std r11, VCPU_GPR(R11)(r9)
@@ -990,14 +992,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 */
 /* Increment the threads-exiting-guest count in the 0xff00
 bits of vcore->entry_exit_count */
-lwsync
 ld r5,HSTATE_KVM_VCORE(r13)
 addi r6,r5,VCORE_ENTRY_EXIT
 41: lwarx r3,0,r6
 addi r0,r3,0x100
 stwcx. r0,0,r6
 bne 41b
-lwsync
+isync /* order stwcx. vs. reading napping_threads */
 
 /*
 * At this point we have an interrupt that we have to pass
@@ -1030,6 +1031,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 sld r0,r0,r4
 andc. r3,r3,r0 /* no sense IPI'ing ourselves */
 beq 43f
+/* Order entry/exit update vs. IPIs */
+sync
 mulli r4,r4,PACA_SIZE /* get paca for thread 0 */
 subf r6,r4,r13
 42: andi. r0,r3,1
@@ -1638,10 +1641,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 bge kvm_cede_exit
 stwcx. r4,0,r6
 bne 31b
+/* order napping_threads update vs testing entry_exit_count */
+isync
 li r0,1
 stb r0,HSTATE_NAPPING(r13)
-/* order napping_threads update vs testing entry_exit_count */
-lwsync
 mr r4,r3
 lwz r7,VCORE_ENTRY_EXIT(r5)
 cmpwi r7,0x100
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index f4dd041c14ea..f779450cb07c 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -129,29 +129,32 @@ kvm_start_lightweight:
 * R12 = exit handler id
 * R13 = PACA
 * SVCPU.* = guest *
+* MSR.EE = 1
 *
 */
 
+PPC_LL r3, GPR4(r1) /* vcpu pointer */
+
+/*
+ * kvmppc_copy_from_svcpu can clobber volatile registers, save
+ * the exit handler id to the vcpu and restore it from there later.
+ */
+stw r12, VCPU_TRAP(r3)
+
 /* Transfer reg values from shadow vcpu back to vcpu struct */
 /* On 64-bit, interrupts are still off at this point */
-PPC_LL r3, GPR4(r1) /* vcpu pointer */
+
 GET_SHADOW_VCPU(r4)
 bl FUNC(kvmppc_copy_from_svcpu)
 nop
 
 #ifdef CONFIG_PPC_BOOK3S_64
-/* Re-enable interrupts */
-ld r3, HSTATE_HOST_MSR(r13)
-ori r3, r3, MSR_EE
-MTMSR_EERI(r3)
-
 /*
 * Reload kernel SPRG3 value.
 * No need to save guest value as usermode can't modify SPRG3.
 */
 ld r3, PACA_SPRG3(r13)
 mtspr SPRN_SPRG3, r3
-
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 /* R7 = vcpu */
@@ -177,7 +180,7 @@ kvm_start_lightweight:
 PPC_STL r31, VCPU_GPR(R31)(r7)
 
 /* Pass the exit number as 3rd argument to kvmppc_handle_exit */
-mr r5, r12
+lwz r5, VCPU_TRAP(r7)
 
 /* Restore r3 (kvm_run) and r4 (vcpu) */
 REST_2GPRS(3, r1)
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index fe14ca3dd171..5b9e9063cfaf 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -66,6 +66,7 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
 svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
+svcpu->in_use = 0;
 svcpu_put(svcpu);
 #endif
 vcpu->cpu = smp_processor_id();
@@ -78,6 +79,9 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+if (svcpu->in_use) {
+kvmppc_copy_from_svcpu(vcpu, svcpu);
+}
 memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
 to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
 svcpu_put(svcpu);
@@ -110,12 +114,26 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
 svcpu->ctr = vcpu->arch.ctr;
 svcpu->lr = vcpu->arch.lr;
 svcpu->pc = vcpu->arch.pc;
+svcpu->in_use = true;
 }
 
 /* Copy data touched by real-mode code from shadow vcpu back to vcpu */
 void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 struct kvmppc_book3s_shadow_vcpu *svcpu)
 {
+/*
+ * vcpu_put would just call us again because in_use hasn't
+ * been updated yet.
+ */
+preempt_disable();
+
+/*
+ * Maybe we were already preempted and synced the svcpu from
+ * our preempt notifiers. Don't bother touching this svcpu then.
+ */
+if (!svcpu->in_use)
+goto out;
+
 vcpu->arch.gpr[0] = svcpu->gpr[0];
 vcpu->arch.gpr[1] = svcpu->gpr[1];
 vcpu->arch.gpr[2] = svcpu->gpr[2];
@@ -139,6 +157,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 vcpu->arch.fault_dar = svcpu->fault_dar;
 vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
 vcpu->arch.last_inst = svcpu->last_inst;
+svcpu->in_use = false;
+
+out:
+preempt_enable();
 }
 
 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
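The book3s_pr.c hunks above add the svcpu->in_use flag so the shadow state is copied back at most once, whether that happens on the normal exit path or from the vcpu put path. A simplified, hypothetical C model of that guard (kernel locking and the real register set omitted; names are illustrative only) could look like:

#include <stdbool.h>
#include <string.h>

/* Hypothetical stand-ins for the real shadow/vcpu structures. */
struct shadow_state { bool in_use; unsigned long gpr[14]; };
struct vcpu_state { unsigned long gpr[14]; };

/* Copy the shadow contents back exactly once; a later caller sees
 * in_use == false and leaves the already-synced data alone. In the
 * kernel this runs between preempt_disable()/preempt_enable() so a
 * preempt notifier cannot race with the copy. */
static void sketch_copy_from_shadow(struct vcpu_state *vcpu, struct shadow_state *svcpu)
{
	if (!svcpu->in_use)
		return;
	memcpy(vcpu->gpr, svcpu->gpr, sizeof(vcpu->gpr));
	svcpu->in_use = false;
}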
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index a38c4c9edab8..c3c5231adade 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -153,15 +153,11 @@ _GLOBAL(kvmppc_entry_trampoline)
 
 li r6, MSR_IR | MSR_DR
 andc r6, r5, r6 /* Clear DR and IR in MSR value */
-#ifdef CONFIG_PPC_BOOK3S_32
 /*
 * Set EE in HOST_MSR so that it's enabled when we get into our
-* C exit handler function. On 64-bit we delay enabling
-* interrupts until we have finished transferring stuff
-* to or from the PACA.
+* C exit handler function.
 */
 ori r5, r5, MSR_EE
-#endif
 mtsrr0 r7
 mtsrr1 r6
 RFI
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 53e65a210b9a..0591e05db74b 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -681,7 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 int ret, s;
-struct thread_struct thread;
+struct debug_reg debug;
 #ifdef CONFIG_PPC_FPU
 struct thread_fp_state fp;
 int fpexc_mode;
@@ -723,9 +723,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 #endif
 
 /* Switch to guest debug context */
-thread.debug = vcpu->arch.shadow_dbg_reg;
-switch_booke_debug_regs(&thread);
-thread.debug = current->thread.debug;
+debug = vcpu->arch.shadow_dbg_reg;
+switch_booke_debug_regs(&debug);
+debug = current->thread.debug;
 current->thread.debug = vcpu->arch.shadow_dbg_reg;
 
 kvmppc_fix_ee_before_entry();
@@ -736,8 +736,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 We also get here with interrupts enabled. */
 
 /* Switch back to user space debug context */
-switch_booke_debug_regs(&thread);
-current->thread.debug = thread.debug;
+switch_booke_debug_regs(&debug);
+current->thread.debug = debug;
 
 #ifdef CONFIG_PPC_FPU
 kvmppc_save_guest_fp(vcpu);
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index d73a59014900..596a285c0755 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -9,6 +9,14 @@
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
 
+#ifdef __BIG_ENDIAN__
+#define sLd sld /* Shift towards low-numbered address. */
+#define sHd srd /* Shift towards high-numbered address. */
+#else
+#define sLd srd /* Shift towards low-numbered address. */
+#define sHd sld /* Shift towards high-numbered address. */
+#endif
+
 .align 7
 _GLOBAL(__copy_tofrom_user)
 BEGIN_FTR_SECTION
@@ -118,10 +126,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 
 24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */
 25: ld r0,8(r4)
-sld r6,r9,r10
+sLd r6,r9,r10
 26: ldu r9,16(r4)
-srd r7,r0,r11
-sld r8,r0,r10
+sHd r7,r0,r11
+sLd r8,r0,r10
 or r7,r7,r6
 blt cr6,79f
 27: ld r0,8(r4)
@@ -129,35 +137,35 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 
 28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */
 29: ldu r9,8(r4)
-sld r8,r0,r10
+sLd r8,r0,r10
 addi r3,r3,-8
 blt cr6,5f
 30: ld r0,8(r4)
-srd r12,r9,r11
-sld r6,r9,r10
+sHd r12,r9,r11
+sLd r6,r9,r10
 31: ldu r9,16(r4)
 or r12,r8,r12
-srd r7,r0,r11
-sld r8,r0,r10
+sHd r7,r0,r11
+sLd r8,r0,r10
 addi r3,r3,16
 beq cr6,78f
 
 1: or r7,r7,r6
 32: ld r0,8(r4)
 76: std r12,8(r3)
-2: srd r12,r9,r11
-sld r6,r9,r10
+2: sHd r12,r9,r11
+sLd r6,r9,r10
 33: ldu r9,16(r4)
 or r12,r8,r12
 77: stdu r7,16(r3)
-srd r7,r0,r11
-sld r8,r0,r10
+sHd r7,r0,r11
+sLd r8,r0,r10
 bdnz 1b
 
 78: std r12,8(r3)
 or r7,r7,r6
 79: std r7,16(r3)
-5: srd r12,r9,r11
+5: sHd r12,r9,r11
 or r12,r8,r12
 80: std r12,24(r3)
 bne 6f
@@ -165,23 +173,38 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 blr
 6: cmpwi cr1,r5,8
 addi r3,r3,32
-sld r9,r9,r10
+sLd r9,r9,r10
 ble cr1,7f
 34: ld r0,8(r4)
-srd r7,r0,r11
+sHd r7,r0,r11
 or r9,r7,r9
 7:
 bf cr7*4+1,1f
+#ifdef __BIG_ENDIAN__
 rotldi r9,r9,32
+#endif
 94: stw r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+rotrdi r9,r9,32
+#endif
 addi r3,r3,4
 1: bf cr7*4+2,2f
+#ifdef __BIG_ENDIAN__
 rotldi r9,r9,16
+#endif
 95: sth r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+rotrdi r9,r9,16
+#endif
 addi r3,r3,2
 2: bf cr7*4+3,3f
+#ifdef __BIG_ENDIAN__
 rotldi r9,r9,8
+#endif
 96: stb r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+rotrdi r9,r9,8
+#endif
 3: li r3,0
 blr
 
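The sLd/sHd macros above exist because the unaligned copy builds each aligned destination doubleword from two neighbouring source words, and the direction in which bytes must move inside a register depends on the byte order. A hedged C illustration of the same idea (hypothetical helper, byte_off assumed to be 1..7) is:

#include <stdint.h>

/* Merge two consecutive aligned 64-bit words into the doubleword that
 * starts byte_off bytes into the first word. On a big-endian CPU the
 * lowest-addressed byte sits in the high bits of a register, so "shift
 * towards the low address" is a left shift; on little-endian it is a
 * right shift. byte_off must be between 1 and 7 here. */
static uint64_t sketch_merge_unaligned(uint64_t first_word, uint64_t second_word,
				       unsigned int byte_off)
{
	unsigned int low_shift = 8 * byte_off;
	unsigned int high_shift = 64 - low_shift;

#ifdef __BIG_ENDIAN__
	return (first_word << low_shift) | (second_word >> high_shift);
#else
	return (first_word >> low_shift) | (second_word << high_shift);
#endif
}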
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index 02245cee7818..d7ddcee7feb8 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -36,7 +36,6 @@
 #include "powernv.h"
 #include "pci.h"
 
-static char *hub_diag = NULL;
 static int ioda_eeh_nb_init = 0;
 
 static int ioda_eeh_event(struct notifier_block *nb,
@@ -140,15 +139,6 @@ static int ioda_eeh_post_init(struct pci_controller *hose)
 ioda_eeh_nb_init = 1;
 }
 
-/* We needn't HUB diag-data on PHB3 */
-if (phb->type == PNV_PHB_IODA1 && !hub_diag) {
-hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-if (!hub_diag) {
-pr_err("%s: Out of memory !\n", __func__);
-return -ENOMEM;
-}
-}
-
 #ifdef CONFIG_DEBUG_FS
 if (phb->dbgfs) {
 debugfs_create_file("err_injct_outbound", 0600,
@@ -633,11 +623,10 @@ static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
 static void ioda_eeh_hub_diag(struct pci_controller *hose)
 {
 struct pnv_phb *phb = hose->private_data;
-struct OpalIoP7IOCErrorData *data;
+struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
 long rc;
 
-data = (struct OpalIoP7IOCErrorData *)ioda_eeh_hub_diag;
-rc = opal_pci_get_hub_diag_data(phb->hub_id, data, PAGE_SIZE);
+rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
 if (rc != OPAL_SUCCESS) {
 pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n",
 __func__, phb->hub_id, rc);
@@ -820,14 +809,15 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose)
 struct OpalIoPhbErrorCommon *common;
 long rc;
 
-common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
-rc = opal_pci_get_phb_diag_data2(phb->opal_id, common, PAGE_SIZE);
+rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
+PNV_PCI_DIAG_BUF_SIZE);
 if (rc != OPAL_SUCCESS) {
 pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
 __func__, hose->global_number, rc);
 return;
 }
 
+common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
 switch (common->ioType) {
 case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
 ioda_eeh_p7ioc_phb_diag(hose, common);
diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c index e7e59e4f9892..79d83cad3d67 100644 --- a/arch/powerpc/platforms/powernv/opal-lpc.c +++ b/arch/powerpc/platforms/powernv/opal-lpc.c | |||
@@ -24,25 +24,25 @@ static int opal_lpc_chip_id = -1; | |||
24 | static u8 opal_lpc_inb(unsigned long port) | 24 | static u8 opal_lpc_inb(unsigned long port) |
25 | { | 25 | { |
26 | int64_t rc; | 26 | int64_t rc; |
27 | uint32_t data; | 27 | __be32 data; |
28 | 28 | ||
29 | if (opal_lpc_chip_id < 0 || port > 0xffff) | 29 | if (opal_lpc_chip_id < 0 || port > 0xffff) |
30 | return 0xff; | 30 | return 0xff; |
31 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1); | 31 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1); |
32 | return rc ? 0xff : data; | 32 | return rc ? 0xff : be32_to_cpu(data); |
33 | } | 33 | } |
34 | 34 | ||
35 | static __le16 __opal_lpc_inw(unsigned long port) | 35 | static __le16 __opal_lpc_inw(unsigned long port) |
36 | { | 36 | { |
37 | int64_t rc; | 37 | int64_t rc; |
38 | uint32_t data; | 38 | __be32 data; |
39 | 39 | ||
40 | if (opal_lpc_chip_id < 0 || port > 0xfffe) | 40 | if (opal_lpc_chip_id < 0 || port > 0xfffe) |
41 | return 0xffff; | 41 | return 0xffff; |
42 | if (port & 1) | 42 | if (port & 1) |
43 | return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1); | 43 | return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1); |
44 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2); | 44 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2); |
45 | return rc ? 0xffff : data; | 45 | return rc ? 0xffff : be32_to_cpu(data); |
46 | } | 46 | } |
47 | static u16 opal_lpc_inw(unsigned long port) | 47 | static u16 opal_lpc_inw(unsigned long port) |
48 | { | 48 | { |
@@ -52,7 +52,7 @@ static u16 opal_lpc_inw(unsigned long port) | |||
52 | static __le32 __opal_lpc_inl(unsigned long port) | 52 | static __le32 __opal_lpc_inl(unsigned long port) |
53 | { | 53 | { |
54 | int64_t rc; | 54 | int64_t rc; |
55 | uint32_t data; | 55 | __be32 data; |
56 | 56 | ||
57 | if (opal_lpc_chip_id < 0 || port > 0xfffc) | 57 | if (opal_lpc_chip_id < 0 || port > 0xfffc) |
58 | return 0xffffffff; | 58 | return 0xffffffff; |
@@ -62,7 +62,7 @@ static __le32 __opal_lpc_inl(unsigned long port) | |||
62 | (__le32)opal_lpc_inb(port + 2) << 8 | | 62 | (__le32)opal_lpc_inb(port + 2) << 8 | |
63 | opal_lpc_inb(port + 3); | 63 | opal_lpc_inb(port + 3); |
64 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4); | 64 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4); |
65 | return rc ? 0xffffffff : data; | 65 | return rc ? 0xffffffff : be32_to_cpu(data); |
66 | } | 66 | } |
67 | 67 | ||
68 | static u32 opal_lpc_inl(unsigned long port) | 68 | static u32 opal_lpc_inl(unsigned long port) |
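OPAL returns LPC port data in big-endian byte order regardless of the host's endianness, so each accessor above now reads into a __be32 bounce variable and swaps exactly once after the call; returning a plain uint32_t would be misread on a little-endian host. A short illustrative sketch of the 32-bit case, following the names in the hunk:

static u32 example_lpc_inl(unsigned long port)
{
	__be32 data;	/* firmware fills this in big-endian order */
	int64_t rc;

	if (opal_lpc_chip_id < 0 || port > 0xfffc)
		return 0xffffffff;
	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4);
	return rc ? 0xffffffff : be32_to_cpu(data);	/* swap once, here */
}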
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c index 4d99a8fd55ac..4fbf276ac99e 100644 --- a/arch/powerpc/platforms/powernv/opal-xscom.c +++ b/arch/powerpc/platforms/powernv/opal-xscom.c | |||
@@ -96,9 +96,11 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value) | |||
96 | { | 96 | { |
97 | struct opal_scom_map *m = map; | 97 | struct opal_scom_map *m = map; |
98 | int64_t rc; | 98 | int64_t rc; |
99 | __be64 v; | ||
99 | 100 | ||
100 | reg = opal_scom_unmangle(reg); | 101 | reg = opal_scom_unmangle(reg); |
101 | rc = opal_xscom_read(m->chip, m->addr + reg, (uint64_t *)__pa(value)); | 102 | rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v)); |
103 | *value = be64_to_cpu(v); | ||
102 | return opal_xscom_err_xlate(rc); | 104 | return opal_xscom_err_xlate(rc); |
103 | } | 105 | } |
104 | 106 | ||
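The SCOM read above applies the same pattern at 64 bits: firmware writes into a local __be64 whose physical address it is handed, and the value is converted once before being stored through the caller's pointer, keeping the caller's u64 in CPU byte order. A sketch of that shape (names follow the hunk; the unmangling step is omitted for brevity):

static int example_scom_read(struct opal_scom_map *m, u64 reg, u64 *value)
{
	__be64 v;	/* firmware writes big-endian bytes into this local */
	int64_t rc;

	rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v));
	*value = be64_to_cpu(v);	/* single explicit swap for the caller */
	return opal_xscom_err_xlate(rc);
}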
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 911c24ef033e..1ed8d5f40f5a 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h | |||
@@ -172,11 +172,13 @@ struct pnv_phb { | |||
172 | } ioda; | 172 | } ioda; |
173 | }; | 173 | }; |
174 | 174 | ||
175 | /* PHB status structure */ | 175 | /* PHB and hub status structure */ |
176 | union { | 176 | union { |
177 | unsigned char blob[PNV_PCI_DIAG_BUF_SIZE]; | 177 | unsigned char blob[PNV_PCI_DIAG_BUF_SIZE]; |
178 | struct OpalIoP7IOCPhbErrorData p7ioc; | 178 | struct OpalIoP7IOCPhbErrorData p7ioc; |
179 | struct OpalIoP7IOCErrorData hub_diag; | ||
179 | } diag; | 180 | } diag; |
181 | |||
180 | }; | 182 | }; |
181 | 183 | ||
182 | extern struct pci_ops pnv_pci_ops; | 184 | extern struct pci_ops pnv_pci_ops; |
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c index e738007eae64..c9fecf09b8fa 100644 --- a/arch/powerpc/platforms/pseries/lparcfg.c +++ b/arch/powerpc/platforms/pseries/lparcfg.c | |||
@@ -157,7 +157,7 @@ static void parse_ppp_data(struct seq_file *m) | |||
157 | { | 157 | { |
158 | struct hvcall_ppp_data ppp_data; | 158 | struct hvcall_ppp_data ppp_data; |
159 | struct device_node *root; | 159 | struct device_node *root; |
160 | const int *perf_level; | 160 | const __be32 *perf_level; |
161 | int rc; | 161 | int rc; |
162 | 162 | ||
163 | rc = h_get_ppp(&ppp_data); | 163 | rc = h_get_ppp(&ppp_data); |
@@ -201,7 +201,7 @@ static void parse_ppp_data(struct seq_file *m) | |||
201 | perf_level = of_get_property(root, | 201 | perf_level = of_get_property(root, |
202 | "ibm,partition-performance-parameters-level", | 202 | "ibm,partition-performance-parameters-level", |
203 | NULL); | 203 | NULL); |
204 | if (perf_level && (*perf_level >= 1)) { | 204 | if (perf_level && (be32_to_cpup(perf_level) >= 1)) { |
205 | seq_printf(m, | 205 | seq_printf(m, |
206 | "physical_procs_allocated_to_virtualization=%d\n", | 206 | "physical_procs_allocated_to_virtualization=%d\n", |
207 | ppp_data.phys_platform_procs); | 207 | ppp_data.phys_platform_procs); |
@@ -435,7 +435,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) | |||
435 | int partition_potential_processors; | 435 | int partition_potential_processors; |
436 | int partition_active_processors; | 436 | int partition_active_processors; |
437 | struct device_node *rtas_node; | 437 | struct device_node *rtas_node; |
438 | const int *lrdrp = NULL; | 438 | const __be32 *lrdrp = NULL; |
439 | 439 | ||
440 | rtas_node = of_find_node_by_path("/rtas"); | 440 | rtas_node = of_find_node_by_path("/rtas"); |
441 | if (rtas_node) | 441 | if (rtas_node) |
@@ -444,7 +444,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) | |||
444 | if (lrdrp == NULL) { | 444 | if (lrdrp == NULL) { |
445 | partition_potential_processors = vdso_data->processorCount; | 445 | partition_potential_processors = vdso_data->processorCount; |
446 | } else { | 446 | } else { |
447 | partition_potential_processors = *(lrdrp + 4); | 447 | partition_potential_processors = be32_to_cpup(lrdrp + 4); |
448 | } | 448 | } |
449 | of_node_put(rtas_node); | 449 | of_node_put(rtas_node); |
450 | 450 | ||
@@ -654,7 +654,7 @@ static int lparcfg_data(struct seq_file *m, void *v) | |||
654 | const char *model = ""; | 654 | const char *model = ""; |
655 | const char *system_id = ""; | 655 | const char *system_id = ""; |
656 | const char *tmp; | 656 | const char *tmp; |
657 | const unsigned int *lp_index_ptr; | 657 | const __be32 *lp_index_ptr; |
658 | unsigned int lp_index = 0; | 658 | unsigned int lp_index = 0; |
659 | 659 | ||
660 | seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS); | 660 | seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS); |
@@ -670,7 +670,7 @@ static int lparcfg_data(struct seq_file *m, void *v) | |||
670 | lp_index_ptr = of_get_property(rootdn, "ibm,partition-no", | 670 | lp_index_ptr = of_get_property(rootdn, "ibm,partition-no", |
671 | NULL); | 671 | NULL); |
672 | if (lp_index_ptr) | 672 | if (lp_index_ptr) |
673 | lp_index = *lp_index_ptr; | 673 | lp_index = be32_to_cpup(lp_index_ptr); |
674 | of_node_put(rootdn); | 674 | of_node_put(rootdn); |
675 | } | 675 | } |
676 | seq_printf(m, "serial_number=%s\n", system_id); | 676 | seq_printf(m, "serial_number=%s\n", system_id); |
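Device-tree properties are stored big-endian on every powerpc host, so the lparcfg changes above replace each raw pointer dereference with be32_to_cpup(), including indexed cells such as lrdrp + 4. A generic sketch of the pattern (the helper name is illustrative; of_get_property() and be32_to_cpup() are the real kernel APIs):

#include <linux/of.h>

static u32 example_read_dt_cell(struct device_node *np, const char *name, int n)
{
	const __be32 *prop = of_get_property(np, name, NULL);

	if (!prop)
		return 0;			/* property not present */
	return be32_to_cpup(prop + n);		/* cell n, converted to CPU order */
}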
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index 6d2f0abce6fa..0c882e83c4ce 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c | |||
@@ -130,7 +130,8 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name) | |||
130 | { | 130 | { |
131 | struct device_node *dn; | 131 | struct device_node *dn; |
132 | struct pci_dn *pdn; | 132 | struct pci_dn *pdn; |
133 | const u32 *req_msi; | 133 | const __be32 *p; |
134 | u32 req_msi; | ||
134 | 135 | ||
135 | pdn = pci_get_pdn(pdev); | 136 | pdn = pci_get_pdn(pdev); |
136 | if (!pdn) | 137 | if (!pdn) |
@@ -138,19 +139,20 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name) | |||
138 | 139 | ||
139 | dn = pdn->node; | 140 | dn = pdn->node; |
140 | 141 | ||
141 | req_msi = of_get_property(dn, prop_name, NULL); | 142 | p = of_get_property(dn, prop_name, NULL); |
142 | if (!req_msi) { | 143 | if (!p) { |
143 | pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name); | 144 | pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name); |
144 | return -ENOENT; | 145 | return -ENOENT; |
145 | } | 146 | } |
146 | 147 | ||
147 | if (*req_msi < nvec) { | 148 | req_msi = be32_to_cpup(p); |
149 | if (req_msi < nvec) { | ||
148 | pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec); | 150 | pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec); |
149 | 151 | ||
150 | if (*req_msi == 0) /* Be paranoid */ | 152 | if (req_msi == 0) /* Be paranoid */ |
151 | return -ENOSPC; | 153 | return -ENOSPC; |
152 | 154 | ||
153 | return *req_msi; | 155 | return req_msi; |
154 | } | 156 | } |
155 | 157 | ||
156 | return 0; | 158 | return 0; |
@@ -171,7 +173,7 @@ static int check_req_msix(struct pci_dev *pdev, int nvec) | |||
171 | static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) | 173 | static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) |
172 | { | 174 | { |
173 | struct device_node *dn; | 175 | struct device_node *dn; |
174 | const u32 *p; | 176 | const __be32 *p; |
175 | 177 | ||
176 | dn = of_node_get(pci_device_to_OF_node(dev)); | 178 | dn = of_node_get(pci_device_to_OF_node(dev)); |
177 | while (dn) { | 179 | while (dn) { |
@@ -179,7 +181,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) | |||
179 | if (p) { | 181 | if (p) { |
180 | pr_debug("rtas_msi: found prop on dn %s\n", | 182 | pr_debug("rtas_msi: found prop on dn %s\n", |
181 | dn->full_name); | 183 | dn->full_name); |
182 | *total = *p; | 184 | *total = be32_to_cpup(p); |
183 | return dn; | 185 | return dn; |
184 | } | 186 | } |
185 | 187 | ||
@@ -232,13 +234,13 @@ struct msi_counts { | |||
232 | static void *count_non_bridge_devices(struct device_node *dn, void *data) | 234 | static void *count_non_bridge_devices(struct device_node *dn, void *data) |
233 | { | 235 | { |
234 | struct msi_counts *counts = data; | 236 | struct msi_counts *counts = data; |
235 | const u32 *p; | 237 | const __be32 *p; |
236 | u32 class; | 238 | u32 class; |
237 | 239 | ||
238 | pr_debug("rtas_msi: counting %s\n", dn->full_name); | 240 | pr_debug("rtas_msi: counting %s\n", dn->full_name); |
239 | 241 | ||
240 | p = of_get_property(dn, "class-code", NULL); | 242 | p = of_get_property(dn, "class-code", NULL); |
241 | class = p ? *p : 0; | 243 | class = p ? be32_to_cpup(p) : 0; |
242 | 244 | ||
243 | if ((class >> 8) != PCI_CLASS_BRIDGE_PCI) | 245 | if ((class >> 8) != PCI_CLASS_BRIDGE_PCI) |
244 | counts->num_devices++; | 246 | counts->num_devices++; |
@@ -249,7 +251,7 @@ static void *count_non_bridge_devices(struct device_node *dn, void *data) | |||
249 | static void *count_spare_msis(struct device_node *dn, void *data) | 251 | static void *count_spare_msis(struct device_node *dn, void *data) |
250 | { | 252 | { |
251 | struct msi_counts *counts = data; | 253 | struct msi_counts *counts = data; |
252 | const u32 *p; | 254 | const __be32 *p; |
253 | int req; | 255 | int req; |
254 | 256 | ||
255 | if (dn == counts->requestor) | 257 | if (dn == counts->requestor) |
@@ -260,11 +262,11 @@ static void *count_spare_msis(struct device_node *dn, void *data) | |||
260 | req = 0; | 262 | req = 0; |
261 | p = of_get_property(dn, "ibm,req#msi", NULL); | 263 | p = of_get_property(dn, "ibm,req#msi", NULL); |
262 | if (p) | 264 | if (p) |
263 | req = *p; | 265 | req = be32_to_cpup(p); |
264 | 266 | ||
265 | p = of_get_property(dn, "ibm,req#msi-x", NULL); | 267 | p = of_get_property(dn, "ibm,req#msi-x", NULL); |
266 | if (p) | 268 | if (p) |
267 | req = max(req, (int)*p); | 269 | req = max(req, (int)be32_to_cpup(p)); |
268 | } | 270 | } |
269 | 271 | ||
270 | if (req < counts->quota) | 272 | if (req < counts->quota) |
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index 7bfaf58d4664..d7096f2f7751 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c | |||
@@ -43,8 +43,8 @@ static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */ | |||
43 | static DEFINE_SPINLOCK(nvram_lock); | 43 | static DEFINE_SPINLOCK(nvram_lock); |
44 | 44 | ||
45 | struct err_log_info { | 45 | struct err_log_info { |
46 | int error_type; | 46 | __be32 error_type; |
47 | unsigned int seq_num; | 47 | __be32 seq_num; |
48 | }; | 48 | }; |
49 | 49 | ||
50 | struct nvram_os_partition { | 50 | struct nvram_os_partition { |
@@ -79,9 +79,9 @@ static const char *pseries_nvram_os_partitions[] = { | |||
79 | }; | 79 | }; |
80 | 80 | ||
81 | struct oops_log_info { | 81 | struct oops_log_info { |
82 | u16 version; | 82 | __be16 version; |
83 | u16 report_length; | 83 | __be16 report_length; |
84 | u64 timestamp; | 84 | __be64 timestamp; |
85 | } __attribute__((packed)); | 85 | } __attribute__((packed)); |
86 | 86 | ||
87 | static void oops_to_nvram(struct kmsg_dumper *dumper, | 87 | static void oops_to_nvram(struct kmsg_dumper *dumper, |
@@ -291,8 +291,8 @@ int nvram_write_os_partition(struct nvram_os_partition *part, char * buff, | |||
291 | length = part->size; | 291 | length = part->size; |
292 | } | 292 | } |
293 | 293 | ||
294 | info.error_type = err_type; | 294 | info.error_type = cpu_to_be32(err_type); |
295 | info.seq_num = error_log_cnt; | 295 | info.seq_num = cpu_to_be32(error_log_cnt); |
296 | 296 | ||
297 | tmp_index = part->index; | 297 | tmp_index = part->index; |
298 | 298 | ||
@@ -364,8 +364,8 @@ int nvram_read_partition(struct nvram_os_partition *part, char *buff, | |||
364 | } | 364 | } |
365 | 365 | ||
366 | if (part->os_partition) { | 366 | if (part->os_partition) { |
367 | *error_log_cnt = info.seq_num; | 367 | *error_log_cnt = be32_to_cpu(info.seq_num); |
368 | *err_type = info.error_type; | 368 | *err_type = be32_to_cpu(info.error_type); |
369 | } | 369 | } |
370 | 370 | ||
371 | return 0; | 371 | return 0; |
@@ -529,9 +529,9 @@ static int zip_oops(size_t text_len) | |||
529 | pr_err("nvram: logging uncompressed oops/panic report\n"); | 529 | pr_err("nvram: logging uncompressed oops/panic report\n"); |
530 | return -1; | 530 | return -1; |
531 | } | 531 | } |
532 | oops_hdr->version = OOPS_HDR_VERSION; | 532 | oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); |
533 | oops_hdr->report_length = (u16) zipped_len; | 533 | oops_hdr->report_length = cpu_to_be16(zipped_len); |
534 | oops_hdr->timestamp = get_seconds(); | 534 | oops_hdr->timestamp = cpu_to_be64(get_seconds()); |
535 | return 0; | 535 | return 0; |
536 | } | 536 | } |
537 | 537 | ||
@@ -574,9 +574,9 @@ static int nvram_pstore_write(enum pstore_type_id type, | |||
574 | clobbering_unread_rtas_event()) | 574 | clobbering_unread_rtas_event()) |
575 | return -1; | 575 | return -1; |
576 | 576 | ||
577 | oops_hdr->version = OOPS_HDR_VERSION; | 577 | oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); |
578 | oops_hdr->report_length = (u16) size; | 578 | oops_hdr->report_length = cpu_to_be16(size); |
579 | oops_hdr->timestamp = get_seconds(); | 579 | oops_hdr->timestamp = cpu_to_be64(get_seconds()); |
580 | 580 | ||
581 | if (compressed) | 581 | if (compressed) |
582 | err_type = ERR_TYPE_KERNEL_PANIC_GZ; | 582 | err_type = ERR_TYPE_KERNEL_PANIC_GZ; |
@@ -670,16 +670,16 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type, | |||
670 | size_t length, hdr_size; | 670 | size_t length, hdr_size; |
671 | 671 | ||
672 | oops_hdr = (struct oops_log_info *)buff; | 672 | oops_hdr = (struct oops_log_info *)buff; |
673 | if (oops_hdr->version < OOPS_HDR_VERSION) { | 673 | if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) { |
674 | /* Old format oops header had 2-byte record size */ | 674 | /* Old format oops header had 2-byte record size */ |
675 | hdr_size = sizeof(u16); | 675 | hdr_size = sizeof(u16); |
676 | length = oops_hdr->version; | 676 | length = be16_to_cpu(oops_hdr->version); |
677 | time->tv_sec = 0; | 677 | time->tv_sec = 0; |
678 | time->tv_nsec = 0; | 678 | time->tv_nsec = 0; |
679 | } else { | 679 | } else { |
680 | hdr_size = sizeof(*oops_hdr); | 680 | hdr_size = sizeof(*oops_hdr); |
681 | length = oops_hdr->report_length; | 681 | length = be16_to_cpu(oops_hdr->report_length); |
682 | time->tv_sec = oops_hdr->timestamp; | 682 | time->tv_sec = be64_to_cpu(oops_hdr->timestamp); |
683 | time->tv_nsec = 0; | 683 | time->tv_nsec = 0; |
684 | } | 684 | } |
685 | *buf = kmalloc(length, GFP_KERNEL); | 685 | *buf = kmalloc(length, GFP_KERNEL); |
@@ -889,13 +889,13 @@ static void oops_to_nvram(struct kmsg_dumper *dumper, | |||
889 | kmsg_dump_get_buffer(dumper, false, | 889 | kmsg_dump_get_buffer(dumper, false, |
890 | oops_data, oops_data_sz, &text_len); | 890 | oops_data, oops_data_sz, &text_len); |
891 | err_type = ERR_TYPE_KERNEL_PANIC; | 891 | err_type = ERR_TYPE_KERNEL_PANIC; |
892 | oops_hdr->version = OOPS_HDR_VERSION; | 892 | oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); |
893 | oops_hdr->report_length = (u16) text_len; | 893 | oops_hdr->report_length = cpu_to_be16(text_len); |
894 | oops_hdr->timestamp = get_seconds(); | 894 | oops_hdr->timestamp = cpu_to_be64(get_seconds()); |
895 | } | 895 | } |
896 | 896 | ||
897 | (void) nvram_write_os_partition(&oops_log_partition, oops_buf, | 897 | (void) nvram_write_os_partition(&oops_log_partition, oops_buf, |
898 | (int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type, | 898 | (int) (sizeof(*oops_hdr) + text_len), err_type, |
899 | ++oops_count); | 899 | ++oops_count); |
900 | 900 | ||
901 | spin_unlock_irqrestore(&lock, flags); | 901 | spin_unlock_irqrestore(&lock, flags); |
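The nvram changes treat the oops header as a fixed on-media format: its fields are declared __be16/__be64, every store goes through cpu_to_be16()/cpu_to_be64(), every load through be16_to_cpu()/be64_to_cpu(), and the length passed to nvram_write_os_partition() now comes from the CPU-order text_len rather than the already-swapped header field. A small illustrative sketch of that discipline (struct and helper names are made up; the conversion macros are real):

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_log_hdr {		/* on-media layout, explicit byte order */
	__be16 version;
	__be16 report_length;
	__be64 timestamp;
} __attribute__((packed));

static void example_fill_hdr(struct example_log_hdr *h, u16 len, u64 now)
{
	h->version       = cpu_to_be16(1);	/* swap on store ... */
	h->report_length = cpu_to_be16(len);
	h->timestamp     = cpu_to_be64(now);
}

static u16 example_report_len(const struct example_log_hdr *h)
{
	return be16_to_cpu(h->report_length);	/* ... and swap back on load */
}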
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 5f93856cdf47..70670a2d9cf2 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c | |||
@@ -113,7 +113,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | |||
113 | { | 113 | { |
114 | struct device_node *dn, *pdn; | 114 | struct device_node *dn, *pdn; |
115 | struct pci_bus *bus; | 115 | struct pci_bus *bus; |
116 | const uint32_t *pcie_link_speed_stats; | 116 | const __be32 *pcie_link_speed_stats; |
117 | 117 | ||
118 | bus = bridge->bus; | 118 | bus = bridge->bus; |
119 | 119 | ||
@@ -122,7 +122,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | |||
122 | return 0; | 122 | return 0; |
123 | 123 | ||
124 | for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { | 124 | for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { |
125 | pcie_link_speed_stats = (const uint32_t *) of_get_property(pdn, | 125 | pcie_link_speed_stats = of_get_property(pdn, |
126 | "ibm,pcie-link-speed-stats", NULL); | 126 | "ibm,pcie-link-speed-stats", NULL); |
127 | if (pcie_link_speed_stats) | 127 | if (pcie_link_speed_stats) |
128 | break; | 128 | break; |
@@ -135,7 +135,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | |||
135 | return 0; | 135 | return 0; |
136 | } | 136 | } |
137 | 137 | ||
138 | switch (pcie_link_speed_stats[0]) { | 138 | switch (be32_to_cpup(pcie_link_speed_stats)) { |
139 | case 0x01: | 139 | case 0x01: |
140 | bus->max_bus_speed = PCIE_SPEED_2_5GT; | 140 | bus->max_bus_speed = PCIE_SPEED_2_5GT; |
141 | break; | 141 | break; |
@@ -147,7 +147,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | |||
147 | break; | 147 | break; |
148 | } | 148 | } |
149 | 149 | ||
150 | switch (pcie_link_speed_stats[1]) { | 150 | switch (be32_to_cpup(pcie_link_speed_stats + 1)) { |
151 | case 0x01: | 151 | case 0x01: |
152 | bus->cur_bus_speed = PCIE_SPEED_2_5GT; | 152 | bus->cur_bus_speed = PCIE_SPEED_2_5GT; |
153 | break; | 153 | break; |