about summary refs log tree commit diff stats
path: root/arch/powerpc
diff options
context:
space:
mode:
authorAnton Blanchard <anton@samba.org>2013-08-06 12:01:46 -0400
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2013-08-14 01:33:35 -0400
commit7ffcf8ec26f4b94b95b1297131d223b121d951e5 (patch)
tree7b4b47eaeafe56c253350b14470fecf03b40277f /arch/powerpc
parentc72cd555e828b710bce8c3635254dbb483397142 (diff)
powerpc: Fix little endian lppaca, slb_shadow and dtl_entry
The lppaca, slb_shadow and dtl_entry hypervisor structures are big endian, so we have to byte swap them in little endian builds. LE KVM hosts will also need to be fixed, but for now add an #error to remind us.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--arch/powerpc/include/asm/asm-compat.h9
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h3
-rw-r--r--arch/powerpc/kernel/entry_64.S11
-rw-r--r--arch/powerpc/kernel/lparcfg.c9
-rw-r--r--arch/powerpc/kernel/paca.c10
-rw-r--r--arch/powerpc/kernel/time.c16
-rw-r--r--arch/powerpc/kvm/book3s_64_slb.S4
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S4
-rw-r--r--arch/powerpc/lib/locks.c4
-rw-r--r--arch/powerpc/mm/fault.c6
-rw-r--r--arch/powerpc/mm/slb.c9
-rw-r--r--arch/powerpc/platforms/pseries/dtl.c2
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c2
-rw-r--r--arch/powerpc/platforms/pseries/processor_idle.c6
-rw-r--r--arch/powerpc/platforms/pseries/setup.c2
15 files changed, 65 insertions, 32 deletions
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index 6e82f5f9a6fd..4b237aa35660 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -32,6 +32,15 @@
32#define PPC_MTOCRF(FXM, RS) MTOCRF((FXM), RS) 32#define PPC_MTOCRF(FXM, RS) MTOCRF((FXM), RS)
33#define PPC_LR_STKOFF 16 33#define PPC_LR_STKOFF 16
34#define PPC_MIN_STKFRM 112 34#define PPC_MIN_STKFRM 112
35
36#ifdef __BIG_ENDIAN__
37#define LDX_BE stringify_in_c(ldx)
38#define STDX_BE stringify_in_c(stdx)
39#else
40#define LDX_BE stringify_in_c(ldbrx)
41#define STDX_BE stringify_in_c(stdbrx)
42#endif
43
35#else /* 32-bit */ 44#else /* 32-bit */
36 45
37/* operations for longs and pointers */ 46/* operations for longs and pointers */
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index b5c85f18faee..4ebb4f8f4188 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -54,7 +54,8 @@ BEGIN_FW_FTR_SECTION; \
54 /* from user - see if there are any DTL entries to process */ \ 54 /* from user - see if there are any DTL entries to process */ \
55 ld r10,PACALPPACAPTR(r13); /* get ptr to VPA */ \ 55 ld r10,PACALPPACAPTR(r13); /* get ptr to VPA */ \
56 ld r11,PACA_DTL_RIDX(r13); /* get log read index */ \ 56 ld r11,PACA_DTL_RIDX(r13); /* get log read index */ \
57 ld r10,LPPACA_DTLIDX(r10); /* get log write index */ \ 57 addi r10,r10,LPPACA_DTLIDX; \
58 LDX_BE r10,0,r10; /* get log write index */ \
58 cmpd cr1,r11,r10; \ 59 cmpd cr1,r11,r10; \
59 beq+ cr1,33f; \ 60 beq+ cr1,33f; \
60 bl .accumulate_stolen_time; \ 61 bl .accumulate_stolen_time; \
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index c1055a150b88..707fbfde1324 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -102,7 +102,8 @@ BEGIN_FW_FTR_SECTION
102 /* if from user, see if there are any DTL entries to process */ 102 /* if from user, see if there are any DTL entries to process */
103 ld r10,PACALPPACAPTR(r13) /* get ptr to VPA */ 103 ld r10,PACALPPACAPTR(r13) /* get ptr to VPA */
104 ld r11,PACA_DTL_RIDX(r13) /* get log read index */ 104 ld r11,PACA_DTL_RIDX(r13) /* get log read index */
105 ld r10,LPPACA_DTLIDX(r10) /* get log write index */ 105 addi r10,r10,LPPACA_DTLIDX
106 LDX_BE r10,0,r10 /* get log write index */
106 cmpd cr1,r11,r10 107 cmpd cr1,r11,r10
107 beq+ cr1,33f 108 beq+ cr1,33f
108 bl .accumulate_stolen_time 109 bl .accumulate_stolen_time
@@ -531,9 +532,11 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
531 */ 532 */
532 ld r9,PACA_SLBSHADOWPTR(r13) 533 ld r9,PACA_SLBSHADOWPTR(r13)
533 li r12,0 534 li r12,0
534 std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */ 535 std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
535 std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */ 536 li r12,SLBSHADOW_STACKVSID
536 std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */ 537 STDX_BE r7,r12,r9 /* Save VSID */
538 li r12,SLBSHADOW_STACKESID
539 STDX_BE r0,r12,r9 /* Save ESID */
537 540
538 /* No need to check for MMU_FTR_NO_SLBIE_B here, since when 541 /* No need to check for MMU_FTR_NO_SLBIE_B here, since when
539 * we have 1TB segments, the only CPUs known to have the errata 542 * we have 1TB segments, the only CPUs known to have the errata
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index e6024c2ed5c7..0204089ebdd4 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -387,8 +387,8 @@ static void pseries_cmo_data(struct seq_file *m)
387 return; 387 return;
388 388
389 for_each_possible_cpu(cpu) { 389 for_each_possible_cpu(cpu) {
390 cmo_faults += lppaca_of(cpu).cmo_faults; 390 cmo_faults += be64_to_cpu(lppaca_of(cpu).cmo_faults);
391 cmo_fault_time += lppaca_of(cpu).cmo_fault_time; 391 cmo_fault_time += be64_to_cpu(lppaca_of(cpu).cmo_fault_time);
392 } 392 }
393 393
394 seq_printf(m, "cmo_faults=%lu\n", cmo_faults); 394 seq_printf(m, "cmo_faults=%lu\n", cmo_faults);
@@ -406,8 +406,9 @@ static void splpar_dispatch_data(struct seq_file *m)
406 unsigned long dispatch_dispersions = 0; 406 unsigned long dispatch_dispersions = 0;
407 407
408 for_each_possible_cpu(cpu) { 408 for_each_possible_cpu(cpu) {
409 dispatches += lppaca_of(cpu).yield_count; 409 dispatches += be32_to_cpu(lppaca_of(cpu).yield_count);
410 dispatch_dispersions += lppaca_of(cpu).dispersion_count; 410 dispatch_dispersions +=
411 be32_to_cpu(lppaca_of(cpu).dispersion_count);
411 } 412 }
412 413
413 seq_printf(m, "dispatches=%lu\n", dispatches); 414 seq_printf(m, "dispatches=%lu\n", dispatches);
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index f8f24685f10a..3fc16e3beb9f 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -34,10 +34,10 @@ extern unsigned long __toc_start;
34 */ 34 */
35struct lppaca lppaca[] = { 35struct lppaca lppaca[] = {
36 [0 ... (NR_LPPACAS-1)] = { 36 [0 ... (NR_LPPACAS-1)] = {
37 .desc = 0xd397d781, /* "LpPa" */ 37 .desc = cpu_to_be32(0xd397d781), /* "LpPa" */
38 .size = sizeof(struct lppaca), 38 .size = cpu_to_be16(sizeof(struct lppaca)),
39 .fpregs_in_use = 1, 39 .fpregs_in_use = 1,
40 .slb_count = 64, 40 .slb_count = cpu_to_be16(64),
41 .vmxregs_in_use = 0, 41 .vmxregs_in_use = 0,
42 .page_ins = 0, 42 .page_ins = 0,
43 }, 43 },
@@ -101,8 +101,8 @@ static inline void free_lppacas(void) { }
101 */ 101 */
102struct slb_shadow slb_shadow[] __cacheline_aligned = { 102struct slb_shadow slb_shadow[] __cacheline_aligned = {
103 [0 ... (NR_CPUS-1)] = { 103 [0 ... (NR_CPUS-1)] = {
104 .persistent = SLB_NUM_BOLTED, 104 .persistent = cpu_to_be32(SLB_NUM_BOLTED),
105 .buffer_length = sizeof(struct slb_shadow), 105 .buffer_length = cpu_to_be32(sizeof(struct slb_shadow)),
106 }, 106 },
107}; 107};
108 108
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index c863aa1f524a..b2bcd34f72d2 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -210,18 +210,18 @@ static u64 scan_dispatch_log(u64 stop_tb)
210 if (!dtl) 210 if (!dtl)
211 return 0; 211 return 0;
212 212
213 if (i == vpa->dtl_idx) 213 if (i == be64_to_cpu(vpa->dtl_idx))
214 return 0; 214 return 0;
215 while (i < vpa->dtl_idx) { 215 while (i < be64_to_cpu(vpa->dtl_idx)) {
216 if (dtl_consumer) 216 if (dtl_consumer)
217 dtl_consumer(dtl, i); 217 dtl_consumer(dtl, i);
218 dtb = dtl->timebase; 218 dtb = be64_to_cpu(dtl->timebase);
219 tb_delta = dtl->enqueue_to_dispatch_time + 219 tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
220 dtl->ready_to_enqueue_time; 220 be32_to_cpu(dtl->ready_to_enqueue_time);
221 barrier(); 221 barrier();
222 if (i + N_DISPATCH_LOG < vpa->dtl_idx) { 222 if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
223 /* buffer has overflowed */ 223 /* buffer has overflowed */
224 i = vpa->dtl_idx - N_DISPATCH_LOG; 224 i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
225 dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG); 225 dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
226 continue; 226 continue;
227 } 227 }
@@ -269,7 +269,7 @@ static inline u64 calculate_stolen_time(u64 stop_tb)
269{ 269{
270 u64 stolen = 0; 270 u64 stolen = 0;
271 271
272 if (get_paca()->dtl_ridx != get_paca()->lppaca_ptr->dtl_idx) { 272 if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) {
273 stolen = scan_dispatch_log(stop_tb); 273 stolen = scan_dispatch_log(stop_tb);
274 get_paca()->system_time -= stolen; 274 get_paca()->system_time -= stolen;
275 } 275 }
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 4f0caecc0f9d..4f12e8f0c718 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -17,6 +17,10 @@
17 * Authors: Alexander Graf <agraf@suse.de> 17 * Authors: Alexander Graf <agraf@suse.de>
18 */ 18 */
19 19
20#ifdef __LITTLE_ENDIAN__
21#error Need to fix SLB shadow accesses in little endian mode
22#endif
23
20#define SHADOW_SLB_ESID(num) (SLBSHADOW_SAVEAREA + (num * 0x10)) 24#define SHADOW_SLB_ESID(num) (SLBSHADOW_SAVEAREA + (num * 0x10))
21#define SHADOW_SLB_VSID(num) (SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8) 25#define SHADOW_SLB_VSID(num) (SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8)
22#define UNBOLT_SLB_ENTRY(num) \ 26#define UNBOLT_SLB_ENTRY(num) \
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index b02f91e4c70d..20e7fcdc4c95 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -29,6 +29,10 @@
29#include <asm/kvm_book3s_asm.h> 29#include <asm/kvm_book3s_asm.h>
30#include <asm/mmu-hash64.h> 30#include <asm/mmu-hash64.h>
31 31
32#ifdef __LITTLE_ENDIAN__
33#error Need to fix lppaca and SLB shadow accesses in little endian mode
34#endif
35
32/***************************************************************************** 36/*****************************************************************************
33 * * 37 * *
34 * Real Mode handlers that need to be in the linear mapping * 38 * Real Mode handlers that need to be in the linear mapping *
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index bb7cfecf2788..0c9c8d7d0734 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -32,7 +32,7 @@ void __spin_yield(arch_spinlock_t *lock)
32 return; 32 return;
33 holder_cpu = lock_value & 0xffff; 33 holder_cpu = lock_value & 0xffff;
34 BUG_ON(holder_cpu >= NR_CPUS); 34 BUG_ON(holder_cpu >= NR_CPUS);
35 yield_count = lppaca_of(holder_cpu).yield_count; 35 yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
36 if ((yield_count & 1) == 0) 36 if ((yield_count & 1) == 0)
37 return; /* virtual cpu is currently running */ 37 return; /* virtual cpu is currently running */
38 rmb(); 38 rmb();
@@ -57,7 +57,7 @@ void __rw_yield(arch_rwlock_t *rw)
57 return; /* no write lock at present */ 57 return; /* no write lock at present */
58 holder_cpu = lock_value & 0xffff; 58 holder_cpu = lock_value & 0xffff;
59 BUG_ON(holder_cpu >= NR_CPUS); 59 BUG_ON(holder_cpu >= NR_CPUS);
60 yield_count = lppaca_of(holder_cpu).yield_count; 60 yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
61 if ((yield_count & 1) == 0) 61 if ((yield_count & 1) == 0)
62 return; /* virtual cpu is currently running */ 62 return; /* virtual cpu is currently running */
63 rmb(); 63 rmb();
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 8726779e1409..76d8e7cc7805 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -443,8 +443,12 @@ good_area:
443 regs, address); 443 regs, address);
444#ifdef CONFIG_PPC_SMLPAR 444#ifdef CONFIG_PPC_SMLPAR
445 if (firmware_has_feature(FW_FEATURE_CMO)) { 445 if (firmware_has_feature(FW_FEATURE_CMO)) {
446 u32 page_ins;
447
446 preempt_disable(); 448 preempt_disable();
447 get_lppaca()->page_ins += (1 << PAGE_FACTOR); 449 page_ins = be32_to_cpu(get_lppaca()->page_ins);
450 page_ins += 1 << PAGE_FACTOR;
451 get_lppaca()->page_ins = cpu_to_be32(page_ins);
448 preempt_enable(); 452 preempt_enable();
449 } 453 }
450#endif /* CONFIG_PPC_SMLPAR */ 454#endif /* CONFIG_PPC_SMLPAR */
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index a538c80db2df..9d1d33cd2be5 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -66,8 +66,10 @@ static inline void slb_shadow_update(unsigned long ea, int ssize,
66 * we only update the current CPU's SLB shadow buffer. 66 * we only update the current CPU's SLB shadow buffer.
67 */ 67 */
68 get_slb_shadow()->save_area[entry].esid = 0; 68 get_slb_shadow()->save_area[entry].esid = 0;
69 get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, ssize, flags); 69 get_slb_shadow()->save_area[entry].vsid =
70 get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, ssize, entry); 70 cpu_to_be64(mk_vsid_data(ea, ssize, flags));
71 get_slb_shadow()->save_area[entry].esid =
72 cpu_to_be64(mk_esid_data(ea, ssize, entry));
71} 73}
72 74
73static inline void slb_shadow_clear(unsigned long entry) 75static inline void slb_shadow_clear(unsigned long entry)
@@ -112,7 +114,8 @@ static void __slb_flush_and_rebolt(void)
112 } else { 114 } else {
113 /* Update stack entry; others don't change */ 115 /* Update stack entry; others don't change */
114 slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2); 116 slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
115 ksp_vsid_data = get_slb_shadow()->save_area[2].vsid; 117 ksp_vsid_data =
118 be64_to_cpu(get_slb_shadow()->save_area[2].vsid);
116 } 119 }
117 120
118 /* We need to do this all in asm, so we're sure we don't touch 121 /* We need to do this all in asm, so we're sure we don't touch
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 0cc0ac07a55d..238240e02ef8 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -87,7 +87,7 @@ static void consume_dtle(struct dtl_entry *dtle, u64 index)
87 barrier(); 87 barrier();
88 88
89 /* check for hypervisor ring buffer overflow, ignore this entry if so */ 89 /* check for hypervisor ring buffer overflow, ignore this entry if so */
90 if (index + N_DISPATCH_LOG < vpa->dtl_idx) 90 if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx))
91 return; 91 return;
92 92
93 ++wp; 93 ++wp;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 60b6f4e8d63d..0b7c86e3d75d 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -106,7 +106,7 @@ void vpa_init(int cpu)
106 lppaca_of(cpu).dtl_idx = 0; 106 lppaca_of(cpu).dtl_idx = 0;
107 107
108 /* hypervisor reads buffer length from this field */ 108 /* hypervisor reads buffer length from this field */
109 dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES; 109 dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
110 ret = register_dtl(hwcpu, __pa(dtl)); 110 ret = register_dtl(hwcpu, __pa(dtl));
111 if (ret) 111 if (ret)
112 pr_err("WARNING: DTL registration of cpu %d (hw %d) " 112 pr_err("WARNING: DTL registration of cpu %d (hw %d) "
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index 92db881be27e..14899b1db1e9 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -45,7 +45,11 @@ static inline void idle_loop_prolog(unsigned long *in_purr)
45 45
46static inline void idle_loop_epilog(unsigned long in_purr) 46static inline void idle_loop_epilog(unsigned long in_purr)
47{ 47{
48 get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr; 48 u64 wait_cycles;
49
50 wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
51 wait_cycles += mfspr(SPRN_PURR) - in_purr;
52 get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
49 get_lppaca()->idle = 0; 53 get_lppaca()->idle = 0;
50} 54}
51 55
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index b19cd8334630..33d619665cb7 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -323,7 +323,7 @@ static int alloc_dispatch_logs(void)
323 get_paca()->lppaca_ptr->dtl_idx = 0; 323 get_paca()->lppaca_ptr->dtl_idx = 0;
324 324
325 /* hypervisor reads buffer length from this field */ 325 /* hypervisor reads buffer length from this field */
326 dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES; 326 dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
327 ret = register_dtl(hard_smp_processor_id(), __pa(dtl)); 327 ret = register_dtl(hard_smp_processor_id(), __pa(dtl));
328 if (ret) 328 if (ret)
329 pr_err("WARNING: DTL registration of cpu %d (hw %d) failed " 329 pr_err("WARNING: DTL registration of cpu %d (hw %d) failed "