-rw-r--r--  arch/powerpc/Kconfig                 1
-rw-r--r--  arch/powerpc/include/asm/Kbuild      1
-rw-r--r--  arch/powerpc/include/asm/io.h       33
-rw-r--r--  arch/powerpc/include/asm/mmiowb.h   20
-rw-r--r--  arch/powerpc/include/asm/paca.h      6
-rw-r--r--  arch/powerpc/include/asm/spinlock.h 17
-rw-r--r--  arch/powerpc/xmon/xmon.c             5
7 files changed, 33 insertions(+), 50 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2d0be82c3061..5e3d0853c31d 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -132,6 +132,7 @@ config PPC
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAS_KCOV
+	select ARCH_HAS_MMIOWB			if PPC64
 	select ARCH_HAS_PHYS_TO_DMA
 	select ARCH_HAS_PMEM_API		if PPC64
 	select ARCH_HAS_PTE_SPECIAL
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 74b6605ca55f..a0c132bedfae 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -7,7 +7,6 @@ generic-y += export.h
 generic-y += irq_regs.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
-generic-y += mmiowb.h
 generic-y += preempt.h
 generic-y += rwsem.h
 generic-y += vtime.h
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 4b73847e9b95..1fad67b46409 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -34,14 +34,11 @@ extern struct pci_dev *isa_bridge_pcidev;
 #include <asm/byteorder.h>
 #include <asm/synch.h>
 #include <asm/delay.h>
+#include <asm/mmiowb.h>
 #include <asm/mmu.h>
 #include <asm/ppc_asm.h>
 #include <asm/pgtable.h>
 
-#ifdef CONFIG_PPC64
-#include <asm/paca.h>
-#endif
-
 #define SIO_CONFIG_RA	0x398
 #define SIO_CONFIG_RD	0x399
 
@@ -107,12 +104,6 @@ extern bool isa_io_special;
  *
  */
 
-#ifdef CONFIG_PPC64
-#define IO_SET_SYNC_FLAG()	do { local_paca->io_sync = 1; } while(0)
-#else
-#define IO_SET_SYNC_FLAG()
-#endif
-
 #define DEF_MMIO_IN_X(name, size, insn)				\
 static inline u##size name(const volatile u##size __iomem *addr)	\
 {									\
@@ -127,7 +118,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \
 {									\
 	__asm__ __volatile__("sync;"#insn" %1,%y0"			\
 		: "=Z" (*addr) : "r" (val) : "memory");			\
-	IO_SET_SYNC_FLAG();						\
+	mmiowb_set_pending();						\
 }
 
 #define DEF_MMIO_IN_D(name, size, insn)				\
@@ -144,7 +135,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \
 {									\
 	__asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0"			\
 		: "=m" (*addr) : "r" (val) : "memory");			\
-	IO_SET_SYNC_FLAG();						\
+	mmiowb_set_pending();						\
 }
 
 DEF_MMIO_IN_D(in_8, 8, lbz);
@@ -652,24 +643,6 @@ static inline void name at \
 
 #include <asm-generic/iomap.h>
 
-#ifdef CONFIG_PPC32
-#define mmiowb()
-#else
-/*
- * Enforce synchronisation of stores vs. spin_unlock
- * (this does it explicitly, though our implementation of spin_unlock
- * does it implicitely too)
- */
-static inline void mmiowb(void)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__("sync; li %0,0; stb %0,%1(13)"
-	: "=&r" (tmp) : "i" (offsetof(struct paca_struct, io_sync))
-	: "memory");
-}
-#endif /* !CONFIG_PPC32 */
-
 static inline void iosync(void)
 {
 	__asm__ __volatile__ ("sync" : : : "memory");
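
The mmiowb_set_pending() call that replaces IO_SET_SYNC_FLAG() comes from the generic layer that the new <asm/mmiowb.h> include pulls in. A minimal sketch of the helper, reconstructed from memory of include/asm-generic/mmiowb.h, so treat the exact body as approximate rather than verbatim:

/*
 * Sketch (assumed, not verbatim): record that an MMIO write was issued.
 * __mmiowb_state() returns this CPU's struct mmiowb_state; on powerpc it
 * resolves to &local_paca->mmiowb_state via arch_mmiowb_state().
 */
static inline void mmiowb_set_pending(void)
{
	struct mmiowb_state *ms = __mmiowb_state();

	/* nesting_count is non-zero only while a spinlock is held. */
	ms->mmiowb_pending = ms->nesting_count;
}

Because the pending flag is stamped with the current lock nesting count, an MMIO write issued outside any critical section leaves the flag at zero, so a later unrelated unlock has nothing stale to flush.
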
diff --git a/arch/powerpc/include/asm/mmiowb.h b/arch/powerpc/include/asm/mmiowb.h
new file mode 100644
index 000000000000..b10180613507
--- /dev/null
+++ b/arch/powerpc/include/asm/mmiowb.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_MMIOWB_H
+#define _ASM_POWERPC_MMIOWB_H
+
+#ifdef CONFIG_MMIOWB
+
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+#include <asm/paca.h>
+
+#define arch_mmiowb_state()	(&local_paca->mmiowb_state)
+#define mmiowb()		mb()
+
+#else
+#define mmiowb()		do { } while (0)
+#endif /* CONFIG_MMIOWB */
+
+#include <asm-generic/mmiowb.h>
+
+#endif /* _ASM_POWERPC_MMIOWB_H */
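
Defining arch_mmiowb_state() lets powerpc keep the tracking state in the paca, which is always reachable through r13, instead of in a generic per-CPU variable. A sketch of the dispatch in include/asm-generic/mmiowb.h, from memory (the exact macro and accessor spellings are assumptions):

#ifndef arch_mmiowb_state
/* Fallback: ordinary per-CPU storage for architectures without a paca. */
DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);
#define __mmiowb_state()	this_cpu_ptr(&__mmiowb_state)
#else
/* powerpc takes this branch: state lives in local_paca->mmiowb_state. */
#define __mmiowb_state()	arch_mmiowb_state()
#endif

mmiowb() itself remains a full barrier (mb(), i.e. sync) when CONFIG_MMIOWB is enabled; the gain over the old io_sync scheme is that the bookkeeping now lives in one generic place and understands nested locks, instead of being open-coded in the architecture's spinlock primitives.
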
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index e843bc5d1a0f..134e912d403f 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -34,6 +34,8 @@
 #include <asm/cpuidle.h>
 #include <asm/atomic.h>
 
+#include <asm-generic/mmiowb_types.h>
+
 register struct paca_struct *local_paca asm("r13");
 
 #if defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_SMP)
@@ -171,7 +173,6 @@ struct paca_struct {
 	u16 trap_save;			/* Used when bad stack is encountered */
 	u8 irq_soft_mask;		/* mask for irq soft masking */
 	u8 irq_happened;		/* irq happened while soft-disabled */
-	u8 io_sync;			/* writel() needs spin_unlock sync */
 	u8 irq_work_pending;		/* IRQ_WORK interrupt while soft-disable */
 	u8 nap_state_lost;		/* NV GPR values lost in power7_idle */
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -264,6 +265,9 @@ struct paca_struct {
 #ifdef CONFIG_STACKPROTECTOR
 	unsigned long canary;
 #endif
+#ifdef CONFIG_MMIOWB
+	struct mmiowb_state mmiowb_state;
+#endif
 } ____cacheline_aligned;
 
 extern void copy_mm_to_paca(struct mm_struct *mm);
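
struct mmiowb_state, embedded above in place of the old u8 io_sync, is declared by the <asm-generic/mmiowb_types.h> header now included at the top of this file. Roughly, from memory (the field widths are an assumption):

/* Sketch of include/asm-generic/mmiowb_types.h. */
struct mmiowb_state {
	u16	nesting_count;	/* spinlock nesting depth on this CPU */
	u16	mmiowb_pending;	/* MMIO write issued while a lock was held */
};

Two fields instead of one flag: the nesting count tells the generic code whether an MMIO write happened inside a critical section at all, so unlocks never issue a barrier on behalf of writes done outside any lock.
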
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 685c72310f5d..15b39c407c4e 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -39,19 +39,6 @@
 #define LOCK_TOKEN	1
 #endif
 
-#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
-#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
-#define SYNC_IO		do {						\
-				if (unlikely(get_paca()->io_sync)) {	\
-					mb();				\
-					get_paca()->io_sync = 0;	\
-				}					\
-			} while (0)
-#else
-#define CLEAR_IO_SYNC
-#define SYNC_IO
-#endif
-
 #ifdef CONFIG_PPC_PSERIES
 #define vcpu_is_preempted vcpu_is_preempted
 static inline bool vcpu_is_preempted(int cpu)
@@ -99,7 +86,6 @@ static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
-	CLEAR_IO_SYNC;
 	return __arch_spin_trylock(lock) == 0;
 }
 
@@ -130,7 +116,6 @@ extern void __rw_yield(arch_rwlock_t *lock);
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
-	CLEAR_IO_SYNC;
 	while (1) {
 		if (likely(__arch_spin_trylock(lock) == 0))
 			break;
@@ -148,7 +133,6 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	unsigned long flags_dis;
 
-	CLEAR_IO_SYNC;
 	while (1) {
 		if (likely(__arch_spin_trylock(lock) == 0))
 			break;
@@ -167,7 +151,6 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	SYNC_IO;
 	__asm__ __volatile__("# arch_spin_unlock\n\t"
 				PPC_RELEASE_BARRIER: : :"memory");
 	lock->slock = 0;
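
The CLEAR_IO_SYNC/SYNC_IO work removed here has not simply vanished: equivalent hooks are now invoked from the generic locking code (do_raw_spin_lock()/do_raw_spin_unlock() and friends), so every architecture that selects ARCH_HAS_MMIOWB gets them without touching its own lock primitives. A sketch of those helpers, again reconstructed from memory of include/asm-generic/mmiowb.h:

static inline void mmiowb_spin_lock(void)
{
	/* Entering a critical section: bump this CPU's nesting depth. */
	__mmiowb_state()->nesting_count++;
}

static inline void mmiowb_spin_unlock(void)
{
	struct mmiowb_state *ms = __mmiowb_state();

	/* An MMIO write happened under this lock: order it before release. */
	if (unlikely(ms->mmiowb_pending)) {
		ms->mmiowb_pending = 0;
		mmiowb();		/* mb() on powerpc, per asm/mmiowb.h */
	}

	ms->nesting_count--;
}

This mirrors the old SYNC_IO test-and-barrier in arch_spin_unlock(), but the state lives in struct mmiowb_state and the check is shared generic code rather than duplicated in each powerpc lock routine.
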
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index a0f44f992360..13c6a47e6150 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2429,7 +2429,10 @@ static void dump_one_paca(int cpu)
 	DUMP(p, trap_save, "%#-*x");
 	DUMP(p, irq_soft_mask, "%#-*x");
 	DUMP(p, irq_happened, "%#-*x");
-	DUMP(p, io_sync, "%#-*x");
+#ifdef CONFIG_MMIOWB
+	DUMP(p, mmiowb_state.nesting_count, "%#-*x");
+	DUMP(p, mmiowb_state.mmiowb_pending, "%#-*x");
+#endif
 	DUMP(p, irq_work_pending, "%#-*x");
 	DUMP(p, nap_state_lost, "%#-*x");
 	DUMP(p, sprg_vdso, "%#-*llx");