aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2018-03-19 06:38:13 -0400
committerIngo Molnar <mingo@kernel.org>2018-03-20 05:01:05 -0400
commit5927145efd5de71976e62e2822511b13014d7e56 (patch)
tree18021521ba0a8c8806cb2750035680d4efeef197
parentc55b8550fa57ba4f5e507be406ff9fc2845713e8 (diff)
x86/cpu: Remove the CONFIG_X86_PPRO_FENCE=y quirk
There were only a few Pentium Pro multiprocessor systems where this
errata applied. They are more than 20 years old now, and we've slowly
dropped places which put the workarounds in and discouraged anyone
from enabling the workaround.

Get rid of it for good.

Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Jon Mason <jdmason@kudzu.us>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Muli Ben-Yehuda <mulix@mulix.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: iommu@lists.linux-foundation.org
Link: http://lkml.kernel.org/r/20180319103826.12853-2-hch@lst.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--arch/x86/Kconfig.cpu13
-rw-r--r--arch/x86/entry/vdso/vdso32/vclock_gettime.c2
-rw-r--r--arch/x86/include/asm/barrier.h30
-rw-r--r--arch/x86/include/asm/io.h15
-rw-r--r--arch/x86/kernel/pci-nommu.c19
-rw-r--r--arch/x86/um/asm/barrier.h4
6 files changed, 0 insertions, 83 deletions
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 65a9a4716e34..f0c5ef578153 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -315,19 +315,6 @@ config X86_L1_CACHE_SHIFT
 	default "4" if MELAN || M486 || MGEODEGX1
 	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
 
-config X86_PPRO_FENCE
-	bool "PentiumPro memory ordering errata workaround"
-	depends on M686 || M586MMX || M586TSC || M586 || M486 || MGEODEGX1
-	---help---
-	  Old PentiumPro multiprocessor systems had errata that could cause
-	  memory operations to violate the x86 ordering standard in rare cases.
-	  Enabling this option will attempt to work around some (but not all)
-	  occurrences of this problem, at the cost of much heavier spinlock and
-	  memory barrier operations.
-
-	  If unsure, say n here. Even distro kernels should think twice before
-	  enabling this: there are few systems, and an unlikely bug.
-
 config X86_F00F_BUG
 	def_bool y
 	depends on M586MMX || M586TSC || M586 || M486
diff --git a/arch/x86/entry/vdso/vdso32/vclock_gettime.c b/arch/x86/entry/vdso/vdso32/vclock_gettime.c
index 7780bbfb06ef..9242b28418d5 100644
--- a/arch/x86/entry/vdso/vdso32/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vdso32/vclock_gettime.c
@@ -5,8 +5,6 @@
 #undef CONFIG_OPTIMIZE_INLINING
 #endif
 
-#undef CONFIG_X86_PPRO_FENCE
-
 #ifdef CONFIG_X86_64
 
 /*
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index e1259f043ae9..042b5e892ed1 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -52,11 +52,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 #define barrier_nospec() alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, \
 					   "lfence", X86_FEATURE_LFENCE_RDTSC)
 
-#ifdef CONFIG_X86_PPRO_FENCE
-#define dma_rmb()	rmb()
-#else
 #define dma_rmb()	barrier()
-#endif
 #define dma_wmb()	barrier()
 
 #ifdef CONFIG_X86_32
@@ -68,30 +64,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 #define __smp_wmb()	barrier()
 #define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
 
-#if defined(CONFIG_X86_PPRO_FENCE)
-
-/*
- * For this option x86 doesn't have a strong TSO memory
- * model and we should fall back to full barriers.
- */
-
-#define __smp_store_release(p, v)					\
-do {									\
-	compiletime_assert_atomic_type(*p);				\
-	__smp_mb();							\
-	WRITE_ONCE(*p, v);						\
-} while (0)
-
-#define __smp_load_acquire(p)						\
-({									\
-	typeof(*p) ___p1 = READ_ONCE(*p);				\
-	compiletime_assert_atomic_type(*p);				\
-	__smp_mb();							\
-	___p1;								\
-})
-
-#else /* regular x86 TSO memory ordering */
-
 #define __smp_store_release(p, v)					\
 do {									\
 	compiletime_assert_atomic_type(*p);				\
@@ -107,8 +79,6 @@ do {									\
 	___p1;								\
 })
 
-#endif
-
 /* Atomic operations are already serializing on x86 */
 #define __smp_mb__before_atomic()	barrier()
 #define __smp_mb__after_atomic()	barrier()
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 95e948627fd0..f6e5b9375d8c 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -232,21 +232,6 @@ extern void set_iounmap_nonlazy(void);
  */
 #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
 
-/*
- * Cache management
- *
- * This needed for two cases
- * 1. Out of order aware processors
- * 2. Accidentally out of order processors (PPro errata #51)
- */
-
-static inline void flush_write_buffers(void)
-{
-#if defined(CONFIG_X86_PPRO_FENCE)
-	asm volatile("lock; addl $0,0(%%esp)": : :"memory");
-#endif
-}
-
 #endif /* __KERNEL__ */
 
 extern void native_io_delay(void);
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 618285e475c6..ac7ea3a8242f 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -37,7 +37,6 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
 	WARN_ON(size == 0);
 	if (!check_addr("map_single", dev, bus, size))
 		return NOMMU_MAPPING_ERROR;
-	flush_write_buffers();
 	return bus;
 }
 
@@ -72,25 +71,9 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
 			return 0;
 		s->dma_length = s->length;
 	}
-	flush_write_buffers();
 	return nents;
 }
 
-static void nommu_sync_single_for_device(struct device *dev,
-			dma_addr_t addr, size_t size,
-			enum dma_data_direction dir)
-{
-	flush_write_buffers();
-}
-
-
-static void nommu_sync_sg_for_device(struct device *dev,
-			struct scatterlist *sg, int nelems,
-			enum dma_data_direction dir)
-{
-	flush_write_buffers();
-}
-
 static int nommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return dma_addr == NOMMU_MAPPING_ERROR;
@@ -101,8 +84,6 @@ const struct dma_map_ops nommu_dma_ops = {
 	.free			= dma_generic_free_coherent,
 	.map_sg			= nommu_map_sg,
 	.map_page		= nommu_map_page,
-	.sync_single_for_device = nommu_sync_single_for_device,
-	.sync_sg_for_device	= nommu_sync_sg_for_device,
 	.is_phys		= 1,
 	.mapping_error		= nommu_mapping_error,
 	.dma_supported		= x86_dma_supported,
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index b7d73400ea29..f31e5d903161 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -30,11 +30,7 @@
 
 #endif /* CONFIG_X86_32 */
 
-#ifdef CONFIG_X86_PPRO_FENCE
-#define dma_rmb()	rmb()
-#else /* CONFIG_X86_PPRO_FENCE */
 #define dma_rmb()	barrier()
-#endif /* CONFIG_X86_PPRO_FENCE */
 #define dma_wmb()	barrier()
 
 #include <asm-generic/barrier.h>