author     Dan Williams <dan.j.williams@intel.com>    2015-08-24 18:29:38 -0400
committer  Dan Williams <dan.j.williams@intel.com>    2015-08-27 19:40:59 -0400
commit     96601adb745186ccbcf5b078d4756f13381ec2af
tree       1dff922da6a102e55978278c8ad078f30a7384b8
parent     41e94a851304f7acac840adec4004f8aeee53ad4
x86, pmem: clarify that ARCH_HAS_PMEM_API implies PMEM mapped WB
Given that a write-back (WB) mapping plus non-temporal stores is
expected to be the most efficient way to access PMEM, update the
definition of ARCH_HAS_PMEM_API to imply arch support for
WB-mapped-PMEM. This is needed as a prerequisite for adding PMEM to
the direct map and mapping it with struct page.
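As a rough sketch of what that implies (condensed from the include/linux/pmem.h
hunk below, not new code in its own right): an arch that selects
ARCH_HAS_PMEM_API now also commits to a write-back mapping for pmem, so the
generic header can assume it unconditionally:

    /* sketch: ARCH_HAS_PMEM_API now implies "pmem may be mapped WB" */
    #ifdef CONFIG_ARCH_HAS_PMEM_API
    #define ARCH_MEMREMAP_PMEM MEMREMAP_WB  /* WB mapping + non-temporal arch helpers */
    #include <asm/pmem.h>                   /* arch_memcpy_to_pmem(), arch_wmb_pmem(), ... */
    #endif                                  /* !ARCH_HAS_PMEM_API fallback covered below */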
The above clarification for X86_64 means that memcpy_to_pmem() is
permitted to use the non-temporal arch_memcpy_to_pmem() rather than
needlessly fall back to default_memcpy_to_pmem() when the pcommit
instruction is not available. When arch_memcpy_to_pmem() is not
guaranteed to flush writes out of cache, i.e. on older X86_32
implementations where non-temporal stores may just dirty cache,
ARCH_HAS_PMEM_API is simply disabled.
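The effect on the copy path can be sketched as follows (illustrative; the body
of memcpy_to_pmem() is not touched by this patch and is not shown in the hunks
below, so treat the exact shape as an assumption): since arch_has_pmem_api()
becomes a pure compile-time check, the non-temporal path is taken whenever the
config option is set, pcommit or not:

    static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
    {
            if (arch_has_pmem_api())        /* now just IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) */
                    arch_memcpy_to_pmem(dst, src, n);       /* non-temporal stores on x86_64 */
            else
                    default_memcpy_to_pmem(dst, src, n);    /* plain memcpy, relies on the WT fallback mapping */
    }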
The default fallback for persistent memory handling remains: map it with
the WT (write-through) cache type and hope for the best.
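In header terms the fallback is now expressed directly in include/linux/pmem.h
(condensed from the hunk below):

    #else   /* !CONFIG_ARCH_HAS_PMEM_API */
    #define ARCH_MEMREMAP_PMEM MEMREMAP_WT  /* the "hope for the best" write-through default */
    /* ... compile-only stubs for __arch_has_wmb_pmem(), arch_wmb_pmem(), etc. ... */
    #endif

With ARCH_MEMREMAP_PMEM defined in both branches, memremap_pmem() can pass it
to devm_memremap() unconditionally, which is why the #ifdef ARCH_MEMREMAP_PMEM
/ MEMREMAP_WT special case is removed from that helper.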
arch_has_pmem_api() is updated to only indicate whether the arch
provides the proper helpers to meet the minimum guarantee that "writes
are visible outside the cache hierarchy after memcpy_to_pmem() +
wmb_pmem()". Code
that cares whether wmb_pmem() actually flushes writes to pmem must now
call arch_has_wmb_pmem() directly.
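For callers the resulting split can be sketched like this (illustrative only:
example_write(), dev, dst, src and n are made-up names, but the checks mirror
the nfit and pmem driver changes below):

    static void example_write(struct device *dev, void __pmem *dst,
                    const void *src, size_t n)
    {
            /* durability of wmb_pmem() is now a separate, explicit question */
            if (!arch_has_wmb_pmem())
                    dev_warn(dev, "unable to guarantee persistence of writes\n");

            memcpy_to_pmem(dst, src, n);    /* non-temporal copy when ARCH_HAS_PMEM_API */
            wmb_pmem();                     /* arch_wmb_pmem() when durable, plain wmb() otherwise */
    }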
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
[hch: set ARCH_HAS_PMEM_API=n on x86_32]
Reviewed-by: Christoph Hellwig <hch@lst.de>
[toshi: x86_32 compile fixes]
Signed-off-by: Toshi Kani <toshi.kani@hp.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r--  arch/x86/Kconfig            |  2
-rw-r--r--  arch/x86/include/asm/pmem.h |  9
-rw-r--r--  drivers/acpi/nfit.c         |  3
-rw-r--r--  drivers/nvdimm/pmem.c       |  2
-rw-r--r--  include/linux/pmem.h        | 36
5 files changed, 27 insertions(+), 25 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 03ab6122325a..ef4c6bbb3af1 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -27,7 +27,7 @@ config X86
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FAST_MULTIPLIER
 	select ARCH_HAS_GCOV_PROFILE_ALL
-	select ARCH_HAS_PMEM_API
+	select ARCH_HAS_PMEM_API		if X86_64
 	select ARCH_HAS_MMIO_FLUSH
 	select ARCH_HAS_SG_CHAIN
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index bb026c5adf8a..d8ce3ec816ab 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -18,8 +18,6 @@
 #include <asm/cpufeature.h>
 #include <asm/special_insns.h>
 
-#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
-
 #ifdef CONFIG_ARCH_HAS_PMEM_API
 /**
  * arch_memcpy_to_pmem - copy data to persistent memory
@@ -143,18 +141,13 @@ static inline void arch_clear_pmem(void __pmem *addr, size_t size)
 	__arch_wb_cache_pmem(vaddr, size);
 }
 
-static inline bool arch_has_wmb_pmem(void)
+static inline bool __arch_has_wmb_pmem(void)
 {
-#ifdef CONFIG_X86_64
 	/*
 	 * We require that wmb() be an 'sfence', that is only guaranteed on
 	 * 64-bit builds
 	 */
 	return static_cpu_has(X86_FEATURE_PCOMMIT);
-#else
-	return false;
-#endif
 }
 #endif /* CONFIG_ARCH_HAS_PMEM_API */
-
 #endif /* __ASM_X86_PMEM_H__ */
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index 56fff0141636..f61e69fa2ad1 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -20,6 +20,7 @@
 #include <linux/sort.h>
 #include <linux/pmem.h>
 #include <linux/io.h>
+#include <asm/cacheflush.h>
 #include "nfit.h"
 
 /*
@@ -1371,7 +1372,7 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
 		return -ENOMEM;
 	}
 
-	if (!arch_has_pmem_api() && !nfit_blk->nvdimm_flush)
+	if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush)
 		dev_warn(dev, "unable to guarantee persistence of writes\n");
 
 	if (mmio->line_size == 0)
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 3b5b9cb758b6..20bf122328da 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -125,7 +125,7 @@ static struct pmem_device *pmem_alloc(struct device *dev,
 
 	pmem->phys_addr = res->start;
 	pmem->size = resource_size(res);
-	if (!arch_has_pmem_api())
+	if (!arch_has_wmb_pmem())
 		dev_warn(dev, "unable to guarantee persistence of writes\n");
 
 	if (!devm_request_mem_region(dev, pmem->phys_addr, pmem->size,
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index a9d84bf335ee..85f810b33917 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -17,16 +17,23 @@
 #include <linux/uio.h>
 
 #ifdef CONFIG_ARCH_HAS_PMEM_API
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
 #include <asm/pmem.h>
 #else
-static inline void arch_wmb_pmem(void)
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
+/*
+ * These are simply here to enable compilation, all call sites gate
+ * calling these symbols with arch_has_pmem_api() and redirect to the
+ * implementation in asm/pmem.h.
+ */
+static inline bool __arch_has_wmb_pmem(void)
 {
-	BUG();
+	return false;
 }
 
-static inline bool arch_has_wmb_pmem(void)
+static inline void arch_wmb_pmem(void)
 {
-	return false;
+	BUG();
 }
 
 static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
@@ -53,7 +60,6 @@ static inline void arch_clear_pmem(void __pmem *addr, size_t size)
  * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(),
  * arch_copy_from_iter_pmem(), arch_clear_pmem() and arch_has_wmb_pmem().
  */
-
 static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
 {
 	memcpy(dst, (void __force const *) src, size);
@@ -64,8 +70,13 @@ static inline void memunmap_pmem(struct device *dev, void __pmem *addr)
 	devm_memunmap(dev, (void __force *) addr);
 }
 
+static inline bool arch_has_pmem_api(void)
+{
+	return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
+}
+
 /**
- * arch_has_pmem_api - true if wmb_pmem() ensures durability
+ * arch_has_wmb_pmem - true if wmb_pmem() ensures durability
  *
  * For a given cpu implementation within an architecture it is possible
  * that wmb_pmem() resolves to a nop. In the case this returns
@@ -73,9 +84,9 @@ static inline void memunmap_pmem(struct device *dev, void __pmem *addr)
  * fall back to a different data consistency model, or otherwise notify
  * the user.
  */
-static inline bool arch_has_pmem_api(void)
+static inline bool arch_has_wmb_pmem(void)
 {
-	return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && arch_has_wmb_pmem();
+	return arch_has_pmem_api() && __arch_has_wmb_pmem();
 }
 
 /*
@@ -120,13 +131,8 @@ static inline void default_clear_pmem(void __pmem *addr, size_t size)
 static inline void __pmem *memremap_pmem(struct device *dev,
 		resource_size_t offset, unsigned long size)
 {
-#ifdef ARCH_MEMREMAP_PMEM
 	return (void __pmem *) devm_memremap(dev, offset, size,
 			ARCH_MEMREMAP_PMEM);
-#else
-	return (void __pmem *) devm_memremap(dev, offset, size,
-			MEMREMAP_WT);
-#endif
 }
 
 /**
@@ -158,8 +164,10 @@ static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
  */
 static inline void wmb_pmem(void)
 {
-	if (arch_has_pmem_api())
+	if (arch_has_wmb_pmem())
 		arch_wmb_pmem();
+	else
+		wmb();
 }
 
 /**