author		Dan Williams <dan.j.williams@intel.com>		2017-05-30 01:40:44 -0400
committer	Dan Williams <dan.j.williams@intel.com>		2017-06-15 17:35:24 -0400
commit		4e4f00a9b51a1c52ebdd728a1caeb3b9fe48c39d
tree		949f4063c4ec29fa77660d12390942976dce8d0f
parent		81f558701ae8d5677635118751b1b4043094c7e9
x86, dax, libnvdimm: remove wb_cache_pmem() indirection
With all handling of the CONFIG_ARCH_HAS_PMEM_API case being moved to
libnvdimm and the pmem driver directly we do not need to provide global
wrappers and fallbacks in the CONFIG_ARCH_HAS_PMEM_API=n case. The pmem
driver will simply not link to arch_wb_cache_pmem() in that case. Same as
before, pmem flushing is only defined for x86_64, via clean_cache_range(),
but it is straightforward to add other archs in the future.

arch_wb_cache_pmem() is an exported function since the pmem module needs
to find it, but it is privately declared in drivers/nvdimm/pmem.h because
there are no consumers outside of the pmem driver.

Cc: <x86@kernel.org>
Cc: Jan Kara <jack@suse.cz>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Oliver O'Halloran <oohall@gmail.com>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
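For reference, the primitive being consolidated here simply walks the target
range in cache-line-sized steps and issues CLWB on each line, as both the
removed inline arch_wb_cache_pmem() and the surviving clean_cache_range()
do in the hunks below. The following is a minimal user-space sketch of that
pattern, not part of the patch: it assumes a CLWB-capable CPU and a compiler
invoked with -mclwb, and it hard-codes a 64-byte line size where the kernel
reads boot_cpu_data.x86_clflush_size; the name wb_cache_range() is
illustrative only.

#include <immintrin.h>	/* _mm_clwb(), requires -mclwb */
#include <stdint.h>
#include <stddef.h>

#define CACHE_LINE_SIZE	64UL	/* illustrative; the kernel reads boot_cpu_data.x86_clflush_size */

/* Write back [addr, addr + size) to memory, one cache line at a time. */
static void wb_cache_range(void *addr, size_t size)
{
	uintptr_t mask = CACHE_LINE_SIZE - 1;
	uintptr_t end = (uintptr_t)addr + size;
	uintptr_t p;

	/* Round the start down to a cache-line boundary, as the kernel helper does. */
	for (p = (uintptr_t)addr & ~mask; p < end; p += CACHE_LINE_SIZE)
		_mm_clwb((void *)p);
}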
-rw-r--r--	arch/x86/include/asm/pmem.h	21
-rw-r--r--	arch/x86/lib/usercopy_64.c	6
-rw-r--r--	drivers/nvdimm/pmem.c		2
-rw-r--r--	drivers/nvdimm/pmem.h		8
-rw-r--r--	include/linux/pmem.h		19
5 files changed, 15 insertions, 41 deletions
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index f4c119d253f3..4759a179aa52 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -44,27 +44,6 @@ static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
 	BUG();
 }
 
-/**
- * arch_wb_cache_pmem - write back a cache range with CLWB
- * @vaddr:	virtual start address
- * @size:	number of bytes to write back
- *
- * Write back a cache range using the CLWB (cache line write back)
- * instruction. Note that @size is internally rounded up to be cache
- * line size aligned.
- */
-static inline void arch_wb_cache_pmem(void *addr, size_t size)
-{
-	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
-	unsigned long clflush_mask = x86_clflush_size - 1;
-	void *vend = addr + size;
-	void *p;
-
-	for (p = (void *)((unsigned long)addr & ~clflush_mask);
-	     p < vend; p += x86_clflush_size)
-		clwb(p);
-}
-
 static inline void arch_invalidate_pmem(void *addr, size_t size)
 {
 	clflush_cache_range(addr, size);
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index f42d2fd86ca3..75d3776123cc 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -97,6 +97,12 @@ static void clean_cache_range(void *addr, size_t size)
 		clwb(p);
 }
 
+void arch_wb_cache_pmem(void *addr, size_t size)
+{
+	clean_cache_range(addr, size);
+}
+EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
+
 long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
 {
 	unsigned long flushed, dest = (unsigned long) dst;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 823b07774244..3b87702d46bb 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -245,7 +245,7 @@ static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
 static void pmem_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff,
 		void *addr, size_t size)
 {
-	wb_cache_pmem(addr, size);
+	arch_wb_cache_pmem(addr, size);
 }
 
 static const struct dax_operations pmem_dax_ops = {
diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h
index 7f4dbd72a90a..c4b3371c7f88 100644
--- a/drivers/nvdimm/pmem.h
+++ b/drivers/nvdimm/pmem.h
@@ -5,6 +5,14 @@
 #include <linux/pfn_t.h>
 #include <linux/fs.h>
 
+#ifdef CONFIG_ARCH_HAS_PMEM_API
+void arch_wb_cache_pmem(void *addr, size_t size);
+#else
+static inline void arch_wb_cache_pmem(void *addr, size_t size)
+{
+}
+#endif
+
 /* this definition is in it's own header for tools/testing/nvdimm to consume */
 struct pmem_device {
 	/* One contiguous memory region per device */
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index 772bd02a5b52..33ae761f010a 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -31,11 +31,6 @@ static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
 	BUG();
 }
 
-static inline void arch_wb_cache_pmem(void *addr, size_t size)
-{
-	BUG();
-}
-
 static inline void arch_invalidate_pmem(void *addr, size_t size)
 {
 	BUG();
@@ -80,18 +75,4 @@ static inline void invalidate_pmem(void *addr, size_t size)
 	if (arch_has_pmem_api())
 		arch_invalidate_pmem(addr, size);
 }
-
-/**
- * wb_cache_pmem - write back processor cache for PMEM memory range
- * @addr:	virtual start address
- * @size:	number of bytes to write back
- *
- * Write back the processor cache range starting at 'addr' for 'size' bytes.
- * See blkdev_issue_flush() note for memcpy_to_pmem().
- */
-static inline void wb_cache_pmem(void *addr, size_t size)
-{
-	if (arch_has_pmem_api())
-		arch_wb_cache_pmem(addr, size);
-}
 #endif /* __PMEM_H__ */