author     Ross Zwisler <ross.zwisler@linux.intel.com>    2015-08-18 15:55:39 -0400
committer  Dan Williams <dan.j.williams@intel.com>        2015-08-20 14:07:23 -0400
commit     5de490daec8b6354b90d5c9d3e2415b195f5adb6 (patch)
tree       0f9091d0827916e8a810cff47315445bed81d726
parent     4a370df5534ef727cba9a9d74bf22e0609f91d6e (diff)
pmem: add copy_from_iter_pmem() and clear_pmem()
Add support for two new PMEM APIs, copy_from_iter_pmem() and clear_pmem().
copy_from_iter_pmem() is used to copy data from an iterator into a PMEM
buffer.  clear_pmem() zeros a PMEM memory range.

Both of these new APIs must be explicitly ordered using a wmb_pmem()
function call and are implemented in such a way that the wmb_pmem() will
make the stores to PMEM durable.  Because both APIs are unordered they can
be called as needed without introducing any unwanted memory barriers.

Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r--  arch/x86/include/asm/pmem.h | 75
-rw-r--r--  include/linux/pmem.h        | 64
2 files changed, 137 insertions(+), 2 deletions(-)
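The ordering rule stated in the commit message (any number of unordered stores, then one wmb_pmem() to make them durable) can be illustrated with a short hypothetical caller. This is only a usage sketch, not part of this patch; the function name and the assumption that 'dst' was already mapped with memremap_pmem() are invented for illustration.

#include <linux/pmem.h>
#include <linux/uio.h>

/* hypothetical pmem write path; 'dst' is assumed mapped via memremap_pmem() */
static ssize_t example_pmem_write(void __pmem *dst, size_t len,
                struct iov_iter *from)
{
        size_t copied;

        copied = copy_from_iter_pmem(dst, len, from);   /* unordered copy */
        wmb_pmem();             /* order the copy and make it durable */

        return copied;
}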
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index 7f3413fce46c..a3a0df6545ee 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -66,6 +66,81 @@ static inline void arch_wmb_pmem(void)
         pcommit_sfence();
 }
 
+/**
+ * __arch_wb_cache_pmem - write back a cache range with CLWB
+ * @vaddr: virtual start address
+ * @size: number of bytes to write back
+ *
+ * Write back a cache range using the CLWB (cache line write back)
+ * instruction.  This function requires explicit ordering with an
+ * arch_wmb_pmem() call.  This API is internal to the x86 PMEM implementation.
+ */
+static inline void __arch_wb_cache_pmem(void *vaddr, size_t size)
+{
+        u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
+        unsigned long clflush_mask = x86_clflush_size - 1;
+        void *vend = vaddr + size;
+        void *p;
+
+        for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
+             p < vend; p += x86_clflush_size)
+                clwb(p);
+}
+
+/*
+ * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
+ * iterators, so for other types (bvec & kvec) we must do a cache write-back.
+ */
+static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
+{
+        return iter_is_iovec(i) == false;
+}
+
+/**
+ * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
+ * @addr: PMEM destination address
+ * @bytes: number of bytes to copy
+ * @i: iterator with source data
+ *
+ * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
+ * This function requires explicit ordering with an arch_wmb_pmem() call.
+ */
+static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+                struct iov_iter *i)
+{
+        void *vaddr = (void __force *)addr;
+        size_t len;
+
+        /* TODO: skip the write-back by always using non-temporal stores */
+        len = copy_from_iter_nocache(vaddr, bytes, i);
+
+        if (__iter_needs_pmem_wb(i))
+                __arch_wb_cache_pmem(vaddr, bytes);
+
+        return len;
+}
+
+/**
+ * arch_clear_pmem - zero a PMEM memory range
+ * @addr: virtual start address
+ * @size: number of bytes to zero
+ *
+ * Write zeros into the memory range starting at 'addr' for 'size' bytes.
+ * This function requires explicit ordering with an arch_wmb_pmem() call.
+ */
+static inline void arch_clear_pmem(void __pmem *addr, size_t size)
+{
+        void *vaddr = (void __force *)addr;
+
+        /* TODO: implement the zeroing via non-temporal writes */
+        if (size == PAGE_SIZE && ((unsigned long)vaddr & ~PAGE_MASK) == 0)
+                clear_page(vaddr);
+        else
+                memset(vaddr, 0, size);
+
+        __arch_wb_cache_pmem(vaddr, size);
+}
+
 static inline bool arch_has_wmb_pmem(void)
 {
 #ifdef CONFIG_X86_64
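The round-down loop in __arch_wb_cache_pmem() above is easier to see outside the kernel. A minimal stand-alone sketch in ordinary user-space C, assuming a 64-byte cache line and with printf() standing in for clwb(), shows which cache lines get written back for an unaligned range:

#include <stdio.h>
#include <stdint.h>

#define CACHE_LINE 64UL                 /* assumed clflush size */

static void show_writeback(uintptr_t vaddr, size_t size)
{
        uintptr_t mask = CACHE_LINE - 1;
        uintptr_t end = vaddr + size;
        uintptr_t p;

        /* round the start down to a cache-line boundary, step line by line */
        for (p = vaddr & ~mask; p < end; p += CACHE_LINE)
                printf("clwb line at 0x%lx\n", (unsigned long)p);
}

int main(void)
{
        /* a 100-byte buffer starting 8 bytes into a line touches two lines */
        show_writeback(0x1008, 100);
        return 0;
}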
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index a0706ea04efd..a9d84bf335ee 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -14,6 +14,7 @@
 #define __PMEM_H__
 
 #include <linux/io.h>
+#include <linux/uio.h>
 
 #ifdef CONFIG_ARCH_HAS_PMEM_API
 #include <asm/pmem.h>
@@ -33,12 +34,24 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
 {
         BUG();
 }
+
+static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+                struct iov_iter *i)
+{
+        BUG();
+        return 0;
+}
+
+static inline void arch_clear_pmem(void __pmem *addr, size_t size)
+{
+        BUG();
+}
 #endif
 
 /*
  * Architectures that define ARCH_HAS_PMEM_API must provide
- * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(), and
- * arch_has_wmb_pmem().
+ * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(),
+ * arch_copy_from_iter_pmem(), arch_clear_pmem() and arch_has_wmb_pmem().
  */
 
 static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
@@ -78,6 +91,20 @@ static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src,
         memcpy((void __force *) dst, src, size);
 }
 
+static inline size_t default_copy_from_iter_pmem(void __pmem *addr,
+                size_t bytes, struct iov_iter *i)
+{
+        return copy_from_iter_nocache((void __force *)addr, bytes, i);
+}
+
+static inline void default_clear_pmem(void __pmem *addr, size_t size)
+{
+        if (size == PAGE_SIZE && ((unsigned long)addr & ~PAGE_MASK) == 0)
+                clear_page((void __force *)addr);
+        else
+                memset((void __force *)addr, 0, size);
+}
+
 /**
  * memremap_pmem - map physical persistent memory for pmem api
  * @offset: physical address of persistent memory
@@ -134,4 +161,37 @@ static inline void wmb_pmem(void)
         if (arch_has_pmem_api())
                 arch_wmb_pmem();
 }
+
+/**
+ * copy_from_iter_pmem - copy data from an iterator to PMEM
+ * @addr: PMEM destination address
+ * @bytes: number of bytes to copy
+ * @i: iterator with source data
+ *
+ * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
+ * This function requires explicit ordering with a wmb_pmem() call.
+ */
+static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+                struct iov_iter *i)
+{
+        if (arch_has_pmem_api())
+                return arch_copy_from_iter_pmem(addr, bytes, i);
+        return default_copy_from_iter_pmem(addr, bytes, i);
+}
+
+/**
+ * clear_pmem - zero a PMEM memory range
+ * @addr: virtual start address
+ * @size: number of bytes to zero
+ *
+ * Write zeros into the memory range starting at 'addr' for 'size' bytes.
+ * This function requires explicit ordering with a wmb_pmem() call.
+ */
+static inline void clear_pmem(void __pmem *addr, size_t size)
+{
+        if (arch_has_pmem_api())
+                arch_clear_pmem(addr, size);
+        else
+                default_clear_pmem(addr, size);
+}
 #endif /* __PMEM_H__ */
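As with the copy path sketched earlier, a short hypothetical caller of the generic clear_pmem() wrapper (the helper name is invented for illustration): the wrapper picks arch_clear_pmem() when ARCH_HAS_PMEM_API is set and falls back to default_clear_pmem() otherwise, so the caller looks the same either way and only needs the trailing wmb_pmem() for durability.

#include <linux/pmem.h>

/* hypothetical helper that zeros a persistent-memory label area */
static void example_zero_label(void __pmem *label, size_t len)
{
        clear_pmem(label, len);         /* unordered zeroing of the range */
        wmb_pmem();                     /* make the zeroing durable */
}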