about summary refs log tree commit diff stats
path: root/include/linux
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2016-06-02 02:07:43 -0400
committerDan Williams <dan.j.williams@intel.com>2016-07-12 18:13:48 -0400
commit7c8a6a71904d57ae5fb24140f9661ec22ca9ee85 (patch)
tree83e6e13de0d4eac94859bc8ae40624abf9a41ffb /include/linux
parent91131dbd1d50637dc338526502a1a2ec5a7f97df (diff)
pmem: kill wmb_pmem()
All users have been replaced with flushing in the pmem driver.

Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/pmem.h47
1 file changed, 4 insertions, 43 deletions
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index 57d146fe44dd..9e3ea94b8157 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -26,16 +26,6 @@
26 * calling these symbols with arch_has_pmem_api() and redirect to the 26 * calling these symbols with arch_has_pmem_api() and redirect to the
27 * implementation in asm/pmem.h. 27 * implementation in asm/pmem.h.
28 */ 28 */
29static inline bool __arch_has_wmb_pmem(void)
30{
31 return false;
32}
33
34static inline void arch_wmb_pmem(void)
35{
36 BUG();
37}
38
39static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src, 29static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
40 size_t n) 30 size_t n)
41{ 31{
@@ -101,20 +91,6 @@ static inline int memcpy_from_pmem(void *dst, void __pmem const *src,
101 return default_memcpy_from_pmem(dst, src, size); 91 return default_memcpy_from_pmem(dst, src, size);
102} 92}
103 93
104/**
105 * arch_has_wmb_pmem - true if wmb_pmem() ensures durability
106 *
107 * For a given cpu implementation within an architecture it is possible
108 * that wmb_pmem() resolves to a nop. In the case this returns
109 * false, pmem api users are unable to ensure durability and may want to
110 * fall back to a different data consistency model, or otherwise notify
111 * the user.
112 */
113static inline bool arch_has_wmb_pmem(void)
114{
115 return arch_has_pmem_api() && __arch_has_wmb_pmem();
116}
117
118/* 94/*
119 * These defaults seek to offer decent performance and minimize the 95 * These defaults seek to offer decent performance and minimize the
120 * window between i/o completion and writes being durable on media. 96 * window between i/o completion and writes being durable on media.
@@ -152,7 +128,7 @@ static inline void default_clear_pmem(void __pmem *addr, size_t size)
152 * being effectively evicted from, or never written to, the processor 128 * being effectively evicted from, or never written to, the processor
153 * cache hierarchy after the copy completes. After memcpy_to_pmem() 129 * cache hierarchy after the copy completes. After memcpy_to_pmem()
154 * data may still reside in cpu or platform buffers, so this operation 130 * data may still reside in cpu or platform buffers, so this operation
155 * must be followed by a wmb_pmem(). 131 * must be followed by a blkdev_issue_flush() on the pmem block device.
156 */ 132 */
157static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n) 133static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
158{ 134{
@@ -163,28 +139,13 @@ static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
163} 139}
164 140
165/** 141/**
166 * wmb_pmem - synchronize writes to persistent memory
167 *
168 * After a series of memcpy_to_pmem() operations this drains data from
169 * cpu write buffers and any platform (memory controller) buffers to
170 * ensure that written data is durable on persistent memory media.
171 */
172static inline void wmb_pmem(void)
173{
174 if (arch_has_wmb_pmem())
175 arch_wmb_pmem();
176 else
177 wmb();
178}
179
180/**
181 * copy_from_iter_pmem - copy data from an iterator to PMEM 142 * copy_from_iter_pmem - copy data from an iterator to PMEM
182 * @addr: PMEM destination address 143 * @addr: PMEM destination address
183 * @bytes: number of bytes to copy 144 * @bytes: number of bytes to copy
184 * @i: iterator with source data 145 * @i: iterator with source data
185 * 146 *
186 * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'. 147 * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
187 * This function requires explicit ordering with a wmb_pmem() call. 148 * See blkdev_issue_flush() note for memcpy_to_pmem().
188 */ 149 */
189static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes, 150static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
190 struct iov_iter *i) 151 struct iov_iter *i)
@@ -200,7 +161,7 @@ static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
200 * @size: number of bytes to zero 161 * @size: number of bytes to zero
201 * 162 *
202 * Write zeros into the memory range starting at 'addr' for 'size' bytes. 163 * Write zeros into the memory range starting at 'addr' for 'size' bytes.
203 * This function requires explicit ordering with a wmb_pmem() call. 164 * See blkdev_issue_flush() note for memcpy_to_pmem().
204 */ 165 */
205static inline void clear_pmem(void __pmem *addr, size_t size) 166static inline void clear_pmem(void __pmem *addr, size_t size)
206{ 167{
@@ -230,7 +191,7 @@ static inline void invalidate_pmem(void __pmem *addr, size_t size)
230 * @size: number of bytes to write back 191 * @size: number of bytes to write back
231 * 192 *
232 * Write back the processor cache range starting at 'addr' for 'size' bytes. 193 * Write back the processor cache range starting at 'addr' for 'size' bytes.
233 * This function requires explicit ordering with a wmb_pmem() call. 194 * See blkdev_issue_flush() note for memcpy_to_pmem().
234 */ 195 */
235static inline void wb_cache_pmem(void __pmem *addr, size_t size) 196static inline void wb_cache_pmem(void __pmem *addr, size_t size)
236{ 197{