aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2018-06-08 18:16:44 -0400
committerDan Williams <dan.j.williams@intel.com>2018-06-08 18:16:44 -0400
commit930218affeadd1325ea17e053f0dcecf218f5a4f (patch)
tree203c293668a88e03b9f292cf4d5a294ca8a4275a
parentb56845794e1e93121acb74ca325db965035d5545 (diff)
parent5d8beee20d89e34ff1dcb0da84adf7607858c59d (diff)
Merge branch 'for-4.18/mcsafe' into libnvdimm-for-next
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/Kconfig.debug3
-rw-r--r--arch/x86/include/asm/mcsafe_test.h75
-rw-r--r--arch/x86/include/asm/string_64.h10
-rw-r--r--arch/x86/include/asm/uaccess_64.h14
-rw-r--r--arch/x86/lib/memcpy_64.S112
-rw-r--r--arch/x86/lib/usercopy_64.c21
-rw-r--r--drivers/dax/super.c10
-rw-r--r--drivers/md/dm-linear.c16
-rw-r--r--drivers/md/dm-log-writes.c15
-rw-r--r--drivers/md/dm-stripe.c21
-rw-r--r--drivers/md/dm.c25
-rw-r--r--drivers/nvdimm/claim.c3
-rw-r--r--drivers/nvdimm/pmem.c13
-rw-r--r--drivers/s390/block/dcssblk.c7
-rw-r--r--fs/dax.c21
-rw-r--r--include/linux/dax.h5
-rw-r--r--include/linux/device-mapper.h5
-rw-r--r--include/linux/string.h4
-rw-r--r--include/linux/uio.h15
-rw-r--r--lib/Kconfig3
-rw-r--r--lib/iov_iter.c61
-rw-r--r--tools/testing/nvdimm/test/nfit.c104
23 files changed, 485 insertions, 79 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c07f492b871a..6ca22706cd64 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -60,6 +60,7 @@ config X86
60 select ARCH_HAS_PMEM_API if X86_64 60 select ARCH_HAS_PMEM_API if X86_64
61 select ARCH_HAS_REFCOUNT 61 select ARCH_HAS_REFCOUNT
62 select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64 62 select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
63 select ARCH_HAS_UACCESS_MCSAFE if X86_64
63 select ARCH_HAS_SET_MEMORY 64 select ARCH_HAS_SET_MEMORY
64 select ARCH_HAS_SG_CHAIN 65 select ARCH_HAS_SG_CHAIN
65 select ARCH_HAS_STRICT_KERNEL_RWX 66 select ARCH_HAS_STRICT_KERNEL_RWX
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 192e4d2f9efc..c6dd1d980081 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -72,6 +72,9 @@ config EARLY_PRINTK_USB_XDBC
72 You should normally say N here, unless you want to debug early 72 You should normally say N here, unless you want to debug early
73 crashes or need a very simple printk logging facility. 73 crashes or need a very simple printk logging facility.
74 74
75config MCSAFE_TEST
76 def_bool n
77
75config X86_PTDUMP_CORE 78config X86_PTDUMP_CORE
76 def_bool n 79 def_bool n
77 80
diff --git a/arch/x86/include/asm/mcsafe_test.h b/arch/x86/include/asm/mcsafe_test.h
new file mode 100644
index 000000000000..eb59804b6201
--- /dev/null
+++ b/arch/x86/include/asm/mcsafe_test.h
@@ -0,0 +1,75 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _MCSAFE_TEST_H_
3#define _MCSAFE_TEST_H_
4
5#ifndef __ASSEMBLY__
6#ifdef CONFIG_MCSAFE_TEST
7extern unsigned long mcsafe_test_src;
8extern unsigned long mcsafe_test_dst;
9
10static inline void mcsafe_inject_src(void *addr)
11{
12 if (addr)
13 mcsafe_test_src = (unsigned long) addr;
14 else
15 mcsafe_test_src = ~0UL;
16}
17
18static inline void mcsafe_inject_dst(void *addr)
19{
20 if (addr)
21 mcsafe_test_dst = (unsigned long) addr;
22 else
23 mcsafe_test_dst = ~0UL;
24}
25#else /* CONFIG_MCSAFE_TEST */
26static inline void mcsafe_inject_src(void *addr)
27{
28}
29
30static inline void mcsafe_inject_dst(void *addr)
31{
32}
33#endif /* CONFIG_MCSAFE_TEST */
34
35#else /* __ASSEMBLY__ */
36#include <asm/export.h>
37
38#ifdef CONFIG_MCSAFE_TEST
39.macro MCSAFE_TEST_CTL
40 .pushsection .data
41 .align 8
42 .globl mcsafe_test_src
43 mcsafe_test_src:
44 .quad 0
45 EXPORT_SYMBOL_GPL(mcsafe_test_src)
46 .globl mcsafe_test_dst
47 mcsafe_test_dst:
48 .quad 0
49 EXPORT_SYMBOL_GPL(mcsafe_test_dst)
50 .popsection
51.endm
52
53.macro MCSAFE_TEST_SRC reg count target
54 leaq \count(\reg), %r9
55 cmp mcsafe_test_src, %r9
56 ja \target
57.endm
58
59.macro MCSAFE_TEST_DST reg count target
60 leaq \count(\reg), %r9
61 cmp mcsafe_test_dst, %r9
62 ja \target
63.endm
64#else
65.macro MCSAFE_TEST_CTL
66.endm
67
68.macro MCSAFE_TEST_SRC reg count target
69.endm
70
71.macro MCSAFE_TEST_DST reg count target
72.endm
73#endif /* CONFIG_MCSAFE_TEST */
74#endif /* __ASSEMBLY__ */
75#endif /* _MCSAFE_TEST_H_ */
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index 533f74c300c2..d33f92b9fa22 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -116,7 +116,8 @@ int strcmp(const char *cs, const char *ct);
116#endif 116#endif
117 117
118#define __HAVE_ARCH_MEMCPY_MCSAFE 1 118#define __HAVE_ARCH_MEMCPY_MCSAFE 1
119__must_check int memcpy_mcsafe_unrolled(void *dst, const void *src, size_t cnt); 119__must_check unsigned long __memcpy_mcsafe(void *dst, const void *src,
120 size_t cnt);
120DECLARE_STATIC_KEY_FALSE(mcsafe_key); 121DECLARE_STATIC_KEY_FALSE(mcsafe_key);
121 122
122/** 123/**
@@ -131,14 +132,15 @@ DECLARE_STATIC_KEY_FALSE(mcsafe_key);
131 * actually do machine check recovery. Everyone else can just 132 * actually do machine check recovery. Everyone else can just
132 * use memcpy(). 133 * use memcpy().
133 * 134 *
134 * Return 0 for success, -EFAULT for fail 135 * Return 0 for success, or number of bytes not copied if there was an
136 * exception.
135 */ 137 */
136static __always_inline __must_check int 138static __always_inline __must_check unsigned long
137memcpy_mcsafe(void *dst, const void *src, size_t cnt) 139memcpy_mcsafe(void *dst, const void *src, size_t cnt)
138{ 140{
139#ifdef CONFIG_X86_MCE 141#ifdef CONFIG_X86_MCE
140 if (static_branch_unlikely(&mcsafe_key)) 142 if (static_branch_unlikely(&mcsafe_key))
141 return memcpy_mcsafe_unrolled(dst, src, cnt); 143 return __memcpy_mcsafe(dst, src, cnt);
142 else 144 else
143#endif 145#endif
144 memcpy(dst, src, cnt); 146 memcpy(dst, src, cnt);
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 62546b3a398e..62acb613114b 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -47,6 +47,17 @@ copy_user_generic(void *to, const void *from, unsigned len)
47} 47}
48 48
49static __always_inline __must_check unsigned long 49static __always_inline __must_check unsigned long
50copy_to_user_mcsafe(void *to, const void *from, unsigned len)
51{
52 unsigned long ret;
53
54 __uaccess_begin();
55 ret = memcpy_mcsafe(to, from, len);
56 __uaccess_end();
57 return ret;
58}
59
60static __always_inline __must_check unsigned long
50raw_copy_from_user(void *dst, const void __user *src, unsigned long size) 61raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
51{ 62{
52 int ret = 0; 63 int ret = 0;
@@ -194,4 +205,7 @@ __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
194unsigned long 205unsigned long
195copy_user_handle_tail(char *to, char *from, unsigned len); 206copy_user_handle_tail(char *to, char *from, unsigned len);
196 207
208unsigned long
209mcsafe_handle_tail(char *to, char *from, unsigned len);
210
197#endif /* _ASM_X86_UACCESS_64_H */ 211#endif /* _ASM_X86_UACCESS_64_H */
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 9a53a06e5a3e..298ef1479240 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -3,6 +3,7 @@
3#include <linux/linkage.h> 3#include <linux/linkage.h>
4#include <asm/errno.h> 4#include <asm/errno.h>
5#include <asm/cpufeatures.h> 5#include <asm/cpufeatures.h>
6#include <asm/mcsafe_test.h>
6#include <asm/alternative-asm.h> 7#include <asm/alternative-asm.h>
7#include <asm/export.h> 8#include <asm/export.h>
8 9
@@ -183,12 +184,15 @@ ENTRY(memcpy_orig)
183ENDPROC(memcpy_orig) 184ENDPROC(memcpy_orig)
184 185
185#ifndef CONFIG_UML 186#ifndef CONFIG_UML
187
188MCSAFE_TEST_CTL
189
186/* 190/*
187 * memcpy_mcsafe_unrolled - memory copy with machine check exception handling 191 * __memcpy_mcsafe - memory copy with machine check exception handling
188 * Note that we only catch machine checks when reading the source addresses. 192 * Note that we only catch machine checks when reading the source addresses.
189 * Writes to target are posted and don't generate machine checks. 193 * Writes to target are posted and don't generate machine checks.
190 */ 194 */
191ENTRY(memcpy_mcsafe_unrolled) 195ENTRY(__memcpy_mcsafe)
192 cmpl $8, %edx 196 cmpl $8, %edx
193 /* Less than 8 bytes? Go to byte copy loop */ 197 /* Less than 8 bytes? Go to byte copy loop */
194 jb .L_no_whole_words 198 jb .L_no_whole_words
@@ -204,58 +208,33 @@ ENTRY(memcpy_mcsafe_unrolled)
204 subl $8, %ecx 208 subl $8, %ecx
205 negl %ecx 209 negl %ecx
206 subl %ecx, %edx 210 subl %ecx, %edx
207.L_copy_leading_bytes: 211.L_read_leading_bytes:
208 movb (%rsi), %al 212 movb (%rsi), %al
213 MCSAFE_TEST_SRC %rsi 1 .E_leading_bytes
214 MCSAFE_TEST_DST %rdi 1 .E_leading_bytes
215.L_write_leading_bytes:
209 movb %al, (%rdi) 216 movb %al, (%rdi)
210 incq %rsi 217 incq %rsi
211 incq %rdi 218 incq %rdi
212 decl %ecx 219 decl %ecx
213 jnz .L_copy_leading_bytes 220 jnz .L_read_leading_bytes
214 221
215.L_8byte_aligned: 222.L_8byte_aligned:
216 /* Figure out how many whole cache lines (64-bytes) to copy */
217 movl %edx, %ecx
218 andl $63, %edx
219 shrl $6, %ecx
220 jz .L_no_whole_cache_lines
221
222 /* Loop copying whole cache lines */
223.L_cache_w0: movq (%rsi), %r8
224.L_cache_w1: movq 1*8(%rsi), %r9
225.L_cache_w2: movq 2*8(%rsi), %r10
226.L_cache_w3: movq 3*8(%rsi), %r11
227 movq %r8, (%rdi)
228 movq %r9, 1*8(%rdi)
229 movq %r10, 2*8(%rdi)
230 movq %r11, 3*8(%rdi)
231.L_cache_w4: movq 4*8(%rsi), %r8
232.L_cache_w5: movq 5*8(%rsi), %r9
233.L_cache_w6: movq 6*8(%rsi), %r10
234.L_cache_w7: movq 7*8(%rsi), %r11
235 movq %r8, 4*8(%rdi)
236 movq %r9, 5*8(%rdi)
237 movq %r10, 6*8(%rdi)
238 movq %r11, 7*8(%rdi)
239 leaq 64(%rsi), %rsi
240 leaq 64(%rdi), %rdi
241 decl %ecx
242 jnz .L_cache_w0
243
244 /* Are there any trailing 8-byte words? */
245.L_no_whole_cache_lines:
246 movl %edx, %ecx 223 movl %edx, %ecx
247 andl $7, %edx 224 andl $7, %edx
248 shrl $3, %ecx 225 shrl $3, %ecx
249 jz .L_no_whole_words 226 jz .L_no_whole_words
250 227
251 /* Copy trailing words */ 228.L_read_words:
252.L_copy_trailing_words:
253 movq (%rsi), %r8 229 movq (%rsi), %r8
254 mov %r8, (%rdi) 230 MCSAFE_TEST_SRC %rsi 8 .E_read_words
255 leaq 8(%rsi), %rsi 231 MCSAFE_TEST_DST %rdi 8 .E_write_words
256 leaq 8(%rdi), %rdi 232.L_write_words:
233 movq %r8, (%rdi)
234 addq $8, %rsi
235 addq $8, %rdi
257 decl %ecx 236 decl %ecx
258 jnz .L_copy_trailing_words 237 jnz .L_read_words
259 238
260 /* Any trailing bytes? */ 239 /* Any trailing bytes? */
261.L_no_whole_words: 240.L_no_whole_words:
@@ -264,38 +243,55 @@ ENTRY(memcpy_mcsafe_unrolled)
264 243
265 /* Copy trailing bytes */ 244 /* Copy trailing bytes */
266 movl %edx, %ecx 245 movl %edx, %ecx
267.L_copy_trailing_bytes: 246.L_read_trailing_bytes:
268 movb (%rsi), %al 247 movb (%rsi), %al
248 MCSAFE_TEST_SRC %rsi 1 .E_trailing_bytes
249 MCSAFE_TEST_DST %rdi 1 .E_trailing_bytes
250.L_write_trailing_bytes:
269 movb %al, (%rdi) 251 movb %al, (%rdi)
270 incq %rsi 252 incq %rsi
271 incq %rdi 253 incq %rdi
272 decl %ecx 254 decl %ecx
273 jnz .L_copy_trailing_bytes 255 jnz .L_read_trailing_bytes
274 256
275 /* Copy successful. Return zero */ 257 /* Copy successful. Return zero */
276.L_done_memcpy_trap: 258.L_done_memcpy_trap:
277 xorq %rax, %rax 259 xorq %rax, %rax
278 ret 260 ret
279ENDPROC(memcpy_mcsafe_unrolled) 261ENDPROC(__memcpy_mcsafe)
280EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled) 262EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
281 263
282 .section .fixup, "ax" 264 .section .fixup, "ax"
283 /* Return -EFAULT for any failure */ 265 /*
284.L_memcpy_mcsafe_fail: 266 * Return number of bytes not copied for any failure. Note that
285 mov $-EFAULT, %rax 267 * there is no "tail" handling since the source buffer is 8-byte
268 * aligned and poison is cacheline aligned.
269 */
270.E_read_words:
271 shll $3, %ecx
272.E_leading_bytes:
273 addl %edx, %ecx
274.E_trailing_bytes:
275 mov %ecx, %eax
286 ret 276 ret
287 277
278 /*
279 * For write fault handling, given the destination is unaligned,
280 * we handle faults on multi-byte writes with a byte-by-byte
281 * copy up to the write-protected page.
282 */
283.E_write_words:
284 shll $3, %ecx
285 addl %edx, %ecx
286 movl %ecx, %edx
287 jmp mcsafe_handle_tail
288
288 .previous 289 .previous
289 290
290 _ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail) 291 _ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
291 _ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail) 292 _ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
292 _ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail) 293 _ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
293 _ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail) 294 _ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
294 _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail) 295 _ASM_EXTABLE(.L_write_words, .E_write_words)
295 _ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail) 296 _ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
296 _ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
297 _ASM_EXTABLE_FAULT(.L_cache_w6, .L_memcpy_mcsafe_fail)
298 _ASM_EXTABLE_FAULT(.L_cache_w7, .L_memcpy_mcsafe_fail)
299 _ASM_EXTABLE_FAULT(.L_copy_trailing_words, .L_memcpy_mcsafe_fail)
300 _ASM_EXTABLE_FAULT(.L_copy_trailing_bytes, .L_memcpy_mcsafe_fail)
301#endif 297#endif
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 75d3776123cc..7ebc9901dd05 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -75,6 +75,27 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
75 return len; 75 return len;
76} 76}
77 77
78/*
79 * Similar to copy_user_handle_tail, probe for the write fault point,
80 * but reuse __memcpy_mcsafe in case a new read error is encountered.
81 * clac() is handled in _copy_to_iter_mcsafe().
82 */
83__visible unsigned long
84mcsafe_handle_tail(char *to, char *from, unsigned len)
85{
86 for (; len; --len, to++, from++) {
87 /*
88 * Call the assembly routine back directly since
89 * memcpy_mcsafe() may silently fallback to memcpy.
90 */
91 unsigned long rem = __memcpy_mcsafe(to, from, 1);
92
93 if (rem)
94 break;
95 }
96 return len;
97}
98
78#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE 99#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
79/** 100/**
80 * clean_cache_range - write back a cache range with CLWB 101 * clean_cache_range - write back a cache range with CLWB
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 60d01b5d2a67..88e77b7f0c4b 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -287,6 +287,16 @@ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
287} 287}
288EXPORT_SYMBOL_GPL(dax_copy_from_iter); 288EXPORT_SYMBOL_GPL(dax_copy_from_iter);
289 289
290size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
291 size_t bytes, struct iov_iter *i)
292{
293 if (!dax_alive(dax_dev))
294 return 0;
295
296 return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
297}
298EXPORT_SYMBOL_GPL(dax_copy_to_iter);
299
290#ifdef CONFIG_ARCH_HAS_PMEM_API 300#ifdef CONFIG_ARCH_HAS_PMEM_API
291void arch_wb_cache_pmem(void *addr, size_t size); 301void arch_wb_cache_pmem(void *addr, size_t size);
292void dax_flush(struct dax_device *dax_dev, void *addr, size_t size) 302void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 775c06d953b7..d10964d41fd7 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -185,9 +185,24 @@ static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
185 return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i); 185 return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
186} 186}
187 187
188static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
189 void *addr, size_t bytes, struct iov_iter *i)
190{
191 struct linear_c *lc = ti->private;
192 struct block_device *bdev = lc->dev->bdev;
193 struct dax_device *dax_dev = lc->dev->dax_dev;
194 sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
195
196 dev_sector = linear_map_sector(ti, sector);
197 if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
198 return 0;
199 return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
200}
201
188#else 202#else
189#define linear_dax_direct_access NULL 203#define linear_dax_direct_access NULL
190#define linear_dax_copy_from_iter NULL 204#define linear_dax_copy_from_iter NULL
205#define linear_dax_copy_to_iter NULL
191#endif 206#endif
192 207
193static struct target_type linear_target = { 208static struct target_type linear_target = {
@@ -204,6 +219,7 @@ static struct target_type linear_target = {
204 .iterate_devices = linear_iterate_devices, 219 .iterate_devices = linear_iterate_devices,
205 .direct_access = linear_dax_direct_access, 220 .direct_access = linear_dax_direct_access,
206 .dax_copy_from_iter = linear_dax_copy_from_iter, 221 .dax_copy_from_iter = linear_dax_copy_from_iter,
222 .dax_copy_to_iter = linear_dax_copy_to_iter,
207}; 223};
208 224
209int __init dm_linear_init(void) 225int __init dm_linear_init(void)
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index c90c7c08a77f..9ea2b0291f20 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -962,9 +962,23 @@ static size_t log_writes_dax_copy_from_iter(struct dm_target *ti,
962dax_copy: 962dax_copy:
963 return dax_copy_from_iter(lc->dev->dax_dev, pgoff, addr, bytes, i); 963 return dax_copy_from_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
964} 964}
965
966static size_t log_writes_dax_copy_to_iter(struct dm_target *ti,
967 pgoff_t pgoff, void *addr, size_t bytes,
968 struct iov_iter *i)
969{
970 struct log_writes_c *lc = ti->private;
971 sector_t sector = pgoff * PAGE_SECTORS;
972
973 if (bdev_dax_pgoff(lc->dev->bdev, sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
974 return 0;
975 return dax_copy_to_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
976}
977
965#else 978#else
966#define log_writes_dax_direct_access NULL 979#define log_writes_dax_direct_access NULL
967#define log_writes_dax_copy_from_iter NULL 980#define log_writes_dax_copy_from_iter NULL
981#define log_writes_dax_copy_to_iter NULL
968#endif 982#endif
969 983
970static struct target_type log_writes_target = { 984static struct target_type log_writes_target = {
@@ -982,6 +996,7 @@ static struct target_type log_writes_target = {
982 .io_hints = log_writes_io_hints, 996 .io_hints = log_writes_io_hints,
983 .direct_access = log_writes_dax_direct_access, 997 .direct_access = log_writes_dax_direct_access,
984 .dax_copy_from_iter = log_writes_dax_copy_from_iter, 998 .dax_copy_from_iter = log_writes_dax_copy_from_iter,
999 .dax_copy_to_iter = log_writes_dax_copy_to_iter,
985}; 1000};
986 1001
987static int __init dm_log_writes_init(void) 1002static int __init dm_log_writes_init(void)
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index fe7fb9b1aec3..8547d7594338 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -354,9 +354,29 @@ static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
354 return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i); 354 return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
355} 355}
356 356
357static size_t stripe_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
358 void *addr, size_t bytes, struct iov_iter *i)
359{
360 sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
361 struct stripe_c *sc = ti->private;
362 struct dax_device *dax_dev;
363 struct block_device *bdev;
364 uint32_t stripe;
365
366 stripe_map_sector(sc, sector, &stripe, &dev_sector);
367 dev_sector += sc->stripe[stripe].physical_start;
368 dax_dev = sc->stripe[stripe].dev->dax_dev;
369 bdev = sc->stripe[stripe].dev->bdev;
370
371 if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
372 return 0;
373 return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
374}
375
357#else 376#else
358#define stripe_dax_direct_access NULL 377#define stripe_dax_direct_access NULL
359#define stripe_dax_copy_from_iter NULL 378#define stripe_dax_copy_from_iter NULL
379#define stripe_dax_copy_to_iter NULL
360#endif 380#endif
361 381
362/* 382/*
@@ -478,6 +498,7 @@ static struct target_type stripe_target = {
478 .io_hints = stripe_io_hints, 498 .io_hints = stripe_io_hints,
479 .direct_access = stripe_dax_direct_access, 499 .direct_access = stripe_dax_direct_access,
480 .dax_copy_from_iter = stripe_dax_copy_from_iter, 500 .dax_copy_from_iter = stripe_dax_copy_from_iter,
501 .dax_copy_to_iter = stripe_dax_copy_to_iter,
481}; 502};
482 503
483int __init dm_stripe_init(void) 504int __init dm_stripe_init(void)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 0a7b0107ca78..6752f1c25258 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1089,6 +1089,30 @@ static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
1089 return ret; 1089 return ret;
1090} 1090}
1091 1091
1092static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
1093 void *addr, size_t bytes, struct iov_iter *i)
1094{
1095 struct mapped_device *md = dax_get_private(dax_dev);
1096 sector_t sector = pgoff * PAGE_SECTORS;
1097 struct dm_target *ti;
1098 long ret = 0;
1099 int srcu_idx;
1100
1101 ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1102
1103 if (!ti)
1104 goto out;
1105 if (!ti->type->dax_copy_to_iter) {
1106 ret = copy_to_iter(addr, bytes, i);
1107 goto out;
1108 }
1109 ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
1110 out:
1111 dm_put_live_table(md, srcu_idx);
1112
1113 return ret;
1114}
1115
1092/* 1116/*
1093 * A target may call dm_accept_partial_bio only from the map routine. It is 1117 * A target may call dm_accept_partial_bio only from the map routine. It is
1094 * allowed for all bio types except REQ_PREFLUSH and REQ_OP_ZONE_RESET. 1118 * allowed for all bio types except REQ_PREFLUSH and REQ_OP_ZONE_RESET.
@@ -3134,6 +3158,7 @@ static const struct block_device_operations dm_blk_dops = {
3134static const struct dax_operations dm_dax_ops = { 3158static const struct dax_operations dm_dax_ops = {
3135 .direct_access = dm_dax_direct_access, 3159 .direct_access = dm_dax_direct_access,
3136 .copy_from_iter = dm_dax_copy_from_iter, 3160 .copy_from_iter = dm_dax_copy_from_iter,
3161 .copy_to_iter = dm_dax_copy_to_iter,
3137}; 3162};
3138 3163
3139/* 3164/*
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 30852270484f..2e96b34bc936 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -276,7 +276,8 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
276 if (rw == READ) { 276 if (rw == READ) {
277 if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) 277 if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align)))
278 return -EIO; 278 return -EIO;
279 return memcpy_mcsafe(buf, nsio->addr + offset, size); 279 if (memcpy_mcsafe(buf, nsio->addr + offset, size) != 0)
280 return -EIO;
280 } 281 }
281 282
282 if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) { 283 if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index bf2dd2a4a5e6..68940356cad3 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -101,15 +101,15 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
101 void *pmem_addr, unsigned int len) 101 void *pmem_addr, unsigned int len)
102{ 102{
103 unsigned int chunk; 103 unsigned int chunk;
104 int rc; 104 unsigned long rem;
105 void *mem; 105 void *mem;
106 106
107 while (len) { 107 while (len) {
108 mem = kmap_atomic(page); 108 mem = kmap_atomic(page);
109 chunk = min_t(unsigned int, len, PAGE_SIZE); 109 chunk = min_t(unsigned int, len, PAGE_SIZE);
110 rc = memcpy_mcsafe(mem + off, pmem_addr, chunk); 110 rem = memcpy_mcsafe(mem + off, pmem_addr, chunk);
111 kunmap_atomic(mem); 111 kunmap_atomic(mem);
112 if (rc) 112 if (rem)
113 return BLK_STS_IOERR; 113 return BLK_STS_IOERR;
114 len -= chunk; 114 len -= chunk;
115 off = 0; 115 off = 0;
@@ -259,9 +259,16 @@ static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
259 return copy_from_iter_flushcache(addr, bytes, i); 259 return copy_from_iter_flushcache(addr, bytes, i);
260} 260}
261 261
262static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
263 void *addr, size_t bytes, struct iov_iter *i)
264{
265 return copy_to_iter_mcsafe(addr, bytes, i);
266}
267
262static const struct dax_operations pmem_dax_ops = { 268static const struct dax_operations pmem_dax_ops = {
263 .direct_access = pmem_dax_direct_access, 269 .direct_access = pmem_dax_direct_access,
264 .copy_from_iter = pmem_copy_from_iter, 270 .copy_from_iter = pmem_copy_from_iter,
271 .copy_to_iter = pmem_copy_to_iter,
265}; 272};
266 273
267static const struct attribute_group *pmem_attribute_groups[] = { 274static const struct attribute_group *pmem_attribute_groups[] = {
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 0a312e450207..29024492b8ed 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -51,9 +51,16 @@ static size_t dcssblk_dax_copy_from_iter(struct dax_device *dax_dev,
51 return copy_from_iter(addr, bytes, i); 51 return copy_from_iter(addr, bytes, i);
52} 52}
53 53
54static size_t dcssblk_dax_copy_to_iter(struct dax_device *dax_dev,
55 pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
56{
57 return copy_to_iter(addr, bytes, i);
58}
59
54static const struct dax_operations dcssblk_dax_ops = { 60static const struct dax_operations dcssblk_dax_ops = {
55 .direct_access = dcssblk_dax_direct_access, 61 .direct_access = dcssblk_dax_direct_access,
56 .copy_from_iter = dcssblk_dax_copy_from_iter, 62 .copy_from_iter = dcssblk_dax_copy_from_iter,
63 .copy_to_iter = dcssblk_dax_copy_to_iter,
57}; 64};
58 65
59struct dcssblk_dev_info { 66struct dcssblk_dev_info {
diff --git a/fs/dax.c b/fs/dax.c
index 31e9f51ac917..1f5f14a2ce4c 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1082,6 +1082,7 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1082 struct iov_iter *iter = data; 1082 struct iov_iter *iter = data;
1083 loff_t end = pos + length, done = 0; 1083 loff_t end = pos + length, done = 0;
1084 ssize_t ret = 0; 1084 ssize_t ret = 0;
1085 size_t xfer;
1085 int id; 1086 int id;
1086 1087
1087 if (iov_iter_rw(iter) == READ) { 1088 if (iov_iter_rw(iter) == READ) {
@@ -1145,18 +1146,20 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1145 * vfs_write(), depending on which operation we are doing. 1146 * vfs_write(), depending on which operation we are doing.
1146 */ 1147 */
1147 if (iov_iter_rw(iter) == WRITE) 1148 if (iov_iter_rw(iter) == WRITE)
1148 map_len = dax_copy_from_iter(dax_dev, pgoff, kaddr, 1149 xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1149 map_len, iter); 1150 map_len, iter);
1150 else 1151 else
1151 map_len = copy_to_iter(kaddr, map_len, iter); 1152 xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
1152 if (map_len <= 0) { 1153 map_len, iter);
1153 ret = map_len ? map_len : -EFAULT;
1154 break;
1155 }
1156 1154
1157 pos += map_len; 1155 pos += xfer;
1158 length -= map_len; 1156 length -= xfer;
1159 done += map_len; 1157 done += xfer;
1158
1159 if (xfer == 0)
1160 ret = -EFAULT;
1161 if (xfer < map_len)
1162 break;
1160 } 1163 }
1161 dax_read_unlock(id); 1164 dax_read_unlock(id);
1162 1165
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 25bab6abb695..b51db4264c83 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -20,6 +20,9 @@ struct dax_operations {
20 /* copy_from_iter: required operation for fs-dax direct-i/o */ 20 /* copy_from_iter: required operation for fs-dax direct-i/o */
21 size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t, 21 size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
22 struct iov_iter *); 22 struct iov_iter *);
23 /* copy_to_iter: required operation for fs-dax direct-i/o */
24 size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
25 struct iov_iter *);
23}; 26};
24 27
25extern struct attribute_group dax_attribute_group; 28extern struct attribute_group dax_attribute_group;
@@ -125,6 +128,8 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
125 void **kaddr, pfn_t *pfn); 128 void **kaddr, pfn_t *pfn);
126size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, 129size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
127 size_t bytes, struct iov_iter *i); 130 size_t bytes, struct iov_iter *i);
131size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
132 size_t bytes, struct iov_iter *i);
128void dax_flush(struct dax_device *dax_dev, void *addr, size_t size); 133void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
129 134
130ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, 135ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 31fef7c34185..6fb0808e87c8 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -133,7 +133,7 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
133 */ 133 */
134typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff, 134typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
135 long nr_pages, void **kaddr, pfn_t *pfn); 135 long nr_pages, void **kaddr, pfn_t *pfn);
136typedef size_t (*dm_dax_copy_from_iter_fn)(struct dm_target *ti, pgoff_t pgoff, 136typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
137 void *addr, size_t bytes, struct iov_iter *i); 137 void *addr, size_t bytes, struct iov_iter *i);
138#define PAGE_SECTORS (PAGE_SIZE / 512) 138#define PAGE_SECTORS (PAGE_SIZE / 512)
139 139
@@ -184,7 +184,8 @@ struct target_type {
184 dm_iterate_devices_fn iterate_devices; 184 dm_iterate_devices_fn iterate_devices;
185 dm_io_hints_fn io_hints; 185 dm_io_hints_fn io_hints;
186 dm_dax_direct_access_fn direct_access; 186 dm_dax_direct_access_fn direct_access;
187 dm_dax_copy_from_iter_fn dax_copy_from_iter; 187 dm_dax_copy_iter_fn dax_copy_from_iter;
188 dm_dax_copy_iter_fn dax_copy_to_iter;
188 189
189 /* For internal device-mapper use. */ 190 /* For internal device-mapper use. */
190 struct list_head list; 191 struct list_head list;
diff --git a/include/linux/string.h b/include/linux/string.h
index dd39a690c841..4a5a0eb7df51 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -147,8 +147,8 @@ extern int memcmp(const void *,const void *,__kernel_size_t);
147extern void * memchr(const void *,int,__kernel_size_t); 147extern void * memchr(const void *,int,__kernel_size_t);
148#endif 148#endif
149#ifndef __HAVE_ARCH_MEMCPY_MCSAFE 149#ifndef __HAVE_ARCH_MEMCPY_MCSAFE
150static inline __must_check int memcpy_mcsafe(void *dst, const void *src, 150static inline __must_check unsigned long memcpy_mcsafe(void *dst,
151 size_t cnt) 151 const void *src, size_t cnt)
152{ 152{
153 memcpy(dst, src, cnt); 153 memcpy(dst, src, cnt);
154 return 0; 154 return 0;
diff --git a/include/linux/uio.h b/include/linux/uio.h
index e67e12adb136..409c845d4cd3 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -154,6 +154,12 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
154#define _copy_from_iter_flushcache _copy_from_iter_nocache 154#define _copy_from_iter_flushcache _copy_from_iter_nocache
155#endif 155#endif
156 156
157#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
158size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i);
159#else
160#define _copy_to_iter_mcsafe _copy_to_iter
161#endif
162
157static __always_inline __must_check 163static __always_inline __must_check
158size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) 164size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
159{ 165{
@@ -163,6 +169,15 @@ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
163 return _copy_from_iter_flushcache(addr, bytes, i); 169 return _copy_from_iter_flushcache(addr, bytes, i);
164} 170}
165 171
/*
 * copy_to_iter_mcsafe() - size-checked front end for _copy_to_iter_mcsafe()
 *
 * Runs check_copy_size() on the (addr, bytes) source range before handing
 * off to the machine-check-safe copy.  Returns 0 when the size check
 * fails, otherwise whatever _copy_to_iter_mcsafe() reports (the number of
 * bytes successfully copied into @i).
 *
 * NOTE(review): @addr is only read here and _copy_to_iter_mcsafe() takes
 * const void * -- this parameter could likely be const as well; confirm
 * against check_copy_size() and existing callers.
 */
static __always_inline __must_check
size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_to_iter_mcsafe(addr, bytes, i);
}
180
166size_t iov_iter_zero(size_t bytes, struct iov_iter *); 181size_t iov_iter_zero(size_t bytes, struct iov_iter *);
167unsigned long iov_iter_alignment(const struct iov_iter *i); 182unsigned long iov_iter_alignment(const struct iov_iter *i);
168unsigned long iov_iter_gap_alignment(const struct iov_iter *i); 183unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
diff --git a/lib/Kconfig b/lib/Kconfig
index 5fe577673b98..907f6e4f1cf2 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -586,6 +586,9 @@ config ARCH_HAS_PMEM_API
586config ARCH_HAS_UACCESS_FLUSHCACHE 586config ARCH_HAS_UACCESS_FLUSHCACHE
587 bool 587 bool
588 588
589config ARCH_HAS_UACCESS_MCSAFE
590 bool
591
589config STACKDEPOT 592config STACKDEPOT
590 bool 593 bool
591 select STACKTRACE 594 select STACKTRACE
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index fdae394172fa..7e43cd54c84c 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -573,6 +573,67 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
573} 573}
574EXPORT_SYMBOL(_copy_to_iter); 574EXPORT_SYMBOL(_copy_to_iter);
575 575
576#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
577static int copyout_mcsafe(void __user *to, const void *from, size_t n)
578{
579 if (access_ok(VERIFY_WRITE, to, n)) {
580 kasan_check_read(from, n);
581 n = copy_to_user_mcsafe((__force void *) to, from, n);
582 }
583 return n;
584}
585
586static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
587 const char *from, size_t len)
588{
589 unsigned long ret;
590 char *to;
591
592 to = kmap_atomic(page);
593 ret = memcpy_mcsafe(to + offset, from, len);
594 kunmap_atomic(to);
595
596 return ret;
597}
598
/*
 * _copy_to_iter_mcsafe() - copy from a kernel buffer into an iov_iter using
 * machine-check-safe copy primitives.
 *
 * @addr:  kernel source buffer (may be poisoned persistent memory)
 * @bytes: number of bytes to attempt
 * @i:     destination iterator; ITER_PIPE is rejected with a WARN and 0
 *
 * Returns the number of bytes successfully copied.  When a copy step
 * reports a non-zero remainder, the total is reconstructed from the
 * running source cursor: (current position - start) - remainder.
 *
 * NOTE(review): the fault paths `return bytes` from *inside* the
 * iterate_and_advance() expansion, so @i is not advanced for the bytes
 * copied before the fault -- confirm callers re-derive progress solely
 * from the return value rather than the iterator state.
 */
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	/* s_addr anchors the start so a partial total can be computed. */
	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;

	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		/* iovec segment: mcsafe copy to userspace */
		copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		/* bvec segment: mcsafe copy into a (kmapped) page */
		({
		rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
                               (from += v.bv_len) - v.bv_len, v.bv_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		}),
		/* kvec segment: direct kernel-to-kernel mcsafe copy */
		({
		rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
				v.iov_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		})
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
635#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
636
576size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) 637size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
577{ 638{
578 char *to = addr; 639 char *to = addr;
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 4ea385be528f..a8fb63edcf89 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -29,6 +29,8 @@
29#include "nfit_test.h" 29#include "nfit_test.h"
30#include "../watermark.h" 30#include "../watermark.h"
31 31
32#include <asm/mcsafe_test.h>
33
32/* 34/*
33 * Generate an NFIT table to describe the following topology: 35 * Generate an NFIT table to describe the following topology:
34 * 36 *
@@ -2681,6 +2683,107 @@ static struct platform_driver nfit_test_driver = {
2681 .id_table = nfit_test_id, 2683 .id_table = nfit_test_id,
2682}; 2684};
2683 2685
/* Page-aligned scratch page; all mcsafe_test() copies stay inside it. */
static char mcsafe_buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Which side of the copy gets a simulated fault injected, if any. */
enum INJECT {
	INJECT_NONE,
	INJECT_SRC,
	INJECT_DST,
};
2693
/*
 * Prime the test buffers: destination is filled with 0xff so bytes the
 * copy never reaches remain detectable, and source carries an
 * incrementing byte pattern that mcsafe_test_validate() checks for.
 */
static void mcsafe_test_init(char *dst, char *src, size_t size)
{
	size_t idx = 0;

	memset(dst, 0xff, size);
	while (idx < size) {
		src[idx] = (char) idx;
		idx++;
	}
}
2702
2703static bool mcsafe_test_validate(unsigned char *dst, unsigned char *src,
2704 size_t size, unsigned long rem)
2705{
2706 size_t i;
2707
2708 for (i = 0; i < size - rem; i++)
2709 if (dst[i] != (unsigned char) i) {
2710 pr_info_once("%s:%d: offset: %zd got: %#x expect: %#x\n",
2711 __func__, __LINE__, i, dst[i],
2712 (unsigned char) i);
2713 return false;
2714 }
2715 for (i = size - rem; i < size; i++)
2716 if (dst[i] != 0xffU) {
2717 pr_info_once("%s:%d: offset: %zd got: %#x expect: 0xff\n",
2718 __func__, __LINE__, i, dst[i]);
2719 return false;
2720 }
2721 return true;
2722}
2723
2724void mcsafe_test(void)
2725{
2726 char *inject_desc[] = { "none", "source", "destination" };
2727 enum INJECT inj;
2728
2729 if (IS_ENABLED(CONFIG_MCSAFE_TEST)) {
2730 pr_info("%s: run...\n", __func__);
2731 } else {
2732 pr_info("%s: disabled, skip.\n", __func__);
2733 return;
2734 }
2735
2736 for (inj = INJECT_NONE; inj <= INJECT_DST; inj++) {
2737 int i;
2738
2739 pr_info("%s: inject: %s\n", __func__, inject_desc[inj]);
2740 for (i = 0; i < 512; i++) {
2741 unsigned long expect, rem;
2742 void *src, *dst;
2743 bool valid;
2744
2745 switch (inj) {
2746 case INJECT_NONE:
2747 mcsafe_inject_src(NULL);
2748 mcsafe_inject_dst(NULL);
2749 dst = &mcsafe_buf[2048];
2750 src = &mcsafe_buf[1024 - i];
2751 expect = 0;
2752 break;
2753 case INJECT_SRC:
2754 mcsafe_inject_src(&mcsafe_buf[1024]);
2755 mcsafe_inject_dst(NULL);
2756 dst = &mcsafe_buf[2048];
2757 src = &mcsafe_buf[1024 - i];
2758 expect = 512 - i;
2759 break;
2760 case INJECT_DST:
2761 mcsafe_inject_src(NULL);
2762 mcsafe_inject_dst(&mcsafe_buf[2048]);
2763 dst = &mcsafe_buf[2048 - i];
2764 src = &mcsafe_buf[1024];
2765 expect = 512 - i;
2766 break;
2767 }
2768
2769 mcsafe_test_init(dst, src, 512);
2770 rem = __memcpy_mcsafe(dst, src, 512);
2771 valid = mcsafe_test_validate(dst, src, 512, expect);
2772 if (rem == expect && valid)
2773 continue;
2774 pr_info("%s: copy(%#lx, %#lx, %d) off: %d rem: %ld %s expect: %ld\n",
2775 __func__,
2776 ((unsigned long) dst) & ~PAGE_MASK,
2777 ((unsigned long ) src) & ~PAGE_MASK,
2778 512, i, rem, valid ? "valid" : "bad",
2779 expect);
2780 }
2781 }
2782
2783 mcsafe_inject_src(NULL);
2784 mcsafe_inject_dst(NULL);
2785}
2786
2684static __init int nfit_test_init(void) 2787static __init int nfit_test_init(void)
2685{ 2788{
2686 int rc, i; 2789 int rc, i;
@@ -2689,6 +2792,7 @@ static __init int nfit_test_init(void)
2689 libnvdimm_test(); 2792 libnvdimm_test();
2690 acpi_nfit_test(); 2793 acpi_nfit_test();
2691 device_dax_test(); 2794 device_dax_test();
2795 mcsafe_test();
2692 2796
2693 nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm); 2797 nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm);
2694 2798