author		Toshi Kani <toshi.kani@hpe.com>	2016-02-11 16:24:17 -0500
committer	Ingo Molnar <mingo@kernel.org>	2016-02-17 03:10:23 -0500
commit		a82eee7424525e34e98d821dd059ce14560a1e35 (patch)
tree		cfbee28f966b7fcf625c70d0fc0b62a16012307a /arch/x86/lib
parent		ee9737c924706aaa72c2ead93e3ad5644681dc1c (diff)
x86/uaccess/64: Handle the caching of 4-byte nocache copies properly in __copy_user_nocache()
Data corruption issues were observed in tests which initiated a system
crash/reset while accessing BTT devices. The problem is reproducible.

The BTT driver calls pmem_rw_bytes() to update data in pmem devices.
This interface calls __copy_user_nocache(), which uses non-temporal
stores so that the stores to pmem are persistent.

__copy_user_nocache() uses non-temporal stores only when a request size
is 8 bytes or larger (and the destination is aligned to 8 bytes). The
BTT driver updates the BTT map table, whose entries are 4 bytes each.
Therefore, updates to the map table entries remain cached and are not
written to pmem after a crash.

Change __copy_user_nocache() to use a non-temporal store when the
request size is 4 bytes. The change extends the current byte-copy path
for less-than-8-byte requests and does not add any overhead to the
regular path.

Reported-and-tested-by: Micah Parrish <micah.parrish@hpe.com>
Reported-and-tested-by: Brian Boylston <brian.boylston@hpe.com>
Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
Cc: <stable@vger.kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Borislav Petkov <bp@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luis R. Rodriguez <mcgrof@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Toshi Kani <toshi.kani@hp.com>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: linux-nvdimm@lists.01.org
Link: http://lkml.kernel.org/r/1455225857-12039-3-git-send-email-toshi.kani@hpe.com
[ Small readability edits. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
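For reference, the dispatch logic this patch changes can be restated in C.
The sketch below is illustrative only: the real routine is the x86-64
assembly in copy_user_64.S shown in the diff, the helpers nt_store_8() and
nt_store_4() are hypothetical stand-ins for the MOVNTI non-temporal store,
and the ALIGN_DESTINATION prologue for misaligned 8-byte-or-larger requests
is omitted.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical stand-ins for the MOVNTI non-temporal store instruction. */
static void nt_store_8(void *dst, uint64_t v) { memcpy(dst, &v, 8); }
static void nt_store_4(void *dst, uint32_t v) { memcpy(dst, &v, 4); }

static void nocache_copy_sketch(void *dst, const void *src, size_t len)
{
	const char *s = src;
	char *d = dst;

	/* 8-byte non-temporal path (destination assumed 8-byte aligned here). */
	if (len >= 8 && ((uintptr_t)d & 7) == 0) {
		while (len >= 8) {
			uint64_t v;
			memcpy(&v, s, 8);
			nt_store_8(d, v);
			s += 8; d += 8; len -= 8;
		}
	}

	/*
	 * NEW with this patch: one 4-byte non-temporal store for a
	 * 4-byte-aligned tail, so a 4-byte request actually reaches pmem.
	 */
	if (len >= 4 && ((uintptr_t)d & 3) == 0) {
		uint32_t v;
		memcpy(&v, s, 4);
		nt_store_4(d, v);
		s += 4; d += 4; len -= 4;
	}

	/* Remainder: plain cached byte copy (unchanged behaviour). */
	while (len--)
		*d++ = *s++;
}

Before the patch, a 4-byte request such as a BTT map table entry update fell
straight through to the cached byte-copy loop, so the data could remain in
the CPU cache and be lost on a crash.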
Diffstat (limited to 'arch/x86/lib')
-rw-r--r--	arch/x86/lib/copy_user_64.S	36
1 file changed, 32 insertions(+), 4 deletions(-)
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index a644aad1f112..27f89c79a44b 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -237,13 +237,14 @@ ENDPROC(copy_user_enhanced_fast_string)
  * Note: Cached memory copy is used when destination or size is not
  * naturally aligned. That is:
  * - Require 8-byte alignment when size is 8 bytes or larger.
+ * - Require 4-byte alignment when size is 4 bytes.
  */
 ENTRY(__copy_user_nocache)
 	ASM_STAC
 
-	/* If size is less than 8 bytes, go to byte copy */
+	/* If size is less than 8 bytes, go to 4-byte copy */
 	cmpl $8,%edx
-	jb .L_1b_cache_copy_entry
+	jb .L_4b_nocache_copy_entry
 
 	/* If destination is not 8-byte aligned, "cache" copy to align it */
 	ALIGN_DESTINATION
@@ -282,7 +283,7 @@ ENTRY(__copy_user_nocache)
 	movl %edx,%ecx
 	andl $7,%edx
 	shrl $3,%ecx
-	jz .L_1b_cache_copy_entry	/* jump if count is 0 */
+	jz .L_4b_nocache_copy_entry	/* jump if count is 0 */
 
 	/* Perform 8-byte nocache loop-copy */
 .L_8b_nocache_copy_loop:
@@ -294,11 +295,33 @@ ENTRY(__copy_user_nocache)
 	jnz .L_8b_nocache_copy_loop
 
 	/* If no byte left, we're done */
-.L_1b_cache_copy_entry:
+.L_4b_nocache_copy_entry:
+	andl %edx,%edx
+	jz .L_finish_copy
+
+	/* If destination is not 4-byte aligned, go to byte copy: */
+	movl %edi,%ecx
+	andl $3,%ecx
+	jnz .L_1b_cache_copy_entry
+
+	/* Set 4-byte copy count (1 or 0) and remainder */
+	movl %edx,%ecx
+	andl $3,%edx
+	shrl $2,%ecx
+	jz .L_1b_cache_copy_entry	/* jump if count is 0 */
+
+	/* Perform 4-byte nocache copy: */
+30:	movl (%rsi),%r8d
+31:	movnti %r8d,(%rdi)
+	leaq 4(%rsi),%rsi
+	leaq 4(%rdi),%rdi
+
+	/* If no bytes left, we're done: */
 	andl %edx,%edx
 	jz .L_finish_copy
 
 	/* Perform byte "cache" loop-copy for the remainder */
+.L_1b_cache_copy_entry:
 	movl %edx,%ecx
 .L_1b_cache_copy_loop:
 40:	movb (%rsi),%al
@@ -323,6 +346,9 @@ ENTRY(__copy_user_nocache)
 .L_fixup_8b_copy:
 	lea (%rdx,%rcx,8),%rdx
 	jmp .L_fixup_handle_tail
+.L_fixup_4b_copy:
+	lea (%rdx,%rcx,4),%rdx
+	jmp .L_fixup_handle_tail
 .L_fixup_1b_copy:
 	movl %ecx,%edx
 .L_fixup_handle_tail:
@@ -348,6 +374,8 @@ ENTRY(__copy_user_nocache)
 	_ASM_EXTABLE(16b,.L_fixup_4x8b_copy)
 	_ASM_EXTABLE(20b,.L_fixup_8b_copy)
 	_ASM_EXTABLE(21b,.L_fixup_8b_copy)
+	_ASM_EXTABLE(30b,.L_fixup_4b_copy)
+	_ASM_EXTABLE(31b,.L_fixup_4b_copy)
 	_ASM_EXTABLE(40b,.L_fixup_1b_copy)
 	_ASM_EXTABLE(41b,.L_fixup_1b_copy)
 ENDPROC(__copy_user_nocache)
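The two new exception-table entries route faults at labels 30 (the 4-byte
load) and 31 (the non-temporal store) to .L_fixup_4b_copy, which recomputes
the remaining length before the common tail handling. At that point %ecx
holds the pending 4-byte chunk count and %edx the byte remainder, so
lea (%rdx,%rcx,4),%rdx is simply remainder + chunks * 4, the same pattern as
.L_fixup_8b_copy with a scale of 4 instead of 8. A minimal C restatement of
that arithmetic, with a hypothetical helper name:

/* Hypothetical C restatement of the .L_fixup_4b_copy arithmetic. */
static unsigned long fixup_4b_copy(unsigned long remainder_rdx,
				   unsigned long chunks_rcx)
{
	/* lea (%rdx,%rcx,4),%rdx  ->  bytes still uncopied */
	return remainder_rdx + chunks_rcx * 4;
}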