about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorBen Hutchings <ben.hutchings@codethink.co.uk>2017-05-09 13:00:43 -0400
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2017-05-20 08:28:36 -0400
commite0c871792cc6f1ded64ec087779b08c825d76170 (patch)
treec5c9fa07db7e9c2728a0ec2eec73be462dbb23cb
parente65c6aa108607501271f2af80f3947f315fb56ca (diff)
x86, pmem: Fix cache flushing for iovec write < 8 bytes
commit 8376efd31d3d7c44bd05be337adde023cc531fa1 upstream. Commit 11e63f6d920d added cache flushing for unaligned writes from an iovec, covering the first and last cache line of a >= 8 byte write and the first cache line of a < 8 byte write. But an unaligned write of 2-7 bytes can still cover two cache lines, so make sure we flush both in that case. Fixes: 11e63f6d920d ("x86, pmem: fix broken __copy_user_nocache ...") Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk> Signed-off-by: Dan Williams <dan.j.williams@intel.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--arch/x86/include/asm/pmem.h2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index 529bb4a6487a..e2904373010d 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -103,7 +103,7 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
 
 	if (bytes < 8) {
 		if (!IS_ALIGNED(dest, 4) || (bytes != 4))
-			arch_wb_cache_pmem(addr, 1);
+			arch_wb_cache_pmem(addr, bytes);
 	} else {
 		if (!IS_ALIGNED(dest, 8)) {
 			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);