diff options
author    Yandong Zhao <yandong77520@gmail.com>   2018-07-11 07:06:28 -0400
committer Will Deacon <will.deacon@arm.com>       2018-07-11 12:02:02 -0400
commit    2fd8eb4ad87104c54800ef3cea498c92eb15c78a (patch)
tree      ba0d6cb6f9dee0872b801d0d021e8ae84382e1b5
parent    96f95a17c1cfe65a002e525114d96616e91a8f2d (diff)
arm64: neon: Fix function may_use_simd() return error status
It does not matter if the caller of may_use_simd() migrates to
another cpu after the call, but it is still important that the
kernel_neon_busy percpu instance that is read matches the cpu the
task is running on at the time of the read.
This means that raw_cpu_read() is not sufficient. kernel_neon_busy
may appear true if the caller migrates during the execution of
raw_cpu_read() and the next task to be scheduled in on the initial
cpu calls kernel_neon_begin().
This patch replaces raw_cpu_read() with this_cpu_read() to protect
against this race.
Cc: <stable@vger.kernel.org>
Fixes: cb84d11e1625 ("arm64: neon: Remove support for nested or hardirq kernel-mode NEON")
Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Yandong Zhao <yandong77520@gmail.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--   arch/arm64/include/asm/simd.h | 19 +++++++------------
1 file changed, 7 insertions(+), 12 deletions(-)
diff --git a/arch/arm64/include/asm/simd.h b/arch/arm64/include/asm/simd.h
index fa8b3fe932e6..6495cc51246f 100644
--- a/arch/arm64/include/asm/simd.h
+++ b/arch/arm64/include/asm/simd.h
@@ -29,20 +29,15 @@ DECLARE_PER_CPU(bool, kernel_neon_busy);
 static __must_check inline bool may_use_simd(void)
 {
 	/*
-	 * The raw_cpu_read() is racy if called with preemption enabled.
-	 * This is not a bug: kernel_neon_busy is only set when
-	 * preemption is disabled, so we cannot migrate to another CPU
-	 * while it is set, nor can we migrate to a CPU where it is set.
-	 * So, if we find it clear on some CPU then we're guaranteed to
-	 * find it clear on any CPU we could migrate to.
-	 *
-	 * If we are in between kernel_neon_begin()...kernel_neon_end(),
-	 * the flag will be set, but preemption is also disabled, so we
-	 * can't migrate to another CPU and spuriously see it become
-	 * false.
+	 * kernel_neon_busy is only set while preemption is disabled,
+	 * and is clear whenever preemption is enabled. Since
+	 * this_cpu_read() is atomic w.r.t. preemption, kernel_neon_busy
+	 * cannot change under our feet -- if it's set we cannot be
+	 * migrated, and if it's clear we cannot be migrated to a CPU
+	 * where it is set.
 	 */
 	return !in_irq() && !irqs_disabled() && !in_nmi() &&
-	       !raw_cpu_read(kernel_neon_busy);
+	       !this_cpu_read(kernel_neon_busy);
 }
 
 #else /* ! CONFIG_KERNEL_MODE_NEON */