author		Palmer Dabbelt <palmer@sifive.com>	2017-12-05 20:48:11 -0500
committer	Palmer Dabbelt <palmer@dabbelt.com>	2017-12-11 10:51:07 -0500
commit		3cfa5008081db845c6c53d531ec34e9c84a9fd99
tree		805a705c5ab4544fe0bb8402a21433805a502f7e
parent		86ad5c97ce5ccdda1459d35370fd5e105721bb8d
RISC-V: Resurrect smp_mb__after_spinlock()
I removed this last week because of an incorrect comment:
smp_mb__after_spinlock() is actually still used, and is necessary on
RISC-V. It's been resurrected, with a comment that describes what it
actually does this time. Thanks to Andrea for finding the bug!
Fixes: 3343eb6806f3 ("RISC-V: Remove smb_mb__{before,after}_spinlock()")
CC: Andrea Parri <parri.andrea@gmail.com>
Signed-off-by: Palmer Dabbelt <palmer@sifive.com>
 arch/riscv/include/asm/barrier.h | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+), 0 deletions(-)
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
index 773c4e039cd7..c0319cbf1eec 100644
--- a/arch/riscv/include/asm/barrier.h
+++ b/arch/riscv/include/asm/barrier.h
@@ -38,6 +38,25 @@
 #define smp_rmb()		RISCV_FENCE(r,r)
 #define smp_wmb()		RISCV_FENCE(w,w)
 
+/*
+ * This is a very specific barrier: it's currently only used in two places in
+ * the kernel, both in the scheduler.  See include/linux/spinlock.h for the two
+ * orderings it guarantees, but the "critical section is RCsc" guarantee
+ * mandates a barrier on RISC-V.  The sequence looks like:
+ *
+ *    lr.aq lock
+ *    sc    lock <= LOCKED
+ *    smp_mb__after_spinlock()
+ *    // critical section
+ *    lr    lock
+ *    sc.rl lock <= UNLOCKED
+ *
+ * The AQ/RL pair provides a RCpc critical section, but there's not really any
+ * way we can take advantage of that here because the ordering is only enforced
+ * on that one lock.  Thus, we're just doing a full fence.
+ */
+#define smp_mb__after_spinlock()	RISCV_FENCE(rw,rw)
+
 #include <asm-generic/barrier.h>
 
 #endif /* __ASSEMBLY__ */
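
For context, here is a minimal sketch of the usage pattern the new comment describes: a lock acquisition immediately followed by smp_mb__after_spinlock() so the critical section is RCsc rather than the RCpc that the AQ/RL pair alone provides. The lock and function names below are illustrative stand-ins, not code from this patch or from the scheduler callers it refers to.

	/* Illustrative sketch only -- generic names, not taken from this patch. */
	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(example_lock);	/* hypothetical lock */

	static void example_rcsc_section(void)
	{
		raw_spin_lock(&example_lock);
		/*
		 * The acquire from the lock alone only gives RCpc ordering on
		 * RISC-V; this barrier (a full "fence rw,rw" via
		 * RISCV_FENCE(rw,rw)) upgrades the critical section to RCsc.
		 */
		smp_mb__after_spinlock();

		/* ... accesses that must be fully ordered against earlier ones ... */

		raw_spin_unlock(&example_lock);
	}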