author     Heiko Carstens <heiko.carstens@de.ibm.com>   2016-12-28 05:33:48 -0500
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>  2017-01-16 01:27:48 -0500
commit     e991c24d68b8c0ba297eeb7af80b1e398e98c33f (patch)
tree       0de08c08b2ecdf7b9a22683ac8cff66331ae7782
parent     49def1853334396f948dcb4cedb9347abb318df5 (diff)
s390/ctl_reg: make __ctl_load a full memory barrier
We have quite a lot of code that depends on the order of the
__ctl_load inline assembly and subsequent memory accesses, like
e.g. disabling lowcore protection and the writing to lowcore.

Since the __ctl_load macro does not have memory barrier semantics,
nor any other dependencies, the compiler is, theoretically, free to
shuffle code around. In other words: storing to lowcore could happen
before lowcore protection is disabled.

In order to avoid this class of potential bugs, simply add a full
memory barrier to the __ctl_load macro.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
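
For context, here is a minimal, non-kernel sketch of the compiler-barrier
effect that a "memory" clobber has in GCC extended asm. The names below
(barrier_with_clobber, fake_lowcore_word) are illustrative assumptions,
not taken from the kernel source:

	/* Minimal sketch, not kernel code: "memory" clobber as a
	 * compiler barrier. */
	static unsigned long fake_lowcore_word;

	static inline void barrier_with_clobber(void)
	{
		/*
		 * "memory" tells the compiler the asm may read or write
		 * any memory: pending stores must be emitted before the
		 * asm, and cached values must be reloaded afterwards.
		 * Without the clobber, GCC may move ordinary loads and
		 * stores across the asm statement.
		 */
		asm volatile("" : : : "memory");
	}

	void sketch_store(void)
	{
		barrier_with_clobber();	/* e.g. protection just disabled */
		fake_lowcore_word = 1;	/* cannot be hoisted above the asm */
	}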
-rw-r--r--  arch/s390/include/asm/ctl_reg.h | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index d7697ab802f6..8e136b88cdf4 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -15,7 +15,9 @@
 	BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
 	asm volatile(						\
 		"	lctlg	%1,%2,%0\n"			\
-		: : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
+		:						\
+		: "Q" (*(addrtype *)(&array)), "i" (low), "i" (high)	\
+		: "memory");					\
 }
 
 #define __ctl_store(array, low, high) {			\
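
As a usage illustration, here is a hedged sketch of the dependent pattern
the commit message describes (disable lowcore protection, store to lowcore,
restore protection). The helper name is hypothetical, and the assumption
that CR0 bit 28 controls low-address protection is the sketch's, not the
patch's:

	#include <asm/ctl_reg.h>

	/* Hypothetical helper; assumes CR0 bit 28 = low-address protection. */
	static inline void sketch_lowcore_store(unsigned long *lc_field,
						unsigned long val)
	{
		unsigned long cr0, cr0_unprot;

		__ctl_store(cr0, 0, 0);		/* save control register 0 */
		cr0_unprot = cr0 & ~(1UL << 28);
		__ctl_load(cr0_unprot, 0, 0);	/* drop lowcore protection */
		*lc_field = val;	/* the new "memory" clobber keeps this
					 * store after the __ctl_load above */
		__ctl_load(cr0, 0, 0);		/* restore protection */
	}

Before this patch, the compiler was in principle free to move the
assignment to *lc_field ahead of the first __ctl_load, i.e. to store to
still-protected lowcore; the "memory" clobber rules that out.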