-rw-r--r--   arch/powerpc/kvm/book3s_hv_rm_xics.c   |  8
-rw-r--r--   arch/powerpc/kvm/book3s_xics.c         | 16
-rw-r--r--   arch/powerpc/mm/hugetlbpage.c          |  4
-rw-r--r--   arch/sh/mm/gup.c                       |  2
-rw-r--r--   arch/x86/include/asm/spinlock.h        |  4
-rw-r--r--   arch/x86/xen/p2m.c                     |  2
-rw-r--r--   include/linux/compiler.h               | 21
-rw-r--r--   mm/gup.c                               |  2
8 files changed, 35 insertions, 24 deletions
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 7b066f6b02ad..7c22997de906 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -152,7 +152,7 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
 	 * in virtual mode.
 	 */
 	do {
-		old_state = new_state = ACCESS_ONCE(icp->state);
+		old_state = new_state = READ_ONCE(icp->state);
 
 		/* Down_CPPR */
 		new_state.cppr = new_cppr;
@@ -211,7 +211,7 @@ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
 	 * pending priority
 	 */
 	do {
-		old_state = new_state = ACCESS_ONCE(icp->state);
+		old_state = new_state = READ_ONCE(icp->state);
 
 		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
 		if (!old_state.xisr)
@@ -277,7 +277,7 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 	 * whenever the MFRR is made less favored.
 	 */
 	do {
-		old_state = new_state = ACCESS_ONCE(icp->state);
+		old_state = new_state = READ_ONCE(icp->state);
 
 		/* Set_MFRR */
 		new_state.mfrr = mfrr;
@@ -352,7 +352,7 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
 	icp_rm_clr_vcpu_irq(icp->vcpu);
 
 	do {
-		old_state = new_state = ACCESS_ONCE(icp->state);
+		old_state = new_state = READ_ONCE(icp->state);
 
 		reject = 0;
 		new_state.cppr = cppr;
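All four hunks in this file convert the same lockless idiom: icp->state is a word-sized union that is snapshotted once, modified in a local copy, and published with cmpxchg(), retrying if another CPU got there first. A minimal userspace sketch of that loop, assuming a hypothetical two-field state layout and GCC's __sync builtin standing in for the kernel's cmpxchg():

#include <stdint.h>

union state {				/* hypothetical stand-in for icp->state */
	uint64_t raw;
	struct { uint8_t cppr, mfrr; };
};

static void set_mfrr(union state *st, uint8_t mfrr)
{
	union state old_state, new_state;

	do {
		/* one volatile snapshot of the whole union -- the job
		 * READ_ONCE(icp->state) does in the hunks above */
		old_state.raw = new_state.raw = *(volatile uint64_t *)&st->raw;
		new_state.mfrr = mfrr;	/* modify only the local copy */
		/* publish atomically; retry if another CPU raced us */
	} while (__sync_val_compare_and_swap(&st->raw, old_state.raw,
					     new_state.raw) != old_state.raw);
}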
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 807351f76f84..a4a8d9f0dcb7 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -327,7 +327,7 @@ static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
 		 icp->server_num);
 
 	do {
-		old_state = new_state = ACCESS_ONCE(icp->state);
+		old_state = new_state = READ_ONCE(icp->state);
 
 		*reject = 0;
 
@@ -512,7 +512,7 @@ static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
 	 * in virtual mode.
 	 */
 	do {
-		old_state = new_state = ACCESS_ONCE(icp->state);
+		old_state = new_state = READ_ONCE(icp->state);
 
 		/* Down_CPPR */
 		new_state.cppr = new_cppr;
@@ -567,7 +567,7 @@ static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
 	 * pending priority
 	 */
 	do {
-		old_state = new_state = ACCESS_ONCE(icp->state);
+		old_state = new_state = READ_ONCE(icp->state);
 
 		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
 		if (!old_state.xisr)
@@ -634,7 +634,7 @@ static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 	 * whenever the MFRR is made less favored.
 	 */
 	do {
-		old_state = new_state = ACCESS_ONCE(icp->state);
+		old_state = new_state = READ_ONCE(icp->state);
 
 		/* Set_MFRR */
 		new_state.mfrr = mfrr;
@@ -679,7 +679,7 @@ static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
 		if (!icp)
 			return H_PARAMETER;
 	}
-	state = ACCESS_ONCE(icp->state);
+	state = READ_ONCE(icp->state);
 	kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
 	kvmppc_set_gpr(vcpu, 5, state.mfrr);
 	return H_SUCCESS;
@@ -721,7 +721,7 @@ static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
 					BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
 
 	do {
-		old_state = new_state = ACCESS_ONCE(icp->state);
+		old_state = new_state = READ_ONCE(icp->state);
 
 		reject = 0;
 		new_state.cppr = cppr;
@@ -885,7 +885,7 @@ static int xics_debug_show(struct seq_file *m, void *private)
 		if (!icp)
 			continue;
 
-		state.raw = ACCESS_ONCE(icp->state.raw);
+		state.raw = READ_ONCE(icp->state.raw);
 		seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
 			   icp->server_num, state.xisr,
 			   state.pending_pri, state.cppr, state.mfrr,
@@ -1082,7 +1082,7 @@ int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
 	 * the ICS states before the ICP states.
 	 */
 	do {
-		old_state = ACCESS_ONCE(icp->state);
+		old_state = READ_ONCE(icp->state);
 
 		if (new_state.mfrr <= old_state.mfrr) {
 			resend = false;
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index cf0464f4284f..7e408bfc7948 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -986,7 +986,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
 	 */
 	pdshift = PUD_SHIFT;
 	pudp = pud_offset(&pgd, ea);
-	pud = ACCESS_ONCE(*pudp);
+	pud = READ_ONCE(*pudp);
 
 	if (pud_none(pud))
 		return NULL;
@@ -998,7 +998,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
 	else {
 		pdshift = PMD_SHIFT;
 		pmdp = pmd_offset(&pud, ea);
-		pmd = ACCESS_ONCE(*pmdp);
+		pmd = READ_ONCE(*pmdp);
 		/*
 		 * A hugepage collapse is captured by pmd_none, because
 		 * it mark the pmd none and do a hpte invalidate.
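Both page-table hunks in this file apply the snapshot rule behind that comment: the entry is read exactly once into a local variable, so the pmd_none() check and the later use cannot observe two different values when a concurrent hugepage collapse rewrites the slot. A compressed sketch of the rule, with a hypothetical entry_t standing in for pud_t/pmd_t:

#include <stddef.h>

typedef unsigned long entry_t;		/* hypothetical pud_t/pmd_t stand-in */

static void *walk_one_level(entry_t *slot)
{
	/* single volatile read: check and use operate on one snapshot,
	 * even if a concurrent collapse zeroes *slot underneath us */
	entry_t e = *(volatile entry_t *)slot;

	if (e == 0)			/* pmd_none()/pud_none() analogue */
		return NULL;
	return (void *)(e & ~0xfffUL);	/* descend using the same snapshot */
}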
diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c
index e15f52a17b6c..e7af6a65baab 100644
--- a/arch/sh/mm/gup.c
+++ b/arch/sh/mm/gup.c
@@ -17,7 +17,7 @@
 static inline pte_t gup_get_pte(pte_t *ptep)
 {
 #ifndef CONFIG_X2TLB
-	return ACCESS_ONCE(*ptep);
+	return READ_ONCE(*ptep);
 #else
 	/*
 	 * With get_user_pages_fast, we walk down the pagetables without
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 625660f8a2fc..7050d864f520 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -183,10 +183,10 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 
 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-	__ticket_t head = ACCESS_ONCE(lock->tickets.head);
+	__ticket_t head = READ_ONCE(lock->tickets.head);
 
 	for (;;) {
-		struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+		struct __raw_tickets tmp = READ_ONCE(lock->tickets);
 		/*
 		 * We need to check "unlocked" in a loop, tmp.head == head
 		 * can be false positive because of overflow.
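Note that the second conversion here reads a struct, not a scalar: head and tail must be fetched as one consistent pair for the overflow check described in the comment to be meaningful, which is exactly the non-scalar case the old volatile cast could mishandle. A sketch of the pair being snapshotted, assuming the usual x86 encoding of two 16-bit ticket counters in one 32-bit word:

#include <stdint.h>

typedef uint16_t __ticket_t;

typedef struct {
	union {
		uint32_t head_tail;
		struct { __ticket_t head, tail; } tickets;
	};
} arch_spinlock_t;

static int ticket_is_locked(arch_spinlock_t *lock)
{
	arch_spinlock_t snap;

	/* one 32-bit volatile load yields a consistent head/tail pair;
	 * two independent 16-bit loads could tear across an unlock */
	snap.head_tail = *(volatile uint32_t *)&lock->head_tail;
	return snap.tickets.head != snap.tickets.tail;
}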
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index f18fd1d411f6..740ae3026a14 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -550,7 +550,7 @@ static bool alloc_p2m(unsigned long pfn)
 		mid_mfn = NULL;
 	}
 
-	p2m_pfn = pte_pfn(ACCESS_ONCE(*ptep));
+	p2m_pfn = pte_pfn(READ_ONCE(*ptep));
 	if (p2m_pfn == PFN_DOWN(__pa(p2m_identity)) ||
 	    p2m_pfn == PFN_DOWN(__pa(p2m_missing))) {
 		/* p2m leaf page is missing */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 17f624cdf53c..d1ec10a940ff 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -451,12 +451,23 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
  * to make the compiler aware of ordering is to put the two invocations of
  * ACCESS_ONCE() in different C statements.
  *
- * This macro does absolutely -nothing- to prevent the CPU from reordering,
- * merging, or refetching absolutely anything at any time.  Its main intended
- * use is to mediate communication between process-level code and irq/NMI
- * handlers, all running on the same CPU.
+ * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
+ * on a union member will work as long as the size of the member matches the
+ * size of the union and the size is smaller than word size.
+ *
+ * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
+ * between process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ *
+ * If possible use READ_ONCE/ASSIGN_ONCE instead.
  */
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#define __ACCESS_ONCE(x) ({ \
+	__maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
+	(volatile typeof(x) *)&(x); })
+#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
 
 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
 #ifdef CONFIG_KPROBES
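The dummy initialization in the new __ACCESS_ONCE() is what enforces the scalar-types-only rule stated in the comment: '= (__force typeof(x)) 0' is valid C only when typeof(x) is a scalar, so applying ACCESS_ONCE() to a struct now fails at compile time instead of, on affected GCC versions, silently dropping the volatile qualifier. A userspace sketch of the same trick, without the kernel's __force and __maybe_unused annotations:

#define ACCESS_ONCE(x) (*({					\
	typeof(x) __var __attribute__((unused)) = (typeof(x))0;\
	(volatile typeof(x) *)&(x); }))

int read_scalar(int *p)
{
	return ACCESS_ONCE(*p);		/* fine: int is scalar */
}

/* struct pair { int a, b; };
 * ACCESS_ONCE(struct_var)	-- build error: '(struct pair)0' is not
 *				   a valid initializer, which is exactly
 *				   what the dummy __var is there to catch
 */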
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1092,7 +1092,7 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 
 	pmdp = pmd_offset(&pud, addr);
 	do {
-		pmd_t pmd = ACCESS_ONCE(*pmdp);
+		pmd_t pmd = READ_ONCE(*pmdp);
 
 		next = pmd_addr_end(addr, end);
 		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
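For reference, the READ_ONCE() that all of these call sites migrate to does not depend on a volatile-qualified lvalue at all: it copies the object out through a volatile pointer of matching width, which is why it also works for unions such as icp->state. A condensed userspace sketch of that mechanism, assuming a simplified form of the __read_once_size() helper that sits next to the __write_once_size() visible in the compiler.h context above:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(uint8_t  *)res = *(const volatile uint8_t  *)p; break;
	case 2: *(uint16_t *)res = *(const volatile uint16_t *)p; break;
	case 4: *(uint32_t *)res = *(const volatile uint32_t *)p; break;
	case 8: *(uint64_t *)res = *(const volatile uint64_t *)p; break;
	default:		/* oversized objects: copied, but not atomic */
		memcpy(res, (const void *)p, size);
	}
}

#define READ_ONCE(x) ({					\
	typeof(x) __val;				\
	__read_once_size(&(x), &__val, sizeof(__val));	\
	__val; })

union icp_state {		/* hypothetical stand-in for the XICS union */
	uint64_t raw;
	struct { uint8_t cppr, mfrr; };
};

int main(void)
{
	union icp_state s = { .raw = 0x2a01 };
	union icp_state snap = READ_ONCE(s);	/* whole-union volatile load */

	printf("raw=%#llx cppr=%u\n",
	       (unsigned long long)snap.raw, (unsigned)snap.cppr);
	return 0;
}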