author	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-20 19:48:59 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-20 19:48:59 -0500
commit	60815cf2e05057db5b78e398d9734c493560b11e (patch)
tree	23d7f55df13cc5a0c072cc8a6f361f8e7050b825 /mm
parent	bfc7249cc293deac8f2678b7ec3d2407b68c0a33 (diff)
parent	5de72a2247ac05bde7c89039631b3d0c6186fafb (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/borntraeger/linux
Pull ACCESS_ONCE cleanup preparation from Christian Borntraeger:
 "kernel: Provide READ_ONCE and ASSIGN_ONCE

  As discussed on LKML (http://marc.info/?i=54611D86.4040306%40de.ibm.com),
  ACCESS_ONCE might fail with specific compilers for non-scalar accesses.

  Here is a set of patches to tackle that problem.

  The first patch introduces READ_ONCE and ASSIGN_ONCE.  If the data
  structure is larger than the machine word size, memcpy is used and a
  warning is emitted.  The next patches fix up several in-tree users of
  ACCESS_ONCE on non-scalar types.

  This does not yet contain a patch that forces ACCESS_ONCE to work only
  on scalar types.  That is targeted for the next merge window, as
  linux-next already contains new offenders regarding ACCESS_ONCE vs.
  non-scalar types"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/borntraeger/linux:
  s390/kvm: REPLACE barrier fixup with READ_ONCE
  arm/spinlock: Replace ACCESS_ONCE with READ_ONCE
  arm64/spinlock: Replace ACCESS_ONCE READ_ONCE
  mips/gup: Replace ACCESS_ONCE with READ_ONCE
  x86/gup: Replace ACCESS_ONCE with READ_ONCE
  x86/spinlock: Replace ACCESS_ONCE with READ_ONCE
  mm: replace ACCESS_ONCE with READ_ONCE or barriers
  kernel: Provide READ_ONCE and ASSIGN_ONCE
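For orientation, a condensed sketch of what the new READ_ONCE looks like: word-sized
scalars are read through an appropriately sized volatile access, and anything larger
falls back to memcpy. This is simplified from the series; the CONFIG_64BIT guard on
the 8-byte case and the exact compile-time-warning helper are elided here.

static __always_inline void __read_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
	default:
		/* larger than a machine word: copy byte-wise; the real
		 * helper also emits a "won't be atomic" warning here */
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		barrier();
	}
}

#define READ_ONCE(x) \
	({ typeof(x) __val; __read_once_size(&(x), &__val, sizeof(__val)); __val; })

ASSIGN_ONCE is the store-side counterpart, taking the value first: ASSIGN_ONCE(val, x).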
Diffstat (limited to 'mm')
-rw-r--r--	mm/gup.c	2
-rw-r--r--	mm/memory.c	11
-rw-r--r--	mm/rmap.c	3
3 files changed, 13 insertions(+), 3 deletions(-)
diff --git a/mm/gup.c b/mm/gup.c
index 0ca1df9075ab..a900759cc807 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -968,7 +968,7 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
 
 	pudp = pud_offset(&pgd, addr);
 	do {
-		pud_t pud = ACCESS_ONCE(*pudp);
+		pud_t pud = READ_ONCE(*pudp);
 
 		next = pud_addr_end(addr, end);
 		if (pud_none(pud))
diff --git a/mm/memory.c b/mm/memory.c
index d8aebc52265f..649e7d440bd7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3195,7 +3195,16 @@ static int handle_pte_fault(struct mm_struct *mm,
 	pte_t entry;
 	spinlock_t *ptl;
 
-	entry = ACCESS_ONCE(*pte);
+	/*
+	 * some architectures can have larger ptes than wordsize,
+	 * e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and CONFIG_32BIT=y,
+	 * so READ_ONCE or ACCESS_ONCE cannot guarantee atomic accesses.
+	 * The code below just needs a consistent view for the ifs and
+	 * we later double check anyway with the ptl lock held. So here
+	 * a barrier will do.
+	 */
+	entry = *pte;
+	barrier();
 	if (!pte_present(entry)) {
 		if (pte_none(entry)) {
 			if (vma->vm_ops) {
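The hunk above (and the mm/rmap.c hunk below) replaces ACCESS_ONCE with a plain load
plus a compiler barrier, because a pte can be wider than a machine word and no macro
can make that load atomic. A minimal standalone sketch of the pattern, with
hypothetical names (pte64, snapshot_pte) and barrier() written out as the compiler
barrier the kernel's definition expands to:

#include <stdint.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

struct pte64 { uint64_t val; };	/* hypothetical: a pte wider than a 32-bit word */

uint64_t snapshot_pte(struct pte64 *ptep)	/* hypothetical helper */
{
	/*
	 * One plain load into a local; barrier() stops the compiler from
	 * re-reading *ptep for every later test, so all checks see the
	 * same snapshot. The load may still be torn on 32-bit, which is
	 * tolerable only because callers re-validate the entry under the
	 * page-table lock before acting on it.
	 */
	struct pte64 entry = *ptep;
	barrier();
	return entry.val;
}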
diff --git a/mm/rmap.c b/mm/rmap.c
index 45ba250babd8..c5bc241127b2 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -583,7 +583,8 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
 	 * without holding anon_vma lock for write. So when looking for a
 	 * genuine pmde (in which to find pte), test present and !THP together.
 	 */
-	pmde = ACCESS_ONCE(*pmd);
+	pmde = *pmd;
+	barrier();
 	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
 		pmd = NULL;
 out: