Diffstat (limited to 'mm/memory.c')
 -rw-r--r--  mm/memory.c  |  23  +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 48c122d42ed7..19e0ae9beecb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -311,6 +311,21 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 	if (!new)
 		return -ENOMEM;
 
+	/*
+	 * Ensure all pte setup (eg. pte page lock and page clearing) are
+	 * visible before the pte is made visible to other CPUs by being
+	 * put into page tables.
+	 *
+	 * The other side of the story is the pointer chasing in the page
+	 * table walking code (when walking the page table without locking;
+	 * ie. most of the time). Fortunately, these data accesses consist
+	 * of a chain of data-dependent loads, meaning most CPUs (alpha
+	 * being the notable exception) will already guarantee loads are
+	 * seen in-order. See the alpha page table accessors for the
+	 * smp_read_barrier_depends() barriers in page table walking code.
+	 */
+	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
+
 	spin_lock(&mm->page_table_lock);
 	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
 		mm->nr_ptes++;
@@ -329,6 +344,8 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 	if (!new)
 		return -ENOMEM;
 
+	smp_wmb(); /* See comment in __pte_alloc */
+
 	spin_lock(&init_mm.page_table_lock);
 	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
 		pmd_populate_kernel(&init_mm, pmd, new);
@@ -2278,8 +2295,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	vmf.flags = flags;
 	vmf.page = NULL;
 
-	BUG_ON(vma->vm_flags & VM_PFNMAP);
-
 	ret = vma->vm_ops->fault(vma, &vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
 		return ret;
@@ -2619,6 +2634,8 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
 	if (!new)
 		return -ENOMEM;
 
+	smp_wmb(); /* See comment in __pte_alloc */
+
 	spin_lock(&mm->page_table_lock);
 	if (pgd_present(*pgd))		/* Another has populated it */
 		pud_free(mm, new);
@@ -2640,6 +2657,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 	if (!new)
 		return -ENOMEM;
 
+	smp_wmb(); /* See comment in __pte_alloc */
+
 	spin_lock(&mm->page_table_lock);
#ifndef __ARCH_HAS_4LEVEL_HACK
 	if (pud_present(*pud))		/* Another has populated it */
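
The comment added in the first hunk describes a classic publication ordering problem: a page table page must be fully initialized before the pointer to it becomes visible to lockless walkers. As a rough userspace analogue of that pairing, here is a minimal sketch using C11 atomics in place of the kernel's smp_wmb()/smp_read_barrier_depends(), which are not available outside the kernel. The names (struct entry, slot, publisher, walker) are hypothetical stand-ins, not kernel code: a release store plays the role of smp_wmb() before pmd_populate(), and a consume load plays the role of the data-dependent load on the walking side.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <threads.h>

struct entry {
	int initialized;		/* stands in for pte page setup */
};

static _Atomic(struct entry *) slot;	/* stands in for the pmd slot */

static int publisher(void *arg)
{
	struct entry *new = malloc(sizeof(*new));

	new->initialized = 1;		/* all setup happens first... */
	/*
	 * ...then the release store orders that setup before the pointer
	 * becomes visible, like smp_wmb() before pmd_populate() above.
	 */
	atomic_store_explicit(&slot, new, memory_order_release);
	return 0;
}

static int walker(void *arg)
{
	struct entry *e;

	/*
	 * The consume load orders the dependent read e->initialized after
	 * the pointer load. On most CPUs a plain data-dependent load is
	 * already ordered (the comment's point); alpha needs a barrier.
	 * Compilers typically promote consume to acquire, which is safe.
	 */
	while (!(e = atomic_load_explicit(&slot, memory_order_consume)))
		;			/* spin until published */
	printf("walker saw initialized=%d\n", e->initialized); /* always 1 */
	free(e);
	return 0;
}

int main(void)
{
	thrd_t w, p;

	thrd_create(&w, walker, NULL);
	thrd_create(&p, publisher, NULL);
	thrd_join(p, NULL);
	thrd_join(w, NULL);
	return 0;
}

Without the release/consume pairing (smp_wmb() on the kernel side), the walker could observe the new pointer yet read stale contents through it on weakly ordered hardware such as alpha, which is exactly the race the patch closes.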