Diffstat (limited to 'mm/memory.c')
 mm/memory.c | 26 +++++++++++++++++++++++++-
 1 file changed, 25 insertions(+), 1 deletion(-)
diff --git a/mm/memory.c b/mm/memory.c
index bbab1e37055e..fb5608a120ed 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -311,6 +311,21 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
         if (!new)
                 return -ENOMEM;
 
+        /*
+         * Ensure all pte setup (eg. pte page lock and page clearing) are
+         * visible before the pte is made visible to other CPUs by being
+         * put into page tables.
+         *
+         * The other side of the story is the pointer chasing in the page
+         * table walking code (when walking the page table without locking;
+         * ie. most of the time). Fortunately, these data accesses consist
+         * of a chain of data-dependent loads, meaning most CPUs (alpha
+         * being the notable exception) will already guarantee loads are
+         * seen in-order. See the alpha page table accessors for the
+         * smp_read_barrier_depends() barriers in page table walking code.
+         */
+        smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
+
         spin_lock(&mm->page_table_lock);
         if (!pmd_present(*pmd)) {        /* Has another populated it ? */
                 mm->nr_ptes++;
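
The barrier comment above is the writer half of a publish/consume pairing: initialize the pte page first, then make the pmd point at it, with smp_wmb() keeping those stores ordered. As a rough illustration only, here is a self-contained userspace analogue of the same idiom; the names (pte_page, pmd_entry) are invented for the sketch, and C11 release/consume atomics stand in for smp_wmb() and the data-dependency ordering the comment relies on:

/*
 * Userspace sketch of the publish/consume pattern in __pte_alloc.
 * Illustrative names only; this is not kernel API.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct pte_page { int initialized; };

static struct pte_page page;
static _Atomic(struct pte_page *) pmd_entry;   /* stands in for the pmd */

static void *writer(void *arg)
{
        (void)arg;
        page.initialized = 1;        /* "pte setup": clearing, lock init */
        /* smp_wmb() analogue: order the setup before the publication */
        atomic_store_explicit(&pmd_entry, &page, memory_order_release);
        return NULL;
}

static void *reader(void *arg)
{
        struct pte_page *p;

        (void)arg;
        /* data-dependent load chain: pmd -> pte page contents */
        while (!(p = atomic_load_explicit(&pmd_entry, memory_order_consume)))
                ;
        /* without the ordering, this could print 0 on e.g. alpha */
        printf("initialized = %d\n", p->initialized);
        return NULL;
}

int main(void)
{
        pthread_t w, r;

        pthread_create(&r, NULL, reader, NULL);
        pthread_create(&w, NULL, writer, NULL);
        pthread_join(w, NULL);
        pthread_join(r, NULL);
        return 0;
}

On most CPUs the consume side costs nothing, because dependent loads are already ordered; that is exactly why the kernel only pays for an explicit read barrier on alpha.
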
@@ -329,6 +344,8 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
         if (!new)
                 return -ENOMEM;
 
+        smp_wmb(); /* See comment in __pte_alloc */
+
         spin_lock(&init_mm.page_table_lock);
         if (!pmd_present(*pmd)) {        /* Has another populated it ? */
                 pmd_populate_kernel(&init_mm, pmd, new);
@@ -969,7 +986,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                 goto no_page_table;
 
         pmd = pmd_offset(pud, address);
-        if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+        if (pmd_none(*pmd))
                 goto no_page_table;
 
         if (pmd_huge(*pmd)) {
@@ -978,6 +995,9 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                 goto out;
         }
 
+        if (unlikely(pmd_bad(*pmd)))
+                goto no_page_table;
+
         ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
         if (!ptep)
                 goto out;
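
The two follow_page() hunks split the old combined pmd_none/pmd_bad test so that pmd_bad is only checked after the pmd_huge case has been handled. Plausibly this is because a huge pmd maps a page directly rather than pointing at a pte page, so it should not fall through to a test that expects a page-table pointer; treat that reading as an inference from the code, not a statement made by this patch.

The read-side barrier that the __pte_alloc comment refers to lives in the page-table accessors rather than in follow_page() itself. A simplified sketch of one walker step, modeled on the generic pte_offset_kernel() with alpha's dependency barrier made explicit (pte_step_sketch is a hypothetical name):

/* Sketch only: one dependency-ordered step of a lockless walk. */
static pte_t *pte_step_sketch(pmd_t *pmd, unsigned long address)
{
        pmd_t entry = *pmd;             /* load the just-published pmd */
        smp_read_barrier_depends();     /* pairs with smp_wmb() in __pte_alloc;
                                           a no-op on everything but alpha */
        return (pte_t *)pmd_page_vaddr(entry) + pte_index(address);
}
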
@@ -2616,6 +2636,8 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
         if (!new)
                 return -ENOMEM;
 
+        smp_wmb(); /* See comment in __pte_alloc */
+
         spin_lock(&mm->page_table_lock);
         if (pgd_present(*pgd))                /* Another has populated it */
                 pud_free(mm, new);
@@ -2637,6 +2659,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
         if (!new)
                 return -ENOMEM;
 
+        smp_wmb(); /* See comment in __pte_alloc */
+
         spin_lock(&mm->page_table_lock);
 #ifndef __ARCH_HAS_4LEVEL_HACK
         if (pud_present(*pud))                /* Another has populated it */