diff options
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory.c | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 48c122d42ed7..fb5608a120ed 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -311,6 +311,21 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 	if (!new)
 		return -ENOMEM;
 
+	/*
+	 * Ensure all pte setup (eg. pte page lock and page clearing) are
+	 * visible before the pte is made visible to other CPUs by being
+	 * put into page tables.
+	 *
+	 * The other side of the story is the pointer chasing in the page
+	 * table walking code (when walking the page table without locking;
+	 * ie. most of the time). Fortunately, these data accesses consist
+	 * of a chain of data-dependent loads, meaning most CPUs (alpha
+	 * being the notable exception) will already guarantee loads are
+	 * seen in-order. See the alpha page table accessors for the
+	 * smp_read_barrier_depends() barriers in page table walking code.
+	 */
+	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
+
 	spin_lock(&mm->page_table_lock);
 	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
 		mm->nr_ptes++;
@@ -329,6 +344,8 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 	if (!new)
 		return -ENOMEM;
 
+	smp_wmb(); /* See comment in __pte_alloc */
+
 	spin_lock(&init_mm.page_table_lock);
 	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
 		pmd_populate_kernel(&init_mm, pmd, new);
@@ -2619,6 +2636,8 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
 	if (!new)
 		return -ENOMEM;
 
+	smp_wmb(); /* See comment in __pte_alloc */
+
 	spin_lock(&mm->page_table_lock);
 	if (pgd_present(*pgd))		/* Another has populated it */
 		pud_free(mm, new);
@@ -2640,6 +2659,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 	if (!new)
 		return -ENOMEM;
 
+	smp_wmb(); /* See comment in __pte_alloc */
+
 	spin_lock(&mm->page_table_lock);
#ifndef __ARCH_HAS_4LEVEL_HACK
 	if (pud_present(*pud))		/* Another has populated it */