diff options
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/mm/pageattr.c | 40 |
1 files changed, 38 insertions, 2 deletions
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index f5e8663c0f75..b6374d653d06 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -35,6 +35,14 @@ struct cpa_data { | |||
35 | int curpage; | 35 | int curpage; |
36 | }; | 36 | }; |
37 | 37 | ||
38 | /* | ||
39 | * Serialize cpa() (for !DEBUG_PAGEALLOC which uses large identity mappings) | ||
40 | * using cpa_lock, so that we don't allow any other cpu with stale large tlb | ||
41 | * entries to change the page attribute in parallel with some other cpu | ||
42 | * splitting a large page entry along with changing the attribute. | ||
43 | */ | ||
44 | static DEFINE_SPINLOCK(cpa_lock); | ||
45 | |||
38 | #define CPA_FLUSHTLB 1 | 46 | #define CPA_FLUSHTLB 1 |
39 | #define CPA_ARRAY 2 | 47 | #define CPA_ARRAY 2 |
40 | 48 | ||
@@ -453,7 +461,13 @@ static int split_large_page(pte_t *kpte, unsigned long address) | |||
453 | unsigned int i, level; | 461 | unsigned int i, level; |
454 | pte_t *pbase, *tmp; | 462 | pte_t *pbase, *tmp; |
455 | pgprot_t ref_prot; | 463 | pgprot_t ref_prot; |
456 | struct page *base = alloc_pages(GFP_KERNEL, 0); | 464 | struct page *base; |
465 | |||
466 | if (!debug_pagealloc) | ||
467 | spin_unlock(&cpa_lock); | ||
468 | base = alloc_pages(GFP_KERNEL, 0); | ||
469 | if (!debug_pagealloc) | ||
470 | spin_lock(&cpa_lock); | ||
457 | if (!base) | 471 | if (!base) |
458 | return -ENOMEM; | 472 | return -ENOMEM; |
459 | 473 | ||
@@ -594,7 +608,25 @@ repeat: | |||
594 | */ | 608 | */ |
595 | err = split_large_page(kpte, address); | 609 | err = split_large_page(kpte, address); |
596 | if (!err) { | 610 | if (!err) { |
597 | cpa->flags |= CPA_FLUSHTLB; | 611 | /* |
612 | * Do a global flush tlb after splitting the large page | ||
613 | * and before we do the actual change page attribute in the PTE. | ||
614 | * | ||
615 | * Without this, we violate the TLB application note, which says | ||
616 | * "The TLBs may contain both ordinary and large-page | ||
617 | * translations for a 4-KByte range of linear addresses. This | ||
618 | * may occur if software modifies the paging structures so that | ||
619 | * the page size used for the address range changes. If the two | ||
620 | * translations differ with respect to page frame or attributes | ||
621 | * (e.g., permissions), processor behavior is undefined and may | ||
622 | * be implementation-specific." | ||
623 | * | ||
624 | * We do this global tlb flush inside the cpa_lock, so that we | ||
625 | * don't allow any other cpu with stale tlb entries to change a | ||
626 | * page attribute in parallel that also falls into the | ||
627 | * just-split large page entry. | ||
628 | */ | ||
629 | flush_tlb_all(); | ||
598 | goto repeat; | 630 | goto repeat; |
599 | } | 631 | } |
600 | 632 | ||
@@ -686,7 +718,11 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias) | |||
686 | if (cpa->flags & CPA_ARRAY) | 718 | if (cpa->flags & CPA_ARRAY) |
687 | cpa->numpages = 1; | 719 | cpa->numpages = 1; |
688 | 720 | ||
721 | if (!debug_pagealloc) | ||
722 | spin_lock(&cpa_lock); | ||
689 | ret = __change_page_attr(cpa, checkalias); | 723 | ret = __change_page_attr(cpa, checkalias); |
724 | if (!debug_pagealloc) | ||
725 | spin_unlock(&cpa_lock); | ||
690 | if (ret) | 726 | if (ret) |
691 | return ret; | 727 | return ret; |
692 | 728 | ||