author		Ingo Molnar <mingo@elte.hu>	2008-01-30 07:33:57 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:33:57 -0500
commit		9a3dc7804e9856668caef41efc54179e61ffccc0 (patch)
tree		a4f154ce7ad556fb5677e154b45747479694482b
parent		7afe15b9d888050435cd154906828df88d4e667d (diff)
x86: cpa: simplify locking
further simplify cpa locking: since the large-page split is a
slowpath, use the pgd_lock for the whole operation, instead
of the mmap_sem.
This also makes it suitable for DEBUG_PAGEALLOC purposes again.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--	arch/x86/mm/pageattr_32.c	12
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/arch/x86/mm/pageattr_32.c b/arch/x86/mm/pageattr_32.c
index 0966023dfd70..9cf2fea54eb5 100644
--- a/arch/x86/mm/pageattr_32.c
+++ b/arch/x86/mm/pageattr_32.c
@@ -37,9 +37,8 @@ pte_t *lookup_address(unsigned long address, int *level)
 	return pte_offset_kernel(pmd, address);
 }
 
-static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
+static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
 {
-	unsigned long flags;
 	struct page *page;
 
 	/* change init_mm */
@@ -47,7 +46,6 @@ static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
 	if (SHARED_KERNEL_PMD)
 		return;
 
-	spin_lock_irqsave(&pgd_lock, flags);
 	for (page = pgd_list; page; page = (struct page *)page->index) {
 		pgd_t *pgd;
 		pud_t *pud;
@@ -58,12 +56,12 @@ static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
 		pmd = pmd_offset(pud, address);
 		set_pte_atomic((pte_t *)pmd, pte);
 	}
-	spin_unlock_irqrestore(&pgd_lock, flags);
 }
 
 static int split_large_page(pte_t *kpte, unsigned long address)
 {
 	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
+	unsigned long flags;
 	unsigned long addr;
 	pte_t *pbase, *tmp;
 	struct page *base;
@@ -73,7 +71,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	if (!base)
 		return -ENOMEM;
 
-	down_write(&init_mm.mmap_sem);
+	spin_lock_irqsave(&pgd_lock, flags);
 	/*
 	 * Check for races, another CPU might have split this page
 	 * up for us already:
@@ -95,11 +93,11 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	/*
 	 * Install the new, split up pagetable:
 	 */
-	set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
+	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
 	base = NULL;
 
 out_unlock:
-	up_write(&init_mm.mmap_sem);
+	spin_unlock_irqrestore(&pgd_lock, flags);
 
 	if (base)
 		__free_pages(base, 0);
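
For quick reference, the resulting locking shape after this patch looks roughly as follows. This is a condensed sketch assembled from the hunks above, not the verbatim file contents; the pgd-list walk body, the page allocation and the race-check details are elided and marked with "...":

/* caller must hold pgd_lock -- hence the __ prefix and the dropped local locking */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	struct page *page;

	set_pte_atomic(kpte, pte);		/* change init_mm */
	if (SHARED_KERNEL_PMD)
		return;

	for (page = pgd_list; page; page = (struct page *)page->index) {
		/* ... propagate the pmd update into every pagetable on pgd_list ... */
	}
}

static int split_large_page(pte_t *kpte, unsigned long address)
{
	unsigned long flags;
	/* ... allocate the new pagetable page into 'base' ... */

	spin_lock_irqsave(&pgd_lock, flags);	/* was: down_write(&init_mm.mmap_sem) */
	/* ... re-check kpte for a racing split, fill in 'base' ... */
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);	/* was: up_write(&init_mm.mmap_sem) */
	/* ... free 'base' if it was not installed ... */
	return 0;
}

Since the large-page split is a slowpath, holding the IRQ-safe pgd_lock spinlock for the whole operation is cheap, and unlike the sleeping mmap_sem rwsem it can be taken from atomic context, which is presumably what the changelog's "suitable for DEBUG_PAGEALLOC purposes again" refers to.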