author		Jeremy Fitzhardinge <jeremy@goop.org>	2008-03-17 19:37:00 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-04-24 17:57:31 -0400
commit		394158559d4c912cc58c311b6346cdea0ed2b1de (patch)
tree		c4cdc93d77d964577af8b42a5c9b37916735bf47 /arch/x86/mm
parent		5a5f8f42241cf09caec5530a7639cfa8dccc3a7b (diff)
x86: move all the pgd_list handling to one place
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/pgtable.c	28
1 file changed, 7 insertions(+), 21 deletions(-)
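In short: the 64-bit copies of pgd_list_add() and pgd_list_del() lose their internal pgd_lock irqsave/irqrestore and move above the #ifdef CONFIG_X86_64, so the 32-bit and 64-bit builds share one pair of list helpers; the duplicate 32-bit copies further down the file are deleted. The locking moves out to the callers, so pgd_alloc() and pgd_free() now take pgd_lock themselves around the list update. A condensed sketch of the 64-bit path after this change (abridged from the diff below, not the verbatim file):

	/* Shared helpers: callers are now expected to hold pgd_lock. */
	static inline void pgd_list_add(pgd_t *pgd)
	{
		struct page *page = virt_to_page(pgd);

		list_add(&page->lru, &pgd_list);
	}

	static inline void pgd_list_del(pgd_t *pgd)
	{
		struct page *page = virt_to_page(pgd);

		list_del(&page->lru);
	}

	#ifdef CONFIG_X86_64
	pgd_t *pgd_alloc(struct mm_struct *mm)
	{
		pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
		unsigned long flags;

		if (!pgd)
			return NULL;
		/* Lock taken by the caller, not inside the helper. */
		spin_lock_irqsave(&pgd_lock, flags);
		pgd_list_add(pgd);
		spin_unlock_irqrestore(&pgd_lock, flags);
		/* ... copy kernel mappings from init_mm, as before ... */
		return pgd;
	}
	#endif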
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index c67966e10a95..0d2866b8f425 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -43,34 +43,31 @@ void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
 #endif	/* PAGETABLE_LEVELS > 3 */
 #endif	/* PAGETABLE_LEVELS > 2 */
 
-#ifdef CONFIG_X86_64
 static inline void pgd_list_add(pgd_t *pgd)
 {
 	struct page *page = virt_to_page(pgd);
-	unsigned long flags;
 
-	spin_lock_irqsave(&pgd_lock, flags);
 	list_add(&page->lru, &pgd_list);
-	spin_unlock_irqrestore(&pgd_lock, flags);
 }
 
 static inline void pgd_list_del(pgd_t *pgd)
 {
 	struct page *page = virt_to_page(pgd);
-	unsigned long flags;
 
-	spin_lock_irqsave(&pgd_lock, flags);
 	list_del(&page->lru);
-	spin_unlock_irqrestore(&pgd_lock, flags);
 }
 
+#ifdef CONFIG_X86_64
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	unsigned boundary;
 	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+	unsigned long flags;
 	if (!pgd)
 		return NULL;
+	spin_lock_irqsave(&pgd_lock, flags);
 	pgd_list_add(pgd);
+	spin_unlock_irqrestore(&pgd_lock, flags);
 	/*
 	 * Copy kernel pointers in from init.
 	 * Could keep a freelist or slab cache of those because the kernel
@@ -86,8 +83,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 
 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
+	unsigned long flags;
 	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
+	spin_lock_irqsave(&pgd_lock, flags);
 	pgd_list_del(pgd);
+	spin_unlock_irqrestore(&pgd_lock, flags);
 	free_page((unsigned long)pgd);
 }
 #else
@@ -101,20 +101,6 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
  * vmalloc faults work because attached pagetables are never freed.
  * -- wli
  */
-static inline void pgd_list_add(pgd_t *pgd)
-{
-	struct page *page = virt_to_page(pgd);
-
-	list_add(&page->lru, &pgd_list);
-}
-
-static inline void pgd_list_del(pgd_t *pgd)
-{
-	struct page *page = virt_to_page(pgd);
-
-	list_del(&page->lru);
-}
-
 #define UNSHARED_PTRS_PER_PGD \
 	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
 
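For context on why the helpers stay lock-free while the callers serialize: other x86 code of this era walks pgd_list under the same pgd_lock (the vmalloc-fault synchronization path, for example), so additions and deletions must be serialized against those walkers. A hypothetical walker, for illustration only and not part of this patch; the helper name for_each_pgd_sketch() is invented here:

	/* Illustration: traverse every pgd currently on pgd_list under pgd_lock. */
	static void for_each_pgd_sketch(void (*fn)(pgd_t *pgd))
	{
		struct page *page;
		unsigned long flags;

		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru)
			fn((pgd_t *)page_address(page));	/* pgd pages are linked via page->lru */
		spin_unlock_irqrestore(&pgd_lock, flags);
	}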