path: root/include/asm-x86_64
author		Christoph Lameter <clameter@sgi.com>	2007-07-21 11:10:30 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-21 21:37:09 -0400
commit		34feb2c83beb3bdf13535a36770f7e50b47ef299 (patch)
tree		4abc3b0fe31495315f414677dcb5198967caa333 /include/asm-x86_64
parent		f0a7a5c93dfd1c0348dbbdb6f22cb82d99079c93 (diff)
x86_64: Quicklist support for x86_64
This adds caching of pgds, puds, pmds and ptes. That way we can avoid costly zeroing and initialization of special mappings in the pgd.

A second quicklist is useful to separate out PGD handling. We can carry the initialized pgds over to the next process needing them.

Also clean up the pgd_list handling to use regular list macros. There is no need anymore to avoid the lru field.

Move the add/removal of the pgds to the pgd_list into the constructor / destructor. That way the implementation is congruent with i386.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: "Luck, Tony" <tony.luck@intel.com>
Acked-by: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
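To make the lifecycle described above concrete, here is a minimal userspace C model of the quicklist idea: freed page-table pages sit on a small per-list cache instead of going back to the page allocator, a constructor runs only when a page is first obtained from the real allocator, and a destructor runs only when the cache is trimmed. The names ql_alloc/ql_free/ql_trim, the fixed cache depth, and pgd_ctor_model are illustrative assumptions for this sketch, not the <linux/quicklist.h> API the patch actually uses.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define PAGE_SIZE 4096
#define QL_CAP    16		/* illustrative cache depth, not the kernel tunable */

struct quicklist {
	void *page[QL_CAP];
	int   nr;
};

/* Take a page from the cache if possible; otherwise get a fresh one and run
 * the constructor exactly once.  Cached pages keep whatever initialized state
 * they had when freed, which is what lets the real code skip re-initializing
 * the kernel part of a pgd. */
static void *ql_alloc(struct quicklist *ql, void (*ctor)(void *))
{
	void *p;

	if (ql->nr > 0)
		return ql->page[--ql->nr];

	p = calloc(1, PAGE_SIZE);
	if (p && ctor)
		ctor(p);
	return p;
}

/* Freed pages go back on the cache; they are only really released
 * (and the destructor run) when the cache is trimmed. */
static void ql_free(struct quicklist *ql, void *p)
{
	if (ql->nr < QL_CAP)
		ql->page[ql->nr++] = p;
	else
		free(p);
}

static void ql_trim(struct quicklist *ql, void (*dtor)(void *), int keep)
{
	while (ql->nr > keep) {
		void *p = ql->page[--ql->nr];
		if (dtor)
			dtor(p);
		free(p);
	}
}

/* Stand-in for pgd_ctor(): populate the invariant "kernel half" once. */
static void pgd_ctor_model(void *pgd)
{
	memset((char *)pgd + PAGE_SIZE / 2, 0xaa, PAGE_SIZE / 2);
}

int main(void)
{
	struct quicklist pgd_ql = { .nr = 0 };

	void *a = ql_alloc(&pgd_ql, pgd_ctor_model);	/* fresh page, ctor runs */
	ql_free(&pgd_ql, a);				/* cached, not released */
	void *b = ql_alloc(&pgd_ql, pgd_ctor_model);	/* cache hit, no ctor */
	printf("reused cached pgd: %s\n", a == b ? "yes" : "no");
	ql_trim(&pgd_ql, NULL, 0);
	free(b);
	return 0;
}

In the patch itself the two lists differ only in what state a cached page is trusted to keep: QUICK_PT pages are zero when freed, so NULL is passed for constructor and destructor, while QUICK_PGD pages keep the copied kernel mappings and their pgd_list membership while cached, which is why pgd_free passes pgd_dtor.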
Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--	include/asm-x86_64/pgalloc.h	73
-rw-r--r--	include/asm-x86_64/pgtable.h	1
2 files changed, 49 insertions, 25 deletions
diff --git a/include/asm-x86_64/pgalloc.h b/include/asm-x86_64/pgalloc.h
index 8bb564687860..b467be6d367f 100644
--- a/include/asm-x86_64/pgalloc.h
+++ b/include/asm-x86_64/pgalloc.h
@@ -4,6 +4,10 @@
 #include <asm/pda.h>
 #include <linux/threads.h>
 #include <linux/mm.h>
+#include <linux/quicklist.h>
+
+#define QUICK_PGD 0	/* We preserve special mappings over free */
+#define QUICK_PT 1	/* Other page table pages that are zero on free */
 
 #define pmd_populate_kernel(mm, pmd, pte) \
 		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
@@ -20,23 +24,23 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p
 static inline void pmd_free(pmd_t *pmd)
 {
 	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
-	free_page((unsigned long)pmd);
+	quicklist_free(QUICK_PT, NULL, pmd);
 }
 
 static inline pmd_t *pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
 {
-	return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+	return (pmd_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);
 }
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+	return (pud_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);
 }
 
 static inline void pud_free (pud_t *pud)
 {
 	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
-	free_page((unsigned long)pud);
+	quicklist_free(QUICK_PT, NULL, pud);
 }
 
 static inline void pgd_list_add(pgd_t *pgd)
@@ -57,41 +61,57 @@ static inline void pgd_list_del(pgd_t *pgd)
 	spin_unlock(&pgd_lock);
 }
 
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+static inline void pgd_ctor(void *x)
 {
 	unsigned boundary;
-	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	if (!pgd)
-		return NULL;
-	pgd_list_add(pgd);
+	pgd_t *pgd = x;
+	struct page *page = virt_to_page(pgd);
+
 	/*
 	 * Copy kernel pointers in from init.
-	 * Could keep a freelist or slab cache of those because the kernel
-	 * part never changes.
 	 */
 	boundary = pgd_index(__PAGE_OFFSET);
-	memset(pgd, 0, boundary * sizeof(pgd_t));
 	memcpy(pgd + boundary,
 	       init_level4_pgt + boundary,
 	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
+
+	spin_lock(&pgd_lock);
+	list_add(&page->lru, &pgd_list);
+	spin_unlock(&pgd_lock);
+}
+
+static inline void pgd_dtor(void *x)
+{
+	pgd_t *pgd = x;
+	struct page *page = virt_to_page(pgd);
+
+	spin_lock(&pgd_lock);
+	list_del(&page->lru);
+	spin_unlock(&pgd_lock);
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	pgd_t *pgd = (pgd_t *)quicklist_alloc(QUICK_PGD,
+		GFP_KERNEL|__GFP_REPEAT, pgd_ctor);
 	return pgd;
 }
 
 static inline void pgd_free(pgd_t *pgd)
 {
 	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
-	pgd_list_del(pgd);
-	free_page((unsigned long)pgd);
+	quicklist_free(QUICK_PGD, pgd_dtor, pgd);
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-	return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+	return (pte_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);
 }
 
 static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	void *p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+	void *p = (void *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);
+
 	if (!p)
 		return NULL;
 	return virt_to_page(p);
@@ -103,17 +123,22 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add
 static inline void pte_free_kernel(pte_t *pte)
 {
 	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
-	free_page((unsigned long)pte);
+	quicklist_free(QUICK_PT, NULL, pte);
 }
 
 static inline void pte_free(struct page *pte)
 {
-	__free_page(pte);
+	quicklist_free_page(QUICK_PT, NULL, pte);
 }
 
-#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+#define __pte_free_tlb(tlb,pte) quicklist_free_page(QUICK_PT, NULL,(pte))
 
-#define __pmd_free_tlb(tlb,x)	tlb_remove_page((tlb),virt_to_page(x))
-#define __pud_free_tlb(tlb,x)	tlb_remove_page((tlb),virt_to_page(x))
+#define __pmd_free_tlb(tlb,x)	quicklist_free(QUICK_PT, NULL, (x))
+#define __pud_free_tlb(tlb,x)	quicklist_free(QUICK_PT, NULL, (x))
 
+static inline void check_pgt_cache(void)
+{
+	quicklist_trim(QUICK_PGD, pgd_dtor, 25, 16);
+	quicklist_trim(QUICK_PT, NULL, 25, 16);
+}
 #endif /* _X86_64_PGALLOC_H */
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 3ba53099297d..60cff1e4f7a3 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -409,7 +409,6 @@ extern int kern_addr_valid(unsigned long addr);
 #define HAVE_ARCH_UNMAPPED_AREA
 
 #define pgtable_cache_init()	do { } while (0)
-#define check_pgt_cache()	do { } while (0)
 
 #define PAGE_AGP	PAGE_KERNEL_NOCACHE
 #define HAVE_PAGE_AGP 1