author     David S. Miller <davem@davemloft.net>          2006-01-31 21:30:27 -0500
committer  David S. Miller <davem@sunset.davemloft.net>   2006-03-20 04:11:14 -0500
commit     3c936465249f863f322154ff1aaa628b84ee5750 (patch)
tree       2bd7a229236f197d20a655133370e5d0c1bf886c
parent     05e28f9de65a38bb0c769080e91b6976e7e1e70c (diff)
[SPARC64]: Kill pgtable quicklists and use SLAB.
Taking a nod from the powerpc port.
With the per-cpu caching of both the page allocator and SLAB, the
pgtable quicklist scheme becomes relatively silly and primitive.
Signed-off-by: David S. Miller <davem@davemloft.net>
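
For readers who want the shape of the change without walking the whole diff, the sketch below condenses what the patch does across mm/init.c and pgalloc.h: one page-sized, page-aligned kmem_cache whose constructor zeroes each object, with the page table allocators reduced to plain cache calls. This is an illustrative sketch, not a drop-in file; it uses the 2.6.16-era SLAB interface (kmem_cache_t and the constructor/destructor arguments to kmem_cache_create()) exactly as the patch does, and the error path mirrors the prom_halt() handling in init.c.

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <asm/page.h>
    #include <asm/oplib.h>          /* prom_printf(), prom_halt() */

    kmem_cache_t *pgtable_cache __read_mostly;

    /* The constructor runs when SLAB first sets up an object in a slab,
     * so a freshly allocated page table arrives already cleared; freed
     * page tables are expected to be empty when they go back to the cache. */
    static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
    {
            clear_page(addr);
    }

    void pgtable_cache_init(void)
    {
            /* One PAGE_SIZE object cache serves pgd, pmd and pte pages alike;
             * SLAB's per-cpu object caches replace the hand-rolled quicklists. */
            pgtable_cache = kmem_cache_create("pgtable_cache",
                                              PAGE_SIZE, PAGE_SIZE,
                                              SLAB_HWCACHE_ALIGN |
                                              SLAB_MUST_HWCACHE_ALIGN,
                                              zero_ctor, NULL);
            if (!pgtable_cache) {
                    prom_printf("pgtable_cache_init(): Could not create!\n");
                    prom_halt();
            }
    }

    /* Allocation and freeing then collapse to cache operations. */
    static inline pgd_t *pgd_alloc(struct mm_struct *mm)
    {
            return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
    }

    static inline void pgd_free(pgd_t *pgd)
    {
            kmem_cache_free(pgtable_cache, pgd);
    }

The pmd and pte helpers in the pgalloc.h hunk follow the same pattern, differing only in also passing __GFP_REPEAT.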
 arch/sparc64/kernel/sparc64_ksyms.c |   4
 arch/sparc64/mm/init.c              |  32
 include/asm-sparc64/cpudata.h       |   9
 include/asm-sparc64/pgalloc.h       | 158
 include/asm-sparc64/pgtable.h       |   7
 5 files changed, 44 insertions(+), 166 deletions(-)
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 3c06bfb92a8c..f1f01378d079 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -241,10 +241,6 @@ EXPORT_SYMBOL(verify_compat_iovec);
 #endif
 
 EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(pte_alloc_one_kernel);
-#ifndef CONFIG_SMP
-EXPORT_SYMBOL(pgt_quicklists);
-#endif
 EXPORT_SYMBOL(put_fs_struct);
 
 /* math-emu wants this */
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 936ae1a594ac..7c456afaa9a5 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -141,26 +141,25 @@ unsigned long sparc64_kern_sec_context __read_mostly;
 
 int bigkernel = 0;
 
-/* XXX Tune this... */
-#define PGT_CACHE_LOW  25
-#define PGT_CACHE_HIGH 50
+kmem_cache_t *pgtable_cache __read_mostly;
 
-#ifndef CONFIG_SMP
-struct pgtable_cache_struct pgt_quicklists;
-#endif
+static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+{
+        clear_page(addr);
+}
 
-void check_pgt_cache(void)
+void pgtable_cache_init(void)
 {
-        preempt_disable();
-        if (pgtable_cache_size > PGT_CACHE_HIGH) {
-                do {
-                        if (pgd_quicklist)
-                                free_pgd_slow(get_pgd_fast());
-                        if (pte_quicklist)
-                                free_pte_slow(pte_alloc_one_fast());
-                } while (pgtable_cache_size > PGT_CACHE_LOW);
+        pgtable_cache = kmem_cache_create("pgtable_cache",
+                                          PAGE_SIZE, PAGE_SIZE,
+                                          SLAB_HWCACHE_ALIGN |
+                                          SLAB_MUST_HWCACHE_ALIGN,
+                                          zero_ctor,
+                                          NULL);
+        if (!pgtable_cache) {
+                prom_printf("pgtable_cache_init(): Could not create!\n");
+                prom_halt();
         }
-        preempt_enable();
 }
 
 #ifdef CONFIG_DEBUG_DCFLUSH
@@ -340,7 +339,6 @@ void show_mem(void)
                nr_swap_pages << (PAGE_SHIFT-10));
         printk("%ld pages of RAM\n", num_physpages);
         printk("%d free pages\n", nr_free_pages());
-        printk("%d pages in page table cache\n",pgtable_cache_size);
 }
 
 void mmu_info(struct seq_file *m)
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 45a9a2cfaf79..f7c0faede8b8 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -17,14 +17,7 @@ typedef struct {
         unsigned long clock_tick;       /* %tick's per second */
         unsigned long udelay_val;
 
-        /* Dcache line 2 */
-        unsigned int pgcache_size;
-        unsigned int __pad1;
-        unsigned long *pte_cache;
-        unsigned long *pgd_cache;
-        unsigned long __pad2;
-
-        /* Dcache line 3, rarely used */
+        /* Dcache line 2, rarely used */
         unsigned int dcache_size;
         unsigned int dcache_line_size;
         unsigned int icache_size;
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h
index ecea1bbdc115..12e4a273bd43 100644
--- a/include/asm-sparc64/pgalloc.h
+++ b/include/asm-sparc64/pgalloc.h
@@ -6,6 +6,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/slab.h>
 
 #include <asm/spitfire.h>
 #include <asm/cpudata.h>
@@ -13,164 +14,59 @@
 #include <asm/page.h>
 
 /* Page table allocation/freeing. */
-#ifdef CONFIG_SMP
-/* Sliiiicck */
-#define pgt_quicklists local_cpu_data()
-#else
-extern struct pgtable_cache_struct {
-        unsigned long *pgd_cache;
-        unsigned long *pte_cache;
-        unsigned int pgcache_size;
-} pgt_quicklists;
-#endif
-#define pgd_quicklist      (pgt_quicklists.pgd_cache)
-#define pte_quicklist      (pgt_quicklists.pte_cache)
-#define pgtable_cache_size (pgt_quicklists.pgcache_size)
+extern kmem_cache_t *pgtable_cache;
 
-static inline void free_pgd_fast(pgd_t *pgd)
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-        preempt_disable();
-        *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
-        pgd_quicklist = (unsigned long *) pgd;
-        pgtable_cache_size++;
-        preempt_enable();
+        return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
 }
 
-static inline pgd_t *get_pgd_fast(void)
+static inline void pgd_free(pgd_t *pgd)
 {
-        unsigned long *ret;
-
-        preempt_disable();
-        if((ret = pgd_quicklist) != NULL) {
-                pgd_quicklist = (unsigned long *)(*ret);
-                ret[0] = 0;
-                pgtable_cache_size--;
-                preempt_enable();
-        } else {
-                preempt_enable();
-                ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
-                if(ret)
-                        memset(ret, 0, PAGE_SIZE);
-        }
-        return (pgd_t *)ret;
-}
-
-static inline void free_pgd_slow(pgd_t *pgd)
-{
-        free_page((unsigned long)pgd);
+        kmem_cache_free(pgtable_cache, pgd);
 }
 
 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
 
-static inline pmd_t *pmd_alloc_one_fast(void)
-{
-        unsigned long *ret;
-
-        preempt_disable();
-        ret = (unsigned long *) pte_quicklist;
-        if (likely(ret)) {
-                pte_quicklist = (unsigned long *)(*ret);
-                ret[0] = 0;
-                pgtable_cache_size--;
-        }
-        preempt_enable();
-
-        return (pmd_t *) ret;
-}
-
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-        pmd_t *pmd;
-
-        pmd = pmd_alloc_one_fast();
-        if (unlikely(!pmd)) {
-                pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-                if (pmd)
-                        memset(pmd, 0, PAGE_SIZE);
-        }
-        return pmd;
-}
-
-static inline void free_pmd_fast(pmd_t *pmd)
-{
-        preempt_disable();
-        *(unsigned long *)pmd = (unsigned long) pte_quicklist;
-        pte_quicklist = (unsigned long *) pmd;
-        pgtable_cache_size++;
-        preempt_enable();
-}
-
-static inline void free_pmd_slow(pmd_t *pmd)
-{
-        free_page((unsigned long)pmd);
-}
-
-#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
-#define pmd_populate(MM,PMD,PTE_PAGE) \
-        pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
-
-static inline pte_t *pte_alloc_one_fast(void)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-        unsigned long *ret;
-
-        preempt_disable();
-        ret = (unsigned long *) pte_quicklist;
-        if (likely(ret)) {
-                pte_quicklist = (unsigned long *)(*ret);
-                ret[0] = 0;
-                pgtable_cache_size--;
-        }
-        preempt_enable();
-
-        return (pte_t *) ret;
+        return kmem_cache_alloc(pgtable_cache,
+                                GFP_KERNEL|__GFP_REPEAT);
 }
 
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+static inline void pmd_free(pmd_t *pmd)
 {
-        pte_t *ptep = pte_alloc_one_fast();
-
-        if (likely(ptep))
-                return ptep;
-
-        return (pte_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+        kmem_cache_free(pgtable_cache, pmd);
 }
 
-static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+                                          unsigned long address)
 {
-        pte_t *pte = pte_alloc_one_fast();
-
-        if (likely(pte))
-                return virt_to_page(pte);
-
-        return alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+        return kmem_cache_alloc(pgtable_cache,
+                                GFP_KERNEL|__GFP_REPEAT);
 }
 
-static inline void free_pte_fast(pte_t *pte)
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+                                         unsigned long address)
 {
-        preempt_disable();
-        *(unsigned long *)pte = (unsigned long) pte_quicklist;
-        pte_quicklist = (unsigned long *) pte;
-        pgtable_cache_size++;
-        preempt_enable();
+        return virt_to_page(pte_alloc_one_kernel(mm, address));
 }
 
-static inline void free_pte_slow(pte_t *pte)
-{
-        free_page((unsigned long) pte);
-}
-
 static inline void pte_free_kernel(pte_t *pte)
 {
-        free_pte_fast(pte);
+        kmem_cache_free(pgtable_cache, pte);
 }
 
 static inline void pte_free(struct page *ptepage)
 {
-        free_pte_fast(page_address(ptepage));
+        pte_free_kernel(page_address(ptepage));
 }
 
-#define pmd_free(pmd)  free_pmd_fast(pmd)
-#define pgd_free(pgd)  free_pgd_fast(pgd)
-#define pgd_alloc(mm)  get_pgd_fast()
+
+#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
+#define pmd_populate(MM,PMD,PTE_PAGE) \
+        pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
+
+#define check_pgt_cache() do { } while (0)
 
 #endif /* _SPARC64_PGALLOC_H */
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index f3ba1e058195..77ba0b6cc1ce 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -432,12 +432,7 @@ extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
                                           unsigned long);
 #define HAVE_ARCH_FB_UNMAPPED_AREA
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
-extern void check_pgt_cache(void);
+extern void pgtable_cache_init(void);
 
 #endif /* !(__ASSEMBLY__) */
 