author     Christoph Lameter <clameter@sgi.com>                   2007-05-12 14:15:24 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-05-12 14:26:22 -0400
commit     f1d1a842d85acf34dd185027cb2c9b4fd13130ef (patch)
tree       99ec45adca911aeb145a56d75a213ebb900b2175
parent     8df767dd759c1390f604814ee5b2d1489f9a59f7 (diff)
SLUB: i386 support
SLUB cannot run on i386 at this point because i386 uses the page->private and
page->index fields of slab pages for the pgd cache.
Make SLUB run on i386 by replacing the pgd slab cache with a quicklist.
The changes are kept as limited as possible; in particular, the improvised pgd
linked list is left in place. This has been working here for a couple of weeks now.
Acked-by: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  arch/i386/Kconfig           |  8
-rw-r--r--  arch/i386/kernel/process.c  |  1
-rw-r--r--  arch/i386/kernel/smp.c      |  2
-rw-r--r--  arch/i386/mm/init.c         |  7
-rw-r--r--  arch/i386/mm/pgtable.c      | 26
-rw-r--r--  include/asm-i386/pgalloc.h  |  2
-rw-r--r--  include/asm-i386/pgtable.h  |  5
7 files changed, 25 insertions, 26 deletions
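
In outline, the patch replaces the dedicated pgd slab cache with the generic per-CPU quicklist allocator, so the struct page fields that i386 threads its pgd list through are no longer also claimed by the slab allocator. A minimal sketch of the before/after allocation pattern follows; the wrapper names (pgd_alloc_old, pgd_alloc_new, pgd_free_new) are illustrative only, while the calls themselves (kmem_cache_alloc, quicklist_alloc, quicklist_free, pgd_ctor, pgd_dtor) are the ones visible in the diff below.

#include <linux/quicklist.h>
#include <linux/slab.h>
#include <asm/pgtable.h>

/* Before: pgds were slab objects from the pre-patch pgd_cache, so the
 * pages backing them were slab pages -- yet i386 also threads its pgd
 * list through page->private/page->index of those same pages, which
 * SLUB cannot tolerate. */
static pgd_t *pgd_alloc_old(void)
{
	return kmem_cache_alloc(pgd_cache, GFP_KERNEL);
}

/* After: quicklist 0 hands back a cached page when one is available;
 * only a page freshly taken from the page allocator is zeroed and run
 * through pgd_ctor(). */
static pgd_t *pgd_alloc_new(void)
{
	return quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
}

/* Freeing parks the page on the per-CPU quicklist in constructed
 * state; pgd_dtor() is applied only when pages are eventually released
 * back to the page allocator (see check_pgt_cache() further down). */
static void pgd_free_new(pgd_t *pgd)
{
	quicklist_free(0, pgd_dtor, pgd);
}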
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 30944ee2e61a..c2d54b802232 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -55,6 +55,10 @@ config ZONE_DMA
 	bool
 	default y
 
+config QUICKLIST
+	bool
+	default y
+
 config SBUS
 	bool
 
@@ -79,10 +83,6 @@ config ARCH_MAY_HAVE_PC_FDC
 	bool
 	default y
 
-config ARCH_USES_SLAB_PAGE_STRUCT
-	bool
-	default y
-
 config DMI
 	bool
 	default y
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index d76d9bc33b30..06dfa65ad180 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -186,6 +186,7 @@ void cpu_idle(void)
 			if (__get_cpu_var(cpu_idle_state))
 				__get_cpu_var(cpu_idle_state) = 0;
 
+			check_pgt_cache();
 			rmb();
 			idle = pm_idle;
 
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 93f202a855fa..706bda72dc60 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -421,7 +421,7 @@ void flush_tlb_mm (struct mm_struct * mm)
 	}
 	if (!cpus_empty(cpu_mask))
 		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
-
+	check_pgt_cache();
 	preempt_enable();
 }
 
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index c50782efa5c3..b22ce8d6b1ba 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -740,7 +740,6 @@ int remove_memory(u64 start, u64 size)
 EXPORT_SYMBOL_GPL(remove_memory);
 #endif
 
-struct kmem_cache *pgd_cache;
 struct kmem_cache *pmd_cache;
 
 void __init pgtable_cache_init(void)
@@ -764,12 +763,6 @@ void __init pgtable_cache_init(void)
 			pgd_size = PAGE_SIZE;
 		}
 	}
-	pgd_cache = kmem_cache_create("pgd",
-				pgd_size,
-				pgd_size,
-				SLAB_PANIC,
-				pgd_ctor,
-				(!SHARED_KERNEL_PMD) ? pgd_dtor : NULL);
 }
 
 /*
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index 9a96c1647428..8d7c0864cc04 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -13,6 +13,7 @@
 #include <linux/pagemap.h>
 #include <linux/spinlock.h>
 #include <linux/module.h>
+#include <linux/quicklist.h>
 
 #include <asm/system.h>
 #include <asm/pgtable.h>
@@ -205,8 +206,6 @@ void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags)
  * against pageattr.c; it is the unique case in which a valid change
  * of kernel pagetables can't be lazily synchronized by vmalloc faults.
  * vmalloc faults work because attached pagetables are never freed.
- * The locking scheme was chosen on the basis of manfred's
- * recommendations and having no core impact whatsoever.
  * -- wli
  */
 DEFINE_SPINLOCK(pgd_lock);
@@ -232,9 +231,11 @@ static inline void pgd_list_del(pgd_t *pgd)
 	set_page_private(next, (unsigned long)pprev);
 }
 
+
+
 #if (PTRS_PER_PMD == 1)
 /* Non-PAE pgd constructor */
-void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
+void pgd_ctor(void *pgd)
 {
 	unsigned long flags;
 
@@ -256,7 +257,7 @@ void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 }
 #else /* PTRS_PER_PMD > 1 */
 /* PAE pgd constructor */
-void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
+void pgd_ctor(void *pgd)
 {
 	/* PAE, kernel PMD may be shared */
 
@@ -275,11 +276,12 @@ void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 }
 #endif /* PTRS_PER_PMD */
 
-void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
+void pgd_dtor(void *pgd)
 {
 	unsigned long flags; /* can be called from interrupt context */
 
-	BUG_ON(SHARED_KERNEL_PMD);
+	if (SHARED_KERNEL_PMD)
+		return;
 
 	paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
 	spin_lock_irqsave(&pgd_lock, flags);
@@ -321,7 +323,7 @@ static void pmd_cache_free(pmd_t *pmd, int idx)
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	int i;
-	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
+	pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
 
 	if (PTRS_PER_PMD == 1 || !pgd)
 		return pgd;
@@ -344,7 +346,7 @@ out_oom:
 		paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
 		pmd_cache_free(pmd, i);
 	}
-	kmem_cache_free(pgd_cache, pgd);
+	quicklist_free(0, pgd_dtor, pgd);
 	return NULL;
 }
 
@@ -361,5 +363,11 @@ void pgd_free(pgd_t *pgd)
 			pmd_cache_free(pmd, i);
 		}
 	/* in the non-PAE case, free_pgtables() clears user pgd entries */
-	kmem_cache_free(pgd_cache, pgd);
+	quicklist_free(0, pgd_dtor, pgd);
 }
+
+void check_pgt_cache(void)
+{
+	quicklist_trim(0, pgd_dtor, 25, 16);
+}
+
diff --git a/include/asm-i386/pgalloc.h b/include/asm-i386/pgalloc.h
index 47430175b75f..d07b7afc2692 100644
--- a/include/asm-i386/pgalloc.h
+++ b/include/asm-i386/pgalloc.h
@@ -65,6 +65,4 @@ do { \
 #define pud_populate(mm, pmd, pte) BUG()
 #endif
 
-#define check_pgt_cache() do { } while (0)
-
 #endif /* _I386_PGALLOC_H */
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index edce9d51a676..2394589786ba 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -35,17 +35,16 @@ struct vm_area_struct;
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 extern unsigned long empty_zero_page[1024];
 extern pgd_t swapper_pg_dir[1024];
-extern struct kmem_cache *pgd_cache;
 extern struct kmem_cache *pmd_cache;
 extern spinlock_t pgd_lock;
 extern struct page *pgd_list;
+void check_pgt_cache(void);
 
 void pmd_ctor(void *, struct kmem_cache *, unsigned long);
-void pgd_ctor(void *, struct kmem_cache *, unsigned long);
-void pgd_dtor(void *, struct kmem_cache *, unsigned long);
 void pgtable_cache_init(void);
 void paging_init(void);
 
+
 /*
  * The Linux x86 paging architecture is 'compile-time dual-mode', it
  * implements both the traditional 2-level x86 page tables and the