diff options
author | LEROY Christophe <christophe.leroy@c-s.fr> | 2015-01-20 04:57:34 -0500 |
---|---|---|
committer | Scott Wood <scottwood@freescale.com> | 2015-01-29 22:59:02 -0500 |
commit | ce67f5d0a00cce231e62334c3624737623c32d6a (patch) | |
tree | c723a98f0acfe0e685727ff602f901ea70ce984e /arch/powerpc/mm/pgtable_32.c | |
parent | 5ddb75cee5afab3bdaf6eb4efefc8029923a9cc7 (diff) |
powerpc32: Use kmem_cache memory for PGDIR
When pages are not 4K, PGDIR table is allocated with kmalloc(). In order to
optimise TLB handlers, aligned memory is needed. kmalloc() doesn't provide
aligned memory blocks, so let's use a kmem_cache pool instead.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <scottwood@freescale.com>
Diffstat (limited to 'arch/powerpc/mm/pgtable_32.c')
-rw-r--r-- | arch/powerpc/mm/pgtable_32.c | 16 |
1 file changed, 14 insertions, 2 deletions
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 833139620431..03b1a3b0fbd5 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c | |||
@@ -72,13 +72,25 @@ extern unsigned long p_mapped_by_tlbcam(phys_addr_t pa); | |||
72 | 72 | ||
73 | #define PGDIR_ORDER (32 + PGD_T_LOG2 - PGDIR_SHIFT) | 73 | #define PGDIR_ORDER (32 + PGD_T_LOG2 - PGDIR_SHIFT) |
74 | 74 | ||
75 | #ifndef CONFIG_PPC_4K_PAGES | ||
76 | static struct kmem_cache *pgtable_cache; | ||
77 | |||
78 | void pgtable_cache_init(void) | ||
79 | { | ||
80 | pgtable_cache = kmem_cache_create("PGDIR cache", 1 << PGDIR_ORDER, | ||
81 | 1 << PGDIR_ORDER, 0, NULL); | ||
82 | if (pgtable_cache == NULL) | ||
83 | panic("Couldn't allocate pgtable caches"); | ||
84 | } | ||
85 | #endif | ||
86 | |||
75 | pgd_t *pgd_alloc(struct mm_struct *mm) | 87 | pgd_t *pgd_alloc(struct mm_struct *mm) |
76 | { | 88 | { |
77 | pgd_t *ret; | 89 | pgd_t *ret; |
78 | 90 | ||
79 | /* pgdir take page or two with 4K pages and a page fraction otherwise */ | 91 | /* pgdir take page or two with 4K pages and a page fraction otherwise */ |
80 | #ifndef CONFIG_PPC_4K_PAGES | 92 | #ifndef CONFIG_PPC_4K_PAGES |
81 | ret = kzalloc(1 << PGDIR_ORDER, GFP_KERNEL); | 93 | ret = kmem_cache_alloc(pgtable_cache, GFP_KERNEL | __GFP_ZERO); |
82 | #else | 94 | #else |
83 | ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, | 95 | ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, |
84 | PGDIR_ORDER - PAGE_SHIFT); | 96 | PGDIR_ORDER - PAGE_SHIFT); |
@@ -89,7 +101,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) | |||
89 | void pgd_free(struct mm_struct *mm, pgd_t *pgd) | 101 | void pgd_free(struct mm_struct *mm, pgd_t *pgd) |
90 | { | 102 | { |
91 | #ifndef CONFIG_PPC_4K_PAGES | 103 | #ifndef CONFIG_PPC_4K_PAGES |
92 | kfree((void *)pgd); | 104 | kmem_cache_free(pgtable_cache, (void *)pgd); |
93 | #else | 105 | #else |
94 | free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT); | 106 | free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT); |
95 | #endif | 107 | #endif |