| | | |
|---|---|---|
| author | David Gibson <david@gibson.dropbear.id.au> | 2009-10-28 12:27:18 -0400 |
| committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-10-30 02:20:57 -0400 |
| commit | a0668cdc154e54bf0c85182e0535eea237d53146 | |
| tree | 84efcadf011e16c240ac9b1c948141fc1cc7d324 /arch/powerpc/mm/pgtable.c | |
| parent | f71dc176aa06359681c30ba6877ffccab6fba3a6 | |
powerpc/mm: Cleanup management of kmem_caches for pagetables
Currently we have a fair bit of rather fiddly code to manage the
various kmem_caches used to store page tables of various levels. We
generally have two caches holding some combination of PGD, PUD and PMD
tables, plus several more for the special hugepage pagetables.
This patch cleans this all up by taking a different approach. Rather
than the caches being designated as for PUDs or for hugeptes for 16M
pages, the caches are simply allocated to be a specific size. Thus
sharing of caches between different types/levels of pagetables happens
naturally. The pagetable size, where needed, is passed around encoded
in the same way as {PGD,PUD,PMD}_INDEX_SIZE; that is, as n, where the
pagetable contains 2^n pointers.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
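A note for reading the diff below: the size is carried in the low bits of the same word as the table pointer, so a single unsigned long stands for "table of 2^n pointers". The following is a minimal standalone sketch of that round trip, assuming a 0xf mask in place of the kernel's MAX_PGTABLE_INDEX_SIZE (which is defined in the powerpc headers, not in this file) and assuming the tables are aligned beyond 16 bytes.

```c
/* Sketch only: PGF_MASK stands in for MAX_PGTABLE_INDEX_SIZE; the real
 * constant and the real table allocations live elsewhere in this patch. */
#include <assert.h>
#include <stdint.h>

#define PGF_MASK 0xfUL

/* Pack a table pointer and its index size (n, for 2^n pointers) together. */
static inline unsigned long pgf_encode(void *table, unsigned int shift)
{
	assert(((uintptr_t)table & PGF_MASK) == 0);  /* alignment frees the low bits */
	assert(shift <= PGF_MASK);                   /* the shift must fit in the mask */
	return (unsigned long)((uintptr_t)table | shift);
}

/* Recover the pointer: clear the low bits that carried the size. */
static inline void *pgf_table(unsigned long pgf)
{
	return (void *)(uintptr_t)(pgf & ~PGF_MASK);
}

/* Recover the index size from the low bits. */
static inline unsigned int pgf_shift(unsigned long pgf)
{
	return (unsigned int)(pgf & PGF_MASK);
}

int main(void)
{
	_Alignas(64) unsigned long dummy[8];	/* stands in for a real page table */
	unsigned long pgf = pgf_encode(dummy, 3);	/* 2^3 = 8 pointers */

	/* The round trip must give back exactly what was packed. */
	return (pgf_table(pgf) == dummy && pgf_shift(pgf) == 3) ? 0 : 1;
}
```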
Diffstat (limited to 'arch/powerpc/mm/pgtable.c')

| | | |
|---|---|---|
| -rw-r--r-- | arch/powerpc/mm/pgtable.c | 25 |

1 file changed, 16 insertions, 9 deletions
```diff
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 53040931de32..99df697c601a 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -49,12 +49,12 @@ struct pte_freelist_batch
 {
 	struct rcu_head rcu;
 	unsigned int index;
-	pgtable_free_t tables[0];
+	unsigned long tables[0];
 };
 
 #define PTE_FREELIST_SIZE \
 	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
-	  / sizeof(pgtable_free_t))
+	  / sizeof(unsigned long))
 
 static void pte_free_smp_sync(void *arg)
 {
```
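For a sense of scale, PTE_FREELIST_SIZE as redefined above is simply "however many unsigned long entries fit in the rest of one page". A small userspace sketch of that arithmetic, assuming 4K pages, an LP64 ABI, and a stand-in layout for struct rcu_head:

```c
#include <stdio.h>

/* Stand-in with the same layout cost as the kernel's struct rcu_head
 * (two pointers); the real type comes from <linux/rcupdate.h>. */
struct rcu_head_sketch { void *next; void *func; };

struct pte_freelist_batch_sketch {
	struct rcu_head_sketch rcu;
	unsigned int index;
	unsigned long tables[];
};

int main(void)
{
	unsigned long page_size = 4096;	/* assumed 4K pages */

	/* Mirrors the PTE_FREELIST_SIZE computation in the hunk above. */
	printf("entries per batch: %lu\n",
	       (page_size - sizeof(struct pte_freelist_batch_sketch))
	       / sizeof(unsigned long));
	return 0;	/* prints 509 on a typical LP64 build */
}
```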
```diff
@@ -64,13 +64,13 @@ static void pte_free_smp_sync(void *arg)
 /* This is only called when we are critically out of memory
  * (and fail to get a page in pte_free_tlb).
  */
-static void pgtable_free_now(pgtable_free_t pgf)
+static void pgtable_free_now(void *table, unsigned shift)
 {
 	pte_freelist_forced_free++;
 
 	smp_call_function(pte_free_smp_sync, NULL, 1);
 
-	pgtable_free(pgf);
+	pgtable_free(table, shift);
 }
 
 static void pte_free_rcu_callback(struct rcu_head *head)
```
```diff
@@ -79,8 +79,12 @@ static void pte_free_rcu_callback(struct rcu_head *head)
 		container_of(head, struct pte_freelist_batch, rcu);
 	unsigned int i;
 
-	for (i = 0; i < batch->index; i++)
-		pgtable_free(batch->tables[i]);
+	for (i = 0; i < batch->index; i++) {
+		void *table = (void *)(batch->tables[i] & ~MAX_PGTABLE_INDEX_SIZE);
+		unsigned shift = batch->tables[i] & MAX_PGTABLE_INDEX_SIZE;
+
+		pgtable_free(table, shift);
+	}
 
 	free_page((unsigned long)batch);
 }
```
```diff
@@ -91,25 +95,28 @@ static void pte_free_submit(struct pte_freelist_batch *batch)
 	call_rcu(&batch->rcu, pte_free_rcu_callback);
 }
 
-void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
+void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
 {
 	/* This is safe since tlb_gather_mmu has disabled preemption */
 	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+	unsigned long pgf;
 
 	if (atomic_read(&tlb->mm->mm_users) < 2 ||
 	    cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))){
-		pgtable_free(pgf);
+		pgtable_free(table, shift);
 		return;
 	}
 
 	if (*batchp == NULL) {
 		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
 		if (*batchp == NULL) {
-			pgtable_free_now(pgf);
+			pgtable_free_now(table, shift);
 			return;
 		}
 		(*batchp)->index = 0;
 	}
+	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+	pgf = (unsigned long)table | shift;
 	(*batchp)->tables[(*batchp)->index++] = pgf;
 	if ((*batchp)->index == PTE_FREELIST_SIZE) {
 		pte_free_submit(*batchp);
```
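Putting both halves together: pgtable_free_tlb() packs (table | shift) into the per-CPU batch, and pte_free_rcu_callback() splits each entry back apart. The sketch below mirrors only that pack/unpack flow in userspace; the 0xf mask, the allocation sizes, and all names are illustrative assumptions, not the kernel's.

```c
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define MASK 0xfUL	/* stand-in for MAX_PGTABLE_INDEX_SIZE */

int main(void)
{
	unsigned long batch[4];
	unsigned int index = 0;

	/* "pgtable_free_tlb" side: queue tables of different sizes. A table
	 * of 2^shift 8-byte pointers needs 8 << shift bytes; 16-byte
	 * alignment keeps the low four bits free to carry the shift. */
	for (unsigned int shift = 1; shift <= 4; shift++) {
		void *table = aligned_alloc(16, 8UL << shift);
		if (!table)
			return 1;
		batch[index++] = (unsigned long)(uintptr_t)table | shift;
	}

	/* "pte_free_rcu_callback" side: split each entry back apart. */
	for (unsigned int i = 0; i < index; i++) {
		void *table = (void *)(uintptr_t)(batch[i] & ~MASK);
		unsigned int shift = (unsigned int)(batch[i] & MASK);

		printf("freeing table %p with 2^%u pointers\n", table, shift);
		free(table);
	}
	return 0;
}
```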