author     David Gibson <david@gibson.dropbear.id.au>    2005-08-05 05:39:06 -0400
committer  Paul Mackerras <paulus@samba.org>             2005-08-28 20:53:31 -0400
commit     e28f7faf05159f1cfd564596f5e6178edba6bd49
tree       45534d2c33bff8b64e3fd155fba55146cb7518e6  /arch/ppc64/mm/tlb.c
parent     decd300b30e499fe6be1bbfc5650fc971de8c1fa
[PATCH] Four level pagetables for ppc64
Implement 4-level pagetables for ppc64
This patch implements full four-level page tables for ppc64, thereby
extending the usable user address range to 44 bits (16T).
The patch uses a full page for the tables at the bottom and top level,
and a quarter page for the intermediate levels. It uses full 64-bit
pointers at every level, thus also increasing the addressable range of
physical memory. This patch also tweaks the VSID allocation to allow a
matching range for user addresses (this halves the number of available
contexts) and adds some #if and BUILD_BUG sanity checks.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
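
The 44-bit (16T) figure follows directly from the table geometry described in the
message. As a back-of-the-envelope check (not part of the patch), assuming the 4K
base page size ppc64 used at the time and 8-byte (64-bit) table entries, the
per-level index widths add up as the sketch below shows; the variable names are
illustrative, not the kernel's.

#include <stdio.h>

/* Illustrative arithmetic only: the shifts below are inferred from the
 * commit message (full page for the bottom and top tables, quarter page
 * for the two intermediate levels, 64-bit pointers, 4K base pages); they
 * are not copied from the kernel headers. */
int main(void)
{
	unsigned int page_bits = 12;            /* 4K page offset               */
	unsigned int pte_bits  = 12 - 3;        /* full page / 8B  = 512 ->  9  */
	unsigned int pmd_bits  = (12 - 2) - 3;  /* quarter page / 8B = 128 -> 7 */
	unsigned int pud_bits  = (12 - 2) - 3;  /* quarter page / 8B = 128 -> 7 */
	unsigned int pgd_bits  = 12 - 3;        /* full page / 8B  = 512 ->  9  */
	unsigned int total = page_bits + pte_bits + pmd_bits + pud_bits + pgd_bits;

	printf("usable user address bits: %u (%lluTB)\n",
	       total, 1ULL << (total - 40));    /* prints 44 (16TB) */
	return 0;
}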
Diffstat (limited to 'arch/ppc64/mm/tlb.c')
-rw-r--r--  arch/ppc64/mm/tlb.c  95
1 file changed, 55 insertions, 40 deletions
diff --git a/arch/ppc64/mm/tlb.c b/arch/ppc64/mm/tlb.c
index 26f0172c4527..d8a6593a13f0 100644
--- a/arch/ppc64/mm/tlb.c
+++ b/arch/ppc64/mm/tlb.c
@@ -41,7 +41,58 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
 unsigned long pte_freelist_forced_free;
 
-void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage)
+struct pte_freelist_batch
+{
+	struct rcu_head	rcu;
+	unsigned int	index;
+	pgtable_free_t	tables[0];
+};
+
+DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
+unsigned long pte_freelist_forced_free;
+
+#define PTE_FREELIST_SIZE \
+	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
+	  / sizeof(pgtable_free_t))
+
+#ifdef CONFIG_SMP
+static void pte_free_smp_sync(void *arg)
+{
+	/* Do nothing, just ensure we sync with all CPUs */
+}
+#endif
+
+/* This is only called when we are critically out of memory
+ * (and fail to get a page in pte_free_tlb).
+ */
+static void pgtable_free_now(pgtable_free_t pgf)
+{
+	pte_freelist_forced_free++;
+
+	smp_call_function(pte_free_smp_sync, NULL, 0, 1);
+
+	pgtable_free(pgf);
+}
+
+static void pte_free_rcu_callback(struct rcu_head *head)
+{
+	struct pte_freelist_batch *batch =
+		container_of(head, struct pte_freelist_batch, rcu);
+	unsigned int i;
+
+	for (i = 0; i < batch->index; i++)
+		pgtable_free(batch->tables[i]);
+
+	free_page((unsigned long)batch);
+}
+
+static void pte_free_submit(struct pte_freelist_batch *batch)
+{
+	INIT_RCU_HEAD(&batch->rcu);
+	call_rcu(&batch->rcu, pte_free_rcu_callback);
+}
+
+void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
 {
 	/* This is safe as we are holding page_table_lock */
 	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
@@ -49,19 +100,19 @@ void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage)
 
 	if (atomic_read(&tlb->mm->mm_users) < 2 ||
 	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
-		pte_free(ptepage);
+		pgtable_free(pgf);
 		return;
 	}
 
 	if (*batchp == NULL) {
 		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
 		if (*batchp == NULL) {
-			pte_free_now(ptepage);
+			pgtable_free_now(pgf);
 			return;
 		}
 		(*batchp)->index = 0;
 	}
-	(*batchp)->pages[(*batchp)->index++] = ptepage;
+	(*batchp)->tables[(*batchp)->index++] = pgf;
 	if ((*batchp)->index == PTE_FREELIST_SIZE) {
 		pte_free_submit(*batchp);
 		*batchp = NULL;
@@ -132,42 +183,6 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 	put_cpu();
 }
 
-#ifdef CONFIG_SMP
-static void pte_free_smp_sync(void *arg)
-{
-	/* Do nothing, just ensure we sync with all CPUs */
-}
-#endif
-
-/* This is only called when we are critically out of memory
- * (and fail to get a page in pte_free_tlb).
- */
-void pte_free_now(struct page *ptepage)
-{
-	pte_freelist_forced_free++;
-
-	smp_call_function(pte_free_smp_sync, NULL, 0, 1);
-
-	pte_free(ptepage);
-}
-
-static void pte_free_rcu_callback(struct rcu_head *head)
-{
-	struct pte_freelist_batch *batch =
-		container_of(head, struct pte_freelist_batch, rcu);
-	unsigned int i;
-
-	for (i = 0; i < batch->index; i++)
-		pte_free(batch->pages[i]);
-	free_page((unsigned long)batch);
-}
-
-void pte_free_submit(struct pte_freelist_batch *batch)
-{
-	INIT_RCU_HEAD(&batch->rcu);
-	call_rcu(&batch->rcu, pte_free_rcu_callback);
-}
-
 void pte_free_finish(void)
 {
 	/* This is safe as we are holding page_table_lock */
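
For readers less familiar with the freeing scheme this file implements, the sketch
below is a standalone userspace model of the same batching idea (names such as
free_batch, deferred_table_free and BATCH_CAPACITY are invented for illustration;
they are not the kernel's): table fragments queued for freeing are collected into a
batch, and the whole batch is released only through a deferred callback, standing in
for the call_rcu() grace period that guarantees no other CPU is still walking the
tables. The out-of-memory path frees immediately, mirroring pgtable_free_now().

#include <stdio.h>
#include <stdlib.h>

/* Standalone model of the batching above -- not kernel code.  In the real
 * implementation the batch is per CPU, the capacity is PTE_FREELIST_SIZE,
 * and batch_free() runs as an RCU callback after a grace period. */

#define BATCH_CAPACITY 4

struct free_batch {
	unsigned int index;
	void *tables[BATCH_CAPACITY];
};

static struct free_batch *cur_batch;

/* Stands in for pte_free_rcu_callback(): release every queued table,
 * then the batch itself. */
static void batch_free(struct free_batch *batch)
{
	for (unsigned int i = 0; i < batch->index; i++)
		free(batch->tables[i]);
	free(batch);
}

/* Stands in for pgtable_free_tlb(): queue one table fragment for deferred
 * freeing, falling back to an immediate free when no batch can be
 * allocated (the kernel first syncs all CPUs in that path). */
static void deferred_table_free(void *table)
{
	if (cur_batch == NULL) {
		cur_batch = calloc(1, sizeof(*cur_batch));
		if (cur_batch == NULL) {
			free(table);
			return;
		}
	}
	cur_batch->tables[cur_batch->index++] = table;
	if (cur_batch->index == BATCH_CAPACITY) {
		batch_free(cur_batch);	/* call_rcu() in the kernel */
		cur_batch = NULL;
	}
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		deferred_table_free(malloc(64));

	if (cur_batch) {		/* pte_free_finish() analogue */
		batch_free(cur_batch);
		cur_batch = NULL;
	}
	puts("all queued tables freed");
	return 0;
}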