author    Paul Mackerras <paulus@samba.org>    2007-10-11 06:37:10 -0400
committer Paul Mackerras <paulus@samba.org>    2007-10-12 00:05:17 -0400
commit    1189be6508d45183013ddb82b18f4934193de274 (patch)
tree      58924481b4de56699e4a884dce8dc601e71cf7d1 /arch/powerpc/mm/tlb_64.c
parent    287e5d6fcccfa38b953cebe307e1ddfd32363355 (diff)
[POWERPC] Use 1TB segments
This makes the kernel use 1TB segments for all kernel mappings and for
user addresses of 1TB and above, on machines which support them
(currently POWER5+, POWER6 and PA6T).

We detect that the machine supports 1TB segments by looking at the
ibm,processor-segment-sizes property in the device tree.

We don't currently use 1TB segments for user addresses < 1T, since that
would effectively prevent 32-bit processes from using huge pages unless
we also had a way to revert to using 256MB segments. That would be
possible but would involve extra complications (such as keeping track of
which segment size was used when HPTEs were inserted) and is not
addressed here.

Parts of this patch were originally written by Ben Herrenschmidt.

Signed-off-by: Paul Mackerras <paulus@samba.org>
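For orientation: the rule the message describes (1TB segments for user addresses of 1TB and above) is what the user_segment_size() helper called in the diff below implements. A minimal sketch of such a helper, assuming the SID_SHIFT_1T, mmu_highuser_ssize and MMU_SEGSIZE_256M names defined elsewhere in the patch (this file's diff does not show them):

/* Sketch: choose the MMU segment size for a user effective address.
 * Per the commit message, addresses at or above 1TB use 1TB segments
 * on machines that support them; everything below stays on 256MB
 * segments so 32-bit processes keep working unchanged. */
static inline int user_segment_size(unsigned long addr)
{
	if (addr >= (1UL << SID_SHIFT_1T))	/* assumed: SID_SHIFT_1T == 40 */
		return mmu_highuser_ssize;	/* 1T where supported */
	return MMU_SEGSIZE_256M;
}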
Diffstat (limited to 'arch/powerpc/mm/tlb_64.c')
-rw-r--r--    arch/powerpc/mm/tlb_64.c | 20
1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index cbd34fc813ee..eafbca52bff9 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -132,6 +132,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 	unsigned long vsid, vaddr;
 	unsigned int psize;
+	int ssize;
 	real_pte_t rpte;
 	int i;
 
@@ -161,11 +162,14 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 
 	/* Build full vaddr */
 	if (!is_kernel_addr(addr)) {
-		vsid = get_vsid(mm->context.id, addr);
+		ssize = user_segment_size(addr);
+		vsid = get_vsid(mm->context.id, addr, ssize);
 		WARN_ON(vsid == 0);
-	} else
-		vsid = get_kernel_vsid(addr);
-	vaddr = (vsid << 28 ) | (addr & 0x0fffffff);
+	} else {
+		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
+		ssize = mmu_kernel_ssize;
+	}
+	vaddr = hpt_va(addr, vsid, ssize);
 	rpte = __real_pte(__pte(pte), ptep);
 
 	/*
@@ -175,7 +179,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	 * and decide to use local invalidates instead...
 	 */
 	if (!batch->active) {
-		flush_hash_page(vaddr, rpte, psize, 0);
+		flush_hash_page(vaddr, rpte, psize, ssize, 0);
 		return;
 	}
 
@@ -189,13 +193,15 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	 * We also need to ensure only one page size is present in a given
 	 * batch
 	 */
-	if (i != 0 && (mm != batch->mm || batch->psize != psize)) {
+	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
+		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
 		i = 0;
 	}
 	if (i == 0) {
 		batch->mm = mm;
 		batch->psize = psize;
+		batch->ssize = ssize;
 	}
 	batch->pte[i] = rpte;
 	batch->vaddr[i] = vaddr;
@@ -222,7 +228,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 		local = 1;
 	if (i == 1)
 		flush_hash_page(batch->vaddr[0], batch->pte[0],
-				batch->psize, local);
+				batch->psize, batch->ssize, local);
 	else
 		flush_hash_range(i, local);
 	batch->index = 0;
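Two pieces of context from outside this file, sketched here for reference. First, the vaddr construction above switches from the open-coded 256MB-segment formula to hpt_va(), which has to handle both segment sizes. A minimal sketch, assuming the 40-bit VSID shift and in-segment offset mask that 1TB segments imply (the real helper is defined elsewhere in the patch):

/* Sketch of hpt_va(): build the virtual address used by the hash code.
 * The 256MB branch reproduces the old open-coded expression; the 1TB
 * branch widens both the VSID shift and the offset mask to 40 bits. */
static inline unsigned long hpt_va(unsigned long ea, unsigned long vsid,
				   int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return (vsid << 28) | (ea & 0x0fffffffUL);
	return (vsid << 40) | (ea & 0xffffffffffUL);
}

Second, batching on ssize implies a new member in struct ppc64_tlb_batch next to the active, index, mm, psize, pte[] and vaddr[] fields the old code already used (the array size name PPC64_TLB_BATCH_NR is assumed); roughly:

struct ppc64_tlb_batch {
	int			active;
	unsigned long		index;
	struct mm_struct	*mm;
	real_pte_t		pte[PPC64_TLB_BATCH_NR];
	unsigned long		vaddr[PPC64_TLB_BATCH_NR];
	unsigned int		psize;
	int			ssize;	/* new: one segment size per batch */
};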