Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 16
-rw-r--r--  arch/powerpc/mm/tlb_64.c      | 68
2 files changed, 48 insertions(+), 36 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index f6ffaaa7a5bf..8508f973d9cc 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -316,12 +316,11 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 {
         if (pte_present(*ptep)) {
                 /* We open-code pte_clear because we need to pass the right
-                 * argument to hpte_update (huge / !huge)
+                 * argument to hpte_need_flush (huge / !huge). Might not be
+                 * necessary anymore if we make hpte_need_flush() get the
+                 * page size from the slices
                  */
-                unsigned long old = pte_update(ptep, ~0UL);
-                if (old & _PAGE_HASHPTE)
-                        hpte_update(mm, addr & HPAGE_MASK, ptep, old, 1);
-                flush_tlb_pending();
+                pte_update(mm, addr & HPAGE_MASK, ptep, ~0UL, 1);
         }
         *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
 }
@@ -329,12 +328,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                               pte_t *ptep)
 {
-        unsigned long old = pte_update(ptep, ~0UL);
-
-        if (old & _PAGE_HASHPTE)
-                hpte_update(mm, addr & HPAGE_MASK, ptep, old, 1);
-        *ptep = __pte(0);
-
+        unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
         return __pte(old);
 }
 
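Note (not part of the diff): the hugetlb hunks above rely on pte_update() having grown mm, address and "huge" parameters so that it can call hpte_need_flush() itself when the old PTE carried _PAGE_HASHPTE. That helper presumably lives in include/asm-powerpc/pgtable.h and performs the clear atomically with ldarx/stdcx.; the following is only a simplified sketch of the calling convention assumed here, not the real implementation:

/* Sketch only: the real pte_update() clears the bits atomically; this
 * just illustrates the new interface used by the hugetlb code above. */
static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep, unsigned long clr, int huge)
{
        unsigned long old = pte_val(*ptep);

        *ptep = __pte(old & ~clr);              /* clear the requested bits */
        if (old & _PAGE_HASHPTE)                /* an HPTE may exist: flush it */
                hpte_need_flush(mm, addr, ptep, old, huge);
        return old;
}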
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index b58baa65c4a7..fd8d08c325eb 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -120,17 +120,20 @@ void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
 }
 
 /*
- * Update the MMU hash table to correspond with a change to
- * a Linux PTE.  If wrprot is true, it is permissible to
- * change the existing HPTE to read-only rather than removing it
- * (if we remove it we should clear the _PTE_HPTEFLAGS bits).
+ * A linux PTE was changed and the corresponding hash table entry
+ * needs to be flushed. This function will either perform the flush
+ * immediately or will batch it up if the current CPU has an active
+ * batch on it.
+ *
+ * Must be called from within some kind of spinlock/non-preempt region...
  */
-void hpte_update(struct mm_struct *mm, unsigned long addr,
-                 pte_t *ptep, unsigned long pte, int huge)
+void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
+                     pte_t *ptep, unsigned long pte, int huge)
 {
         struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
-        unsigned long vsid;
+        unsigned long vsid, vaddr;
         unsigned int psize;
+        real_pte_t rpte;
         int i;
 
         i = batch->index;
@@ -151,6 +154,26 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
         } else
                 psize = pte_pagesize_index(pte);
 
+        /* Build full vaddr */
+        if (!is_kernel_addr(addr)) {
+                vsid = get_vsid(mm->context.id, addr);
+                WARN_ON(vsid == 0);
+        } else
+                vsid = get_kernel_vsid(addr);
+        vaddr = (vsid << 28 ) | (addr & 0x0fffffff);
+        rpte = __real_pte(__pte(pte), ptep);
+
+        /*
+         * Check if we have an active batch on this CPU. If not, just
+         * flush now and return. For now, we do global invalidates
+         * in that case, might be worth testing the mm cpu mask though
+         * and decide to use local invalidates instead...
+         */
+        if (!batch->active) {
+                flush_hash_page(vaddr, rpte, psize, 0);
+                return;
+        }
+
         /*
          * This can happen when we are in the middle of a TLB batch and
          * we encounter memory pressure (eg copy_page_range when it tries
@@ -162,47 +185,42 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
          * batch
          */
         if (i != 0 && (mm != batch->mm || batch->psize != psize)) {
-                flush_tlb_pending();
+                __flush_tlb_pending(batch);
                 i = 0;
         }
         if (i == 0) {
                 batch->mm = mm;
                 batch->psize = psize;
         }
-        if (!is_kernel_addr(addr)) {
-                vsid = get_vsid(mm->context.id, addr);
-                WARN_ON(vsid == 0);
-        } else
-                vsid = get_kernel_vsid(addr);
-        batch->vaddr[i] = (vsid << 28 ) | (addr & 0x0fffffff);
-        batch->pte[i] = __real_pte(__pte(pte), ptep);
+        batch->pte[i] = rpte;
+        batch->vaddr[i] = vaddr;
         batch->index = ++i;
         if (i >= PPC64_TLB_BATCH_NR)
-                flush_tlb_pending();
+                __flush_tlb_pending(batch);
 }
 
+/*
+ * This function is called when terminating an mmu batch or when a batch
+ * is full. It will perform the flush of all the entries currently stored
+ * in a batch.
+ *
+ * Must be called from within some kind of spinlock/non-preempt region...
+ */
 void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 {
-        int i;
-        int cpu;
         cpumask_t tmp;
-        int local = 0;
+        int i, local = 0;
 
-        BUG_ON(in_interrupt());
-
-        cpu = get_cpu();
         i = batch->index;
-        tmp = cpumask_of_cpu(cpu);
+        tmp = cpumask_of_cpu(smp_processor_id());
         if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
                 local = 1;
-
         if (i == 1)
                 flush_hash_page(batch->vaddr[0], batch->pte[0],
                                 batch->psize, local);
         else
                 flush_hash_range(i, local);
         batch->index = 0;
-        put_cpu();
 }
 
 void pte_free_finish(void)
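Note (not part of the diff): the batch->active flag tested in hpte_need_flush() is assumed to be driven by the lazy MMU mode hooks that this series introduces elsewhere (include/asm-powerpc/tlbflush.h is the likely home). A rough sketch of that assumed shape, for context only:

/* Sketch of the assumed lazy MMU hooks that set and clear batch->active. */
static inline void arch_enter_lazy_mmu_mode(void)
{
        struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

        batch->active = 1;                      /* PTE updates may now be batched */
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

        if (batch->index)
                __flush_tlb_pending(batch);     /* drain anything still queued */
        batch->active = 0;
}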