Diffstat (limited to 'arch/powerpc/mm/pgtable.c')
-rw-r--r--  arch/powerpc/mm/pgtable.c  134
1 files changed, 132 insertions, 2 deletions
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 6d94116fdea1..f5c6fd42265c 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -1,5 +1,6 @@
 /*
  * This file contains common routines for dealing with free of page tables
+ * Along with common page table handling code
  *
  * Derived from arch/powerpc/mm/tlb_64.c:
  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
@@ -81,11 +82,10 @@ static void pte_free_submit(struct pte_freelist_batch *batch)
 void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
 {
 	/* This is safe since tlb_gather_mmu has disabled preemption */
-	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
 	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
 
 	if (atomic_read(&tlb->mm->mm_users) < 2 ||
-	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
+	    cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))){
 		pgtable_free(pgf);
 		return;
 	}
@@ -115,3 +115,133 @@ void pte_free_finish(void)
 		pte_free_submit(*batchp);
 	*batchp = NULL;
 }
+
+/*
+ * Handle i/d cache flushing, called from set_pte_at() or ptep_set_access_flags()
+ */
+static pte_t do_dcache_icache_coherency(pte_t pte)
+{
+	unsigned long pfn = pte_pfn(pte);
+	struct page *page;
+
+	if (unlikely(!pfn_valid(pfn)))
+		return pte;
+	page = pfn_to_page(pfn);
+
+	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)) {
+		pr_debug("do_dcache_icache_coherency... flushing\n");
+		flush_dcache_icache_page(page);
+		set_bit(PG_arch_1, &page->flags);
+	}
+	else
+		pr_debug("do_dcache_icache_coherency... already clean\n");
+	return __pte(pte_val(pte) | _PAGE_HWEXEC);
+}
+
+static inline int is_exec_fault(void)
+{
+	return current->thread.regs && TRAP(current->thread.regs) == 0x400;
+}
+
+/* We only try to do i/d cache coherency on stuff that looks like
+ * reasonably "normal" PTEs. We currently require a PTE to be present
+ * and we avoid _PAGE_SPECIAL and _PAGE_NO_CACHE
+ */
+static inline int pte_looks_normal(pte_t pte)
+{
+	return (pte_val(pte) &
+		(_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE)) ==
+		(_PAGE_PRESENT);
+}
+
+#if defined(CONFIG_PPC_STD_MMU)
+/* Server-style MMU handles coherency when hashing if HW exec permission
+ * is supposed per page (currently 64-bit only). Else, we always flush
+ * valid PTEs in set_pte.
+ */
+static inline int pte_need_exec_flush(pte_t pte, int set_pte)
+{
+	return set_pte && pte_looks_normal(pte) &&
+		!(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
+		  cpu_has_feature(CPU_FTR_NOEXECUTE));
+}
+#elif _PAGE_HWEXEC == 0
+/* Embedded type MMU without HW exec support (8xx only so far), we flush
+ * the cache for any present PTE
+ */
+static inline int pte_need_exec_flush(pte_t pte, int set_pte)
+{
+	return set_pte && pte_looks_normal(pte);
+}
+#else
+/* Other embedded CPUs with HW exec support per-page, we flush on exec
+ * fault if HWEXEC is not set
+ */
+static inline int pte_need_exec_flush(pte_t pte, int set_pte)
+{
+	return pte_looks_normal(pte) && is_exec_fault() &&
+		!(pte_val(pte) & _PAGE_HWEXEC);
+}
+#endif
+
+/*
+ * set_pte stores a linux PTE into the linux page table.
+ */
+void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+{
+#ifdef CONFIG_DEBUG_VM
+	WARN_ON(pte_present(*ptep));
+#endif
+	/* Note: mm->context.id might not yet have been assigned as
+	 * this context might not have been activated yet when this
+	 * is called.
+	 */
+	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
+	if (pte_need_exec_flush(pte, 1))
+		pte = do_dcache_icache_coherency(pte);
+
+	/* Perform the setting of the PTE */
+	__set_pte_at(mm, addr, ptep, pte, 0);
+}
+
+/*
+ * This is called when relaxing access to a PTE. It's also called in the page
+ * fault path when we don't hit any of the major fault cases, ie, a minor
+ * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have
+ * handled those two for us, we additionally deal with missing execute
+ * permission here on some processors
+ */
+int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+			  pte_t *ptep, pte_t entry, int dirty)
+{
+	int changed;
+	if (!dirty && pte_need_exec_flush(entry, 0))
+		entry = do_dcache_icache_coherency(entry);
+	changed = !pte_same(*(ptep), entry);
+	if (changed) {
+		assert_pte_locked(vma->vm_mm, address);
+		__ptep_set_access_flags(ptep, entry);
+		flush_tlb_page_nohash(vma, address);
+	}
+	return changed;
+}
+
+#ifdef CONFIG_DEBUG_VM
+void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	if (mm == &init_mm)
+		return;
+	pgd = mm->pgd + pgd_index(addr);
+	BUG_ON(pgd_none(*pgd));
+	pud = pud_offset(pgd, addr);
+	BUG_ON(pud_none(*pud));
+	pmd = pmd_offset(pud, addr);
+	BUG_ON(!pmd_present(*pmd));
+	BUG_ON(!spin_is_locked(pte_lockptr(mm, pmd)));
+}
+#endif /* CONFIG_DEBUG_VM */
+
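
Note (illustrative sketch, not part of the commit): the heart of the patch is a
lazy flush. flush_dcache_icache_page() is expensive, so PG_arch_1 serves as a
per-page "caches are coherent" marker and the flush happens at most once, the
first time a page is mapped somewhere execution may occur; elsewhere in the
powerpc code (flush_dcache_page()) the bit is cleared again when the kernel
dirties the page, re-arming the flush. A minimal standalone C model of that
pattern follows; struct fake_page, PG_CLEAN, fake_flush() and page_dirtied()
are hypothetical stand-ins for struct page, PG_arch_1,
flush_dcache_icache_page() and the PG_arch_1 clearing just described.

#include <stdio.h>

/* Stand-in for PG_arch_1: set once the page's caches are coherent. */
#define PG_CLEAN (1UL << 0)

struct fake_page {
	unsigned long flags;
};

/* Expensive in real life: write back the D-cache and invalidate the
 * I-cache for the whole page. Here it only logs. */
static void fake_flush(struct fake_page *page)
{
	printf("flushing d/i-cache for page %p\n", (void *)page);
}

/* The do_dcache_icache_coherency() pattern: flush at most once, then
 * remember that the page is coherent. */
static void make_exec_coherent(struct fake_page *page)
{
	if (!(page->flags & PG_CLEAN)) {
		fake_flush(page);
		page->flags |= PG_CLEAN;
	}
}

/* What flush_dcache_page() does with PG_arch_1 when page contents
 * change: clear the marker so the next exec mapping flushes again. */
static void page_dirtied(struct fake_page *page)
{
	page->flags &= ~PG_CLEAN;
}

int main(void)
{
	struct fake_page page = { .flags = 0 };

	make_exec_coherent(&page);	/* first exec mapping: flushes  */
	make_exec_coherent(&page);	/* already clean: no flush      */
	page_dirtied(&page);		/* new code written to the page */
	make_exec_coherent(&page);	/* flushes again                */
	return 0;
}

Built with e.g. "gcc -Wall sketch.c", the first and third make_exec_coherent()
calls print the flush message and the second is skipped, which is exactly the
behaviour the pr_debug() lines in do_dcache_icache_coherency() trace.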