aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMax Filippov <jcmvbkbc@gmail.com>2013-05-15 11:34:05 -0400
committerChris Zankel <chris@zankel.net>2013-07-08 04:18:56 -0400
commita99e07ee5e887750f5136bc6799abe47a56fd2c9 (patch)
tree9464580bafe4ff135db21a6fb2a5165445d7bdaf
parentc5a771d0678f9613e9f89cf1a5bdcfa5b08b225b (diff)
xtensa: check TLB sanity on return to userspace
- check that user TLB mappings correspond to the current page table; - check that TLB mapping VPN is in the kernel/user address range in accordance with its ASID. Signed-off-by: Max Filippov <jcmvbkbc@gmail.com> Signed-off-by: Chris Zankel <chris@zankel.net>
-rw-r--r--arch/xtensa/Kconfig.debug10
-rw-r--r--arch/xtensa/kernel/entry.S9
-rw-r--r--arch/xtensa/mm/tlb.c113
3 files changed, 131 insertions, 1 deletions
diff --git a/arch/xtensa/Kconfig.debug b/arch/xtensa/Kconfig.debug
index a34010e0e51c..af7da74d535f 100644
--- a/arch/xtensa/Kconfig.debug
+++ b/arch/xtensa/Kconfig.debug
@@ -2,6 +2,16 @@ menu "Kernel hacking"
2 2
3source "lib/Kconfig.debug" 3source "lib/Kconfig.debug"
4 4
5config DEBUG_TLB_SANITY
6 bool "Debug TLB sanity"
7 depends on DEBUG_KERNEL
8 help
9 Enable this to turn on TLB sanity check on each entry to userspace.
10 This check can spot missing TLB invalidation/wrong PTE permissions/
11 premature page freeing.
12
13 If unsure, say N.
14
5config LD_NO_RELAX 15config LD_NO_RELAX
6 bool "Disable linker relaxation" 16 bool "Disable linker relaxation"
7 default n 17 default n
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index fa94512ff84d..9298742f0fd0 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -458,7 +458,7 @@ common_exception_return:
458 458
459 _bbsi.l a4, TIF_NEED_RESCHED, 3f 459 _bbsi.l a4, TIF_NEED_RESCHED, 3f
460 _bbsi.l a4, TIF_NOTIFY_RESUME, 2f 460 _bbsi.l a4, TIF_NOTIFY_RESUME, 2f
461 _bbci.l a4, TIF_SIGPENDING, 4f 461 _bbci.l a4, TIF_SIGPENDING, 5f
462 462
4632: l32i a4, a1, PT_DEPC 4632: l32i a4, a1, PT_DEPC
464 bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f 464 bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
@@ -476,6 +476,13 @@ common_exception_return:
476 callx4 a4 476 callx4 a4
477 j 1b 477 j 1b
478 478
4795:
480#ifdef CONFIG_DEBUG_TLB_SANITY
481 l32i a4, a1, PT_DEPC
482 bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
483 movi a4, check_tlb_sanity
484 callx4 a4
485#endif
4794: /* Restore optional registers. */ 4864: /* Restore optional registers. */
480 487
481 load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT 488 load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index 743346150eea..ca9d2366bf12 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -141,3 +141,116 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
141 141
142 local_irq_restore(flags); 142 local_irq_restore(flags);
143} 143}
144
145#ifdef CONFIG_DEBUG_TLB_SANITY
146
147static unsigned get_pte_for_vaddr(unsigned vaddr)
148{
149 struct task_struct *task = get_current();
150 struct mm_struct *mm = task->mm;
151 pgd_t *pgd;
152 pmd_t *pmd;
153 pte_t *pte;
154
155 if (!mm)
156 mm = task->active_mm;
157 pgd = pgd_offset(mm, vaddr);
158 if (pgd_none_or_clear_bad(pgd))
159 return 0;
160 pmd = pmd_offset(pgd, vaddr);
161 if (pmd_none_or_clear_bad(pmd))
162 return 0;
163 pte = pte_offset_map(pmd, vaddr);
164 if (!pte)
165 return 0;
166 return pte_val(*pte);
167}
168
/* Findings OR-ed into a bitmask by check_tlb_entry(). */
enum {
	TLB_SUSPICIOUS = 1 << 0,	/* odd, but not provably wrong */
	TLB_INSANE = 1 << 1,		/* definite inconsistency */
};
173
/* Hard stop: a definitely inconsistent TLB entry was found. */
static void tlb_insane(void)
{
	/* BUG() is the idiomatic form of the unconditional BUG_ON(1). */
	BUG();
}
178
/* Soft report: a suspicious (but not provably wrong) TLB entry was found. */
static void tlb_suspicious(void)
{
	WARN_ON(1);
}
183
/*
 * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
 * and TLB entries with user ASID (>=4) have VMA < TASK_SIZE.
 *
 * Check that valid TLB entries either have the same PA as the PTE, or the PTE
 * is marked as non-present.  A non-present PTE paired with a page that has a
 * non-zero refcount and a zero mapcount is normal for a batched TLB flush
 * operation.  A zero refcount means that the page was freed prematurely.  A
 * non-zero mapcount is unusual, but does not necessarily mean an error, so
 * it is only marked as suspicious.
 *
 * Returns a bitmask of TLB_INSANE / TLB_SUSPICIOUS, 0 when the entry is OK.
 */
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
	/* Way/entry index encoded as the TLB probe instructions expect. */
	unsigned tlbidx = w | (e << PAGE_SHIFT);
	/* VPN + ASID word of the TLB entry under inspection. */
	unsigned r0 = dtlb ?
		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
	unsigned pte = get_pte_for_vaddr(vpn);
	/* ASID of the current mm, from RASID field 1 (bits 15:8). */
	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
	unsigned tlb_asid = r0 & ASID_MASK;
	bool kernel = tlb_asid == 1;
	int rc = 0;

	/*
	 * Cross-check ASID against address range: a user-ASID entry mapping
	 * a kernel address (or vice versa) is definitely wrong.  ASID 0
	 * (invalid entry) is exempt.
	 */
	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
				dtlb ? 'D' : 'I', w, e, vpn,
				kernel ? "kernel" : "user");
		rc |= TLB_INSANE;
	}

	/*
	 * Only entries belonging to the current mm can be compared against
	 * the current page table.
	 */
	if (tlb_asid == mm_asid) {
		unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
			read_itlb_translation(tlbidx);
		/* PA in the TLB entry must agree with the PA in the PTE. */
		if ((pte ^ r1) & PAGE_MASK) {
			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
					dtlb ? 'D' : 'I', w, e, r0, r1, pte);
			if (pte == 0 || !pte_present(__pte(pte))) {
				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
				pr_err("page refcount: %d, mapcount: %d\n",
						page_count(p),
						page_mapcount(p));
				if (!page_count(p))
					rc |= TLB_INSANE;
				else if (page_mapped(p))
					rc |= TLB_SUSPICIOUS;
			} else {
				rc |= TLB_INSANE;
			}
		}
	}
	return rc;
}
235
236void check_tlb_sanity(void)
237{
238 unsigned long flags;
239 unsigned w, e;
240 int bug = 0;
241
242 local_irq_save(flags);
243 for (w = 0; w < DTLB_ARF_WAYS; ++w)
244 for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
245 bug |= check_tlb_entry(w, e, true);
246 for (w = 0; w < ITLB_ARF_WAYS; ++w)
247 for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
248 bug |= check_tlb_entry(w, e, false);
249 if (bug & TLB_INSANE)
250 tlb_insane();
251 if (bug & TLB_SUSPICIOUS)
252 tlb_suspicious();
253 local_irq_restore(flags);
254}
255
256#endif /* CONFIG_DEBUG_TLB_SANITY */