aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPaul Mundt <lethal@linux-sh.org>2009-08-14 14:06:41 -0400
committerPaul Mundt <lethal@linux-sh.org>2009-08-14 14:06:41 -0400
commit8010fbe7a67c2f993cbb11b9d8b7e98528256dd1 (patch)
tree861fc7d33fe08b33818b9401f2ba1b32edd82505
parent112e58471de3431fbd03dee514777ad4a66a77b2 (diff)
sh: TLB fast path optimizations for load/store exceptions.
This only bothers with the TLB entry flush in the case of the initial page write exception, as it is unnecessary in the case of the load/store exceptions. Signed-off-by: Paul Mundt <lethal@linux-sh.org>
-rw-r--r--arch/sh/kernel/cpu/sh3/entry.S2
-rw-r--r--arch/sh/mm/fault_32.c26
2 files changed, 14 insertions, 14 deletions
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index bbaf2bd118e7..a701fac8ed42 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -124,7 +124,7 @@ ENTRY(tlb_miss_store)
124 .align 2 124 .align 2
125ENTRY(initial_page_write) 125ENTRY(initial_page_write)
126 bra call_handle_tlbmiss 126 bra call_handle_tlbmiss
127 mov #1, r5 127 mov #2, r5
128 128
129 .align 2 129 .align 2
130ENTRY(tlb_protection_violation_load) 130ENTRY(tlb_protection_violation_load)
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 41840647f65f..f1c93c880ed4 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -327,7 +327,6 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
327 pmd_t *pmd; 327 pmd_t *pmd;
328 pte_t *pte; 328 pte_t *pte;
329 pte_t entry; 329 pte_t entry;
330 int ret = 1;
331 330
332 /* 331 /*
333 * We don't take page faults for P1, P2, and parts of P4, these 332 * We don't take page faults for P1, P2, and parts of P4, these
@@ -338,40 +337,41 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
338 pgd = pgd_offset_k(address); 337 pgd = pgd_offset_k(address);
339 } else { 338 } else {
340 if (unlikely(address >= TASK_SIZE || !current->mm)) 339 if (unlikely(address >= TASK_SIZE || !current->mm))
341 goto out; 340 return 1;
342 341
343 pgd = pgd_offset(current->mm, address); 342 pgd = pgd_offset(current->mm, address);
344 } 343 }
345 344
346 pud = pud_offset(pgd, address); 345 pud = pud_offset(pgd, address);
347 if (pud_none_or_clear_bad(pud)) 346 if (pud_none_or_clear_bad(pud))
348 goto out; 347 return 1;
349 pmd = pmd_offset(pud, address); 348 pmd = pmd_offset(pud, address);
350 if (pmd_none_or_clear_bad(pmd)) 349 if (pmd_none_or_clear_bad(pmd))
351 goto out; 350 return 1;
352 pte = pte_offset_kernel(pmd, address); 351 pte = pte_offset_kernel(pmd, address);
353 entry = *pte; 352 entry = *pte;
354 if (unlikely(pte_none(entry) || pte_not_present(entry))) 353 if (unlikely(pte_none(entry) || pte_not_present(entry)))
355 goto out; 354 return 1;
356 if (unlikely(writeaccess && !pte_write(entry))) 355 if (unlikely(writeaccess && !pte_write(entry)))
357 goto out; 356 return 1;
358 357
359 if (writeaccess) 358 if (writeaccess)
360 entry = pte_mkdirty(entry); 359 entry = pte_mkdirty(entry);
361 entry = pte_mkyoung(entry); 360 entry = pte_mkyoung(entry);
362 361
362 set_pte(pte, entry);
363
363#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP) 364#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
364 /* 365 /*
365 * ITLB is not affected by "ldtlb" instruction. 366 * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
366 * So, we need to flush the entry by ourselves. 367 * the case of an initial page write exception, so we need to
368 * flush it in order to avoid potential TLB entry duplication.
367 */ 369 */
368 local_flush_tlb_one(get_asid(), address & PAGE_MASK); 370 if (writeaccess == 2)
371 local_flush_tlb_one(get_asid(), address & PAGE_MASK);
369#endif 372#endif
370 373
371 set_pte(pte, entry);
372 update_mmu_cache(NULL, address, entry); 374 update_mmu_cache(NULL, address, entry);
373 375
374 ret = 0; 376 return 0;
375out:
376 return ret;
377} 377}