diff options
author | Paul Mundt <lethal@linux-sh.org> | 2009-08-14 14:06:41 -0400 |
---|---|---|
committer | Paul Mundt <lethal@linux-sh.org> | 2009-08-14 14:06:41 -0400 |
commit | 8010fbe7a67c2f993cbb11b9d8b7e98528256dd1 (patch) | |
tree | 861fc7d33fe08b33818b9401f2ba1b32edd82505 /arch/sh/mm/fault_32.c | |
parent | 112e58471de3431fbd03dee514777ad4a66a77b2 (diff) |
sh: TLB fast path optimizations for load/store exceptions.
This only bothers with the TLB entry flush in the case of the initial
page write exception, as it is unnecessary in the case of the load/store
exceptions.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/mm/fault_32.c')
-rw-r--r-- | arch/sh/mm/fault_32.c | 26 |
1 files changed, 13 insertions, 13 deletions
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index 41840647f65f..f1c93c880ed4 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c | |||
@@ -327,7 +327,6 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess, | |||
327 | pmd_t *pmd; | 327 | pmd_t *pmd; |
328 | pte_t *pte; | 328 | pte_t *pte; |
329 | pte_t entry; | 329 | pte_t entry; |
330 | int ret = 1; | ||
331 | 330 | ||
332 | /* | 331 | /* |
333 | * We don't take page faults for P1, P2, and parts of P4, these | 332 | * We don't take page faults for P1, P2, and parts of P4, these |
@@ -338,40 +337,41 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess, | |||
338 | pgd = pgd_offset_k(address); | 337 | pgd = pgd_offset_k(address); |
339 | } else { | 338 | } else { |
340 | if (unlikely(address >= TASK_SIZE || !current->mm)) | 339 | if (unlikely(address >= TASK_SIZE || !current->mm)) |
341 | goto out; | 340 | return 1; |
342 | 341 | ||
343 | pgd = pgd_offset(current->mm, address); | 342 | pgd = pgd_offset(current->mm, address); |
344 | } | 343 | } |
345 | 344 | ||
346 | pud = pud_offset(pgd, address); | 345 | pud = pud_offset(pgd, address); |
347 | if (pud_none_or_clear_bad(pud)) | 346 | if (pud_none_or_clear_bad(pud)) |
348 | goto out; | 347 | return 1; |
349 | pmd = pmd_offset(pud, address); | 348 | pmd = pmd_offset(pud, address); |
350 | if (pmd_none_or_clear_bad(pmd)) | 349 | if (pmd_none_or_clear_bad(pmd)) |
351 | goto out; | 350 | return 1; |
352 | pte = pte_offset_kernel(pmd, address); | 351 | pte = pte_offset_kernel(pmd, address); |
353 | entry = *pte; | 352 | entry = *pte; |
354 | if (unlikely(pte_none(entry) || pte_not_present(entry))) | 353 | if (unlikely(pte_none(entry) || pte_not_present(entry))) |
355 | goto out; | 354 | return 1; |
356 | if (unlikely(writeaccess && !pte_write(entry))) | 355 | if (unlikely(writeaccess && !pte_write(entry))) |
357 | goto out; | 356 | return 1; |
358 | 357 | ||
359 | if (writeaccess) | 358 | if (writeaccess) |
360 | entry = pte_mkdirty(entry); | 359 | entry = pte_mkdirty(entry); |
361 | entry = pte_mkyoung(entry); | 360 | entry = pte_mkyoung(entry); |
362 | 361 | ||
362 | set_pte(pte, entry); | ||
363 | |||
363 | #if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP) | 364 | #if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP) |
364 | /* | 365 | /* |
365 | * ITLB is not affected by "ldtlb" instruction. | 366 | * SH-4 does not set MMUCR.RC to the corresponding TLB entry in |
366 | * So, we need to flush the entry by ourselves. | 367 | * the case of an initial page write exception, so we need to |
368 | * flush it in order to avoid potential TLB entry duplication. | ||
367 | */ | 369 | */ |
368 | local_flush_tlb_one(get_asid(), address & PAGE_MASK); | 370 | if (writeaccess == 2) |
371 | local_flush_tlb_one(get_asid(), address & PAGE_MASK); | ||
369 | #endif | 372 | #endif |
370 | 373 | ||
371 | set_pte(pte, entry); | ||
372 | update_mmu_cache(NULL, address, entry); | 374 | update_mmu_cache(NULL, address, entry); |
373 | 375 | ||
374 | ret = 0; | 376 | return 0; |
375 | out: | ||
376 | return ret; | ||
377 | } | 377 | } |