author     Paul Mundt <lethal@linux-sh.org>  2012-05-14 03:44:45 -0400
committer  Paul Mundt <lethal@linux-sh.org>  2012-05-14 03:44:45 -0400
commit     4de5185629f44942f60e2fd536709ef31bd5a9c1 (patch)
tree       9e0d652a25c7c14d7d153e8ed322270bc554fa4f
parent     c06fd28387a3da2cc4763f7f471f735ccdd61b88 (diff)
sh64: Invert page fault fast-path error path values.

This brings the sh64 version in line with the sh32 one with regard to
how errors are handled: the fast-path handlers now return 0 on success
and nonzero on error. This is base work for further unification of the
two implementations.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
-rw-r--r--  arch/sh/kernel/cpu/sh5/entry.S |  2 +-
-rw-r--r--  arch/sh/mm/tlbex_64.c          | 36 ++++++++++++++++++------------------
2 files changed, 19 insertions(+), 19 deletions(-)
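The heart of the change is the inverted return convention for the fast-path handlers: success is now 0 and failure nonzero, as on sh32. A minimal userspace sketch of the new convention (illustrative only; toy_handle_tlbmiss is a stand-in, not the kernel function):

#include <stdio.h>

/* Toy stand-in for a fast-path handler under the new convention:
 * return 0 when the fault was handled, nonzero when it was not. */
static int toy_handle_tlbmiss(int can_handle)
{
	return can_handle ? 0 : 1;
}

int main(void)
{
	/* Callers now test for success with "== 0", as the diff below does. */
	if (toy_handle_tlbmiss(1) == 0)
		printf("handled on the fast path -> return 0\n");
	if (toy_handle_tlbmiss(0) != 0)
		printf("not handled -> return 1, fall back to the slow path\n");
	return 0;
}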
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
index de60dc8d737d..ff1f0e6e9bec 100644
--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -335,7 +335,7 @@ tlb_miss:
 	/* If the fast path handler fixed the fault, just drop through quickly
 	   to the restore code right away to return to the excepting context.
 	 */
-	beqi/u	r2, 0, tr1
+	bnei/u	r2, 0, tr1
 
 fast_tlb_miss_restore:
 	ld.q	SP, SAVED_TR0, r2
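In the hunk above, r2 holds the fast-path handler's return value and tr1 the slow-path branch target; beqi/bnei branch when a register does or does not equal an immediate (the /u suffix appears to be a static branch-prediction hint in SH-5 syntax). Rendered as C, the flip looks roughly like this (a control-flow sketch, not real kernel code):

/* Old convention: r2 != 0 meant "handled", so leave the fast path on 0. */
if (r2 == 0)			/* beqi/u r2, 0, tr1 */
	goto slow_path;		/* tr1: full fault handling */

/* New convention: r2 == 0 means "handled", so leave on nonzero instead. */
if (r2 != 0)			/* bnei/u r2, 0, tr1 */
	goto slow_path;

/* Falling through reaches fast_tlb_miss_restore either way. */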
diff --git a/arch/sh/mm/tlbex_64.c b/arch/sh/mm/tlbex_64.c
index 98b64278f8c7..59cb058217a0 100644
--- a/arch/sh/mm/tlbex_64.c
+++ b/arch/sh/mm/tlbex_64.c
@@ -53,23 +53,23 @@ static int handle_vmalloc_fault(struct mm_struct *mm,
 
 	pud = pud_offset(dir, address);
 	if (pud_none_or_clear_bad(pud))
-		return 0;
+		return 1;
 
 	pmd = pmd_offset(pud, address);
 	if (pmd_none_or_clear_bad(pmd))
-		return 0;
+		return 1;
 
 	pte = pte_offset_kernel(pmd, address);
 	entry = *pte;
 
 	if (pte_none(entry) || !pte_present(entry))
-		return 0;
+		return 1;
 	if ((pte_val(entry) & protection_flags) != protection_flags)
-		return 0;
+		return 1;
 
 	update_mmu_cache(NULL, address, pte);
 
-	return 1;
+	return 0;
 }
 
 static int handle_tlbmiss(struct mm_struct *mm,
@@ -94,27 +94,27 @@ static int handle_tlbmiss(struct mm_struct *mm,
 	   the next test is necessary. - RPC */
 	if (address >= (unsigned long) TASK_SIZE)
 		/* upper half - never has page table entries. */
-		return 0;
+		return 1;
 
 	dir = pgd_offset(mm, address);
 	if (pgd_none(*dir) || !pgd_present(*dir))
-		return 0;
+		return 1;
 	if (!pgd_present(*dir))
-		return 0;
+		return 1;
 
 	pud = pud_offset(dir, address);
 	if (pud_none(*pud) || !pud_present(*pud))
-		return 0;
+		return 1;
 
 	pmd = pmd_offset(pud, address);
 	if (pmd_none(*pmd) || !pmd_present(*pmd))
-		return 0;
+		return 1;
 
 	pte = pte_offset_kernel(pmd, address);
 	entry = *pte;
 
 	if (pte_none(entry) || !pte_present(entry))
-		return 0;
+		return 1;
 
 	/*
 	 * If the page doesn't have sufficient protection bits set to
@@ -123,11 +123,11 @@ static int handle_tlbmiss(struct mm_struct *mm,
 	 * handler.
 	 */
 	if ((pte_val(entry) & protection_flags) != protection_flags)
-		return 0;
+		return 1;
 
 	update_mmu_cache(NULL, address, pte);
 
-	return 1;
+	return 0;
 }
 
 /*
@@ -214,12 +214,12 @@ asmlinkage int do_fast_page_fault(unsigned long long ssr_md,
 		 * Process-contexts can never have this address
 		 * range mapped
 		 */
-		if (handle_vmalloc_fault(mm, protection_flags, address))
-			return 1;
+		if (handle_vmalloc_fault(mm, protection_flags, address) == 0)
+			return 0;
 	} else if (!in_interrupt() && mm) {
-		if (handle_tlbmiss(mm, protection_flags, address))
-			return 1;
+		if (handle_tlbmiss(mm, protection_flags, address) == 0)
+			return 0;
 	}
 
-	return 0;
+	return 1;
 }
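Net effect on do_fast_page_fault(): it now returns 0 when the fault was fixed up on the fast path and 1 when the caller must fall back to the generic fault handler, matching sh32. A condensed sketch of the resulting flow (the elided parameters and the vmalloc-range condition name are assumptions, not the literal code):

asmlinkage int do_fast_page_fault(unsigned long long ssr_md, /* ... */)
{
	/* ... mm / protection_flags setup elided ... */

	if (address_is_in_vmalloc_range) {	/* hypothetical condition name */
		if (handle_vmalloc_fault(mm, protection_flags, address) == 0)
			return 0;	/* handled: fast restore in entry.S */
	} else if (!in_interrupt() && mm) {
		if (handle_tlbmiss(mm, protection_flags, address) == 0)
			return 0;	/* handled: fast restore in entry.S */
	}

	return 1;			/* not handled: take the slow path */
}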