author     Paul Mundt <lethal@linux-sh.org>    2007-11-18 23:05:18 -0500
committer  Paul Mundt <lethal@linux-sh.org>    2007-11-18 23:05:18 -0500
commit     0f1a394ba68c4bbdedb1dbfdf6784ba54c07bbe4 (patch)
tree       ef68075d7d43e8f458bf653f072ae2f8cc0bcbd1 /arch/sh/mm/fault.c
parent     1c6b2ca5e0939bf8b5d1a11f1646f25189ecd447 (diff)
sh: lockless UTLB miss fast-path.
With the refactored update_mmu_cache() introduced in older kernels, there's no longer any need to take the page_table_lock in this path, so simply drop it completely.

Without this, performance degradation is seen on SMP on heavily threaded workloads that don't use the split ptlock, and ultimately we have no reason to contend for the lock in the first place.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
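For illustration, here is a minimal sketch of the two lookup patterns involved, reusing the identifiers from the diff below (a kernel-style fragment under the diff's surrounding context, not a standalone program):

        /* Before: user-space addresses took the per-table PTE lock via
         * pte_offset_map_lock(); kernel P3SEG addresses used the lockless
         * pte_offset_kernel(), hence the mm/ptl bookkeeping. */
        if (mm)
                pte = pte_offset_map_lock(mm, pmd, address, &ptl);
        else
                pte = pte_offset_kernel(pmd, address);

        /* After: one lockless lookup for both cases. Every failure path
         * simply returns 1, deferring to the full fault handler, so a racy
         * read of the entry is recoverable. */
        pte = pte_offset_kernel(pmd, address);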
Diffstat (limited to 'arch/sh/mm/fault.c')
-rw-r--r--  arch/sh/mm/fault.c | 25 +++++++------------------
1 file changed, 7 insertions(+), 18 deletions(-)
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index c56a5fabcd0f..60d74f793a1d 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -258,9 +258,6 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t entry;
-	struct mm_struct *mm = current->mm;
-	spinlock_t *ptl = NULL;
-	int ret = 1;
 
 #ifdef CONFIG_SH_KGDB
 	if (kgdb_nofault && kgdb_bus_err_hook)
@@ -274,12 +271,11 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
 	 */
 	if (address >= P3SEG && address < P3_ADDR_MAX) {
 		pgd = pgd_offset_k(address);
-		mm = NULL;
 	} else {
-		if (unlikely(address >= TASK_SIZE || !mm))
+		if (unlikely(address >= TASK_SIZE || !current->mm))
 			return 1;
 
-		pgd = pgd_offset(mm, address);
+		pgd = pgd_offset(current->mm, address);
 	}
 
 	pud = pud_offset(pgd, address);
@@ -289,16 +285,12 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
 	if (pmd_none_or_clear_bad(pmd))
 		return 1;
 
-	if (mm)
-		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-	else
-		pte = pte_offset_kernel(pmd, address);
-
+	pte = pte_offset_kernel(pmd, address);
 	entry = *pte;
 	if (unlikely(pte_none(entry) || pte_not_present(entry)))
-		goto unlock;
+		return 1;
 	if (unlikely(writeaccess && !pte_write(entry)))
-		goto unlock;
+		return 1;
 
 	if (writeaccess)
 		entry = pte_mkdirty(entry);
@@ -306,9 +298,6 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
 
 	set_pte(pte, entry);
 	update_mmu_cache(NULL, address, entry);
-	ret = 0;
-unlock:
-	if (mm)
-		pte_unmap_unlock(pte, ptl);
-	return ret;
+
+	return 0;
 }
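Pieced together from the right-hand (new) side of the hunks above, the post-patch fast path reduces to the following shape; lines falling between hunks are elided, as in the diff:

        if (address >= P3SEG && address < P3_ADDR_MAX) {
                pgd = pgd_offset_k(address);
        } else {
                if (unlikely(address >= TASK_SIZE || !current->mm))
                        return 1;

                pgd = pgd_offset(current->mm, address);
        }

        pud = pud_offset(pgd, address);
        /* ... pud checks and pmd lookup elided (unchanged context) ... */
        if (pmd_none_or_clear_bad(pmd))
                return 1;

        pte = pte_offset_kernel(pmd, address);
        entry = *pte;
        if (unlikely(pte_none(entry) || pte_not_present(entry)))
                return 1;
        if (unlikely(writeaccess && !pte_write(entry)))
                return 1;

        if (writeaccess)
                entry = pte_mkdirty(entry);
        /* ... unchanged context elided ... */
        set_pte(pte, entry);
        update_mmu_cache(NULL, address, entry);

        return 0;

With every failure path now a plain return 1 (deferring to the full fault handler) rather than a jump to a shared unlock label, the ptl bookkeeping disappears entirely.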