Diffstat (limited to 'arch/tile/mm/fault.c')
 arch/tile/mm/fault.c | 22 ++++++++++++++++------
 1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index cba30e9547b4..22e58f51ed23 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -130,7 +130,7 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 }
 
 /*
- * Handle a fault on the vmalloc or module mapping area
+ * Handle a fault on the vmalloc area.
  */
 static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
 {
@@ -203,9 +203,14 @@ static pgd_t *get_current_pgd(void)
  * interrupt or a critical region, and must do as little as possible.
  * Similarly, we can't use atomic ops here, since we may be handling a
  * fault caused by an atomic op access.
+ *
+ * If we find a migrating PTE while we're in an NMI context, and we're
+ * at a PC that has a registered exception handler, we don't wait,
+ * since this thread may (e.g.) have been interrupted while migrating
+ * its own stack, which would then cause us to self-deadlock.
  */
 static int handle_migrating_pte(pgd_t *pgd, int fault_num,
-				unsigned long address,
+				unsigned long address, unsigned long pc,
 				int is_kernel_mode, int write)
 {
 	pud_t *pud;
@@ -227,6 +232,8 @@ static int handle_migrating_pte(pgd_t *pgd, int fault_num,
 		pte_offset_kernel(pmd, address);
 	pteval = *pte;
 	if (pte_migrating(pteval)) {
+		if (in_nmi() && search_exception_tables(pc))
+			return 0;
 		wait_for_migration(pte);
 		return 1;
 	}
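
This hunk is the core of the change. Below is an annotated restatement of the guard, a sketch only: pte_migrating() and wait_for_migration() are the tile-specific helpers already used in this function, while in_nmi() and search_exception_tables() are the generic kernel APIs.

	if (pte_migrating(pteval)) {
		/*
		 * In NMI context the interrupted thread may itself be the
		 * one migrating this page (e.g. its own stack), so spinning
		 * here might never terminate.  If the faulting PC has an
		 * exception-table fixup registered, report "not handled"
		 * and let that fixup recover instead.
		 */
		if (in_nmi() && search_exception_tables(pc))
			return 0;
		wait_for_migration(pte);	/* migrator can run: safe to wait */
		return 1;			/* handled; retry the access */
	}
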
@@ -300,7 +307,7 @@ static int handle_page_fault(struct pt_regs *regs,
 	 * rather than trying to patch up the existing PTE.
 	 */
 	pgd = get_current_pgd();
-	if (handle_migrating_pte(pgd, fault_num, address,
+	if (handle_migrating_pte(pgd, fault_num, address, regs->pc,
 				 is_kernel_mode, write))
 		return 1;
 
@@ -335,9 +342,12 @@ static int handle_page_fault(struct pt_regs *regs,
 	/*
 	 * If we're trying to touch user-space addresses, we must
 	 * be either at PL0, or else with interrupts enabled in the
-	 * kernel, so either way we can re-enable interrupts here.
+	 * kernel, so either way we can re-enable interrupts here
+	 * unless we are doing atomic access to user space with
+	 * interrupts disabled.
 	 */
-	local_irq_enable();
+	if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
+		local_irq_enable();
 
 	mm = tsk->mm;
 
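
For readers outside the tile tree: PT_FLAGS_DISABLE_IRQ is a flag in tile's struct pt_regs; per the comment above, it marks a fault taken during an atomic user-space access that must keep interrupts masked. An annotated sketch of the new gate (the flag's producer is outside this diff):

	if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
		local_irq_enable();	/* normal case: safe to re-enable */
	/* else: atomic user access in progress, leave interrupts masked */
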
@@ -665,7 +675,7 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
 	 */
 	if (fault_num == INT_DTLB_ACCESS)
 		write = 1;
-	if (handle_migrating_pte(pgd, fault_num, address, 1, write))
+	if (handle_migrating_pte(pgd, fault_num, address, pc, 1, write))
 		return state;
 
 	/* Return zero so that we continue on with normal fault handling. */
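
To illustrate why the early return matters, here is a hypothetical consumer, not part of this patch: an NMI-context probe of user memory (say, a profiler unwinding a user stack), where usp and frame are assumed locals. __copy_from_user_inatomic() registers exception-table fixups for its access instructions, so a fault on a migrating PTE now resolves through the fixup rather than spinning inside the NMI:

	unsigned long frame;
	pagefault_disable();
	if (__copy_from_user_inatomic(&frame, (const void __user *)usp,
				      sizeof(frame)))
		frame = 0;	/* fixup ran: fault reported, no self-deadlock */
	pagefault_enable();
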