author	Chris Metcalf <cmetcalf@tilera.com>	2012-03-29 15:34:52 -0400
committer	Chris Metcalf <cmetcalf@tilera.com>	2012-04-02 12:13:02 -0400
commit	48292738d06d7b46d861652ef59bd03be931c2c9 (patch)
tree	b296408a3c451199cb76372e218c3c3c572d3759 /arch
parent	6731aa9eae3e94b094a44d2aea02387a39202fbc (diff)
arch/tile: don't wait for migrating PTEs in an NMI handler
Doing so raises the possibility of self-deadlock if we are waiting for
a backtrace for an oprofile or perf interrupt while we are in the
middle of migrating our own stack page.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
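In outline, the fix adds a bail-out check ahead of the existing wait.
A minimal sketch of the new logic (pte, pteval, and pc are locals and
parameters of handle_migrating_pte() in the diff below; in_nmi() and
search_exception_tables() are standard kernel helpers):

	if (pte_migrating(pteval)) {
		/* An NMI (e.g. a perf or oprofile backtrace) may have
		 * interrupted this thread while it was migrating its own
		 * stack page, so waiting here could never complete. */
		if (in_nmi() && search_exception_tables(pc))
			return 0;	/* don't wait; fall back to normal fault handling */
		wait_for_migration(pte);
		return 1;
	}

Returning 0 instead of waiting is safe only because
search_exception_tables(pc) confirms the faulting PC has a registered
exception fixup, so the caller's normal fault-handling path can recover.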
Diffstat (limited to 'arch')
-rw-r--r--	arch/tile/mm/fault.c	13
1 files changed, 10 insertions, 3 deletions
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index cac17c4f2ecf..a1da473c8555 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -203,9 +203,14 @@ static pgd_t *get_current_pgd(void)
  * interrupt or a critical region, and must do as little as possible.
  * Similarly, we can't use atomic ops here, since we may be handling a
  * fault caused by an atomic op access.
+ *
+ * If we find a migrating PTE while we're in an NMI context, and we're
+ * at a PC that has a registered exception handler, we don't wait,
+ * since this thread may (e.g.) have been interrupted while migrating
+ * its own stack, which would then cause us to self-deadlock.
  */
 static int handle_migrating_pte(pgd_t *pgd, int fault_num,
-				unsigned long address,
+				unsigned long address, unsigned long pc,
 				int is_kernel_mode, int write)
 {
 	pud_t *pud;
@@ -227,6 +232,8 @@ static int handle_migrating_pte(pgd_t *pgd, int fault_num,
 		pte_offset_kernel(pmd, address);
 	pteval = *pte;
 	if (pte_migrating(pteval)) {
+		if (in_nmi() && search_exception_tables(pc))
+			return 0;
 		wait_for_migration(pte);
 		return 1;
 	}
@@ -300,7 +307,7 @@ static int handle_page_fault(struct pt_regs *regs,
 	 * rather than trying to patch up the existing PTE.
 	 */
 	pgd = get_current_pgd();
-	if (handle_migrating_pte(pgd, fault_num, address,
+	if (handle_migrating_pte(pgd, fault_num, address, regs->pc,
 				 is_kernel_mode, write))
 		return 1;
 
@@ -665,7 +672,7 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
 	 */
 	if (fault_num == INT_DTLB_ACCESS)
 		write = 1;
-	if (handle_migrating_pte(pgd, fault_num, address, 1, write))
+	if (handle_migrating_pte(pgd, fault_num, address, pc, 1, write))
 		return state;
 
 	/* Return zero so that we continue on with normal fault handling. */