author     Chris Metcalf <cmetcalf@ezchip.com>   2015-09-22 14:49:41 -0400
committer  Chris Metcalf <cmetcalf@ezchip.com>   2016-01-18 14:49:09 -0500
commit     583b24a210ada7e88fc12aaf50024975ec882816 (patch)
tree       b396bc95a44f29d9dd2c8bab86fcd0d0728b24b1 /arch
parent     65a792e84f25d1436698f999224b2cf5d7594546 (diff)
arch/tile: adopt prepare_exit_to_usermode() model from x86
This change is a prerequisite change for TASK_ISOLATION but also
stands on its own for readability and maintainability.  The existing
tile do_work_pending() was called in a loop from assembly on the
slow path; this change moves the loop into C code as well.  For the
x86 version, see commit c5c46f59e4e7 ("x86/entry: Add new,
comprehensible entry and exit handlers written in C").

This change exposes a pre-existing bug on the older tilepro platform:
the singlestep processing is done last, but on tilepro (unlike tilegx)
we enable interrupts while doing that processing, so we could in
theory miss a signal or other asynchronous event.  A future change
could fix this by breaking the singlestep work into a "prepare" step
done in the main loop, and a "trigger" step done after exiting the
loop.  Since this change is intended as purely a restructuring change,
we call out the bug explicitly now, but don't yet fix it.

Signed-off-by: Chris Metcalf <cmetcalf@ezchip.com>
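The model being adopted is easiest to see in skeletal form.  The
sketch below is illustrative only, reusing the tile flag names that
appear elsewhere in this patch; the real implementation is in the
process.c hunk further down:

    /*
     * Sketch of the exit-to-usermode model: assembly disables
     * interrupts, samples thread_info->flags, and makes one call
     * into C; the retry loop lives here rather than in assembly.
     */
    void prepare_exit_to_usermode(struct pt_regs *regs, u32 flags)
    {
    	do {
    		local_irq_enable();
    		/* ...handle resched, signals, notify-resume, etc... */
    		local_irq_disable();
    		flags = READ_ONCE(current_thread_info()->flags);
    	} while (flags & _TIF_WORK_MASK);
    	/* single-shot work (e.g. singlestep) runs once, after the loop */
    }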
Diffstat (limited to 'arch')
-rw-r--r--  arch/tile/include/asm/processor.h    2
-rw-r--r--  arch/tile/include/asm/thread_info.h   8
-rw-r--r--  arch/tile/kernel/intvec_32.S         46
-rw-r--r--  arch/tile/kernel/intvec_64.S         49
-rw-r--r--  arch/tile/kernel/process.c           79
5 files changed, 77 insertions(+), 107 deletions(-)
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index 139dfdee0134..0684e88aacd8 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -212,7 +212,7 @@ static inline void release_thread(struct task_struct *dead_task)
 	/* Nothing for now */
 }
 
-extern int do_work_pending(struct pt_regs *regs, u32 flags);
+extern void prepare_exit_to_usermode(struct pt_regs *regs, u32 flags);
 
 
 /*
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index dc1fb28d9636..4b7cef9e94e0 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -140,10 +140,14 @@ extern void _cpu_idle(void);
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_NOHZ		(1<<TIF_NOHZ)
 
+/* Work to do as we loop to exit to user space. */
+#define _TIF_WORK_MASK \
+	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+	 _TIF_ASYNC_TLB | _TIF_NOTIFY_RESUME)
+
 /* Work to do on any return to user space. */
 #define _TIF_ALLWORK_MASK \
-	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_SINGLESTEP | \
-	 _TIF_ASYNC_TLB | _TIF_NOTIFY_RESUME | _TIF_NOHZ)
+	(_TIF_WORK_MASK | _TIF_SINGLESTEP | _TIF_NOHZ)
 
 /* Work to do at syscall entry. */
 #define _TIF_SYSCALL_ENTRY_WORK \
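One property worth noting in the new mask split: _TIF_WORK_MASK (what
the C loop re-tests) is by construction a subset of _TIF_ALLWORK_MASK
(what gets the C code called at all), so _TIF_SINGLESTEP and _TIF_NOHZ
are serviced once per exit without keeping the loop spinning.  If one
wanted to assert that invariant at build time (purely illustrative,
not part of this patch), it could be written as:

    /* Every flag the exit loop can wait on must also be a flag that
     * gets us into the C code in the first place. */
    BUILD_BUG_ON(_TIF_WORK_MASK & ~_TIF_ALLWORK_MASK);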
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index fbbe2ea882ea..33d48812872a 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -846,18 +846,6 @@ STD_ENTRY(interrupt_return)
 	FEEDBACK_REENTER(interrupt_return)
 
 	/*
-	 * Use r33 to hold whether we have already loaded the callee-saves
-	 * into ptregs.  We don't want to do it twice in this loop, since
-	 * then we'd clobber whatever changes are made by ptrace, etc.
-	 * Get base of stack in r32.
-	 */
-	{
-	 GET_THREAD_INFO(r32)
-	 movei  r33, 0
-	}
-
-.Lretry_work_pending:
-	/*
 	 * Disable interrupts so as to make sure we don't
 	 * miss an interrupt that sets any of the thread flags (like
 	 * need_resched or sigpending) between sampling and the iret.
@@ -867,33 +855,27 @@ STD_ENTRY(interrupt_return)
 	IRQ_DISABLE(r20, r21)
 	TRACE_IRQS_OFF  /* Note: clobbers registers r0-r29 */
 
-
-	/* Check to see if there is any work to do before returning to user. */
+	/*
+	 * See if there are any work items (including single-shot items)
+	 * to do.  If so, save the callee-save registers to pt_regs
+	 * and then dispatch to C code.
+	 */
+	GET_THREAD_INFO(r21)
 	{
-	 addi   r29, r32, THREAD_INFO_FLAGS_OFFSET
-	 moveli r1, lo16(_TIF_ALLWORK_MASK)
+	 addi   r22, r21, THREAD_INFO_FLAGS_OFFSET
+	 moveli r20, lo16(_TIF_ALLWORK_MASK)
 	}
 	{
-	 lw     r29, r29
-	 auli   r1, r1, ha16(_TIF_ALLWORK_MASK)
+	 lw     r22, r22
+	 auli   r20, r20, ha16(_TIF_ALLWORK_MASK)
 	}
-	and     r1, r29, r1
-	bzt     r1, .Lrestore_all
-
-	/*
-	 * Make sure we have all the registers saved for signal
-	 * handling, notify-resume, or single-step.  Call out to C
-	 * code to figure out exactly what we need to do for each flag bit,
-	 * then if necessary, reload the flags and recheck.
-	 */
+	and     r1, r22, r20
 	{
 	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
-	 bnz    r33, 1f
+	 bzt    r1, .Lrestore_all
 	}
 	push_extra_callee_saves r0
-	movei   r33, 1
-1:	jal do_work_pending
-	bnz     r0, .Lretry_work_pending
+	jal     prepare_exit_to_usermode
 
 	/*
 	 * In the NMI case we
@@ -1327,7 +1309,7 @@ STD_ENTRY(ret_from_kernel_thread)
 	FEEDBACK_REENTER(ret_from_kernel_thread)
 	{
 	 movei  r30, 0               /* not an NMI */
-	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	 j      interrupt_return
 	}
 	STD_ENDPROC(ret_from_kernel_thread)
 
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 58964d209d4d..a41c994ce237 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -879,20 +879,6 @@ STD_ENTRY(interrupt_return)
 	FEEDBACK_REENTER(interrupt_return)
 
 	/*
-	 * Use r33 to hold whether we have already loaded the callee-saves
-	 * into ptregs.  We don't want to do it twice in this loop, since
-	 * then we'd clobber whatever changes are made by ptrace, etc.
-	 */
-	{
-	 movei  r33, 0
-	 move   r32, sp
-	}
-
-	/* Get base of stack in r32. */
-	EXTRACT_THREAD_INFO(r32)
-
-.Lretry_work_pending:
-	/*
 	 * Disable interrupts so as to make sure we don't
 	 * miss an interrupt that sets any of the thread flags (like
 	 * need_resched or sigpending) between sampling and the iret.
@@ -902,33 +888,28 @@ STD_ENTRY(interrupt_return)
 	IRQ_DISABLE(r20, r21)
 	TRACE_IRQS_OFF  /* Note: clobbers registers r0-r29 */
 
-
-	/* Check to see if there is any work to do before returning to user. */
+	/*
+	 * See if there are any work items (including single-shot items)
+	 * to do.  If so, save the callee-save registers to pt_regs
+	 * and then dispatch to C code.
+	 */
+	move    r21, sp
+	EXTRACT_THREAD_INFO(r21)
 	{
-	 addi   r29, r32, THREAD_INFO_FLAGS_OFFSET
-	 moveli r1, hw1_last(_TIF_ALLWORK_MASK)
+	 addi   r22, r21, THREAD_INFO_FLAGS_OFFSET
+	 moveli r20, hw1_last(_TIF_ALLWORK_MASK)
 	}
 	{
-	 ld     r29, r29
-	 shl16insli r1, r1, hw0(_TIF_ALLWORK_MASK)
+	 ld     r22, r22
+	 shl16insli r20, r20, hw0(_TIF_ALLWORK_MASK)
 	}
-	and     r1, r29, r1
-	beqzt   r1, .Lrestore_all
-
-	/*
-	 * Make sure we have all the registers saved for signal
-	 * handling or notify-resume.  Call out to C code to figure out
-	 * exactly what we need to do for each flag bit, then if
-	 * necessary, reload the flags and recheck.
-	 */
+	and     r1, r22, r20
 	{
 	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
-	 bnez   r33, 1f
+	 beqzt  r1, .Lrestore_all
 	}
 	push_extra_callee_saves r0
-	movei   r33, 1
-1:	jal do_work_pending
-	bnez    r0, .Lretry_work_pending
+	jal     prepare_exit_to_usermode
 
 	/*
 	 * In the NMI case we
@@ -1411,7 +1392,7 @@ STD_ENTRY(ret_from_kernel_thread)
 	FEEDBACK_REENTER(ret_from_kernel_thread)
 	{
 	 movei  r30, 0               /* not an NMI */
-	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	 j      interrupt_return
 	}
 	STD_ENDPROC(ret_from_kernel_thread)
 
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 7d5769310bef..b5f30d376ce1 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -462,54 +462,57 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
 
 /*
  * This routine is called on return from interrupt if any of the
- * TIF_WORK_MASK flags are set in thread_info->flags.  It is
- * entered with interrupts disabled so we don't miss an event
- * that modified the thread_info flags.  If any flag is set, we
- * handle it and return, and the calling assembly code will
- * re-disable interrupts, reload the thread flags, and call back
- * if more flags need to be handled.
- *
- * We return whether we need to check the thread_info flags again
- * or not.  Note that we don't clear TIF_SINGLESTEP here, so it's
- * important that it be tested last, and then claim that we don't
- * need to recheck the flags.
+ * TIF_ALLWORK_MASK flags are set in thread_info->flags.  It is
+ * entered with interrupts disabled so we don't miss an event that
+ * modified the thread_info flags.  We loop until all the tested flags
+ * are clear.  Note that the function is called on certain conditions
+ * that are not listed in the loop condition here (e.g. SINGLESTEP)
+ * which guarantees we will do those things once, and redo them if any
+ * of the other work items is re-done, but won't continue looping if
+ * all the other work is done.
  */
-int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
+void prepare_exit_to_usermode(struct pt_regs *regs, u32 thread_info_flags)
 {
-	/* If we enter in kernel mode, do nothing and exit the caller loop. */
-	if (!user_mode(regs))
-		return 0;
+	if (WARN_ON(!user_mode(regs)))
+		return;
 
-	user_exit();
+	do {
+		local_irq_enable();
 
-	/* Enable interrupts; they are disabled again on return to caller. */
-	local_irq_enable();
+		if (thread_info_flags & _TIF_NEED_RESCHED)
+			schedule();
 
-	if (thread_info_flags & _TIF_NEED_RESCHED) {
-		schedule();
-		return 1;
-	}
 #if CHIP_HAS_TILE_DMA()
-	if (thread_info_flags & _TIF_ASYNC_TLB) {
-		do_async_page_fault(regs);
-		return 1;
-	}
+		if (thread_info_flags & _TIF_ASYNC_TLB)
+			do_async_page_fault(regs);
 #endif
-	if (thread_info_flags & _TIF_SIGPENDING) {
-		do_signal(regs);
-		return 1;
-	}
-	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
-		clear_thread_flag(TIF_NOTIFY_RESUME);
-		tracehook_notify_resume(regs);
-		return 1;
-	}
-	if (thread_info_flags & _TIF_SINGLESTEP)
+
+		if (thread_info_flags & _TIF_SIGPENDING)
+			do_signal(regs);
+
+		if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+			clear_thread_flag(TIF_NOTIFY_RESUME);
+			tracehook_notify_resume(regs);
+		}
+
+		local_irq_disable();
+		thread_info_flags = READ_ONCE(current_thread_info()->flags);
+
+	} while (thread_info_flags & _TIF_WORK_MASK);
+
+	if (thread_info_flags & _TIF_SINGLESTEP) {
 		single_step_once(regs);
+#ifndef __tilegx__
+		/*
+		 * FIXME: on tilepro, since we enable interrupts in
+		 * this routine, it's possible that we miss a signal
+		 * or other asynchronous event.
+		 */
+		local_irq_disable();
+#endif
+	}
 
 	user_enter();
-
-	return 0;
 }
 
 unsigned long get_wchan(struct task_struct *p)
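The FIXME in the singlestep block above is the tilepro issue called
out in the commit message.  The "prepare"/"trigger" split proposed
there might look roughly like the sketch below; single_step_prepare()
and single_step_trigger() are hypothetical names, and nothing here is
part of this patch:

    	do {
    		local_irq_enable();
    		/* ...existing work items... */
    		if (thread_info_flags & _TIF_SINGLESTEP)
    			single_step_prepare(regs);	/* safe with irqs on */
    		local_irq_disable();
    		thread_info_flags = READ_ONCE(current_thread_info()->flags);
    	} while (thread_info_flags & _TIF_WORK_MASK);
    
    	/* Arm the single-step machinery last, with interrupts still
    	 * off, so no signal or other asynchronous event can slip in. */
    	if (thread_info_flags & _TIF_SINGLESTEP)
    		single_step_trigger(regs);
    
    	user_enter();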