author		Chris Metcalf <cmetcalf@tilera.com>	2011-05-02 14:50:06 -0400
committer	Chris Metcalf <cmetcalf@tilera.com>	2011-05-02 18:53:35 -0400
commit		313ce674d3cbc2d48ed34a9462427920ac54f4ad (patch)
tree		7478150a1268c79851f1a366158c59606330e700 /arch/tile
parent		93013a0f533fb3dd6875ca670d8e0bb4166a796e (diff)
arch/tile: support TIF_NOTIFY_RESUME
This support is required for CONFIG_KEYS, NFSv4 kernel DNS, etc.
The change is slightly more complex than the minimal thing, since
I took advantage of having to go into the assembly code to just
move a bunch of stuff into C code: specifically, the schedule(),
do_async_page_fault(), do_signal(), and single_step_once() support,
in addition to the TIF_NOTIFY_RESUME support.
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
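[Editorial note] The patch below replaces the old assembly-only flag handling with a loop: the interrupt-return path calls a C routine once per pending flag and repeats until the routine reports that nothing is left to do. As a reading aid, here is a minimal, self-contained user-space sketch of that dispatch-and-recheck pattern. The flag values and handler bodies are illustrative stand-ins, not the kernel's actual definitions; the real implementation is do_work_pending() in arch/tile/kernel/process.c, shown in the diff.

/* Sketch only: models the "handle one flag, then recheck" protocol. */
#include <stdio.h>

#define FLAG_NEED_RESCHED   (1u << 1)   /* stand-in for _TIF_NEED_RESCHED */
#define FLAG_SIGPENDING     (1u << 2)   /* stand-in for _TIF_SIGPENDING */
#define FLAG_NOTIFY_RESUME  (1u << 8)   /* stand-in for _TIF_NOTIFY_RESUME */

/* Returns nonzero if the caller should reload the flags and call again. */
static int work_pending_sketch(unsigned int *flags)
{
	if (*flags & FLAG_NEED_RESCHED) {
		*flags &= ~FLAG_NEED_RESCHED;   /* stand-in for schedule() */
		return 1;
	}
	if (*flags & FLAG_SIGPENDING) {
		*flags &= ~FLAG_SIGPENDING;     /* stand-in for do_signal() */
		return 1;
	}
	if (*flags & FLAG_NOTIFY_RESUME) {
		*flags &= ~FLAG_NOTIFY_RESUME;  /* stand-in for tracehook_notify_resume() */
		return 1;
	}
	return 0;                               /* nothing left; return to user */
}

int main(void)
{
	unsigned int flags = FLAG_NEED_RESCHED | FLAG_NOTIFY_RESUME;

	/* The assembly caller loops like this: handle one flag, reload, recheck. */
	while (work_pending_sketch(&flags))
		printf("handled one flag, remaining: %#x\n", flags);
	return 0;
}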
Diffstat (limited to 'arch/tile')
-rw-r--r--	arch/tile/include/asm/processor.h	2
-rw-r--r--	arch/tile/include/asm/thread_info.h	5
-rw-r--r--	arch/tile/include/asm/traps.h	4
-rw-r--r--	arch/tile/kernel/intvec_32.S	114
-rw-r--r--	arch/tile/kernel/process.c	48
-rw-r--r--	arch/tile/kernel/single_step.c	8
-rw-r--r--	arch/tile/mm/fault.c	6
7 files changed, 87 insertions(+), 100 deletions(-)
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index e6889474038a..d6b43ddfcc04 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -215,6 +215,8 @@ static inline void release_thread(struct task_struct *dead_task)
 
 extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
 
+extern int do_work_pending(struct pt_regs *regs, u32 flags);
+
 
 /*
  * Return saved (kernel) PC of a blocked thread.
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index 3405b52853b8..bc4f562bd459 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -125,6 +125,7 @@ extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
 #define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
 #define TIF_SECCOMP		6	/* secure computing */
 #define TIF_MEMDIE		7	/* OOM killer at work */
+#define TIF_NOTIFY_RESUME	8	/* callback before returning to user */
 
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
@@ -134,10 +135,12 @@ extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 #define _TIF_MEMDIE		(1<<TIF_MEMDIE)
+#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 
 /* Work to do on any return to user space. */
 #define _TIF_ALLWORK_MASK \
-	(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SINGLESTEP|_TIF_ASYNC_TLB)
+	(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SINGLESTEP|\
+	_TIF_ASYNC_TLB|_TIF_NOTIFY_RESUME)
 
 /*
  * Thread-synchronous status.
diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h
index d06e35f57201..5f20f920f932 100644
--- a/arch/tile/include/asm/traps.h
+++ b/arch/tile/include/asm/traps.h
@@ -15,10 +15,14 @@
 #ifndef _ASM_TILE_TRAPS_H
 #define _ASM_TILE_TRAPS_H
 
+#include <arch/chip.h>
+
 /* mm/fault.c */
 void do_page_fault(struct pt_regs *, int fault_num,
 		   unsigned long address, unsigned long write);
+#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
 void do_async_page_fault(struct pt_regs *);
+#endif
 
 #ifndef __tilegx__
 /*
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index fffcfa6b3a62..f35c3124fa62 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -851,14 +851,27 @@ STD_ENTRY(interrupt_return)
 	/* Check to see if there is any work to do before returning to user. */
 	{
 	 addi	r29, r32, THREAD_INFO_FLAGS_OFFSET
-	 moveli	r28, lo16(_TIF_ALLWORK_MASK)
+	 moveli	r1, lo16(_TIF_ALLWORK_MASK)
 	}
 	{
 	 lw	r29, r29
-	 auli	r28, r28, ha16(_TIF_ALLWORK_MASK)
+	 auli	r1, r1, ha16(_TIF_ALLWORK_MASK)
 	}
-	and	r28, r29, r28
-	bnz	r28, .Lwork_pending
+	and	r1, r29, r1
+	bzt	r1, .Lrestore_all
+
+	/*
+	 * Make sure we have all the registers saved for signal
+	 * handling or single-step.  Call out to C code to figure out
+	 * exactly what we need to do for each flag bit, then if
+	 * necessary, reload the flags and recheck.
+	 */
+	push_extra_callee_saves r0
+	{
+	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
+	 jal	do_work_pending
+	}
+	bnz	r0, .Lresume_userspace
 
 	/*
 	 * In the NMI case we
@@ -1099,99 +1112,6 @@ STD_ENTRY(interrupt_return)
 	pop_reg r50
 	pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51)
 	j .Lcontinue_restore_regs
-
-.Lwork_pending:
-	/* Mask the reschedule flag */
-	andi	r28, r29, _TIF_NEED_RESCHED
-
-	{
-	 /*
-	  * If the NEED_RESCHED flag is called, we call schedule(), which
-	  * may drop this context right here and go do something else.
-	  * On return, jump back to .Lresume_userspace and recheck.
-	  */
-	 bz	r28, .Lasync_tlb
-
-	 /* Mask the async-tlb flag */
-	 andi	r28, r29, _TIF_ASYNC_TLB
-	}
-
-	jal	schedule
-	FEEDBACK_REENTER(interrupt_return)
-
-	/* Reload the flags and check again */
-	j	.Lresume_userspace
-
-.Lasync_tlb:
-	{
-	 bz	r28, .Lneed_sigpending
-
-	 /* Mask the sigpending flag */
-	 andi	r28, r29, _TIF_SIGPENDING
-	}
-
-	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
-	jal	do_async_page_fault
-	FEEDBACK_REENTER(interrupt_return)
-
-	/*
-	 * Go restart the "resume userspace" process.  We may have
-	 * fired a signal, and we need to disable interrupts again.
-	 */
-	j	.Lresume_userspace
-
-.Lneed_sigpending:
-	/*
-	 * At this point we are either doing signal handling or single-step,
-	 * so either way make sure we have all the registers saved.
-	 */
-	push_extra_callee_saves r0
-
-	{
-	 /* If no signal pending, skip to singlestep check */
-	 bz	r28, .Lneed_singlestep
-
-	 /* Mask the singlestep flag */
-	 andi	r28, r29, _TIF_SINGLESTEP
-	}
-
-	jal	do_signal
-	FEEDBACK_REENTER(interrupt_return)
-
-	/* Reload the flags and check again */
-	j	.Lresume_userspace
-
-.Lneed_singlestep:
-	{
-	 /* Get a pointer to the EX1 field */
-	 PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
-
-	 /* If we get here, our bit must be set. */
-	 bz	r28, .Lwork_confusion
-	}
-	/* If we are in priv mode, don't single step */
-	lw	r28, r29
-	andi	r28, r28, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
-	bnz	r28, .Lrestore_all
-
-	/* Allow interrupts within the single step code */
-	TRACE_IRQS_ON  /* Note: clobbers registers r0-r29 */
-	IRQ_ENABLE(r20, r21)
-
-	/* try to single-step the current instruction */
-	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
-	jal	single_step_once
-	FEEDBACK_REENTER(interrupt_return)
-
-	/* Re-disable interrupts.  TRACE_IRQS_OFF in .Lrestore_all. */
-	IRQ_DISABLE(r20,r21)
-
-	j	.Lrestore_all
-
-.Lwork_confusion:
-	move	r0, r28
-	panic	"thread_info allwork flags unhandled on userspace resume: %#x"
-
 STD_ENDPROC(interrupt_return)
 
 /*
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index d0065103eb7b..8e8633490f8b 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -25,10 +25,13 @@
 #include <linux/hardirq.h>
 #include <linux/syscalls.h>
 #include <linux/kernel.h>
+#include <linux/tracehook.h>
+#include <linux/signal.h>
 #include <asm/system.h>
 #include <asm/stack.h>
 #include <asm/homecache.h>
 #include <asm/syscalls.h>
+#include <asm/traps.h>
 #ifdef CONFIG_HARDWALL
 #include <asm/hardwall.h>
 #endif
@@ -546,6 +549,51 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
 	return __switch_to(prev, next, next_current_ksp0(next));
 }
 
+/*
+ * This routine is called on return from interrupt if any of the
+ * TIF_WORK_MASK flags are set in thread_info->flags.  It is
+ * entered with interrupts disabled so we don't miss an event
+ * that modified the thread_info flags.  If any flag is set, we
+ * handle it and return, and the calling assembly code will
+ * re-disable interrupts, reload the thread flags, and call back
+ * if more flags need to be handled.
+ *
+ * We return whether we need to check the thread_info flags again
+ * or not.  Note that we don't clear TIF_SINGLESTEP here, so it's
+ * important that it be tested last, and then claim that we don't
+ * need to recheck the flags.
+ */
+int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
+{
+	if (thread_info_flags & _TIF_NEED_RESCHED) {
+		schedule();
+		return 1;
+	}
+#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
+	if (thread_info_flags & _TIF_ASYNC_TLB) {
+		do_async_page_fault(regs);
+		return 1;
+	}
+#endif
+	if (thread_info_flags & _TIF_SIGPENDING) {
+		do_signal(regs);
+		return 1;
+	}
+	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(regs);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
+		return 1;
+	}
+	if (thread_info_flags & _TIF_SINGLESTEP) {
+		if ((regs->ex1 & SPR_EX_CONTEXT_1_1__PL_MASK) == 0)
+			single_step_once(regs);
+		return 0;
+	}
+	panic("work_pending: bad flags %#x\n", thread_info_flags);
+}
+
 /* Note there is an implicit fifth argument if (clone_flags & CLONE_SETTLS). */
 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
 		void __user *, parent_tidptr, void __user *, child_tidptr,
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 84a729e06ec4..86df5a239b70 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -318,6 +318,14 @@ void single_step_once(struct pt_regs *regs)
 		"    .popsection\n"
 	);
 
+	/*
+	 * Enable interrupts here to allow touching userspace and the like.
+	 * The callers expect this: do_trap() already has interrupts
+	 * enabled, and do_work_pending() handles functions that enable
+	 * interrupts internally.
+	 */
+	local_irq_enable();
+
 	if (state == NULL) {
 		/* allocate a page of writable, executable memory */
 		state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 51f8663bf074..24ca54a0703b 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -732,6 +732,7 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
 		panic("Bad fault number %d in do_page_fault", fault_num);
 	}
 
+#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
 	if (EX1_PL(regs->ex1) != USER_PL) {
 		struct async_tlb *async;
 		switch (fault_num) {
@@ -775,6 +776,7 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
 			return;
 		}
 	}
+#endif
 
 	handle_page_fault(regs, fault_num, is_page_fault, address, write);
 }
@@ -801,8 +803,6 @@ static void handle_async_page_fault(struct pt_regs *regs,
 			       async->address, async->is_write);
 	}
 }
-#endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */
-
 
 /*
  * This routine effectively re-issues asynchronous page faults
@@ -824,6 +824,8 @@ void do_async_page_fault(struct pt_regs *regs)
 		handle_async_page_fault(regs, &current->thread.sn_async_tlb);
 #endif
 }
+#endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */
+
 
 void vmalloc_sync_all(void)
 {