author     Roman Zippel <zippel@linux-m68k.org>       2005-11-13 19:06:59 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>      2005-11-13 21:14:14 -0500
commit     3b66a1edb01b82269a668a478625765b1fa4936f
tree       417ead0264e34fdd1ec345a26d8670427be4829c /arch/m68k/kernel/entry.S
parent     abd03753bd1532c05eb13231569a5257b007e29c
[PATCH] m68k: convert thread flags to use bit fields
Remove the task_work structure, use the standard thread flag functions, and use
shifts in entry.S to test the thread flags. Add a few local labels to entry.S
to allow gas to generate short jumps.
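
As a rough illustration of the layout those shift tests depend on, the C sketch
below lists the relevant thread flag bits and mirrors the dispatch order of
syscall_exit_work. It is not part of this patch: the TIF_* bit numbers follow
the usual m68k thread_info flag layout but should be treated as illustrative,
and the helper syscall_exit_action() is purely hypothetical.

/*
 * Hedged sketch only: bit numbers assumed from the m68k thread_info flag
 * layout, not taken verbatim from this patch.  On big-endian m68k the
 * 32-bit flags word keeps the syscall-work flags in byte 2 (bits 15..8)
 * and the return-to-user work flags in byte 3 (bits 7..0), so entry.S can
 * test a whole group with one tstb/movew/moveb and then peel off single
 * flags with shifts into the carry (jcs) or sign (jmi) condition codes.
 */
#define TIF_SIGPENDING      6	/* byte 3; lslb #1 (or lslw #1 + lslw #8) -> sign */
#define TIF_NEED_RESCHED    7	/* byte 3; the fall-through case -> schedule      */
#define TIF_DELAYED_TRACE  14	/* byte 2; lslw #1 -> sign flag, jmi              */
#define TIF_SYSCALL_TRACE  15	/* byte 2; lslw #1 -> carry flag, jcs             */

/* Hypothetical C mirror of the dispatch order in syscall_exit_work. */
static const char *syscall_exit_action(unsigned short work)	/* flag bits 15..0 */
{
	if (work & (1 << TIF_SYSCALL_TRACE))	/* lslw #1, jcs do_trace_exit    */
		return "do_trace_exit";
	if (work & (1 << TIF_DELAYED_TRACE))	/* lslw #1, jmi do_delayed_trace */
		return "do_delayed_trace";
	if (work & (1 << TIF_SIGPENDING))	/* lslw #8, jmi do_signal_return */
		return "do_signal_return";
	return "schedule";			/* e.g. TIF_NEED_RESCHED pending */
}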
Finally, it changes a number of inline functions in thread_info.h into macros
to delay the current_thread_info() usage, which on m68k requires a structure
(task_struct) that is not yet defined at this point.
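
A minimal, self-contained sketch of that inline-to-macro trick follows. All
names in it (example_flags, example_thread_info, read_example_flags) are
hypothetical stand-ins, not the helpers actually touched in thread_info.h; the
point is only that a macro defers the member access to the call site, where
struct task_struct is already a complete type, while an inline function would
have to compile it against a mere forward declaration.

/*
 * Hypothetical names throughout -- this only illustrates the technique
 * described above, not the exact helpers changed in thread_info.h.
 */
struct task_struct;			/* only forward-declared at this point */

/*
 * An inline function would not build here, because its body is compiled
 * immediately and needs the complete type:
 *
 *	static inline unsigned long example_flags(struct task_struct *tsk)
 *	{
 *		return tsk->example_info.flags;		<-- incomplete type
 *	}
 */

/* A macro is expanded only at the call site, after the full definition of
 * struct task_struct has been seen, so the header can stay this early.   */
#define example_flags(tsk)	((tsk)->example_info.flags)

/* Later, in a translation unit that has the complete definitions: */
struct example_thread_info { unsigned long flags; };
struct task_struct { struct example_thread_info example_info; };

static unsigned long read_example_flags(struct task_struct *tsk)
{
	return example_flags(tsk);	/* member access is compiled here */
}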
Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Cc: Al Viro <viro@parcelfarce.linux.theplanet.co.uk>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/m68k/kernel/entry.S')
-rw-r--r--   arch/m68k/kernel/entry.S   78
1 file changed, 40 insertions, 38 deletions
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index 23ca60a45552..320fde05dc63 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -44,9 +44,7 @@
 
 #include <asm/asm-offsets.h>
 
-.globl system_call, buserr, trap
-.globl resume, ret_from_exception
-.globl ret_from_signal
+.globl system_call, buserr, trap, resume
 .globl inthandler, sys_call_table
 .globl sys_fork, sys_clone, sys_vfork
 .globl ret_from_interrupt, bad_interrupt
@@ -58,7 +56,7 @@ ENTRY(buserr)
 	movel	%sp,%sp@-		| stack frame pointer argument
 	bsrl	buserr_c
 	addql	#4,%sp
-	jra	ret_from_exception
+	jra	.Lret_from_exception
 
 ENTRY(trap)
 	SAVE_ALL_INT
@@ -66,7 +64,7 @@ ENTRY(trap)
 	movel	%sp,%sp@-		| stack frame pointer argument
 	bsrl	trap_c
 	addql	#4,%sp
-	jra	ret_from_exception
+	jra	.Lret_from_exception
 
 | After a fork we jump here directly from resume,
 | so that %d1 contains the previous task
@@ -75,30 +73,31 @@ ENTRY(ret_from_fork)
 	movel	%d1,%sp@-
 	jsr	schedule_tail
 	addql	#4,%sp
-	jra	ret_from_exception
+	jra	.Lret_from_exception
 
-badsys:
-	movel	#-ENOSYS,%sp@(PT_D0)
-	jra	ret_from_exception
-
-do_trace:
+do_trace_entry:
 	movel	#-ENOSYS,%sp@(PT_D0)	| needed for strace
 	subql	#4,%sp
 	SAVE_SWITCH_STACK
 	jbsr	syscall_trace
 	RESTORE_SWITCH_STACK
 	addql	#4,%sp
-	movel	%sp@(PT_ORIG_D0),%d1
-	movel	#-ENOSYS,%d0
-	cmpl	#NR_syscalls,%d1
-	jcc	1f
-	jbsr	@(sys_call_table,%d1:l:4)@(0)
-1:	movel	%d0,%sp@(PT_D0)		| save the return value
-	subql	#4,%sp			| dummy return address
+	movel	%sp@(PT_ORIG_D0),%d0
+	cmpl	#NR_syscalls,%d0
+	jcs	syscall
+badsys:
+	movel	#-ENOSYS,%sp@(PT_D0)
+	jra	ret_from_syscall
+
+do_trace_exit:
+	subql	#4,%sp
 	SAVE_SWITCH_STACK
 	jbsr	syscall_trace
+	RESTORE_SWITCH_STACK
+	addql	#4,%sp
+	jra	.Lret_from_exception
 
-ret_from_signal:
+ENTRY(ret_from_signal)
 	RESTORE_SWITCH_STACK
 	addql	#4,%sp
 	/* on 68040 complete pending writebacks if any */
@@ -111,7 +110,7 @@ ret_from_signal:
 	addql	#4,%sp
 1:
 #endif
-	jra	ret_from_exception
+	jra	.Lret_from_exception
 
 ENTRY(system_call)
 	SAVE_ALL_SYS
@@ -120,30 +119,34 @@ ENTRY(system_call)
 	| save top of frame
 	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
 
-	tstb	%curptr@(TASK_SYSCALL_TRACE)
-	jne	do_trace
+	| syscall trace?
+	tstb	%curptr@(TASK_INFO+TINFO_FLAGS+2)
+	jmi	do_trace_entry
 	cmpl	#NR_syscalls,%d0
 	jcc	badsys
+syscall:
 	jbsr	@(sys_call_table,%d0:l:4)@(0)
 	movel	%d0,%sp@(PT_D0)		| save the return value
-
+ret_from_syscall:
 	|oriw	#0x0700,%sr
-	movel	%curptr@(TASK_WORK),%d0
+	movew	%curptr@(TASK_INFO+TINFO_FLAGS+2),%d0
 	jne	syscall_exit_work
 1:	RESTORE_ALL
 
 syscall_exit_work:
 	btst	#5,%sp@(PT_SR)		| check if returning to kernel
 	bnes	1b			| if so, skip resched, signals
-	tstw	%d0
-	jeq	do_signal_return
-	tstb	%d0
-	jne	do_delayed_trace
-
+	lslw	#1,%d0
+	jcs	do_trace_exit
+	jmi	do_delayed_trace
+	lslw	#8,%d0
+	jmi	do_signal_return
 	pea	resume_userspace
-	jmp	schedule
+	jra	schedule
+
 
-ret_from_exception:
+ENTRY(ret_from_exception)
+.Lret_from_exception:
 	btst	#5,%sp@(PT_SR)		| check if returning to kernel
 	bnes	1f			| if so, skip resched, signals
 	| only allow interrupts when we are really the last one on the
@@ -152,19 +155,18 @@ ret_from_exception:
 	andw	#ALLOWINT,%sr
 
 resume_userspace:
-	movel	%curptr@(TASK_WORK),%d0
-	lsrl	#8,%d0
+	moveb	%curptr@(TASK_INFO+TINFO_FLAGS+3),%d0
 	jne	exit_work
 1:	RESTORE_ALL
 
 exit_work:
 	| save top of frame
 	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
-	tstb	%d0
-	jeq	do_signal_return
-
+	lslb	#1,%d0
+	jmi	do_signal_return
 	pea	resume_userspace
-	jmp	schedule
+	jra	schedule
+
 
 do_signal_return:
 	|andw	#ALLOWINT,%sr
@@ -254,7 +256,7 @@ ret_from_interrupt:
 
 	/* check if we need to do software interrupts */
 	tstl	irq_stat+CPUSTAT_SOFTIRQ_PENDING
-	jeq	ret_from_exception
+	jeq	.Lret_from_exception
 	pea	ret_from_exception
 	jra	do_softirq
 