author      James Hogan <james.hogan@imgtec.com>    2012-10-09 05:54:54 -0400
committer   James Hogan <james.hogan@imgtec.com>    2013-03-02 15:09:49 -0500
commit      44dea393cf98a09b4b9f00dc3dd7e2c211f4b0e8
tree        a2d5aed0af6fcf05b9ea882a4915ce446cffb6a4 /arch/metag
parent      26025bbfbba33a9425be1b89eccb4664ea4c17b6
metag: Scheduling/Process management
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Diffstat (limited to 'arch/metag')
-rw-r--r--   arch/metag/include/asm/thread_info.h   155
-rw-r--r--   arch/metag/kernel/process.c             461
2 files changed, 616 insertions(+), 0 deletions(-)
diff --git a/arch/metag/include/asm/thread_info.h b/arch/metag/include/asm/thread_info.h
new file mode 100644
index 000000000000..0ecd34d8b5f6
--- /dev/null
+++ b/arch/metag/include/asm/thread_info.h
@@ -0,0 +1,155 @@
/* thread_info.h: Meta low-level thread information
 *
 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
 *
 * Meta port by Imagination Technologies
 */

#ifndef _ASM_THREAD_INFO_H
#define _ASM_THREAD_INFO_H

#include <linux/compiler.h>
#include <asm/page.h>

#ifndef __ASSEMBLY__
#include <asm/processor.h>
#endif

/*
 * low level task data that entry.S needs immediate access to
 * - this struct should fit entirely inside of one cache line
 * - this struct shares the supervisor stack pages
 * - if the contents of this structure are changed, the assembly constants must
 *   also be changed
 */
#ifndef __ASSEMBLY__

/* This must be 8 byte aligned so we can ensure stack alignment. */
struct thread_info {
        struct task_struct *task;       /* main task structure */
        struct exec_domain *exec_domain;        /* execution domain */
        unsigned long flags;            /* low level flags */
        unsigned long status;           /* thread-synchronous flags */
        u32 cpu;                        /* current CPU */
        int preempt_count;              /* 0 => preemptable, <0 => BUG */

        mm_segment_t addr_limit;        /* thread address space */
        struct restart_block restart_block;

        u8 supervisor_stack[0];
};

#else /* !__ASSEMBLY__ */

#include <generated/asm-offsets.h>

#endif

#define PREEMPT_ACTIVE          0x10000000

#ifdef CONFIG_4KSTACKS
#define THREAD_SHIFT            12
#else
#define THREAD_SHIFT            13
#endif

#if THREAD_SHIFT >= PAGE_SHIFT
#define THREAD_SIZE_ORDER       (THREAD_SHIFT - PAGE_SHIFT)
#else
#define THREAD_SIZE_ORDER       0
#endif

#define THREAD_SIZE             (PAGE_SIZE << THREAD_SIZE_ORDER)

#define STACK_WARN              (THREAD_SIZE/8)
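
To make the stack-size arithmetic above concrete: with 4 KiB pages and CONFIG_4KSTACKS unset, THREAD_SHIFT is 13, so kernel stacks span two pages. A minimal stand-alone sketch of the same arithmetic (not part of the patch; PAGE_SHIFT == 12 is an assumption):

#include <assert.h>

#define PAGE_SHIFT        12                      /* assumed 4 KiB pages */
#define PAGE_SIZE         (1UL << PAGE_SHIFT)
#define THREAD_SHIFT      13                      /* CONFIG_4KSTACKS unset */
#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
#define THREAD_SIZE       (PAGE_SIZE << THREAD_SIZE_ORDER)

int main(void)
{
        assert(THREAD_SIZE_ORDER == 1);  /* order-1: two contiguous pages */
        assert(THREAD_SIZE == 8192);     /* 8 KiB kernel stacks */
        assert(THREAD_SIZE / 8 == 1024); /* STACK_WARN threshold in bytes */
        return 0;
}
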
/*
 * macros/functions for gaining access to the thread information structure
 */
#ifndef __ASSEMBLY__

#define INIT_THREAD_INFO(tsk)                   \
{                                               \
        .task           = &tsk,                 \
        .exec_domain    = &default_exec_domain, \
        .flags          = 0,                    \
        .cpu            = 0,                    \
        .preempt_count  = INIT_PREEMPT_COUNT,   \
        .addr_limit     = KERNEL_DS,            \
        .restart_block = {                      \
                .fn = do_no_restart_syscall,    \
        },                                      \
}

#define init_thread_info        (init_thread_union.thread_info)
#define init_stack              (init_thread_union.stack)

/* how to get the current stack pointer from C */
register unsigned long current_stack_pointer asm("A0StP") __used;

/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
        return (struct thread_info *)(current_stack_pointer &
                                      ~(THREAD_SIZE - 1));
}

#define __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
        return addr == (void *) (((unsigned long) addr & ~(THREAD_SIZE - 1))
                                 + sizeof(struct thread_info));
}
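
current_thread_info() and kstack_end() both rely on kernel stacks being THREAD_SIZE-aligned: masking the low bits off any address within the stack yields the stack base, which is where struct thread_info lives. A small user-space sketch of the same masking trick (the addresses are made up):

#include <assert.h>
#include <stdint.h>

#define THREAD_SIZE 8192UL      /* assumed, as derived above */

/* Recover the THREAD_SIZE-aligned stack base from any in-stack address. */
static uintptr_t stack_base(uintptr_t sp)
{
        return sp & ~(THREAD_SIZE - 1);
}

int main(void)
{
        uintptr_t base = 0x40002000UL;  /* hypothetical aligned stack base */
        uintptr_t sp = base + 0x6f8;    /* arbitrary pointer into the stack */

        assert(stack_base(sp) == base);
        return 0;
}
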

#endif

/*
 * thread information flags
 * - these are process state flags that various assembly files may need to
 *   access
 * - pending work-to-be-done flags are in LSW
 * - other flags in MSW
 */
#define TIF_SYSCALL_TRACE       0       /* syscall trace active */
#define TIF_SIGPENDING          1       /* signal pending */
#define TIF_NEED_RESCHED        2       /* rescheduling necessary */
#define TIF_SINGLESTEP          3       /* restore singlestep on return to user
                                           mode */
#define TIF_SYSCALL_AUDIT       4       /* syscall auditing active */
#define TIF_SECCOMP             5       /* secure computing */
#define TIF_RESTORE_SIGMASK     6       /* restore signal mask in do_signal() */
#define TIF_NOTIFY_RESUME       7       /* callback before returning to user */
#define TIF_POLLING_NRFLAG      8       /* true if poll_idle() is polling
                                           TIF_NEED_RESCHED */
#define TIF_MEMDIE              9       /* is terminating due to OOM killer */
#define TIF_SYSCALL_TRACEPOINT  10      /* syscall tracepoint instrumentation */


#define _TIF_SYSCALL_TRACE      (1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING         (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED       (1<<TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP         (1<<TIF_SINGLESTEP)
#define _TIF_SYSCALL_AUDIT      (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP            (1<<TIF_SECCOMP)
#define _TIF_NOTIFY_RESUME      (1<<TIF_NOTIFY_RESUME)
#define _TIF_RESTORE_SIGMASK    (1<<TIF_RESTORE_SIGMASK)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)

/* work to do in syscall trace */
#define _TIF_WORK_SYSCALL_MASK  (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
                                 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
                                 _TIF_SYSCALL_TRACEPOINT)

/* work to do on any return to u-space */
#define _TIF_ALLWORK_MASK       (_TIF_SYSCALL_TRACE | _TIF_SIGPENDING | \
                                 _TIF_NEED_RESCHED | _TIF_SYSCALL_AUDIT | \
                                 _TIF_SINGLESTEP | _TIF_RESTORE_SIGMASK | \
                                 _TIF_NOTIFY_RESUME)

/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK          (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \
                                 _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP))

#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)

#endif /* _ASM_THREAD_INFO_H */
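
The grouped _TIF_* masks exist so that entry code can test several kinds of pending work with a single AND rather than one branch per flag. A hedged user-space sketch of that dispatch pattern (bit numbers copied from the header; the handler bodies are hypothetical):

#include <stdio.h>

#define TIF_SIGPENDING    1
#define TIF_NEED_RESCHED  2
#define _TIF_SIGPENDING   (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_WORK         (_TIF_SIGPENDING | _TIF_NEED_RESCHED)

/* One cheap test decides whether any return-to-user work is pending. */
static void return_to_user(unsigned long flags)
{
        if (!(flags & _TIF_WORK))
                return;                 /* fast path: nothing to do */
        if (flags & _TIF_NEED_RESCHED)
                printf("would call schedule()\n");
        if (flags & _TIF_SIGPENDING)
                printf("would deliver pending signals\n");
}

int main(void)
{
        return_to_user(_TIF_SIGPENDING | _TIF_NEED_RESCHED);
        return 0;
}
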
diff --git a/arch/metag/kernel/process.c b/arch/metag/kernel/process.c
new file mode 100644
index 000000000000..c6efe62e5b76
--- /dev/null
+++ b/arch/metag/kernel/process.c
@@ -0,0 +1,461 @@
/*
 * Copyright (C) 2005,2006,2007,2008,2009,2010,2011 Imagination Technologies
 *
 * This file contains the architecture-dependent parts of process handling.
 *
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/reboot.h>
#include <linux/elfcore.h>
#include <linux/fs.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/pm.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/core_reg.h>
#include <asm/user_gateway.h>
#include <asm/tcm.h>
#include <asm/traps.h>
#include <asm/switch_to.h>

/*
 * Wait for the next interrupt and enable local interrupts
 */
static inline void arch_idle(void)
{
        int tmp;

        /*
         * Quickly jump straight into the interrupt entry point without actually
         * triggering an interrupt. When TXSTATI gets read the processor will
         * block until an interrupt is triggered.
         */
        asm volatile (/* Switch into ISTAT mode */
                      "RTH\n\t"
                      /* Enable local interrupts */
                      "MOV      TXMASKI, %1\n\t"
                      /*
                       * We can't directly "SWAP PC, PCX", so we swap via a
                       * temporary. Essentially we do:
                       *  PCX_new = 1f (the place to continue execution)
                       *  PC = PCX_old
                       */
                      "ADD      %0, CPC0, #(1f-.)\n\t"
                      "SWAP     PCX, %0\n\t"
                      "MOV      PC, %0\n"
                      /* Continue execution here with interrupts enabled */
                      "1:"
                      : "=a" (tmp)
                      : "r" (get_trigger_mask()));
}

void cpu_idle(void)
{
        set_thread_flag(TIF_POLLING_NRFLAG);

        while (1) {
                tick_nohz_idle_enter();
                rcu_idle_enter();

                while (!need_resched()) {
                        /*
                         * We need to disable interrupts here to ensure we don't
                         * miss a wakeup call.
                         */
                        local_irq_disable();
                        if (!need_resched()) {
#ifdef CONFIG_HOTPLUG_CPU
                                if (cpu_is_offline(smp_processor_id()))
                                        cpu_die();
#endif
                                arch_idle();
                        } else {
                                local_irq_enable();
                        }
                }

                rcu_idle_exit();
                tick_nohz_idle_exit();
                schedule_preempt_disabled();
        }
}
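
The local_irq_disable() plus re-check in cpu_idle() closes the classic lost-wakeup window: an interrupt that sets TIF_NEED_RESCHED between the need_resched() test and the sleep would otherwise be missed. A schematic of the hazard, written as C comments rather than Meta-specific code:

/*
 * Without the irq-disabled re-check:
 *
 *   idle loop                          interrupt (work arrives)
 *   ---------                          ------------------------
 *   if (!need_resched())   <- false
 *                                      set TIF_NEED_RESCHED
 *   arch_idle();           <- sleeps despite pending work
 *
 * With interrupts disabled across the second need_resched() test,
 * arch_idle() re-enables them atomically with blocking on TXSTATI,
 * so the wakeup either arrives before the test or terminates the sleep.
 */
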

void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

void (*soc_restart)(char *cmd);
void (*soc_halt)(void);

void machine_restart(char *cmd)
{
        if (soc_restart)
                soc_restart(cmd);
        hard_processor_halt(HALT_OK);
}

void machine_halt(void)
{
        if (soc_halt)
                soc_halt();
        smp_send_stop();
        hard_processor_halt(HALT_OK);
}

void machine_power_off(void)
{
        if (pm_power_off)
                pm_power_off();
        smp_send_stop();
        hard_processor_halt(HALT_OK);
}

#define FLAG_Z 0x8
#define FLAG_N 0x4
#define FLAG_O 0x2
#define FLAG_C 0x1

void show_regs(struct pt_regs *regs)
{
        int i;
        const char *AX0_names[] = {"A0StP", "A0FrP"};
        const char *AX1_names[] = {"A1GbP", "A1LbP"};

        const char *DX0_names[] = {
                "D0Re0",
                "D0Ar6",
                "D0Ar4",
                "D0Ar2",
                "D0FrT",
                "D0.5 ",
                "D0.6 ",
                "D0.7 "
        };

        const char *DX1_names[] = {
                "D1Re0",
                "D1Ar5",
                "D1Ar3",
                "D1Ar1",
                "D1RtP",
                "D1.5 ",
                "D1.6 ",
                "D1.7 "
        };

        pr_info(" pt_regs @ %p\n", regs);
        pr_info(" SaveMask = 0x%04hx\n", regs->ctx.SaveMask);
        pr_info(" Flags = 0x%04hx (%c%c%c%c)\n", regs->ctx.Flags,
                regs->ctx.Flags & FLAG_Z ? 'Z' : 'z',
                regs->ctx.Flags & FLAG_N ? 'N' : 'n',
                regs->ctx.Flags & FLAG_O ? 'O' : 'o',
                regs->ctx.Flags & FLAG_C ? 'C' : 'c');
        pr_info(" TXRPT = 0x%08x\n", regs->ctx.CurrRPT);
        pr_info(" PC = 0x%08x\n", regs->ctx.CurrPC);

        /* AX regs */
        for (i = 0; i < 2; i++) {
                pr_info(" %s = 0x%08x ",
                        AX0_names[i],
                        regs->ctx.AX[i].U0);
                printk(" %s = 0x%08x\n",
                       AX1_names[i],
                       regs->ctx.AX[i].U1);
        }

        if (regs->ctx.SaveMask & TBICTX_XEXT_BIT)
                pr_warn(" Extended state present - AX2.[01] will be WRONG\n");

        /* Special place with AXx.2 */
        pr_info(" A0.2 = 0x%08x ",
                regs->ctx.Ext.AX2.U0);
        printk(" A1.2 = 0x%08x\n",
               regs->ctx.Ext.AX2.U1);

        /* 'extended' AX regs (nominally, just AXx.3) */
        for (i = 0; i < (TBICTX_AX_REGS - 3); i++) {
                pr_info(" A0.%d = 0x%08x ", i + 3, regs->ctx.AX3[i].U0);
                printk(" A1.%d = 0x%08x\n", i + 3, regs->ctx.AX3[i].U1);
        }

        for (i = 0; i < 8; i++) {
                pr_info(" %s = 0x%08x ", DX0_names[i], regs->ctx.DX[i].U0);
                printk(" %s = 0x%08x\n", DX1_names[i], regs->ctx.DX[i].U1);
        }

        show_trace(NULL, (unsigned long *)regs->ctx.AX[0].U0, regs);
}

int copy_thread(unsigned long clone_flags, unsigned long usp,
                unsigned long arg, struct task_struct *tsk)
{
        struct pt_regs *childregs = task_pt_regs(tsk);
        void *kernel_context = ((void *) childregs +
                                sizeof(struct pt_regs));
        unsigned long global_base;

        BUG_ON(((unsigned long)childregs) & 0x7);
        BUG_ON(((unsigned long)kernel_context) & 0x7);

        memset(&tsk->thread.kernel_context, 0,
               sizeof(tsk->thread.kernel_context));

        tsk->thread.kernel_context = __TBISwitchInit(kernel_context,
                                                     ret_from_fork,
                                                     0, 0);

        if (unlikely(tsk->flags & PF_KTHREAD)) {
                /*
                 * Make sure we don't leak any kernel data to child's regs
                 * if kernel thread becomes a userspace thread in the future
                 */
                memset(childregs, 0, sizeof(struct pt_regs));

                global_base = __core_reg_get(A1GbP);
                childregs->ctx.AX[0].U1 = (unsigned long) global_base;
                childregs->ctx.AX[0].U0 = (unsigned long) kernel_context;
                /* Set D1Ar1=arg and D1RtP=usp (fn) */
                childregs->ctx.DX[4].U1 = usp;
                childregs->ctx.DX[3].U1 = arg;
                tsk->thread.int_depth = 2;
                return 0;
        }
        /*
         * Get a pointer to where the new child's register block should have
         * been pushed.
         * The Meta's stack grows upwards, and the context is the first
         * thing to be pushed by TBX (phew)
         */
        *childregs = *current_pt_regs();
        /* Set the correct stack for the clone mode */
        if (usp)
                childregs->ctx.AX[0].U0 = ALIGN(usp, 8);
        tsk->thread.int_depth = 1;

        /* set return value for child process */
        childregs->ctx.DX[0].U0 = 0;

        /* The TLS pointer is passed as an argument to sys_clone. */
        if (clone_flags & CLONE_SETTLS)
                tsk->thread.tls_ptr =
                                (__force void __user *)childregs->ctx.DX[1].U1;

#ifdef CONFIG_METAG_FPU
        if (tsk->thread.fpu_context) {
                struct meta_fpu_context *ctx;

                ctx = kmemdup(tsk->thread.fpu_context,
                              sizeof(struct meta_fpu_context), GFP_ATOMIC);
                tsk->thread.fpu_context = ctx;
        }
#endif

#ifdef CONFIG_METAG_DSP
        if (tsk->thread.dsp_context) {
                struct meta_ext_context *ctx;
                int i;

                ctx = kmemdup(tsk->thread.dsp_context,
                              sizeof(struct meta_ext_context), GFP_ATOMIC);
                for (i = 0; i < 2; i++)
                        ctx->ram[i] = kmemdup(ctx->ram[i], ctx->ram_sz[i],
                                              GFP_ATOMIC);
                tsk->thread.dsp_context = ctx;
        }
#endif

        return 0;
}
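
In the PF_KTHREAD branch above, usp actually carries the thread function and arg its argument: D1Ar1 (ctx.DX[3].U1) is the first-argument register and D1RtP (ctx.DX[4].U1) the return pointer in the Meta calling convention, so ret_from_fork can hand the new thread straight to fn(arg). A schematic of the call chain under that reading (not the actual generic code):

/*
 *   kernel_thread(fn, arg, flags)
 *     -> do_fork(..., usp = (unsigned long)fn, (unsigned long)arg, ...)
 *       -> copy_thread(flags, usp, arg, tsk)    [usp here is really fn]
 *
 * copy_thread() plants fn in D1RtP and arg in D1Ar1; when __TBISwitch
 * first schedules the child, ret_from_fork effectively performs fn(arg).
 */
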

#ifdef CONFIG_METAG_FPU
static void alloc_fpu_context(struct thread_struct *thread)
{
        thread->fpu_context = kzalloc(sizeof(struct meta_fpu_context),
                                      GFP_ATOMIC);
}

static void clear_fpu(struct thread_struct *thread)
{
        thread->user_flags &= ~TBICTX_FPAC_BIT;
        kfree(thread->fpu_context);
        thread->fpu_context = NULL;
}
#else
static void clear_fpu(struct thread_struct *thread)
{
}
#endif

#ifdef CONFIG_METAG_DSP
static void clear_dsp(struct thread_struct *thread)
{
        if (thread->dsp_context) {
                kfree(thread->dsp_context->ram[0]);
                kfree(thread->dsp_context->ram[1]);

                kfree(thread->dsp_context);

                thread->dsp_context = NULL;
        }

        __core_reg_set(D0.8, 0);
}
#else
static void clear_dsp(struct thread_struct *thread)
{
}
#endif

struct task_struct *__sched __switch_to(struct task_struct *prev,
                                        struct task_struct *next)
{
        TBIRES to, from;

        to.Switch.pCtx = next->thread.kernel_context;
        to.Switch.pPara = prev;

#ifdef CONFIG_METAG_FPU
        if (prev->thread.user_flags & TBICTX_FPAC_BIT) {
                struct pt_regs *regs = task_pt_regs(prev);
                TBIRES state;

                state.Sig.SaveMask = prev->thread.user_flags;
                state.Sig.pCtx = &regs->ctx;

                if (!prev->thread.fpu_context)
                        alloc_fpu_context(&prev->thread);
                if (prev->thread.fpu_context)
                        __TBICtxFPUSave(state, prev->thread.fpu_context);
        }
        /*
         * Force a restore of the FPU context next time this process is
         * scheduled.
         */
        if (prev->thread.fpu_context)
                prev->thread.fpu_context->needs_restore = true;
#endif

        from = __TBISwitch(to, &prev->thread.kernel_context);

        /* Restore TLS pointer for this process. */
        set_gateway_tls(current->thread.tls_ptr);

        return (struct task_struct *) from.Switch.pPara;
}

void flush_thread(void)
{
        clear_fpu(&current->thread);
        clear_dsp(&current->thread);
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
        clear_fpu(&current->thread);
        clear_dsp(&current->thread);
}

/* TODO: figure out how to unwind the kernel stack here to find out
 * where we went to sleep. */
unsigned long get_wchan(struct task_struct *p)
{
        return 0;
}

int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
        /* Returning 0 indicates that the FPU state was not stored (as it was
         * not in use) */
        return 0;
}

#ifdef CONFIG_METAG_USER_TCM

#define ELF_MIN_ALIGN PAGE_SIZE

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)

unsigned long __metag_elf_map(struct file *filep, unsigned long addr,
                              struct elf_phdr *eppnt, int prot, int type,
                              unsigned long total_size)
{
        unsigned long map_addr, size;
        unsigned long page_off = ELF_PAGEOFFSET(eppnt->p_vaddr);
        unsigned long raw_size = eppnt->p_filesz + page_off;
        unsigned long off = eppnt->p_offset - page_off;
        unsigned int tcm_tag;

        addr = ELF_PAGESTART(addr);
        size = ELF_PAGEALIGN(raw_size);

        /* mmap() will return -EINVAL if given a zero size, but a
         * segment with zero filesize is perfectly valid */
        if (!size)
                return addr;

        tcm_tag = tcm_lookup_tag(addr);

        if (tcm_tag != TCM_INVALID_TAG)
                type &= ~MAP_FIXED;

        /*
         * total_size is the size of the ELF (interpreter) image.
         * The _first_ mmap needs to know the full size, otherwise
         * randomization might put this image into an overlapping
         * position with the ELF binary image. (since size < total_size)
         * So we first map the 'big' image - and unmap the remainder at
         * the end. (which unmap is needed for ELF images with holes.)
         */
        if (total_size) {
                total_size = ELF_PAGEALIGN(total_size);
                map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
                if (!BAD_ADDR(map_addr))
                        vm_munmap(map_addr+size, total_size-size);
        } else
                map_addr = vm_mmap(filep, addr, size, prot, type, off);

        if (!BAD_ADDR(map_addr) && tcm_tag != TCM_INVALID_TAG) {
                struct tcm_allocation *tcm;
                unsigned long tcm_addr;

                tcm = kmalloc(sizeof(*tcm), GFP_KERNEL);
                if (!tcm)
                        return -ENOMEM;

                tcm_addr = tcm_alloc(tcm_tag, raw_size);
                if (tcm_addr != addr) {
                        kfree(tcm);
                        return -ENOMEM;
                }

                tcm->tag = tcm_tag;
                tcm->addr = tcm_addr;
                tcm->size = raw_size;

                list_add(&tcm->list, &current->mm->context.tcm);

                eppnt->p_vaddr = map_addr;
                if (copy_from_user((void *) addr, (void __user *) map_addr,
                                   raw_size))
                        return -EFAULT;
        }

        return map_addr;
}
#endif
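
The total_size branch of __metag_elf_map() uses the standard ELF-loader idiom: map the full image span first so nothing else can be randomized into it, then trim the tail past this segment. A user-space sketch of the same map-then-trim idiom (sizes and protections are made up):

#include <assert.h>
#include <stddef.h>
#include <sys/mman.h>

/* Reserve the whole image span, then release the part past this segment. */
static void *map_and_trim(size_t seg_size, size_t total_size)
{
        void *base = mmap(NULL, total_size, PROT_READ,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (base == MAP_FAILED)
                return MAP_FAILED;

        /* Later segments of the image map into the hole with MAP_FIXED. */
        munmap((char *)base + seg_size, total_size - seg_size);
        return base;
}

int main(void)
{
        void *p = map_and_trim(4096, 65536);

        assert(p != MAP_FAILED);
        return 0;
}
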