author	Vineet Gupta <vgupta@synopsys.com>	2013-01-18 04:42:18 -0500
committer	Vineet Gupta <vgupta@synopsys.com>	2013-02-11 09:30:38 -0500
commit	bf90e1eab682dcb79b7765989fb65835ce9d6165 (patch)
tree	cb1bb4364862d878e1d361d371f8392d08f606d7
parent	4adeefe161a74369e44cc8e663f240ece0470dc3 (diff)

ARC: Process-creation/scheduling/idle-loop

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Cc: Al Viro <viro@ZenIV.linux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
 arch/arc/Kconfig                 |   2
 arch/arc/include/asm/arcregs.h   |  20
 arch/arc/include/asm/processor.h |   9
 arch/arc/include/asm/ptrace.h    |   8
 arch/arc/include/asm/switch_to.h |  41
 arch/arc/kernel/ctx_sw.c         |  91
 arch/arc/kernel/ctx_sw_asm.S     |  58
 arch/arc/kernel/entry.S          |  15
 arch/arc/kernel/fpu.c            |  55
 arch/arc/kernel/process.c        | 193
 10 files changed, 484 insertions(+), 8 deletions(-)
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 8789de1c7c8f..a4e9806ace23 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -17,6 +17,8 @@ config ARC
 	select GENERIC_FIND_FIRST_BIT
 	# for now, we don't need GENERIC_IRQ_PROBE, CONFIG_GENERIC_IRQ_CHIP
 	select GENERIC_IRQ_SHOW
+	select GENERIC_KERNEL_EXECVE
+	select GENERIC_KERNEL_THREAD
 	select GENERIC_PENDING_IRQ if SMP
 	select GENERIC_SMP_IDLE_THREAD
 	select HAVE_GENERIC_HARDIRQS
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 3fccb04e6d93..d76411882481 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -47,6 +47,17 @@
 #define AUX_ITRIGGER		0x40d
 #define AUX_IPULSE		0x415
 
+/*
+ * Floating Pt Registers
+ * Status regs are read-only (build-time) so need not be saved/restored
+ */
+#define ARC_AUX_FP_STAT		0x300
+#define ARC_AUX_DPFP_1L		0x301
+#define ARC_AUX_DPFP_1H		0x302
+#define ARC_AUX_DPFP_2L		0x303
+#define ARC_AUX_DPFP_2H		0x304
+#define ARC_AUX_DPFP_STAT	0x305
+
 #ifndef __ASSEMBLY__
 
 /*
@@ -110,6 +121,15 @@
 
 #endif
 
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+/* These DPFP regs need to be saved/restored across ctx-sw */
+struct arc_fpu {
+	struct {
+		unsigned int l, h;
+	} aux_dpfp[2];
+};
+#endif
+
 #endif	/* __ASEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index bf88cfbc9128..860252ec3fa7 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -29,6 +29,9 @@ struct thread_struct {
 	unsigned long callee_reg;	/* pointer to callee regs */
 	unsigned long fault_address;	/* dbls as brkpt holder as well */
 	unsigned long cause_code;	/* Exception Cause Code (ECR) */
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+	struct arc_fpu fpu;
+#endif
 };
 
 #define INIT_THREAD  {                          \
@@ -54,12 +57,6 @@ unsigned long thread_saved_pc(struct task_struct *t);
 
 #define cpu_relax()	do { } while (0)
 
-/*
- * Create a new kernel thread
- */
-
-extern int kernel_thread(int (*fn) (void *), void *arg, unsigned long flags);
-
 #define copy_segments(tsk, mm)      do { } while (0)
 #define release_segments(mm)        do { } while (0)
 
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 4c9359477ded..3afadefe335f 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -91,6 +91,14 @@ struct callee_regs {
 #define in_syscall(regs)    (((regs->orig_r8) >= 0 && \
 			     (regs->orig_r8 <= NR_syscalls)) ? 1 : 0)
 
+#define current_pt_regs()					\
+({								\
+	/* open-coded current_thread_info() */			\
+	register unsigned long sp asm ("sp");			\
+	unsigned long pg_start = (sp & ~(THREAD_SIZE - 1));	\
+	(struct pt_regs *)(pg_start + THREAD_SIZE - 4) - 1;	\
+})
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
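
The current_pt_regs() helper added above works purely by stack-pointer masking: the kernel stack lives in one THREAD_SIZE-aligned block, and pt_regs ends one unused word below the top of that block (see the stack-layout picture in process.c further down). A minimal userspace sketch of that arithmetic follows; the 8 KB THREAD_SIZE and the sample SP value are illustrative assumptions, not values taken from this patch.

#include <stdio.h>

#define THREAD_SIZE 8192UL	/* assumed size of the kernel stack block, illustration only */

int main(void)
{
	unsigned long sp = 0x8f81f9c8UL;			/* hypothetical kernel-mode SP */
	unsigned long pg_start = sp & ~(THREAD_SIZE - 1);	/* base of the stack block */
	unsigned long regs_end = pg_start + THREAD_SIZE - 4;	/* skip the 1 unused word at the top */

	printf("stack block base : 0x%lx\n", pg_start);
	printf("pt_regs ends at  : 0x%lx (the struct itself sits just below)\n", regs_end);
	return 0;
}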
diff --git a/arch/arc/include/asm/switch_to.h b/arch/arc/include/asm/switch_to.h
new file mode 100644
index 000000000000..1b171ab5fec0
--- /dev/null
+++ b/arch/arc/include/asm/switch_to.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SWITCH_TO_H
+#define _ASM_ARC_SWITCH_TO_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+
+extern void fpu_save_restore(struct task_struct *p, struct task_struct *n);
+#define ARC_FPU_PREV(p, n)	fpu_save_restore(p, n)
+#define ARC_FPU_NEXT(t)
+
+#else
+
+#define ARC_FPU_PREV(p, n)
+#define ARC_FPU_NEXT(n)
+
+#endif /* !CONFIG_ARC_FPU_SAVE_RESTORE */
+
+struct task_struct *__switch_to(struct task_struct *p, struct task_struct *n);
+
+#define switch_to(prev, next, last)	\
+do {					\
+	ARC_FPU_PREV(prev, next);	\
+	last = __switch_to(prev, next);\
+	ARC_FPU_NEXT(next);		\
+	mb();				\
+} while (0)
+
+#endif
+
+#endif
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c
new file mode 100644
index 000000000000..647e37a5165e
--- /dev/null
+++ b/arch/arc/kernel/ctx_sw.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Aug 2009
+ * -"C" version of lowest level context switch asm macro called by scheduler
+ *  gcc doesn't generate the dwarf CFI info for hand written asm, hence can't
+ *  backtrace out of it (e.g. tasks sleeping in kernel).
+ *  So we cheat a bit by writing almost similar code in inline-asm.
+ * -This is a hacky way of doing things, but there is no other simple way.
+ *  I don't want/intend to extend unwinding code to understand raw asm
+ */
+
+#include <asm/asm-offsets.h>
+#include <linux/sched.h>
+
+struct task_struct *__sched
+__switch_to(struct task_struct *prev_task, struct task_struct *next_task)
+{
+	unsigned int tmp;
+	unsigned int prev = (unsigned int)prev_task;
+	unsigned int next = (unsigned int)next_task;
+	int num_words_to_skip = 1;
+
+	__asm__ __volatile__(
+		/* FP/BLINK save generated by gcc (standard function prologue) */
+		"st.a    r13, [sp, -4]   \n\t"
+		"st.a    r14, [sp, -4]   \n\t"
+		"st.a    r15, [sp, -4]   \n\t"
+		"st.a    r16, [sp, -4]   \n\t"
+		"st.a    r17, [sp, -4]   \n\t"
+		"st.a    r18, [sp, -4]   \n\t"
+		"st.a    r19, [sp, -4]   \n\t"
+		"st.a    r20, [sp, -4]   \n\t"
+		"st.a    r21, [sp, -4]   \n\t"
+		"st.a    r22, [sp, -4]   \n\t"
+		"st.a    r23, [sp, -4]   \n\t"
+		"st.a    r24, [sp, -4]   \n\t"
+		"st.a    r25, [sp, -4]   \n\t"
+		"sub     sp, sp, %4      \n\t"	/* create gutter at top */
+
+		/* set ksp of outgoing task in tsk->thread.ksp */
+		"st.as   sp, [%3, %1]    \n\t"
+
+		"sync   \n\t"
+
+		/*
+		 * setup _current_task with incoming tsk.
+		 * optionally, set r25 to that as well
+		 * For SMP extra work to get to &_current_task[cpu]
+		 * (open coded SET_CURR_TASK_ON_CPU)
+		 */
+		"st      %2, [@_current_task]	\n\t"
+
+		/* get ksp of incoming task from tsk->thread.ksp */
+		"ld.as   sp, [%2, %1]   \n\t"
+
+		/* start loading its CALLEE reg file */
+
+		"add     sp, sp, %4     \n\t"	/* skip gutter at top */
+
+		"ld.ab   r25, [sp, 4]   \n\t"
+		"ld.ab   r24, [sp, 4]   \n\t"
+		"ld.ab   r23, [sp, 4]   \n\t"
+		"ld.ab   r22, [sp, 4]   \n\t"
+		"ld.ab   r21, [sp, 4]   \n\t"
+		"ld.ab   r20, [sp, 4]   \n\t"
+		"ld.ab   r19, [sp, 4]   \n\t"
+		"ld.ab   r18, [sp, 4]   \n\t"
+		"ld.ab   r17, [sp, 4]   \n\t"
+		"ld.ab   r16, [sp, 4]   \n\t"
+		"ld.ab   r15, [sp, 4]   \n\t"
+		"ld.ab   r14, [sp, 4]   \n\t"
+		"ld.ab   r13, [sp, 4]   \n\t"
+
+		/* last (ret value) = prev : although for ARC it is a mov r0, r0 */
+		"mov     %0, %3        \n\t"
+
+		/* FP/BLINK restore generated by gcc (standard func epilogue) */
+
+		: "=r"(tmp)
+		: "n"((TASK_THREAD + THREAD_KSP) / 4), "r"(next), "r"(prev),
+		  "n"(num_words_to_skip * 4)
+		: "blink"
+	);
+
+	return (struct task_struct *)tmp;
+}
diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S
new file mode 100644
index 000000000000..d8972345e4c2
--- /dev/null
+++ b/arch/arc/kernel/ctx_sw_asm.S
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Aug 2009
+ * -Moved core context switch macro out of entry.S into this file.
+ * -This is the more "natural" hand written assembler
+ */
+
+#include <asm/entry.h>       /* For the SAVE_* macros */
+#include <asm/asm-offsets.h>
+#include <asm/linkage.h>
+
+;################### Low Level Context Switch ##########################
+
+	.section .sched.text,"ax",@progbits
+	.align 4
+	.global __switch_to
+	.type   __switch_to, @function
+__switch_to:
+
+	/* Save regs on kernel mode stack of task */
+	st.a    blink, [sp, -4]
+	st.a    fp, [sp, -4]
+	SAVE_CALLEE_SAVED_KERNEL
+
+	/* Save the now KSP in task->thread.ksp */
+	st.as  sp, [r0, (TASK_THREAD + THREAD_KSP)/4]
+
+	/*
+	 * Return last task in r0 (return reg)
+	 * On ARC, Return reg = First Arg reg = r0.
+	 * Since we already have last task in r0,
+	 * don't need to do anything special to return it
+	 */
+
+	/* hardware memory barrier */
+	sync
+
+	/*
+	 * switch to new task, contained in r1
+	 * Temp reg r3 is required to get the ptr to store val
+	 */
+	SET_CURR_TASK_ON_CPU  r1, r3
+
+	/* reload SP with kernel mode stack pointer in task->thread.ksp */
+	ld.as  sp, [r1, (TASK_THREAD + THREAD_KSP)/4]
+
+	/* restore the registers */
+	RESTORE_CALLEE_SAVED_KERNEL
+	ld.ab   fp, [sp, 4]
+	ld.ab   blink, [sp, 4]
+	j       [blink]
+
+ARC_EXIT __switch_to
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 0b0a190547a9..ed08ac14fbc4 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -566,8 +566,19 @@ ARC_ENTRY ret_from_fork
 	; when the forked child comes here from the __switch_to function
 	; r0 has the last task pointer.
 	; put last task in scheduler queue
 	bl   @schedule_tail
-	b    @ret_from_exception
+
+	; If kernel thread, jump to its entry-point
+	ld   r9, [sp, PT_status32]
+	brne r9, 0, 1f
+
+	jl.d [r14]
+	mov  r0, r13		; arg to payload
+
+1:
+	; special case of kernel_thread entry point returning back due to
+	; kernel_execve() - pretend return from syscall to ret to userland
+	b    ret_from_exception
 ARC_EXIT ret_from_fork
 
 ;################### Special Sys Call Wrappers ##########################
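
The branch added above pairs with copy_thread() later in this patch: for a kernel thread, copy_thread() zeroes the child's pt_regs (so the saved STATUS32 word reads 0) and parks the entry point in callee reg r14 and its argument in r13; ret_from_fork then spots the zero and does jl.d [r14] with r0 = r13. A small userspace model of that dispatch convention is sketched below; the fake_* names are hypothetical and this is purely illustrative, not kernel code.

#include <stdio.h>

/* only the two callee regs the kernel-thread path cares about */
struct fake_callee_regs {
	unsigned long r13;	/* argument to the kernel thread */
	unsigned long r14;	/* entry point of the kernel thread */
};

static int payload(void *arg)
{
	printf("kernel-thread payload got: %s\n", (const char *)arg);
	return 0;
}

/* models the added ret_from_fork logic: saved STATUS32 == 0 means "kernel thread" */
static void fake_ret_from_fork(const struct fake_callee_regs *c, unsigned long saved_status32)
{
	if (saved_status32 == 0) {
		int (*fn)(void *) = (int (*)(void *))c->r14;
		fn((void *)c->r13);		/* jl.d [r14]; mov r0, r13 */
		return;
	}
	/* otherwise: a user task, fall through to the return-to-user path */
}

int main(void)
{
	struct fake_callee_regs c = {
		.r13 = (unsigned long)"hello from kthread",
		.r14 = (unsigned long)payload,
	};
	fake_ret_from_fork(&c, 0);	/* pt_regs were memset to 0 for kernel threads */
	return 0;
}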
diff --git a/arch/arc/kernel/fpu.c b/arch/arc/kernel/fpu.c
new file mode 100644
index 000000000000..f352e512cbd1
--- /dev/null
+++ b/arch/arc/kernel/fpu.c
@@ -0,0 +1,55 @@
+/*
+ * fpu.c - save/restore of Floating Point Unit Registers on task switch
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include <asm/switch_to.h>
+
+/*
+ * To save/restore FPU regs, simplest scheme would use LR/SR insns.
+ * However since SR serializes the pipeline, an alternate "hack" can be used
+ * which uses the FPU Exchange insn (DEXCL) to r/w FPU regs.
+ *
+ * Store to 64bit dpfp1 reg from a pair of core regs:
+ *   dexcl1    0, r1, r0    ; where r1:r0 is the 64 bit val
+ *
+ * Read from dpfp1 into pair of core regs (w/o clobbering dpfp1)
+ *   mov_s    r3, 0
+ *   daddh11  r1, r3, r3    ; get "hi" into r1 (dpfp1 unchanged)
+ *   dexcl1   r0, r1, r3    ; get "low" into r0 (dpfp1 low clobbered)
+ *   dexcl1   0, r1, r0     ; restore dpfp1 to orig value
+ *
+ * However we can tweak the read, so that read-out of outgoing task's FPU regs
+ * and write of incoming task's regs happen in one shot. So all the work is
+ * done before context switch
+ */
+
+void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
+{
+	unsigned int *saveto = &prev->thread.fpu.aux_dpfp[0].l;
+	unsigned int *readfrom = &next->thread.fpu.aux_dpfp[0].l;
+
+	const unsigned int zero = 0;
+
+	__asm__ __volatile__(
+		"daddh11  %0, %2, %2\n"
+		"dexcl1   %1, %3, %4\n"
+		: "=&r" (*(saveto + 1)),	/* early clobber must be here */
+		  "=&r" (*(saveto))
+		: "r" (zero), "r" (*(readfrom + 1)), "r" (*(readfrom))
+	);
+
+	__asm__ __volatile__(
+		"daddh22  %0, %2, %2\n"
+		"dexcl2   %1, %3, %4\n"
+		: "=&r"(*(saveto + 3)),	/* early clobber must be here */
+		  "=&r"(*(saveto + 2))
+		: "r" (zero), "r" (*(readfrom + 3)), "r" (*(readfrom + 2))
+	);
+}
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 4d14e5638c8c..279e080b6b54 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -40,3 +40,196 @@ SYSCALL_DEFINE0(arc_gettls)
 {
 	return task_thread_info(current)->thr_ptr;
 }
+
+static inline void arch_idle(void)
+{
+	/* sleep, but enable all interrupts before committing */
+	__asm__("sleep 0x3");
+}
+
+void cpu_idle(void)
+{
+	/* Since we SLEEP in idle loop, TIF_POLLING_NRFLAG can't be set */
+
+	/* endless idle loop with no priority at all */
+	while (1) {
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
+
+doze:
+		local_irq_disable();
+		if (!need_resched()) {
+			arch_idle();
+			goto doze;
+		} else {
+			local_irq_enable();
+		}
+
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
+
+		schedule_preempt_disabled();
+	}
+}
+
+asmlinkage void ret_from_fork(void);
+
+/* Layout of Child kernel mode stack as set up at the end of this function is
+ *
+ * |     ...        |
+ * |     ...        |
+ * |    unused      |
+ * |                |
+ * ------------------  <==== top of Stack (thread.ksp)
+ * |   UNUSED 1 word|
+ * ------------------
+ * |     r25        |
+ * ~                ~
+ * |    --to--      |   (CALLEE Regs of user mode)
+ * |     r13        |
+ * ------------------
+ * |     fp         |
+ * |    blink       |   @ret_from_fork
+ * ------------------
+ * |                |
+ * ~                ~
+ * ~                ~
+ * |                |
+ * ------------------
+ * |     r12        |
+ * ~                ~
+ * |    --to--      |   (scratch Regs of user mode)
+ * |     r0         |
+ * ------------------
+ * |   UNUSED 1 word|
+ * ------------------  <===== END of PAGE
+ */
+int copy_thread(unsigned long clone_flags,
+		unsigned long usp, unsigned long arg,
+		struct task_struct *p)
+{
+	struct pt_regs *c_regs;        /* child's pt_regs */
+	unsigned long *childksp;       /* to unwind out of __switch_to() */
+	struct callee_regs *c_callee;  /* child's callee regs */
+	struct callee_regs *parent_callee;  /* parent's callee regs */
+	struct pt_regs *regs = current_pt_regs();
+
+	/* Mark the specific anchors to begin with (see pic above) */
+	c_regs = task_pt_regs(p);
+	childksp = (unsigned long *)c_regs - 2;  /* 2 words for FP/BLINK */
+	c_callee = ((struct callee_regs *)childksp) - 1;
+
+	/*
+	 * __switch_to() uses thread.ksp to start unwinding stack
+	 * For kernel threads we don't need to create callee regs, the
+	 * stack layout nevertheless needs to remain the same.
+	 * Also, since __switch_to anyways unwinds callee regs, we use
+	 * this to populate kernel thread entry-pt/args into callee regs,
+	 * so that ret_from_kernel_thread() becomes simpler.
+	 */
+	p->thread.ksp = (unsigned long)c_callee;	/* THREAD_KSP */
+
+	/* __switch_to expects FP(0), BLINK(return addr) at top */
+	childksp[0] = 0;			/* fp */
+	childksp[1] = (unsigned long)ret_from_fork; /* blink */
+
+	if (unlikely(p->flags & PF_KTHREAD)) {
+		memset(c_regs, 0, sizeof(struct pt_regs));
+
+		c_callee->r13 = arg; /* argument to kernel thread */
+		c_callee->r14 = usp; /* function */
+
+		return 0;
+	}
+
+	/*--------- User Task Only --------------*/
+
+	/* __switch_to expects FP(0), BLINK(return addr) at top of stack */
+	childksp[0] = 0;			/* for POP fp */
+	childksp[1] = (unsigned long)ret_from_fork; /* for POP blink */
+
+	/* Copy parent's pt regs on child's kernel mode stack */
+	*c_regs = *regs;
+
+	if (usp)
+		c_regs->sp = usp;
+
+	c_regs->r0 = 0;		/* fork returns 0 in child */
+
+	parent_callee = ((struct callee_regs *)regs) - 1;
+	*c_callee = *parent_callee;
+
+	if (unlikely(clone_flags & CLONE_SETTLS)) {
+		/*
+		 * set task's userland tls data ptr from 4th arg
+		 * clone C-lib call is different from clone sys-call
+		 */
+		task_thread_info(p)->thr_ptr = regs->r3;
+	} else {
+		/* Normal fork case: set parent's TLS ptr in child */
+		task_thread_info(p)->thr_ptr =
+		    task_thread_info(current)->thr_ptr;
+	}
+
+	return 0;
+}
+
+/*
+ * Some archs flush debug and FPU info here
+ */
+void flush_thread(void)
+{
+}
+
+/*
+ * Free any architecture-specific thread data structures, etc.
+ */
+void exit_thread(void)
+{
+}
+
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
+{
+	return 0;
+}
+
+/*
+ * API: expected by scheduler code: if a thread is sleeping, where is it?
+ * What is this good for? It will always be the scheduler or ret_from_fork.
+ * So we hard code that anyways.
+ */
+unsigned long thread_saved_pc(struct task_struct *t)
+{
+	struct pt_regs *regs = task_pt_regs(t);
+	unsigned long blink = 0;
+
+	/*
+	 * If the thread being queried for is not itself calling this, then it
+	 * implies it is not executing, which in turn implies it is sleeping,
+	 * which in turn implies it got switched OUT by the scheduler.
+	 * In that case, its kernel mode blink can be reliably retrieved as per
+	 * the picture above (right above pt_regs).
+	 */
+	if (t != current && t->state != TASK_RUNNING)
+		blink = *((unsigned int *)regs - 1);
+
+	return blink;
+}
+
+int elf_check_arch(const struct elf32_hdr *x)
+{
+	unsigned int eflags;
+
+	if (x->e_machine != EM_ARCOMPACT)
+		return 0;
+
+	eflags = x->e_flags;
+	if ((eflags & EF_ARC_OSABI_MSK) < EF_ARC_OSABI_V2) {
+		pr_err("ABI mismatch - you need newer toolchain\n");
+		force_sigsegv(SIGSEGV, current);
+		return 0;
+	}
+
+	return 1;
+}
+EXPORT_SYMBOL(elf_check_arch);