Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--	arch/mips/kernel/ftrace.c        | 184
-rw-r--r--	arch/mips/kernel/mcount.S        |  55
-rw-r--r--	arch/mips/kernel/mips-mt-fpaff.c |  87
-rw-r--r--	arch/mips/kernel/traps.c         |   2
4 files changed, 208 insertions(+), 120 deletions(-)
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index e9e64e0ff7aa..5a84a1f11231 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -2,7 +2,7 @@
  * Code for replacing ftrace calls with jumps.
  *
  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
- * Copyright (C) 2009 DSLab, Lanzhou University, China
+ * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
  * Author: Wu Zhangjin <wuzhangjin@gmail.com>
  *
  * Thanks goes to Steven Rostedt for writing the original x86 version.
@@ -12,18 +12,62 @@
 #include <linux/init.h>
 #include <linux/ftrace.h>
 
-#include <asm/cacheflush.h>
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/cacheflush.h>
+#include <asm/uasm.h>
+
+/*
+ * If the Instruction Pointer is in module space (0xc0000000), return true;
+ * otherwise, it is in kernel space (0x80000000), return false.
+ *
+ * FIXME: This will not work when the kernel space and module space are the
+ * same. If they are the same, we need to modify scripts/recordmcount.pl,
+ * ftrace_make_nop/call() and the other related parts to ensure the
+ * enabling/disabling of the calling site to _mcount is right for both kernel
+ * and module.
+ */
+
+static inline int in_module(unsigned long ip)
+{
+	return ip & 0x40000000;
+}
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
 #define ADDR_MASK 0x03ffffff	/* op_code|addr : 31...26|25 ....0 */
-#define jump_insn_encode(op_code, addr) \
-	((unsigned int)((op_code) | (((addr) >> 2) & ADDR_MASK)))
 
-static unsigned int ftrace_nop = 0x00000000;
+#define INSN_B_1F_4 0x10000004	/* b 1f; offset = 4 */
+#define INSN_B_1F_5 0x10000005	/* b 1f; offset = 5 */
+#define INSN_NOP 0x00000000	/* nop */
+#define INSN_JAL(addr)	\
+	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
+
+static unsigned int insn_jal_ftrace_caller __read_mostly;
+static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
+static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;
+
+static inline void ftrace_dyn_arch_init_insns(void)
+{
+	u32 *buf;
+	unsigned int v1;
+
+	/* lui v1, hi16_mcount */
+	v1 = 3;
+	buf = (u32 *)&insn_lui_v1_hi16_mcount;
+	UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);
+
+	/* jal (ftrace_caller + 8), jump over the first two instruction */
+	buf = (u32 *)&insn_jal_ftrace_caller;
+	uasm_i_jal(&buf, (FTRACE_ADDR + 8));
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/* j ftrace_graph_caller */
+	buf = (u32 *)&insn_j_ftrace_graph_caller;
+	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller);
+#endif
+}
 
 static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 {
@@ -40,67 +84,56 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 	return 0;
 }
 
-static int lui_v1;
-static int jal_mcount;
-
 int ftrace_make_nop(struct module *mod,
 		    struct dyn_ftrace *rec, unsigned long addr)
 {
 	unsigned int new;
-	int faulted;
 	unsigned long ip = rec->ip;
 
-	/* We have compiled module with -mlong-calls, but compiled the kernel
-	 * without it, we need to cope with them respectively. */
-	if (ip & 0x40000000) {
-		/* record it for ftrace_make_call */
-		if (lui_v1 == 0) {
-			/* lui_v1 = *(unsigned int *)ip; */
-			safe_load_code(lui_v1, ip, faulted);
-
-			if (unlikely(faulted))
-				return -EFAULT;
-		}
-
-		/* lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
+	/*
+	 * We have compiled module with -mlong-calls, but compiled the kernel
+	 * without it, we need to cope with them respectively.
+	 */
+	if (in_module(ip)) {
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+		/*
+		 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
+		 * addiu v1, v1, low_16bit_of_mcount
+		 * move at, ra
+		 * move $12, ra_address
+		 * jalr v1
+		 * sub sp, sp, 8
+		 * 1: offset = 5 instructions
+		 */
+		new = INSN_B_1F_5;
+#else
+		/*
+		 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
 		 * addiu v1, v1, low_16bit_of_mcount
 		 * move at, ra
 		 * jalr v1
-		 * nop
-		 * 1f: (ip + 12)
+		 * nop | move $12, ra_address | sub sp, sp, 8
+		 * 1: offset = 4 instructions
 		 */
-		new = 0x10000004;
+		new = INSN_B_1F_4;
+#endif
 	} else {
-		/* record/calculate it for ftrace_make_call */
-		if (jal_mcount == 0) {
-			/* We can record it directly like this:
-			 *     jal_mcount = *(unsigned int *)ip;
-			 * Herein, jump over the first two nop instructions */
-			jal_mcount = jump_insn_encode(JAL, (MCOUNT_ADDR + 8));
-		}
-
-		/* move at, ra
-		 * jalr v1 --> nop
+		/*
+		 * move at, ra
+		 * jal _mcount --> nop
 		 */
-		new = ftrace_nop;
+		new = INSN_NOP;
 	}
 	return ftrace_modify_code(ip, new);
 }
 
-static int modified;	/* initialized as 0 by default */
-
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
 	unsigned int new;
 	unsigned long ip = rec->ip;
 
-	/* We just need to remove the "b ftrace_stub" at the fist time! */
-	if (modified == 0) {
-		modified = 1;
-		ftrace_modify_code(addr, ftrace_nop);
-	}
 	/* ip, module: 0xc0000000, kernel: 0x80000000 */
-	new = (ip & 0x40000000) ? lui_v1 : jal_mcount;
+	new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller;
 
 	return ftrace_modify_code(ip, new);
 }
@@ -111,44 +144,48 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 {
 	unsigned int new;
 
-	new = jump_insn_encode(JAL, (unsigned long)func);
+	new = INSN_JAL((unsigned long)func);
 
 	return ftrace_modify_code(FTRACE_CALL_IP, new);
 }
 
 int __init ftrace_dyn_arch_init(void *data)
 {
+	/* Encode the instructions when booting */
+	ftrace_dyn_arch_init_insns();
+
+	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
+	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);
+
 	/* The return code is retured via data */
 	*(unsigned long *)data = 0;
 
 	return 0;
 }
 #endif	/* CONFIG_DYNAMIC_FTRACE */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 extern void ftrace_graph_call(void);
-#define JMP	0x08000000	/* jump to target directly */
-#define CALL_FTRACE_GRAPH_CALLER \
-	jump_insn_encode(JMP, (unsigned long)(&ftrace_graph_caller))
 #define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))
 
 int ftrace_enable_ftrace_graph_caller(void)
 {
 	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
-			CALL_FTRACE_GRAPH_CALLER);
+			insn_j_ftrace_graph_caller);
 }
 
 int ftrace_disable_ftrace_graph_caller(void)
 {
-	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, ftrace_nop);
+	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
 }
 
-#endif	/* !CONFIG_DYNAMIC_FTRACE */
+#endif	/* CONFIG_DYNAMIC_FTRACE */
 
 #ifndef KBUILD_MCOUNT_RA_ADDRESS
+
 #define S_RA_SP	(0xafbf << 16)	/* s{d,w} ra, offset(sp) */
 #define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
 #define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */
@@ -162,17 +199,17 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
 	unsigned int code;
 	int faulted;
 
-	/* in module or kernel? */
-	if (self_addr & 0x40000000) {
-		/* module: move to the instruction "lui v1, HI_16BIT_OF_MCOUNT" */
-		ip = self_addr - 20;
-	} else {
-		/* kernel: move to the instruction "move ra, at" */
-		ip = self_addr - 12;
-	}
+	/*
+	 * For module, move the ip from calling site of mcount to the
+	 * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for
+	 * kernel, move to the instruction "move ra, at"(offset is 12)
+	 */
+	ip = self_addr - (in_module(self_addr) ? 20 : 12);
 
-	/* search the text until finding the non-store instruction or "s{d,w}
-	 * ra, offset(sp)" instruction */
+	/*
+	 * search the text until finding the non-store instruction or "s{d,w}
+	 * ra, offset(sp)" instruction
+	 */
 	do {
 		ip -= 4;
 
@@ -181,10 +218,11 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
 
 		if (unlikely(faulted))
 			return 0;
-
-		/* If we hit the non-store instruction before finding where the
+		/*
+		 * If we hit the non-store instruction before finding where the
 		 * ra is stored, then this is a leaf function and it does not
-		 * store the ra on the stack. */
+		 * store the ra on the stack
+		 */
 		if ((code & S_R_SP) != S_R_SP)
 			return parent_addr;
 
@@ -202,7 +240,7 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
 	return 0;
 }
 
-#endif
+#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */
 
 /*
  * Hook the return address and push it in the stack of return addrs
@@ -220,7 +258,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
 
-	/* "parent" is the stack address saved the return address of the caller
+	/*
+	 * "parent" is the stack address saved the return address of the caller
 	 * of _mcount.
 	 *
 	 * if the gcc < 4.5, a leaf function does not save the return address
@@ -242,10 +281,11 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 		goto out;
 #ifndef KBUILD_MCOUNT_RA_ADDRESS
 	parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
-			(unsigned long)parent,
-			fp);
-	/* If fails when getting the stack address of the non-leaf function's
-	 * ra, stop function graph tracer and return */
+			(unsigned long)parent, fp);
+	/*
+	 * If fails when getting the stack address of the non-leaf function's
+	 * ra, stop function graph tracer and return
+	 */
 	if (parent == 0)
 		goto out;
 #endif
@@ -272,4 +312,4 @@ out:
 	ftrace_graph_stop();
 	WARN_ON(1);
 }
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */
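For reference, the INSN_JAL() macro above builds a MIPS J-type instruction: bits 31..26 hold the opcode, bits 25..0 hold the word-aligned target, and the upper four address bits are inherited from the PC of the delay slot. A minimal userspace sketch of the same encoding (the target address here is hypothetical, not taken from this patch):

#include <stdio.h>

/* Same encoding as the patch's INSN_JAL(): opcode in bits 31..26,
 * word-address index in bits 25..0. */
#define JAL		0x0c000000
#define ADDR_MASK	0x03ffffff
#define INSN_JAL(addr)	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))

int main(void)
{
	unsigned long target = 0x80212340UL;	/* hypothetical _mcount address */

	/* prints 0x0c0848d0: the low 28 bits of the target, shifted
	 * right by 2 and OR'ed with the jal opcode */
	printf("jal to 0x%lx encodes as 0x%08x\n", target, INSN_JAL(target));
	return 0;
}

This is also why ftrace_dyn_arch_init_insns() pre-encodes the jal/j/lui instructions once at boot with uasm, rather than re-deriving them on every ftrace_make_call().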
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index 6851fc97a511..6bfcb7a00ec6 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -6,6 +6,7 @@
  * more details.
  *
  * Copyright (C) 2009 Lemote Inc. & DSLab, Lanzhou University, China
+ * Copyright (C) 2010 DSLab, Lanzhou University, China
  * Author: Wu Zhangjin <wuzhangjin@gmail.com>
  */
 
@@ -45,8 +46,6 @@
 	PTR_L	a5, PT_R9(sp)
 	PTR_L	a6, PT_R10(sp)
 	PTR_L	a7, PT_R11(sp)
-#endif
-#ifdef CONFIG_64BIT
 	PTR_ADDIU	sp, PT_SIZE
 #else
 	PTR_ADDIU	sp, (PT_SIZE + 8)
@@ -58,6 +57,12 @@
 	move	ra, AT
 	.endm
 
+/*
+ * The -mmcount-ra-address option of gcc 4.5 uses register $12 to pass
+ * the location of the parent's return address.
+ */
+#define MCOUNT_RA_ADDRESS_REG	$12
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 NESTED(ftrace_caller, PT_SIZE, ra)
@@ -71,14 +76,14 @@ _mcount:
 
 	MCOUNT_SAVE_REGS
 #ifdef KBUILD_MCOUNT_RA_ADDRESS
-	PTR_S	t0, PT_R12(sp)	/* t0 saved the location of the return address(at) by -mmcount-ra-address */
+	PTR_S	MCOUNT_RA_ADDRESS_REG, PT_R12(sp)
 #endif
 
-	move	a0, ra		/* arg1: next ip, selfaddr */
+	move	a0, ra		/* arg1: self return address */
 	.globl ftrace_call
 ftrace_call:
 	nop	/* a placeholder for the call to a real tracing function */
-	move	a1, AT		/* arg2: the caller's next ip, parent */
+	move	a1, AT		/* arg2: parent's return address */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	.globl ftrace_graph_call
@@ -119,9 +124,9 @@ NESTED(_mcount, PT_SIZE, ra)
 static_trace:
 	MCOUNT_SAVE_REGS
 
-	move	a0, ra		/* arg1: next ip, selfaddr */
+	move	a0, ra		/* arg1: self return address */
 	jalr	t2	/* (1) call *ftrace_trace_function */
-	move	a1, AT		/* arg2: the caller's next ip, parent */
+	move	a1, AT		/* arg2: parent's return address */
 
 	MCOUNT_RESTORE_REGS
 	.globl ftrace_stub
@@ -134,28 +139,34 @@ ftrace_stub:
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 NESTED(ftrace_graph_caller, PT_SIZE, ra)
-#ifdef CONFIG_DYNAMIC_FTRACE
-	PTR_L	a1, PT_R31(sp)	/* load the original ra from the stack */
-#ifdef KBUILD_MCOUNT_RA_ADDRESS
-	PTR_L	t0, PT_R12(sp)	/* load the original t0 from the stack */
-#endif
-#else
+#ifndef CONFIG_DYNAMIC_FTRACE
 	MCOUNT_SAVE_REGS
-	move	a1, ra		/* arg2: next ip, selfaddr */
 #endif
 
+	/* arg1: Get the location of the parent's return address */
 #ifdef KBUILD_MCOUNT_RA_ADDRESS
-	bnez	t0, 1f		/* non-leaf func: t0 saved the location of the return address */
+#ifdef CONFIG_DYNAMIC_FTRACE
+	PTR_L	a0, PT_R12(sp)
+#else
+	move	a0, MCOUNT_RA_ADDRESS_REG
+#endif
+	bnez	a0, 1f	/* non-leaf func: stored in MCOUNT_RA_ADDRESS_REG */
 	nop
-	PTR_LA	t0, PT_R1(sp)	/* leaf func: get the location of at(old ra) from our own stack */
-1:	move	a0, t0		/* arg1: the location of the return address */
+#endif
+	PTR_LA	a0, PT_R1(sp)	/* leaf func: the location in current stack */
+1:
+
+	/* arg2: Get self return address */
+#ifdef CONFIG_DYNAMIC_FTRACE
+	PTR_L	a1, PT_R31(sp)
 #else
-	PTR_LA	a0, PT_R1(sp)	/* arg1: &AT -> a0 */
+	move	a1, ra
 #endif
-	jal	prepare_ftrace_return
+
+	/* arg3: Get frame pointer of current stack */
 #ifdef CONFIG_FRAME_POINTER
-	move	a2, fp		/* arg3: frame pointer */
-#else
+	move	a2, fp
+#else /* ! CONFIG_FRAME_POINTER */
 #ifdef CONFIG_64BIT
 	PTR_LA	a2, PT_SIZE(sp)
 #else
@@ -163,6 +174,8 @@ NESTED(ftrace_graph_caller, PT_SIZE, ra)
 #endif
 #endif
 
+	jal	prepare_ftrace_return
+	nop
 	MCOUNT_RESTORE_REGS
 	RETURN_BACK
 	END(ftrace_graph_caller)
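The arg1 selection in ftrace_graph_caller above reduces to a simple rule: with gcc 4.5's -mmcount-ra-address, a non-leaf function passes the location of its return address in $12 (MCOUNT_RA_ADDRESS_REG), while for a leaf function that register is zero (as the bnez test assumes) and the only copy of the return address is the 'at' value spilled to PT_R1(sp). A C model of that decision, with illustrative names only, not kernel code:

/* C model of the arg1 logic in ftrace_graph_caller: reg12 mirrors
 * MCOUNT_RA_ADDRESS_REG ($12); saved_at_slot mirrors PT_R1(sp), where
 * the old ra (held in at) was spilled by MCOUNT_SAVE_REGS. */
unsigned long *parent_ra_location(unsigned long *reg12,
				  unsigned long *saved_at_slot)
{
	if (reg12)			/* non-leaf: gcc passed &ra */
		return reg12;
	return saved_at_slot;		/* leaf: fall back to saved at */
}

prepare_ftrace_return() then hooks the return address through whichever location is chosen.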
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index f5981c499109..2340f11dc29c 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -3,6 +3,7 @@
  * Copyright (C) 2005 Mips Technologies, Inc
  */
 #include <linux/cpu.h>
+#include <linux/cpuset.h>
 #include <linux/cpumask.h>
 #include <linux/delay.h>
 #include <linux/kernel.h>
@@ -39,6 +40,21 @@ static inline struct task_struct *find_process_by_pid(pid_t pid)
 	return pid ? find_task_by_vpid(pid) : current;
 }
 
+/*
+ * check the target process has a UID that matches the current process's
+ */
+static bool check_same_owner(struct task_struct *p)
+{
+	const struct cred *cred = current_cred(), *pcred;
+	bool match;
+
+	rcu_read_lock();
+	pcred = __task_cred(p);
+	match = (cred->euid == pcred->euid ||
+		 cred->euid == pcred->uid);
+	rcu_read_unlock();
+	return match;
+}
 
 /*
  * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
@@ -46,12 +62,10 @@ static inline struct task_struct *find_process_by_pid(pid_t pid)
 asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
 				      unsigned long __user *user_mask_ptr)
 {
-	cpumask_t new_mask;
-	cpumask_t effective_mask;
-	int retval;
-	struct task_struct *p;
+	cpumask_var_t cpus_allowed, new_mask, effective_mask;
 	struct thread_info *ti;
-	uid_t euid;
+	struct task_struct *p;
+	int retval;
 
 	if (len < sizeof(new_mask))
 		return -EINVAL;
@@ -60,53 +74,74 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
 		return -EFAULT;
 
 	get_online_cpus();
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 
 	p = find_process_by_pid(pid);
 	if (!p) {
-		read_unlock(&tasklist_lock);
+		rcu_read_unlock();
 		put_online_cpus();
 		return -ESRCH;
 	}
 
-	/*
-	 * It is not safe to call set_cpus_allowed with the
-	 * tasklist_lock held. We will bump the task_struct's
-	 * usage count and drop tasklist_lock before invoking
-	 * set_cpus_allowed.
-	 */
+	/* Prevent p going away */
 	get_task_struct(p);
+	rcu_read_unlock();
 
-	euid = current_euid();
+	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
+		retval = -ENOMEM;
+		goto out_put_task;
+	}
+	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
+		retval = -ENOMEM;
+		goto out_free_cpus_allowed;
+	}
+	if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) {
+		retval = -ENOMEM;
+		goto out_free_new_mask;
+	}
 	retval = -EPERM;
-	if (euid != p->cred->euid && euid != p->cred->uid &&
-	    !capable(CAP_SYS_NICE)) {
-		read_unlock(&tasklist_lock);
+	if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
 		goto out_unlock;
-	}
 
 	retval = security_task_setscheduler(p, 0, NULL);
 	if (retval)
 		goto out_unlock;
 
 	/* Record new user-specified CPU set for future reference */
-	p->thread.user_cpus_allowed = new_mask;
-
-	/* Unlock the task list */
-	read_unlock(&tasklist_lock);
+	cpumask_copy(&p->thread.user_cpus_allowed, new_mask);
 
+again:
 	/* Compute new global allowed CPU set if necessary */
 	ti = task_thread_info(p);
 	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
-	    cpus_intersects(new_mask, mt_fpu_cpumask)) {
-		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
-		retval = set_cpus_allowed_ptr(p, &effective_mask);
+	    cpus_intersects(*new_mask, mt_fpu_cpumask)) {
+		cpus_and(*effective_mask, *new_mask, mt_fpu_cpumask);
+		retval = set_cpus_allowed_ptr(p, effective_mask);
 	} else {
+		cpumask_copy(effective_mask, new_mask);
 		clear_ti_thread_flag(ti, TIF_FPUBOUND);
-		retval = set_cpus_allowed_ptr(p, &new_mask);
+		retval = set_cpus_allowed_ptr(p, new_mask);
 	}
 
+	if (!retval) {
+		cpuset_cpus_allowed(p, cpus_allowed);
+		if (!cpumask_subset(effective_mask, cpus_allowed)) {
+			/*
+			 * We must have raced with a concurrent cpuset
+			 * update. Just reset the cpus_allowed to the
+			 * cpuset's cpus_allowed
+			 */
+			cpumask_copy(new_mask, cpus_allowed);
+			goto again;
+		}
+	}
 out_unlock:
+	free_cpumask_var(effective_mask);
+out_free_new_mask:
+	free_cpumask_var(new_mask);
+out_free_cpus_allowed:
+	free_cpumask_var(cpus_allowed);
+out_put_task:
 	put_task_struct(p);
 	put_online_cpus();
 	return retval;
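The mipsmt_sys_sched_setaffinity() rework above follows the usual kernel error-unwind idiom: allocate each cpumask in order, give each allocation its own cleanup label, and free in reverse order, with each failure jumping past the cleanup of resources it never acquired. A generic sketch of that shape (plain malloc/free stand-ins, not the patch's code):

#include <stdlib.h>

/* Shape of the goto-unwind used above: allocate in order, free in
 * reverse; a failure at step N jumps to the label that frees N-1. */
int unwind_shape(void)
{
	void *a, *b, *c;
	int retval = 0;

	a = malloc(16);
	if (!a)
		return -1;		/* nothing to unwind yet */
	b = malloc(16);
	if (!b) {
		retval = -1;
		goto out_free_a;
	}
	c = malloc(16);
	if (!c) {
		retval = -1;
		goto out_free_b;
	}

	/* ... use a, b, c ... */

	free(c);
out_free_b:
	free(b);
out_free_a:
	free(a);
	return retval;
}

The success path falls through the same labels, which is why the patch frees effective_mask, new_mask, and cpus_allowed at out_unlock, out_free_new_mask, and out_free_cpus_allowed respectively.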
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 8bdd6a663c7f..852780868fb4 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -976,7 +976,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 
 	case 2:
 		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
-		break;
+		return;
 
 	case 3:
 		break;