Diffstat (limited to 'arch')
 arch/arm/mach-pxa/include/mach/pxafb.h |   1
 arch/arm/mach-pxa/reset.c              |   7
 arch/arm/mach-pxa/spitz.c              |   4
 arch/powerpc/include/asm/ftrace.h      |  14
 arch/powerpc/include/asm/module.h      |  16
 arch/powerpc/kernel/ftrace.c           | 473
 arch/powerpc/kernel/idle.c             |   5
 arch/powerpc/kernel/module_32.c        |  10
 arch/powerpc/kernel/module_64.c        |  13
 arch/um/include/asm/system.h           |  14
 arch/x86/Kconfig                       |   3
 arch/x86/Kconfig.debug                 |   4
 arch/x86/include/asm/ftrace.h          |  34
 arch/x86/include/asm/thread_info.h     |   2
 arch/x86/include/asm/uaccess.h         |   2
 arch/x86/include/asm/uaccess_32.h      |   8
 arch/x86/include/asm/uaccess_64.h      |   6
 arch/x86/kernel/Makefile               |   6
 arch/x86/kernel/entry_32.S             |  42
 arch/x86/kernel/entry_64.S             |   5
 arch/x86/kernel/ftrace.c               | 312
 arch/x86/kernel/stacktrace.c           |  64
 arch/x86/kernel/vsyscall_64.c          |   3
 arch/x86/lib/usercopy_32.c             |   8
 arch/x86/lib/usercopy_64.c             |   4
 arch/x86/mm/Makefile                   |   3
 arch/x86/mm/fault.c                    |   2
 arch/x86/vdso/vclock_gettime.c         |   3
 28 files changed, 985 insertions(+), 83 deletions(-)
diff --git a/arch/arm/mach-pxa/include/mach/pxafb.h b/arch/arm/mach-pxa/include/mach/pxafb.h
index 8e591118371e..cbda4d35c421 100644
--- a/arch/arm/mach-pxa/include/mach/pxafb.h
+++ b/arch/arm/mach-pxa/include/mach/pxafb.h
@@ -33,6 +33,7 @@
 #define LCD_CONN_TYPE(_x)	((_x) & 0x0f)
 #define LCD_CONN_WIDTH(_x)	(((_x) >> 4) & 0x1f)
 
+#define LCD_TYPE_MASK		0xf
 #define LCD_TYPE_UNKNOWN	0
 #define LCD_TYPE_MONO_STN	1
 #define LCD_TYPE_MONO_DSTN	2
diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c
index 1b2af575c40f..00b2dc2a1074 100644
--- a/arch/arm/mach-pxa/reset.c
+++ b/arch/arm/mach-pxa/reset.c
@@ -90,12 +90,13 @@ void arch_reset(char mode)
 		/* Jump into ROM at address 0 */
 		cpu_reset(0);
 		break;
-	case 'h':
-		do_hw_reset();
-		break;
 	case 'g':
 		do_gpio_reset();
 		break;
+	case 'h':
+	default:
+		do_hw_reset();
+		break;
 	}
 }
 
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index f0a5bbae0b45..3be76ee2bdbf 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -67,6 +67,7 @@
 static unsigned long spitz_pin_config[] __initdata = {
 	/* Chip Selects */
 	GPIO78_nCS_2,	/* SCOOP #2 */
+	GPIO79_nCS_3,	/* NAND */
 	GPIO80_nCS_4,	/* SCOOP #1 */
 
 	/* LCD - 16bpp Active TFT */
@@ -97,10 +98,10 @@ static unsigned long spitz_pin_config[] __initdata = {
 	GPIO51_nPIOW,
 	GPIO85_nPCE_1,
 	GPIO54_nPCE_2,
-	GPIO79_PSKTSEL,
 	GPIO55_nPREG,
 	GPIO56_nPWAIT,
 	GPIO57_nIOIS16,
+	GPIO104_PSKTSEL,
 
 	/* MMC */
 	GPIO32_MMC_CLK,
@@ -686,7 +687,6 @@ static void __init akita_init(void)
 	spitz_pcmcia_config.num_devs = 1;
 	platform_scoop_config = &spitz_pcmcia_config;
 
-	pxa_set_i2c_info(NULL);
 	i2c_register_board_info(0, ARRAY_AND_SIZE(akita_i2c_board_info));
 
 	common_init();
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index b298f7a631e6..e5f2ae8362f7 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -7,7 +7,19 @@
 
 #ifndef __ASSEMBLY__
 extern void _mcount(void);
-#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+	/* reloction of mcount call site is the same as the address */
+	return addr;
+}
+
+struct dyn_arch_ftrace {
+	struct module *mod;
+};
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* __ASSEMBLY__ */
 
 #endif
 
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index e5f14b13ccf0..08454880a2c0 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -34,11 +34,19 @@ struct mod_arch_specific {
 #ifdef __powerpc64__
 	unsigned int stubs_section;	/* Index of stubs section in module */
 	unsigned int toc_section;	/* What section is the TOC? */
-#else
+#ifdef CONFIG_DYNAMIC_FTRACE
+	unsigned long toc;
+	unsigned long tramp;
+#endif
+
+#else /* powerpc64 */
 	/* Indices of PLT sections within module. */
 	unsigned int core_plt_section;
 	unsigned int init_plt_section;
+#ifdef CONFIG_DYNAMIC_FTRACE
+	unsigned long tramp;
 #endif
+#endif /* powerpc64 */
 
 	/* List of BUG addresses, source line numbers and filenames */
 	struct list_head bug_list;
@@ -68,6 +76,12 @@ struct mod_arch_specific {
 # endif /* MODULE */
 #endif
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+# ifdef MODULE
+	asm(".section .ftrace.tramp,\"ax\",@nobits; .align 3; .previous");
+# endif /* MODULE */
+#endif
+
 
 struct exception_table_entry;
 void sort_ex_table(struct exception_table_entry *start,
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index f4b006ed0ab1..3271cd698e4c 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -9,22 +9,30 @@
 
 #include <linux/spinlock.h>
 #include <linux/hardirq.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
 #include <linux/ftrace.h>
 #include <linux/percpu.h>
 #include <linux/init.h>
 #include <linux/list.h>
 
 #include <asm/cacheflush.h>
+#include <asm/code-patching.h>
 #include <asm/ftrace.h>
 
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(fmt , ...)	do { } while (0)
+#endif
 
-static unsigned int ftrace_nop = 0x60000000;
+static unsigned int ftrace_nop = PPC_NOP_INSTR;
 
 #ifdef CONFIG_PPC32
 # define GET_ADDR(addr) addr
 #else
 /* PowerPC64's functions are data that points to the functions */
-# define GET_ADDR(addr) *(unsigned long *)addr
+# define GET_ADDR(addr) (*(unsigned long *)addr)
 #endif
 
 
@@ -33,12 +41,12 @@ static unsigned int ftrace_calc_offset(long ip, long addr)
 	return (int)(addr - ip);
 }
 
-unsigned char *ftrace_nop_replace(void)
+static unsigned char *ftrace_nop_replace(void)
 {
 	return (char *)&ftrace_nop;
 }
 
-unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
 	static unsigned int op;
 
@@ -68,49 +76,434 @@ unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 # define _ASM_PTR	" .long "
 #endif
 
-int
+static int
 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		   unsigned char *new_code)
 {
-	unsigned replaced;
-	unsigned old = *(unsigned *)old_code;
-	unsigned new = *(unsigned *)new_code;
-	int faulted = 0;
+	unsigned char replaced[MCOUNT_INSN_SIZE];
 
 	/*
 	 * Note: Due to modules and __init, code can
 	 *  disappear and change, we need to protect against faulting
-	 *  as well as code changing.
+	 *  as well as code changing. We do this by using the
+	 *  probe_kernel_* functions.
 	 *
 	 * No real locking needed, this code is run through
-	 * kstop_machine.
+	 * kstop_machine, or before SMP starts.
 	 */
-	asm volatile (
-		"1: lwz		%1, 0(%2)\n"
-		"   cmpw	%1, %5\n"
-		"   bne		2f\n"
-		"   stwu	%3, 0(%2)\n"
-		"2:\n"
-		".section .fixup, \"ax\"\n"
-		"3:	li %0, 1\n"
-		"	b 2b\n"
-		".previous\n"
-		".section __ex_table,\"a\"\n"
-		_ASM_ALIGN "\n"
-		_ASM_PTR "1b, 3b\n"
-		".previous"
-		: "=r"(faulted), "=r"(replaced)
-		: "r"(ip), "r"(new),
-		  "0"(faulted), "r"(old)
-		: "memory");
-
-	if (replaced != old && replaced != new)
-		faulted = 2;
-
-	if (!faulted)
-		flush_icache_range(ip, ip + 8);
-
-	return faulted;
+
+	/* read the text we want to modify */
+	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	/* Make sure it is what we expect it to be */
+	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
+		return -EINVAL;
+
+	/* replace the text with the new text */
+	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+		return -EPERM;
+
+	flush_icache_range(ip, ip + 8);
+
+	return 0;
+}
+
+/*
+ * Helper functions that are the same for both PPC64 and PPC32.
+ */
+static int test_24bit_addr(unsigned long ip, unsigned long addr)
+{
+	long diff;
+
+	/*
+	 * Can we get to addr from ip in 24 bits?
+	 *  (26 really, since we mulitply by 4 for 4 byte alignment)
+	 */
+	diff = addr - ip;
+
+	/*
+	 * Return true if diff is less than 1 << 25
+	 *  and greater than -1 << 26.
+	 */
+	return (diff < (1 << 25)) && (diff > (-1 << 26));
+}
+
+static int is_bl_op(unsigned int op)
+{
+	return (op & 0xfc000003) == 0x48000001;
+}
+
+static int test_offset(unsigned long offset)
+{
+	return (offset + 0x2000000 > 0x3ffffff) || ((offset & 3) != 0);
+}
+
+static unsigned long find_bl_target(unsigned long ip, unsigned int op)
+{
+	static int offset;
+
+	offset = (op & 0x03fffffc);
+	/* make it signed */
+	if (offset & 0x02000000)
+		offset |= 0xfe000000;
+
+	return ip + (long)offset;
+}
+
+static unsigned int branch_offset(unsigned long offset)
+{
+	/* return "bl ip+offset" */
+	return 0x48000001 | (offset & 0x03fffffc);
+}
+
+#ifdef CONFIG_PPC64
+static int
+__ftrace_make_nop(struct module *mod,
+		  struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned char replaced[MCOUNT_INSN_SIZE * 2];
+	unsigned int *op = (unsigned *)&replaced;
+	unsigned char jmp[8];
+	unsigned long *ptr = (unsigned long *)&jmp;
+	unsigned long ip = rec->ip;
+	unsigned long tramp;
+	int offset;
+
+	/* read where this goes */
+	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	/* Make sure that that this is still a 24bit jump */
+	if (!is_bl_op(*op)) {
+		printk(KERN_ERR "Not expected bl: opcode is %x\n", *op);
+		return -EINVAL;
+	}
+
+	/* lets find where the pointer goes */
+	tramp = find_bl_target(ip, *op);
+
+	/*
+	 * On PPC64 the trampoline looks like:
+	 * 0x3d, 0x82, 0x00, 0x00,    addis   r12,r2, <high>
+	 * 0x39, 0x8c, 0x00, 0x00,    addi    r12,r12, <low>
+	 *   Where the bytes 2,3,6 and 7 make up the 32bit offset
+	 *   to the TOC that holds the pointer.
+	 *   to jump to.
+	 * 0xf8, 0x41, 0x00, 0x28,    std     r2,40(r1)
+	 * 0xe9, 0x6c, 0x00, 0x20,    ld      r11,32(r12)
+	 *   The actually address is 32 bytes from the offset
+	 *   into the TOC.
+	 * 0xe8, 0x4c, 0x00, 0x28,    ld      r2,40(r12)
+	 */
+
+	DEBUGP("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);
+
+	/* Find where the trampoline jumps to */
+	if (probe_kernel_read(jmp, (void *)tramp, 8)) {
+		printk(KERN_ERR "Failed to read %lx\n", tramp);
+		return -EFAULT;
+	}
+
+	DEBUGP(" %08x %08x",
+	       (unsigned)(*ptr >> 32),
+	       (unsigned)*ptr);
+
+	offset = (unsigned)jmp[2] << 24 |
+		(unsigned)jmp[3] << 16 |
+		(unsigned)jmp[6] << 8 |
+		(unsigned)jmp[7];
+
+	DEBUGP(" %x ", offset);
+
+	/* get the address this jumps too */
+	tramp = mod->arch.toc + offset + 32;
+	DEBUGP("toc: %lx", tramp);
+
+	if (probe_kernel_read(jmp, (void *)tramp, 8)) {
+		printk(KERN_ERR "Failed to read %lx\n", tramp);
+		return -EFAULT;
+	}
+
+	DEBUGP(" %08x %08x\n",
+	       (unsigned)(*ptr >> 32),
+	       (unsigned)*ptr);
+
+	/* This should match what was called */
+	if (*ptr != GET_ADDR(addr)) {
+		printk(KERN_ERR "addr does not match %lx\n", *ptr);
+		return -EINVAL;
+	}
+
+	/*
+	 * We want to nop the line, but the next line is
+	 *  0xe8, 0x41, 0x00, 0x28   ld r2,40(r1)
+	 * This needs to be turned to a nop too.
+	 */
+	if (probe_kernel_read(replaced, (void *)(ip+4), MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	if (*op != 0xe8410028) {
+		printk(KERN_ERR "Next line is not ld! (%08x)\n", *op);
+		return -EINVAL;
+	}
+
+	/*
+	 * Milton Miller pointed out that we can not blindly do nops.
+	 * If a task was preempted when calling a trace function,
+	 * the nops will remove the way to restore the TOC in r2
+	 * and the r2 TOC will get corrupted.
+	 */
+
+	/*
+	 * Replace:
+	 *   bl <tramp>  <==== will be replaced with "b 1f"
+	 *   ld r2,40(r1)
+	 *  1:
+	 */
+	op[0] = 0x48000008;	/* b +8 */
+
+	if (probe_kernel_write((void *)ip, replaced, MCOUNT_INSN_SIZE))
+		return -EPERM;
+
+	return 0;
+}
+
+#else /* !PPC64 */
+static int
+__ftrace_make_nop(struct module *mod,
+		  struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned char replaced[MCOUNT_INSN_SIZE];
+	unsigned int *op = (unsigned *)&replaced;
+	unsigned char jmp[8];
+	unsigned int *ptr = (unsigned int *)&jmp;
+	unsigned long ip = rec->ip;
+	unsigned long tramp;
+	int offset;
+
+	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	/* Make sure that that this is still a 24bit jump */
+	if (!is_bl_op(*op)) {
+		printk(KERN_ERR "Not expected bl: opcode is %x\n", *op);
+		return -EINVAL;
+	}
+
+	/* lets find where the pointer goes */
+	tramp = find_bl_target(ip, *op);
+
+	/*
+	 * On PPC32 the trampoline looks like:
+	 *  lis r11,sym@ha
+	 *  addi r11,r11,sym@l
+	 *  mtctr r11
+	 *  bctr
+	 */
+
+	DEBUGP("ip:%lx jumps to %lx", ip, tramp);
+
+	/* Find where the trampoline jumps to */
+	if (probe_kernel_read(jmp, (void *)tramp, 8)) {
+		printk(KERN_ERR "Failed to read %lx\n", tramp);
+		return -EFAULT;
+	}
+
+	DEBUGP(" %08x %08x ", ptr[0], ptr[1]);
+
+	tramp = (ptr[1] & 0xffff) |
+		((ptr[0] & 0xffff) << 16);
+	if (tramp & 0x8000)
+		tramp -= 0x10000;
+
+	DEBUGP(" %x ", tramp);
+
+	if (tramp != addr) {
+		printk(KERN_ERR
+		       "Trampoline location %08lx does not match addr\n",
+		       tramp);
+		return -EINVAL;
+	}
+
+	op[0] = PPC_NOP_INSTR;
+
+	if (probe_kernel_write((void *)ip, replaced, MCOUNT_INSN_SIZE))
+		return -EPERM;
+
+	return 0;
+}
+#endif /* PPC64 */
+
+int ftrace_make_nop(struct module *mod,
+		    struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned char *old, *new;
+	unsigned long ip = rec->ip;
+
+	/*
+	 * If the calling address is more that 24 bits away,
+	 * then we had to use a trampoline to make the call.
+	 * Otherwise just update the call site.
+	 */
+	if (test_24bit_addr(ip, addr)) {
+		/* within range */
+		old = ftrace_call_replace(ip, addr);
+		new = ftrace_nop_replace();
+		return ftrace_modify_code(ip, old, new);
+	}
+
+	/*
+	 * Out of range jumps are called from modules.
+	 * We should either already have a pointer to the module
+	 * or it has been passed in.
+	 */
+	if (!rec->arch.mod) {
+		if (!mod) {
+			printk(KERN_ERR "No module loaded addr=%lx\n",
+			       addr);
+			return -EFAULT;
+		}
+		rec->arch.mod = mod;
+	} else if (mod) {
+		if (mod != rec->arch.mod) {
+			printk(KERN_ERR
+			       "Record mod %p not equal to passed in mod %p\n",
+			       rec->arch.mod, mod);
+			return -EINVAL;
+		}
+		/* nothing to do if mod == rec->arch.mod */
+	} else
+		mod = rec->arch.mod;
+
+	return __ftrace_make_nop(mod, rec, addr);
+
+}
+
+#ifdef CONFIG_PPC64
+static int
+__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned char replaced[MCOUNT_INSN_SIZE * 2];
+	unsigned int *op = (unsigned *)&replaced;
+	unsigned long ip = rec->ip;
+	unsigned long offset;
+
+	/* read where this goes */
+	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE * 2))
+		return -EFAULT;
+
+	/*
+	 * It should be pointing to two nops or
+	 *   b +8; ld r2,40(r1)
+	 */
+	if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) &&
+	    ((op[0] != PPC_NOP_INSTR) || (op[1] != PPC_NOP_INSTR))) {
+		printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]);
+		return -EINVAL;
+	}
+
+	/* If we never set up a trampoline to ftrace_caller, then bail */
+	if (!rec->arch.mod->arch.tramp) {
+		printk(KERN_ERR "No ftrace trampoline\n");
+		return -EINVAL;
+	}
+
+	/* now calculate a jump to the ftrace caller trampoline */
+	offset = rec->arch.mod->arch.tramp - ip;
+
+	if (test_offset(offset)) {
+		printk(KERN_ERR "REL24 %li out of range!\n",
+		       (long int)offset);
+		return -EINVAL;
+	}
+
+	/* Set to "bl addr" */
+	op[0] = branch_offset(offset);
+	/* ld r2,40(r1) */
+	op[1] = 0xe8410028;
+
+	DEBUGP("write to %lx\n", rec->ip);
+
+	if (probe_kernel_write((void *)ip, replaced, MCOUNT_INSN_SIZE * 2))
+		return -EPERM;
+
+	return 0;
+}
+#else
+static int
+__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned char replaced[MCOUNT_INSN_SIZE];
+	unsigned int *op = (unsigned *)&replaced;
+	unsigned long ip = rec->ip;
+	unsigned long offset;
+
+	/* read where this goes */
+	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	/* It should be pointing to a nop */
+	if (op[0] != PPC_NOP_INSTR) {
+		printk(KERN_ERR "Expected NOP but have %x\n", op[0]);
+		return -EINVAL;
+	}
+
+	/* If we never set up a trampoline to ftrace_caller, then bail */
+	if (!rec->arch.mod->arch.tramp) {
+		printk(KERN_ERR "No ftrace trampoline\n");
+		return -EINVAL;
+	}
+
+	/* now calculate a jump to the ftrace caller trampoline */
+	offset = rec->arch.mod->arch.tramp - ip;
+
+	if (test_offset(offset)) {
+		printk(KERN_ERR "REL24 %li out of range!\n",
+		       (long int)offset);
+		return -EINVAL;
+	}
+
+	/* Set to "bl addr" */
+	op[0] = branch_offset(offset);
+
+	DEBUGP("write to %lx\n", rec->ip);
+
+	if (probe_kernel_write((void *)ip, replaced, MCOUNT_INSN_SIZE))
+		return -EPERM;
+
+	return 0;
+}
+#endif /* CONFIG_PPC64 */
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned char *old, *new;
+	unsigned long ip = rec->ip;
+
+	/*
+	 * If the calling address is more that 24 bits away,
+	 * then we had to use a trampoline to make the call.
+	 * Otherwise just update the call site.
+	 */
+	if (test_24bit_addr(ip, addr)) {
+		/* within range */
+		old = ftrace_nop_replace();
+		new = ftrace_call_replace(ip, addr);
+		return ftrace_modify_code(ip, old, new);
+	}
+
+	/*
+	 * Out of range jumps are called from modules.
+	 * Being that we are converting from nop, it had better
+	 * already have a module defined.
+	 */
+	if (!rec->arch.mod) {
+		printk(KERN_ERR "No module loaded\n");
+		return -EINVAL;
+	}
+
+	return __ftrace_make_call(rec, addr);
 }
 
 int ftrace_update_ftrace_func(ftrace_func_t func)
115 508
116int ftrace_update_ftrace_func(ftrace_func_t func) 509int ftrace_update_ftrace_func(ftrace_func_t func)
@@ -128,10 +521,10 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 
 int __init ftrace_dyn_arch_init(void *data)
 {
-	/* This is running in kstop_machine */
+	/* caller expects data to be zero */
+	unsigned long *p = data;
 
-	ftrace_mcount_set(data);
+	*p = 0;
 
 	return 0;
 }
-
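
A note on the arithmetic in the helpers above: a powerpc bl instruction encodes a signed, word-aligned 26-bit displacement (hence test_24bit_addr()'s roughly +/-32 MB window), with the primary opcode in the top six bits and the link bit at the bottom. Anything outside that window has to go through a module trampoline, which is what __ftrace_make_nop() and __ftrace_make_call() handle. The following standalone userspace sketch (not part of the patch) round-trips a displacement through the same masks used by is_bl_op(), branch_offset() and find_bl_target():

#include <stdio.h>

static int is_bl_op(unsigned int op)
{
	/* primary opcode 18 in the top 6 bits, AA=0, LK=1 */
	return (op & 0xfc000003) == 0x48000001;
}

static unsigned int branch_offset(unsigned long offset)
{
	/* "bl ip+offset": the LI field holds offset bits 2..25 */
	return 0x48000001 | (offset & 0x03fffffc);
}

static int find_bl_offset(unsigned int op)
{
	int offset = op & 0x03fffffc;

	/* make it signed, exactly as find_bl_target() does */
	if (offset & 0x02000000)
		offset |= 0xfe000000;
	return offset;
}

int main(void)
{
	int disp = -0x1234;	/* word-aligned, well inside +/-32 MB */
	unsigned int op = branch_offset((unsigned long)(long)disp);

	/* prints op=0x4bffedcd is_bl=1 decoded=-4660 */
	printf("op=0x%08x is_bl=%d decoded=%d\n",
	       op, is_bl_op(op), find_bl_offset(op));
	return 0;
}
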
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 31982d05d81a..88d9c1d5e5fb 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -69,10 +69,15 @@ void cpu_idle(void)
 	smp_mb();
 	local_irq_disable();
 
+	/* Don't trace irqs off for idle */
+	stop_critical_timings();
+
 	/* check again after disabling irqs */
 	if (!need_resched() && !cpu_should_die())
 		ppc_md.power_save();
 
+	start_critical_timings();
+
 	local_irq_enable();
 	set_thread_flag(TIF_POLLING_NRFLAG);
 
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index 2df91a03462a..f832773fc28e 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -22,6 +22,7 @@
 #include <linux/fs.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
+#include <linux/ftrace.h>
 #include <linux/cache.h>
 #include <linux/bug.h>
 #include <linux/sort.h>
@@ -53,6 +54,9 @@ static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
 			r_addend = rela[i].r_addend;
 		}
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+	_count_relocs++;	/* add one for ftrace_caller */
+#endif
 	return _count_relocs;
 }
 
@@ -306,5 +310,11 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 			return -ENOEXEC;
 		}
 	}
+#ifdef CONFIG_DYNAMIC_FTRACE
+	module->arch.tramp =
+		do_plt_call(module->module_core,
+			    (unsigned long)ftrace_caller,
+			    sechdrs, module);
+#endif
 	return 0;
 }
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 1af2377e4992..8992b031a7b6 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -20,6 +20,7 @@
 #include <linux/moduleloader.h>
 #include <linux/err.h>
 #include <linux/vmalloc.h>
+#include <linux/ftrace.h>
 #include <linux/bug.h>
 #include <asm/module.h>
 #include <asm/firmware.h>
@@ -163,6 +164,11 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
 		}
 	}
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+	/* make the trampoline to the ftrace_caller */
+	relocs++;
+#endif
+
 	DEBUGP("Looks like a total of %lu stubs, max\n", relocs);
 	return relocs * sizeof(struct ppc64_stub_entry);
 }
@@ -441,5 +447,12 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 		}
 	}
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+	me->arch.toc = my_r2(sechdrs, me);
+	me->arch.tramp = stub_for_addr(sechdrs,
+				       (unsigned long)ftrace_caller,
+				       me);
+#endif
+
 	return 0;
 }
diff --git a/arch/um/include/asm/system.h b/arch/um/include/asm/system.h
index 753346e2cdfd..ae5f94d6317d 100644
--- a/arch/um/include/asm/system.h
+++ b/arch/um/include/asm/system.h
@@ -11,21 +11,21 @@ extern int get_signals(void);
 extern void block_signals(void);
 extern void unblock_signals(void);
 
-#define local_save_flags(flags) do { typecheck(unsigned long, flags); \
+#define raw_local_save_flags(flags) do { typecheck(unsigned long, flags); \
 				     (flags) = get_signals(); } while(0)
-#define local_irq_restore(flags) do { typecheck(unsigned long, flags); \
+#define raw_local_irq_restore(flags) do { typecheck(unsigned long, flags); \
 				      set_signals(flags); } while(0)
 
-#define local_irq_save(flags) do { local_save_flags(flags); \
-				   local_irq_disable(); } while(0)
+#define raw_local_irq_save(flags) do { raw_local_save_flags(flags); \
+				       raw_local_irq_disable(); } while(0)
 
-#define local_irq_enable() unblock_signals()
-#define local_irq_disable() block_signals()
+#define raw_local_irq_enable() unblock_signals()
+#define raw_local_irq_disable() block_signals()
 
 #define irqs_disabled()                 \
 ({                                      \
 	unsigned long flags;            \
-	local_save_flags(flags);        \
+	raw_local_save_flags(flags);    \
 	(flags == 0);                   \
 })
 
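
The rename to raw_* matters because the irq tracing machinery hooks the generic local_irq_* names: with CONFIG_TRACE_IRQFLAGS the generic layer builds local_irq_disable()/local_irq_enable() on top of the raw arch ops and inserts trace_hardirqs_off()/trace_hardirqs_on() calls, so an arch that claims the unprefixed names itself would bypass the hooks. A minimal model of that layering (plain C sketch, not the kernel's include/linux/irqflags.h):

#include <stdio.h>

static int signals_blocked;

/* arch layer: the raw operations (um maps these to signal blocking) */
#define raw_local_irq_disable()	(signals_blocked = 1)
#define raw_local_irq_enable()	(signals_blocked = 0)

/* tracer hooks, standing in for the irqsoff tracer's entry points */
static void trace_hardirqs_off(void) { printf("irqs off\n"); }
static void trace_hardirqs_on(void)  { printf("irqs on\n"); }

/* generic layer: what the rest of the kernel calls */
#define local_irq_disable() \
	do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
#define local_irq_enable() \
	do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)

int main(void)
{
	local_irq_disable();
	printf("blocked=%d\n", signals_blocked);
	local_irq_enable();
	printf("blocked=%d\n", signals_blocked);
	return 0;
}
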
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7b7d2764a215..352f63df1d80 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -29,11 +29,14 @@ config X86
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_TRACER
+	select HAVE_FUNCTION_RET_TRACER if X86_32
+	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 	select HAVE_ARCH_KGDB if !X86_VOYAGER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_GENERIC_DMA_COHERENT if X86_32
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
+	select USER_STACKTRACE_SUPPORT
 
 config ARCH_DEFCONFIG
 	string
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 2a3dfbd5e677..fa013f529b74 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -186,14 +186,10 @@ config IOMMU_LEAK
 	  Add a simple leak tracer to the IOMMU code. This is useful when you
 	  are debugging a buggy device driver that leaks IOMMU mappings.
 
-config MMIOTRACE_HOOKS
-	bool
-
 config MMIOTRACE
 	bool "Memory mapped IO tracing"
 	depends on DEBUG_KERNEL && PCI
 	select TRACING
-	select MMIOTRACE_HOOKS
 	help
 	  Mmiotrace traces Memory Mapped I/O access and is meant for
 	  debugging and reverse engineering. It is called from the ioremap
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 9e8bc29b8b17..754a3e082f94 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -17,8 +17,40 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
  */
 	return addr - 1;
 }
-#endif
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+struct dyn_arch_ftrace {
+	/* No extra data needed for x86 */
+};
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
+#ifdef CONFIG_FUNCTION_RET_TRACER
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Stack of return addresses for functions
+ * of a thread.
+ * Used in struct thread_info
+ */
+struct ftrace_ret_stack {
+	unsigned long ret;
+	unsigned long func;
+	unsigned long long calltime;
+};
+
+/*
+ * Primary handler of a function return.
+ * It relays on ftrace_return_to_handler.
+ * Defined in entry32.S
+ */
+extern void return_to_handler(void);
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_FUNCTION_RET_TRACER */
+
 #endif /* _ASM_X86_FTRACE_H */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index e44d379faad2..0921b4018c11 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -20,6 +20,8 @@
 struct task_struct;
 struct exec_domain;
 #include <asm/processor.h>
+#include <asm/ftrace.h>
+#include <asm/atomic.h>
 
 struct thread_info {
 	struct task_struct	*task;		/* main task structure */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 35c54921b2e4..99192bb55a53 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -157,6 +157,7 @@ extern int __get_user_bad(void);
 	int __ret_gu;							\
 	unsigned long __val_gu;						\
 	__chk_user_ptr(ptr);						\
+	might_fault();							\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
 		__get_user_x(1, __ret_gu, __val_gu, ptr);		\
@@ -241,6 +242,7 @@ extern void __put_user_8(void);
 	int __ret_pu;							\
 	__typeof__(*(ptr)) __pu_val;					\
 	__chk_user_ptr(ptr);						\
+	might_fault();							\
 	__pu_val = x;							\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
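
The might_sleep() to might_fault() conversions in the user-copy paths below and in the lib/usercopy files are more than a rename: a user access can take a page fault, and servicing that fault may both sleep and acquire the faulting task's mmap_sem, which lockdep can then check at every annotated call site. Roughly, and only as a sketch of the semantics rather than the kernel's exact definition (the real helper lives in mm/memory.c and is active under CONFIG_PROVE_LOCKING):

void might_fault(void)
{
	might_sleep();		/* a fault may block on I/O */
	if (current->mm)	/* kernel threads have no user mm */
		might_lock_read(&current->mm->mmap_sem);
}
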
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index d095a3aeea1b..5e06259e90e5 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -82,8 +82,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 static __always_inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	return __copy_to_user_inatomic(to, from, n);
 }
 
 static __always_inline unsigned long
@@ -137,7 +137,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
@@ -159,7 +159,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 static __always_inline unsigned long __copy_from_user_nocache(void *to,
 				const void __user *from, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index f8cfd00db450..84210c479fca 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -29,6 +29,8 @@ static __always_inline __must_check
 int __copy_from_user(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
+
+	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
@@ -71,6 +73,8 @@ static __always_inline __must_check
 int __copy_to_user(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
+
+	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst, src, size);
 	switch (size) {
@@ -113,6 +117,8 @@ static __always_inline __must_check
 int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
+
+	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst,
 					 (__force void *)src, size);
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index e489ff9cb3e2..1d8ed95da846 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -14,6 +14,11 @@ CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
 endif
 
+ifdef CONFIG_FUNCTION_RET_TRACER
+# Don't trace __switch_to() but let it for function tracer
+CFLAGS_REMOVE_process_32.o = -pg
+endif
+
 #
 # vsyscalls (which work on the user stack) should have
 # no stack-protector checks:
@@ -65,6 +70,7 @@ obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
 obj-$(CONFIG_X86_IO_APIC)	+= io_apic.o
 obj-$(CONFIG_X86_REBOOTFIXUPS)	+= reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
+obj-$(CONFIG_FUNCTION_RET_TRACER)	+= ftrace.o
 obj-$(CONFIG_KEXEC)		+= machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC)		+= relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump_$(BITS).o
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 28b597ef9ca1..74defe21ba42 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1157,6 +1157,9 @@ ENTRY(mcount)
 END(mcount)
 
 ENTRY(ftrace_caller)
+	cmpl $0, function_trace_stop
+	jne  ftrace_stub
+
 	pushl %eax
 	pushl %ecx
 	pushl %edx
@@ -1180,8 +1183,15 @@ END(ftrace_caller)
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 
 ENTRY(mcount)
+	cmpl $0, function_trace_stop
+	jne  ftrace_stub
+
 	cmpl $ftrace_stub, ftrace_trace_function
 	jnz trace
+#ifdef CONFIG_FUNCTION_RET_TRACER
+	cmpl $ftrace_stub, ftrace_function_return
+	jnz ftrace_return_caller
+#endif
 .globl ftrace_stub
 ftrace_stub:
 	ret
@@ -1200,12 +1210,42 @@ trace:
 	popl %edx
 	popl %ecx
 	popl %eax
-
 	jmp ftrace_stub
 END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_TRACER */
 
+#ifdef CONFIG_FUNCTION_RET_TRACER
+ENTRY(ftrace_return_caller)
+	cmpl $0, function_trace_stop
+	jne ftrace_stub
+
+	pushl %eax
+	pushl %ecx
+	pushl %edx
+	movl 0xc(%esp), %edx
+	lea 0x4(%ebp), %eax
+	call prepare_ftrace_return
+	popl %edx
+	popl %ecx
+	popl %eax
+	ret
+END(ftrace_return_caller)
+
+.globl return_to_handler
+return_to_handler:
+	pushl $0
+	pushl %eax
+	pushl %ecx
+	pushl %edx
+	call ftrace_return_to_handler
+	movl %eax, 0xc(%esp)
+	popl %edx
+	popl %ecx
+	popl %eax
+	ret
+#endif
+
 .section .rodata,"a"
 #include "syscall_table_32.S"
 
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index b86f332c96a6..08aa6b10933c 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -68,6 +68,8 @@ ENTRY(mcount)
 END(mcount)
 
 ENTRY(ftrace_caller)
+	cmpl $0, function_trace_stop
+	jne  ftrace_stub
 
 	/* taken from glibc */
 	subq $0x38, %rsp
@@ -103,6 +105,9 @@ END(ftrace_caller)
 
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 ENTRY(mcount)
+	cmpl $0, function_trace_stop
+	jne  ftrace_stub
+
 	cmpq $ftrace_stub, ftrace_trace_function
 	jnz trace
 .globl ftrace_stub
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 50ea0ac8c9bf..bb137f7297ed 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -14,14 +14,17 @@
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
 #include <linux/percpu.h>
+#include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/list.h>
 
 #include <asm/ftrace.h>
+#include <linux/ftrace.h>
 #include <asm/nops.h>
+#include <asm/nmi.h>
 
 
-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
+#ifdef CONFIG_DYNAMIC_FTRACE
 
 union ftrace_code_union {
 	char code[MCOUNT_INSN_SIZE];
@@ -31,18 +34,12 @@ union ftrace_code_union {
 	} __attribute__((packed));
 };
 
-
 static int ftrace_calc_offset(long ip, long addr)
 {
 	return (int)(addr - ip);
 }
 
-unsigned char *ftrace_nop_replace(void)
-{
-	return ftrace_nop;
-}
-
-unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
 	static union ftrace_code_union calc;
 
@@ -56,7 +53,143 @@ unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 	return calc.code;
 }
 
-int
+/*
+ * Modifying code must take extra care. On an SMP machine, if
+ * the code being modified is also being executed on another CPU
+ * that CPU will have undefined results and possibly take a GPF.
+ * We use kstop_machine to stop other CPUS from exectuing code.
+ * But this does not stop NMIs from happening. We still need
+ * to protect against that. We separate out the modification of
+ * the code to take care of this.
+ *
+ * Two buffers are added: An IP buffer and a "code" buffer.
+ *
+ * 1) Put the instruction pointer into the IP buffer
+ *    and the new code into the "code" buffer.
+ * 2) Set a flag that says we are modifying code
+ * 3) Wait for any running NMIs to finish.
+ * 4) Write the code
+ * 5) clear the flag.
+ * 6) Wait for any running NMIs to finish.
+ *
+ * If an NMI is executed, the first thing it does is to call
+ * "ftrace_nmi_enter". This will check if the flag is set to write
+ * and if it is, it will write what is in the IP and "code" buffers.
+ *
+ * The trick is, it does not matter if everyone is writing the same
+ * content to the code location. Also, if a CPU is executing code
+ * it is OK to write to that code location if the contents being written
+ * are the same as what exists.
+ */
+
+static atomic_t in_nmi = ATOMIC_INIT(0);
+static int mod_code_status;		/* holds return value of text write */
+static int mod_code_write;		/* set when NMI should do the write */
+static void *mod_code_ip;		/* holds the IP to write to */
+static void *mod_code_newcode;		/* holds the text to write to the IP */
+
+static unsigned nmi_wait_count;
+static atomic_t nmi_update_count = ATOMIC_INIT(0);
+
+int ftrace_arch_read_dyn_info(char *buf, int size)
+{
+	int r;
+
+	r = snprintf(buf, size, "%u %u",
+		     nmi_wait_count,
+		     atomic_read(&nmi_update_count));
+	return r;
+}
+
+static void ftrace_mod_code(void)
+{
+	/*
+	 * Yes, more than one CPU process can be writing to mod_code_status.
+	 *    (and the code itself)
+	 * But if one were to fail, then they all should, and if one were
+	 * to succeed, then they all should.
+	 */
+	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
+					     MCOUNT_INSN_SIZE);
+
+}
+
+void ftrace_nmi_enter(void)
+{
+	atomic_inc(&in_nmi);
+	/* Must have in_nmi seen before reading write flag */
+	smp_mb();
+	if (mod_code_write) {
+		ftrace_mod_code();
+		atomic_inc(&nmi_update_count);
+	}
+}
+
+void ftrace_nmi_exit(void)
+{
+	/* Finish all executions before clearing in_nmi */
+	smp_wmb();
+	atomic_dec(&in_nmi);
+}
+
+static void wait_for_nmi(void)
+{
+	int waited = 0;
+
+	while (atomic_read(&in_nmi)) {
+		waited = 1;
+		cpu_relax();
+	}
+
+	if (waited)
+		nmi_wait_count++;
+}
+
+static int
+do_ftrace_mod_code(unsigned long ip, void *new_code)
+{
+	mod_code_ip = (void *)ip;
+	mod_code_newcode = new_code;
+
+	/* The buffers need to be visible before we let NMIs write them */
+	smp_wmb();
+
+	mod_code_write = 1;
+
+	/* Make sure write bit is visible before we wait on NMIs */
+	smp_mb();
+
+	wait_for_nmi();
+
+	/* Make sure all running NMIs have finished before we write the code */
+	smp_mb();
+
+	ftrace_mod_code();
+
+	/* Make sure the write happens before clearing the bit */
+	smp_wmb();
+
+	mod_code_write = 0;
+
+	/* make sure NMIs see the cleared bit */
+	smp_mb();
+
+	wait_for_nmi();
+
+	return mod_code_status;
+}
+
+
+
+
+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
+
+static unsigned char *ftrace_nop_replace(void)
+{
+	return ftrace_nop;
+}
+
+static int
 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		   unsigned char *new_code)
 {
@@ -81,7 +214,7 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		return -EINVAL;
 
 	/* replace the text with the new text */
-	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+	if (do_ftrace_mod_code(ip, new_code))
 		return -EPERM;
 
 	sync_core();
@@ -89,6 +222,29 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 	return 0;
 }
 
+int ftrace_make_nop(struct module *mod,
+		    struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned char *new, *old;
+	unsigned long ip = rec->ip;
+
+	old = ftrace_call_replace(ip, addr);
+	new = ftrace_nop_replace();
+
+	return ftrace_modify_code(rec->ip, old, new);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned char *new, *old;
+	unsigned long ip = rec->ip;
+
+	old = ftrace_nop_replace();
+	new = ftrace_call_replace(ip, addr);
+
+	return ftrace_modify_code(rec->ip, old, new);
+}
+
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
 	unsigned long ip = (unsigned long)(&ftrace_call);
@@ -165,3 +321,139 @@ int __init ftrace_dyn_arch_init(void *data)
 
 	return 0;
 }
+#endif
+
+#ifdef CONFIG_FUNCTION_RET_TRACER
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+
+/*
+ * These functions are picked from those used on
+ * this page for dynamic ftrace. They have been
+ * simplified to ignore all traces in NMI context.
+ */
+static atomic_t in_nmi;
+
+void ftrace_nmi_enter(void)
+{
+	atomic_inc(&in_nmi);
+}
+
+void ftrace_nmi_exit(void)
+{
+	atomic_dec(&in_nmi);
+}
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
+/* Add a function return address to the trace stack on thread info.*/
+static int push_return_trace(unsigned long ret, unsigned long long time,
+				unsigned long func)
+{
+	int index;
+
+	if (!current->ret_stack)
+		return -EBUSY;
+
+	/* The return trace stack is full */
+	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+		atomic_inc(&current->trace_overrun);
+		return -EBUSY;
+	}
+
+	index = ++current->curr_ret_stack;
+	barrier();
+	current->ret_stack[index].ret = ret;
+	current->ret_stack[index].func = func;
+	current->ret_stack[index].calltime = time;
+
+	return 0;
+}
+
+/* Retrieve a function return address to the trace stack on thread info.*/
+static void pop_return_trace(unsigned long *ret, unsigned long long *time,
+				unsigned long *func, unsigned long *overrun)
+{
+	int index;
+
+	index = current->curr_ret_stack;
+	*ret = current->ret_stack[index].ret;
+	*func = current->ret_stack[index].func;
+	*time = current->ret_stack[index].calltime;
+	*overrun = atomic_read(&current->trace_overrun);
+	current->curr_ret_stack--;
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+	struct ftrace_retfunc trace;
+	pop_return_trace(&trace.ret, &trace.calltime, &trace.func,
+			&trace.overrun);
+	trace.rettime = cpu_clock(raw_smp_processor_id());
+	ftrace_function_return(&trace);
+
+	return trace.ret;
+}
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+	unsigned long old;
+	unsigned long long calltime;
+	int faulted;
+	unsigned long return_hooker = (unsigned long)
+				&return_to_handler;
+
+	/* Nmi's are currently unsupported */
+	if (atomic_read(&in_nmi))
+		return;
+
+	/*
+	 * Protect against fault, even if it shouldn't
+	 * happen. This tool is too much intrusive to
+	 * ignore such a protection.
+	 */
+	asm volatile(
+		"1: movl (%[parent_old]), %[old]\n"
+		"2: movl %[return_hooker], (%[parent_replaced])\n"
+		"   movl $0, %[faulted]\n"
+
+		".section .fixup, \"ax\"\n"
+		"3: movl $1, %[faulted]\n"
+		".previous\n"
+
+		".section __ex_table, \"a\"\n"
+		"   .long 1b, 3b\n"
+		"   .long 2b, 3b\n"
+		".previous\n"
+
+		: [parent_replaced] "=r" (parent), [old] "=r" (old),
+		  [faulted] "=r" (faulted)
+		: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
+		: "memory"
+	);
+
+	if (WARN_ON(faulted)) {
+		unregister_ftrace_return();
+		return;
+	}
+
+	if (WARN_ON(!__kernel_text_address(old))) {
+		unregister_ftrace_return();
+		*parent = old;
+		return;
+	}
+
+	calltime = cpu_clock(raw_smp_processor_id());
+
+	if (push_return_trace(old, calltime, self_addr) == -EBUSY)
+		*parent = old;
+}
+
+#endif /* CONFIG_FUNCTION_RET_TRACER */
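
The write-in-NMI handshake that ftrace_nmi_enter() and do_ftrace_mod_code() implement above can be hard to see through the barrier comments. Below is a distilled userspace model of the same six numbered steps, using C11 sequentially consistent atomics where the kernel code uses atomic_t plus smp_mb()/smp_wmb(); it illustrates the protocol and is not kernel code:

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

static _Atomic int in_nmi;
static _Atomic int mod_code_write;
static unsigned char *mod_code_ip;
static const unsigned char *mod_code_newcode;

enum { INSN_SIZE = 5 };		/* MCOUNT_INSN_SIZE on x86 */

static void apply_patch(void)
{
	/* duplicate writes of identical bytes are harmless by design */
	memcpy(mod_code_ip, mod_code_newcode, INSN_SIZE);
}

/* what an NMI handler would run on entry/exit */
static void nmi_enter(void)
{
	atomic_fetch_add(&in_nmi, 1);	/* seen before the flag is read */
	if (atomic_load(&mod_code_write))
		apply_patch();
}

static void nmi_exit(void)
{
	atomic_fetch_sub(&in_nmi, 1);
}

static void modify_code(unsigned char *ip, const unsigned char *code)
{
	mod_code_ip = ip;			/* 1) publish the buffers */
	mod_code_newcode = code;
	atomic_store(&mod_code_write, 1);	/* 2) raise the flag */
	while (atomic_load(&in_nmi))		/* 3) wait out running NMIs */
		;
	apply_patch();				/* 4) write the code */
	atomic_store(&mod_code_write, 0);	/* 5) clear the flag */
	while (atomic_load(&in_nmi))		/* 6) wait again */
		;
}

int main(void)
{
	unsigned char text[INSN_SIZE] = { 0x90, 0x90, 0x90, 0x90, 0x90 };
	const unsigned char call[INSN_SIZE] = { 0xe8, 0, 0, 0, 0 };

	modify_code(text, call);	/* writer path, steps 1-6 */
	nmi_enter();			/* an NMI arriving now sees the */
	nmi_exit();			/* flag clear and does nothing  */
	printf("first byte now %#x\n", text[0]);
	return 0;
}
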
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index a03e7f6d90c3..10786af95545 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -6,6 +6,7 @@
 #include <linux/sched.h>
 #include <linux/stacktrace.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 #include <asm/stacktrace.h>
 
 static void save_stack_warning(void *data, char *msg)
@@ -83,3 +84,66 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 	trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
+
+struct stack_frame {
+	const void __user	*next_fp;
+	unsigned long		ret_addr;
+};
+
+static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
+{
+	int ret;
+
+	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
+		return 0;
+
+	ret = 1;
+	pagefault_disable();
+	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
+		ret = 0;
+	pagefault_enable();
+
+	return ret;
+}
+
+static inline void __save_stack_trace_user(struct stack_trace *trace)
+{
+	const struct pt_regs *regs = task_pt_regs(current);
+	const void __user *fp = (const void __user *)regs->bp;
+
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = regs->ip;
+
+	while (trace->nr_entries < trace->max_entries) {
+		struct stack_frame frame;
+
+		frame.next_fp = NULL;
+		frame.ret_addr = 0;
+		if (!copy_stack_frame(fp, &frame))
+			break;
+		if ((unsigned long)fp < regs->sp)
+			break;
+		if (frame.ret_addr) {
+			trace->entries[trace->nr_entries++] =
+				frame.ret_addr;
+		}
+		if (fp == frame.next_fp)
+			break;
+		fp = frame.next_fp;
+	}
+}
+
+void save_stack_trace_user(struct stack_trace *trace)
+{
+	/*
+	 * Trace user stack if we are not a kernel thread
+	 */
+	if (current->mm) {
+		__save_stack_trace_user(trace);
+	}
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+
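
save_stack_trace_user() works only because the x86 frame-pointer convention lays each frame out exactly as struct stack_frame above: the saved frame pointer followed by the return address. A userspace analogue (a sketch; it assumes the binary keeps frame pointers, e.g. built with -fno-omit-frame-pointer) can walk its own stack the same way:

#include <stdio.h>

struct stack_frame {
	const struct stack_frame *next_fp;
	unsigned long ret_addr;
};

static void __attribute__((noinline)) dump_stack_user(void)
{
	const struct stack_frame *fp = __builtin_frame_address(0);
	int depth = 0;

	while (fp && fp->ret_addr && depth++ < 16) {
		printf("  ret=%#lx\n", fp->ret_addr);
		if (fp->next_fp <= fp)	/* older frames sit higher */
			break;
		fp = fp->next_fp;
	}
}

static void __attribute__((noinline)) b(void) { dump_stack_user(); }
static void __attribute__((noinline)) a(void) { b(); }

int main(void)
{
	a();
	return 0;
}

The same sanity checks appear in the kernel version: the walk stops when the frame pointer fails to move monotonically or when a read faults, since a traced task's user stack contents cannot be trusted.
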
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 0b8b6690a86d..6f3d3d4cd973 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -17,6 +17,9 @@
  *  want per guest time just set the kernel.vsyscall64 sysctl to 0.
  */
 
+/* Disable profiling for userspace code: */
+#define DISABLE_BRANCH_PROFILING
+
 #include <linux/time.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 9e68075544f6..4a20b2f9a381 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -39,7 +39,7 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon
 #define __do_strncpy_from_user(dst, src, count, res)			   \
 do {									   \
 	int __d0, __d1, __d2;						   \
-	might_sleep();							   \
+	might_fault();							   \
 	__asm__ __volatile__(						   \
 		"	testl %1,%1\n"					   \
 		"	jz 2f\n"					   \
@@ -126,7 +126,7 @@ EXPORT_SYMBOL(strncpy_from_user);
 #define __do_clear_user(addr,size)					\
 do {									\
 	int __d0;							\
-	might_sleep();							\
+	might_fault();							\
 	__asm__ __volatile__(						\
 		"0:	rep; stosl\n"					\
 		"	movl %2,%0\n"					\
@@ -155,7 +155,7 @@ do { \
 unsigned long
 clear_user(void __user *to, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	if (access_ok(VERIFY_WRITE, to, n))
 		__do_clear_user(to, n);
 	return n;
@@ -197,7 +197,7 @@ long strnlen_user(const char __user *s, long n)
 	unsigned long mask = -__addr_ok(s);
 	unsigned long res, tmp;
 
-	might_sleep();
+	might_fault();
 
 	__asm__ __volatile__(
 		"	testl %0, %0\n"
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index f4df6e7c718b..64d6c84e6353 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -15,7 +15,7 @@
 #define __do_strncpy_from_user(dst,src,count,res)			   \
 do {									   \
 	long __d0, __d1, __d2;						   \
-	might_sleep();							   \
+	might_fault();							   \
 	__asm__ __volatile__(						   \
 		"	testq %1,%1\n"					   \
 		"	jz 2f\n"					   \
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(strncpy_from_user);
 unsigned long __clear_user(void __user *addr, unsigned long size)
 {
 	long __d0;
-	might_sleep();
+	might_fault();
 	/* no memory constraint because it doesn't change any memory gcc knows
 	   about */
 	asm volatile(
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index fea4565ff576..d8cc96a2738f 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -8,9 +8,8 @@ obj-$(CONFIG_X86_PTDUMP) += dump_pagetables.o
 
 obj-$(CONFIG_HIGHMEM)		+= highmem_32.o
 
-obj-$(CONFIG_MMIOTRACE_HOOKS)	+= kmmio.o
 obj-$(CONFIG_MMIOTRACE)		+= mmiotrace.o
-mmiotrace-y			:= pf_in.o mmio-mod.o
+mmiotrace-y			:= kmmio.o pf_in.o mmio-mod.o
 obj-$(CONFIG_MMIOTRACE_TEST)	+= testmmiotrace.o
 
 obj-$(CONFIG_NUMA)		+= numa_$(BITS).o
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 31e8730fa246..4152d3c3b138 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -53,7 +53,7 @@
 
 static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
 {
-#ifdef CONFIG_MMIOTRACE_HOOKS
+#ifdef CONFIG_MMIOTRACE
 	if (unlikely(is_kmmio_active()))
 		if (kmmio_handler(regs, addr) == 1)
 			return -1;
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 1ef0f90813d6..d9d35824c56f 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -9,6 +9,9 @@
  *  Also alternative() doesn't work.
  */
 
+/* Disable profiling for userspace code: */
+#define DISABLE_BRANCH_PROFILING
+
 #include <linux/kernel.h>
 #include <linux/posix-timers.h>
 #include <linux/time.h>