author     Robert Richter <robert.richter@amd.com>    2011-06-15 08:37:35 -0400
committer  Robert Richter <robert.richter@amd.com>    2011-06-15 08:37:35 -0400
commit     a406ab6d77ea86ba7c713276f30ed7058ca64e31 (patch)
tree       bd5dd78687e9f686cbe48bf6621874f647752fcc
parent     5f307491f3a0c8551cedf5d90d660d656e0d56ec (diff)
parent     a0e3e70243f5b270bc3eca718f0a9fa5e6b8262e (diff)
Merge branch 'oprofile/urgent' into HEAD
-rw-r--r--  arch/x86/oprofile/backtrace.c  56
-rw-r--r--  arch/x86/oprofile/nmi_int.c    14
2 files changed, 57 insertions(+), 13 deletions(-)
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index a5b64ab4cd6e..32f78eb46744 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -11,10 +11,12 @@
 #include <linux/oprofile.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/compat.h>
+#include <linux/highmem.h>
+
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
 #include <asm/stacktrace.h>
-#include <linux/compat.h>
 
 static int backtrace_stack(void *data, char *name)
 {
@@ -36,17 +38,53 @@ static struct stacktrace_ops backtrace_ops = {
         .walk_stack     = print_context_stack,
 };
 
+/* from arch/x86/kernel/cpu/perf_event.c: */
+
+/*
+ * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
+ */
+static unsigned long
+copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
+{
+        unsigned long offset, addr = (unsigned long)from;
+        unsigned long size, len = 0;
+        struct page *page;
+        void *map;
+        int ret;
+
+        do {
+                ret = __get_user_pages_fast(addr, 1, 0, &page);
+                if (!ret)
+                        break;
+
+                offset = addr & (PAGE_SIZE - 1);
+                size = min(PAGE_SIZE - offset, n - len);
+
+                map = kmap_atomic(page);
+                memcpy(to, map+offset, size);
+                kunmap_atomic(map);
+                put_page(page);
+
+                len += size;
+                to += size;
+                addr += size;
+
+        } while (len < n);
+
+        return len;
+}
+
 #ifdef CONFIG_COMPAT
 static struct stack_frame_ia32 *
 dump_user_backtrace_32(struct stack_frame_ia32 *head)
 {
+        /* Also check accessibility of one struct frame_head beyond: */
         struct stack_frame_ia32 bufhead[2];
         struct stack_frame_ia32 *fp;
+        unsigned long bytes;
 
-        /* Also check accessibility of one struct frame_head beyond */
-        if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
-                return NULL;
-        if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
+        bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
+        if (bytes != sizeof(bufhead))
                 return NULL;
 
         fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
@@ -87,12 +125,12 @@ x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
 
 static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
 {
+        /* Also check accessibility of one struct frame_head beyond: */
         struct stack_frame bufhead[2];
+        unsigned long bytes;
 
-        /* Also check accessibility of one struct stack_frame beyond */
-        if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
-                return NULL;
-        if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
+        bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
+        if (bytes != sizeof(bufhead))
                 return NULL;
 
         oprofile_add_trace(bufhead[0].return_address);
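
The new copy_from_user_nmi() helper replaces the access_ok()/__copy_from_user_inatomic() path, which is not safe from NMI context: it pins the user page with __get_user_pages_fast(), copies through a kmap_atomic() mapping, and simply stops if the page cannot be pinned. A minimal caller sketch, using the helper the same way dump_user_backtrace() now does (the function name and the return-0-on-failure convention below are illustrative, not part of the patch):

/*
 * Illustrative only -- not part of this merge.  Reads one user stack
 * frame from NMI context with the new helper; returns 0 if the page
 * backing 'head' could not be pinned or the copy came up short.
 */
static unsigned long user_return_address(struct stack_frame __user *head)
{
        struct stack_frame buf;

        if (copy_from_user_nmi(&buf, head, sizeof(buf)) != sizeof(buf))
                return 0;

        return buf.return_address;
}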
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index cf9750004a08..68894fdc034b 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -112,8 +112,10 @@ static void nmi_cpu_start(void *dummy)
 static int nmi_start(void)
 {
         get_online_cpus();
-        on_each_cpu(nmi_cpu_start, NULL, 1);
         ctr_running = 1;
+        /* make ctr_running visible to the nmi handler: */
+        smp_mb();
+        on_each_cpu(nmi_cpu_start, NULL, 1);
         put_online_cpus();
         return 0;
 }
@@ -504,15 +506,18 @@ static int nmi_setup(void)
 
         nmi_enabled = 0;
         ctr_running = 0;
-        barrier();
+        /* make variables visible to the nmi handler: */
+        smp_mb();
         err = register_die_notifier(&profile_exceptions_nb);
         if (err)
                 goto fail;
 
         get_online_cpus();
         register_cpu_notifier(&oprofile_cpu_nb);
-        on_each_cpu(nmi_cpu_setup, NULL, 1);
         nmi_enabled = 1;
+        /* make nmi_enabled visible to the nmi handler: */
+        smp_mb();
+        on_each_cpu(nmi_cpu_setup, NULL, 1);
         put_online_cpus();
 
         return 0;
@@ -531,7 +536,8 @@ static void nmi_shutdown(void)
         nmi_enabled = 0;
         ctr_running = 0;
         put_online_cpus();
-        barrier();
+        /* make variables visible to the nmi handler: */
+        smp_mb();
         unregister_die_notifier(&profile_exceptions_nb);
         msrs = &get_cpu_var(cpu_msrs);
         model->shutdown(msrs);
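
The nmi_int.c changes upgrade the plain compiler barrier() to smp_mb() and move the flag stores ahead of the on_each_cpu() IPIs, so an NMI handler running on another CPU observes ctr_running/nmi_enabled before it can be invoked. A rough sketch of the reader side this ordering pairs with (heavily simplified, with details assumed; the real check lives in profile_exceptions_notify() in this file):

/*
 * Sketch only -- simplified from the oprofile NMI notifier.  Because the
 * handler may fire on any CPU as soon as the counters are armed, the
 * writer must publish ctr_running with a full barrier (smp_mb()) before
 * issuing the on_each_cpu() calls above.
 */
static int nmi_handler_sketch(struct pt_regs *regs)
{
        if (!ctr_running)
                return NOTIFY_DONE;     /* not armed yet: not our NMI */

        model->check_ctrs(regs, &__get_cpu_var(cpu_msrs));
        return NOTIFY_STOP;
}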