author     Linus Torvalds <torvalds@linux-foundation.org>  2011-05-28 15:55:55 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-05-28 15:55:55 -0400
commit     c4a227d89f758e582fd167bb15245f2704de99ef (patch)
tree       f5b6e0091e6543c14d1cd7cf1f93e097a96bbd64
parent     87367a0b71a5188e34a913c05673b5078f71a64d (diff)
parent     f506b3dc0ec454a16d40cab9ee5d75435b39dc50 (diff)
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (25 commits)
perf: Fix SIGIO handling
perf top: Don't stop if no kernel symtab is found
perf top: Handle kptr_restrict
perf top: Remove unused macro
perf events: initialize fd array to -1 instead of 0
perf tools: Make sure kptr_restrict warnings fit 80 col terms
perf tools: Fix build on older systems
perf symbols: Handle /proc/sys/kernel/kptr_restrict
perf: Remove duplicate headers
ftrace: Add internal recursive checks
tracing: Update btrfs's tracepoints to use u64 interface
tracing: Add __print_symbolic_u64 to avoid warnings on 32bit machine
ftrace: Set ops->flag to enabled even on static function tracing
tracing: Have event with function tracer check error return
ftrace: Have ftrace_startup() return failure code
jump_label: Check entries limit in __jump_label_update
ftrace/recordmcount: Avoid STT_FUNC symbols as base on ARM
scripts/tags.sh: Add magic for trace-events for etags too
scripts/tags.sh: Fix ctags for DEFINE_EVENT()
x86/ftrace: Fix compiler warning in ftrace.c
...
34 files changed, 359 insertions, 106 deletions
diff --git a/arch/blackfin/mm/maccess.c b/arch/blackfin/mm/maccess.c
index b71cebc1f8a3..e2532114c5fd 100644
--- a/arch/blackfin/mm/maccess.c
+++ b/arch/blackfin/mm/maccess.c
@@ -16,7 +16,7 @@ static int validate_memory_access_address(unsigned long addr, int size)
 	return bfin_mem_access_type(addr, size);
 }
 
-long probe_kernel_read(void *dst, void *src, size_t size)
+long probe_kernel_read(void *dst, const void *src, size_t size)
 {
 	unsigned long lsrc = (unsigned long)src;
 	int mem_type;
@@ -55,7 +55,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
 	return -EFAULT;
 }
 
-long probe_kernel_write(void *dst, void *src, size_t size)
+long probe_kernel_write(void *dst, const void *src, size_t size)
 {
 	unsigned long ldst = (unsigned long)dst;
 	int mem_type;
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 71a4b0d34be0..51e5cd9b906a 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -19,7 +19,7 @@
  * using the stura instruction.
  * Returns the number of bytes copied or -EFAULT.
  */
-static long probe_kernel_write_odd(void *dst, void *src, size_t size)
+static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
 {
 	unsigned long count, aligned;
 	int offset, mask;
@@ -45,7 +45,7 @@ static long probe_kernel_write_odd(void *dst, void *src, size_t size)
 	return rc ? rc : count;
 }
 
-long probe_kernel_write(void *dst, void *src, size_t size)
+long probe_kernel_write(void *dst, const void *src, size_t size)
 {
 	long copied = 0;
 
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 0ba15a6cc57e..c9a281f272fd 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -123,7 +123,7 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 static atomic_t nmi_running = ATOMIC_INIT(0);
 static int mod_code_status;		/* holds return value of text write */
 static void *mod_code_ip;		/* holds the IP to write to */
-static void *mod_code_newcode;		/* holds the text to write to the IP */
+static const void *mod_code_newcode;	/* holds the text to write to the IP */
 
 static unsigned nmi_wait_count;
 static atomic_t nmi_update_count = ATOMIC_INIT(0);
@@ -225,7 +225,7 @@ within(unsigned long addr, unsigned long start, unsigned long end)
 }
 
 static int
-do_ftrace_mod_code(unsigned long ip, void *new_code)
+do_ftrace_mod_code(unsigned long ip, const void *new_code)
 {
 	/*
 	 * On x86_64, kernel text mappings are mapped read-only with
@@ -266,8 +266,8 @@ static const unsigned char *ftrace_nop_replace(void)
 }
 
 static int
-ftrace_modify_code(unsigned long ip, unsigned char *old_code,
-		   unsigned char *new_code)
+ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
+		   unsigned const char *new_code)
 {
 	unsigned char replaced[MCOUNT_INSN_SIZE];
 
@@ -301,7 +301,7 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 int ftrace_make_nop(struct module *mod,
 		    struct dyn_ftrace *rec, unsigned long addr)
 {
-	unsigned char *new, *old;
+	unsigned const char *new, *old;
 	unsigned long ip = rec->ip;
 
 	old = ftrace_call_replace(ip, addr);
@@ -312,7 +312,7 @@ int ftrace_make_nop(struct module *mod,
 
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
-	unsigned char *new, *old;
+	unsigned const char *new, *old;
 	unsigned long ip = rec->ip;
 
 	old = ftrace_nop_replace();
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index c3b8e24f2b16..9fd8a567fe1e 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -316,16 +316,23 @@ static void op_amd_stop_ibs(void)
 	wrmsrl(MSR_AMD64_IBSOPCTL, 0);
 }
 
-static inline int eilvt_is_available(int offset)
+static inline int get_eilvt(int offset)
 {
-	/* check if we may assign a vector */
 	return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
 }
 
+static inline int put_eilvt(int offset)
+{
+	return !setup_APIC_eilvt(offset, 0, 0, 1);
+}
+
 static inline int ibs_eilvt_valid(void)
 {
 	int offset;
 	u64 val;
+	int valid = 0;
+
+	preempt_disable();
 
 	rdmsrl(MSR_AMD64_IBSCTL, val);
 	offset = val & IBSCTL_LVT_OFFSET_MASK;
@@ -333,16 +340,20 @@ static inline int ibs_eilvt_valid(void)
 	if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
 		pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
 		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
-		return 0;
+		goto out;
 	}
 
-	if (!eilvt_is_available(offset)) {
+	if (!get_eilvt(offset)) {
 		pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
 		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
-		return 0;
+		goto out;
 	}
 
-	return 1;
+	valid = 1;
+out:
+	preempt_enable();
+
+	return valid;
 }
 
 static inline int get_ibs_offset(void)
@@ -600,67 +611,69 @@ static int setup_ibs_ctl(int ibs_eilvt_off)
 
 static int force_ibs_eilvt_setup(void)
 {
-	int i;
+	int offset;
 	int ret;
 
-	/* find the next free available EILVT entry */
-	for (i = 1; i < 4; i++) {
-		if (!eilvt_is_available(i))
-			continue;
-		ret = setup_ibs_ctl(i);
-		if (ret)
-			return ret;
-		pr_err(FW_BUG "using offset %d for IBS interrupts\n", i);
-		return 0;
+	/*
+	 * find the next free available EILVT entry, skip offset 0,
+	 * pin search to this cpu
+	 */
+	preempt_disable();
+	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
+		if (get_eilvt(offset))
+			break;
 	}
+	preempt_enable();
 
-	printk(KERN_DEBUG "No EILVT entry available\n");
-
-	return -EBUSY;
-}
-
-static int __init_ibs_nmi(void)
-{
-	int ret;
-
-	if (ibs_eilvt_valid())
-		return 0;
+	if (offset == APIC_EILVT_NR_MAX) {
+		printk(KERN_DEBUG "No EILVT entry available\n");
+		return -EBUSY;
+	}
 
-	ret = force_ibs_eilvt_setup();
+	ret = setup_ibs_ctl(offset);
 	if (ret)
-		return ret;
+		goto out;
 
-	if (!ibs_eilvt_valid())
-		return -EFAULT;
+	if (!ibs_eilvt_valid()) {
+		ret = -EFAULT;
+		goto out;
+	}
 
+	pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset);
 	pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");
 
 	return 0;
+out:
+	preempt_disable();
+	put_eilvt(offset);
+	preempt_enable();
+	return ret;
 }
 
 /*
  * check and reserve APIC extended interrupt LVT offset for IBS if
  * available
- *
- * init_ibs() preforms implicitly cpu-local operations, so pin this
- * thread to its current CPU
  */
 
 static void init_ibs(void)
 {
-	preempt_disable();
-
 	ibs_caps = get_ibs_caps();
+
 	if (!ibs_caps)
+		return;
+
+	if (ibs_eilvt_valid())
 		goto out;
 
-	if (__init_ibs_nmi() < 0)
-		ibs_caps = 0;
-	else
-		printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
+	if (!force_ibs_eilvt_setup())
+		goto out;
+
+	/* Failed to setup ibs */
+	ibs_caps = 0;
+	return;
 
 out:
-	preempt_enable();
+	printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
 }
 
 static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
diff --git a/drivers/oprofile/event_buffer.h b/drivers/oprofile/event_buffer.h
index 4e70749f8d16..a8d5bb3cba89 100644
--- a/drivers/oprofile/event_buffer.h
+++ b/drivers/oprofile/event_buffer.h
@@ -11,7 +11,7 @@
 #define EVENT_BUFFER_H
 
 #include <linux/types.h>
-#include <asm/mutex.h>
+#include <linux/mutex.h>
 
 int alloc_event_buffer(void);
 
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index f9bda64fcd1b..dccd8636095c 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -14,7 +14,7 @@
 #include <linux/moduleparam.h>
 #include <linux/workqueue.h>
 #include <linux/time.h>
-#include <asm/mutex.h>
+#include <linux/mutex.h>
 
 #include "oprof.h"
 #include "event_buffer.h"
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index b5a550a39a70..59d3ef100eb9 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -16,6 +16,11 @@ struct trace_print_flags {
 	const char		*name;
 };
 
+struct trace_print_flags_u64 {
+	unsigned long long	mask;
+	const char		*name;
+};
+
 const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
 				   unsigned long flags,
 				   const struct trace_print_flags *flag_array);
@@ -23,6 +28,13 @@ const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
 const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
 				     const struct trace_print_flags *symbol_array);
 
+#if BITS_PER_LONG == 32
+const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
+					 unsigned long long val,
+					 const struct trace_print_flags_u64
+					 *symbol_array);
+#endif
+
 const char *ftrace_print_hex_seq(struct trace_seq *p,
 				 const unsigned char *buf, int len);
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index dc8871295a5a..8f441d1c6550 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1546,7 +1546,7 @@ struct task_struct {
 #ifdef CONFIG_TRACING
 	/* state flags for use by tracers */
 	unsigned long trace;
-	/* bitmask of trace recursion */
+	/* bitmask and counter of trace recursion */
 	unsigned long trace_recursion;
 #endif /* CONFIG_TRACING */
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index d512d98dfb7d..5ca0951e1855 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -93,8 +93,8 @@ static inline unsigned long __copy_from_user_nocache(void *to,
  * Safely read from address @src to the buffer at @dst.  If a kernel fault
  * happens, handle that and return -EFAULT.
  */
-extern long probe_kernel_read(void *dst, void *src, size_t size);
-extern long __probe_kernel_read(void *dst, void *src, size_t size);
+extern long probe_kernel_read(void *dst, const void *src, size_t size);
+extern long __probe_kernel_read(void *dst, const void *src, size_t size);
 
 /*
  * probe_kernel_write(): safely attempt to write to a location
@@ -105,7 +105,7 @@ extern long __probe_kernel_read(void *dst, void *src, size_t size);
  * Safely write to address @dst from the buffer at @src.  If a kernel fault
  * happens, handle that and return -EFAULT.
  */
-extern long notrace probe_kernel_write(void *dst, void *src, size_t size);
-extern long notrace __probe_kernel_write(void *dst, void *src, size_t size);
+extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
+extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
 
 #endif		/* __LINUX_UACCESS_H__ */
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index f445cff66ab7..4114129f0794 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -28,7 +28,7 @@ struct extent_buffer;
 		{ BTRFS_SHARED_DATA_REF_KEY, "SHARED_DATA_REF" })
 
 #define __show_root_type(obj)					\
-	__print_symbolic(obj,					\
+	__print_symbolic_u64(obj,				\
 		{ BTRFS_ROOT_TREE_OBJECTID, "ROOT_TREE" },	\
 		{ BTRFS_EXTENT_TREE_OBJECTID, "EXTENT_TREE" },	\
 		{ BTRFS_CHUNK_TREE_OBJECTID, "CHUNK_TREE" },	\
@@ -125,7 +125,7 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
 );
 
 #define __show_map_type(type)					\
-	__print_symbolic(type,					\
+	__print_symbolic_u64(type,				\
 		{ EXTENT_MAP_LAST_BYTE, "LAST_BYTE" },		\
 		{ EXTENT_MAP_HOLE, "HOLE" },			\
 		{ EXTENT_MAP_INLINE, "INLINE" },		\
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 3e68366d485a..533c49f48047 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -205,6 +205,19 @@
 		ftrace_print_symbols_seq(p, value, symbols);		\
 	})
 
+#undef __print_symbolic_u64
+#if BITS_PER_LONG == 32
+#define __print_symbolic_u64(value, symbol_array...)			\
+	({								\
+		static const struct trace_print_flags_u64 symbols[] =	\
+			{ symbol_array, { -1, NULL } };			\
+		ftrace_print_symbols_seq_u64(p, value, symbols);	\
+	})
+#else
+#define __print_symbolic_u64(value, symbol_array...)			\
+			__print_symbolic(value, symbol_array)
+#endif
+
 #undef __print_hex
 #define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
 
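
The btrfs tracepoint changes elsewhere in this merge are the first users of this macro. On a 32-bit build, a tracepoint that wants to pretty-print a u64 field would use it roughly as in the minimal sketch below; the event, field and symbol values here are made up for illustration, only __print_symbolic_u64 itself comes from this series:

TRACE_EVENT(sample_show_root,

	TP_PROTO(u64 objectid),

	TP_ARGS(objectid),

	TP_STRUCT__entry(
		__field(u64, objectid)
	),

	TP_fast_assign(
		__entry->objectid = objectid;
	),

	/* the u64 variant avoids truncation and compiler warnings when
	 * unsigned long is only 32 bits wide */
	TP_printk("root=%s",
		  __print_symbolic_u64(__entry->objectid,
				       { 1ULL, "ROOT_TREE" },
				       { 2ULL, "EXTENT_TREE" }))
);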
diff --git a/kernel/events/core.c b/kernel/events/core.c
index c09767f7db3e..d863b3c057bb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5028,6 +5028,14 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
 	else
 		perf_event_output(event, nmi, data, regs);
 
+	if (event->fasync && event->pending_kill) {
+		if (nmi) {
+			event->pending_wakeup = 1;
+			irq_work_queue(&event->pending);
+		} else
+			perf_event_wakeup(event);
+	}
+
 	return ret;
 }
 
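
The "perf: Fix SIGIO handling" hunk above only matters once user space has put the event fd into signal-driven mode via fasync. A minimal user-space sketch of that setup, assuming a counter opened with perf_event_open(2); the attribute values are illustrative, and depending on the kernel additional setup (mmap'ing the ring buffer, PERF_EVENT_IOC_REFRESH) may be required before overflow signals are actually delivered:

#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static void on_sigio(int sig)
{
	(void)sig;
	write(STDOUT_FILENO, "overflow\n", 9);	/* async-signal-safe */
}

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 1000000;
	attr.wakeup_events = 1;

	/* measure this task on any CPU */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	signal(SIGIO, on_sigio);
	/* route SIGIO for this fd to us and switch the fd to async notification */
	fcntl(fd, F_SETOWN, getpid());
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);

	for (;;)
		pause();
}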
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 74d1c099fbd1..fa27e750dbc0 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -105,9 +105,12 @@ static int __jump_label_text_reserved(struct jump_entry *iter_start,
 }
 
 static void __jump_label_update(struct jump_label_key *key,
-		struct jump_entry *entry, int enable)
+				struct jump_entry *entry,
+				struct jump_entry *stop, int enable)
 {
-	for (; entry->key == (jump_label_t)(unsigned long)key; entry++) {
+	for (; (entry < stop) &&
+	      (entry->key == (jump_label_t)(unsigned long)key);
+	      entry++) {
 		/*
 		 * entry->code set to 0 invalidates module init text sections
 		 * kernel_text_address() verifies we are not in core kernel
@@ -181,7 +184,11 @@ static void __jump_label_mod_update(struct jump_label_key *key, int enable)
 	struct jump_label_mod *mod = key->next;
 
 	while (mod) {
-		__jump_label_update(key, mod->entries, enable);
+		struct module *m = mod->mod;
+
+		__jump_label_update(key, mod->entries,
+				    m->jump_entries + m->num_jump_entries,
+				    enable);
 		mod = mod->next;
 	}
 }
@@ -245,7 +252,8 @@ static int jump_label_add_module(struct module *mod)
 		key->next = jlm;
 
 		if (jump_label_enabled(key))
-			__jump_label_update(key, iter, JUMP_LABEL_ENABLE);
+			__jump_label_update(key, iter, iter_stop,
+					    JUMP_LABEL_ENABLE);
 	}
 
 	return 0;
@@ -371,7 +379,7 @@ static void jump_label_update(struct jump_label_key *key, int enable)
 
 	/* if there are no users, entry can be NULL */
 	if (entry)
-		__jump_label_update(key, entry, enable);
+		__jump_label_update(key, entry, __stop___jump_table, enable);
 
 #ifdef CONFIG_MODULES
 	__jump_label_mod_update(key, enable);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d017c2c82c44..1ee417fcbfa5 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -109,12 +109,18 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
 static void ftrace_global_list_func(unsigned long ip,
 				    unsigned long parent_ip)
 {
-	struct ftrace_ops *op = rcu_dereference_raw(ftrace_global_list); /*see above*/
+	struct ftrace_ops *op;
+
+	if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
+		return;
 
+	trace_recursion_set(TRACE_GLOBAL_BIT);
+	op = rcu_dereference_raw(ftrace_global_list); /*see above*/
 	while (op != &ftrace_list_end) {
 		op->func(ip, parent_ip);
 		op = rcu_dereference_raw(op->next); /*see above*/
 	};
+	trace_recursion_clear(TRACE_GLOBAL_BIT);
 }
 
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
@@ -1638,12 +1644,12 @@ static void ftrace_startup_enable(int command)
 	ftrace_run_update_code(command);
 }
 
-static void ftrace_startup(struct ftrace_ops *ops, int command)
+static int ftrace_startup(struct ftrace_ops *ops, int command)
 {
 	bool hash_enable = true;
 
 	if (unlikely(ftrace_disabled))
-		return;
+		return -ENODEV;
 
 	ftrace_start_up++;
 	command |= FTRACE_ENABLE_CALLS;
@@ -1662,6 +1668,8 @@ static void ftrace_startup(struct ftrace_ops *ops, int command)
 		ftrace_hash_rec_enable(ops, 1);
 
 	ftrace_startup_enable(command);
+
+	return 0;
 }
 
 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
@@ -2501,7 +2509,7 @@ static void __enable_ftrace_function_probe(void)
 
 	ret = __register_ftrace_function(&trace_probe_ops);
 	if (!ret)
-		ftrace_startup(&trace_probe_ops, 0);
+		ret = ftrace_startup(&trace_probe_ops, 0);
 
 	ftrace_probe_registered = 1;
 }
@@ -3466,7 +3474,11 @@ device_initcall(ftrace_nodyn_init);
 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
 static inline void ftrace_startup_enable(int command) { }
 /* Keep as macros so we do not need to define the commands */
-# define ftrace_startup(ops, command)		do { } while (0)
+# define ftrace_startup(ops, command)			\
+	({						\
+		(ops)->flags |= FTRACE_OPS_FL_ENABLED;	\
+		0;					\
+	})
 # define ftrace_shutdown(ops, command)	do { } while (0)
 # define ftrace_startup_sysctl()	do { } while (0)
 # define ftrace_shutdown_sysctl()	do { } while (0)
@@ -3484,6 +3496,10 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
 {
 	struct ftrace_ops *op;
 
+	if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
+		return;
+
+	trace_recursion_set(TRACE_INTERNAL_BIT);
 	/*
 	 * Some of the ops may be dynamically allocated,
 	 * they must be freed after a synchronize_sched().
@@ -3496,6 +3512,7 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
 		op = rcu_dereference_raw(op->next);
 	};
 	preempt_enable_notrace();
+	trace_recursion_clear(TRACE_INTERNAL_BIT);
 }
 
 static void clear_ftrace_swapper(void)
@@ -3799,7 +3816,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
 
 	ret = __register_ftrace_function(ops);
 	if (!ret)
-		ftrace_startup(ops, 0);
+		ret = ftrace_startup(ops, 0);
 
 
 out_unlock:
@@ -4045,7 +4062,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 	ftrace_graph_return = retfunc;
 	ftrace_graph_entry = entryfunc;
 
-	ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
+	ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
 
 out:
 	mutex_unlock(&ftrace_lock);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 0ef7b4b2a1f7..b0c7aa407943 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2216,7 +2216,7 @@ static noinline void trace_recursive_fail(void)
 
 	printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
 		    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
-		    current->trace_recursion,
+		    trace_recursion_buffer(),
 		    hardirq_count() >> HARDIRQ_SHIFT,
 		    softirq_count() >> SOFTIRQ_SHIFT,
 		    in_nmi());
@@ -2226,9 +2226,9 @@ static noinline void trace_recursive_fail(void)
 
 static inline int trace_recursive_lock(void)
 {
-	current->trace_recursion++;
+	trace_recursion_inc();
 
-	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
+	if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH))
 		return 0;
 
 	trace_recursive_fail();
@@ -2238,9 +2238,9 @@ static inline int trace_recursive_lock(void)
 
 static inline void trace_recursive_unlock(void)
 {
-	WARN_ON_ONCE(!current->trace_recursion);
+	WARN_ON_ONCE(!trace_recursion_buffer());
 
-	current->trace_recursion--;
+	trace_recursion_dec();
 }
 
 #else
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6b69c4bd306f..229f8591f61d 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -784,4 +784,19 @@ extern const char *__stop___trace_bprintk_fmt[];
 	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
 #include "trace_entries.h"
 
+/* Only current can touch trace_recursion */
+#define trace_recursion_inc() do { (current)->trace_recursion++; } while (0)
+#define trace_recursion_dec() do { (current)->trace_recursion--; } while (0)
+
+/* Ring buffer has the 10 LSB bits to count */
+#define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff)
+
+/* for function tracing recursion */
+#define TRACE_INTERNAL_BIT		(1<<11)
+#define TRACE_GLOBAL_BIT		(1<<12)
+
+#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (bit); } while (0)
+#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(bit); } while (0)
+#define trace_recursion_test(bit)	((current)->trace_recursion & (bit))
+
 #endif /* _LINUX_KERNEL_TRACE_H */
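
Everything above shares the single per-task trace_recursion word: the low 10 bits remain the ring buffer's nesting counter (inc/dec/buffer), while bits 11 and 12 act as plain re-entrancy flags. The ftrace.c hunks in this merge use them exactly as in this stripped-down sketch (the function name is made up):

static void my_list_walker(unsigned long ip, unsigned long parent_ip)
{
	/* already running on this task: refuse to recurse */
	if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
		return;

	trace_recursion_set(TRACE_INTERNAL_BIT);
	/* ... walk the ops list and invoke the registered callbacks ... */
	trace_recursion_clear(TRACE_INTERNAL_BIT);
}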
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 2fe110341359..686ec399f2a8 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1657,7 +1657,12 @@ static struct ftrace_ops trace_ops __initdata  =
 
 static __init void event_trace_self_test_with_function(void)
 {
-	register_ftrace_function(&trace_ops);
+	int ret;
+	ret = register_ftrace_function(&trace_ops);
+	if (WARN_ON(ret < 0)) {
+		pr_info("Failed to enable function tracer for event tests\n");
+		return;
+	}
 	pr_info("Running tests again, along with the function tracer\n");
 	event_trace_self_tests();
 	unregister_ftrace_function(&trace_ops);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index cf535ccedc86..e37de492a9e1 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -353,6 +353,33 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
 }
 EXPORT_SYMBOL(ftrace_print_symbols_seq);
 
+#if BITS_PER_LONG == 32
+const char *
+ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
+			 const struct trace_print_flags_u64 *symbol_array)
+{
+	int i;
+	const char *ret = p->buffer + p->len;
+
+	for (i = 0;  symbol_array[i].name; i++) {
+
+		if (val != symbol_array[i].mask)
+			continue;
+
+		trace_seq_puts(p, symbol_array[i].name);
+		break;
+	}
+
+	if (!p->len)
+		trace_seq_printf(p, "0x%llx", val);
+
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+EXPORT_SYMBOL(ftrace_print_symbols_seq_u64);
+#endif
+
 const char *
 ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
 {
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 7daa4b072e9f..3d0c56ad4792 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -415,15 +415,13 @@ static void watchdog_nmi_disable(int cpu) { return; }
 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
 
 /* prepare/enable/disable routines */
-static int watchdog_prepare_cpu(int cpu)
+static void watchdog_prepare_cpu(int cpu)
 {
 	struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);
 
 	WARN_ON(per_cpu(softlockup_watchdog, cpu));
 	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hrtimer->function = watchdog_timer_fn;
-
-	return 0;
 }
 
 static int watchdog_enable(int cpu)
@@ -542,17 +540,16 @@ static int __cpuinit
 cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	int hotcpu = (unsigned long)hcpu;
-	int err = 0;
 
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		err = watchdog_prepare_cpu(hotcpu);
+		watchdog_prepare_cpu(hotcpu);
 		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		if (watchdog_enabled)
-			err = watchdog_enable(hotcpu);
+			watchdog_enable(hotcpu);
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
diff --git a/mm/maccess.c b/mm/maccess.c
index e2b6f5634e0d..4cee182ab5f3 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -15,10 +15,10 @@
  * happens, handle that and return -EFAULT.
  */
 
-long __weak probe_kernel_read(void *dst, void *src, size_t size)
+long __weak probe_kernel_read(void *dst, const void *src, size_t size)
     __attribute__((alias("__probe_kernel_read")));
 
-long __probe_kernel_read(void *dst, void *src, size_t size)
+long __probe_kernel_read(void *dst, const void *src, size_t size)
 {
 	long ret;
 	mm_segment_t old_fs = get_fs();
@@ -43,10 +43,10 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
  * Safely write to address @dst from the buffer at @src.  If a kernel fault
  * happens, handle that and return -EFAULT.
  */
-long __weak probe_kernel_write(void *dst, void *src, size_t size)
+long __weak probe_kernel_write(void *dst, const void *src, size_t size)
     __attribute__((alias("__probe_kernel_write")));
 
-long __probe_kernel_write(void *dst, void *src, size_t size)
+long __probe_kernel_write(void *dst, const void *src, size_t size)
 {
 	long ret;
 	mm_segment_t old_fs = get_fs();
diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
index 4be60364a405..f40a6af6bf40 100644
--- a/scripts/recordmcount.h
+++ b/scripts/recordmcount.h
@@ -43,6 +43,7 @@
 #undef ELF_R_INFO
 #undef Elf_r_info
 #undef ELF_ST_BIND
+#undef ELF_ST_TYPE
 #undef fn_ELF_R_SYM
 #undef fn_ELF_R_INFO
 #undef uint_t
@@ -76,6 +77,7 @@
 # define ELF_R_INFO		ELF64_R_INFO
 # define Elf_r_info		Elf64_r_info
 # define ELF_ST_BIND		ELF64_ST_BIND
+# define ELF_ST_TYPE		ELF64_ST_TYPE
 # define fn_ELF_R_SYM		fn_ELF64_R_SYM
 # define fn_ELF_R_INFO		fn_ELF64_R_INFO
 # define uint_t			uint64_t
@@ -108,6 +110,7 @@
 # define ELF_R_INFO		ELF32_R_INFO
 # define Elf_r_info		Elf32_r_info
 # define ELF_ST_BIND		ELF32_ST_BIND
+# define ELF_ST_TYPE		ELF32_ST_TYPE
 # define fn_ELF_R_SYM		fn_ELF32_R_SYM
 # define fn_ELF_R_INFO		fn_ELF32_R_INFO
 # define uint_t			uint32_t
@@ -427,6 +430,11 @@ static unsigned find_secsym_ndx(unsigned const txtndx,
 		if (txtndx == w2(symp->st_shndx)
 			/* avoid STB_WEAK */
 		    && (STB_LOCAL == st_bind || STB_GLOBAL == st_bind)) {
+			/* function symbols on ARM have quirks, avoid them */
+			if (w2(ehdr->e_machine) == EM_ARM
+			    && ELF_ST_TYPE(symp->st_info) == STT_FUNC)
+				continue;
+
 			*recvalp = _w(symp->st_value);
 			return symp - sym0;
 		}
diff --git a/scripts/tags.sh b/scripts/tags.sh
index bd6185d529cf..75c5d24f1993 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -132,7 +132,7 @@ exuberant()
 	--regex-asm='/^ENTRY\(([^)]*)\).*/\1/'				\
 	--regex-c='/^SYSCALL_DEFINE[[:digit:]]?\(([^,)]*).*/sys_\1/'	\
 	--regex-c++='/^TRACE_EVENT\(([^,)]*).*/trace_\1/'		\
-	--regex-c++='/^DEFINE_EVENT\(([^,)]*).*/trace_\1/'
+	--regex-c++='/^DEFINE_EVENT\([^,)]*, *([^,)]*).*/trace_\1/'
 
 	all_kconfigs | xargs $1 -a                              \
 	--langdef=kconfig --language-force=kconfig              \
@@ -152,7 +152,9 @@ emacs()
 {
 	all_sources | xargs $1 -a                               \
 	--regex='/^ENTRY(\([^)]*\)).*/\1/'			\
-	--regex='/^SYSCALL_DEFINE[0-9]?(\([^,)]*\).*/sys_\1/'
+	--regex='/^SYSCALL_DEFINE[0-9]?(\([^,)]*\).*/sys_\1/'	\
+	--regex='/^TRACE_EVENT(\([^,)]*\).*/trace_\1/'		\
+	--regex='/^DEFINE_EVENT([^,)]*, *\([^,)]*\).*/trace_\1/'
 
 	all_kconfigs | xargs $1 -a                              \
 	--regex='/^[ \t]*\(\(menu\)*config\)[ \t]+\([a-zA-Z0-9_]+\)/\3/'
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 1455413ec7a7..032ba6398a5c 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -215,11 +215,13 @@ LIB_FILE=$(OUTPUT)libperf.a
 LIB_H += ../../include/linux/perf_event.h
 LIB_H += ../../include/linux/rbtree.h
 LIB_H += ../../include/linux/list.h
+LIB_H += ../../include/linux/const.h
 LIB_H += ../../include/linux/hash.h
 LIB_H += ../../include/linux/stringify.h
 LIB_H += util/include/linux/bitmap.h
 LIB_H += util/include/linux/bitops.h
 LIB_H += util/include/linux/compiler.h
+LIB_H += util/include/linux/const.h
 LIB_H += util/include/linux/ctype.h
 LIB_H += util/include/linux/kernel.h
 LIB_H += util/include/linux/list.h
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index e18eb7ed30ae..7b139e1e7e86 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -8,8 +8,6 @@
 #include "builtin.h"
 
 #include "util/util.h"
-
-#include "util/util.h"
 #include "util/color.h"
 #include <linux/list.h>
 #include "util/cache.h"
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 0974f957b8fa..8e2c85798185 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -823,6 +823,16 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
 
 	symbol__init();
 
+	if (symbol_conf.kptr_restrict)
+		pr_warning(
+"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
+"check /proc/sys/kernel/kptr_restrict.\n\n"
+"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
+"file is not found in the buildid cache or in the vmlinux path.\n\n"
+"Samples in kernel modules won't be resolved at all.\n\n"
+"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
+"even with a suitable vmlinux or kallsyms file.\n\n");
+
 	if (no_buildid_cache || no_buildid)
 		disable_buildid_cache();
 
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 498c6f70a747..287a173523a7 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -116,6 +116,9 @@ static int process_sample_event(union perf_event *event,
 	if (al.filtered || (hide_unresolved && al.sym == NULL))
 		return 0;
 
+	if (al.map != NULL)
+		al.map->dso->hit = 1;
+
 	if (perf_session__add_hist_entry(session, &al, sample, evsel)) {
 		pr_debug("problem incrementing symbol period, skipping event\n");
 		return -1;
@@ -249,6 +252,8 @@ static int __cmd_report(void)
 	u64 nr_samples;
 	struct perf_session *session;
 	struct perf_evsel *pos;
+	struct map *kernel_map;
+	struct kmap *kernel_kmap;
 	const char *help = "For a higher level overview, try: perf report --sort comm,dso";
 
 	signal(SIGINT, sig_handler);
@@ -268,6 +273,24 @@ static int __cmd_report(void)
 	if (ret)
 		goto out_delete;
 
+	kernel_map = session->host_machine.vmlinux_maps[MAP__FUNCTION];
+	kernel_kmap = map__kmap(kernel_map);
+	if (kernel_map == NULL ||
+	    (kernel_map->dso->hit &&
+	     (kernel_kmap->ref_reloc_sym == NULL ||
+	      kernel_kmap->ref_reloc_sym->addr == 0))) {
+		const struct dso *kdso = kernel_map->dso;
+
+		ui__warning(
+"Kernel address maps (/proc/{kallsyms,modules}) were restricted.\n\n"
+"Check /proc/sys/kernel/kptr_restrict before running 'perf record'.\n\n%s\n\n"
+"Samples in kernel modules can't be resolved as well.\n\n",
+RB_EMPTY_ROOT(&kdso->symbols[MAP__FUNCTION]) ?
+"As no suitable kallsyms nor vmlinux was found, kernel samples\n"
+"can't be resolved." :
+"If some relocation was applied (e.g. kexec) symbols may be misresolved.");
+	}
+
 	if (dump_trace) {
 		perf_session__fprintf_nr_events(session, stdout);
 		goto out_delete;
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 974f6d3f4e53..22747de7234b 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -10,7 +10,6 @@
 #include "util/symbol.h"
 #include "util/thread.h"
 #include "util/trace-event.h"
-#include "util/parse-options.h"
 #include "util/util.h"
 #include "util/evlist.h"
 #include "util/evsel.h"
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 2d7934e9de38..f2f3f4937aa2 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -62,8 +62,6 @@
 #include <linux/unistd.h>
 #include <linux/types.h>
 
-#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
-
 static struct perf_top top = {
 	.count_filter		= 5,
 	.delay_secs		= 2,
@@ -82,6 +80,8 @@ static bool use_tui, use_stdio;
 
 static int default_interval = 0;
 
+static bool kptr_restrict_warned;
+static bool vmlinux_warned;
 static bool inherit			= false;
 static int realtime_prio		= 0;
 static bool group			= false;
@@ -740,7 +740,22 @@ static void perf_event__process_sample(const union perf_event *event,
 	    al.filtered)
 		return;
 
+	if (!kptr_restrict_warned &&
+	    symbol_conf.kptr_restrict &&
+	    al.cpumode == PERF_RECORD_MISC_KERNEL) {
+		ui__warning(
+"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
+"Check /proc/sys/kernel/kptr_restrict.\n\n"
+"Kernel%s samples will not be resolved.\n",
+			  !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
+			  " modules" : "");
+		if (use_browser <= 0)
+			sleep(5);
+		kptr_restrict_warned = true;
+	}
+
 	if (al.sym == NULL) {
+		const char *msg = "Kernel samples will not be resolved.\n";
 		/*
 		 * As we do lazy loading of symtabs we only will know if the
 		 * specified vmlinux file is invalid when we actually have a
@@ -752,12 +767,20 @@ static void perf_event__process_sample(const union perf_event *event,
 		 * --hide-kernel-symbols, even if the user specifies an
 		 * invalid --vmlinux ;-)
 		 */
-		if (al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
+		if (!kptr_restrict_warned && !vmlinux_warned &&
+		    al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
 		    RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
-			ui__warning("The %s file can't be used\n",
-				    symbol_conf.vmlinux_name);
-			exit_browser(0);
-			exit(1);
+			if (symbol_conf.vmlinux_name) {
+				ui__warning("The %s file can't be used.\n%s",
+					    symbol_conf.vmlinux_name, msg);
+			} else {
+				ui__warning("A vmlinux file was not found.\n%s",
+					    msg);
+			}
+
+			if (use_browser <= 0)
+				sleep(5);
+			vmlinux_warned = true;
 		}
 
 		return;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 6635fcd11ca5..0fe9adf76379 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -553,9 +553,18 @@ static int perf_event__process_kernel_mmap(union perf_event *event,
 			goto out_problem;
 
 		perf_event__set_kernel_mmap_len(event, machine->vmlinux_maps);
-		perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
-							 symbol_name,
-							 event->mmap.pgoff);
+
+		/*
+		 * Avoid using a zero address (kptr_restrict) for the ref reloc
+		 * symbol. Effectively having zero here means that at record
+		 * time /proc/sys/kernel/kptr_restrict was non zero.
+		 */
+		if (event->mmap.pgoff != 0) {
+			perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
+								 symbol_name,
+								 event->mmap.pgoff);
+		}
+
 		if (machine__is_default_guest(machine)) {
 			/*
 			 * preload dso of guest kernel and modules
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index ee0fe0dffa71..cca29ededb5b 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -35,7 +35,17 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
 
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
 {
+	int cpu, thread;
 	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
+
+	if (evsel->fd) {
+		for (cpu = 0; cpu < ncpus; cpu++) {
+			for (thread = 0; thread < nthreads; thread++) {
+				FD(evsel, cpu, thread) = -1;
+			}
+		}
+	}
+
 	return evsel->fd != NULL ? 0 : -ENOMEM;
 }
 
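
The -1 fill matters because 0 is a valid file descriptor: teardown code that treats a never-opened slot as zero could end up closing stdin. With the sentinel in place, a close loop can skip unopened entries, roughly as in this sketch (the helper name is hypothetical; FD() is the accessor macro already used in evsel.c):

static void sketch_close_fds(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			/* -1 means this slot was never opened, so skip it */
			if (FD(evsel, cpu, thread) >= 0)
				close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
	}
}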
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 0717bebc7649..afb0849fe530 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -193,9 +193,13 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
 	     *linkname = malloc(size), *targetname;
 	int len, err = -1;
 
-	if (is_kallsyms)
+	if (is_kallsyms) {
+		if (symbol_conf.kptr_restrict) {
+			pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
+			return 0;
+		}
 		realname = (char *)name;
-	else
+	} else
 		realname = realpath(name, NULL);
 
 	if (realname == NULL || filename == NULL || linkname == NULL)
diff --git a/tools/perf/util/include/linux/const.h b/tools/perf/util/include/linux/const.h
new file mode 100644
index 000000000000..1b476c9ae649
--- /dev/null
+++ b/tools/perf/util/include/linux/const.h
@@ -0,0 +1 @@
+#include "../../../../include/linux/const.h"
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 516876dfbe52..eec196329fd9 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -676,9 +676,30 @@ discard_symbol:		rb_erase(&pos->rb_node, root);
 	return count + moved;
 }
 
+static bool symbol__restricted_filename(const char *filename,
+					const char *restricted_filename)
+{
+	bool restricted = false;
+
+	if (symbol_conf.kptr_restrict) {
+		char *r = realpath(filename, NULL);
+
+		if (r != NULL) {
+			restricted = strcmp(r, restricted_filename) == 0;
+			free(r);
+			return restricted;
+		}
+	}
+
+	return restricted;
+}
+
 int dso__load_kallsyms(struct dso *dso, const char *filename,
 		       struct map *map, symbol_filter_t filter)
 {
+	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
+		return -1;
+
 	if (dso__load_all_kallsyms(dso, filename, map) < 0)
 		return -1;
 
@@ -1790,6 +1811,9 @@ static int machine__create_modules(struct machine *machine)
 		modules = path;
 	}
 
+	if (symbol__restricted_filename(path, "/proc/modules"))
+		return -1;
+
 	file = fopen(modules, "r");
 	if (file == NULL)
 		return -1;
@@ -2239,6 +2263,9 @@ static u64 machine__get_kernel_start_addr(struct machine *machine)
 		}
 	}
 
+	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
+		return 0;
+
 	if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
 		return 0;
 
@@ -2410,6 +2437,25 @@ static int setup_list(struct strlist **list, const char *list_str,
 	return 0;
 }
 
+static bool symbol__read_kptr_restrict(void)
+{
+	bool value = false;
+
+	if (geteuid() != 0) {
+		FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
+		if (fp != NULL) {
+			char line[8];
+
+			if (fgets(line, sizeof(line), fp) != NULL)
+				value = atoi(line) != 0;
+
+			fclose(fp);
+		}
+	}
+
+	return value;
+}
+
 int symbol__init(void)
 {
 	const char *symfs;
@@ -2456,6 +2502,8 @@ int symbol__init(void)
 	if (symfs != symbol_conf.symfs)
 		free((void *)symfs);
 
+	symbol_conf.kptr_restrict = symbol__read_kptr_restrict();
+
 	symbol_conf.initialized = true;
 	return 0;
 
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 242de0101a86..325ee36a9d29 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -75,7 +75,8 @@ struct symbol_conf {
 			use_callchain,
 			exclude_other,
 			show_cpu_utilization,
-			initialized;
+			initialized,
+			kptr_restrict;
 	const char	*vmlinux_name,
 			*kallsyms_name,
 			*source_prefix,