 arch/x86/include/asm/paravirt.h | 10 +++++-----
 kernel/module.c                 | 12 ++++++++++++
 kernel/trace/trace.c            | 19 +++++++++++++++++++
 3 files changed, 36 insertions(+), 5 deletions(-)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 18e3b8a8709f..ef9975812c77 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -824,27 +824,27 @@ static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
 #define __PV_IS_CALLEE_SAVE(func)			\
 	((struct paravirt_callee_save) { func })
 
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
 {
 	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
 }
 
-static inline void arch_local_irq_restore(unsigned long f)
+static inline notrace void arch_local_irq_restore(unsigned long f)
 {
 	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
 }
 
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
 {
 	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
 }
 
-static inline void arch_local_irq_enable(void)
+static inline notrace void arch_local_irq_enable(void)
 {
 	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
 }
 
-static inline unsigned long arch_local_irq_save(void)
+static inline notrace unsigned long arch_local_irq_save(void)
 {
 	unsigned long f;
 
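Note on the hunk above: the only change is the added notrace annotation. In the kernel, notrace expands (via linux/compiler.h) to the compiler attribute that suppresses mcount/fentry instrumentation. The function tracer saves and restores irq flags around its own work, so a traceable irq-flag helper would let the tracer re-enter itself. A minimal standalone sketch of the idea; demo_save_flags and the flags stand-in are illustrative, not kernel code:

/*
 * Illustrative sketch: "notrace" keeps a helper out of the function
 * tracer.  The kernel's definition is roughly the attribute below;
 * a function carrying it gets no mcount/fentry call emitted, so the
 * tracer cannot recurse into it while manipulating irq flags.
 */
#define notrace __attribute__((no_instrument_function))

static inline notrace unsigned long demo_save_flags(void)
{
	unsigned long flags = 0;	/* stand-in for PVOP_CALLEE0(...) */
	return flags;
}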
diff --git a/kernel/module.c b/kernel/module.c
index 437a74a7524a..d190664f25ff 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2326,6 +2326,18 @@ static void find_module_sections(struct module *mod, struct load_info *info)
 	kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
 			   mod->num_trace_events, GFP_KERNEL);
 #endif
+#ifdef CONFIG_TRACING
+	mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
+					 sizeof(*mod->trace_bprintk_fmt_start),
+					 &mod->num_trace_bprintk_fmt);
+	/*
+	 * This section contains pointers to allocated objects in the trace
+	 * code and not scanning it leads to false positives.
+	 */
+	kmemleak_scan_area(mod->trace_bprintk_fmt_start,
+			   sizeof(*mod->trace_bprintk_fmt_start) *
+			   mod->num_trace_bprintk_fmt, GFP_KERNEL);
+#endif
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 	/* sechdrs[0].sh_size is always zero */
 	mod->ftrace_callsites = section_objs(info, "__mcount_loc",
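Note on the hunk above: kmemleak reports allocated objects that no scanned memory still references, and it does not scan module sections by default. Since __trace_printk_fmt holds pointers into trace-code allocations, registering it with kmemleak_scan_area() keeps those allocations from being flagged as leaks. A hedged sketch of the same pattern in hypothetical driver code; fmt_entry, register_fmt_table and the literal format string are made up for illustration:

/*
 * Sketch: when pointers to kmalloc()ed objects live in memory that
 * kmemleak does not scan by default, register that region as a scan
 * area so the referenced objects are not reported as leaked.
 */
#include <linux/kmemleak.h>
#include <linux/slab.h>

struct fmt_entry {
	const char *fmt;	/* points to a kmalloc()ed string */
};

static void register_fmt_table(struct fmt_entry *table, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		table[i].fmt = kstrdup("demo fmt", GFP_KERNEL);

	/* let kmemleak follow the pointers stored in the table */
	kmemleak_scan_area(table, sizeof(*table) * n, GFP_KERNEL);
}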
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 82d9b8106cd0..ee6a7339cf0e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1284,6 +1284,8 @@ void trace_dump_stack(void)
 	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
 }
 
+static DEFINE_PER_CPU(int, user_stack_count);
+
 void
 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
@@ -1302,6 +1304,18 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	if (unlikely(in_nmi()))
 		return;
 
+	/*
+	 * prevent recursion, since the user stack tracing may
+	 * trigger other kernel events.
+	 */
+	preempt_disable();
+	if (__this_cpu_read(user_stack_count))
+		goto out;
+
+	__this_cpu_inc(user_stack_count);
+
+
+
 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
@@ -1319,6 +1333,11 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	save_stack_trace_user(&trace);
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
+
+	__this_cpu_dec(user_stack_count);
+
+ out:
+	preempt_enable();
 }
 
 #ifdef UNUSED
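Note on the hunks above: as the added comment says, capturing a user stack can itself fire other traced kernel events, which would re-enter ftrace_trace_userstack(). The per-CPU counter detects re-entry on the same CPU, and preemption stays disabled across the guarded region so the task cannot migrate between setting and clearing the flag. The shape of the pattern, with illustrative names (demo_recursion, demo_event):

/* Sketch of the per-CPU recursion guard; names are illustrative only. */
static DEFINE_PER_CPU(int, demo_recursion);

static void demo_event(void)
{
	preempt_disable();
	if (__this_cpu_read(demo_recursion))
		goto out;		/* already inside on this CPU */
	__this_cpu_inc(demo_recursion);

	/* ... work that may indirectly re-enter demo_event() ... */

	__this_cpu_dec(demo_recursion);
 out:
	preempt_enable();
}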
