Diffstat (limited to 'include')
 include/asm-generic/local64.h     | 96
 include/asm-generic/vmlinux.lds.h |  4
 include/linux/ftrace.h            |  5
 include/linux/ftrace_event.h      | 18
 include/linux/kernel.h            |  5
 include/linux/kmemtrace.h         | 25
 include/linux/nmi.h               | 13
 include/linux/perf_event.h        | 95
 include/linux/sched.h             | 24
 include/linux/slab_def.h          |  3
 include/linux/slub_def.h          |  3
 include/linux/syscalls.h          |  2
 include/trace/boot.h              | 60
 include/trace/events/sched.h      | 32
 include/trace/events/timer.h      | 80
 include/trace/ftrace.h            | 23
 include/trace/syscall.h           |  1
 17 files changed, 228 insertions(+), 261 deletions(-)
diff --git a/include/asm-generic/local64.h b/include/asm-generic/local64.h
new file mode 100644
index 000000000000..02ac760c1a8b
--- /dev/null
+++ b/include/asm-generic/local64.h
@@ -0,0 +1,96 @@
+#ifndef _ASM_GENERIC_LOCAL64_H
+#define _ASM_GENERIC_LOCAL64_H
+
+#include <linux/percpu.h>
+#include <asm/types.h>
+
+/*
+ * A signed long type for operations which are atomic for a single CPU.
+ * Usually used in combination with per-cpu variables.
+ *
+ * This is the default implementation, which uses atomic64_t. Which is
+ * rather pointless. The whole point behind local64_t is that some processors
+ * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
+ * running on this CPU. local64_t allows exploitation of such capabilities.
+ */
+
+/* Implement in terms of atomics. */
+
+#if BITS_PER_LONG == 64
+
+#include <asm/local.h>
+
+typedef struct {
+	local_t a;
+} local64_t;
+
+#define LOCAL64_INIT(i)	{ LOCAL_INIT(i) }
+
+#define local64_read(l)		local_read(&(l)->a)
+#define local64_set(l,i)	local_set((&(l)->a),(i))
+#define local64_inc(l)		local_inc(&(l)->a)
+#define local64_dec(l)		local_dec(&(l)->a)
+#define local64_add(i,l)	local_add((i),(&(l)->a))
+#define local64_sub(i,l)	local_sub((i),(&(l)->a))
+
+#define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a))
+#define local64_dec_and_test(l) local_dec_and_test(&(l)->a)
+#define local64_inc_and_test(l) local_inc_and_test(&(l)->a)
+#define local64_add_negative(i, l) local_add_negative((i), (&(l)->a))
+#define local64_add_return(i, l) local_add_return((i), (&(l)->a))
+#define local64_sub_return(i, l) local_sub_return((i), (&(l)->a))
+#define local64_inc_return(l)	local_inc_return(&(l)->a)
+
+#define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n))
+#define local64_xchg(l, n)	local_xchg((&(l)->a), (n))
+#define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u))
+#define local64_inc_not_zero(l)	local_inc_not_zero(&(l)->a)
+
+/* Non-atomic variants, ie. preemption disabled and won't be touched
+ * in interrupt, etc. Some archs can optimize this case well. */
+#define __local64_inc(l)	local64_set((l), local64_read(l) + 1)
+#define __local64_dec(l)	local64_set((l), local64_read(l) - 1)
+#define __local64_add(i,l)	local64_set((l), local64_read(l) + (i))
+#define __local64_sub(i,l)	local64_set((l), local64_read(l) - (i))
+
+#else /* BITS_PER_LONG != 64 */
+
+#include <asm/atomic.h>
+
+/* Don't use typedef: don't want them to be mixed with atomic_t's. */
+typedef struct {
+	atomic64_t a;
+} local64_t;
+
+#define LOCAL64_INIT(i)	{ ATOMIC_LONG_INIT(i) }
+
+#define local64_read(l)		atomic64_read(&(l)->a)
+#define local64_set(l,i)	atomic64_set((&(l)->a),(i))
+#define local64_inc(l)		atomic64_inc(&(l)->a)
+#define local64_dec(l)		atomic64_dec(&(l)->a)
+#define local64_add(i,l)	atomic64_add((i),(&(l)->a))
+#define local64_sub(i,l)	atomic64_sub((i),(&(l)->a))
+
+#define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a))
+#define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a)
+#define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a)
+#define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a))
+#define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a))
+#define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a))
+#define local64_inc_return(l)	atomic64_inc_return(&(l)->a)
+
+#define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n))
+#define local64_xchg(l, n)	atomic64_xchg((&(l)->a), (n))
+#define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u))
+#define local64_inc_not_zero(l)	atomic64_inc_not_zero(&(l)->a)
+
+/* Non-atomic variants, ie. preemption disabled and won't be touched
+ * in interrupt, etc. Some archs can optimize this case well. */
+#define __local64_inc(l)	local64_set((l), local64_read(l) + 1)
+#define __local64_dec(l)	local64_set((l), local64_read(l) - 1)
+#define __local64_add(i,l)	local64_set((l), local64_read(l) + (i))
+#define __local64_sub(i,l)	local64_set((l), local64_read(l) - (i))
+
+#endif /* BITS_PER_LONG != 64 */
+
+#endif /* _ASM_GENERIC_LOCAL64_H */
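
Worth a quick illustration of the intended use. The sketch below is hypothetical (my_stats, account_bytes and read_total_bytes are invented names), but it shows the pattern local64_t targets: a counter that is only ever written from its own CPU, where local64_add() can beat a full atomic64 read-modify-write on architectures with an IRQ-atomic add.

/* Hypothetical sketch: a per-cpu byte counter updated only on the
 * owning CPU (e.g. from IRQ or syscall context on that CPU). */
#include <asm/local64.h>
#include <linux/percpu.h>

struct my_stats {			/* invented for illustration */
	local64_t	bytes;
};

static DEFINE_PER_CPU(struct my_stats, my_stats);

static void account_bytes(u64 n)	/* called on the local CPU only */
{
	local64_add(n, &__get_cpu_var(my_stats).bytes);
}

static u64 read_total_bytes(void)	/* any CPU may read */
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += local64_read(&per_cpu(my_stats, cpu).bytes);
	return sum;
}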
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 4e7ae6002056..8a92a170fb7d 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -156,10 +156,6 @@
 	CPU_KEEP(exit.data)						\
 	MEM_KEEP(init.data)						\
 	MEM_KEEP(exit.data)						\
-	. = ALIGN(8);							\
-	VMLINUX_SYMBOL(__start___markers) = .;				\
-	*(__markers)							\
-	VMLINUX_SYMBOL(__stop___markers) = .;				\
 	. = ALIGN(32);							\
 	VMLINUX_SYMBOL(__start___tracepoints) = .;			\
 	*(__tracepoints)						\
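
For context, the tracepoint section bounds kept by this hunk are consumed by the tracepoint registry when probes are (de)activated, roughly as sketched below. The extern declarations mirror kernel/tracepoint.c of this era; for_each_tracepoint_sketch() is an invented name.

/* Sketch: walking the linker-provided __tracepoints array. */
#include <linux/tracepoint.h>

extern struct tracepoint __start___tracepoints[];
extern struct tracepoint __stop___tracepoints[];

static void for_each_tracepoint_sketch(void (*fn)(struct tracepoint *tp))
{
	struct tracepoint *tp;

	for (tp = __start___tracepoints; tp < __stop___tracepoints; tp++)
		fn(tp);	/* e.g. update the probe state of each tracepoint */
}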
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 41e46330d9be..dcd6a7c3a435 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1,3 +1,8 @@
+/*
+ * Ftrace header. For implementation details beyond the random comments
+ * scattered below, see: Documentation/trace/ftrace-design.txt
+ */
+
 #ifndef _LINUX_FTRACE_H
 #define _LINUX_FTRACE_H
 
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 3167f2df4126..02b8b24f8f51 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -11,8 +11,6 @@ struct trace_array;
 struct tracer;
 struct dentry;
 
-DECLARE_PER_CPU(struct trace_seq, ftrace_event_seq);
-
 struct trace_print_flags {
 	unsigned long		mask;
 	const char		*name;
@@ -58,6 +56,9 @@ struct trace_iterator {
 	struct ring_buffer_iter	*buffer_iter[NR_CPUS];
 	unsigned long		iter_flags;
 
+	/* trace_seq for __print_flags() and __print_symbolic() etc. */
+	struct trace_seq	tmp_seq;
+
 	/* The below is zeroed out in pipe_read */
 	struct trace_seq	seq;
 	struct trace_entry	*ent;
@@ -146,14 +147,19 @@ struct ftrace_event_class {
 	int			(*raw_init)(struct ftrace_event_call *);
 };
 
+extern int ftrace_event_reg(struct ftrace_event_call *event,
+			    enum trace_reg type);
+
 enum {
 	TRACE_EVENT_FL_ENABLED_BIT,
 	TRACE_EVENT_FL_FILTERED_BIT,
+	TRACE_EVENT_FL_RECORDED_CMD_BIT,
 };
 
 enum {
 	TRACE_EVENT_FL_ENABLED		= (1 << TRACE_EVENT_FL_ENABLED_BIT),
 	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
+	TRACE_EVENT_FL_RECORDED_CMD	= (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
 };
 
 struct ftrace_event_call {
159struct ftrace_event_call { 165struct ftrace_event_call {
@@ -171,6 +177,7 @@ struct ftrace_event_call {
 	 * 32 bit flags:
 	 *   bit 1:		enabled
 	 *   bit 2:		filter_active
+	 *   bit 3:		enabled cmd record
 	 *
 	 * Changes to flags must hold the event_mutex.
 	 *
@@ -257,8 +264,7 @@ static inline void
 perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
 		       u64 count, struct pt_regs *regs, void *head)
 {
-	perf_tp_event(addr, count, raw_data, size, regs, head);
-	perf_swevent_put_recursion_context(rctx);
+	perf_tp_event(addr, count, raw_data, size, regs, head, rctx);
 }
 #endif
 
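
The behavioural point of the hunk above: the recursion context (rctx) used to be released by the inline helper itself; now it travels into perf_tp_event(), which releases it after the event has been dispatched. A hedged sketch of a caller follows; my_entry is invented, and the perf_trace_buf_prepare() signature and the event_call field names are assumptions about this era's API, not guarantees.

/* Hedged sketch of a tracepoint probe using the new submit path. */
struct my_entry {			/* hypothetical raw event layout */
	struct trace_entry	ent;
	unsigned long		value;
};

static void perf_trace_my_event(struct ftrace_event_call *event_call,
				unsigned long value)
{
	struct pt_regs regs;
	struct my_entry *entry;
	int rctx;

	perf_fetch_caller_regs(&regs);	/* note: no more skip argument */

	entry = perf_trace_buf_prepare(sizeof(*entry),
				       event_call->event.type, &regs, &rctx);
	if (!entry)
		return;

	entry->value = value;
	/* rctx is now released inside perf_tp_event(), not here */
	perf_trace_buf_submit(entry, sizeof(*entry), rctx, 0, 1, &regs,
			      this_cpu_ptr(event_call->perf_events));
}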
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5de838b0fc1a..38e462e00594 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -513,9 +513,6 @@ extern void tracing_start(void);
 extern void tracing_stop(void);
 extern void ftrace_off_permanent(void);
 
-extern void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
-
 static inline void __attribute__ ((format (printf, 1, 2)))
 ____trace_printk_check_format(const char *fmt, ...)
 {
@@ -591,8 +588,6 @@ __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
 
 extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
 #else
-static inline void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
 static inline int
 trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
 
diff --git a/include/linux/kmemtrace.h b/include/linux/kmemtrace.h
deleted file mode 100644
index b616d3930c3b..000000000000
--- a/include/linux/kmemtrace.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2008 Eduard - Gabriel Munteanu
- *
- * This file is released under GPL version 2.
- */
-
-#ifndef _LINUX_KMEMTRACE_H
-#define _LINUX_KMEMTRACE_H
-
-#ifdef __KERNEL__
-
-#include <trace/events/kmem.h>
-
-#ifdef CONFIG_KMEMTRACE
-extern void kmemtrace_init(void);
-#else
-static inline void kmemtrace_init(void)
-{
-}
-#endif
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_KMEMTRACE_H */
-
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index b752e807adde..06aab5eee134 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -20,10 +20,14 @@ extern void touch_nmi_watchdog(void);
 extern void acpi_nmi_disable(void);
 extern void acpi_nmi_enable(void);
 #else
+#ifndef CONFIG_HARDLOCKUP_DETECTOR
 static inline void touch_nmi_watchdog(void)
 {
 	touch_softlockup_watchdog();
 }
+#else
+extern void touch_nmi_watchdog(void);
+#endif
 static inline void acpi_nmi_disable(void) { }
 static inline void acpi_nmi_enable(void) { }
 #endif
@@ -47,4 +51,13 @@ static inline bool trigger_all_cpu_backtrace(void)
 }
 #endif
 
+#ifdef CONFIG_LOCKUP_DETECTOR
+int hw_nmi_is_cpu_stuck(struct pt_regs *);
+u64 hw_nmi_get_sample_period(void);
+extern int watchdog_enabled;
+struct ctl_table;
+extern int proc_dowatchdog_enabled(struct ctl_table *, int ,
+			void __user *, size_t *, loff_t *);
+#endif
+
 #endif
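
Unchanged in semantics but worth spelling out: touch_nmi_watchdog() remains what long-running code paths call to keep the (now perf-driven) hardlockup detector quiet, and it pets the softlockup watchdog as well, as the inline fallback above shows. An illustrative loop, with slow_scan_one_page() an invented helper:

/* Illustrative only: a long-running operation that must not trip the
 * NMI watchdog pets it periodically. */
#include <linux/nmi.h>

static void slow_memory_scan(unsigned long *base, unsigned long pages)
{
	unsigned long i;

	for (i = 0; i < pages; i++) {
		slow_scan_one_page(base + i);	/* hypothetical helper */
		if ((i & 0xffff) == 0)
			touch_nmi_watchdog();
	}
}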
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 5d0266d94985..937495c25073 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -214,8 +214,9 @@ struct perf_event_attr {
 				 * See also PERF_RECORD_MISC_EXACT_IP
 				 */
 				precise_ip     :  2, /* skid constraint       */
+				mmap_data      :  1, /* non-exec mmap data    */
 
-				__reserved_1   : 47;
+				__reserved_1   : 46;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
@@ -461,6 +462,7 @@ enum perf_callchain_context {
 
 #ifdef CONFIG_PERF_EVENTS
 # include <asm/perf_event.h>
+# include <asm/local64.h>
 #endif
 
 struct perf_guest_info_callbacks {
@@ -531,14 +533,16 @@ struct hw_perf_event {
 		struct hrtimer	hrtimer;
 	};
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
-	/* breakpoint */
-	struct arch_hw_breakpoint	info;
+	struct { /* breakpoint */
+		struct arch_hw_breakpoint	info;
+		struct list_head		bp_list;
+	};
 #endif
 	};
-	atomic64_t			prev_count;
+	local64_t			prev_count;
 	u64				sample_period;
 	u64				last_period;
-	atomic64_t			period_left;
+	local64_t			period_left;
 	u64				interrupts;
 
 	u64				freq_time_stamp;
@@ -548,7 +552,10 @@ struct hw_perf_event {
 
 struct perf_event;
 
-#define PERF_EVENT_TXN_STARTED 1
+/*
+ * Common implementation detail of pmu::{start,commit,cancel}_txn
+ */
+#define PERF_EVENT_TXN 0x1
 
 /**
  * struct pmu - generic performance monitoring unit
@@ -562,14 +569,28 @@ struct pmu {
 	void (*unthrottle)		(struct perf_event *event);
 
 	/*
-	 * group events scheduling is treated as a transaction,
-	 * add group events as a whole and perform one schedulability test.
-	 * If test fails, roll back the whole group
+	 * Group events scheduling is treated as a transaction, add group
+	 * events as a whole and perform one schedulability test. If the test
+	 * fails, roll back the whole group
 	 */
 
+	/*
+	 * Start the transaction, after this ->enable() doesn't need
+	 * to do schedulability tests.
+	 */
 	void (*start_txn)	(const struct pmu *pmu);
-	void (*cancel_txn)	(const struct pmu *pmu);
+	/*
+	 * If ->start_txn() disabled the ->enable() schedulability test
+	 * then ->commit_txn() is required to perform one. On success
+	 * the transaction is closed. On error the transaction is kept
+	 * open until ->cancel_txn() is called.
+	 */
 	int  (*commit_txn)	(const struct pmu *pmu);
+	/*
+	 * Will cancel the transaction, assumes ->disable() is called for
+	 * each successfull ->enable() during the transaction.
+	 */
+	void (*cancel_txn)	(const struct pmu *pmu);
 };
 
 /**
@@ -584,7 +605,9 @@ enum perf_event_active_state {
 
 struct file;
 
-struct perf_mmap_data {
+#define PERF_BUFFER_WRITABLE		0x01
+
+struct perf_buffer {
 	atomic_t			refcount;
 	struct rcu_head			rcu_head;
 #ifdef CONFIG_PERF_USE_VMALLOC
@@ -650,7 +673,8 @@ struct perf_event {
 
 	enum perf_event_active_state	state;
 	unsigned int			attach_state;
-	atomic64_t			count;
+	local64_t			count;
+	atomic64_t			child_count;
 
 	/*
 	 * These are the total time in nanoseconds that the event
@@ -709,7 +733,7 @@ struct perf_event {
 	atomic_t			mmap_count;
 	int				mmap_locked;
 	struct user_struct		*mmap_user;
-	struct perf_mmap_data		*data;
+	struct perf_buffer		*buffer;
 
 	/* poll related */
 	wait_queue_head_t		waitq;
@@ -807,7 +831,7 @@ struct perf_cpu_context {
 
 struct perf_output_handle {
 	struct perf_event		*event;
-	struct perf_mmap_data		*data;
+	struct perf_buffer		*buffer;
 	unsigned long			wakeup;
 	unsigned long			size;
 	void				*addr;
@@ -910,8 +934,10 @@ extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
 
-extern void
-perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
+#ifndef perf_arch_fetch_caller_regs
+static inline void
+perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
+#endif
 
 /*
  * Take a snapshot of the regs. Skip ip and frame pointer to
@@ -921,31 +947,11 @@ perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
  * - bp for callchains
  * - eflags, for future purposes, just in case
  */
-static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
+static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 {
-	unsigned long ip;
-
 	memset(regs, 0, sizeof(*regs));
 
-	switch (skip) {
-	case 1 :
-		ip = CALLER_ADDR0;
-		break;
-	case 2 :
-		ip = CALLER_ADDR1;
-		break;
-	case 3 :
-		ip = CALLER_ADDR2;
-		break;
-	case 4:
-		ip = CALLER_ADDR3;
-		break;
-	/* No need to support further for now */
-	default:
-		ip = 0;
-	}
-
-	return perf_arch_fetch_caller_regs(regs, ip, skip);
+	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
 }
 
 static inline void
950 956
951static inline void 957static inline void
@@ -955,21 +961,14 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 	struct pt_regs hot_regs;
 
 	if (!regs) {
-		perf_fetch_caller_regs(&hot_regs, 1);
+		perf_fetch_caller_regs(&hot_regs);
 		regs = &hot_regs;
 	}
 	__perf_sw_event(event_id, nr, nmi, regs, addr);
 	}
 }
 
-extern void __perf_event_mmap(struct vm_area_struct *vma);
-
-static inline void perf_event_mmap(struct vm_area_struct *vma)
-{
-	if (vma->vm_flags & VM_EXEC)
-		__perf_event_mmap(vma);
-}
-
+extern void perf_event_mmap(struct vm_area_struct *vma);
 extern struct perf_guest_info_callbacks *perf_guest_cbs;
 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
 extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
@@ -1001,7 +1000,7 @@ static inline bool perf_paranoid_kernel(void)
 extern void perf_event_init(void);
 extern void perf_tp_event(u64 addr, u64 count, void *record,
 			  int entry_size, struct pt_regs *regs,
-			  struct hlist_head *head);
+			  struct hlist_head *head, int rctx);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
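
The transaction comments added to struct pmu above describe a protocol; the sketch below shows the shape of the caller they anticipate. event_sched_in() is a hypothetical stand-in for the core's per-event enable path, and error handling is condensed.

/* Sketch of the group-scheduling transaction protocol documented in
 * the struct pmu comments above. */
static int group_sched_in_sketch(struct perf_event *leader,
				 const struct pmu *pmu)
{
	struct perf_event *event;

	pmu->start_txn(pmu);	/* ->enable() may now skip schedulability tests */

	if (event_sched_in(leader))	/* hypothetical helper */
		goto group_error;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (event_sched_in(event))
			goto group_error;
	}

	if (!pmu->commit_txn(pmu))	/* one test for the whole group */
		return 0;

group_error:
	/* Roll back; ->cancel_txn() assumes ->disable() is called for
	 * each successful ->enable() during the transaction. */
	pmu->cancel_txn(pmu);
	return -EAGAIN;
}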
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0478888c6899..3992f50de614 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -316,20 +316,16 @@ extern void scheduler_tick(void);
 
 extern void sched_show_task(struct task_struct *p);
 
-#ifdef CONFIG_DETECT_SOFTLOCKUP
-extern void softlockup_tick(void);
+#ifdef CONFIG_LOCKUP_DETECTOR
 extern void touch_softlockup_watchdog(void);
 extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
-extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
+extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
 				    void __user *buffer,
 				    size_t *lenp, loff_t *ppos);
 extern unsigned int softlockup_panic;
 extern int softlockup_thresh;
 #else
-static inline void softlockup_tick(void)
-{
-}
 static inline void touch_softlockup_watchdog(void)
 {
 }
@@ -2435,18 +2431,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_TRACING
-extern void
-__trace_special(void *__tr, void *__data,
-		unsigned long arg1, unsigned long arg2, unsigned long arg3);
-#else
-static inline void
-__trace_special(void *__tr, void *__data,
-		unsigned long arg1, unsigned long arg2, unsigned long arg3)
-{
-}
-#endif
-
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 1812dac8c496..1acfa73ce2ac 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -14,7 +14,8 @@
 #include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
 #include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
 #include <linux/compiler.h>
-#include <linux/kmemtrace.h>
+
+#include <trace/events/kmem.h>
 
 #ifndef ARCH_KMALLOC_MINALIGN
 /*
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 4ba59cfc1f75..6447a723ecb1 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,9 +10,10 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
-#include <linux/kmemtrace.h>
 #include <linux/kmemleak.h>
 
+#include <trace/events/kmem.h>
+
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
 	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 13ebb5413a79..a6bfd1367d2a 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -167,7 +167,6 @@ extern struct trace_event_functions exit_syscall_print_funcs;
 	.enter_event	= &event_enter_##sname,		\
 	.exit_event	= &event_exit_##sname,		\
 	.enter_fields	= LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \
-	.exit_fields	= LIST_HEAD_INIT(__syscall_meta_##sname.exit_fields), \
 };
 
 #define SYSCALL_DEFINE0(sname)					\
@@ -182,7 +181,6 @@ extern struct trace_event_functions exit_syscall_print_funcs;
 	.enter_event	= &event_enter__##sname,	\
 	.exit_event	= &event_exit__##sname,		\
 	.enter_fields	= LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \
-	.exit_fields	= LIST_HEAD_INIT(__syscall_meta__##sname.exit_fields), \
 }; \
 	asmlinkage long sys_##sname(void)
 #else
diff --git a/include/trace/boot.h b/include/trace/boot.h
deleted file mode 100644
index 088ea089e31d..000000000000
--- a/include/trace/boot.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef _LINUX_TRACE_BOOT_H
-#define _LINUX_TRACE_BOOT_H
-
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/init.h>
-
-/*
- * Structure which defines the trace of an initcall
- * while it is called.
- * You don't have to fill the func field since it is
- * only used internally by the tracer.
- */
-struct boot_trace_call {
-	pid_t			caller;
-	char			func[KSYM_SYMBOL_LEN];
-};
-
-/*
- * Structure which defines the trace of an initcall
- * while it returns.
- */
-struct boot_trace_ret {
-	char			func[KSYM_SYMBOL_LEN];
-	int			result;
-	unsigned long long	duration;		/* nsecs */
-};
-
-#ifdef CONFIG_BOOT_TRACER
-/* Append the traces on the ring-buffer */
-extern void trace_boot_call(struct boot_trace_call *bt, initcall_t fn);
-extern void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn);
-
-/* Tells the tracer that smp_pre_initcall is finished.
- * So we can start the tracing
- */
-extern void start_boot_trace(void);
-
-/* Resume the tracing of other necessary events
- * such as sched switches
- */
-extern void enable_boot_trace(void);
-
-/* Suspend this tracing. Actually, only sched_switches tracing have
- * to be suspended. Initcalls doesn't need it.)
- */
-extern void disable_boot_trace(void);
-#else
-static inline
-void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) { }
-
-static inline
-void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn) { }
-
-static inline void start_boot_trace(void) { }
-static inline void enable_boot_trace(void) { }
-static inline void disable_boot_trace(void) { }
-#endif /* CONFIG_BOOT_TRACER */
-
-#endif /* __LINUX_TRACE_BOOT_H */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index b9e1dd6c6208..9208c92aeab5 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -50,31 +50,6 @@ TRACE_EVENT(sched_kthread_stop_ret,
 );
 
 /*
- * Tracepoint for waiting on task to unschedule:
- */
-TRACE_EVENT(sched_wait_task,
-
-	TP_PROTO(struct task_struct *p),
-
-	TP_ARGS(p),
-
-	TP_STRUCT__entry(
-		__array(	char,	comm,	TASK_COMM_LEN	)
-		__field(	pid_t,	pid			)
-		__field(	int,	prio			)
-	),
-
-	TP_fast_assign(
-		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
-		__entry->pid	= p->pid;
-		__entry->prio	= p->prio;
-	),
-
-	TP_printk("comm=%s pid=%d prio=%d",
-		  __entry->comm, __entry->pid, __entry->prio)
-);
-
-/*
  * Tracepoint for waking up a task:
  */
 DECLARE_EVENT_CLASS(sched_wakeup_template,
@@ -240,6 +215,13 @@ DEFINE_EVENT(sched_process_template, sched_process_exit,
 	     TP_ARGS(p));
 
 /*
+ * Tracepoint for waiting on task to unschedule:
+ */
+DEFINE_EVENT(sched_process_template, sched_wait_task,
+	     TP_PROTO(struct task_struct *p),
+	     TP_ARGS(p));
+
+/*
  * Tracepoint for a waiting task:
  */
 TRACE_EVENT(sched_process_wait,
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 9496b965d62a..c624126a9c8a 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -8,11 +8,7 @@
 #include <linux/hrtimer.h>
 #include <linux/timer.h>
 
-/**
- * timer_init - called when the timer is initialized
- * @timer:	pointer to struct timer_list
- */
-TRACE_EVENT(timer_init,
+DECLARE_EVENT_CLASS(timer_class,
 
 	TP_PROTO(struct timer_list *timer),
 
@@ -30,6 +26,17 @@ TRACE_EVENT(timer_init,
 );
 
 /**
+ * timer_init - called when the timer is initialized
+ * @timer:	pointer to struct timer_list
+ */
+DEFINE_EVENT(timer_class, timer_init,
+
+	TP_PROTO(struct timer_list *timer),
+
+	TP_ARGS(timer)
+);
+
+/**
  * timer_start - called when the timer is started
  * @timer:	pointer to struct timer_list
  * @expires:	the timers expiry time
@@ -94,42 +101,22 @@ TRACE_EVENT(timer_expire_entry,
  * NOTE: Do NOT derefernce timer in TP_fast_assign. The pointer might
  * be invalid. We solely track the pointer.
  */
-TRACE_EVENT(timer_expire_exit,
+DEFINE_EVENT(timer_class, timer_expire_exit,
 
 	TP_PROTO(struct timer_list *timer),
 
-	TP_ARGS(timer),
-
-	TP_STRUCT__entry(
-		__field(void *,	timer	)
-	),
-
-	TP_fast_assign(
-		__entry->timer	= timer;
-	),
-
-	TP_printk("timer=%p", __entry->timer)
+	TP_ARGS(timer)
 );
 
 /**
  * timer_cancel - called when the timer is canceled
  * @timer:	pointer to struct timer_list
  */
-TRACE_EVENT(timer_cancel,
+DEFINE_EVENT(timer_class, timer_cancel,
 
 	TP_PROTO(struct timer_list *timer),
 
-	TP_ARGS(timer),
-
-	TP_STRUCT__entry(
-		__field( void *,	timer	)
-	),
-
-	TP_fast_assign(
-		__entry->timer	= timer;
-	),
-
-	TP_printk("timer=%p", __entry->timer)
+	TP_ARGS(timer)
 );
 
 /**
@@ -224,14 +211,7 @@ TRACE_EVENT(hrtimer_expire_entry,
 		  (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now }))
  );
 
-/**
- * hrtimer_expire_exit - called immediately after the hrtimer callback returns
- * @timer:	pointer to struct hrtimer
- *
- * When used in combination with the hrtimer_expire_entry tracepoint we can
- * determine the runtime of the callback function.
- */
-TRACE_EVENT(hrtimer_expire_exit,
+DECLARE_EVENT_CLASS(hrtimer_class,
 
 	TP_PROTO(struct hrtimer *hrtimer),
 
@@ -249,24 +229,28 @@ TRACE_EVENT(hrtimer_expire_exit,
 );
 
 /**
- * hrtimer_cancel - called when the hrtimer is canceled
- * @hrtimer:	pointer to struct hrtimer
+ * hrtimer_expire_exit - called immediately after the hrtimer callback returns
+ * @timer:	pointer to struct hrtimer
+ *
+ * When used in combination with the hrtimer_expire_entry tracepoint we can
+ * determine the runtime of the callback function.
  */
-TRACE_EVENT(hrtimer_cancel,
+DEFINE_EVENT(hrtimer_class, hrtimer_expire_exit,
 
 	TP_PROTO(struct hrtimer *hrtimer),
 
-	TP_ARGS(hrtimer),
+	TP_ARGS(hrtimer)
+);
 
-	TP_STRUCT__entry(
-		__field( void *,	hrtimer )
-	),
+/**
+ * hrtimer_cancel - called when the hrtimer is canceled
+ * @hrtimer:	pointer to struct hrtimer
+ */
+DEFINE_EVENT(hrtimer_class, hrtimer_cancel,
 
-	TP_fast_assign(
-		__entry->hrtimer	= hrtimer;
-	),
+	TP_PROTO(struct hrtimer *hrtimer),
 
-	TP_printk("hrtimer=%p", __entry->hrtimer)
+	TP_ARGS(hrtimer)
 );
 
 /**
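
The mechanics behind these conversions, reduced to a toy: DECLARE_EVENT_CLASS() carries the TP_STRUCT__entry/TP_fast_assign/TP_printk triple once, and every tracepoint sharing that shape becomes a two-argument DEFINE_EVENT(), shrinking both source and generated code. Names below (struct foo, foo_class, foo_tick) are invented for illustration.

/* Toy version of the pattern applied above: one class definition
 * generates the shared entry struct, assignment and print logic; each
 * DEFINE_EVENT() stamps out a tracepoint that reuses them. */
DECLARE_EVENT_CLASS(foo_class,

	TP_PROTO(struct foo *f),

	TP_ARGS(f),

	TP_STRUCT__entry(
		__field( void *,	foo	)
	),

	TP_fast_assign(
		__entry->foo	= f;
	),

	TP_printk("foo=%p", __entry->foo)
);

DEFINE_EVENT(foo_class, foo_tick,

	TP_PROTO(struct foo *f),

	TP_ARGS(f)
);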
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 5a64905d7278..a9377c0083ad 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -75,15 +75,12 @@
 #define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
 	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
 
-#undef __cpparg
-#define __cpparg(arg...) arg
-
 /* Callbacks are meaningless to ftrace. */
 #undef TRACE_EVENT_FN
 #define TRACE_EVENT_FN(name, proto, args, tstruct,		\
 		assign, print, reg, unreg)			\
-	TRACE_EVENT(name, __cpparg(proto), __cpparg(args),	\
-		__cpparg(tstruct), __cpparg(assign), __cpparg(print))	\
+	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),		\
+		PARAMS(tstruct), PARAMS(assign), PARAMS(print))	\
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
@@ -145,7 +142,7 @@
  *	struct trace_seq *s = &iter->seq;
  *	struct ftrace_raw_<call> *field; <-- defined in stage 1
  *	struct trace_entry *entry;
- *	struct trace_seq *p;
+ *	struct trace_seq *p = &iter->tmp_seq;
  *	int ret;
  *
  *	entry = iter->ent;
@@ -157,12 +154,10 @@
  *
  *	field = (typeof(field))entry;
  *
- *	p = &get_cpu_var(ftrace_event_seq);
  *	trace_seq_init(p);
  *	ret = trace_seq_printf(s, "%s: ", <call>);
  *	if (ret)
  *		ret = trace_seq_printf(s, <TP_printk> "\n");
- *	put_cpu();
  *	if (!ret)
  *		return TRACE_TYPE_PARTIAL_LINE;
  *
@@ -216,7 +211,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
 	struct trace_seq *s = &iter->seq;			\
 	struct ftrace_raw_##call *field;			\
 	struct trace_entry *entry;				\
-	struct trace_seq *p;					\
+	struct trace_seq *p = &iter->tmp_seq;			\
 	int ret;						\
 								\
 	event = container_of(trace_event, struct ftrace_event_call,	\
@@ -231,12 +226,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
 								\
 	field = (typeof(field))entry;				\
 								\
-	p = &get_cpu_var(ftrace_event_seq);			\
 	trace_seq_init(p);					\
 	ret = trace_seq_printf(s, "%s: ", event->name);		\
 	if (ret)						\
 		ret = trace_seq_printf(s, print);		\
-	put_cpu();						\
 	if (!ret)						\
 		return TRACE_TYPE_PARTIAL_LINE;			\
 								\
@@ -255,7 +248,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
 	struct trace_seq *s = &iter->seq;			\
 	struct ftrace_raw_##template *field;			\
 	struct trace_entry *entry;				\
-	struct trace_seq *p;					\
+	struct trace_seq *p = &iter->tmp_seq;			\
 	int ret;						\
 								\
 	entry = iter->ent;					\
@@ -267,12 +260,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
 								\
 	field = (typeof(field))entry;				\
 								\
-	p = &get_cpu_var(ftrace_event_seq);			\
 	trace_seq_init(p);					\
 	ret = trace_seq_printf(s, "%s: ", #call);		\
 	if (ret)						\
 		ret = trace_seq_printf(s, print);		\
-	put_cpu();						\
 	if (!ret)						\
 		return TRACE_TYPE_PARTIAL_LINE;			\
 								\
@@ -439,6 +430,7 @@ static inline notrace int ftrace_get_offsets_##call( \
  *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
  *	.raw_init		= trace_event_raw_init,
  *	.probe			= ftrace_raw_event_##call,
+ *	.reg			= ftrace_event_reg,
  * };
  *
  * static struct ftrace_event_call __used
@@ -567,6 +559,7 @@ static struct ftrace_event_class __used event_class_##call = { \
 	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
 	.raw_init		= trace_event_raw_init,			\
 	.probe			= ftrace_raw_event_##call,		\
+	.reg			= ftrace_event_reg,			\
 	_TRACE_PERF_INIT(call)						\
 };
 
@@ -705,7 +698,7 @@ perf_trace_##call(void *__data, proto) \
 	int __data_size;						\
 	int rctx;							\
 									\
-	perf_fetch_caller_regs(&__regs, 1);				\
+	perf_fetch_caller_regs(&__regs);				\
 									\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 257e08960d7b..31966a4fb8cc 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -26,7 +26,6 @@ struct syscall_metadata {
 	const char	**types;
 	const char	**args;
 	struct list_head enter_fields;
-	struct list_head exit_fields;
 
 	struct ftrace_event_call *enter_event;
 	struct ftrace_event_call *exit_event;