Diffstat (limited to 'include')
 include/linux/cpuset.h        |  16
 include/linux/ftrace.h        |  49
 include/linux/ftrace_event.h  |  81
 include/linux/kernel.h        |  11
 include/linux/module.h        |   6
 include/linux/rcutiny.h       |   2
 include/linux/rcutree.h       |   1
 include/linux/ring_buffer.h   |  10
 include/linux/sched.h         |  70
 include/linux/stop_machine.h  | 122
 include/linux/syscalls.h      |  57
 include/linux/tick.h          |   5
 include/linux/tracepoint.h    | 196
 include/linux/wait.h          |  35
 include/trace/define_trace.h  |   5
 include/trace/events/module.h |  18
 include/trace/events/napi.h   |  10
 include/trace/events/sched.h  |  32
 include/trace/events/signal.h |  52
 include/trace/ftrace.h        | 274
 include/trace/syscall.h       |  10
 21 files changed, 582 insertions(+), 480 deletions(-)
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index a5740fc4d04b..a73454aec333 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -21,8 +21,7 @@ extern int number_of_cpusets; /* How many cpusets are defined in system? */
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_locked(struct task_struct *p,
-				       struct cpumask *mask);
+extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -69,9 +68,6 @@ struct seq_file;
 extern void cpuset_task_status_allowed(struct seq_file *m,
 					struct task_struct *task);
 
-extern void cpuset_lock(void);
-extern void cpuset_unlock(void);
-
 extern int cpuset_mem_spread_node(void);
 
 static inline int cpuset_do_page_mem_spread(void)
@@ -105,10 +101,11 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
 {
 	cpumask_copy(mask, cpu_possible_mask);
 }
-static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
-					      struct cpumask *mask)
+
+static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
 {
-	cpumask_copy(mask, cpu_possible_mask);
+	cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
+	return cpumask_any(cpu_active_mask);
 }
 
 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
@@ -157,9 +154,6 @@ static inline void cpuset_task_status_allowed(struct seq_file *m,
 {
 }
 
-static inline void cpuset_lock(void) {}
-static inline void cpuset_unlock(void) {}
-
 static inline int cpuset_mem_spread_node(void)
 {
 	return 0;
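Note: cpuset_cpus_allowed_fallback() is intended for the scheduler's fallback path, when a task is left with no runnable CPU in its mask. A minimal hedged sketch of such a caller (pick_fallback_cpu() is illustrative and not part of this diff):

static int pick_fallback_cpu(struct task_struct *p)
{
	int cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);

	if (cpu >= nr_cpu_ids)		/* no allowed CPU is active */
		cpu = cpuset_cpus_allowed_fallback(p);
	return cpu;
}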
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index cc12b3c556b3..41e46330d9be 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -82,9 +82,13 @@ void clear_ftrace_function(void);
 extern void ftrace_stub(unsigned long a0, unsigned long a1);
 
 #else /* !CONFIG_FUNCTION_TRACER */
-# define register_ftrace_function(ops) do { } while (0)
-# define unregister_ftrace_function(ops) do { } while (0)
-# define clear_ftrace_function(ops) do { } while (0)
+/*
+ * (un)register_ftrace_function must be a macro since the ops parameter
+ * must not be evaluated.
+ */
+#define register_ftrace_function(ops) ({ 0; })
+#define unregister_ftrace_function(ops) ({ 0; })
+static inline void clear_ftrace_function(void) { }
 static inline void ftrace_kill(void) { }
 static inline void ftrace_stop(void) { }
 static inline void ftrace_start(void) { }
@@ -237,11 +241,13 @@ extern int skip_trace(unsigned long ip);
 extern void ftrace_disable_daemon(void);
 extern void ftrace_enable_daemon(void);
 #else
-# define skip_trace(ip)				({ 0; })
-# define ftrace_force_update()			({ 0; })
-# define ftrace_set_filter(buf, len, reset)	do { } while (0)
-# define ftrace_disable_daemon()		do { } while (0)
-# define ftrace_enable_daemon()			do { } while (0)
+static inline int skip_trace(unsigned long ip) { return 0; }
+static inline int ftrace_force_update(void) { return 0; }
+static inline void ftrace_set_filter(unsigned char *buf, int len, int reset)
+{
+}
+static inline void ftrace_disable_daemon(void) { }
+static inline void ftrace_enable_daemon(void) { }
 static inline void ftrace_release_mod(struct module *mod) {}
 static inline int register_ftrace_command(struct ftrace_func_command *cmd)
 {
@@ -314,16 +320,16 @@ static inline void __ftrace_enabled_restore(int enabled)
   extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
   extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
 #else
-# define time_hardirqs_on(a0, a1)	do { } while (0)
-# define time_hardirqs_off(a0, a1)	do { } while (0)
+  static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
+  static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
 #endif
 
 #ifdef CONFIG_PREEMPT_TRACER
   extern void trace_preempt_on(unsigned long a0, unsigned long a1);
   extern void trace_preempt_off(unsigned long a0, unsigned long a1);
 #else
-# define trace_preempt_on(a0, a1)	do { } while (0)
-# define trace_preempt_off(a0, a1)	do { } while (0)
+  static inline void trace_preempt_on(unsigned long a0, unsigned long a1) { }
+  static inline void trace_preempt_off(unsigned long a0, unsigned long a1) { }
 #endif
 
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
@@ -352,6 +358,10 @@ struct ftrace_graph_ret {
 	int depth;
 };
 
+/* Type of the callback handlers for tracing function graph */
+typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
+typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 /* for init task */
@@ -400,10 +410,6 @@ extern char __irqentry_text_end[];
 
 #define FTRACE_RETFUNC_DEPTH 50
 #define FTRACE_RETSTACK_ALLOC_SIZE 32
-/* Type of the callback handlers for tracing function graph */
-typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
-typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
-
 extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 				trace_func_graph_ent_t entryfunc);
 
@@ -441,6 +447,13 @@ static inline void unpause_graph_tracing(void)
 static inline void ftrace_graph_init_task(struct task_struct *t) { }
 static inline void ftrace_graph_exit_task(struct task_struct *t) { }
 
+static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+					trace_func_graph_ent_t entryfunc)
+{
+	return -1;
+}
+static inline void unregister_ftrace_graph(void) { }
+
 static inline int task_curr_ret_stack(struct task_struct *tsk)
 {
 	return -1;
@@ -492,7 +505,9 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
 	return tsk->trace & TSK_TRACE_FL_GRAPH;
 }
 
-extern int ftrace_dump_on_oops;
+enum ftrace_dump_mode;
+
+extern enum ftrace_dump_mode ftrace_dump_on_oops;
 
 #ifdef CONFIG_PREEMPT
 #define INIT_TRACE_RECURSION		.trace_recursion = 0,
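One practical effect of replacing the empty "do { } while (0)" stubs with static inlines is that arguments are type-checked in every configuration, not just when the tracer is compiled in. A small hedged illustration (the caller is hypothetical):

/* With the old macro stub, a bad argument type compiled silently when
 * CONFIG_DYNAMIC_FTRACE was off; the static inline keeps the prototype,
 * so misuse is diagnosed in both configurations. */
static int example(void)
{
	return skip_trace(_THIS_IP_);	/* unsigned long, checked either way */
}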
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index c0f4b364c711..dc7fc646fa2e 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -58,6 +58,7 @@ struct trace_iterator {
 	/* The below is zeroed out in pipe_read */
 	struct trace_seq	seq;
 	struct trace_entry	*ent;
+	unsigned long		lost_events;
 	int			leftover;
 	int			cpu;
 	u64			ts;
@@ -69,18 +70,25 @@ struct trace_iterator {
 };
 
 
+struct trace_event;
+
 typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
-					      int flags);
-struct trace_event {
-	struct hlist_node	node;
-	struct list_head	list;
-	int			type;
+					      int flags, struct trace_event *event);
+
+struct trace_event_functions {
 	trace_print_func	trace;
 	trace_print_func	raw;
 	trace_print_func	hex;
 	trace_print_func	binary;
 };
 
+struct trace_event {
+	struct hlist_node	node;
+	struct list_head	list;
+	int			type;
+	struct trace_event_functions *funcs;
+};
+
 extern int register_ftrace_event(struct trace_event *event);
 extern int unregister_ftrace_event(struct trace_event *event);
 
@@ -112,28 +120,67 @@ void tracing_record_cmdline(struct task_struct *tsk);
 
 struct event_filter;
 
+enum trace_reg {
+	TRACE_REG_REGISTER,
+	TRACE_REG_UNREGISTER,
+	TRACE_REG_PERF_REGISTER,
+	TRACE_REG_PERF_UNREGISTER,
+};
+
+struct ftrace_event_call;
+
+struct ftrace_event_class {
+	char			*system;
+	void			*probe;
+#ifdef CONFIG_PERF_EVENTS
+	void			*perf_probe;
+#endif
+	int			(*reg)(struct ftrace_event_call *event,
+				       enum trace_reg type);
+	int			(*define_fields)(struct ftrace_event_call *);
+	struct list_head	*(*get_fields)(struct ftrace_event_call *);
+	struct list_head	fields;
+	int			(*raw_init)(struct ftrace_event_call *);
+};
+
+enum {
+	TRACE_EVENT_FL_ENABLED_BIT,
+	TRACE_EVENT_FL_FILTERED_BIT,
+};
+
+enum {
+	TRACE_EVENT_FL_ENABLED	= (1 << TRACE_EVENT_FL_ENABLED_BIT),
+	TRACE_EVENT_FL_FILTERED	= (1 << TRACE_EVENT_FL_FILTERED_BIT),
+};
+
 struct ftrace_event_call {
 	struct list_head	list;
+	struct ftrace_event_class *class;
 	char			*name;
-	char			*system;
 	struct dentry		*dir;
-	struct trace_event	*event;
-	int			enabled;
-	int			(*regfunc)(struct ftrace_event_call *);
-	void			(*unregfunc)(struct ftrace_event_call *);
-	int			id;
+	struct trace_event	event;
 	const char		*print_fmt;
-	int			(*raw_init)(struct ftrace_event_call *);
-	int			(*define_fields)(struct ftrace_event_call *);
-	struct list_head	fields;
-	int			filter_active;
 	struct event_filter	*filter;
 	void			*mod;
 	void			*data;
 
+	/*
+	 * 32 bit flags:
+	 *   bit 1:		enabled
+	 *   bit 2:		filter_active
+	 *
+	 * Changes to flags must hold the event_mutex.
+	 *
+	 * Note: Reads of flags do not hold the event_mutex since
+	 * they occur in critical sections. But the way flags
+	 * is currently used, these changes do not affect the code
+	 * except that when a change is made, it may have a slight
+	 * delay in propagating the changes to other CPUs due to
+	 * caching and such.
+	 */
+	unsigned int		flags;
+
 	int			perf_refcount;
-	int			(*perf_event_enable)(struct ftrace_event_call *);
-	void			(*perf_event_disable)(struct ftrace_event_call *);
 };
 
 #define PERF_MAX_TRACE_SIZE	2048
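To make the new indirection concrete: the per-event regfunc/unregfunc and perf enable/disable callbacks collapse into one class ->reg() hook that dispatches on enum trace_reg. A hedged sketch of what such a hook looks like (my_enable_probe()/my_disable_probe() are hypothetical helpers, not from this diff):

static int my_event_reg(struct ftrace_event_call *call, enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return my_enable_probe(call);		/* attach tracing probe */
	case TRACE_REG_UNREGISTER:
		my_disable_probe(call);
		return 0;
#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;				/* perf hooks elided here */
#endif
	}
	return 0;
}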
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 9365227dbaf6..9fb1c1299032 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -490,6 +490,13 @@ static inline void tracing_off(void) { }
 static inline void tracing_off_permanent(void) { }
 static inline int tracing_is_on(void) { return 0; }
 #endif
+
+enum ftrace_dump_mode {
+	DUMP_NONE,
+	DUMP_ALL,
+	DUMP_ORIG,
+};
+
 #ifdef CONFIG_TRACING
 extern void tracing_start(void);
 extern void tracing_stop(void);
@@ -571,7 +578,7 @@ __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
 extern int
 __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
 
-extern void ftrace_dump(void);
+extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
 #else
 static inline void
 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
@@ -592,7 +599,7 @@ ftrace_vprintk(const char *fmt, va_list ap)
 {
 	return 0;
 }
-static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
+static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
 #endif /* CONFIG_TRACING */
 
 /*
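Callers now state explicitly which buffers to dump: DUMP_ALL for every CPU, DUMP_ORIG for only the CPU that triggered the dump. A minimal hedged sketch of an oops-path caller (illustrative, not from this diff):

static void example_oops_handler(void)
{
	if (ftrace_dump_on_oops)		/* now DUMP_ALL or DUMP_ORIG */
		ftrace_dump(ftrace_dump_on_oops);
}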
diff --git a/include/linux/module.h b/include/linux/module.h
index 515d53ae6a79..6914fcad4673 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -465,8 +465,7 @@ static inline void __module_get(struct module *module)
 	if (module) {
 		preempt_disable();
 		__this_cpu_inc(module->refptr->incs);
-		trace_module_get(module, _THIS_IP_,
-				 __this_cpu_read(module->refptr->incs));
+		trace_module_get(module, _THIS_IP_);
 		preempt_enable();
 	}
 }
@@ -480,8 +479,7 @@ static inline int try_module_get(struct module *module)
 
 	if (likely(module_is_live(module))) {
 		__this_cpu_inc(module->refptr->incs);
-		trace_module_get(module, _THIS_IP_,
-				 __this_cpu_read(module->refptr->incs));
+		trace_module_get(module, _THIS_IP_);
 	} else
 		ret = 0;
 
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index a5195875480a..0006b2df00e1 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -60,8 +60,6 @@ static inline long rcu_batches_completed_bh(void)
 	return 0;
 }
 
-extern int rcu_expedited_torture_stats(char *page);
-
 static inline void rcu_force_quiescent_state(void)
 {
 }
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 42cc3a04779e..24e467e526b8 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -35,7 +35,6 @@ struct notifier_block;
 extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
 extern int rcu_needs_cpu(int cpu);
-extern int rcu_expedited_torture_stats(char *page);
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 5fcc31ed5771..25b4f686d918 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -120,12 +120,16 @@ int ring_buffer_write(struct ring_buffer *buffer,
 		      unsigned long length, void *data);
 
 struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts);
+ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+		 unsigned long *lost_events);
 struct ring_buffer_event *
-ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts);
+ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+		    unsigned long *lost_events);
 
 struct ring_buffer_iter *
-ring_buffer_read_start(struct ring_buffer *buffer, int cpu);
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
+void ring_buffer_read_prepare_sync(void);
+void ring_buffer_read_start(struct ring_buffer_iter *iter);
 void ring_buffer_read_finish(struct ring_buffer_iter *iter);
 
 struct ring_buffer_event *
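Splitting read_start into prepare/sync/start lets a caller prepare iterators for several CPUs and pay the synchronization cost once. A hedged sketch of the intended sequence for one CPU (error handling elided; 'buffer' and 'cpu' assumed to exist):

static void example_read(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_prepare(buffer, cpu);
	ring_buffer_read_prepare_sync();	/* one sync for all prepared iters */
	ring_buffer_read_start(iter);

	while ((event = ring_buffer_read(iter, &ts)))
		;				/* consume events */

	ring_buffer_read_finish(iter);
}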
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e0447c64af6a..2a5b146fbaf9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -274,11 +274,17 @@ extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
 extern int get_nohz_load_balancer(void);
+extern int nohz_ratelimit(int cpu);
 #else
 static inline int select_nohz_load_balancer(int cpu)
 {
 	return 0;
 }
+
+static inline int nohz_ratelimit(int cpu)
+{
+	return 0;
+}
 #endif
 
 /*
@@ -953,6 +959,7 @@ struct sched_domain {
 	char *name;
 #endif
 
+	unsigned int span_weight;
 	/*
 	 * Span of all CPUs in this domain.
 	 *
@@ -1025,12 +1032,17 @@ struct sched_domain;
 #define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
 #define WF_FORK		0x02		/* child wakeup after fork */
 
+#define ENQUEUE_WAKEUP	1
+#define ENQUEUE_WAKING	2
+#define ENQUEUE_HEAD	4
+
+#define DEQUEUE_SLEEP	1
+
 struct sched_class {
 	const struct sched_class *next;
 
-	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
-			      bool head);
-	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
+	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
+	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
 	void (*yield_task) (struct rq *rq);
 
 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
@@ -1039,7 +1051,8 @@ struct sched_class {
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
+	int  (*select_task_rq)(struct rq *rq, struct task_struct *p,
+			       int sd_flag, int flags);
 
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
@@ -1076,36 +1089,8 @@ struct load_weight {
 	unsigned long weight, inv_weight;
 };
 
-/*
- * CFS stats for a schedulable entity (task, task-group etc)
- *
- * Current field usage histogram:
- *
- *     4 se->block_start
- *     4 se->run_node
- *     4 se->sleep_start
- *     6 se->load.weight
- */
-struct sched_entity {
-	struct load_weight	load;		/* for load-balancing */
-	struct rb_node		run_node;
-	struct list_head	group_node;
-	unsigned int		on_rq;
-
-	u64			exec_start;
-	u64			sum_exec_runtime;
-	u64			vruntime;
-	u64			prev_sum_exec_runtime;
-
-	u64			last_wakeup;
-	u64			avg_overlap;
-
-	u64			nr_migrations;
-
-	u64			start_runtime;
-	u64			avg_wakeup;
-
 #ifdef CONFIG_SCHEDSTATS
+struct sched_statistics {
 	u64			wait_start;
 	u64			wait_max;
 	u64			wait_count;
@@ -1137,6 +1122,24 @@ struct sched_entity {
 	u64			nr_wakeups_affine_attempts;
 	u64			nr_wakeups_passive;
 	u64			nr_wakeups_idle;
+};
+#endif
+
+struct sched_entity {
+	struct load_weight	load;		/* for load-balancing */
+	struct rb_node		run_node;
+	struct list_head	group_node;
+	unsigned int		on_rq;
+
+	u64			exec_start;
+	u64			sum_exec_runtime;
+	u64			vruntime;
+	u64			prev_sum_exec_runtime;
+
+	u64			nr_migrations;
+
+#ifdef CONFIG_SCHEDSTATS
+	struct sched_statistics statistics;
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1840,6 +1843,7 @@ extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
 
 #ifdef CONFIG_HOTPLUG_CPU
+extern void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p);
 extern void idle_task_exit(void);
 #else
 static inline void idle_task_exit(void) {}
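The merged flags argument replaces the old (wakeup, head) pair and the dequeue sleep flag. A hedged sketch of how a sched_class implementation might decode it (illustrative hook, not from this diff):

static void example_enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (flags & ENQUEUE_WAKEUP) {
		/* was the old 'wakeup' bool; ENQUEUE_WAKING additionally
		 * marks a task still in the middle of the wakeup path */
	}
	if (flags & ENQUEUE_HEAD) {
		/* was the old 'head' bool: queue at head, not tail */
	}
}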
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index baba3a23a814..6b524a0d02e4 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -1,13 +1,101 @@
 #ifndef _LINUX_STOP_MACHINE
 #define _LINUX_STOP_MACHINE
-/* "Bogolock": stop the entire machine, disable interrupts.  This is a
-   very heavy lock, which is equivalent to grabbing every spinlock
-   (and more).  So the "read" side to such a lock is anything which
-   disables preempt. */
+
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/list.h>
 #include <asm/system.h>
 
+/*
+ * stop_cpu[s]() is a simplistic per-cpu maximum priority cpu
+ * monopolization mechanism.  The caller can specify a non-sleeping
+ * function to be executed on a single or multiple cpus preempting all
+ * other processes and monopolizing those cpus until it finishes.
+ *
+ * Resources for this mechanism are preallocated when a cpu is brought
+ * up and requests are guaranteed to be served as long as the target
+ * cpus are online.
+ */
+typedef int (*cpu_stop_fn_t)(void *arg);
+
+#ifdef CONFIG_SMP
+
+struct cpu_stop_work {
+	struct list_head	list;		/* cpu_stopper->works */
+	cpu_stop_fn_t		fn;
+	void			*arg;
+	struct cpu_stop_done	*done;
+};
+
+int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
+void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
+			 struct cpu_stop_work *work_buf);
+int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
+int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
+
+#else	/* CONFIG_SMP */
+
+#include <linux/workqueue.h>
+
+struct cpu_stop_work {
+	struct work_struct	work;
+	cpu_stop_fn_t		fn;
+	void			*arg;
+};
+
+static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
+{
+	int ret = -ENOENT;
+	preempt_disable();
+	if (cpu == smp_processor_id())
+		ret = fn(arg);
+	preempt_enable();
+	return ret;
+}
+
+static void stop_one_cpu_nowait_workfn(struct work_struct *work)
+{
+	struct cpu_stop_work *stwork =
+		container_of(work, struct cpu_stop_work, work);
+	preempt_disable();
+	stwork->fn(stwork->arg);
+	preempt_enable();
+}
+
+static inline void stop_one_cpu_nowait(unsigned int cpu,
+				       cpu_stop_fn_t fn, void *arg,
+				       struct cpu_stop_work *work_buf)
+{
+	if (cpu == smp_processor_id()) {
+		INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn);
+		work_buf->fn = fn;
+		work_buf->arg = arg;
+		schedule_work(&work_buf->work);
+	}
+}
+
+static inline int stop_cpus(const struct cpumask *cpumask,
+			    cpu_stop_fn_t fn, void *arg)
+{
+	if (cpumask_test_cpu(raw_smp_processor_id(), cpumask))
+		return stop_one_cpu(raw_smp_processor_id(), fn, arg);
+	return -ENOENT;
+}
+
+static inline int try_stop_cpus(const struct cpumask *cpumask,
+				cpu_stop_fn_t fn, void *arg)
+{
+	return stop_cpus(cpumask, fn, arg);
+}
+
+#endif	/* CONFIG_SMP */
+
+/*
+ * stop_machine "Bogolock": stop the entire machine, disable
+ * interrupts.  This is a very heavy lock, which is equivalent to
+ * grabbing every spinlock (and more).  So the "read" side to such a
+ * lock is anything which disables preempt.
+ */
 #if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)
 
 /**
@@ -36,24 +124,7 @@ int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
  */
 int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
 
-/**
- * stop_machine_create: create all stop_machine threads
- *
- * Description: This causes all stop_machine threads to be created before
- * stop_machine actually gets called. This can be used by subsystems that
- * need a non failing stop_machine infrastructure.
- */
-int stop_machine_create(void);
-
-/**
- * stop_machine_destroy: destroy all stop_machine threads
- *
- * Description: This causes all stop_machine threads which were created with
- * stop_machine_create to be destroyed again.
- */
-void stop_machine_destroy(void);
-
-#else
+#else	 /* CONFIG_STOP_MACHINE && CONFIG_SMP */
 
 static inline int stop_machine(int (*fn)(void *), void *data,
 			       const struct cpumask *cpus)
@@ -65,8 +136,5 @@ static inline int stop_machine(int (*fn)(void *), void *data,
 	return ret;
 }
 
-static inline int stop_machine_create(void) { return 0; }
-static inline void stop_machine_destroy(void) { }
-
-#endif /* CONFIG_SMP */
-#endif /* _LINUX_STOP_MACHINE */
+#endif	/* CONFIG_STOP_MACHINE && CONFIG_SMP */
+#endif	/* _LINUX_STOP_MACHINE */
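As a usage sketch, the new cpu_stop API runs a short, non-sleeping callback with the target CPU monopolized; something like the following (flush_local_state() is a hypothetical callback):

static int flush_local_state(void *arg)
{
	/* runs on the target CPU at maximum priority; must not sleep */
	return 0;
}

static int example(void)
{
	return stop_one_cpu(2, flush_local_state, NULL);  /* -ENOENT if offline */
}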
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 057929b0a651..a1a86a53bc73 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -103,22 +103,6 @@ struct perf_event_attr;
 #define __SC_TEST5(t5, a5, ...)	__SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
 #define __SC_TEST6(t6, a6, ...)	__SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
 
-#ifdef CONFIG_PERF_EVENTS
-
-#define TRACE_SYS_ENTER_PERF_INIT(sname)				\
-	.perf_event_enable = perf_sysenter_enable,			\
-	.perf_event_disable = perf_sysenter_disable,
-
-#define TRACE_SYS_EXIT_PERF_INIT(sname)					\
-	.perf_event_enable = perf_sysexit_enable,			\
-	.perf_event_disable = perf_sysexit_disable,
-#else
-#define TRACE_SYS_ENTER_PERF(sname)
-#define TRACE_SYS_ENTER_PERF_INIT(sname)
-#define TRACE_SYS_EXIT_PERF(sname)
-#define TRACE_SYS_EXIT_PERF_INIT(sname)
-#endif /* CONFIG_PERF_EVENTS */
-
 #ifdef CONFIG_FTRACE_SYSCALLS
 #define __SC_STR_ADECL1(t, a)		#a
 #define __SC_STR_ADECL2(t, a, ...)	#a, __SC_STR_ADECL1(__VA_ARGS__)
@@ -134,54 +118,43 @@ struct perf_event_attr;
 #define __SC_STR_TDECL5(t, a, ...)	#t, __SC_STR_TDECL4(__VA_ARGS__)
 #define __SC_STR_TDECL6(t, a, ...)	#t, __SC_STR_TDECL5(__VA_ARGS__)
 
+extern struct ftrace_event_class event_class_syscall_enter;
+extern struct ftrace_event_class event_class_syscall_exit;
+extern struct trace_event_functions enter_syscall_print_funcs;
+extern struct trace_event_functions exit_syscall_print_funcs;
+
 #define SYSCALL_TRACE_ENTER_EVENT(sname)				\
-	static const struct syscall_metadata __syscall_meta_##sname;	\
+	static struct syscall_metadata __syscall_meta_##sname;		\
 	static struct ftrace_event_call					\
 	__attribute__((__aligned__(4))) event_enter_##sname;		\
-	static struct trace_event enter_syscall_print_##sname = {	\
-		.trace		= print_syscall_enter,			\
-	};								\
 	static struct ftrace_event_call __used				\
 	  __attribute__((__aligned__(4)))				\
 	  __attribute__((section("_ftrace_events")))			\
 	  event_enter_##sname = {					\
 		.name           = "sys_enter"#sname,			\
-		.system         = "syscalls",				\
-		.event          = &enter_syscall_print_##sname,		\
-		.raw_init	= init_syscall_trace,			\
-		.define_fields	= syscall_enter_define_fields,		\
-		.regfunc	= reg_event_syscall_enter,		\
-		.unregfunc	= unreg_event_syscall_enter,		\
+		.class		= &event_class_syscall_enter,		\
+		.event.funcs    = &enter_syscall_print_funcs,		\
 		.data		= (void *)&__syscall_meta_##sname,	\
-		TRACE_SYS_ENTER_PERF_INIT(sname)			\
 	}
 
 #define SYSCALL_TRACE_EXIT_EVENT(sname)					\
-	static const struct syscall_metadata __syscall_meta_##sname;	\
+	static struct syscall_metadata __syscall_meta_##sname;		\
 	static struct ftrace_event_call					\
 	__attribute__((__aligned__(4))) event_exit_##sname;		\
-	static struct trace_event exit_syscall_print_##sname = {	\
-		.trace		= print_syscall_exit,			\
-	};								\
 	static struct ftrace_event_call __used				\
 	  __attribute__((__aligned__(4)))				\
 	  __attribute__((section("_ftrace_events")))			\
 	  event_exit_##sname = {					\
 		.name           = "sys_exit"#sname,			\
-		.system         = "syscalls",				\
-		.event          = &exit_syscall_print_##sname,		\
-		.raw_init	= init_syscall_trace,			\
-		.define_fields	= syscall_exit_define_fields,		\
-		.regfunc	= reg_event_syscall_exit,		\
-		.unregfunc	= unreg_event_syscall_exit,		\
+		.class		= &event_class_syscall_exit,		\
+		.event.funcs    = &exit_syscall_print_funcs,		\
 		.data		= (void *)&__syscall_meta_##sname,	\
-		TRACE_SYS_EXIT_PERF_INIT(sname)				\
 	}
 
 #define SYSCALL_METADATA(sname, nb)				\
 	SYSCALL_TRACE_ENTER_EVENT(sname);			\
 	SYSCALL_TRACE_EXIT_EVENT(sname);			\
-	static const struct syscall_metadata __used		\
+	static struct syscall_metadata __used			\
 	  __attribute__((__aligned__(4)))			\
 	  __attribute__((section("__syscalls_metadata")))	\
 	  __syscall_meta_##sname = {				\
@@ -191,12 +164,14 @@ struct perf_event_attr;
 		.args		= args_##sname,			\
 		.enter_event	= &event_enter_##sname,		\
 		.exit_event	= &event_exit_##sname,		\
+		.enter_fields	= LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \
+		.exit_fields	= LIST_HEAD_INIT(__syscall_meta_##sname.exit_fields), \
 	};
 
 #define SYSCALL_DEFINE0(sname)					\
 	SYSCALL_TRACE_ENTER_EVENT(_##sname);			\
 	SYSCALL_TRACE_EXIT_EVENT(_##sname);			\
-	static const struct syscall_metadata __used		\
+	static struct syscall_metadata __used			\
 	  __attribute__((__aligned__(4)))			\
 	  __attribute__((section("__syscalls_metadata")))	\
 	  __syscall_meta__##sname = {				\
@@ -204,6 +179,8 @@ struct perf_event_attr;
 		.nb_args 	= 0,				\
 		.enter_event	= &event_enter__##sname,	\
 		.exit_event	= &event_exit__##sname,		\
+		.enter_fields	= LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \
+		.exit_fields	= LIST_HEAD_INIT(__syscall_meta__##sname.exit_fields), \
 	};							\
 	asmlinkage long sys_##sname(void)
 #else
diff --git a/include/linux/tick.h b/include/linux/tick.h
index d2ae79e21be3..b232ccc0ee29 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -42,6 +42,7 @@ enum tick_nohz_mode {
  * @idle_waketime:	Time when the idle was interrupted
  * @idle_exittime:	Time when the idle state was left
  * @idle_sleeptime:	Sum of the time slept in idle with sched tick stopped
+ * @iowait_sleeptime:	Sum of the time slept in idle with sched tick stopped, with IO outstanding
  * @sleep_length:	Duration of the current idle sleep
  * @do_timer_last:	CPU was the last one doing do_timer before going idle
  */
@@ -60,7 +61,7 @@ struct tick_sched {
 	ktime_t				idle_waketime;
 	ktime_t				idle_exittime;
 	ktime_t				idle_sleeptime;
-	ktime_t				idle_lastupdate;
+	ktime_t				iowait_sleeptime;
 	ktime_t				sleep_length;
 	unsigned long			last_jiffies;
 	unsigned long			next_jiffies;
@@ -124,6 +125,7 @@ extern void tick_nohz_stop_sched_tick(int inidle);
 extern void tick_nohz_restart_sched_tick(void);
 extern ktime_t tick_nohz_get_sleep_length(void);
 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
+extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
 # else
 static inline void tick_nohz_stop_sched_tick(int inidle) { }
 static inline void tick_nohz_restart_sched_tick(void) { }
@@ -134,6 +136,7 @@ static inline ktime_t tick_nohz_get_sleep_length(void)
 	return len;
 }
 static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
+static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
 # endif /* !NO_HZ */
 
 #endif
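A hedged sketch of how a consumer (e.g. a governor-style caller; the helper is illustrative) might sample the new per-CPU iowait accounting between two polls:

static u64 example_iowait_delta(int cpu, u64 *last)
{
	u64 now;
	u64 cur = get_cpu_iowait_time_us(cpu, &now);
	u64 delta = cur - *last;	/* microseconds of iowait-idle since last poll */

	*last = cur;
	return delta;
}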
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 78b4bd3be496..9a59d1f98cd4 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -20,12 +20,17 @@
 struct module;
 struct tracepoint;
 
+struct tracepoint_func {
+	void *func;
+	void *data;
+};
+
 struct tracepoint {
 	const char *name;		/* Tracepoint name */
 	int state;			/* State. */
 	void (*regfunc)(void);
 	void (*unregfunc)(void);
-	void **funcs;
+	struct tracepoint_func *funcs;
 } __attribute__((aligned(32)));		/*
 					 * Aligned on 32 bytes because it is
 					 * globally visible and gcc happily
@@ -33,6 +38,68 @@ struct tracepoint {
 					 * Keep in sync with vmlinux.lds.h.
 					 */
 
+/*
+ * Connect a probe to a tracepoint.
+ * Internal API, should not be used directly.
+ */
+extern int tracepoint_probe_register(const char *name, void *probe, void *data);
+
+/*
+ * Disconnect a probe from a tracepoint.
+ * Internal API, should not be used directly.
+ */
+extern int
+tracepoint_probe_unregister(const char *name, void *probe, void *data);
+
+extern int tracepoint_probe_register_noupdate(const char *name, void *probe,
+					      void *data);
+extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
+						void *data);
+extern void tracepoint_probe_update_all(void);
+
+struct tracepoint_iter {
+	struct module *module;
+	struct tracepoint *tracepoint;
+};
+
+extern void tracepoint_iter_start(struct tracepoint_iter *iter);
+extern void tracepoint_iter_next(struct tracepoint_iter *iter);
+extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
+extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
+extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
+	struct tracepoint *begin, struct tracepoint *end);
+
+/*
+ * tracepoint_synchronize_unregister must be called between the last tracepoint
+ * probe unregistration and the end of module exit to make sure there is no
+ * caller executing a probe when it is freed.
+ */
+static inline void tracepoint_synchronize_unregister(void)
+{
+	synchronize_sched();
+}
+
+#define PARAMS(args...) args
+
+#ifdef CONFIG_TRACEPOINTS
+extern void tracepoint_update_probe_range(struct tracepoint *begin,
+	struct tracepoint *end);
+#else
+static inline void tracepoint_update_probe_range(struct tracepoint *begin,
+	struct tracepoint *end)
+{ }
+#endif /* CONFIG_TRACEPOINTS */
+
+#endif /* _LINUX_TRACEPOINT_H */
+
+/*
+ * Note: we keep the TRACE_EVENT and DECLARE_TRACE outside the include
+ * file ifdef protection.
+ * This is due to the way trace events work. If a file includes two
+ * trace event headers under one "CREATE_TRACE_POINTS" the first include
+ * will override the TRACE_EVENT and break the second include.
+ */
+
 #ifndef DECLARE_TRACE
 
 #define TP_PROTO(args...)	args
@@ -43,17 +110,27 @@ struct tracepoint {
 /*
  * it_func[0] is never NULL because there is at least one element in the array
  * when the array itself is non NULL.
+ *
+ * Note, the proto and args passed in includes "__data" as the first parameter.
+ * The reason for this is to handle the "void" prototype. If a tracepoint
+ * has a "void" prototype, then it is invalid to declare a function
+ * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
+ * "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto".
  */
 #define __DO_TRACE(tp, proto, args)					\
 	do {								\
-		void **it_func;						\
+		struct tracepoint_func *it_func_ptr;			\
+		void *it_func;						\
+		void *__data;						\
 									\
 		rcu_read_lock_sched_notrace();				\
-		it_func = rcu_dereference_sched((tp)->funcs);		\
-		if (it_func) {						\
+		it_func_ptr = rcu_dereference_sched((tp)->funcs);	\
+		if (it_func_ptr) {					\
 			do {						\
-				((void(*)(proto))(*it_func))(args);	\
-			} while (*(++it_func));				\
+				it_func = (it_func_ptr)->func;		\
+				__data = (it_func_ptr)->data;		\
+				((void(*)(proto))(it_func))(args);	\
+			} while ((++it_func_ptr)->func);		\
 		}							\
 		rcu_read_unlock_sched_notrace();			\
 	} while (0)
@@ -63,24 +140,32 @@ struct tracepoint {
  * not add unwanted padding between the beginning of the section and the
  * structure. Force alignment to the same alignment as the section start.
  */
-#define DECLARE_TRACE(name, proto, args)				\
+#define __DECLARE_TRACE(name, proto, args, data_proto, data_args)	\
 	extern struct tracepoint __tracepoint_##name;			\
 	static inline void trace_##name(proto)				\
 	{								\
 		if (unlikely(__tracepoint_##name.state))		\
 			__DO_TRACE(&__tracepoint_##name,		\
-				TP_PROTO(proto), TP_ARGS(args));	\
+				TP_PROTO(data_proto),			\
+				TP_ARGS(data_args));			\
 	}								\
-	static inline int register_trace_##name(void (*probe)(proto))	\
+	static inline int						\
+	register_trace_##name(void (*probe)(data_proto), void *data)	\
 	{								\
-		return tracepoint_probe_register(#name, (void *)probe);	\
+		return tracepoint_probe_register(#name, (void *)probe,	\
+						 data);			\
 	}								\
-	static inline int unregister_trace_##name(void (*probe)(proto))	\
+	static inline int						\
+	unregister_trace_##name(void (*probe)(data_proto), void *data)	\
+	{								\
+		return tracepoint_probe_unregister(#name, (void *)probe, \
+						   data);		\
+	}								\
+	static inline void						\
+	check_trace_callback_type_##name(void (*cb)(data_proto))	\
 	{								\
-		return tracepoint_probe_unregister(#name, (void *)probe);\
 	}
 
-
 #define DEFINE_TRACE_FN(name, reg, unreg)				\
 	static const char __tpstrtab_##name[]				\
 	__attribute__((section("__tracepoints_strings"))) = #name;	\
@@ -96,22 +181,24 @@ struct tracepoint {
 #define EXPORT_TRACEPOINT_SYMBOL(name)					\
 	EXPORT_SYMBOL(__tracepoint_##name)
 
-extern void tracepoint_update_probe_range(struct tracepoint *begin,
-	struct tracepoint *end);
-
 #else /* !CONFIG_TRACEPOINTS */
-#define DECLARE_TRACE(name, proto, args)				\
-	static inline void _do_trace_##name(struct tracepoint *tp, proto) \
-	{ }								\
+#define __DECLARE_TRACE(name, proto, args, data_proto, data_args)	\
 	static inline void trace_##name(proto)				\
 	{ }								\
-	static inline int register_trace_##name(void (*probe)(proto))	\
+	static inline int						\
+	register_trace_##name(void (*probe)(data_proto),		\
+			      void *data)				\
 	{								\
 		return -ENOSYS;						\
 	}								\
-	static inline int unregister_trace_##name(void (*probe)(proto))	\
+	static inline int						\
+	unregister_trace_##name(void (*probe)(data_proto),		\
+				void *data)				\
 	{								\
 		return -ENOSYS;						\
+	}								\
+	static inline void check_trace_callback_type_##name(void (*cb)(data_proto)) \
+	{								\
 	}
 
 #define DEFINE_TRACE_FN(name, reg, unreg)
@@ -119,60 +206,31 @@ extern void tracepoint_update_probe_range(struct tracepoint *begin,
 #define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
 #define EXPORT_TRACEPOINT_SYMBOL(name)
 
-static inline void tracepoint_update_probe_range(struct tracepoint *begin,
-		struct tracepoint *end)
-{ }
 #endif /* CONFIG_TRACEPOINTS */
-#endif /* DECLARE_TRACE */
-
-/*
- * Connect a probe to a tracepoint.
- * Internal API, should not be used directly.
- */
-extern int tracepoint_probe_register(const char *name, void *probe);
-
-/*
- * Disconnect a probe from a tracepoint.
- * Internal API, should not be used directly.
- */
-extern int tracepoint_probe_unregister(const char *name, void *probe);
-
-extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
-extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
-extern void tracepoint_probe_update_all(void);
-
-struct tracepoint_iter {
-	struct module *module;
-	struct tracepoint *tracepoint;
-};
-
-extern void tracepoint_iter_start(struct tracepoint_iter *iter);
-extern void tracepoint_iter_next(struct tracepoint_iter *iter);
-extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
-extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
-extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
-	struct tracepoint *begin, struct tracepoint *end);
 
 /*
- * tracepoint_synchronize_unregister must be called between the last tracepoint
- * probe unregistration and the end of module exit to make sure there is no
- * caller executing a probe when it is freed.
+ * The need for the DECLARE_TRACE_NOARGS() is to handle the prototype
+ * (void). "void" is a special value in a function prototype and can
+ * not be combined with other arguments. Since the DECLARE_TRACE()
+ * macro adds a data element at the beginning of the prototype,
+ * we need a way to differentiate "(void *data, proto)" from
+ * "(void *data, void)". The second prototype is invalid.
+ *
+ * DECLARE_TRACE_NOARGS() passes "void" as the tracepoint prototype
+ * and "void *__data" as the callback prototype.
+ *
+ * DECLARE_TRACE() passes "proto" as the tracepoint prototype and
+ * "void *__data, proto" as the callback prototype.
  */
-static inline void tracepoint_synchronize_unregister(void)
-{
-	synchronize_sched();
-}
+#define DECLARE_TRACE_NOARGS(name)					\
+	__DECLARE_TRACE(name, void, , void *__data, __data)
 
-#define PARAMS(args...) args
-
-#endif /* _LINUX_TRACEPOINT_H */
+#define DECLARE_TRACE(name, proto, args)				\
+	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),		\
+			PARAMS(void *__data, proto),			\
+			PARAMS(__data, args))
 
-/*
- * Note: we keep the TRACE_EVENT outside the include file ifdef protection.
- * This is due to the way trace events work. If a file includes two
- * trace event headers under one "CREATE_TRACE_POINTS" the first include
- * will override the TRACE_EVENT and break the second include.
- */
-
+#endif /* DECLARE_TRACE */
 
 #ifndef TRACE_EVENT
 /*
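The net effect is that every probe now receives a private void * as its first argument. A hedged sketch of a probe for the (real) sched_switch tracepoint under the new convention and the (prev, next) prototype introduced elsewhere in this series (my_probe and my_ctx are illustrative):

static void my_probe(void *my_ctx, struct task_struct *prev,
		     struct task_struct *next)
{
	/* my_ctx is whatever pointer was passed at registration time */
}

static int example_register(void *my_ctx)
{
	return register_trace_sched_switch(my_probe, my_ctx);
}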
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a48e16b77d5e..76d96d035ea0 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -127,12 +127,26 @@ static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
 /*
  * Used for wake-one threads:
  */
+static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
+					      wait_queue_t *wait)
+{
+	wait->flags |= WQ_FLAG_EXCLUSIVE;
+	__add_wait_queue(q, wait);
+}
+
 static inline void __add_wait_queue_tail(wait_queue_head_t *head,
 					 wait_queue_t *new)
 {
 	list_add_tail(&new->task_list, &head->task_list);
 }
 
+static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
+						   wait_queue_t *wait)
+{
+	wait->flags |= WQ_FLAG_EXCLUSIVE;
+	__add_wait_queue_tail(q, wait);
+}
+
 static inline void __remove_wait_queue(wait_queue_head_t *head,
 				       wait_queue_t *old)
 {
@@ -404,25 +418,6 @@ do {									\
 })
 
 /*
- * Must be called with the spinlock in the wait_queue_head_t held.
- */
-static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
-						   wait_queue_t * wait)
-{
-	wait->flags |= WQ_FLAG_EXCLUSIVE;
-	__add_wait_queue_tail(q,  wait);
-}
-
-/*
- * Must be called with the spinlock in the wait_queue_head_t held.
- */
-static inline void remove_wait_queue_locked(wait_queue_head_t *q,
-					    wait_queue_t * wait)
-{
-	__remove_wait_queue(q,  wait);
-}
-
-/*
  * These are the old interfaces to sleep waiting for an event.
  * They are racy.  DO NOT use them, use the wait_event* interfaces above.
  * We plan to remove these interfaces.
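Callers of the removed *_locked helpers switch to the new double-underscore names with identical locking rules; a hedged before/after sketch:

static void example(wait_queue_head_t *q, wait_queue_t *wait)
{
	/* old: add_wait_queue_exclusive_locked(q, wait);
	 * new equivalent (q->lock must still be held by the caller): */
	__add_wait_queue_tail_exclusive(q, wait);
}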
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index 5acfb1eb4df9..1dfab5401511 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -65,6 +65,10 @@
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+/* Make all open coded DECLARE_TRACE nops */
+#undef DECLARE_TRACE
+#define DECLARE_TRACE(name, proto, args)
+
 #ifdef CONFIG_EVENT_TRACING
 #include <trace/ftrace.h>
 #endif
@@ -75,6 +79,7 @@
 #undef DEFINE_EVENT
 #undef DEFINE_EVENT_PRINT
 #undef TRACE_HEADER_MULTI_READ
+#undef DECLARE_TRACE
 
 /* Only undef what we defined in this file */
 #ifdef UNDEF_TRACE_INCLUDE_FILE
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
index 4b0f48ba16a6..c7bb2f0482fe 100644
--- a/include/trace/events/module.h
+++ b/include/trace/events/module.h
@@ -51,11 +51,14 @@ TRACE_EVENT(module_free,
 	TP_printk("%s", __get_str(name))
 );
 
+#ifdef CONFIG_MODULE_UNLOAD
+/* trace_module_get/put are only used if CONFIG_MODULE_UNLOAD is defined */
+
 DECLARE_EVENT_CLASS(module_refcnt,
 
-	TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
+	TP_PROTO(struct module *mod, unsigned long ip),
 
-	TP_ARGS(mod, ip, refcnt),
+	TP_ARGS(mod, ip),
 
 	TP_STRUCT__entry(
 		__field(	unsigned long,	ip		)
@@ -65,7 +68,7 @@ DECLARE_EVENT_CLASS(module_refcnt,
 
 	TP_fast_assign(
 		__entry->ip	= ip;
-		__entry->refcnt	= refcnt;
+		__entry->refcnt	= __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs);
 		__assign_str(name, mod->name);
 	),
 
@@ -75,17 +78,18 @@ DECLARE_EVENT_CLASS(module_refcnt,
 
 DEFINE_EVENT(module_refcnt, module_get,
 
-	TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
+	TP_PROTO(struct module *mod, unsigned long ip),
 
-	TP_ARGS(mod, ip, refcnt)
+	TP_ARGS(mod, ip)
 );
 
 DEFINE_EVENT(module_refcnt, module_put,
 
-	TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
+	TP_PROTO(struct module *mod, unsigned long ip),
 
-	TP_ARGS(mod, ip, refcnt)
+	TP_ARGS(mod, ip)
 );
+#endif /* CONFIG_MODULE_UNLOAD */
 
 TRACE_EVENT(module_request,
 
diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h
index a8989c4547e7..188deca2f3c7 100644
--- a/include/trace/events/napi.h
+++ b/include/trace/events/napi.h
@@ -1,4 +1,7 @@
-#ifndef _TRACE_NAPI_H_
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM napi
+
+#if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_NAPI_H_
 
 #include <linux/netdevice.h>
@@ -8,4 +11,7 @@ DECLARE_TRACE(napi_poll,
 	TP_PROTO(struct napi_struct *napi),
 	TP_ARGS(napi));
 
-#endif
+#endif /* _TRACE_NAPI_H_ */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index cfceb0b73e20..4f733ecea46e 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -51,15 +51,12 @@ TRACE_EVENT(sched_kthread_stop_ret,
51 51
52/* 52/*
53 * Tracepoint for waiting on task to unschedule: 53 * Tracepoint for waiting on task to unschedule:
54 *
55 * (NOTE: the 'rq' argument is not used by generic trace events,
56 * but used by the latency tracer plugin. )
57 */ 54 */
58TRACE_EVENT(sched_wait_task, 55TRACE_EVENT(sched_wait_task,
59 56
60 TP_PROTO(struct rq *rq, struct task_struct *p), 57 TP_PROTO(struct task_struct *p),
61 58
62 TP_ARGS(rq, p), 59 TP_ARGS(p),
63 60
64 TP_STRUCT__entry( 61 TP_STRUCT__entry(
65 __array( char, comm, TASK_COMM_LEN ) 62 __array( char, comm, TASK_COMM_LEN )
@@ -79,15 +76,12 @@ TRACE_EVENT(sched_wait_task,
79 76
80/* 77/*
81 * Tracepoint for waking up a task: 78 * Tracepoint for waking up a task:
82 *
83 * (NOTE: the 'rq' argument is not used by generic trace events,
84 * but used by the latency tracer plugin. )
85 */ 79 */
86DECLARE_EVENT_CLASS(sched_wakeup_template, 80DECLARE_EVENT_CLASS(sched_wakeup_template,
87 81
88 TP_PROTO(struct rq *rq, struct task_struct *p, int success), 82 TP_PROTO(struct task_struct *p, int success),
89 83
90 TP_ARGS(rq, p, success), 84 TP_ARGS(p, success),
91 85
92 TP_STRUCT__entry( 86 TP_STRUCT__entry(
93 __array( char, comm, TASK_COMM_LEN ) 87 __array( char, comm, TASK_COMM_LEN )
@@ -111,31 +105,25 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
111); 105);
112 106
113DEFINE_EVENT(sched_wakeup_template, sched_wakeup, 107DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
114 TP_PROTO(struct rq *rq, struct task_struct *p, int success), 108 TP_PROTO(struct task_struct *p, int success),
115 TP_ARGS(rq, p, success)); 109 TP_ARGS(p, success));
116 110
117/* 111/*
118 * Tracepoint for waking up a new task: 112 * Tracepoint for waking up a new task:
119 *
120 * (NOTE: the 'rq' argument is not used by generic trace events,
121 * but used by the latency tracer plugin. )
122 */ 113 */
123DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new, 114DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
124 TP_PROTO(struct rq *rq, struct task_struct *p, int success), 115 TP_PROTO(struct task_struct *p, int success),
125 TP_ARGS(rq, p, success)); 116 TP_ARGS(p, success));
126 117
127/* 118/*
128 * Tracepoint for task switches, performed by the scheduler: 119 * Tracepoint for task switches, performed by the scheduler:
129 *
130 * (NOTE: the 'rq' argument is not used by generic trace events,
131 * but used by the latency tracer plugin. )
132 */ 120 */
133TRACE_EVENT(sched_switch, 121TRACE_EVENT(sched_switch,
134 122
135 TP_PROTO(struct rq *rq, struct task_struct *prev, 123 TP_PROTO(struct task_struct *prev,
136 struct task_struct *next), 124 struct task_struct *next),
137 125
138 TP_ARGS(rq, prev, next), 126 TP_ARGS(prev, next),
139 127
140 TP_STRUCT__entry( 128 TP_STRUCT__entry(
141 __array( char, prev_comm, TASK_COMM_LEN ) 129 __array( char, prev_comm, TASK_COMM_LEN )
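Dropping the rq parameter changes the tracepoint signature, so every caller in the scheduler loses an argument. A hedged sketch of the call-site change in kernel/sched.c (surrounding code assumed, not shown in this diff):

	/* before: rq passed only for the benefit of the latency tracer */
	trace_sched_switch(rq, prev, next);
	trace_sched_wakeup(rq, p, success);

	/* after */
	trace_sched_switch(prev, next);
	trace_sched_wakeup(p, success);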
diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h
index a510b75ac304..814566c99d29 100644
--- a/include/trace/events/signal.h
+++ b/include/trace/events/signal.h
@@ -100,18 +100,7 @@ TRACE_EVENT(signal_deliver,
100 __entry->sa_handler, __entry->sa_flags) 100 __entry->sa_handler, __entry->sa_flags)
101); 101);
102 102
103/** 103DECLARE_EVENT_CLASS(signal_queue_overflow,
104 * signal_overflow_fail - called when the signal queue overflows
105 * @sig: signal number
106 * @group: whether the signal is sent to a process group (bool)
107 * @info: pointer to struct siginfo
108 *
109 * The kernel fails to generate the 'sig' signal with 'info' siginfo
110 * because the siginfo queue has overflowed, and the signal is dropped.
111 * 'group' is not 0 if the signal will be sent to a process group.
112 * 'sig' is always one of the RT signals.
113 */
114TRACE_EVENT(signal_overflow_fail,
115 104
116 TP_PROTO(int sig, int group, struct siginfo *info), 105 TP_PROTO(int sig, int group, struct siginfo *info),
117 106
@@ -135,6 +124,24 @@ TRACE_EVENT(signal_overflow_fail,
135); 124);
136 125
137/** 126/**
127 * signal_overflow_fail - called when the signal queue overflows
128 * @sig: signal number
129 * @group: whether the signal is sent to a process group (bool)
130 * @info: pointer to struct siginfo
131 *
132 * The kernel fails to generate the 'sig' signal with 'info' siginfo
133 * because the siginfo queue has overflowed, and the signal is dropped.
134 * 'group' is not 0 if the signal will be sent to a process group.
135 * 'sig' is always one of the RT signals.
136 */
137DEFINE_EVENT(signal_queue_overflow, signal_overflow_fail,
138
139 TP_PROTO(int sig, int group, struct siginfo *info),
140
141 TP_ARGS(sig, group, info)
142);
143
144/**
138 * signal_lose_info - called when siginfo is lost 145 * signal_lose_info - called when siginfo is lost
139 * @sig: signal number 146 * @sig: signal number
140 * @group: whether the signal is sent to a process group (bool) 147
@@ -145,28 +152,13 @@ TRACE_EVENT(signal_overflow_fail,
145 * 'group' is not 0 if the signal will be sent to a process group. 152 * 'group' is not 0 if the signal will be sent to a process group.
146 * 'sig' is always one of the non-RT signals. 153
147 */ 154 */
148TRACE_EVENT(signal_lose_info, 155DEFINE_EVENT(signal_queue_overflow, signal_lose_info,
149 156
150 TP_PROTO(int sig, int group, struct siginfo *info), 157 TP_PROTO(int sig, int group, struct siginfo *info),
151 158
152 TP_ARGS(sig, group, info), 159 TP_ARGS(sig, group, info)
153
154 TP_STRUCT__entry(
155 __field( int, sig )
156 __field( int, group )
157 __field( int, errno )
158 __field( int, code )
159 ),
160
161 TP_fast_assign(
162 __entry->sig = sig;
163 __entry->group = group;
164 TP_STORE_SIGINFO(__entry, info);
165 ),
166
167 TP_printk("sig=%d group=%d errno=%d code=%d",
168 __entry->sig, __entry->group, __entry->errno, __entry->code)
169); 160);
161
170#endif /* _TRACE_SIGNAL_H */ 162#endif /* _TRACE_SIGNAL_H */
171 163
172/* This part must be outside protection */ 164/* This part must be outside protection */
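Since signal_overflow_fail and signal_lose_info share the same proto, fields, assignment, and format string, the patch folds them into one DECLARE_EVENT_CLASS; each DEFINE_EVENT then emits only the per-event registration glue instead of a full copy of the generated code. Any further event with the same shape could reuse the class the same way; a sketch with a hypothetical event name:

	DEFINE_EVENT(signal_queue_overflow, signal_drop_info,	/* hypothetical */

		TP_PROTO(int sig, int group, struct siginfo *info),

		TP_ARGS(sig, group, info)
	);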
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 882c64832ffe..e0e8daa6767e 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -62,7 +62,10 @@
62 struct trace_entry ent; \ 62 struct trace_entry ent; \
63 tstruct \ 63 tstruct \
64 char __data[0]; \ 64 char __data[0]; \
65 }; 65 }; \
66 \
67 static struct ftrace_event_class event_class_##name;
68
66#undef DEFINE_EVENT 69#undef DEFINE_EVENT
67#define DEFINE_EVENT(template, name, proto, args) \ 70#define DEFINE_EVENT(template, name, proto, args) \
68 static struct ftrace_event_call \ 71 static struct ftrace_event_call \
@@ -147,16 +150,18 @@
147 * 150 *
148 * entry = iter->ent; 151 * entry = iter->ent;
149 * 152 *
150 * if (entry->type != event_<call>.id) { 153 * if (entry->type != event_<call>->event.type) {
151 * WARN_ON_ONCE(1); 154 * WARN_ON_ONCE(1);
152 * return TRACE_TYPE_UNHANDLED; 155 * return TRACE_TYPE_UNHANDLED;
153 * } 156 * }
154 * 157 *
155 * field = (typeof(field))entry; 158 * field = (typeof(field))entry;
156 * 159 *
157 * p = get_cpu_var(ftrace_event_seq); 160 * p = &get_cpu_var(ftrace_event_seq);
158 * trace_seq_init(p); 161 * trace_seq_init(p);
159 * ret = trace_seq_printf(s, <TP_printk> "\n"); 162 * ret = trace_seq_printf(s, "%s: ", <call>);
163 * if (ret)
164 * ret = trace_seq_printf(s, <TP_printk> "\n");
160 * put_cpu(); 165 * put_cpu();
161 * if (!ret) 166 * if (!ret)
162 * return TRACE_TYPE_PARTIAL_LINE; 167 * return TRACE_TYPE_PARTIAL_LINE;
@@ -201,18 +206,22 @@
201#undef DECLARE_EVENT_CLASS 206#undef DECLARE_EVENT_CLASS
202#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 207#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
203static notrace enum print_line_t \ 208static notrace enum print_line_t \
204ftrace_raw_output_id_##call(int event_id, const char *name, \ 209ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
205 struct trace_iterator *iter, int flags) \ 210 struct trace_event *trace_event) \
206{ \ 211{ \
212 struct ftrace_event_call *event; \
207 struct trace_seq *s = &iter->seq; \ 213 struct trace_seq *s = &iter->seq; \
208 struct ftrace_raw_##call *field; \ 214 struct ftrace_raw_##call *field; \
209 struct trace_entry *entry; \ 215 struct trace_entry *entry; \
210 struct trace_seq *p; \ 216 struct trace_seq *p; \
211 int ret; \ 217 int ret; \
212 \ 218 \
219 event = container_of(trace_event, struct ftrace_event_call, \
220 event); \
221 \
213 entry = iter->ent; \ 222 entry = iter->ent; \
214 \ 223 \
215 if (entry->type != event_id) { \ 224 if (entry->type != event->event.type) { \
216 WARN_ON_ONCE(1); \ 225 WARN_ON_ONCE(1); \
217 return TRACE_TYPE_UNHANDLED; \ 226 return TRACE_TYPE_UNHANDLED; \
218 } \ 227 } \
@@ -221,7 +230,7 @@ ftrace_raw_output_id_##call(int event_id, const char *name, \
221 \ 230 \
222 p = &get_cpu_var(ftrace_event_seq); \ 231 p = &get_cpu_var(ftrace_event_seq); \
223 trace_seq_init(p); \ 232 trace_seq_init(p); \
224 ret = trace_seq_printf(s, "%s: ", name); \ 233 ret = trace_seq_printf(s, "%s: ", event->name); \
225 if (ret) \ 234 if (ret) \
226 ret = trace_seq_printf(s, print); \ 235 ret = trace_seq_printf(s, print); \
227 put_cpu(); \ 236 put_cpu(); \
@@ -229,21 +238,16 @@ ftrace_raw_output_id_##call(int event_id, const char *name, \
229 return TRACE_TYPE_PARTIAL_LINE; \ 238 return TRACE_TYPE_PARTIAL_LINE; \
230 \ 239 \
231 return TRACE_TYPE_HANDLED; \ 240 return TRACE_TYPE_HANDLED; \
232} 241} \
233 242static struct trace_event_functions ftrace_event_type_funcs_##call = { \
234#undef DEFINE_EVENT 243 .trace = ftrace_raw_output_##call, \
235#define DEFINE_EVENT(template, name, proto, args) \ 244};
236static notrace enum print_line_t \
237ftrace_raw_output_##name(struct trace_iterator *iter, int flags) \
238{ \
239 return ftrace_raw_output_id_##template(event_##name.id, \
240 #name, iter, flags); \
241}
242 245
243#undef DEFINE_EVENT_PRINT 246#undef DEFINE_EVENT_PRINT
244#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ 247#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
245static notrace enum print_line_t \ 248static notrace enum print_line_t \
246ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ 249ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
250 struct trace_event *event) \
247{ \ 251{ \
248 struct trace_seq *s = &iter->seq; \ 252 struct trace_seq *s = &iter->seq; \
249 struct ftrace_raw_##template *field; \ 253 struct ftrace_raw_##template *field; \
@@ -253,7 +257,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
253 \ 257 \
254 entry = iter->ent; \ 258 entry = iter->ent; \
255 \ 259 \
256 if (entry->type != event_##call.id) { \ 260 if (entry->type != event_##call.event.type) { \
257 WARN_ON_ONCE(1); \ 261 WARN_ON_ONCE(1); \
258 return TRACE_TYPE_UNHANDLED; \ 262 return TRACE_TYPE_UNHANDLED; \
259 } \ 263 } \
@@ -270,7 +274,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
270 return TRACE_TYPE_PARTIAL_LINE; \ 274 return TRACE_TYPE_PARTIAL_LINE; \
271 \ 275 \
272 return TRACE_TYPE_HANDLED; \ 276 return TRACE_TYPE_HANDLED; \
273} 277} \
278static struct trace_event_functions ftrace_event_type_funcs_##call = { \
279 .trace = ftrace_raw_output_##call, \
280};
274 281
275#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 282#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
276 283
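The output callback no longer receives an event id and name pair; it is handed the struct trace_event embedded in the ftrace_event_call and recovers the enclosing call with container_of(), which is what lets one callback serve every event of a class. A runnable userspace sketch of just that step (structure layouts are illustrative, not the kernel's):

	#include <stddef.h>
	#include <stdio.h>

	struct trace_event {
		int type;
	};

	struct ftrace_event_call {
		const char *name;
		struct trace_event event;	/* embedded, as in the new layout */
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* The .trace callback sees only the embedded trace_event... */
	static void raw_output(struct trace_event *ev)
	{
		/* ...and recovers the enclosing call, the same step
		 * ftrace_raw_output_##call() performs above. */
		struct ftrace_event_call *call =
			container_of(ev, struct ftrace_event_call, event);

		printf("%s: type=%d\n", call->name, ev->type);
	}

	int main(void)
	{
		struct ftrace_event_call call = {
			.name  = "sched_switch",
			.event = { .type = 42 },
		};

		raw_output(&call.event);
		return 0;
	}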
@@ -376,142 +383,83 @@ static inline notrace int ftrace_get_offsets_##call( \
376 383
377#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 384#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
378 385
379#ifdef CONFIG_PERF_EVENTS
380
381/*
382 * Generate the functions needed for tracepoint perf_event support.
383 *
384 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
385 *
386 * static int ftrace_profile_enable_<call>(void)
387 * {
388 * return register_trace_<call>(ftrace_profile_<call>);
389 * }
390 *
391 * static void ftrace_profile_disable_<call>(void)
392 * {
393 * unregister_trace_<call>(ftrace_profile_<call>);
394 * }
395 *
396 */
397
398#undef DECLARE_EVENT_CLASS
399#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)
400
401#undef DEFINE_EVENT
402#define DEFINE_EVENT(template, name, proto, args) \
403 \
404static void perf_trace_##name(proto); \
405 \
406static notrace int \
407perf_trace_enable_##name(struct ftrace_event_call *unused) \
408{ \
409 return register_trace_##name(perf_trace_##name); \
410} \
411 \
412static notrace void \
413perf_trace_disable_##name(struct ftrace_event_call *unused) \
414{ \
415 unregister_trace_##name(perf_trace_##name); \
416}
417
418#undef DEFINE_EVENT_PRINT
419#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
420 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
421
422#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
423
424#endif /* CONFIG_PERF_EVENTS */
425
426/* 386/*
427 * Stage 4 of the trace events. 387 * Stage 4 of the trace events.
428 * 388 *
429 * Override the macros in <trace/trace_events.h> to include the following: 389 * Override the macros in <trace/trace_events.h> to include the following:
430 * 390 *
431 * static void ftrace_event_<call>(proto)
432 * {
433 * event_trace_printk(_RET_IP_, "<call>: " <fmt>);
434 * }
435 *
436 * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
437 * {
438 * return register_trace_<call>(ftrace_event_<call>);
439 * }
440 *
441 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
442 * {
443 * unregister_trace_<call>(ftrace_event_<call>);
444 * }
445 *
446 *
447 * For those macros defined with TRACE_EVENT: 391 * For those macros defined with TRACE_EVENT:
448 * 392 *
449 * static struct ftrace_event_call event_<call>; 393 * static struct ftrace_event_call event_<call>;
450 * 394 *
451 * static void ftrace_raw_event_<call>(proto) 395 * static void ftrace_raw_event_<call>(void *__data, proto)
452 * { 396 * {
397 * struct ftrace_event_call *event_call = __data;
398 * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
453 * struct ring_buffer_event *event; 399 * struct ring_buffer_event *event;
454 * struct ftrace_raw_<call> *entry; <-- defined in stage 1 400 * struct ftrace_raw_<call> *entry; <-- defined in stage 1
455 * struct ring_buffer *buffer; 401 * struct ring_buffer *buffer;
456 * unsigned long irq_flags; 402 * unsigned long irq_flags;
403 * int __data_size;
457 * int pc; 404 * int pc;
458 * 405 *
459 * local_save_flags(irq_flags); 406 * local_save_flags(irq_flags);
460 * pc = preempt_count(); 407 * pc = preempt_count();
461 * 408 *
409 * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
410 *
462 * event = trace_current_buffer_lock_reserve(&buffer, 411 * event = trace_current_buffer_lock_reserve(&buffer,
463 * event_<call>.id, 412 * event_<call>->event.type,
464 * sizeof(struct ftrace_raw_<call>), 413 * sizeof(*entry) + __data_size,
465 * irq_flags, pc); 414 * irq_flags, pc);
466 * if (!event) 415 * if (!event)
467 * return; 416 * return;
468 * entry = ring_buffer_event_data(event); 417 * entry = ring_buffer_event_data(event);
469 * 418 *
470 * <assign>; <-- Here we assign the entries by the __field and 419 * { <assign>; } <-- Here we assign the entries by the __field and
471 * __array macros. 420 * __array macros.
472 *
473 * trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
474 * }
475 *
476 * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
477 * {
478 * int ret;
479 *
480 * ret = register_trace_<call>(ftrace_raw_event_<call>);
481 * if (!ret)
482 * pr_info("event trace: Could not activate trace point "
483 * "probe to <call>");
484 * return ret;
485 * }
486 * 421 *
487 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused) 422 * if (!filter_current_check_discard(buffer, event_call, entry, event))
488 * { 423 * trace_current_buffer_unlock_commit(buffer,
489 * unregister_trace_<call>(ftrace_raw_event_<call>); 424 * event, irq_flags, pc);
490 * } 425 * }
491 * 426 *
492 * static struct trace_event ftrace_event_type_<call> = { 427 * static struct trace_event ftrace_event_type_<call> = {
493 * .trace = ftrace_raw_output_<call>, <-- stage 2 428 * .trace = ftrace_raw_output_<call>, <-- stage 2
494 * }; 429 * };
495 * 430 *
431 * static const char print_fmt_<call>[] = <TP_printk>;
432 *
433 * static struct ftrace_event_class __used event_class_<template> = {
434 * .system = "<system>",
435 * .define_fields = ftrace_define_fields_<call>,
436 * .fields = LIST_HEAD_INIT(event_class_##call.fields),
437 * .raw_init = trace_event_raw_init,
438 * .probe = ftrace_raw_event_##call,
439 * };
440 *
496 * static struct ftrace_event_call __used 441 * static struct ftrace_event_call __used
497 * __attribute__((__aligned__(4))) 442 * __attribute__((__aligned__(4)))
498 * __attribute__((section("_ftrace_events"))) event_<call> = { 443 * __attribute__((section("_ftrace_events"))) event_<call> = {
499 * .name = "<call>", 444 * .name = "<call>",
500 * .system = "<system>", 445 * .class = event_class_<template>,
501 * .raw_init = trace_event_raw_init, 446 * .event = &ftrace_event_type_<call>,
502 * .regfunc = ftrace_reg_event_<call>, 447 * .print_fmt = print_fmt_<call>,
503 * .unregfunc = ftrace_unreg_event_<call>, 448 * };
504 * }
505 * 449 *
506 */ 450 */
507 451
508#ifdef CONFIG_PERF_EVENTS 452#ifdef CONFIG_PERF_EVENTS
509 453
454#define _TRACE_PERF_PROTO(call, proto) \
455 static notrace void \
456 perf_trace_##call(void *__data, proto);
457
510#define _TRACE_PERF_INIT(call) \ 458#define _TRACE_PERF_INIT(call) \
511 .perf_event_enable = perf_trace_enable_##call, \ 459 .perf_probe = perf_trace_##call,
512 .perf_event_disable = perf_trace_disable_##call,
513 460
514#else 461#else
462#define _TRACE_PERF_PROTO(call, proto)
515#define _TRACE_PERF_INIT(call) 463#define _TRACE_PERF_INIT(call)
516#endif /* CONFIG_PERF_EVENTS */ 464#endif /* CONFIG_PERF_EVENTS */
517 465
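Per-event perf enable/disable functions are gone; the class carries a single perf probe. For a class named foo, the two helpers expand to roughly the following (sketch assembled from the macros above; the class name is hypothetical):

	/* _TRACE_PERF_PROTO(foo, proto): forward-declare the class probe */
	static notrace void perf_trace_foo(void *__data, proto);

	/* _TRACE_PERF_INIT(foo): one initializer inside event_class_foo */
	.perf_probe = perf_trace_foo,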
@@ -545,9 +493,9 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
545#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 493#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
546 \ 494 \
547static notrace void \ 495static notrace void \
548ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \ 496ftrace_raw_event_##call(void *__data, proto) \
549 proto) \
550{ \ 497{ \
498 struct ftrace_event_call *event_call = __data; \
551 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ 499 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
552 struct ring_buffer_event *event; \ 500 struct ring_buffer_event *event; \
553 struct ftrace_raw_##call *entry; \ 501 struct ftrace_raw_##call *entry; \
@@ -562,14 +510,13 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
562 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ 510 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
563 \ 511 \
564 event = trace_current_buffer_lock_reserve(&buffer, \ 512 event = trace_current_buffer_lock_reserve(&buffer, \
565 event_call->id, \ 513 event_call->event.type, \
566 sizeof(*entry) + __data_size, \ 514 sizeof(*entry) + __data_size, \
567 irq_flags, pc); \ 515 irq_flags, pc); \
568 if (!event) \ 516 if (!event) \
569 return; \ 517 return; \
570 entry = ring_buffer_event_data(event); \ 518 entry = ring_buffer_event_data(event); \
571 \ 519 \
572 \
573 tstruct \ 520 tstruct \
574 \ 521 \
575 { assign; } \ 522 { assign; } \
@@ -578,34 +525,21 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
578 trace_nowake_buffer_unlock_commit(buffer, \ 525 trace_nowake_buffer_unlock_commit(buffer, \
579 event, irq_flags, pc); \ 526 event, irq_flags, pc); \
580} 527}
528/*
529 * The ftrace_test_probe is compiled out; it is only here as a build-time check
530 * to make sure that if the tracepoint handling changes, the ftrace probe will
531 * fail to compile unless it too is updated.
532 */
581 533
582#undef DEFINE_EVENT 534#undef DEFINE_EVENT
583#define DEFINE_EVENT(template, call, proto, args) \ 535#define DEFINE_EVENT(template, call, proto, args) \
584 \ 536static inline void ftrace_test_probe_##call(void) \
585static notrace void ftrace_raw_event_##call(proto) \
586{ \
587 ftrace_raw_event_id_##template(&event_##call, args); \
588} \
589 \
590static notrace int \
591ftrace_raw_reg_event_##call(struct ftrace_event_call *unused) \
592{ \
593 return register_trace_##call(ftrace_raw_event_##call); \
594} \
595 \
596static notrace void \
597ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused) \
598{ \ 537{ \
599 unregister_trace_##call(ftrace_raw_event_##call); \ 538 check_trace_callback_type_##call(ftrace_raw_event_##template); \
600} \ 539}
601 \
602static struct trace_event ftrace_event_type_##call = { \
603 .trace = ftrace_raw_output_##call, \
604};
605 540
606#undef DEFINE_EVENT_PRINT 541#undef DEFINE_EVENT_PRINT
607#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ 542#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
608 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
609 543
610#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 544#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
611 545
@@ -622,7 +556,16 @@ static struct trace_event ftrace_event_type_##call = { \
622 556
623#undef DECLARE_EVENT_CLASS 557#undef DECLARE_EVENT_CLASS
624#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 558#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
625static const char print_fmt_##call[] = print; 559_TRACE_PERF_PROTO(call, PARAMS(proto)); \
560static const char print_fmt_##call[] = print; \
561static struct ftrace_event_class __used event_class_##call = { \
562 .system = __stringify(TRACE_SYSTEM), \
563 .define_fields = ftrace_define_fields_##call, \
564 .fields = LIST_HEAD_INIT(event_class_##call.fields),\
565 .raw_init = trace_event_raw_init, \
566 .probe = ftrace_raw_event_##call, \
567 _TRACE_PERF_INIT(call) \
568};
626 569
627#undef DEFINE_EVENT 570#undef DEFINE_EVENT
628#define DEFINE_EVENT(template, call, proto, args) \ 571#define DEFINE_EVENT(template, call, proto, args) \
@@ -631,15 +574,10 @@ static struct ftrace_event_call __used \
631__attribute__((__aligned__(4))) \ 574__attribute__((__aligned__(4))) \
632__attribute__((section("_ftrace_events"))) event_##call = { \ 575__attribute__((section("_ftrace_events"))) event_##call = { \
633 .name = #call, \ 576 .name = #call, \
634 .system = __stringify(TRACE_SYSTEM), \ 577 .class = &event_class_##template, \
635 .event = &ftrace_event_type_##call, \ 578 .event.funcs = &ftrace_event_type_funcs_##template, \
636 .raw_init = trace_event_raw_init, \
637 .regfunc = ftrace_raw_reg_event_##call, \
638 .unregfunc = ftrace_raw_unreg_event_##call, \
639 .print_fmt = print_fmt_##template, \ 579 .print_fmt = print_fmt_##template, \
640 .define_fields = ftrace_define_fields_##template, \ 580};
641 _TRACE_PERF_INIT(call) \
642}
643 581
644#undef DEFINE_EVENT_PRINT 582#undef DEFINE_EVENT_PRINT
645#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ 583#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
@@ -650,14 +588,9 @@ static struct ftrace_event_call __used \
650__attribute__((__aligned__(4))) \ 588__attribute__((__aligned__(4))) \
651__attribute__((section("_ftrace_events"))) event_##call = { \ 589__attribute__((section("_ftrace_events"))) event_##call = { \
652 .name = #call, \ 590 .name = #call, \
653 .system = __stringify(TRACE_SYSTEM), \ 591 .class = &event_class_##template, \
654 .event = &ftrace_event_type_##call, \ 592 .event.funcs = &ftrace_event_type_funcs_##call, \
655 .raw_init = trace_event_raw_init, \
656 .regfunc = ftrace_raw_reg_event_##call, \
657 .unregfunc = ftrace_raw_unreg_event_##call, \
658 .print_fmt = print_fmt_##call, \ 593 .print_fmt = print_fmt_##call, \
659 .define_fields = ftrace_define_fields_##template, \
660 _TRACE_PERF_INIT(call) \
661} 594}
662 595
663#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 596#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -757,17 +690,20 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
757#undef DECLARE_EVENT_CLASS 690#undef DECLARE_EVENT_CLASS
758#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 691#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
759static notrace void \ 692static notrace void \
760perf_trace_templ_##call(struct ftrace_event_call *event_call, \ 693perf_trace_##call(void *__data, proto) \
761 struct pt_regs *__regs, proto) \
762{ \ 694{ \
695 struct ftrace_event_call *event_call = __data; \
763 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ 696 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
764 struct ftrace_raw_##call *entry; \ 697 struct ftrace_raw_##call *entry; \
698 struct pt_regs *__regs = &get_cpu_var(perf_trace_regs); \
765 u64 __addr = 0, __count = 1; \ 699 u64 __addr = 0, __count = 1; \
766 unsigned long irq_flags; \ 700 unsigned long irq_flags; \
767 int __entry_size; \ 701 int __entry_size; \
768 int __data_size; \ 702 int __data_size; \
769 int rctx; \ 703 int rctx; \
770 \ 704 \
705 perf_fetch_caller_regs(__regs, 1); \
706 \
771 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ 707 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
772 __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\ 708 __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
773 sizeof(u64)); \ 709 sizeof(u64)); \
@@ -775,33 +711,35 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call, \
775 \ 711 \
776 if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \ 712 if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \
777 "profile buffer not large enough")) \ 713 "profile buffer not large enough")) \
778 return; \ 714 goto out; \
779 entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \ 715 entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
780 __entry_size, event_call->id, &rctx, &irq_flags); \ 716 __entry_size, event_call->event.type, &rctx, &irq_flags); \
781 if (!entry) \ 717 if (!entry) \
782 return; \ 718 goto out; \
783 tstruct \ 719 tstruct \
784 \ 720 \
785 { assign; } \ 721 { assign; } \
786 \ 722 \
787 perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ 723 perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
788 __count, irq_flags, __regs); \ 724 __count, irq_flags, __regs); \
725 out: \
726 put_cpu_var(perf_trace_regs); \
789} 727}
790 728
729/*
730 * This part is compiled out; it is only here as a build-time check
731 * to make sure that if the tracepoint handling changes, the
732 * perf probe will fail to compile unless it too is updated.
733 */
791#undef DEFINE_EVENT 734#undef DEFINE_EVENT
792#define DEFINE_EVENT(template, call, proto, args) \ 735#define DEFINE_EVENT(template, call, proto, args) \
793static notrace void perf_trace_##call(proto) \ 736static inline void perf_test_probe_##call(void) \
794{ \ 737{ \
795 struct ftrace_event_call *event_call = &event_##call; \ 738 check_trace_callback_type_##call(perf_trace_##template); \
796 struct pt_regs *__regs = &get_cpu_var(perf_trace_regs); \
797 \
798 perf_fetch_caller_regs(__regs, 1); \
799 \ 739 \
800 perf_trace_templ_##template(event_call, __regs, args); \
801 \
802 put_cpu_var(perf_trace_regs); \
803} 740}
804 741
742
805#undef DEFINE_EVENT_PRINT 743#undef DEFINE_EVENT_PRINT
806#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ 744#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
807 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) 745 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
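Registration thus moves from per-event regfunc/unregfunc pairs to a single class-level .probe/.perf_probe that receives the ftrace_event_call through void *__data, and DEFINE_EVENT shrinks to the compiled-out type checks above. A runnable userspace sketch of that check idiom (all names made up):

	#include <stdio.h>

	/* One probe signature per event class; the leading void * carries
	 * the event instance, as in the new "void *__data, proto" scheme. */
	typedef void (*demo_probe_t)(void *data, int arg);

	/* Stand-in for the generated check_trace_callback_type_<call>():
	 * never executed, it only makes a mismatched probe a compile error. */
	static inline void check_demo_callback_type(demo_probe_t cb)
	{
		(void)cb;
	}

	static void raw_event_probe(void *data, int arg)
	{
		printf("probe: data=%p arg=%d\n", data, arg);
	}

	/* Counterpart of ftrace_test_probe_<call>()/perf_test_probe_<call>():
	 * compiled out in practice, kept purely for the type check. */
	static inline void test_probe(void)
	{
		check_demo_callback_type(raw_event_probe);
	}

	int main(void)
	{
		int call_stub;

		raw_event_probe(&call_stub, 1);	/* direct call, demo only */
		return 0;
	}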
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index e5e5f48dbfb3..257e08960d7b 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -25,6 +25,8 @@ struct syscall_metadata {
25 int nb_args; 25 int nb_args;
26 const char **types; 26 const char **types;
27 const char **args; 27 const char **args;
28 struct list_head enter_fields;
29 struct list_head exit_fields;
28 30
29 struct ftrace_event_call *enter_event; 31 struct ftrace_event_call *enter_event;
30 struct ftrace_event_call *exit_event; 32 struct ftrace_event_call *exit_event;
@@ -34,16 +36,16 @@ struct syscall_metadata {
34extern unsigned long arch_syscall_addr(int nr); 36extern unsigned long arch_syscall_addr(int nr);
35extern int init_syscall_trace(struct ftrace_event_call *call); 37extern int init_syscall_trace(struct ftrace_event_call *call);
36 38
37extern int syscall_enter_define_fields(struct ftrace_event_call *call);
38extern int syscall_exit_define_fields(struct ftrace_event_call *call);
39extern int reg_event_syscall_enter(struct ftrace_event_call *call); 39extern int reg_event_syscall_enter(struct ftrace_event_call *call);
40extern void unreg_event_syscall_enter(struct ftrace_event_call *call); 40extern void unreg_event_syscall_enter(struct ftrace_event_call *call);
41extern int reg_event_syscall_exit(struct ftrace_event_call *call); 41extern int reg_event_syscall_exit(struct ftrace_event_call *call);
42extern void unreg_event_syscall_exit(struct ftrace_event_call *call); 42extern void unreg_event_syscall_exit(struct ftrace_event_call *call);
43extern int 43extern int
44ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s); 44ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
45enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags); 45enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags,
46enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags); 46 struct trace_event *event);
47enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags,
48 struct trace_event *event);
47#endif 49#endif
48 50
49#ifdef CONFIG_PERF_EVENTS 51#ifdef CONFIG_PERF_EVENTS
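print_syscall_enter()/print_syscall_exit() gain the struct trace_event parameter so they fit the new trace_event_functions .trace signature. The matching wiring lives in kernel/trace/trace_syscalls.c, not in this diff, but presumably takes a form like:

	struct trace_event_functions enter_syscall_print_funcs = {
		.trace	= print_syscall_enter,
	};

	struct trace_event_functions exit_syscall_print_funcs = {
		.trace	= print_syscall_exit,
	};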