Diffstat (limited to 'include')

 include/linux/bitops.h       | 29
 include/linux/ftrace.h       |  7
 include/linux/ftrace_event.h | 20
 include/linux/list.h         | 14
 include/linux/perf_event.h   | 55
 include/linux/syscalls.h     |  4
 include/trace/events/lock.h  | 29
 include/trace/ftrace.h       | 60
 include/trace/syscall.h      |  4

 9 files changed, 130 insertions(+), 92 deletions(-)
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index c05a29cb9bb2..25b8b2f33ae9 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -25,7 +25,7 @@
 static __inline__ int get_bitmask_order(unsigned int count)
 {
 	int order;
 
 	order = fls(count);
 	return order;	/* We could be slightly more clever with -1 here... */
 }
@@ -33,7 +33,7 @@ static __inline__ int get_bitmask_order(unsigned int count)
 static __inline__ int get_count_order(unsigned int count)
 {
 	int order;
 
 	order = fls(count) - 1;
 	if (count & (count - 1))
 		order++;
@@ -45,6 +45,31 @@ static inline unsigned long hweight_long(unsigned long w)
 	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
 }
 
+/*
+ * Clearly slow versions of the hweightN() functions, their benefit is
+ * of course compile time evaluation of constant arguments.
+ */
+#define HWEIGHT8(w)					\
+      (	BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) +	\
+	(!!((w) & (1ULL << 0))) +			\
+	(!!((w) & (1ULL << 1))) +			\
+	(!!((w) & (1ULL << 2))) +			\
+	(!!((w) & (1ULL << 3))) +			\
+	(!!((w) & (1ULL << 4))) +			\
+	(!!((w) & (1ULL << 5))) +			\
+	(!!((w) & (1ULL << 6))) +			\
+	(!!((w) & (1ULL << 7))) )
+
+#define HWEIGHT16(w) (HWEIGHT8(w) + HWEIGHT8((w) >> 8))
+#define HWEIGHT32(w) (HWEIGHT16(w) + HWEIGHT16((w) >> 16))
+#define HWEIGHT64(w) (HWEIGHT32(w) + HWEIGHT32((w) >> 32))
+
+/*
+ * Type invariant version that simply casts things to the
+ * largest type.
+ */
+#define HWEIGHT(w)   HWEIGHT64((u64)(w))
+
 /**
  * rol32 - rotate a 32-bit value left
  * @word: value to rotate
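The HWEIGHT*() macros above trade speed for constant-foldability: the expansion is pure arithmetic on the argument, so the compiler can evaluate it entirely at build time, and BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) turns a non-constant argument into a compile error. A minimal userspace sketch of the same expansion (the kernel-only BUILD_BUG_ON_ZERO guard is dropped), showing the result used where a true compile-time constant is required:

	#include <stdio.h>

	/* Same shape as the kernel's HWEIGHT8(), minus the
	 * constant-argument guard (BUILD_BUG_ON_ZERO is kernel-only). */
	#define HWEIGHT8(w)						\
		((!!((w) & (1ULL << 0))) + (!!((w) & (1ULL << 1))) +	\
		 (!!((w) & (1ULL << 2))) + (!!((w) & (1ULL << 3))) +	\
		 (!!((w) & (1ULL << 4))) + (!!((w) & (1ULL << 5))) +	\
		 (!!((w) & (1ULL << 6))) + (!!((w) & (1ULL << 7))))

	#define HWEIGHT16(w) (HWEIGHT8(w)  + HWEIGHT8((w) >> 8))
	#define HWEIGHT32(w) (HWEIGHT16(w) + HWEIGHT16((w) >> 16))
	#define HWEIGHT64(w) (HWEIGHT32(w) + HWEIGHT32((w) >> 32))
	#define HWEIGHT(w)   HWEIGHT64((unsigned long long)(w))

	/* Constant-folds, so it can legally size a static array. */
	static int slots[HWEIGHT(0xf0f0)];	/* 8 set bits -> 8 slots */

	int main(void)
	{
		printf("popcount(0xf0f0) = %d\n", HWEIGHT(0xf0f0));
		printf("array size       = %zu\n",
		       sizeof(slots) / sizeof(slots[0]));
		return 0;
	}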
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 1cbb36f2759c..01e6adea07ec 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -134,6 +134,8 @@ extern void
 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
 extern void unregister_ftrace_function_probe_all(char *glob);
 
+extern int ftrace_text_reserved(void *start, void *end);
+
 enum {
 	FTRACE_FL_FREE		= (1 << 0),
 	FTRACE_FL_FAILED	= (1 << 1),
@@ -141,7 +143,6 @@ enum {
 	FTRACE_FL_ENABLED	= (1 << 3),
 	FTRACE_FL_NOTRACE	= (1 << 4),
 	FTRACE_FL_CONVERTED	= (1 << 5),
-	FTRACE_FL_FROZEN	= (1 << 6),
 };
 
 struct dyn_ftrace {
@@ -250,6 +251,10 @@ static inline int unregister_ftrace_command(char *cmd_name)
 {
 	return -EINVAL;
 }
+static inline int ftrace_text_reserved(void *start, void *end)
+{
+	return 0;
+}
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 /* totally disable ftrace - can not re-enable after this */
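ftrace_text_reserved() gives other code patchers a way to ask whether a range of kernel text overlaps an mcount site that dynamic ftrace may rewrite, and the stub above keeps callers ifdef-free when CONFIG_DYNAMIC_FTRACE is off. A hypothetical caller sketch (my_patch_insn, addr and len are invented names; the range is assumed inclusive of both endpoints):

	/* Hypothetical sketch: refuse to patch an instruction that
	 * overlaps text reserved by ftrace. Names are illustrative only. */
	static int my_patch_insn(void *addr, size_t len)
	{
		if (ftrace_text_reserved(addr, addr + len - 1))
			return -EBUSY;

		/* ... addr is not an ftrace call site; safe to patch ... */
		return 0;
	}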
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 84a5629adfd8..6b7c444ab8f6 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -5,6 +5,7 @@
 #include <linux/trace_seq.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
+#include <linux/perf_event.h>
 
 struct trace_array;
 struct tracer;
@@ -137,9 +138,6 @@ struct ftrace_event_call {
 
 #define FTRACE_MAX_PROFILE_SIZE	2048
 
-extern char *perf_trace_buf;
-extern char *perf_trace_buf_nmi;
-
 #define MAX_FILTER_PRED		32
 #define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */
 
@@ -187,13 +185,27 @@ do { \
 	__trace_printk(ip, fmt, ##args);				\
 } while (0)
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 struct perf_event;
 extern int ftrace_profile_enable(int event_id);
 extern void ftrace_profile_disable(int event_id);
 extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
 				     char *filter_str);
 extern void ftrace_profile_free_filter(struct perf_event *event);
+extern void *
+ftrace_perf_buf_prepare(int size, unsigned short type, int *rctxp,
+			unsigned long *irq_flags);
+
+static inline void
+ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr,
+		       u64 count, unsigned long irq_flags)
+{
+	struct trace_entry *entry = raw_data;
+
+	perf_tp_event(entry->type, addr, count, raw_data, size);
+	perf_swevent_put_recursion_context(rctx);
+	local_irq_restore(irq_flags);
+}
 #endif
 
 #endif /* _LINUX_FTRACE_EVENT_H */
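ftrace_perf_buf_prepare() and ftrace_perf_buf_submit() factor out what every generated profile handler used to open-code: the prepare side disables interrupts, enters the software-event recursion context and reserves per-CPU buffer space, while the submit side pushes the record to perf_tp_event() and unwinds both. A hand-written sketch of the calling pattern (struct my_raw_event and MY_EVENT_ID are invented for illustration; size handling is simplified relative to the generated code, which rounds the record up to a u64 multiple):

	/* Illustrative only: mirrors the pattern the generated handlers
	 * in include/trace/ftrace.h follow after this patch. */
	struct my_raw_event {
		struct trace_entry	ent;	/* must come first */
		unsigned long		value;
	};

	static void my_profile_handler(unsigned long value)
	{
		struct my_raw_event *entry;
		unsigned long irq_flags;
		int rctx;

		entry = ftrace_perf_buf_prepare(sizeof(*entry), MY_EVENT_ID,
						&rctx, &irq_flags);
		if (!entry)	/* in recursion, or no buffer: drop it */
			return;

		entry->value = value;	/* fill the payload (the "assign" step) */

		ftrace_perf_buf_submit(entry, sizeof(*entry), rctx,
				       0 /* addr */, 1 /* count */, irq_flags);
	}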
diff --git a/include/linux/list.h b/include/linux/list.h
index 969f6e92d089..5d9c6558e8ab 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -206,6 +206,20 @@ static inline int list_empty_careful(const struct list_head *head)
 }
 
 /**
+ * list_rotate_left - rotate the list to the left
+ * @head: the head of the list
+ */
+static inline void list_rotate_left(struct list_head *head)
+{
+	struct list_head *first;
+
+	if (!list_empty(head)) {
+		first = head->next;
+		list_move_tail(first, head);
+	}
+}
+
+/**
  * list_is_singular - tests whether a list has just one entry.
  * @head: the list to test.
  */
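list_rotate_left() moves the first entry behind the tail, so successive calls cycle through a list's entries in order, and the list_empty() check keeps it a no-op on an empty list. A kernel-flavoured sketch of the effect (demo code, not part of the patch):

	struct item {
		struct list_head	link;
		char			tag;
	};

	static void rotate_demo(void)
	{
		LIST_HEAD(q);
		struct item a = { .tag = 'a' }, b = { .tag = 'b' },
			    c = { .tag = 'c' };
		struct item *it;

		list_add_tail(&a.link, &q);	/* q: a b c */
		list_add_tail(&b.link, &q);
		list_add_tail(&c.link, &q);

		list_rotate_left(&q);		/* q: b c a */

		list_for_each_entry(it, &q, link)
			pr_cont("%c ", it->tag);	/* prints: b c a */
	}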
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index a177698d95e2..7b18b4fd5df7 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -288,7 +288,7 @@ struct perf_event_mmap_page {
 };
 
 #define PERF_RECORD_MISC_CPUMODE_MASK		(3 << 0)
 #define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
 #define PERF_RECORD_MISC_KERNEL		(1 << 0)
 #define PERF_RECORD_MISC_USER			(2 << 0)
 #define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
@@ -354,8 +354,8 @@ enum perf_event_type {
 	 *	u64				stream_id;
 	 * };
 	 */
 	PERF_RECORD_THROTTLE			= 5,
 	PERF_RECORD_UNTHROTTLE			= 6,
 
 	/*
 	 * struct {
@@ -369,10 +369,10 @@ enum perf_event_type {
 
 	/*
 	 * struct {
 	 *	struct perf_event_header	header;
 	 *	u32				pid, tid;
 	 *
 	 *	struct read_format		values;
 	 * };
 	 */
 	PERF_RECORD_READ			= 8,
@@ -410,7 +410,7 @@ enum perf_event_type {
 	 *	char			data[size];}&& PERF_SAMPLE_RAW
 	 * };
 	 */
 	PERF_RECORD_SAMPLE		= 9,
 
 	PERF_RECORD_MAX,		/* non-ABI */
 };
@@ -476,9 +476,11 @@ struct hw_perf_event {
 	union {
 		struct { /* hardware */
 			u64		config;
+			u64		last_tag;
 			unsigned long	config_base;
 			unsigned long	event_base;
 			int		idx;
+			int		last_cpu;
 		};
 		struct { /* software */
 			s64		remaining;
@@ -496,9 +498,8 @@ struct hw_perf_event {
 	atomic64_t			period_left;
 	u64				interrupts;
 
-	u64				freq_count;
-	u64				freq_interrupts;
-	u64				freq_stamp;
+	u64				freq_time_stamp;
+	u64				freq_count_stamp;
 #endif
 };
 
@@ -510,6 +511,8 @@ struct perf_event;
 struct pmu {
 	int (*enable)			(struct perf_event *event);
 	void (*disable)			(struct perf_event *event);
+	int (*start)			(struct perf_event *event);
+	void (*stop)			(struct perf_event *event);
 	void (*read)			(struct perf_event *event);
 	void (*unthrottle)		(struct perf_event *event);
 };
@@ -563,6 +566,10 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
 					struct perf_sample_data *,
 					struct pt_regs *regs);
 
+enum perf_group_flag {
+	PERF_GROUP_SOFTWARE = 0x1,
+};
+
 /**
  * struct perf_event - performance event kernel representation:
  */
@@ -572,6 +579,7 @@ struct perf_event {
 	struct list_head		event_entry;
 	struct list_head		sibling_list;
 	int				nr_siblings;
+	int				group_flags;
 	struct perf_event		*group_leader;
 	struct perf_event		*output;
 	const struct pmu		*pmu;
@@ -656,7 +664,7 @@ struct perf_event {
 
 	perf_overflow_handler_t		overflow_handler;
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_EVENT_TRACING
 	struct event_filter		*filter;
 #endif
 
@@ -681,7 +689,8 @@ struct perf_event_context {
 	 */
 	struct mutex			mutex;
 
-	struct list_head		group_list;
+	struct list_head		pinned_groups;
+	struct list_head		flexible_groups;
 	struct list_head		event_list;
 	int				nr_events;
 	int				nr_active;
@@ -744,10 +753,9 @@ extern int perf_max_events;
 
 extern const struct pmu *hw_perf_event_init(struct perf_event *event);
 
-extern void perf_event_task_sched_in(struct task_struct *task, int cpu);
-extern void perf_event_task_sched_out(struct task_struct *task,
-					struct task_struct *next, int cpu);
-extern void perf_event_task_tick(struct task_struct *task, int cpu);
+extern void perf_event_task_sched_in(struct task_struct *task);
+extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
+extern void perf_event_task_tick(struct task_struct *task);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
@@ -762,7 +770,7 @@ extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
 extern int hw_perf_group_sched_in(struct perf_event *group_leader,
 	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx, int cpu);
+	       struct perf_event_context *ctx);
 extern void perf_event_update_userpage(struct perf_event *event);
 extern int perf_event_release_kernel(struct perf_event *event);
 extern struct perf_event *
@@ -851,8 +859,7 @@ extern int sysctl_perf_event_mlock;
 extern int sysctl_perf_event_sample_rate;
 
 extern void perf_event_init(void);
-extern void perf_tp_event(int event_id, u64 addr, u64 count,
-			  void *record, int entry_size);
+extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
@@ -873,12 +880,12 @@ extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
 #else
 static inline void
-perf_event_task_sched_in(struct task_struct *task, int cpu)		{ }
+perf_event_task_sched_in(struct task_struct *task)			{ }
 static inline void
 perf_event_task_sched_out(struct task_struct *task,
-			  struct task_struct *next, int cpu)		{ }
+			  struct task_struct *next)			{ }
 static inline void
-perf_event_task_tick(struct task_struct *task, int cpu)		{ }
+perf_event_task_tick(struct task_struct *task)				{ }
 static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)	{ }
 static inline void perf_event_free_task(struct task_struct *task)	{ }
@@ -893,13 +900,13 @@ static inline void
 perf_sw_event(u32 event_id, u64 nr, int nmi,
 	      struct pt_regs *regs, u64 addr)			{ }
 static inline void
 perf_bp_event(struct perf_event *event, void *data)		{ }
 
 static inline void perf_event_mmap(struct vm_area_struct *vma)	{ }
 static inline void perf_event_comm(struct task_struct *tsk)	{ }
 static inline void perf_event_fork(struct task_struct *tsk)	{ }
 static inline void perf_event_init(void)			{ }
 static inline int  perf_swevent_get_recursion_context(void)	{ return -1; }
 static inline void perf_swevent_put_recursion_context(int rctx) { }
 static inline void perf_event_enable(struct perf_event *event)	{ }
 static inline void perf_event_disable(struct perf_event *event) { }
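The new start/stop callbacks let the core pause and resume an event in place, without the full disable/enable path that may reassign its hardware counter (the x86 perf code uses this together with the last_cpu/last_tag fields added above to re-program event groups cheaply). A skeletal pmu sketch showing where the new hooks sit; every body below is an invented placeholder:

	/* Illustrative skeleton only; a real pmu programs hardware here. */
	static int my_pmu_enable(struct perf_event *event)
	{
		/* allocate a counter for @event and start it counting */
		return 0;
	}

	static void my_pmu_disable(struct perf_event *event)
	{
		/* stop @event and give its counter back */
	}

	static int my_pmu_start(struct perf_event *event)
	{
		/* restart @event on the counter it already owns */
		return 0;
	}

	static void my_pmu_stop(struct perf_event *event)
	{
		/* halt @event but keep its counter assignment */
	}

	static void my_pmu_read(struct perf_event *event)
	{
		/* fold the current hardware count into event->count */
	}

	static const struct pmu my_pmu = {
		.enable		= my_pmu_enable,
		.disable	= my_pmu_disable,
		.start		= my_pmu_start,
		.stop		= my_pmu_stop,
		.read		= my_pmu_read,
	};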
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 91bd7d78a07d..8126f239edf0 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -99,7 +99,7 @@ struct perf_event_attr;
 #define __SC_TEST5(t5, a5, ...)	__SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
 #define __SC_TEST6(t6, a6, ...)	__SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 
 #define TRACE_SYS_ENTER_PROFILE_INIT(sname)				\
 	.profile_enable = prof_sysenter_enable,				\
@@ -113,7 +113,7 @@ struct perf_event_attr;
 #define TRACE_SYS_ENTER_PROFILE_INIT(sname)
 #define TRACE_SYS_EXIT_PROFILE(sname)
 #define TRACE_SYS_EXIT_PROFILE_INIT(sname)
-#endif
+#endif /* CONFIG_PERF_EVENTS */
 
 #ifdef CONFIG_FTRACE_SYSCALLS
 #define __SC_STR_ADECL1(t, a)	#a
diff --git a/include/trace/events/lock.h b/include/trace/events/lock.h
index a870ba125aa8..5c1dcfc16c60 100644
--- a/include/trace/events/lock.h
+++ b/include/trace/events/lock.h
@@ -20,14 +20,17 @@ TRACE_EVENT(lock_acquire,
 	TP_STRUCT__entry(
 		__field(unsigned int, flags)
 		__string(name, lock->name)
+		__field(void *, lockdep_addr)
 	),
 
 	TP_fast_assign(
 		__entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0);
 		__assign_str(name, lock->name);
+		__entry->lockdep_addr = lock;
 	),
 
-	TP_printk("%s%s%s", (__entry->flags & 1) ? "try " : "",
+	TP_printk("%p %s%s%s", __entry->lockdep_addr,
+		  (__entry->flags & 1) ? "try " : "",
 		  (__entry->flags & 2) ? "read " : "",
 		  __get_str(name))
 );
@@ -40,13 +43,16 @@ TRACE_EVENT(lock_release,
 
 	TP_STRUCT__entry(
 		__string(name, lock->name)
+		__field(void *, lockdep_addr)
 	),
 
 	TP_fast_assign(
 		__assign_str(name, lock->name);
+		__entry->lockdep_addr = lock;
 	),
 
-	TP_printk("%s", __get_str(name))
+	TP_printk("%p %s",
+		  __entry->lockdep_addr, __get_str(name))
 );
 
 #ifdef CONFIG_LOCK_STAT
@@ -59,13 +65,16 @@ TRACE_EVENT(lock_contended,
 
 	TP_STRUCT__entry(
 		__string(name, lock->name)
+		__field(void *, lockdep_addr)
 	),
 
 	TP_fast_assign(
 		__assign_str(name, lock->name);
+		__entry->lockdep_addr = lock;
 	),
 
-	TP_printk("%s", __get_str(name))
+	TP_printk("%p %s",
+		  __entry->lockdep_addr, __get_str(name))
 );
 
 TRACE_EVENT(lock_acquired,
@@ -75,16 +84,18 @@ TRACE_EVENT(lock_acquired,
 
 	TP_STRUCT__entry(
 		__string(name, lock->name)
-		__field(unsigned long, wait_usec)
-		__field(unsigned long, wait_nsec_rem)
+		__field(s64, wait_nsec)
+		__field(void *, lockdep_addr)
 	),
+
 	TP_fast_assign(
 		__assign_str(name, lock->name);
-		__entry->wait_nsec_rem = do_div(waittime, NSEC_PER_USEC);
-		__entry->wait_usec = (unsigned long) waittime;
+		__entry->wait_nsec = waittime;
+		__entry->lockdep_addr = lock;
 	),
-	TP_printk("%s (%lu.%03lu us)", __get_str(name), __entry->wait_usec,
-		  __entry->wait_nsec_rem)
+	TP_printk("%p %s (%llu ns)", __entry->lockdep_addr,
+		  __get_str(name),
+		  __entry->wait_nsec)
 );
 
 #endif
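All four lock events now record the lockdep map address and print it first via %p, so two locks that share a name (every per-CPU runqueue lock is "&rq->lock", for instance) stay distinguishable in a trace; lock_acquired additionally drops the in-kernel do_div() microsecond split and reports the raw s64 wait time in nanoseconds, leaving unit conversion to userspace. Hypothetical output under the new TP_printk() formats (address and timing values invented for illustration):

	lock_acquire:  ffff88003644b670 read &rq->lock
	lock_acquired: ffff88003644b670 &rq->lock (1523 ns)
	lock_release:  ffff88003644b670 &rq->lock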
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index f23a0ca6910a..0804cd594803 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -376,7 +376,7 @@ static inline notrace int ftrace_get_offsets_##call( \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 
 /*
  * Generate the functions needed for tracepoint perf_event support.
@@ -421,7 +421,7 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused) \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#endif
+#endif /* CONFIG_PERF_EVENTS */
 
 /*
  * Stage 4 of the trace events.
@@ -505,7 +505,7 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused) \
  *
  */
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 
 #define _TRACE_PROFILE_INIT(call)					\
 	.profile_enable = ftrace_profile_enable_##call,			\
@@ -513,7 +513,7 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused) \
 
 #else
 #define _TRACE_PROFILE_INIT(call)
-#endif
+#endif /* CONFIG_PERF_EVENTS */
 
 #undef __entry
 #define __entry entry
@@ -736,7 +736,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  * }
  */
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 
 #undef __entry
 #define __entry entry
@@ -761,22 +761,12 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
 			    proto)					\
 {									\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
-	extern int perf_swevent_get_recursion_context(void);		\
-	extern void perf_swevent_put_recursion_context(int rctx);	\
-	extern void perf_tp_event(int, u64, u64, void *, int);		\
 	struct ftrace_raw_##call *entry;				\
 	u64 __addr = 0, __count = 1;					\
 	unsigned long irq_flags;					\
-	struct trace_entry *ent;					\
 	int __entry_size;						\
 	int __data_size;						\
-	char *trace_buf;						\
-	char *raw_data;							\
-	int __cpu;							\
 	int rctx;							\
-	int pc;								\
-									\
-	pc = preempt_count();						\
 									\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
@@ -786,42 +776,16 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
 	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
 		      "profile buffer not large enough"))		\
 		return;							\
-									\
-	local_irq_save(irq_flags);					\
-									\
-	rctx = perf_swevent_get_recursion_context();			\
-	if (rctx < 0)							\
-		goto end_recursion;					\
-									\
-	__cpu = smp_processor_id();					\
-									\
-	if (in_nmi())							\
-		trace_buf = rcu_dereference(perf_trace_buf_nmi);	\
-	else								\
-		trace_buf = rcu_dereference(perf_trace_buf);		\
-									\
-	if (!trace_buf)							\
-		goto end;						\
-									\
-	raw_data = per_cpu_ptr(trace_buf, __cpu);			\
-									\
-	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
-	entry = (struct ftrace_raw_##call *)raw_data;			\
-	ent = &entry->ent;						\
-	tracing_generic_entry_update(ent, irq_flags, pc);		\
-	ent->type = event_call->id;					\
-									\
+	entry = (struct ftrace_raw_##call *)ftrace_perf_buf_prepare(	\
+		__entry_size, event_call->id, &rctx, &irq_flags);	\
+	if (!entry)							\
+		return;							\
 	tstruct								\
 									\
 	{ assign; }							\
 									\
-	perf_tp_event(event_call->id, __addr, __count, entry,		\
-		      __entry_size);					\
-									\
-end:									\
-	perf_swevent_put_recursion_context(rctx);			\
-end_recursion:								\
-	local_irq_restore(irq_flags);					\
+	ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr,	\
+			       __count, irq_flags);			\
 }
 
 #undef DEFINE_EVENT
@@ -838,7 +802,7 @@ static notrace void ftrace_profile_##call(proto) \
 	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#endif /* CONFIG_EVENT_PROFILE */
+#endif /* CONFIG_PERF_EVENTS */
 
 #undef _TRACE_PROFILE_INIT
 
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 8cd410254456..0387100752f0 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -45,12 +45,12 @@ ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
 enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags);
 enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
 #endif
-#ifdef CONFIG_EVENT_PROFILE
+
+#ifdef CONFIG_PERF_EVENTS
 int prof_sysenter_enable(struct ftrace_event_call *call);
 void prof_sysenter_disable(struct ftrace_event_call *call);
 int prof_sysexit_enable(struct ftrace_event_call *call);
 void prof_sysexit_disable(struct ftrace_event_call *call);
-
 #endif
 
 #endif /* _TRACE_SYSCALL_H */