author     Frederic Weisbecker <fweisbec@gmail.com>  2013-05-02 11:37:49 -0400
committer  Frederic Weisbecker <fweisbec@gmail.com>  2013-05-02 11:54:19 -0400
commit     c032862fba51a3ca504752d3a25186b324c5ce83 (patch)
tree       955dc2ba4ab3df76ecc2bb780ee84aca04967e8d /include/trace
parent     fda76e074c7737fc57855dd17c762e50ed526052 (diff)
parent     8700c95adb033843fc163d112b9d21d4fda78018 (diff)
Merge commit '8700c95adb03' into timers/nohz
The full dynticks tree needs the latest RCU and sched upstream updates
in order to fix some dependencies.

Merge a common upstream merge point that has these updates.

Conflicts:
	include/linux/perf_event.h
	kernel/rcutree.h
	kernel/rcutree_plugin.h

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Diffstat (limited to 'include/trace')
-rw-r--r--  include/trace/events/block.h   |  8
-rw-r--r--  include/trace/events/filemap.h | 58
-rw-r--r--  include/trace/events/printk.h  | 25
-rw-r--r--  include/trace/events/rcu.h     | 55
-rw-r--r--  include/trace/events/regmap.h  | 48
-rw-r--r--  include/trace/events/sched.h   |  2
-rw-r--r--  include/trace/ftrace.h         | 49
7 files changed, 195 insertions, 50 deletions
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 9961726523d0..9c1467357b03 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -257,6 +257,7 @@ TRACE_EVENT(block_bio_bounce,
 
 /**
  * block_bio_complete - completed all work on the block operation
+ * @q: queue holding the block operation
  * @bio: block operation completed
  * @error: io error value
  *
@@ -265,9 +266,9 @@ TRACE_EVENT(block_bio_bounce,
  */
 TRACE_EVENT(block_bio_complete,
 
-	TP_PROTO(struct bio *bio, int error),
+	TP_PROTO(struct request_queue *q, struct bio *bio, int error),
 
-	TP_ARGS(bio, error),
+	TP_ARGS(q, bio, error),
 
 	TP_STRUCT__entry(
 		__field( dev_t, dev )
@@ -278,8 +279,7 @@ TRACE_EVENT(block_bio_complete,
 	),
 
 	TP_fast_assign(
-		__entry->dev = bio->bi_bdev ?
-				bio->bi_bdev->bd_dev : 0;
+		__entry->dev = bio->bi_bdev->bd_dev;
 		__entry->sector = bio->bi_sector;
 		__entry->nr_sector = bio->bi_size >> 9;
 		__entry->error = error;
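
Note: with the widened prototype, block-layer call sites must pass the request
queue explicitly instead of letting the event derive the device from the bio
alone. A call site would look roughly like the sketch below; the enclosing
function name is illustrative and not part of this commit.

	#include <trace/events/block.h>

	/* Sketch only: fire the completion event with the queue passed in. */
	static void example_bio_done(struct request_queue *q,
				     struct bio *bio, int error)
	{
		trace_block_bio_complete(q, bio, error);
	}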
diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h
new file mode 100644
index 000000000000..0421f49a20f7
--- /dev/null
+++ b/include/trace/events/filemap.h
@@ -0,0 +1,58 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM filemap
+
+#if !defined(_TRACE_FILEMAP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FILEMAP_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/mm.h>
+#include <linux/memcontrol.h>
+#include <linux/device.h>
+#include <linux/kdev_t.h>
+
+DECLARE_EVENT_CLASS(mm_filemap_op_page_cache,
+
+	TP_PROTO(struct page *page),
+
+	TP_ARGS(page),
+
+	TP_STRUCT__entry(
+		__field(struct page *, page)
+		__field(unsigned long, i_ino)
+		__field(unsigned long, index)
+		__field(dev_t, s_dev)
+	),
+
+	TP_fast_assign(
+		__entry->page = page;
+		__entry->i_ino = page->mapping->host->i_ino;
+		__entry->index = page->index;
+		if (page->mapping->host->i_sb)
+			__entry->s_dev = page->mapping->host->i_sb->s_dev;
+		else
+			__entry->s_dev = page->mapping->host->i_rdev;
+	),
+
+	TP_printk("dev %d:%d ino %lx page=%p pfn=%lu ofs=%lu",
+		MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
+		__entry->i_ino,
+		__entry->page,
+		page_to_pfn(__entry->page),
+		__entry->index << PAGE_SHIFT)
+);
+
+DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_delete_from_page_cache,
+	TP_PROTO(struct page *page),
+	TP_ARGS(page)
+	);
+
+DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_add_to_page_cache,
+	TP_PROTO(struct page *page),
+	TP_ARGS(page)
+	);
+
+#endif /* _TRACE_FILEMAP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
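
Note: the DECLARE_EVENT_CLASS plus the two DEFINE_EVENT lines generate
trace_mm_filemap_delete_from_page_cache() and
trace_mm_filemap_add_to_page_cache(). Wiring them into the page-cache paths
would look roughly like the sketch below; the function names are illustrative,
and CREATE_TRACE_POINTS must be defined in exactly one compilation unit before
the header is included so that the tracepoint instances are emitted there.

	#define CREATE_TRACE_POINTS
	#include <trace/events/filemap.h>

	/* Sketch only: hook the generated tracepoints into add/delete paths. */
	static void example_delete_from_page_cache(struct page *page)
	{
		trace_mm_filemap_delete_from_page_cache(page);
		/* ...remove the page from the mapping here... */
	}

	static void example_add_to_page_cache(struct page *page)
	{
		/* ...insert the page into the mapping here... */
		trace_mm_filemap_add_to_page_cache(page);
	}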
diff --git a/include/trace/events/printk.h b/include/trace/events/printk.h
index 94ec79cc011a..c008bc99f9fa 100644
--- a/include/trace/events/printk.h
+++ b/include/trace/events/printk.h
@@ -6,31 +6,18 @@
 
 #include <linux/tracepoint.h>
 
-TRACE_EVENT_CONDITION(console,
-	TP_PROTO(const char *log_buf, unsigned start, unsigned end,
-		 unsigned log_buf_len),
+TRACE_EVENT(console,
+	TP_PROTO(const char *text, size_t len),
 
-	TP_ARGS(log_buf, start, end, log_buf_len),
-
-	TP_CONDITION(start != end),
+	TP_ARGS(text, len),
 
 	TP_STRUCT__entry(
-		__dynamic_array(char, msg, end - start + 1)
+		__dynamic_array(char, msg, len + 1)
 	),
 
 	TP_fast_assign(
-		if ((start & (log_buf_len - 1)) > (end & (log_buf_len - 1))) {
-			memcpy(__get_dynamic_array(msg),
-			       log_buf + (start & (log_buf_len - 1)),
-			       log_buf_len - (start & (log_buf_len - 1)));
-			memcpy((char *)__get_dynamic_array(msg) +
-			       log_buf_len - (start & (log_buf_len - 1)),
-			       log_buf, end & (log_buf_len - 1));
-		} else
-			memcpy(__get_dynamic_array(msg),
-			       log_buf + (start & (log_buf_len - 1)),
-			       end - start);
-		((char *)__get_dynamic_array(msg))[end - start] = 0;
+		memcpy(__get_dynamic_array(msg), text, len);
+		((char *)__get_dynamic_array(msg))[len] = 0;
 	),
 
 	TP_printk("%s", __get_str(msg))
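
Note: dropping TRACE_EVENT_CONDITION and the ring-buffer index arithmetic means
the caller now hands the event a contiguous, already-formatted message and its
length. The call site reduces to roughly the following sketch (the function
name is illustrative):

	#include <trace/events/printk.h>

	/* Sketch only: emit the console event for an already-formatted line. */
	static void example_console_emit(const char *text, size_t len)
	{
		trace_console(text, len);
	}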
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 1918e832da4f..59ebcc89f148 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -72,6 +72,58 @@ TRACE_EVENT(rcu_grace_period,
 );
 
 /*
+ * Tracepoint for future grace-period events, including those for no-callbacks
+ * CPUs. The caller should pull the data from the rcu_node structure,
+ * other than rcuname, which comes from the rcu_state structure, and event,
+ * which is one of the following:
+ *
+ * "Startleaf": Request a nocb grace period based on leaf-node data.
+ * "Startedleaf": Leaf-node start proved sufficient.
+ * "Startedleafroot": Leaf-node start proved sufficient after checking root.
+ * "Startedroot": Requested a nocb grace period based on root-node data.
+ * "StartWait": Start waiting for the requested grace period.
+ * "ResumeWait": Resume waiting after signal.
+ * "EndWait": Complete wait.
+ * "Cleanup": Clean up rcu_node structure after previous GP.
+ * "CleanupMore": Clean up, and another no-CB GP is needed.
+ */
+TRACE_EVENT(rcu_future_grace_period,
+
+	TP_PROTO(char *rcuname, unsigned long gpnum, unsigned long completed,
+		 unsigned long c, u8 level, int grplo, int grphi,
+		 char *gpevent),
+
+	TP_ARGS(rcuname, gpnum, completed, c, level, grplo, grphi, gpevent),
+
+	TP_STRUCT__entry(
+		__field(char *, rcuname)
+		__field(unsigned long, gpnum)
+		__field(unsigned long, completed)
+		__field(unsigned long, c)
+		__field(u8, level)
+		__field(int, grplo)
+		__field(int, grphi)
+		__field(char *, gpevent)
+	),
+
+	TP_fast_assign(
+		__entry->rcuname = rcuname;
+		__entry->gpnum = gpnum;
+		__entry->completed = completed;
+		__entry->c = c;
+		__entry->level = level;
+		__entry->grplo = grplo;
+		__entry->grphi = grphi;
+		__entry->gpevent = gpevent;
+	),
+
+	TP_printk("%s %lu %lu %lu %u %d %d %s",
+		  __entry->rcuname, __entry->gpnum, __entry->completed,
+		  __entry->c, __entry->level, __entry->grplo, __entry->grphi,
+		  __entry->gpevent)
+);
+
+/*
  * Tracepoint for grace-period-initialization events. These are
  * distinguished by the type of RCU, the new grace-period number, the
  * rcu_node structure level, the starting and ending CPU covered by the
@@ -601,6 +653,9 @@ TRACE_EVENT(rcu_barrier,
 #define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
 #define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
 				    qsmask) do { } while (0)
+#define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \
+				      level, grplo, grphi, event) \
+				      do { } while (0)
 #define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
 #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
 #define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
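
Note: the new event takes the grace-period bookkeeping from the rcu_node and
rcu_state structures plus one of the strings listed in the comment block above.
A call site in the RCU core would look roughly like the sketch below; the
helper function is illustrative, though the rsp/rnp field names follow the
structures the comment refers to.

	/* Sketch only: report that future grace period 'c' was requested
	 * based on leaf rcu_node data. */
	static void example_report_future_gp(struct rcu_state *rsp,
					     struct rcu_node *rnp,
					     unsigned long c)
	{
		trace_rcu_future_grace_period(rsp->name, rnp->gpnum,
					      rnp->completed, c, rnp->level,
					      rnp->grplo, rnp->grphi,
					      "Startleaf");
	}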
diff --git a/include/trace/events/regmap.h b/include/trace/events/regmap.h
index 41a7dbd570e2..a43a2f67bd8e 100644
--- a/include/trace/events/regmap.h
+++ b/include/trace/events/regmap.h
@@ -175,6 +175,54 @@ DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
 
 );
 
+DECLARE_EVENT_CLASS(regmap_async,
+
+	TP_PROTO(struct device *dev),
+
+	TP_ARGS(dev),
+
+	TP_STRUCT__entry(
+		__string( name, dev_name(dev) )
+	),
+
+	TP_fast_assign(
+		__assign_str(name, dev_name(dev));
+	),
+
+	TP_printk("%s", __get_str(name))
+);
+
+DEFINE_EVENT(regmap_block, regmap_async_write_start,
+
+	TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+	TP_ARGS(dev, reg, count)
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_io_complete,
+
+	TP_PROTO(struct device *dev),
+
+	TP_ARGS(dev)
+
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_complete_start,
+
+	TP_PROTO(struct device *dev),
+
+	TP_ARGS(dev)
+
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_complete_done,
+
+	TP_PROTO(struct device *dev),
+
+	TP_ARGS(dev)
+
+);
+
 #endif /* _TRACE_REGMAP_H */
 
 /* This part must be outside protection */
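
Note: the regmap_async class records only the device name, while
regmap_async_write_start reuses the existing regmap_block class and therefore
takes (dev, reg, count). Firing the events from the regmap core would look
roughly like this sketch (the function name is illustrative, and struct regmap
is assumed to expose its struct device as map->dev):

	/* Sketch only: emit async start/completion events around an I/O. */
	static void example_async_io(struct regmap *map, unsigned int reg,
				     int count)
	{
		trace_regmap_async_write_start(map->dev, reg, count);
		/* ...queue the asynchronous write... */
		trace_regmap_async_io_complete(map->dev);
	}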
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 5a8671e8a67f..e5586caff67a 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -147,7 +147,7 @@ TRACE_EVENT(sched_switch,
 		__print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
 				{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
 				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
-				{ 128, "W" }) : "R",
+				{ 128, "K" }, { 256, "W" }, { 512, "P" }) : "R",
 		__entry->prev_state & TASK_STATE_MAX ? "+" : "",
 		__entry->next_comm, __entry->next_pid, __entry->next_prio)
 );
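
Note: the extra letters decode the task-state bits above the old 128 cutoff.
Assuming the <linux/sched.h> values of this kernel generation, the mapping is
as sketched below; since bit 128 was previously printed as "W", existing
parsers of the sched_switch prev_state field may need adjusting.

	/* Assumed state bits (not defined by this diff, shown for decoding):
	 *   128  TASK_WAKEKILL  -> "K"
	 *   256  TASK_WAKING    -> "W"
	 *   512  TASK_PARKED    -> "P"
	 */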
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 40dc5e8fe340..19edd7facaa1 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -227,29 +227,18 @@ static notrace enum print_line_t \
 ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
 			 struct trace_event *trace_event) \
 { \
-	struct ftrace_event_call *event; \
 	struct trace_seq *s = &iter->seq; \
+	struct trace_seq __maybe_unused *p = &iter->tmp_seq; \
 	struct ftrace_raw_##call *field; \
-	struct trace_entry *entry; \
-	struct trace_seq *p = &iter->tmp_seq; \
 	int ret; \
 	\
-	event = container_of(trace_event, struct ftrace_event_call, \
-			     event); \
-	\
-	entry = iter->ent; \
-	\
-	if (entry->type != event->event.type) { \
-		WARN_ON_ONCE(1); \
-		return TRACE_TYPE_UNHANDLED; \
-	} \
-	\
-	field = (typeof(field))entry; \
+	field = (typeof(field))iter->ent; \
 	\
-	trace_seq_init(p); \
-	ret = trace_seq_printf(s, "%s: ", event->name); \
+	ret = ftrace_raw_output_prep(iter, trace_event); \
 	if (ret) \
-		ret = trace_seq_printf(s, print); \
+		return ret; \
+	\
+	ret = trace_seq_printf(s, print); \
 	if (!ret) \
 		return TRACE_TYPE_PARTIAL_LINE; \
 	\
@@ -335,7 +324,7 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
-static int notrace \
+static int notrace __init \
 ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
 { \
 	struct ftrace_raw_##call field; \
@@ -414,7 +403,8 @@ static inline notrace int ftrace_get_offsets_##call( \
  *
  * static void ftrace_raw_event_<call>(void *__data, proto)
  * {
- *	struct ftrace_event_call *event_call = __data;
+ *	struct ftrace_event_file *ftrace_file = __data;
+ *	struct ftrace_event_call *event_call = ftrace_file->event_call;
  *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
  *	struct ring_buffer_event *event;
  *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
@@ -423,12 +413,16 @@ static inline notrace int ftrace_get_offsets_##call( \
  *	int __data_size;
  *	int pc;
  *
+ *	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
+ *		     &ftrace_file->flags))
+ *		return;
+ *
  *	local_save_flags(irq_flags);
  *	pc = preempt_count();
  *
  *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
  *
- *	event = trace_current_buffer_lock_reserve(&buffer,
+ *	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
  *				  event_<call>->event.type,
  *				  sizeof(*entry) + __data_size,
  *				  irq_flags, pc);
@@ -440,7 +434,7 @@ static inline notrace int ftrace_get_offsets_##call( \
  *	__array macros.
  *
  *	if (!filter_current_check_discard(buffer, event_call, entry, event))
- *		trace_current_buffer_unlock_commit(buffer,
+ *		trace_nowake_buffer_unlock_commit(buffer,
  *						   event, irq_flags, pc);
  * }
  *
@@ -518,7 +512,8 @@ static inline notrace int ftrace_get_offsets_##call( \
 static notrace void \
 ftrace_raw_event_##call(void *__data, proto) \
 { \
-	struct ftrace_event_call *event_call = __data; \
+	struct ftrace_event_file *ftrace_file = __data; \
+	struct ftrace_event_call *event_call = ftrace_file->event_call; \
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets; \
 	struct ring_buffer_event *event; \
 	struct ftrace_raw_##call *entry; \
@@ -527,12 +522,16 @@ ftrace_raw_event_##call(void *__data, proto) \
 	int __data_size; \
 	int pc; \
 	\
+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, \
+		     &ftrace_file->flags)) \
+		return; \
+	\
 	local_save_flags(irq_flags); \
 	pc = preempt_count(); \
 	\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 	\
-	event = trace_current_buffer_lock_reserve(&buffer, \
+	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, \
 				 event_call->event.type, \
 				 sizeof(*entry) + __data_size, \
 				 irq_flags, pc); \
@@ -581,7 +580,7 @@ static inline void ftrace_test_probe_##call(void) \
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
 _TRACE_PERF_PROTO(call, PARAMS(proto)); \
 static const char print_fmt_##call[] = print; \
-static struct ftrace_event_class __used event_class_##call = { \
+static struct ftrace_event_class __used __refdata event_class_##call = { \
 	.system = __stringify(TRACE_SYSTEM), \
 	.define_fields = ftrace_define_fields_##call, \
 	.fields = LIST_HEAD_INIT(event_class_##call.fields),\
@@ -705,5 +704,3 @@ static inline void perf_test_probe_##call(void) \
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 #endif /* CONFIG_PERF_EVENTS */
 
-#undef _TRACE_PROFILE_INIT
-
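
Note: the output macro now delegates its sanity check and "name: " prefix to
ftrace_raw_output_prep(). The sketch below reconstructs roughly what that
helper does, based on the inline code removed above; it is an approximation,
not the exact kernel implementation. It returns 0 on success, so the macro's
"if (ret) return ret;" only fires on the error paths.

	/* Sketch only: validate the entry type and print the event prefix. */
	static int example_raw_output_prep(struct trace_iterator *iter,
					   struct trace_event *trace_event)
	{
		struct ftrace_event_call *event;
		struct trace_entry *entry = iter->ent;

		event = container_of(trace_event, struct ftrace_event_call,
				     event);
		if (entry->type != event->event.type) {
			WARN_ON_ONCE(1);
			return TRACE_TYPE_UNHANDLED;
		}

		trace_seq_init(&iter->tmp_seq);
		if (!trace_seq_printf(&iter->seq, "%s: ", event->name))
			return TRACE_TYPE_PARTIAL_LINE;

		return 0;
	}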