Diffstat (limited to 'include')
-rw-r--r--   include/linux/ftrace_event.h     4
-rw-r--r--   include/linux/kvm_host.h         1
-rw-r--r--   include/linux/nfs_fs.h           5
-rw-r--r--   include/linux/perf_counter.h    60
-rw-r--r--   include/linux/sunrpc/xdr.h      10
-rw-r--r--   include/linux/wait.h             9
-rw-r--r--   include/trace/ftrace.h         183
7 files changed, 224 insertions(+), 48 deletions(-)
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index d7cd193c2277..a81170de7f6b 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -89,7 +89,9 @@ enum print_line_t {
 	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
 };
 
-
+void tracing_generic_entry_update(struct trace_entry *entry,
+				  unsigned long flags,
+				  int pc);
 struct ring_buffer_event *
 trace_current_buffer_lock_reserve(int type, unsigned long len,
 				  unsigned long flags, int pc);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 16713dc672e4..3060bdc35ffe 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -110,6 +110,7 @@ struct kvm_memory_slot {
 
 struct kvm_kernel_irq_routing_entry {
 	u32 gsi;
+	u32 type;
 	int (*set)(struct kvm_kernel_irq_routing_entry *e,
 		   struct kvm *kvm, int level);
 	union {
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index fdffb413b192..f6b90240dd41 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -473,7 +473,6 @@ extern int nfs_writepages(struct address_space *, struct writeback_control *);
 extern int nfs_flush_incompatible(struct file *file, struct page *page);
 extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
 extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
-extern void nfs_writedata_release(void *);
 
 /*
  * Try to write back everything synchronously (but check the
@@ -488,7 +487,6 @@ extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
 extern int nfs_commit_inode(struct inode *, int);
 extern struct nfs_write_data *nfs_commitdata_alloc(void);
 extern void nfs_commit_free(struct nfs_write_data *wdata);
-extern void nfs_commitdata_release(void *wdata);
 #else
 static inline int
 nfs_commit_inode(struct inode *inode, int how)
@@ -507,6 +505,7 @@ nfs_have_writebacks(struct inode *inode)
  * Allocate nfs_write_data structures
  */
 extern struct nfs_write_data *nfs_writedata_alloc(unsigned int npages);
+extern void nfs_writedata_free(struct nfs_write_data *);
 
 /*
  * linux/fs/nfs/read.c
@@ -515,7 +514,6 @@ extern int nfs_readpage(struct file *, struct page *);
 extern int nfs_readpages(struct file *, struct address_space *,
 		struct list_head *, unsigned);
 extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *);
-extern void nfs_readdata_release(void *data);
 extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
 			       struct page *);
 
@@ -523,6 +521,7 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
  * Allocate nfs_read_data structures
  */
 extern struct nfs_read_data *nfs_readdata_alloc(unsigned int npages);
+extern void nfs_readdata_free(struct nfs_read_data *);
 
 /*
  * linux/fs/nfs3proc.c
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index e604e6ef72dd..b53f7006cc4e 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -115,27 +115,44 @@ enum perf_counter_sample_format {
 	PERF_SAMPLE_TID				= 1U << 1,
 	PERF_SAMPLE_TIME			= 1U << 2,
 	PERF_SAMPLE_ADDR			= 1U << 3,
-	PERF_SAMPLE_GROUP			= 1U << 4,
+	PERF_SAMPLE_READ			= 1U << 4,
 	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
 	PERF_SAMPLE_ID				= 1U << 6,
 	PERF_SAMPLE_CPU				= 1U << 7,
 	PERF_SAMPLE_PERIOD			= 1U << 8,
 	PERF_SAMPLE_STREAM_ID			= 1U << 9,
+	PERF_SAMPLE_RAW				= 1U << 10,
 
-	PERF_SAMPLE_MAX = 1U << 10,		/* non-ABI */
+	PERF_SAMPLE_MAX = 1U << 11,		/* non-ABI */
 };
 
 /*
- * Bits that can be set in attr.read_format to request that
- * reads on the counter should return the indicated quantities,
- * in increasing order of bit value, after the counter value.
+ * The format of the data returned by read() on a perf counter fd,
+ * as specified by attr.read_format:
+ *
+ * struct read_format {
+ *	{ u64		value;
+ *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
+ *	  { u64		time_running; } && PERF_FORMAT_RUNNING
+ *	  { u64		id;           } && PERF_FORMAT_ID
+ *	} && !PERF_FORMAT_GROUP
+ *
+ *	{ u64		nr;
+ *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
+ *	  { u64		time_running; } && PERF_FORMAT_RUNNING
+ *	  { u64		value;
+ *	    { u64	id;           } && PERF_FORMAT_ID
+ *	  }		cntr[nr];
+ *	} && PERF_FORMAT_GROUP
+ * };
  */
 enum perf_counter_read_format {
 	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
 	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
 	PERF_FORMAT_ID				= 1U << 2,
+	PERF_FORMAT_GROUP			= 1U << 3,
 
-	PERF_FORMAT_MAX = 1U << 3,		/* non-ABI */
+	PERF_FORMAT_MAX = 1U << 4,		/* non-ABI */
 };
 
 #define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
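The read_format comment above is effectively the userspace contract for read(2) on a counter fd. As a minimal decoding sketch (illustrative, not part of the patch), assuming a group leader opened with attr.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID and neither time field requested:

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* With GROUP|ID and no time fields, each group member contributes
     * exactly { value, id } after the leading member count. */
    struct group_entry {
            uint64_t value;
            uint64_t id;
    };

    static void dump_group(int fd)
    {
            uint64_t buf[1 + 2 * 32];       /* nr + up to 32 { value, id } pairs */
            ssize_t n = read(fd, buf, sizeof(buf));

            if (n < (ssize_t)sizeof(uint64_t))
                    return;                 /* short read: nothing to decode */

            uint64_t nr = buf[0];           /* u64 nr */
            const struct group_entry *e = (const struct group_entry *)&buf[1];

            for (uint64_t i = 0; i < nr; i++)
                    printf("id %llu: %llu\n",
                           (unsigned long long)e[i].id,
                           (unsigned long long)e[i].value);
    }

Requesting PERF_FORMAT_ENABLED or PERF_FORMAT_RUNNING inserts the two time words between nr and the entries, so a robust reader should derive its offsets from attr.read_format rather than hard-coding a struct.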
@@ -342,10 +359,8 @@ enum perf_event_type {
 	 * struct {
 	 *	struct perf_event_header	header;
 	 *	u32				pid, tid;
-	 *	u64				value;
-	 *	{ u64		time_enabled;	} && PERF_FORMAT_ENABLED
-	 *	{ u64		time_running;	} && PERF_FORMAT_RUNNING
-	 *	{ u64		parent_id;	} && PERF_FORMAT_ID
+	 *
+	 *	struct read_format		values;
 	 * };
 	 */
 	PERF_EVENT_READ			= 8,
@@ -363,11 +378,24 @@ enum perf_event_type {
 	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
 	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
 	 *
-	 *	{ u64			nr;
-	 *	  { u64 id, val; }	cnt[nr];  } && PERF_SAMPLE_GROUP
+	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
 	 *
 	 *	{ u64			nr,
 	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
+	 *
+	 *	#
+	 *	# The RAW record below is opaque data wrt the ABI
+	 *	#
+	 *	# That is, the ABI doesn't make any promises wrt to
+	 *	# the stability of its content, it may vary depending
+	 *	# on event, hardware, kernel version and phase of
+	 *	# the moon.
+	 *	#
+	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
+	 *	#
+	 *
+	 *	{ u32			size;
+	 *	  char			data[size]; } && PERF_SAMPLE_RAW
 	 * };
 	 */
 	PERF_EVENT_SAMPLE		= 9,
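Because the raw area is explicitly not ABI, the only safe generic operation is to locate it: a u32 byte count followed by that many opaque bytes at the tail of the sample. A hypothetical cursor over that tail (names are illustrative, not patch code):

    #include <stdint.h>
    #include <string.h>

    /* 'p' points at the raw section of a PERF_EVENT_SAMPLE record, i.e.
     * just past whatever PERF_SAMPLE_* fields precede it. */
    static const unsigned char *sample_raw(const unsigned char *p,
                                           uint32_t *sizep)
    {
            uint32_t size;

            memcpy(&size, p, sizeof(size)); /* u32 size */
            *sizep = size;
            return p + sizeof(size);        /* char data[size]: opaque payload */
    }

Interpreting the payload itself requires knowing which event produced it, e.g. the matching tracepoint's record layout.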
@@ -413,6 +441,11 @@ struct perf_callchain_entry {
 	__u64				ip[PERF_MAX_STACK_DEPTH];
 };
 
+struct perf_raw_record {
+	u32				size;
+	void				*data;
+};
+
 struct task_struct;
 
 /**
@@ -681,10 +714,13 @@ struct perf_sample_data {
 	struct pt_regs			*regs;
 	u64				addr;
 	u64				period;
+	struct perf_raw_record		*raw;
 };
 
 extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
 				 struct perf_sample_data *data);
+extern void perf_counter_output(struct perf_counter *counter, int nmi,
+				struct perf_sample_data *data);
 
 /*
  * Return 1 for a software counter, 0 for a hardware counter
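Putting the new pieces together on the kernel side: a hedged sketch (not taken from the patch; the event and payload are stand-ins) of how a software event could hand an opaque blob to the sampling path through the new perf_sample_data.raw pointer and the newly exported perf_counter_output():

    static void my_emit_raw_sample(struct perf_counter *counter,
                                   struct pt_regs *regs,
                                   void *payload, u32 len)
    {
            struct perf_raw_record raw = {
                    .size = len,
                    .data = payload,
            };
            struct perf_sample_data data = {
                    .regs = regs,
                    .addr = 0,
                    .raw  = &raw,
            };

            /* nmi == 0: not in NMI context. The payload is copied into
             * the sample only for counters that set PERF_SAMPLE_RAW. */
            perf_counter_output(counter, 0, &data);
    }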
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index b99c625fddfe..7da466ba4b0d 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -117,17 +117,15 @@ static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int le
 static inline __be32 *
 xdr_encode_hyper(__be32 *p, __u64 val)
 {
-	*p++ = htonl(val >> 32);
-	*p++ = htonl(val & 0xFFFFFFFF);
-	return p;
+	*(__be64 *)p = cpu_to_be64(val);
+	return p + 2;
 }
 
 static inline __be32 *
 xdr_decode_hyper(__be32 *p, __u64 *valp)
 {
-	*valp = ((__u64) ntohl(*p++)) << 32;
-	*valp |= ntohl(*p++);
-	return p;
+	*valp = be64_to_cpup((__be64 *)p);
+	return p + 2;
 }
 
 /*
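The rewrite replaces two 32-bit stores/loads with a single 64-bit big-endian access; the wire format is unchanged and the returned pointer still advances by two XDR words. A userspace self-check of that equivalence (htobe64() from glibc's <endian.h> standing in for the kernel's cpu_to_be64()):

    #include <arpa/inet.h>  /* htonl() */
    #include <assert.h>
    #include <endian.h>     /* htobe64() */
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
            uint64_t val = 0x1122334455667788ULL;
            uint32_t old_way[2];
            uint64_t new_way;

            /* Old encoder: high word, then low word, each via htonl(). */
            old_way[0] = htonl((uint32_t)(val >> 32));
            old_way[1] = htonl((uint32_t)(val & 0xFFFFFFFF));

            /* New encoder: one big-endian 64-bit store. */
            new_way = htobe64(val);

            assert(memcmp(old_way, &new_way, sizeof(new_way)) == 0);
            return 0;
    }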
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 6788e1a4d4ca..cf3c2f5dba51 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -77,7 +77,14 @@ struct task_struct;
 #define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
 	{ .flags = word, .bit_nr = bit, }
 
-extern void init_waitqueue_head(wait_queue_head_t *q);
+extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *);
+
+#define init_waitqueue_head(q)				\
+	do {						\
+		static struct lock_class_key __key;	\
+							\
+		__init_waitqueue_head((q), &__key);	\
+	} while (0)
 
 #ifdef CONFIG_LOCKDEP
 # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
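Turning init_waitqueue_head() into a macro lets each call site carry its own static struct lock_class_key, so lockdep gives every waitqueue initialized at a given source location its own lock class instead of merging them all into one. At a hypothetical driver call site the expansion looks roughly like:

    /* init_waitqueue_head(&dev->wq); expands to: */
    do {
            static struct lock_class_key __key;     /* one key per call site */

            __init_waitqueue_head((&dev->wq), &__key);
    } while (0);

The static key lives for the lifetime of the kernel, which is exactly what lockdep needs to identify the class.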
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 1867553c61e5..f64fbaae781a 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -144,6 +144,9 @@
 #undef TP_fast_assign
 #define TP_fast_assign(args...) args
 
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
 static int								\
@@ -345,6 +348,56 @@ static inline int ftrace_get_offsets_##call( \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+#ifdef CONFIG_EVENT_PROFILE
+
+/*
+ * Generate the functions needed for tracepoint perf_counter support.
+ *
+ * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
+ *
+ * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
+ * {
+ *	int ret = 0;
+ *
+ *	if (!atomic_inc_return(&event_call->profile_count))
+ *		ret = register_trace_<call>(ftrace_profile_<call>);
+ *
+ *	return ret;
+ * }
+ *
+ * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
+ * {
+ *	if (atomic_add_negative(-1, &event_call->profile_count))
+ *		unregister_trace_<call>(ftrace_profile_<call>);
+ * }
+ *
+ */
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
+									\
+static void ftrace_profile_##call(proto);				\
+									\
+static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+{									\
+	int ret = 0;							\
+									\
+	if (!atomic_inc_return(&event_call->profile_count))		\
+		ret = register_trace_##call(ftrace_profile_##call);	\
+									\
+	return ret;							\
+}									\
+									\
+static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+{									\
+	if (atomic_add_negative(-1, &event_call->profile_count))	\
+		unregister_trace_##call(ftrace_profile_##call);		\
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#endif
+
 /*
  * Stage 4 of the trace events.
  *
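The enable/disable pair relies on profile_count starting at ATOMIC_INIT(-1) (see _TRACE_PROFILE_INIT below): atomic_inc_return() yields 0 only for the first enabler, and atomic_add_negative(-1, ...) fires only when the count drops back to -1. A userspace analogue of the counting scheme using C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int profile_count = -1;   /* mirrors ATOMIC_INIT(-1) */

    static void profile_enable(void)
    {
            /* kernel: if (!atomic_inc_return(&count)) register(...); */
            if (atomic_fetch_add(&profile_count, 1) + 1 == 0)
                    printf("register tracepoint\n");   /* first enabler only */
    }

    static void profile_disable(void)
    {
            /* kernel: if (atomic_add_negative(-1, &count)) unregister(...); */
            if (atomic_fetch_sub(&profile_count, 1) - 1 < 0)
                    printf("unregister tracepoint\n"); /* last disabler only */
    }

    int main(void)
    {
            profile_enable();       /* registers */
            profile_enable();       /* nested: no-op */
            profile_disable();      /* nested: no-op */
            profile_disable();      /* unregisters */
            return 0;
    }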
@@ -447,28 +500,6 @@ static inline int ftrace_get_offsets_##call( \
 #define TP_FMT(fmt, args...)	fmt "\n", ##args
 
 #ifdef CONFIG_EVENT_PROFILE
-#define _TRACE_PROFILE(call, proto, args)				\
-static void ftrace_profile_##call(proto)				\
-{									\
-	extern void perf_tpcounter_event(int);				\
-	perf_tpcounter_event(event_##call.id);				\
-}									\
-									\
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
-{									\
-	int ret = 0;							\
-									\
-	if (!atomic_inc_return(&event_call->profile_count))		\
-		ret = register_trace_##call(ftrace_profile_##call);	\
-									\
-	return ret;							\
-}									\
-									\
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
-{									\
-	if (atomic_add_negative(-1, &event_call->profile_count))	\
-		unregister_trace_##call(ftrace_profile_##call);		\
-}
 
 #define _TRACE_PROFILE_INIT(call)					\
 	.profile_count = ATOMIC_INIT(-1),				\
@@ -476,7 +507,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 	.profile_disable = ftrace_profile_disable_##call,
 
 #else
-#define _TRACE_PROFILE(call, proto, args)
 #define _TRACE_PROFILE_INIT(call)
 #endif
 
@@ -502,7 +532,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
-_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args))			\
 									\
 static struct ftrace_event_call event_##call;				\
 									\
@@ -586,6 +615,110 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#undef _TRACE_PROFILE
+/*
+ * Define the insertion callback to profile events
+ *
+ * The job is very similar to ftrace_raw_event_<call> except that we don't
+ * insert in the ring buffer but in a perf counter.
+ *
+ * static void ftrace_profile_<call>(proto)
+ * {
+ *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
+ *	struct ftrace_event_call *event_call = &event_<call>;
+ *	extern void perf_tpcounter_event(int, u64, u64, void *, int);
+ *	struct ftrace_raw_##call *entry;
+ *	u64 __addr = 0, __count = 1;
+ *	unsigned long irq_flags;
+ *	int __entry_size;
+ *	int __data_size;
+ *	int pc;
+ *
+ *	local_save_flags(irq_flags);
+ *	pc = preempt_count();
+ *
+ *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *
+ *	// Below we want to get the aligned size by taking into account
+ *	// the u32 field that will later store the buffer size
+ *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
+ *			     sizeof(u64));
+ *	__entry_size -= sizeof(u32);
+ *
+ *	do {
+ *		char raw_data[__entry_size]; <- allocate our sample in the stack
+ *		struct trace_entry *ent;
+ *
+ *		zero dead bytes from alignment to avoid stack leak to userspace:
+ *
+ *		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
+ *		entry = (struct ftrace_raw_<call> *)raw_data;
+ *		ent = &entry->ent;
+ *		tracing_generic_entry_update(ent, irq_flags, pc);
+ *		ent->type = event_call->id;
+ *
+ *		<tstruct> <- do some jobs with dynamic arrays
+ *
+ *		<assign> <- affect our values
+ *
+ *		perf_tpcounter_event(event_call->id, __addr, __count, entry,
+ *			     __entry_size); <- submit them to perf counter
+ *	} while (0);
+ *
+ * }
+ */
+
+#ifdef CONFIG_EVENT_PROFILE
+
+#undef __perf_addr
+#define __perf_addr(a) __addr = (a)
+
+#undef __perf_count
+#define __perf_count(c) __count = (c)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
+static void ftrace_profile_##call(proto)				\
+{									\
+	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
+	struct ftrace_event_call *event_call = &event_##call;		\
+	extern void perf_tpcounter_event(int, u64, u64, void *, int);	\
+	struct ftrace_raw_##call *entry;				\
+	u64 __addr = 0, __count = 1;					\
+	unsigned long irq_flags;					\
+	int __entry_size;						\
+	int __data_size;						\
+	int pc;								\
+									\
+	local_save_flags(irq_flags);					\
+	pc = preempt_count();						\
+									\
+	__data_size = ftrace_get_offsets_##call(&__data_offsets, args);\
+	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
+			     sizeof(u64));				\
+	__entry_size -= sizeof(u32);					\
+									\
+	do {								\
+		char raw_data[__entry_size];				\
+		struct trace_entry *ent;				\
+									\
+		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
+		entry = (struct ftrace_raw_##call *)raw_data;		\
+		ent = &entry->ent;					\
+		tracing_generic_entry_update(ent, irq_flags, pc);	\
+		ent->type = event_call->id;				\
+									\
+		tstruct							\
+									\
+		{ assign; }						\
+									\
+		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
+			     __entry_size);				\
+	} while (0);							\
+									\
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#endif /* CONFIG_EVENT_PROFILE */
+
 #undef _TRACE_PROFILE_INIT
 
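A worked instance of the __entry_size arithmetic in the callback above (sizes are illustrative): the u32 that will later hold the buffer size is counted into the alignment and then subtracted again, so size header plus sample land together on a u64 boundary, and the zeroed tail word keeps the alignment padding from leaking stack bytes to userspace.

    #include <assert.h>
    #include <stdint.h>

    #define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            int entry_sz = 28;  /* say: sizeof(struct ftrace_raw_<call>) */
            int data_sz  = 13;  /* say: this event's dynamic-array bytes */

            int sz = ALIGN(entry_sz + data_sz + (int)sizeof(uint32_t),
                           (int)sizeof(uint64_t));      /* 45 -> 48 */
            sz -= (int)sizeof(uint32_t);                /* 48 -> 44 */

            assert(sz == 44);   /* size of the on-stack raw_data[] buffer */
            assert((sz + sizeof(uint32_t)) % sizeof(uint64_t) == 0);
            return 0;
    }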