author    Rafael J. Wysocki <rjw@sisk.pl>  2009-08-10 17:40:50 -0400
committer Rafael J. Wysocki <rjw@sisk.pl>  2009-08-10 17:40:50 -0400
commit    dcbf77cac640af0ab944d5cbb07934bf6708b4d9 (patch)
tree      e2f4d1b3a1089ee10436cb19740a9cb99f2dc527 /include
parent    c00aafcd4977769e8728292302ddbbb8b1082fab (diff)
parent    85dfd81dc57e8183a277ddd7a56aa65c96f3f487 (diff)
Merge branch 'master' into for-linus
Diffstat (limited to 'include')
-rw-r--r--  include/drm/drm_pciids.h             |   5
-rw-r--r--  include/linux/decompress/generic.h   |  32
-rw-r--r--  include/linux/fs.h                   |   3
-rw-r--r--  include/linux/ftrace_event.h         |  12
-rw-r--r--  include/linux/input/matrix_keypad.h  |  13
-rw-r--r--  include/linux/kvm_host.h             |   1
-rw-r--r--  include/linux/mtd/mtd.h              |   2
-rw-r--r--  include/linux/mtd/partitions.h       |   2
-rw-r--r--  include/linux/nodemask.h             |  28
-rw-r--r--  include/linux/perf_counter.h         |  11
-rw-r--r--  include/trace/ftrace.h               | 183
11 files changed, 239 insertions(+), 53 deletions(-)
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 9d4c00491547..853508499d20 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -370,6 +370,11 @@
 	{0x1002, 0x9614, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9615, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9616, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9711, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0, 0, 0}
 
 #define r128_PCI_IDS \
diff --git a/include/linux/decompress/generic.h b/include/linux/decompress/generic.h
index 6dfb856327bb..0c7111a55a1a 100644
--- a/include/linux/decompress/generic.h
+++ b/include/linux/decompress/generic.h
@@ -1,31 +1,37 @@
 #ifndef DECOMPRESS_GENERIC_H
 #define DECOMPRESS_GENERIC_H
 
-/* Minimal chunksize to be read.
- *Bzip2 prefers at least 4096
- *Lzma prefers 0x10000 */
-#define COMPR_IOBUF_SIZE 4096
-
 typedef int (*decompress_fn) (unsigned char *inbuf, int len,
 			      int(*fill)(void*, unsigned int),
-			      int(*writebb)(void*, unsigned int),
-			      unsigned char *output,
+			      int(*flush)(void*, unsigned int),
+			      unsigned char *outbuf,
 			      int *posp,
 			      void(*error)(char *x));
 
 /* inbuf   - input buffer
  *len     - len of pre-read data in inbuf
- *fill    - function to fill inbuf if empty
- *writebb - function to write out outbug
+ *fill    - function to fill inbuf when empty
+ *flush   - function to write out outbuf
+ *outbuf  - output buffer
  *posp    - if non-null, input position (number of bytes read) will be
  *	    returned here
  *
- *If len != 0, the inbuf is initialized (with as much data), and fill
- *should not be called
- *If len = 0, the inbuf is allocated, but empty. Its size is IOBUF_SIZE
- *fill should be called (repeatedly...) to read data, at most IOBUF_SIZE
+ *If len != 0, inbuf should contain all the necessary input data, and fill
+ *should be NULL
+ *If len = 0, inbuf can be NULL, in which case the decompressor will allocate
+ *the input buffer.  If inbuf != NULL it must be at least XXX_IOBUF_SIZE bytes.
+ *fill will be called (repeatedly...) to read data, at most XXX_IOBUF_SIZE
+ *bytes should be read per call.  Replace XXX with the appropriate decompressor
+ *name, i.e. LZMA_IOBUF_SIZE.
+ *
+ *If flush = NULL, outbuf must be large enough to buffer all the expected
+ *output.  If flush != NULL, the output buffer will be allocated by the
+ *decompressor (outbuf = NULL), and the flush function will be called to
+ *flush the output buffer at the appropriate time (decompressor and stream
+ *dependent).
  */
 
+
 /* Utility routine to detect the decompression method */
 decompress_fn decompress_method(const unsigned char *inbuf, int len,
 				const char **name);
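
For illustration only, a minimal caller sketch against the updated interface
(not part of this commit; my_flush, my_error and my_decompress are
hypothetical names):

#include <linux/decompress/generic.h>
#include <linux/kernel.h>

/* Hypothetical flush callback: consume one chunk of decompressed output. */
static int my_flush(void *buf, unsigned int len)
{
	/* ... write len bytes from buf somewhere ... */
	return len;		/* report how many bytes were consumed */
}

static void my_error(char *msg)
{
	pr_err("decompress: %s\n", msg);
}

static int my_decompress(unsigned char *data, int data_len)
{
	const char *name;
	decompress_fn decomp = decompress_method(data, data_len, &name);

	if (!decomp)
		return -EINVAL;		/* no known signature found */

	/*
	 * len != 0 and fill == NULL: data already holds the whole stream.
	 * flush != NULL and outbuf == NULL: the decompressor allocates the
	 * output buffer itself and hands chunks to my_flush().
	 */
	return decomp(data, data_len, NULL, my_flush, NULL, NULL, my_error);
}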
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a36ffa5a77a4..67888a9e0655 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2137,7 +2137,7 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
 
 extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
 
-extern struct inode * inode_init_always(struct super_block *, struct inode *);
+extern int inode_init_always(struct super_block *, struct inode *);
 extern void inode_init_once(struct inode *);
 extern void inode_add_to_lists(struct super_block *, struct inode *);
 extern void iput(struct inode *);
@@ -2164,6 +2164,7 @@ extern void __iget(struct inode * inode);
 extern void iget_failed(struct inode *);
 extern void clear_inode(struct inode *);
 extern void destroy_inode(struct inode *);
+extern void __destroy_inode(struct inode *);
 extern struct inode *new_inode(struct super_block *);
 extern int should_remove_suid(struct dentry *);
 extern int file_remove_suid(struct file *);
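
Since inode_init_always() now returns an int rather than an inode pointer, a
filesystem's alloc_inode path would check the result and unwind on failure; a
hedged sketch, with myfs_inode_cachep as a hypothetical slab cache:

static struct inode *myfs_alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	inode = kmem_cache_alloc(myfs_inode_cachep, GFP_KERNEL);
	if (!inode)
		return NULL;

	/* A non-zero return now signals failure; the caller cleans up. */
	if (inode_init_always(sb, inode)) {
		kmem_cache_free(myfs_inode_cachep, inode);
		return NULL;
	}
	return inode;
}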
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 5c093ffc655b..a81170de7f6b 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -89,7 +89,9 @@ enum print_line_t {
 	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
 };
 
-
+void tracing_generic_entry_update(struct trace_entry *entry,
+				  unsigned long flags,
+				  int pc);
 struct ring_buffer_event *
 trace_current_buffer_lock_reserve(int type, unsigned long len,
 				  unsigned long flags, int pc);
@@ -119,11 +121,9 @@ struct ftrace_event_call {
 	void			*filter;
 	void			*mod;
 
-#ifdef CONFIG_EVENT_PROFILE
 	atomic_t	profile_count;
 	int		(*profile_enable)(struct ftrace_event_call *);
 	void		(*profile_disable)(struct ftrace_event_call *);
-#endif
 };
 
 #define MAX_FILTER_PRED	32
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
index 7964516c6954..15d5903af2dd 100644
--- a/include/linux/input/matrix_keypad.h
+++ b/include/linux/input/matrix_keypad.h
@@ -15,12 +15,13 @@
 #define KEY_COL(k)		(((k) >> 16) & 0xff)
 #define KEY_VAL(k)		((k) & 0xffff)
 
+#define MATRIX_SCAN_CODE(row, col, row_shift)	(((row) << (row_shift)) + (col))
+
 /**
  * struct matrix_keymap_data - keymap for matrix keyboards
  * @keymap: pointer to array of uint32 values encoded with KEY() macro
  *	representing keymap
  * @keymap_size: number of entries (initialized) in this keymap
- * @max_keymap_size: maximum size of keymap supported by the device
  *
  * This structure is supposed to be used by platform code to supply
  * keymaps to drivers that implement matrix-like keypads/keyboards.
@@ -28,14 +29,13 @@
 struct matrix_keymap_data {
 	const uint32_t *keymap;
 	unsigned int	keymap_size;
-	unsigned int	max_keymap_size;
 };
 
 /**
  * struct matrix_keypad_platform_data - platform-dependent keypad data
  * @keymap_data: pointer to &matrix_keymap_data
- * @row_gpios: array of gpio numbers representing rows
- * @col_gpios: array of gpio numbers representing columns
+ * @row_gpios: pointer to array of gpio numbers representing rows
+ * @col_gpios: pointer to array of gpio numbers representing columns
  * @num_row_gpios: actual number of row gpios used by device
  * @num_col_gpios: actual number of col gpios used by device
  * @col_scan_delay_us: delay, measured in microseconds, that is
@@ -48,8 +48,9 @@ struct matrix_keymap_data {
 struct matrix_keypad_platform_data {
 	const struct matrix_keymap_data *keymap_data;
 
-	unsigned int	row_gpios[MATRIX_MAX_ROWS];
-	unsigned int	col_gpios[MATRIX_MAX_COLS];
+	const unsigned int *row_gpios;
+	const unsigned int *col_gpios;
+
 	unsigned int	num_row_gpios;
 	unsigned int	num_col_gpios;
 
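
A board-file sketch of the new contract (all GPIO numbers and key codes below
are hypothetical): the keymap is indexed by drivers via MATRIX_SCAN_CODE(),
and row_gpios/col_gpios are now plain pointers sized by num_row_gpios and
num_col_gpios:

static const uint32_t board_keymap[] = {
	KEY(0, 0, KEY_A),	/* row 0, column 0 */
	KEY(0, 1, KEY_B),
	KEY(1, 0, KEY_ENTER),
};

static const struct matrix_keymap_data board_keymap_data = {
	.keymap		= board_keymap,
	.keymap_size	= ARRAY_SIZE(board_keymap),
};

static const unsigned int board_row_gpios[] = { 10, 11 };
static const unsigned int board_col_gpios[] = { 20, 21 };

static struct matrix_keypad_platform_data board_keypad_data = {
	.keymap_data	= &board_keymap_data,
	.row_gpios	= board_row_gpios,
	.col_gpios	= board_col_gpios,
	.num_row_gpios	= ARRAY_SIZE(board_row_gpios),
	.num_col_gpios	= ARRAY_SIZE(board_col_gpios),
};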
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 16713dc672e4..3060bdc35ffe 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -110,6 +110,7 @@ struct kvm_memory_slot {
 
 struct kvm_kernel_irq_routing_entry {
 	u32 gsi;
+	u32 type;
 	int (*set)(struct kvm_kernel_irq_routing_entry *e,
 		   struct kvm *kvm, int level);
 	union {
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 5675b63a0631..0f32a9b6ff55 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -251,7 +251,7 @@ struct mtd_info {
 
 static inline struct mtd_info *dev_to_mtd(struct device *dev)
 {
-	return dev ? container_of(dev, struct mtd_info, dev) : NULL;
+	return dev ? dev_get_drvdata(dev) : NULL;
 }
 
 static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index af6dcb992bc3..b70313d33ff8 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -47,6 +47,8 @@ struct mtd_partition {
 #define MTDPART_SIZ_FULL	(0)
 
 
+struct mtd_info;
+
 int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
 int del_mtd_partitions(struct mtd_info *);
 
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 829b94b156f2..b359c4a9ec9e 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -82,6 +82,12 @@
  * to generate slightly worse code.  So use a simple one-line #define
  * for node_isset(), instead of wrapping an inline inside a macro, the
  * way we do the other calls.
+ *
+ * NODEMASK_SCRATCH
+ * When doing the above logical AND, OR, XOR and remap operations, callers
+ * tend to need temporary nodemask_t's on the stack.  But if NODES_SHIFT is
+ * large, nodemask_t's consume too much stack space.  NODEMASK_SCRATCH is a
+ * helper for such situations.  See below and CPUMASK_ALLOC also.
  */
 
 #include <linux/kernel.h>
@@ -473,4 +479,26 @@ static inline int num_node_state(enum node_states state)
 #define for_each_node(node)	   for_each_node_state(node, N_POSSIBLE)
 #define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
 
+/*
+ * For the nodemask scratch area. (See CPUMASK_ALLOC() in cpumask.h)
+ */
+
+#if NODES_SHIFT > 8 /* nodemask_t > 32 bytes */
+#define NODEMASK_ALLOC(x, m)	struct x *m = kmalloc(sizeof(*m), GFP_KERNEL)
+#define NODEMASK_FREE(m)	kfree(m)
+#else
+#define NODEMASK_ALLOC(x, m)	struct x _m, *m = &_m
+#define NODEMASK_FREE(m)
+#endif
+
+/* An example structure for using NODEMASK_ALLOC, used in mempolicy. */
+struct nodemask_scratch {
+	nodemask_t	mask1;
+	nodemask_t	mask2;
+};
+
+#define NODEMASK_SCRATCH(x)	NODEMASK_ALLOC(nodemask_scratch, x)
+#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x)
+
+
 #endif /* __LINUX_NODEMASK_H */
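
The intended usage pattern, sketched for a hypothetical caller that needs two
temporary nodemasks; NODEMASK_SCRATCH() degrades to a stack variable when
nodemask_t is small, so the NULL check only matters in the kmalloc case:

static int my_update_nodes(const nodemask_t *a, const nodemask_t *b)
{
	NODEMASK_SCRATCH(scratch);

	if (!scratch)
		return -ENOMEM;		/* only possible when kmalloc'ed */

	nodes_and(scratch->mask1, *a, *b);
	nodes_or(scratch->mask2, *a, *b);
	/* ... use scratch->mask1 and scratch->mask2 ... */

	NODEMASK_SCRATCH_FREE(scratch);
	return 0;
}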
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index e604e6ef72dd..a9d823a93fe8 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -121,8 +121,9 @@ enum perf_counter_sample_format {
 	PERF_SAMPLE_CPU				= 1U << 7,
 	PERF_SAMPLE_PERIOD			= 1U << 8,
 	PERF_SAMPLE_STREAM_ID			= 1U << 9,
+	PERF_SAMPLE_RAW				= 1U << 10,
 
-	PERF_SAMPLE_MAX = 1U << 10,		/* non-ABI */
+	PERF_SAMPLE_MAX = 1U << 11,		/* non-ABI */
 };
 
 /*
@@ -368,6 +369,8 @@ enum perf_event_type {
 	 *
 	 *	{ u64			nr,
 	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
+	 *	{ u32			size;
+	 *	  char			data[size]; } && PERF_SAMPLE_RAW
 	 * };
 	 */
 	PERF_EVENT_SAMPLE		= 9,
@@ -413,6 +416,11 @@ struct perf_callchain_entry {
 	__u64	ip[PERF_MAX_STACK_DEPTH];
 };
 
+struct perf_raw_record {
+	u32	size;
+	void	*data;
+};
+
 struct task_struct;
 
 /**
@@ -681,6 +689,7 @@ struct perf_sample_data {
 	struct pt_regs		*regs;
 	u64			addr;
 	u64			period;
+	struct perf_raw_record	*raw;
 };
 
 extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
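
A sketch of how in-kernel code could attach an opaque blob to a sample through
the new raw pointer; my_emit_sample() is hypothetical, and it assumes
perf_counter_overflow()'s trailing argument is the struct perf_sample_data *
shown above:

static void my_emit_sample(struct perf_counter *counter,
			   struct pt_regs *regs, void *blob, u32 blob_len)
{
	struct perf_raw_record raw = {
		.size	= blob_len,	/* emitted as the u32 size field */
		.data	= blob,		/* emitted as char data[size] */
	};
	struct perf_sample_data data = {
		.regs	= regs,
		.addr	= 0,
		.raw	= &raw,		/* consumed when PERF_SAMPLE_RAW is set */
	};

	perf_counter_overflow(counter, 0 /* not in NMI */, &data);
}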
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 1867553c61e5..f64fbaae781a 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -144,6 +144,9 @@
 #undef TP_fast_assign
 #define TP_fast_assign(args...) args
 
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
 static int								\
@@ -345,6 +348,56 @@ static inline int ftrace_get_offsets_##call( \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+#ifdef CONFIG_EVENT_PROFILE
+
+/*
+ * Generate the functions needed for tracepoint perf_counter support.
+ *
+ * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
+ *
+ * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
+ * {
+ * 	int ret = 0;
+ *
+ * 	if (!atomic_inc_return(&event_call->profile_count))
+ * 		ret = register_trace_<call>(ftrace_profile_<call>);
+ *
+ * 	return ret;
+ * }
+ *
+ * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
+ * {
+ * 	if (atomic_add_negative(-1, &event_call->profile_count))
+ * 		unregister_trace_<call>(ftrace_profile_<call>);
+ * }
+ *
+ */
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
+									\
+static void ftrace_profile_##call(proto);				\
+									\
+static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+{									\
+	int ret = 0;							\
+									\
+	if (!atomic_inc_return(&event_call->profile_count))		\
+		ret = register_trace_##call(ftrace_profile_##call);	\
+									\
+	return ret;							\
+}									\
+									\
+static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+{									\
+	if (atomic_add_negative(-1, &event_call->profile_count))	\
+		unregister_trace_##call(ftrace_profile_##call);		\
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#endif
+
 /*
  * Stage 4 of the trace events.
  *
@@ -447,28 +500,6 @@ static inline int ftrace_get_offsets_##call( \
 #define TP_FMT(fmt, args...)	fmt "\n", ##args
 
 #ifdef CONFIG_EVENT_PROFILE
-#define _TRACE_PROFILE(call, proto, args)				\
-static void ftrace_profile_##call(proto)				\
-{									\
-	extern void perf_tpcounter_event(int);				\
-	perf_tpcounter_event(event_##call.id);				\
-}									\
-									\
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
-{									\
-	int ret = 0;							\
-									\
-	if (!atomic_inc_return(&event_call->profile_count))		\
-		ret = register_trace_##call(ftrace_profile_##call);	\
-									\
-	return ret;							\
-}									\
-									\
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
-{									\
-	if (atomic_add_negative(-1, &event_call->profile_count))	\
-		unregister_trace_##call(ftrace_profile_##call);		\
-}
 
 #define _TRACE_PROFILE_INIT(call)					\
 	.profile_count = ATOMIC_INIT(-1),				\
@@ -476,7 +507,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 	.profile_disable = ftrace_profile_disable_##call,
 
 #else
-#define _TRACE_PROFILE(call, proto, args)
 #define _TRACE_PROFILE_INIT(call)
 #endif
 
@@ -502,7 +532,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
-_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args))			\
 									\
 static struct ftrace_event_call event_##call;				\
 									\
@@ -586,6 +615,110 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#undef _TRACE_PROFILE
+/*
+ * Define the insertion callback to profile events
+ *
+ * The job is very similar to ftrace_raw_event_<call> except that we don't
+ * insert in the ring buffer but in a perf counter.
+ *
+ * static void ftrace_profile_<call>(proto)
+ * {
+ *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
+ *	struct ftrace_event_call *event_call = &event_<call>;
+ *	extern void perf_tpcounter_event(int, u64, u64, void *, int);
+ *	struct ftrace_raw_##call *entry;
+ *	u64 __addr = 0, __count = 1;
+ *	unsigned long irq_flags;
+ *	int __entry_size;
+ *	int __data_size;
+ *	int pc;
+ *
+ *	local_save_flags(irq_flags);
+ *	pc = preempt_count();
+ *
+ *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *
+ *	// Below we want to get the aligned size by taking into account
+ *	// the u32 field that will later store the buffer size
+ *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
+ *			     sizeof(u64));
+ *	__entry_size -= sizeof(u32);
+ *
+ *	do {
+ *		char raw_data[__entry_size]; <- allocate our sample in the stack
+ *		struct trace_entry *ent;
+ *
+ *		zero dead bytes from alignment to avoid stack leak to userspace:
+ *
+ *		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
+ *		entry = (struct ftrace_raw_<call> *)raw_data;
+ *		ent = &entry->ent;
+ *		tracing_generic_entry_update(ent, irq_flags, pc);
+ *		ent->type = event_call->id;
+ *
+ *		<tstruct> <- do some jobs with dynamic arrays
+ *
+ *		<assign>  <- affect our values
+ *
+ *		perf_tpcounter_event(event_call->id, __addr, __count, entry,
+ *			     __entry_size);  <- submit them to perf counter
+ *	} while (0);
+ *
+ * }
+ */
+
+#ifdef CONFIG_EVENT_PROFILE
+
+#undef __perf_addr
+#define __perf_addr(a) __addr = (a)
+
+#undef __perf_count
+#define __perf_count(c) __count = (c)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
+static void ftrace_profile_##call(proto)				\
+{									\
+	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
+	struct ftrace_event_call *event_call = &event_##call;		\
+	extern void perf_tpcounter_event(int, u64, u64, void *, int);	\
+	struct ftrace_raw_##call *entry;				\
+	u64 __addr = 0, __count = 1;					\
+	unsigned long irq_flags;					\
+	int __entry_size;						\
+	int __data_size;						\
+	int pc;								\
+									\
+	local_save_flags(irq_flags);					\
+	pc = preempt_count();						\
+									\
+	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
+	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
+			     sizeof(u64));				\
+	__entry_size -= sizeof(u32);					\
+									\
+	do {								\
+		char raw_data[__entry_size];				\
+		struct trace_entry *ent;				\
+									\
+		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
+		entry = (struct ftrace_raw_##call *)raw_data;		\
+		ent = &entry->ent;					\
+		tracing_generic_entry_update(ent, irq_flags, pc);	\
+		ent->type = event_call->id;				\
+									\
+		tstruct							\
+									\
+		{ assign; }						\
+									\
+		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
+			     __entry_size);				\
+	} while (0);							\
+									\
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#endif /* CONFIG_EVENT_PROFILE */
+
 #undef _TRACE_PROFILE_INIT
 
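
Finally, a hypothetical event definition sketching how the new
TP_perf_assign()/__perf_count() hooks are meant to be used (modeled on the
sched_stat_* events; my_event and its fields are made up): the perf-only
assignment weights the sample without touching the regular ring-buffer path.

TRACE_EVENT(my_event,

	TP_PROTO(struct task_struct *tsk, u64 runtime),

	TP_ARGS(tsk, runtime),

	TP_STRUCT__entry(
		__field(pid_t,	pid)
		__field(u64,	runtime)
	),

	TP_fast_assign(
		__entry->pid	 = tsk->pid;
		__entry->runtime = runtime;
	)
	TP_perf_assign(
		__perf_count(runtime);	/* weight the perf sample by runtime */
	),

	TP_printk("pid=%d runtime=%llu",
		  __entry->pid, (unsigned long long)__entry->runtime)
);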