diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-04-05 14:04:19 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-04-05 14:04:19 -0400 |
| commit | 714f83d5d9f7c785f622259dad1f4fad12d64664 (patch) | |
| tree | 20563541ae438e11d686b4d629074eb002a481b7 /include/linux | |
| parent | 8901e7ffc2fa78ede7ce9826dbad68a3a25dc2dc (diff) | |
| parent | 645dae969c3b8651c5bc7c54a1835ec03820f85f (diff) | |
Merge branch 'tracing-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'tracing-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (413 commits)
tracing, net: fix net tree and tracing tree merge interaction
tracing, powerpc: fix powerpc tree and tracing tree interaction
ring-buffer: do not remove reader page from list on ring buffer free
function-graph: allow unregistering twice
trace: make argument 'mem' of trace_seq_putmem() const
tracing: add missing 'extern' keywords to trace_output.h
tracing: provide trace_seq_reserve()
blktrace: print out BLK_TN_MESSAGE properly
blktrace: extract duplicate code
blktrace: fix memory leak when freeing struct blk_io_trace
blktrace: fix blk_probes_ref chaos
blktrace: make classic output more classic
blktrace: fix off-by-one bug
blktrace: fix the original blktrace
blktrace: fix a race when creating blk_tree_root in debugfs
blktrace: fix timestamp in binary output
tracing, Text Edit Lock: cleanup
tracing: filter fix for TRACE_EVENT_FORMAT events
ftrace: Using FTRACE_WARN_ON() to check "freed record" in ftrace_release()
x86: kretprobe-booster interrupt emulation code fix
...
Fix up trivial conflicts in
arch/parisc/include/asm/ftrace.h
include/linux/memory.h
kernel/extable.c
kernel/module.c
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/blktrace_api.h | 5 | ||||
| -rw-r--r-- | include/linux/compiler.h | 6 | ||||
| -rw-r--r-- | include/linux/debugfs.h | 8 | ||||
| -rw-r--r-- | include/linux/ftrace.h | 244 | ||||
| -rw-r--r-- | include/linux/ftrace_irq.h | 2 | ||||
| -rw-r--r-- | include/linux/hardirq.h | 73 | ||||
| -rw-r--r-- | include/linux/interrupt.h | 5 | ||||
| -rw-r--r-- | include/linux/kernel.h | 133 | ||||
| -rw-r--r-- | include/linux/memory.h | 6 | ||||
| -rw-r--r-- | include/linux/module.h | 5 | ||||
| -rw-r--r-- | include/linux/ring_buffer.h | 38 | ||||
| -rw-r--r-- | include/linux/sched.h | 4 | ||||
| -rw-r--r-- | include/linux/slab_def.h | 68 | ||||
| -rw-r--r-- | include/linux/slob_def.h | 9 | ||||
| -rw-r--r-- | include/linux/slub_def.h | 53 | ||||
| -rw-r--r-- | include/linux/syscalls.h | 60 | ||||
| -rw-r--r-- | include/linux/trace_clock.h | 19 | ||||
| -rw-r--r-- | include/linux/tracepoint.h | 116 |
18 files changed, 670 insertions, 184 deletions
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 6e915878e88c..d960889e92ef 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h | |||
| @@ -144,6 +144,9 @@ struct blk_user_trace_setup { | |||
| 144 | 144 | ||
| 145 | #ifdef __KERNEL__ | 145 | #ifdef __KERNEL__ |
| 146 | #if defined(CONFIG_BLK_DEV_IO_TRACE) | 146 | #if defined(CONFIG_BLK_DEV_IO_TRACE) |
| 147 | |||
| 148 | #include <linux/sysfs.h> | ||
| 149 | |||
| 147 | struct blk_trace { | 150 | struct blk_trace { |
| 148 | int trace_state; | 151 | int trace_state; |
| 149 | struct rchan *rchan; | 152 | struct rchan *rchan; |
| @@ -194,6 +197,8 @@ extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, | |||
| 194 | extern int blk_trace_startstop(struct request_queue *q, int start); | 197 | extern int blk_trace_startstop(struct request_queue *q, int start); |
| 195 | extern int blk_trace_remove(struct request_queue *q); | 198 | extern int blk_trace_remove(struct request_queue *q); |
| 196 | 199 | ||
| 200 | extern struct attribute_group blk_trace_attr_group; | ||
| 201 | |||
| 197 | #else /* !CONFIG_BLK_DEV_IO_TRACE */ | 202 | #else /* !CONFIG_BLK_DEV_IO_TRACE */ |
| 198 | #define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) | 203 | #define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) |
| 199 | #define blk_trace_shutdown(q) do { } while (0) | 204 | #define blk_trace_shutdown(q) do { } while (0) |
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index d95da1020f1c..6faa7e549de4 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
| @@ -68,6 +68,7 @@ struct ftrace_branch_data { | |||
| 68 | unsigned long miss; | 68 | unsigned long miss; |
| 69 | unsigned long hit; | 69 | unsigned long hit; |
| 70 | }; | 70 | }; |
| 71 | unsigned long miss_hit[2]; | ||
| 71 | }; | 72 | }; |
| 72 | }; | 73 | }; |
| 73 | 74 | ||
| @@ -125,10 +126,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); | |||
| 125 | .line = __LINE__, \ | 126 | .line = __LINE__, \ |
| 126 | }; \ | 127 | }; \ |
| 127 | ______r = !!(cond); \ | 128 | ______r = !!(cond); \ |
| 128 | if (______r) \ | 129 | ______f.miss_hit[______r]++; \ |
| 129 | ______f.hit++; \ | ||
| 130 | else \ | ||
| 131 | ______f.miss++; \ | ||
| 132 | ______r; \ | 130 | ______r; \ |
| 133 | })) | 131 | })) |
| 134 | #endif /* CONFIG_PROFILE_ALL_BRANCHES */ | 132 | #endif /* CONFIG_PROFILE_ALL_BRANCHES */ |
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index af0e01d4c663..eb5c2ba2f81a 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h | |||
| @@ -71,6 +71,9 @@ struct dentry *debugfs_create_bool(const char *name, mode_t mode, | |||
| 71 | struct dentry *debugfs_create_blob(const char *name, mode_t mode, | 71 | struct dentry *debugfs_create_blob(const char *name, mode_t mode, |
| 72 | struct dentry *parent, | 72 | struct dentry *parent, |
| 73 | struct debugfs_blob_wrapper *blob); | 73 | struct debugfs_blob_wrapper *blob); |
| 74 | |||
| 75 | bool debugfs_initialized(void); | ||
| 76 | |||
| 74 | #else | 77 | #else |
| 75 | 78 | ||
| 76 | #include <linux/err.h> | 79 | #include <linux/err.h> |
| @@ -183,6 +186,11 @@ static inline struct dentry *debugfs_create_blob(const char *name, mode_t mode, | |||
| 183 | return ERR_PTR(-ENODEV); | 186 | return ERR_PTR(-ENODEV); |
| 184 | } | 187 | } |
| 185 | 188 | ||
| 189 | static inline bool debugfs_initialized(void) | ||
| 190 | { | ||
| 191 | return false; | ||
| 192 | } | ||
| 193 | |||
| 186 | #endif | 194 | #endif |
| 187 | 195 | ||
| 188 | #endif | 196 | #endif |
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index a7f8134c594e..015a3d22cf74 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
| @@ -1,15 +1,18 @@ | |||
| 1 | #ifndef _LINUX_FTRACE_H | 1 | #ifndef _LINUX_FTRACE_H |
| 2 | #define _LINUX_FTRACE_H | 2 | #define _LINUX_FTRACE_H |
| 3 | 3 | ||
| 4 | #include <linux/linkage.h> | 4 | #include <linux/trace_clock.h> |
| 5 | #include <linux/fs.h> | ||
| 6 | #include <linux/ktime.h> | ||
| 7 | #include <linux/init.h> | ||
| 8 | #include <linux/types.h> | ||
| 9 | #include <linux/module.h> | ||
| 10 | #include <linux/kallsyms.h> | 5 | #include <linux/kallsyms.h> |
| 6 | #include <linux/linkage.h> | ||
| 11 | #include <linux/bitops.h> | 7 | #include <linux/bitops.h> |
| 8 | #include <linux/module.h> | ||
| 9 | #include <linux/ktime.h> | ||
| 12 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
| 11 | #include <linux/types.h> | ||
| 12 | #include <linux/init.h> | ||
| 13 | #include <linux/fs.h> | ||
| 14 | |||
| 15 | #include <asm/ftrace.h> | ||
| 13 | 16 | ||
| 14 | #ifdef CONFIG_FUNCTION_TRACER | 17 | #ifdef CONFIG_FUNCTION_TRACER |
| 15 | 18 | ||
| @@ -95,9 +98,41 @@ stack_trace_sysctl(struct ctl_table *table, int write, | |||
| 95 | loff_t *ppos); | 98 | loff_t *ppos); |
| 96 | #endif | 99 | #endif |
| 97 | 100 | ||
| 101 | struct ftrace_func_command { | ||
| 102 | struct list_head list; | ||
| 103 | char *name; | ||
| 104 | int (*func)(char *func, char *cmd, | ||
| 105 | char *params, int enable); | ||
| 106 | }; | ||
| 107 | |||
| 98 | #ifdef CONFIG_DYNAMIC_FTRACE | 108 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 99 | /* asm/ftrace.h must be defined for archs supporting dynamic ftrace */ | 109 | |
| 100 | #include <asm/ftrace.h> | 110 | int ftrace_arch_code_modify_prepare(void); |
| 111 | int ftrace_arch_code_modify_post_process(void); | ||
| 112 | |||
| 113 | struct seq_file; | ||
| 114 | |||
| 115 | struct ftrace_probe_ops { | ||
| 116 | void (*func)(unsigned long ip, | ||
| 117 | unsigned long parent_ip, | ||
| 118 | void **data); | ||
| 119 | int (*callback)(unsigned long ip, void **data); | ||
| 120 | void (*free)(void **data); | ||
| 121 | int (*print)(struct seq_file *m, | ||
| 122 | unsigned long ip, | ||
| 123 | struct ftrace_probe_ops *ops, | ||
| 124 | void *data); | ||
| 125 | }; | ||
| 126 | |||
| 127 | extern int | ||
| 128 | register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | ||
| 129 | void *data); | ||
| 130 | extern void | ||
| 131 | unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | ||
| 132 | void *data); | ||
| 133 | extern void | ||
| 134 | unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops); | ||
| 135 | extern void unregister_ftrace_function_probe_all(char *glob); | ||
| 101 | 136 | ||
| 102 | enum { | 137 | enum { |
| 103 | FTRACE_FL_FREE = (1 << 0), | 138 | FTRACE_FL_FREE = (1 << 0), |
| @@ -110,15 +145,23 @@ enum { | |||
| 110 | }; | 145 | }; |
| 111 | 146 | ||
| 112 | struct dyn_ftrace { | 147 | struct dyn_ftrace { |
| 113 | struct list_head list; | 148 | union { |
| 114 | unsigned long ip; /* address of mcount call-site */ | 149 | unsigned long ip; /* address of mcount call-site */ |
| 115 | unsigned long flags; | 150 | struct dyn_ftrace *freelist; |
| 116 | struct dyn_arch_ftrace arch; | 151 | }; |
| 152 | union { | ||
| 153 | unsigned long flags; | ||
| 154 | struct dyn_ftrace *newlist; | ||
| 155 | }; | ||
| 156 | struct dyn_arch_ftrace arch; | ||
| 117 | }; | 157 | }; |
| 118 | 158 | ||
| 119 | int ftrace_force_update(void); | 159 | int ftrace_force_update(void); |
| 120 | void ftrace_set_filter(unsigned char *buf, int len, int reset); | 160 | void ftrace_set_filter(unsigned char *buf, int len, int reset); |
| 121 | 161 | ||
| 162 | int register_ftrace_command(struct ftrace_func_command *cmd); | ||
| 163 | int unregister_ftrace_command(struct ftrace_func_command *cmd); | ||
| 164 | |||
| 122 | /* defined in arch */ | 165 | /* defined in arch */ |
| 123 | extern int ftrace_ip_converted(unsigned long ip); | 166 | extern int ftrace_ip_converted(unsigned long ip); |
| 124 | extern int ftrace_dyn_arch_init(void *data); | 167 | extern int ftrace_dyn_arch_init(void *data); |
| @@ -126,6 +169,10 @@ extern int ftrace_update_ftrace_func(ftrace_func_t func); | |||
| 126 | extern void ftrace_caller(void); | 169 | extern void ftrace_caller(void); |
| 127 | extern void ftrace_call(void); | 170 | extern void ftrace_call(void); |
| 128 | extern void mcount_call(void); | 171 | extern void mcount_call(void); |
| 172 | |||
| 173 | #ifndef FTRACE_ADDR | ||
| 174 | #define FTRACE_ADDR ((unsigned long)ftrace_caller) | ||
| 175 | #endif | ||
| 129 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 176 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 130 | extern void ftrace_graph_caller(void); | 177 | extern void ftrace_graph_caller(void); |
| 131 | extern int ftrace_enable_ftrace_graph_caller(void); | 178 | extern int ftrace_enable_ftrace_graph_caller(void); |
| @@ -136,7 +183,7 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; } | |||
| 136 | #endif | 183 | #endif |
| 137 | 184 | ||
| 138 | /** | 185 | /** |
| 139 | * ftrace_make_nop - convert code into top | 186 | * ftrace_make_nop - convert code into nop |
| 140 | * @mod: module structure if called by module load initialization | 187 | * @mod: module structure if called by module load initialization |
| 141 | * @rec: the mcount call site record | 188 | * @rec: the mcount call site record |
| 142 | * @addr: the address that the call site should be calling | 189 | * @addr: the address that the call site should be calling |
| @@ -181,7 +228,6 @@ extern int ftrace_make_nop(struct module *mod, | |||
| 181 | */ | 228 | */ |
| 182 | extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); | 229 | extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); |
| 183 | 230 | ||
| 184 | |||
| 185 | /* May be defined in arch */ | 231 | /* May be defined in arch */ |
| 186 | extern int ftrace_arch_read_dyn_info(char *buf, int size); | 232 | extern int ftrace_arch_read_dyn_info(char *buf, int size); |
| 187 | 233 | ||
| @@ -198,6 +244,14 @@ extern void ftrace_enable_daemon(void); | |||
| 198 | # define ftrace_disable_daemon() do { } while (0) | 244 | # define ftrace_disable_daemon() do { } while (0) |
| 199 | # define ftrace_enable_daemon() do { } while (0) | 245 | # define ftrace_enable_daemon() do { } while (0) |
| 200 | static inline void ftrace_release(void *start, unsigned long size) { } | 246 | static inline void ftrace_release(void *start, unsigned long size) { } |
| 247 | static inline int register_ftrace_command(struct ftrace_func_command *cmd) | ||
| 248 | { | ||
| 249 | return -EINVAL; | ||
| 250 | } | ||
| 251 | static inline int unregister_ftrace_command(char *cmd_name) | ||
| 252 | { | ||
| 253 | return -EINVAL; | ||
| 254 | } | ||
| 201 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 255 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
| 202 | 256 | ||
| 203 | /* totally disable ftrace - can not re-enable after this */ | 257 | /* totally disable ftrace - can not re-enable after this */ |
| @@ -233,24 +287,25 @@ static inline void __ftrace_enabled_restore(int enabled) | |||
| 233 | #endif | 287 | #endif |
| 234 | } | 288 | } |
| 235 | 289 | ||
| 236 | #ifdef CONFIG_FRAME_POINTER | 290 | #ifndef HAVE_ARCH_CALLER_ADDR |
| 237 | /* TODO: need to fix this for ARM */ | 291 | # ifdef CONFIG_FRAME_POINTER |
| 238 | # define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) | 292 | # define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) |
| 239 | # define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1)) | 293 | # define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1)) |
| 240 | # define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2)) | 294 | # define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2)) |
| 241 | # define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3)) | 295 | # define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3)) |
| 242 | # define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4)) | 296 | # define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4)) |
| 243 | # define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5)) | 297 | # define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5)) |
| 244 | # define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6)) | 298 | # define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6)) |
| 245 | #else | 299 | # else |
| 246 | # define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) | 300 | # define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) |
| 247 | # define CALLER_ADDR1 0UL | 301 | # define CALLER_ADDR1 0UL |
| 248 | # define CALLER_ADDR2 0UL | 302 | # define CALLER_ADDR2 0UL |
| 249 | # define CALLER_ADDR3 0UL | 303 | # define CALLER_ADDR3 0UL |
| 250 | # define CALLER_ADDR4 0UL | 304 | # define CALLER_ADDR4 0UL |
| 251 | # define CALLER_ADDR5 0UL | 305 | # define CALLER_ADDR5 0UL |
| 252 | # define CALLER_ADDR6 0UL | 306 | # define CALLER_ADDR6 0UL |
| 253 | #endif | 307 | # endif |
| 308 | #endif /* ifndef HAVE_ARCH_CALLER_ADDR */ | ||
| 254 | 309 | ||
| 255 | #ifdef CONFIG_IRQSOFF_TRACER | 310 | #ifdef CONFIG_IRQSOFF_TRACER |
| 256 | extern void time_hardirqs_on(unsigned long a0, unsigned long a1); | 311 | extern void time_hardirqs_on(unsigned long a0, unsigned long a1); |
| @@ -268,54 +323,6 @@ static inline void __ftrace_enabled_restore(int enabled) | |||
| 268 | # define trace_preempt_off(a0, a1) do { } while (0) | 323 | # define trace_preempt_off(a0, a1) do { } while (0) |
| 269 | #endif | 324 | #endif |
| 270 | 325 | ||
| 271 | #ifdef CONFIG_TRACING | ||
| 272 | extern int ftrace_dump_on_oops; | ||
| 273 | |||
| 274 | extern void tracing_start(void); | ||
| 275 | extern void tracing_stop(void); | ||
| 276 | extern void ftrace_off_permanent(void); | ||
| 277 | |||
| 278 | extern void | ||
| 279 | ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3); | ||
| 280 | |||
| 281 | /** | ||
| 282 | * ftrace_printk - printf formatting in the ftrace buffer | ||
| 283 | * @fmt: the printf format for printing | ||
| 284 | * | ||
| 285 | * Note: __ftrace_printk is an internal function for ftrace_printk and | ||
| 286 | * the @ip is passed in via the ftrace_printk macro. | ||
| 287 | * | ||
| 288 | * This function allows a kernel developer to debug fast path sections | ||
| 289 | * that printk is not appropriate for. By scattering in various | ||
| 290 | * printk like tracing in the code, a developer can quickly see | ||
| 291 | * where problems are occurring. | ||
| 292 | * | ||
| 293 | * This is intended as a debugging tool for the developer only. | ||
| 294 | * Please refrain from leaving ftrace_printks scattered around in | ||
| 295 | * your code. | ||
| 296 | */ | ||
| 297 | # define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt) | ||
| 298 | extern int | ||
| 299 | __ftrace_printk(unsigned long ip, const char *fmt, ...) | ||
| 300 | __attribute__ ((format (printf, 2, 3))); | ||
| 301 | extern void ftrace_dump(void); | ||
| 302 | #else | ||
| 303 | static inline void | ||
| 304 | ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { } | ||
| 305 | static inline int | ||
| 306 | ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); | ||
| 307 | |||
| 308 | static inline void tracing_start(void) { } | ||
| 309 | static inline void tracing_stop(void) { } | ||
| 310 | static inline void ftrace_off_permanent(void) { } | ||
| 311 | static inline int | ||
| 312 | ftrace_printk(const char *fmt, ...) | ||
| 313 | { | ||
| 314 | return 0; | ||
| 315 | } | ||
| 316 | static inline void ftrace_dump(void) { } | ||
| 317 | #endif | ||
| 318 | |||
| 319 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD | 326 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD |
| 320 | extern void ftrace_init(void); | 327 | extern void ftrace_init(void); |
| 321 | extern void ftrace_init_module(struct module *mod, | 328 | extern void ftrace_init_module(struct module *mod, |
| @@ -327,36 +334,6 @@ ftrace_init_module(struct module *mod, | |||
| 327 | unsigned long *start, unsigned long *end) { } | 334 | unsigned long *start, unsigned long *end) { } |
| 328 | #endif | 335 | #endif |
| 329 | 336 | ||
| 330 | enum { | ||
| 331 | POWER_NONE = 0, | ||
| 332 | POWER_CSTATE = 1, | ||
| 333 | POWER_PSTATE = 2, | ||
| 334 | }; | ||
| 335 | |||
| 336 | struct power_trace { | ||
| 337 | #ifdef CONFIG_POWER_TRACER | ||
| 338 | ktime_t stamp; | ||
| 339 | ktime_t end; | ||
| 340 | int type; | ||
| 341 | int state; | ||
| 342 | #endif | ||
| 343 | }; | ||
| 344 | |||
| 345 | #ifdef CONFIG_POWER_TRACER | ||
| 346 | extern void trace_power_start(struct power_trace *it, unsigned int type, | ||
| 347 | unsigned int state); | ||
| 348 | extern void trace_power_mark(struct power_trace *it, unsigned int type, | ||
| 349 | unsigned int state); | ||
| 350 | extern void trace_power_end(struct power_trace *it); | ||
| 351 | #else | ||
| 352 | static inline void trace_power_start(struct power_trace *it, unsigned int type, | ||
| 353 | unsigned int state) { } | ||
| 354 | static inline void trace_power_mark(struct power_trace *it, unsigned int type, | ||
| 355 | unsigned int state) { } | ||
| 356 | static inline void trace_power_end(struct power_trace *it) { } | ||
| 357 | #endif | ||
| 358 | |||
| 359 | |||
| 360 | /* | 337 | /* |
| 361 | * Structure that defines an entry function trace. | 338 | * Structure that defines an entry function trace. |
| 362 | */ | 339 | */ |
| @@ -398,8 +375,7 @@ struct ftrace_ret_stack { | |||
| 398 | extern void return_to_handler(void); | 375 | extern void return_to_handler(void); |
| 399 | 376 | ||
| 400 | extern int | 377 | extern int |
| 401 | ftrace_push_return_trace(unsigned long ret, unsigned long long time, | 378 | ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth); |
| 402 | unsigned long func, int *depth); | ||
| 403 | extern void | 379 | extern void |
| 404 | ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret); | 380 | ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret); |
| 405 | 381 | ||
| @@ -514,6 +490,50 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk) | |||
| 514 | return tsk->trace & TSK_TRACE_FL_GRAPH; | 490 | return tsk->trace & TSK_TRACE_FL_GRAPH; |
| 515 | } | 491 | } |
| 516 | 492 | ||
| 493 | extern int ftrace_dump_on_oops; | ||
| 494 | |||
| 517 | #endif /* CONFIG_TRACING */ | 495 | #endif /* CONFIG_TRACING */ |
| 518 | 496 | ||
| 497 | |||
| 498 | #ifdef CONFIG_HW_BRANCH_TRACER | ||
| 499 | |||
| 500 | void trace_hw_branch(u64 from, u64 to); | ||
| 501 | void trace_hw_branch_oops(void); | ||
| 502 | |||
| 503 | #else /* CONFIG_HW_BRANCH_TRACER */ | ||
| 504 | |||
| 505 | static inline void trace_hw_branch(u64 from, u64 to) {} | ||
| 506 | static inline void trace_hw_branch_oops(void) {} | ||
| 507 | |||
| 508 | #endif /* CONFIG_HW_BRANCH_TRACER */ | ||
| 509 | |||
| 510 | /* | ||
| 511 | * A syscall entry in the ftrace syscalls array. | ||
| 512 | * | ||
| 513 | * @name: name of the syscall | ||
| 514 | * @nb_args: number of parameters it takes | ||
| 515 | * @types: list of types as strings | ||
| 516 | * @args: list of args as strings (args[i] matches types[i]) | ||
| 517 | */ | ||
| 518 | struct syscall_metadata { | ||
| 519 | const char *name; | ||
| 520 | int nb_args; | ||
| 521 | const char **types; | ||
| 522 | const char **args; | ||
| 523 | }; | ||
| 524 | |||
| 525 | #ifdef CONFIG_FTRACE_SYSCALLS | ||
| 526 | extern void arch_init_ftrace_syscalls(void); | ||
| 527 | extern struct syscall_metadata *syscall_nr_to_meta(int nr); | ||
| 528 | extern void start_ftrace_syscalls(void); | ||
| 529 | extern void stop_ftrace_syscalls(void); | ||
| 530 | extern void ftrace_syscall_enter(struct pt_regs *regs); | ||
| 531 | extern void ftrace_syscall_exit(struct pt_regs *regs); | ||
| 532 | #else | ||
| 533 | static inline void start_ftrace_syscalls(void) { } | ||
| 534 | static inline void stop_ftrace_syscalls(void) { } | ||
| 535 | static inline void ftrace_syscall_enter(struct pt_regs *regs) { } | ||
| 536 | static inline void ftrace_syscall_exit(struct pt_regs *regs) { } | ||
| 537 | #endif | ||
| 538 | |||
| 519 | #endif /* _LINUX_FTRACE_H */ | 539 | #endif /* _LINUX_FTRACE_H */ |
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h index 366a054d0b05..dca7bf8cffe2 100644 --- a/include/linux/ftrace_irq.h +++ b/include/linux/ftrace_irq.h | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | #define _LINUX_FTRACE_IRQ_H | 2 | #define _LINUX_FTRACE_IRQ_H |
| 3 | 3 | ||
| 4 | 4 | ||
| 5 | #if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER) | 5 | #ifdef CONFIG_FTRACE_NMI_ENTER |
| 6 | extern void ftrace_nmi_enter(void); | 6 | extern void ftrace_nmi_enter(void); |
| 7 | extern void ftrace_nmi_exit(void); | 7 | extern void ftrace_nmi_exit(void); |
| 8 | #else | 8 | #else |
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index f83288347dda..faa1cf848bcd 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h | |||
| @@ -15,55 +15,61 @@ | |||
| 15 | * - bits 0-7 are the preemption count (max preemption depth: 256) | 15 | * - bits 0-7 are the preemption count (max preemption depth: 256) |
| 16 | * - bits 8-15 are the softirq count (max # of softirqs: 256) | 16 | * - bits 8-15 are the softirq count (max # of softirqs: 256) |
| 17 | * | 17 | * |
| 18 | * The hardirq count can be overridden per architecture, the default is: | 18 | * The hardirq count can in theory reach the same as NR_IRQS. |
| 19 | * In reality, the number of nested IRQS is limited to the stack | ||
| 20 | * size as well. For archs with over 1000 IRQS it is not practical | ||
| 21 | * to expect that they will all nest. We give a max of 10 bits for | ||
| 22 | * hardirq nesting. An arch may choose to give less than 10 bits. | ||
| 23 | * m68k expects it to be 8. | ||
| 19 | * | 24 | * |
| 20 | * - bits 16-27 are the hardirq count (max # of hardirqs: 4096) | 25 | * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024) |
| 21 | * - ( bit 28 is the PREEMPT_ACTIVE flag. ) | 26 | * - bit 26 is the NMI_MASK |
| 27 | * - bit 28 is the PREEMPT_ACTIVE flag | ||
| 22 | * | 28 | * |
| 23 | * PREEMPT_MASK: 0x000000ff | 29 | * PREEMPT_MASK: 0x000000ff |
| 24 | * SOFTIRQ_MASK: 0x0000ff00 | 30 | * SOFTIRQ_MASK: 0x0000ff00 |
| 25 | * HARDIRQ_MASK: 0x0fff0000 | 31 | * HARDIRQ_MASK: 0x03ff0000 |
| 32 | * NMI_MASK: 0x04000000 | ||
| 26 | */ | 33 | */ |
| 27 | #define PREEMPT_BITS 8 | 34 | #define PREEMPT_BITS 8 |
| 28 | #define SOFTIRQ_BITS 8 | 35 | #define SOFTIRQ_BITS 8 |
| 36 | #define NMI_BITS 1 | ||
| 29 | 37 | ||
| 30 | #ifndef HARDIRQ_BITS | 38 | #define MAX_HARDIRQ_BITS 10 |
| 31 | #define HARDIRQ_BITS 12 | ||
| 32 | 39 | ||
| 33 | #ifndef MAX_HARDIRQS_PER_CPU | 40 | #ifndef HARDIRQ_BITS |
| 34 | #define MAX_HARDIRQS_PER_CPU NR_IRQS | 41 | # define HARDIRQ_BITS MAX_HARDIRQ_BITS |
| 35 | #endif | 42 | #endif |
| 36 | 43 | ||
| 37 | /* | 44 | #if HARDIRQ_BITS > MAX_HARDIRQ_BITS |
| 38 | * The hardirq mask has to be large enough to have space for potentially | 45 | #error HARDIRQ_BITS too high! |
| 39 | * all IRQ sources in the system nesting on a single CPU. | ||
| 40 | */ | ||
| 41 | #if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU | ||
| 42 | # error HARDIRQ_BITS is too low! | ||
| 43 | #endif | ||
| 44 | #endif | 46 | #endif |
| 45 | 47 | ||
| 46 | #define PREEMPT_SHIFT 0 | 48 | #define PREEMPT_SHIFT 0 |
| 47 | #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) | 49 | #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) |
| 48 | #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) | 50 | #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) |
| 51 | #define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS) | ||
| 49 | 52 | ||
| 50 | #define __IRQ_MASK(x) ((1UL << (x))-1) | 53 | #define __IRQ_MASK(x) ((1UL << (x))-1) |
| 51 | 54 | ||
| 52 | #define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) | 55 | #define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) |
| 53 | #define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) | 56 | #define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) |
| 54 | #define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) | 57 | #define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) |
| 58 | #define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT) | ||
| 55 | 59 | ||
| 56 | #define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) | 60 | #define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) |
| 57 | #define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) | 61 | #define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) |
| 58 | #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) | 62 | #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) |
| 63 | #define NMI_OFFSET (1UL << NMI_SHIFT) | ||
| 59 | 64 | ||
| 60 | #if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS)) | 65 | #if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS)) |
| 61 | #error PREEMPT_ACTIVE is too low! | 66 | #error PREEMPT_ACTIVE is too low! |
| 62 | #endif | 67 | #endif |
| 63 | 68 | ||
| 64 | #define hardirq_count() (preempt_count() & HARDIRQ_MASK) | 69 | #define hardirq_count() (preempt_count() & HARDIRQ_MASK) |
| 65 | #define softirq_count() (preempt_count() & SOFTIRQ_MASK) | 70 | #define softirq_count() (preempt_count() & SOFTIRQ_MASK) |
| 66 | #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK)) | 71 | #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ |
| 72 | | NMI_MASK)) | ||
| 67 | 73 | ||
| 68 | /* | 74 | /* |
| 69 | * Are we doing bottom half or hardware interrupt processing? | 75 | * Are we doing bottom half or hardware interrupt processing? |
| @@ -73,6 +79,11 @@ | |||
| 73 | #define in_softirq() (softirq_count()) | 79 | #define in_softirq() (softirq_count()) |
| 74 | #define in_interrupt() (irq_count()) | 80 | #define in_interrupt() (irq_count()) |
| 75 | 81 | ||
| 82 | /* | ||
| 83 | * Are we in NMI context? | ||
| 84 | */ | ||
| 85 | #define in_nmi() (preempt_count() & NMI_MASK) | ||
| 86 | |||
| 76 | #if defined(CONFIG_PREEMPT) | 87 | #if defined(CONFIG_PREEMPT) |
| 77 | # define PREEMPT_INATOMIC_BASE kernel_locked() | 88 | # define PREEMPT_INATOMIC_BASE kernel_locked() |
| 78 | # define PREEMPT_CHECK_OFFSET 1 | 89 | # define PREEMPT_CHECK_OFFSET 1 |
| @@ -164,20 +175,24 @@ extern void irq_enter(void); | |||
| 164 | */ | 175 | */ |
| 165 | extern void irq_exit(void); | 176 | extern void irq_exit(void); |
| 166 | 177 | ||
| 167 | #define nmi_enter() \ | 178 | #define nmi_enter() \ |
| 168 | do { \ | 179 | do { \ |
| 169 | ftrace_nmi_enter(); \ | 180 | ftrace_nmi_enter(); \ |
| 170 | lockdep_off(); \ | 181 | BUG_ON(in_nmi()); \ |
| 171 | rcu_nmi_enter(); \ | 182 | add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ |
| 172 | __irq_enter(); \ | 183 | lockdep_off(); \ |
| 184 | rcu_nmi_enter(); \ | ||
| 185 | trace_hardirq_enter(); \ | ||
| 173 | } while (0) | 186 | } while (0) |
| 174 | 187 | ||
| 175 | #define nmi_exit() \ | 188 | #define nmi_exit() \ |
| 176 | do { \ | 189 | do { \ |
| 177 | __irq_exit(); \ | 190 | trace_hardirq_exit(); \ |
| 178 | rcu_nmi_exit(); \ | 191 | rcu_nmi_exit(); \ |
| 179 | lockdep_on(); \ | 192 | lockdep_on(); \ |
| 180 | ftrace_nmi_exit(); \ | 193 | BUG_ON(!in_nmi()); \ |
| 194 | sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ | ||
| 195 | ftrace_nmi_exit(); \ | ||
| 181 | } while (0) | 196 | } while (0) |
| 182 | 197 | ||
| 183 | #endif /* LINUX_HARDIRQ_H */ | 198 | #endif /* LINUX_HARDIRQ_H */ |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index c68bffd182bb..ce2c07d99fc3 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
| @@ -278,6 +278,11 @@ enum | |||
| 278 | NR_SOFTIRQS | 278 | NR_SOFTIRQS |
| 279 | }; | 279 | }; |
| 280 | 280 | ||
| 281 | /* map softirq index to softirq name. update 'softirq_to_name' in | ||
| 282 | * kernel/softirq.c when adding a new softirq. | ||
| 283 | */ | ||
| 284 | extern char *softirq_to_name[NR_SOFTIRQS]; | ||
| 285 | |||
| 281 | /* softirq mask and active fields moved to irq_cpustat_t in | 286 | /* softirq mask and active fields moved to irq_cpustat_t in |
| 282 | * asm/hardirq.h to get better cache usage. KAO | 287 | * asm/hardirq.h to get better cache usage. KAO |
| 283 | */ | 288 | */ |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index cff58e288a22..d9e75ec7def5 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
| @@ -392,6 +392,139 @@ static inline char *pack_hex_byte(char *buf, u8 byte) | |||
| 392 | #endif | 392 | #endif |
| 393 | 393 | ||
| 394 | /* | 394 | /* |
| 395 | * General tracing related utility functions - trace_printk(), | ||
| 396 | * tracing_on/tracing_off and tracing_start()/tracing_stop | ||
| 397 | * | ||
| 398 | * Use tracing_on/tracing_off when you want to quickly turn on or off | ||
| 399 | * tracing. It simply enables or disables the recording of the trace events. | ||
| 400 | * This also corresponds to the user space debugfs/tracing/tracing_on | ||
| 401 | * file, which gives a means for the kernel and userspace to interact. | ||
| 402 | * Place a tracing_off() in the kernel where you want tracing to end. | ||
| 403 | * From user space, examine the trace, and then echo 1 > tracing_on | ||
| 404 | * to continue tracing. | ||
| 405 | * | ||
| 406 | * tracing_stop/tracing_start has slightly more overhead. It is used | ||
| 407 | * by things like suspend to ram where disabling the recording of the | ||
| 408 | * trace is not enough, but tracing must actually stop because things | ||
| 409 | * like calling smp_processor_id() may crash the system. | ||
| 410 | * | ||
| 411 | * Most likely, you want to use tracing_on/tracing_off. | ||
| 412 | */ | ||
| 413 | #ifdef CONFIG_RING_BUFFER | ||
| 414 | void tracing_on(void); | ||
| 415 | void tracing_off(void); | ||
| 416 | /* trace_off_permanent stops recording with no way to bring it back */ | ||
| 417 | void tracing_off_permanent(void); | ||
| 418 | int tracing_is_on(void); | ||
| 419 | #else | ||
| 420 | static inline void tracing_on(void) { } | ||
| 421 | static inline void tracing_off(void) { } | ||
| 422 | static inline void tracing_off_permanent(void) { } | ||
| 423 | static inline int tracing_is_on(void) { return 0; } | ||
| 424 | #endif | ||
| 425 | #ifdef CONFIG_TRACING | ||
| 426 | extern void tracing_start(void); | ||
| 427 | extern void tracing_stop(void); | ||
| 428 | extern void ftrace_off_permanent(void); | ||
| 429 | |||
| 430 | extern void | ||
| 431 | ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3); | ||
| 432 | |||
| 433 | static inline void __attribute__ ((format (printf, 1, 2))) | ||
| 434 | ____trace_printk_check_format(const char *fmt, ...) | ||
| 435 | { | ||
| 436 | } | ||
| 437 | #define __trace_printk_check_format(fmt, args...) \ | ||
| 438 | do { \ | ||
| 439 | if (0) \ | ||
| 440 | ____trace_printk_check_format(fmt, ##args); \ | ||
| 441 | } while (0) | ||
| 442 | |||
| 443 | /** | ||
| 444 | * trace_printk - printf formatting in the ftrace buffer | ||
| 445 | * @fmt: the printf format for printing | ||
| 446 | * | ||
| 447 | * Note: __trace_printk is an internal function for trace_printk and | ||
| 448 | * the @ip is passed in via the trace_printk macro. | ||
| 449 | * | ||
| 450 | * This function allows a kernel developer to debug fast path sections | ||
| 451 | * that printk is not appropriate for. By scattering in various | ||
| 452 | * printk like tracing in the code, a developer can quickly see | ||
| 453 | * where problems are occurring. | ||
| 454 | * | ||
| 455 | * This is intended as a debugging tool for the developer only. | ||
| 456 | * Please refrain from leaving trace_printks scattered around in | ||
| 457 | * your code. | ||
| 458 | */ | ||
| 459 | |||
| 460 | #define trace_printk(fmt, args...) \ | ||
| 461 | do { \ | ||
| 462 | __trace_printk_check_format(fmt, ##args); \ | ||
| 463 | if (__builtin_constant_p(fmt)) { \ | ||
| 464 | static const char *trace_printk_fmt \ | ||
| 465 | __attribute__((section("__trace_printk_fmt"))) = \ | ||
| 466 | __builtin_constant_p(fmt) ? fmt : NULL; \ | ||
| 467 | \ | ||
| 468 | __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \ | ||
| 469 | } else \ | ||
| 470 | __trace_printk(_THIS_IP_, fmt, ##args); \ | ||
| 471 | } while (0) | ||
| 472 | |||
| 473 | extern int | ||
| 474 | __trace_bprintk(unsigned long ip, const char *fmt, ...) | ||
| 475 | __attribute__ ((format (printf, 2, 3))); | ||
| 476 | |||
| 477 | extern int | ||
| 478 | __trace_printk(unsigned long ip, const char *fmt, ...) | ||
| 479 | __attribute__ ((format (printf, 2, 3))); | ||
| 480 | |||
| 481 | /* | ||
| 482 | * The double __builtin_constant_p is because gcc will give us an error | ||
| 483 | * if we try to allocate the static variable to fmt if it is not a | ||
| 484 | * constant. Even with the outer if statement. | ||
| 485 | */ | ||
| 486 | #define ftrace_vprintk(fmt, vargs) \ | ||
| 487 | do { \ | ||
| 488 | if (__builtin_constant_p(fmt)) { \ | ||
| 489 | static const char *trace_printk_fmt \ | ||
| 490 | __attribute__((section("__trace_printk_fmt"))) = \ | ||
| 491 | __builtin_constant_p(fmt) ? fmt : NULL; \ | ||
| 492 | \ | ||
| 493 | __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \ | ||
| 494 | } else \ | ||
| 495 | __ftrace_vprintk(_THIS_IP_, fmt, vargs); \ | ||
| 496 | } while (0) | ||
| 497 | |||
| 498 | extern int | ||
| 499 | __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap); | ||
| 500 | |||
| 501 | extern int | ||
| 502 | __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); | ||
| 503 | |||
| 504 | extern void ftrace_dump(void); | ||
| 505 | #else | ||
| 506 | static inline void | ||
| 507 | ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { } | ||
| 508 | static inline int | ||
| 509 | trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); | ||
| 510 | |||
| 511 | static inline void tracing_start(void) { } | ||
| 512 | static inline void tracing_stop(void) { } | ||
| 513 | static inline void ftrace_off_permanent(void) { } | ||
| 514 | static inline int | ||
| 515 | trace_printk(const char *fmt, ...) | ||
| 516 | { | ||
| 517 | return 0; | ||
| 518 | } | ||
| 519 | static inline int | ||
| 520 | ftrace_vprintk(const char *fmt, va_list ap) | ||
| 521 | { | ||
| 522 | return 0; | ||
| 523 | } | ||
| 524 | static inline void ftrace_dump(void) { } | ||
| 525 | #endif /* CONFIG_TRACING */ | ||
| 526 | |||
| 527 | /* | ||
| 395 | * Display an IP address in readable format. | 528 | * Display an IP address in readable format. |
| 396 | */ | 529 | */ |
| 397 | 530 | ||
diff --git a/include/linux/memory.h b/include/linux/memory.h index 42767d1a62e7..37fa19b34ef5 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h | |||
| @@ -110,4 +110,10 @@ struct memory_accessor { | |||
| 110 | off_t offset, size_t count); | 110 | off_t offset, size_t count); |
| 111 | }; | 111 | }; |
| 112 | 112 | ||
| 113 | /* | ||
| 114 | * Kernel text modification mutex, used for code patching. Users of this lock | ||
| 115 | * can sleep. | ||
| 116 | */ | ||
| 117 | extern struct mutex text_mutex; | ||
| 118 | |||
| 113 | #endif /* _LINUX_MEMORY_H_ */ | 119 | #endif /* _LINUX_MEMORY_H_ */ |
diff --git a/include/linux/module.h b/include/linux/module.h index d246da0b0f8c..627ac082e2a6 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
| @@ -333,6 +333,11 @@ struct module | |||
| 333 | unsigned int num_tracepoints; | 333 | unsigned int num_tracepoints; |
| 334 | #endif | 334 | #endif |
| 335 | 335 | ||
| 336 | #ifdef CONFIG_TRACING | ||
| 337 | const char **trace_bprintk_fmt_start; | ||
| 338 | unsigned int num_trace_bprintk_fmt; | ||
| 339 | #endif | ||
| 340 | |||
| 336 | #ifdef CONFIG_MODULE_UNLOAD | 341 | #ifdef CONFIG_MODULE_UNLOAD |
| 337 | /* What modules depend on me? */ | 342 | /* What modules depend on me? */ |
| 338 | struct list_head modules_which_use_me; | 343 | struct list_head modules_which_use_me; |
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index b3b359660082..e1b7b2173885 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h | |||
| @@ -8,7 +8,7 @@ struct ring_buffer; | |||
| 8 | struct ring_buffer_iter; | 8 | struct ring_buffer_iter; |
| 9 | 9 | ||
| 10 | /* | 10 | /* |
| 11 | * Don't reference this struct directly, use functions below. | 11 | * Don't refer to this struct directly, use functions below. |
| 12 | */ | 12 | */ |
| 13 | struct ring_buffer_event { | 13 | struct ring_buffer_event { |
| 14 | u32 type:2, len:3, time_delta:27; | 14 | u32 type:2, len:3, time_delta:27; |
| @@ -18,10 +18,13 @@ struct ring_buffer_event { | |||
| 18 | /** | 18 | /** |
| 19 | * enum ring_buffer_type - internal ring buffer types | 19 | * enum ring_buffer_type - internal ring buffer types |
| 20 | * | 20 | * |
| 21 | * @RINGBUF_TYPE_PADDING: Left over page padding | 21 | * @RINGBUF_TYPE_PADDING: Left over page padding or discarded event |
| 22 | * array is ignored | 22 | * If time_delta is 0: |
| 23 | * size is variable depending on how much | 23 | * array is ignored |
| 24 | * size is variable depending on how much | ||
| 24 | * padding is needed | 25 | * padding is needed |
| 26 | * If time_delta is non zero: | ||
| 27 | * everything else same as RINGBUF_TYPE_DATA | ||
| 25 | * | 28 | * |
| 26 | * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta | 29 | * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta |
| 27 | * array[0] = time delta (28 .. 59) | 30 | * array[0] = time delta (28 .. 59) |
| @@ -65,6 +68,8 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event) | |||
| 65 | return event->time_delta; | 68 | return event->time_delta; |
| 66 | } | 69 | } |
| 67 | 70 | ||
| 71 | void ring_buffer_event_discard(struct ring_buffer_event *event); | ||
| 72 | |||
| 68 | /* | 73 | /* |
| 69 | * size is in bytes for each per CPU buffer. | 74 | * size is in bytes for each per CPU buffer. |
| 70 | */ | 75 | */ |
| @@ -74,13 +79,10 @@ void ring_buffer_free(struct ring_buffer *buffer); | |||
| 74 | 79 | ||
| 75 | int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); | 80 | int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); |
| 76 | 81 | ||
| 77 | struct ring_buffer_event * | 82 | struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer, |
| 78 | ring_buffer_lock_reserve(struct ring_buffer *buffer, | 83 | unsigned long length); |
| 79 | unsigned long length, | ||
| 80 | unsigned long *flags); | ||
| 81 | int ring_buffer_unlock_commit(struct ring_buffer *buffer, | 84 | int ring_buffer_unlock_commit(struct ring_buffer *buffer, |
| 82 | struct ring_buffer_event *event, | 85 | struct ring_buffer_event *event); |
| 83 | unsigned long flags); | ||
| 84 | int ring_buffer_write(struct ring_buffer *buffer, | 86 | int ring_buffer_write(struct ring_buffer *buffer, |
| 85 | unsigned long length, void *data); | 87 | unsigned long length, void *data); |
| 86 | 88 | ||
| @@ -121,17 +123,19 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer); | |||
| 121 | unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); | 123 | unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); |
| 122 | unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); | 124 | unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); |
| 123 | 125 | ||
| 124 | u64 ring_buffer_time_stamp(int cpu); | 126 | u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); |
| 125 | void ring_buffer_normalize_time_stamp(int cpu, u64 *ts); | 127 | void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, |
| 128 | int cpu, u64 *ts); | ||
| 129 | void ring_buffer_set_clock(struct ring_buffer *buffer, | ||
| 130 | u64 (*clock)(void)); | ||
| 131 | |||
| 132 | size_t ring_buffer_page_len(void *page); | ||
| 126 | 133 | ||
| 127 | void tracing_on(void); | ||
| 128 | void tracing_off(void); | ||
| 129 | void tracing_off_permanent(void); | ||
| 130 | 134 | ||
| 131 | void *ring_buffer_alloc_read_page(struct ring_buffer *buffer); | 135 | void *ring_buffer_alloc_read_page(struct ring_buffer *buffer); |
| 132 | void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); | 136 | void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); |
| 133 | int ring_buffer_read_page(struct ring_buffer *buffer, | 137 | int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page, |
| 134 | void **data_page, int cpu, int full); | 138 | size_t len, int cpu, int full); |
| 135 | 139 | ||
| 136 | enum ring_buffer_flags { | 140 | enum ring_buffer_flags { |
| 137 | RB_FL_OVERWRITE = 1 << 0, | 141 | RB_FL_OVERWRITE = 1 << 0, |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 9da5aa0771ef..b94f3541f67b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -138,6 +138,8 @@ extern unsigned long nr_uninterruptible(void); | |||
| 138 | extern unsigned long nr_active(void); | 138 | extern unsigned long nr_active(void); |
| 139 | extern unsigned long nr_iowait(void); | 139 | extern unsigned long nr_iowait(void); |
| 140 | 140 | ||
| 141 | extern unsigned long get_parent_ip(unsigned long addr); | ||
| 142 | |||
| 141 | struct seq_file; | 143 | struct seq_file; |
| 142 | struct cfs_rq; | 144 | struct cfs_rq; |
| 143 | struct task_group; | 145 | struct task_group; |
| @@ -1405,6 +1407,8 @@ struct task_struct { | |||
| 1405 | int curr_ret_stack; | 1407 | int curr_ret_stack; |
| 1406 | /* Stack of return addresses for return function tracing */ | 1408 | /* Stack of return addresses for return function tracing */ |
| 1407 | struct ftrace_ret_stack *ret_stack; | 1409 | struct ftrace_ret_stack *ret_stack; |
| 1410 | /* time stamp for last schedule */ | ||
| 1411 | unsigned long long ftrace_timestamp; | ||
| 1408 | /* | 1412 | /* |
| 1409 | * Number of functions that haven't been traced | 1413 | * Number of functions that haven't been traced |
| 1410 | * because of depth overrun. | 1414 | * because of depth overrun. |
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 6ca6a7b66d75..f4523651fa42 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ | 14 | #include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ |
| 15 | #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ | 15 | #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ |
| 16 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
| 17 | #include <trace/kmemtrace.h> | ||
| 17 | 18 | ||
| 18 | /* Size description struct for general caches. */ | 19 | /* Size description struct for general caches. */ |
| 19 | struct cache_sizes { | 20 | struct cache_sizes { |
| @@ -28,8 +29,26 @@ extern struct cache_sizes malloc_sizes[]; | |||
| 28 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); | 29 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); |
| 29 | void *__kmalloc(size_t size, gfp_t flags); | 30 | void *__kmalloc(size_t size, gfp_t flags); |
| 30 | 31 | ||
| 31 | static inline void *kmalloc(size_t size, gfp_t flags) | 32 | #ifdef CONFIG_KMEMTRACE |
| 33 | extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags); | ||
| 34 | extern size_t slab_buffer_size(struct kmem_cache *cachep); | ||
| 35 | #else | ||
| 36 | static __always_inline void * | ||
| 37 | kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags) | ||
| 32 | { | 38 | { |
| 39 | return kmem_cache_alloc(cachep, flags); | ||
| 40 | } | ||
| 41 | static inline size_t slab_buffer_size(struct kmem_cache *cachep) | ||
| 42 | { | ||
| 43 | return 0; | ||
| 44 | } | ||
| 45 | #endif | ||
| 46 | |||
| 47 | static __always_inline void *kmalloc(size_t size, gfp_t flags) | ||
| 48 | { | ||
| 49 | struct kmem_cache *cachep; | ||
| 50 | void *ret; | ||
| 51 | |||
| 33 | if (__builtin_constant_p(size)) { | 52 | if (__builtin_constant_p(size)) { |
| 34 | int i = 0; | 53 | int i = 0; |
| 35 | 54 | ||
| @@ -47,10 +66,17 @@ static inline void *kmalloc(size_t size, gfp_t flags) | |||
| 47 | found: | 66 | found: |
| 48 | #ifdef CONFIG_ZONE_DMA | 67 | #ifdef CONFIG_ZONE_DMA |
| 49 | if (flags & GFP_DMA) | 68 | if (flags & GFP_DMA) |
| 50 | return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep, | 69 | cachep = malloc_sizes[i].cs_dmacachep; |
| 51 | flags); | 70 | else |
| 52 | #endif | 71 | #endif |
| 53 | return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags); | 72 | cachep = malloc_sizes[i].cs_cachep; |
| 73 | |||
| 74 | ret = kmem_cache_alloc_notrace(cachep, flags); | ||
| 75 | |||
| 76 | kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret, | ||
| 77 | size, slab_buffer_size(cachep), flags); | ||
| 78 | |||
| 79 | return ret; | ||
| 54 | } | 80 | } |
| 55 | return __kmalloc(size, flags); | 81 | return __kmalloc(size, flags); |
| 56 | } | 82 | } |
| @@ -59,8 +85,25 @@ found: | |||
| 59 | extern void *__kmalloc_node(size_t size, gfp_t flags, int node); | 85 | extern void *__kmalloc_node(size_t size, gfp_t flags, int node); |
| 60 | extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | 86 | extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); |
| 61 | 87 | ||
| 62 | static inline void *kmalloc_node(size_t size, gfp_t flags, int node) | 88 | #ifdef CONFIG_KMEMTRACE |
| 89 | extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep, | ||
| 90 | gfp_t flags, | ||
| 91 | int nodeid); | ||
| 92 | #else | ||
| 93 | static __always_inline void * | ||
| 94 | kmem_cache_alloc_node_notrace(struct kmem_cache *cachep, | ||
| 95 | gfp_t flags, | ||
| 96 | int nodeid) | ||
| 97 | { | ||
| 98 | return kmem_cache_alloc_node(cachep, flags, nodeid); | ||
| 99 | } | ||
| 100 | #endif | ||
| 101 | |||
| 102 | static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | ||
| 63 | { | 103 | { |
| 104 | struct kmem_cache *cachep; | ||
| 105 | void *ret; | ||
| 106 | |||
| 64 | if (__builtin_constant_p(size)) { | 107 | if (__builtin_constant_p(size)) { |
| 65 | int i = 0; | 108 | int i = 0; |
| 66 | 109 | ||
| @@ -78,11 +121,18 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
| 78 | found: | 121 | found: |
| 79 | #ifdef CONFIG_ZONE_DMA | 122 | #ifdef CONFIG_ZONE_DMA |
| 80 | if (flags & GFP_DMA) | 123 | if (flags & GFP_DMA) |
| 81 | return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep, | 124 | cachep = malloc_sizes[i].cs_dmacachep; |
| 82 | flags, node); | 125 | else |
| 83 | #endif | 126 | #endif |
| 84 | return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep, | 127 | cachep = malloc_sizes[i].cs_cachep; |
| 85 | flags, node); | 128 | |
| 129 | ret = kmem_cache_alloc_node_notrace(cachep, flags, node); | ||
| 130 | |||
| 131 | kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, | ||
| 132 | ret, size, slab_buffer_size(cachep), | ||
| 133 | flags, node); | ||
| 134 | |||
| 135 | return ret; | ||
| 86 | } | 136 | } |
| 87 | return __kmalloc_node(size, flags, node); | 137 | return __kmalloc_node(size, flags, node); |
| 88 | } | 138 | } |
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h index 59a3fa476ab9..0ec00b39d006 100644 --- a/include/linux/slob_def.h +++ b/include/linux/slob_def.h | |||
| @@ -3,14 +3,15 @@ | |||
| 3 | 3 | ||
| 4 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | 4 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); |
| 5 | 5 | ||
| 6 | static inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) | 6 | static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep, |
| 7 | gfp_t flags) | ||
| 7 | { | 8 | { |
| 8 | return kmem_cache_alloc_node(cachep, flags, -1); | 9 | return kmem_cache_alloc_node(cachep, flags, -1); |
| 9 | } | 10 | } |
| 10 | 11 | ||
| 11 | void *__kmalloc_node(size_t size, gfp_t flags, int node); | 12 | void *__kmalloc_node(size_t size, gfp_t flags, int node); |
| 12 | 13 | ||
| 13 | static inline void *kmalloc_node(size_t size, gfp_t flags, int node) | 14 | static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) |
| 14 | { | 15 | { |
| 15 | return __kmalloc_node(size, flags, node); | 16 | return __kmalloc_node(size, flags, node); |
| 16 | } | 17 | } |
| @@ -23,12 +24,12 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
| 23 | * kmalloc is the normal method of allocating memory | 24 | * kmalloc is the normal method of allocating memory |
| 24 | * in the kernel. | 25 | * in the kernel. |
| 25 | */ | 26 | */ |
| 26 | static inline void *kmalloc(size_t size, gfp_t flags) | 27 | static __always_inline void *kmalloc(size_t size, gfp_t flags) |
| 27 | { | 28 | { |
| 28 | return __kmalloc_node(size, flags, -1); | 29 | return __kmalloc_node(size, flags, -1); |
| 29 | } | 30 | } |
| 30 | 31 | ||
| 31 | static inline void *__kmalloc(size_t size, gfp_t flags) | 32 | static __always_inline void *__kmalloc(size_t size, gfp_t flags) |
| 32 | { | 33 | { |
| 33 | return kmalloc(size, flags); | 34 | return kmalloc(size, flags); |
| 34 | } | 35 | } |
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index e37b6aa8a9fb..a1f90528e70b 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include <linux/gfp.h> | 10 | #include <linux/gfp.h> |
| 11 | #include <linux/workqueue.h> | 11 | #include <linux/workqueue.h> |
| 12 | #include <linux/kobject.h> | 12 | #include <linux/kobject.h> |
| 13 | #include <trace/kmemtrace.h> | ||
| 13 | 14 | ||
| 14 | enum stat_item { | 15 | enum stat_item { |
| 15 | ALLOC_FASTPATH, /* Allocation from cpu slab */ | 16 | ALLOC_FASTPATH, /* Allocation from cpu slab */ |
| @@ -217,13 +218,31 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size) | |||
| 217 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); | 218 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); |
| 218 | void *__kmalloc(size_t size, gfp_t flags); | 219 | void *__kmalloc(size_t size, gfp_t flags); |
| 219 | 220 | ||
| 221 | #ifdef CONFIG_KMEMTRACE | ||
| 222 | extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags); | ||
| 223 | #else | ||
| 224 | static __always_inline void * | ||
| 225 | kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags) | ||
| 226 | { | ||
| 227 | return kmem_cache_alloc(s, gfpflags); | ||
| 228 | } | ||
| 229 | #endif | ||
| 230 | |||
| 220 | static __always_inline void *kmalloc_large(size_t size, gfp_t flags) | 231 | static __always_inline void *kmalloc_large(size_t size, gfp_t flags) |
| 221 | { | 232 | { |
| 222 | return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size)); | 233 | unsigned int order = get_order(size); |
| 234 | void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order); | ||
| 235 | |||
| 236 | kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret, | ||
| 237 | size, PAGE_SIZE << order, flags); | ||
| 238 | |||
| 239 | return ret; | ||
| 223 | } | 240 | } |
| 224 | 241 | ||
| 225 | static __always_inline void *kmalloc(size_t size, gfp_t flags) | 242 | static __always_inline void *kmalloc(size_t size, gfp_t flags) |
| 226 | { | 243 | { |
| 244 | void *ret; | ||
| 245 | |||
| 227 | if (__builtin_constant_p(size)) { | 246 | if (__builtin_constant_p(size)) { |
| 228 | if (size > SLUB_MAX_SIZE) | 247 | if (size > SLUB_MAX_SIZE) |
| 229 | return kmalloc_large(size, flags); | 248 | return kmalloc_large(size, flags); |
| @@ -234,7 +253,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags) | |||
| 234 | if (!s) | 253 | if (!s) |
| 235 | return ZERO_SIZE_PTR; | 254 | return ZERO_SIZE_PTR; |
| 236 | 255 | ||
| 237 | return kmem_cache_alloc(s, flags); | 256 | ret = kmem_cache_alloc_notrace(s, flags); |
| 257 | |||
| 258 | kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, | ||
| 259 | _THIS_IP_, ret, | ||
| 260 | size, s->size, flags); | ||
| 261 | |||
| 262 | return ret; | ||
| 238 | } | 263 | } |
| 239 | } | 264 | } |
| 240 | return __kmalloc(size, flags); | 265 | return __kmalloc(size, flags); |
| @@ -244,8 +269,24 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags) | |||
| 244 | void *__kmalloc_node(size_t size, gfp_t flags, int node); | 269 | void *__kmalloc_node(size_t size, gfp_t flags, int node); |
| 245 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | 270 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); |
| 246 | 271 | ||
| 272 | #ifdef CONFIG_KMEMTRACE | ||
| 273 | extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s, | ||
| 274 | gfp_t gfpflags, | ||
| 275 | int node); | ||
| 276 | #else | ||
| 277 | static __always_inline void * | ||
| 278 | kmem_cache_alloc_node_notrace(struct kmem_cache *s, | ||
| 279 | gfp_t gfpflags, | ||
| 280 | int node) | ||
| 281 | { | ||
| 282 | return kmem_cache_alloc_node(s, gfpflags, node); | ||
| 283 | } | ||
| 284 | #endif | ||
| 285 | |||
| 247 | static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | 286 | static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) |
| 248 | { | 287 | { |
| 288 | void *ret; | ||
| 289 | |||
| 249 | if (__builtin_constant_p(size) && | 290 | if (__builtin_constant_p(size) && |
| 250 | size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) { | 291 | size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) { |
| 251 | struct kmem_cache *s = kmalloc_slab(size); | 292 | struct kmem_cache *s = kmalloc_slab(size); |
| @@ -253,7 +294,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
| 253 | if (!s) | 294 | if (!s) |
| 254 | return ZERO_SIZE_PTR; | 295 | return ZERO_SIZE_PTR; |
| 255 | 296 | ||
| 256 | return kmem_cache_alloc_node(s, flags, node); | 297 | ret = kmem_cache_alloc_node_notrace(s, flags, node); |
| 298 | |||
| 299 | kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, | ||
| 300 | _THIS_IP_, ret, | ||
| 301 | size, s->size, flags, node); | ||
| 302 | |||
| 303 | return ret; | ||
| 257 | } | 304 | } |
| 258 | return __kmalloc_node(size, flags, node); | 305 | return __kmalloc_node(size, flags, node); |
| 259 | } | 306 | } |
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 18771cac2f85..6470f74074af 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
| @@ -65,6 +65,7 @@ struct old_linux_dirent; | |||
| 65 | #include <asm/signal.h> | 65 | #include <asm/signal.h> |
| 66 | #include <linux/quota.h> | 66 | #include <linux/quota.h> |
| 67 | #include <linux/key.h> | 67 | #include <linux/key.h> |
| 68 | #include <linux/ftrace.h> | ||
| 68 | 69 | ||
| 69 | #define __SC_DECL1(t1, a1) t1 a1 | 70 | #define __SC_DECL1(t1, a1) t1 a1 |
| 70 | #define __SC_DECL2(t2, a2, ...) t2 a2, __SC_DECL1(__VA_ARGS__) | 71 | #define __SC_DECL2(t2, a2, ...) t2 a2, __SC_DECL1(__VA_ARGS__) |
| @@ -95,7 +96,46 @@ struct old_linux_dirent; | |||
| 95 | #define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__) | 96 | #define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__) |
| 96 | #define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__) | 97 | #define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__) |
| 97 | 98 | ||
| 99 | #ifdef CONFIG_FTRACE_SYSCALLS | ||
| 100 | #define __SC_STR_ADECL1(t, a) #a | ||
| 101 | #define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__) | ||
| 102 | #define __SC_STR_ADECL3(t, a, ...) #a, __SC_STR_ADECL2(__VA_ARGS__) | ||
| 103 | #define __SC_STR_ADECL4(t, a, ...) #a, __SC_STR_ADECL3(__VA_ARGS__) | ||
| 104 | #define __SC_STR_ADECL5(t, a, ...) #a, __SC_STR_ADECL4(__VA_ARGS__) | ||
| 105 | #define __SC_STR_ADECL6(t, a, ...) #a, __SC_STR_ADECL5(__VA_ARGS__) | ||
| 106 | |||
| 107 | #define __SC_STR_TDECL1(t, a) #t | ||
| 108 | #define __SC_STR_TDECL2(t, a, ...) #t, __SC_STR_TDECL1(__VA_ARGS__) | ||
| 109 | #define __SC_STR_TDECL3(t, a, ...) #t, __SC_STR_TDECL2(__VA_ARGS__) | ||
| 110 | #define __SC_STR_TDECL4(t, a, ...) #t, __SC_STR_TDECL3(__VA_ARGS__) | ||
| 111 | #define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__) | ||
| 112 | #define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__) | ||
| 113 | |||
| 114 | #define SYSCALL_METADATA(sname, nb) \ | ||
| 115 | static const struct syscall_metadata __used \ | ||
| 116 | __attribute__((__aligned__(4))) \ | ||
| 117 | __attribute__((section("__syscalls_metadata"))) \ | ||
| 118 | __syscall_meta_##sname = { \ | ||
| 119 | .name = "sys"#sname, \ | ||
| 120 | .nb_args = nb, \ | ||
| 121 | .types = types_##sname, \ | ||
| 122 | .args = args_##sname, \ | ||
| 123 | } | ||
| 124 | |||
| 125 | #define SYSCALL_DEFINE0(sname) \ | ||
| 126 | static const struct syscall_metadata __used \ | ||
| 127 | __attribute__((__aligned__(4))) \ | ||
| 128 | __attribute__((section("__syscalls_metadata"))) \ | ||
| 129 | __syscall_meta_##sname = { \ | ||
| 130 | .name = "sys_"#sname, \ | ||
| 131 | .nb_args = 0, \ | ||
| 132 | }; \ | ||
| 133 | asmlinkage long sys_##sname(void) | ||
| 134 | |||
| 135 | #else | ||
| 98 | #define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void) | 136 | #define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void) |
| 137 | #endif | ||
| 138 | |||
| 99 | #define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__) | 139 | #define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__) |
| 100 | #define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__) | 140 | #define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__) |
| 101 | #define SYSCALL_DEFINE3(name, ...) SYSCALL_DEFINEx(3, _##name, __VA_ARGS__) | 141 | #define SYSCALL_DEFINE3(name, ...) SYSCALL_DEFINEx(3, _##name, __VA_ARGS__) |
| @@ -117,10 +157,26 @@ struct old_linux_dirent; | |||
| 117 | #endif | 157 | #endif |
| 118 | #endif | 158 | #endif |
| 119 | 159 | ||
| 160 | #ifdef CONFIG_FTRACE_SYSCALLS | ||
| 161 | #define SYSCALL_DEFINEx(x, sname, ...) \ | ||
| 162 | static const char *types_##sname[] = { \ | ||
| 163 | __SC_STR_TDECL##x(__VA_ARGS__) \ | ||
| 164 | }; \ | ||
| 165 | static const char *args_##sname[] = { \ | ||
| 166 | __SC_STR_ADECL##x(__VA_ARGS__) \ | ||
| 167 | }; \ | ||
| 168 | SYSCALL_METADATA(sname, x); \ | ||
| 169 | __SYSCALL_DEFINEx(x, sname, __VA_ARGS__) | ||
| 170 | #else | ||
| 171 | #define SYSCALL_DEFINEx(x, sname, ...) \ | ||
| 172 | __SYSCALL_DEFINEx(x, sname, __VA_ARGS__) | ||
| 173 | #endif | ||
| 174 | |||
| 120 | #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS | 175 | #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS |
| 121 | 176 | ||
| 122 | #define SYSCALL_DEFINE(name) static inline long SYSC_##name | 177 | #define SYSCALL_DEFINE(name) static inline long SYSC_##name |
| 123 | #define SYSCALL_DEFINEx(x, name, ...) \ | 178 | |
| 179 | #define __SYSCALL_DEFINEx(x, name, ...) \ | ||
| 124 | asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)); \ | 180 | asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)); \ |
| 125 | static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__)); \ | 181 | static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__)); \ |
| 126 | asmlinkage long SyS##name(__SC_LONG##x(__VA_ARGS__)) \ | 182 | asmlinkage long SyS##name(__SC_LONG##x(__VA_ARGS__)) \ |
| @@ -134,7 +190,7 @@ struct old_linux_dirent; | |||
| 134 | #else /* CONFIG_HAVE_SYSCALL_WRAPPERS */ | 190 | #else /* CONFIG_HAVE_SYSCALL_WRAPPERS */ |
| 135 | 191 | ||
| 136 | #define SYSCALL_DEFINE(name) asmlinkage long sys_##name | 192 | #define SYSCALL_DEFINE(name) asmlinkage long sys_##name |
| 137 | #define SYSCALL_DEFINEx(x, name, ...) \ | 193 | #define __SYSCALL_DEFINEx(x, name, ...) \ |
| 138 | asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)) | 194 | asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)) |
| 139 | 195 | ||
| 140 | #endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */ | 196 | #endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */ |
diff --git a/include/linux/trace_clock.h b/include/linux/trace_clock.h new file mode 100644 index 000000000000..7a8130384087 --- /dev/null +++ b/include/linux/trace_clock.h | |||
| @@ -0,0 +1,19 @@ | |||
| 1 | #ifndef _LINUX_TRACE_CLOCK_H | ||
| 2 | #define _LINUX_TRACE_CLOCK_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * 3 trace clock variants, with differing scalability/precision | ||
| 6 | * tradeoffs: | ||
| 7 | * | ||
| 8 | * - local: CPU-local trace clock | ||
| 9 | * - medium: scalable global clock with some jitter | ||
| 10 | * - global: globally monotonic, serialized clock | ||
| 11 | */ | ||
| 12 | #include <linux/compiler.h> | ||
| 13 | #include <linux/types.h> | ||
| 14 | |||
| 15 | extern u64 notrace trace_clock_local(void); | ||
| 16 | extern u64 notrace trace_clock(void); | ||
| 17 | extern u64 notrace trace_clock_global(void); | ||
| 18 | |||
| 19 | #endif /* _LINUX_TRACE_CLOCK_H */ | ||
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 757005458366..d35a7ee7611f 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
| @@ -31,8 +31,8 @@ struct tracepoint { | |||
| 31 | * Keep in sync with vmlinux.lds.h. | 31 | * Keep in sync with vmlinux.lds.h. |
| 32 | */ | 32 | */ |
| 33 | 33 | ||
| 34 | #define TPPROTO(args...) args | 34 | #define TP_PROTO(args...) args |
| 35 | #define TPARGS(args...) args | 35 | #define TP_ARGS(args...) args |
| 36 | 36 | ||
| 37 | #ifdef CONFIG_TRACEPOINTS | 37 | #ifdef CONFIG_TRACEPOINTS |
| 38 | 38 | ||
| @@ -65,7 +65,7 @@ struct tracepoint { | |||
| 65 | { \ | 65 | { \ |
| 66 | if (unlikely(__tracepoint_##name.state)) \ | 66 | if (unlikely(__tracepoint_##name.state)) \ |
| 67 | __DO_TRACE(&__tracepoint_##name, \ | 67 | __DO_TRACE(&__tracepoint_##name, \ |
| 68 | TPPROTO(proto), TPARGS(args)); \ | 68 | TP_PROTO(proto), TP_ARGS(args)); \ |
| 69 | } \ | 69 | } \ |
| 70 | static inline int register_trace_##name(void (*probe)(proto)) \ | 70 | static inline int register_trace_##name(void (*probe)(proto)) \ |
| 71 | { \ | 71 | { \ |
| @@ -153,4 +153,114 @@ static inline void tracepoint_synchronize_unregister(void) | |||
| 153 | synchronize_sched(); | 153 | synchronize_sched(); |
| 154 | } | 154 | } |
| 155 | 155 | ||
| 156 | #define PARAMS(args...) args | ||
| 157 | #define TRACE_FORMAT(name, proto, args, fmt) \ | ||
| 158 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) | ||
| 159 | |||
| 160 | |||
| 161 | /* | ||
| 162 | * For use with the TRACE_EVENT macro: | ||
| 163 | * | ||
| 164 | * We define a tracepoint, its arguments, its printk format | ||
| 165 | * and its 'fast binay record' layout. | ||
| 166 | * | ||
| 167 | * Firstly, name your tracepoint via TRACE_EVENT(name : the | ||
| 168 | * 'subsystem_event' notation is fine. | ||
| 169 | * | ||
| 170 | * Think about this whole construct as the | ||
| 171 | * 'trace_sched_switch() function' from now on. | ||
| 172 | * | ||
| 173 | * | ||
| 174 | * TRACE_EVENT(sched_switch, | ||
| 175 | * | ||
| 176 | * * | ||
| 177 | * * A function has a regular function arguments | ||
| 178 | * * prototype, declare it via TP_PROTO(): | ||
| 179 | * * | ||
| 180 | * | ||
| 181 | * TP_PROTO(struct rq *rq, struct task_struct *prev, | ||
| 182 | * struct task_struct *next), | ||
| 183 | * | ||
| 184 | * * | ||
| 185 | * * Define the call signature of the 'function'. | ||
| 186 | * * (Design sidenote: we use this instead of a | ||
| 187 | * * TP_PROTO1/TP_PROTO2/TP_PROTO3 ugliness.) | ||
| 188 | * * | ||
| 189 | * | ||
| 190 | * TP_ARGS(rq, prev, next), | ||
| 191 | * | ||
| 192 | * * | ||
| 193 | * * Fast binary tracing: define the trace record via | ||
| 194 | * * TP_STRUCT__entry(). You can think about it like a | ||
| 195 | * * regular C structure local variable definition. | ||
| 196 | * * | ||
| 197 | * * This is how the trace record is structured and will | ||
| 198 | * * be saved into the ring buffer. These are the fields | ||
| 199 | * * that will be exposed to user-space in | ||
| 200 | * * /debug/tracing/events/<*>/format. | ||
| 201 | * * | ||
| 202 | * * The declared 'local variable' is called '__entry' | ||
| 203 | * * | ||
| 204 | * * __field(pid_t, prev_prid) is equivalent to a standard declariton: | ||
| 205 | * * | ||
| 206 | * * pid_t prev_pid; | ||
| 207 | * * | ||
| 208 | * * __array(char, prev_comm, TASK_COMM_LEN) is equivalent to: | ||
| 209 | * * | ||
| 210 | * * char prev_comm[TASK_COMM_LEN]; | ||
| 211 | * * | ||
| 212 | * | ||
| 213 | * TP_STRUCT__entry( | ||
| 214 | * __array( char, prev_comm, TASK_COMM_LEN ) | ||
| 215 | * __field( pid_t, prev_pid ) | ||
| 216 | * __field( int, prev_prio ) | ||
| 217 | * __array( char, next_comm, TASK_COMM_LEN ) | ||
| 218 | * __field( pid_t, next_pid ) | ||
| 219 | * __field( int, next_prio ) | ||
| 220 | * ), | ||
| 221 | * | ||
| 222 | * * | ||
| 223 | * * Assign the entry into the trace record, by embedding | ||
| 224 | * * a full C statement block into TP_fast_assign(). You | ||
| 225 | * * can refer to the trace record as '__entry' - | ||
| 226 | * * otherwise you can put arbitrary C code in here. | ||
| 227 | * * | ||
| 228 | * * Note: this C code will execute every time a trace event | ||
| 229 | * * happens, on an active tracepoint. | ||
| 230 | * * | ||
| 231 | * | ||
| 232 | * TP_fast_assign( | ||
| 233 | * memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); | ||
| 234 | * __entry->prev_pid = prev->pid; | ||
| 235 | * __entry->prev_prio = prev->prio; | ||
| 236 | * memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); | ||
| 237 | * __entry->next_pid = next->pid; | ||
| 238 | * __entry->next_prio = next->prio; | ||
| 239 | * ) | ||
| 240 | * | ||
| 241 | * * | ||
| 242 | * * Formatted output of a trace record via TP_printk(). | ||
| 243 | * * This is how the tracepoint will appear under ftrace | ||
| 244 | * * plugins that make use of this tracepoint. | ||
| 245 | * * | ||
| 246 | * * (raw-binary tracing wont actually perform this step.) | ||
| 247 | * * | ||
| 248 | * | ||
| 249 | * TP_printk("task %s:%d [%d] ==> %s:%d [%d]", | ||
| 250 | * __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, | ||
| 251 | * __entry->next_comm, __entry->next_pid, __entry->next_prio), | ||
| 252 | * | ||
| 253 | * ); | ||
| 254 | * | ||
| 255 | * This macro construct is thus used for the regular printk format | ||
| 256 | * tracing setup, it is used to construct a function pointer based | ||
| 257 | * tracepoint callback (this is used by programmatic plugins and | ||
| 258 | * can also by used by generic instrumentation like SystemTap), and | ||
| 259 | * it is also used to expose a structured trace record in | ||
| 260 | * /debug/tracing/events/. | ||
| 261 | */ | ||
| 262 | |||
| 263 | #define TRACE_EVENT(name, proto, args, struct, assign, print) \ | ||
| 264 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) | ||
| 265 | |||
| 156 | #endif | 266 | #endif |
