Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/dynamic_debug.h   |  39
-rw-r--r--  include/linux/ftrace_event.h    |   8
-rw-r--r--  include/linux/interrupt.h       |   8
-rw-r--r--  include/linux/irq_work.h        |  20
-rw-r--r--  include/linux/jump_label.h      |  74
-rw-r--r--  include/linux/jump_label_ref.h  |  44
-rw-r--r--  include/linux/module.h          |   5
-rw-r--r--  include/linux/oprofile.h        |   7
-rw-r--r--  include/linux/percpu.h          |   9
-rw-r--r--  include/linux/perf_event.h      | 212
-rw-r--r--  include/linux/sched.h           |   9
-rw-r--r--  include/linux/stop_machine.h    |  10
-rw-r--r--  include/linux/tracepoint.h      |   5
13 files changed, 348 insertions, 102 deletions
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 52c0da4bdd18..bef3cda44c4c 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -1,6 +1,8 @@
 #ifndef _DYNAMIC_DEBUG_H
 #define _DYNAMIC_DEBUG_H
 
+#include <linux/jump_label.h>
+
 /* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which
  * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
  * use independent hash functions, to reduce the chance of false positives.
@@ -22,8 +24,6 @@ struct _ddebug {
        const char *function;
        const char *filename;
        const char *format;
-       char primary_hash;
-       char secondary_hash;
        unsigned int lineno:24;
        /*
         * The flags field controls the behaviour at the callsite.
@@ -33,6 +33,7 @@ struct _ddebug {
 #define _DPRINTK_FLAGS_PRINT (1<<0) /* printk() a message using the format */
 #define _DPRINTK_FLAGS_DEFAULT 0
        unsigned int flags:8;
+       char enabled;
 } __attribute__((aligned(8)));
 
 
@@ -42,33 +43,35 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
 #if defined(CONFIG_DYNAMIC_DEBUG)
 extern int ddebug_remove_module(const char *mod_name);
 
-#define __dynamic_dbg_enabled(dd) ({                                   \
-       int __ret = 0;                                                  \
-       if (unlikely((dynamic_debug_enabled & (1LL << DEBUG_HASH)) &&   \
-                    (dynamic_debug_enabled2 & (1LL << DEBUG_HASH2))))  \
-               if (unlikely(dd.flags))                                 \
-                       __ret = 1;                                      \
-       __ret; })
-
 #define dynamic_pr_debug(fmt, ...) do {                                \
+       __label__ do_printk;                                            \
+       __label__ out;                                                  \
        static struct _ddebug descriptor                                \
        __used                                                          \
        __attribute__((section("__verbose"), aligned(8))) =             \
-       { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH,          \
-               DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT };        \
-       if (__dynamic_dbg_enabled(descriptor))                          \
-               printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__);          \
+       { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__,            \
+               _DPRINTK_FLAGS_DEFAULT };                               \
+       JUMP_LABEL(&descriptor.enabled, do_printk);                     \
+       goto out;                                                       \
+do_printk:                                                             \
+       printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__);                  \
+out:   ;                                                               \
 } while (0)
 
 
 #define dynamic_dev_dbg(dev, fmt, ...) do {                            \
+       __label__ do_printk;                                            \
+       __label__ out;                                                  \
        static struct _ddebug descriptor                                \
        __used                                                          \
        __attribute__((section("__verbose"), aligned(8))) =             \
-       { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH,          \
-               DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT };        \
-       if (__dynamic_dbg_enabled(descriptor))                          \
-               dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);        \
+       { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__,            \
+               _DPRINTK_FLAGS_DEFAULT };                               \
+       JUMP_LABEL(&descriptor.enabled, do_printk);                     \
+       goto out;                                                       \
+do_printk:                                                             \
+       dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);                \
+out:   ;                                                               \
 } while (0)
 
 #else
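Call sites are unaffected by this conversion: pr_debug() still expands to a static descriptor plus an enabled check, only the check is now a patchable jump on descriptor.enabled instead of the old hash-mask test. A minimal sketch of a call site, with the module prefix and function name purely illustrative:

/* Illustrative call site: with CONFIG_DYNAMIC_DEBUG, pr_debug()
 * expands to dynamic_pr_debug() above, i.e. a descriptor in the
 * __verbose section plus a JUMP_LABEL test on descriptor.enabled. */
#define pr_fmt(fmt) "mydrv: " fmt      /* hypothetical prefix */

#include <linux/kernel.h>

static int mydrv_probe(void)
{
       pr_debug("probing\n");  /* compiled in, ~free until enabled via debugfs */
       return 0;
}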
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 02b8b24f8f51..8beabb958f61 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -191,8 +191,8 @@ struct ftrace_event_call {
        unsigned int            flags;
 
 #ifdef CONFIG_PERF_EVENTS
        int                     perf_refcount;
-       struct hlist_head       *perf_events;
+       struct hlist_head __percpu *perf_events;
 #endif
 };
 
@@ -252,8 +252,8 @@ DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
 
 extern int  perf_trace_init(struct perf_event *event);
 extern void perf_trace_destroy(struct perf_event *event);
-extern int  perf_trace_enable(struct perf_event *event);
-extern void perf_trace_disable(struct perf_event *event);
+extern int  perf_trace_add(struct perf_event *event, int flags);
+extern void perf_trace_del(struct perf_event *event, int flags);
 extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
                                      char *filter_str);
 extern void ftrace_profile_free_filter(struct perf_event *event);
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a0384a4d1e6f..531495db1708 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -18,6 +18,7 @@
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
+#include <trace/events/irq.h>
 
 /*
  * These correspond to the IORESOURCE_IRQ_* defines in
@@ -407,7 +408,12 @@ asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
 extern void softirq_init(void);
-#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
+static inline void __raise_softirq_irqoff(unsigned int nr)
+{
+       trace_softirq_raise((struct softirq_action *)(unsigned long)nr, NULL);
+       or_softirq_pending(1UL << nr);
+}
+
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
 extern void wakeup_softirqd(void);
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
new file mode 100644
index 000000000000..4fa09d4d0b71
--- /dev/null
+++ b/include/linux/irq_work.h
@@ -0,0 +1,20 @@
+#ifndef _LINUX_IRQ_WORK_H
+#define _LINUX_IRQ_WORK_H
+
+struct irq_work {
+       struct irq_work *next;
+       void (*func)(struct irq_work *);
+};
+
+static inline
+void init_irq_work(struct irq_work *entry, void (*func)(struct irq_work *))
+{
+       entry->next = NULL;
+       entry->func = func;
+}
+
+bool irq_work_queue(struct irq_work *entry);
+void irq_work_run(void);
+void irq_work_sync(struct irq_work *entry);
+
+#endif /* _LINUX_IRQ_WORK_H */
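irq_work lets NMI and other hard-atomic contexts defer a callback to a context where interrupts are enabled; it replaces the perf_pending_entry machinery in perf_event.h below. A sketch of typical use, in which everything except the irq_work API itself is illustrative:

#include <linux/irq_work.h>

/* Illustrative: defer a printk out of NMI context, where calling
 * printk() directly is unsafe. */
static void report_overflow(struct irq_work *work)
{
       pr_info("counter overflow handled\n");  /* runs later, IRQs on */
}

static struct irq_work overflow_work;

static void mydrv_init(void)
{
       init_irq_work(&overflow_work, report_overflow);
}

static void mydrv_nmi_handler(void)
{
       /* Safe from NMI: just links the entry and raises a self-interrupt;
        * report_overflow() runs once the NMI returns. */
       irq_work_queue(&overflow_work);
}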
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
new file mode 100644
index 000000000000..b67cb180e6e9
--- /dev/null
+++ b/include/linux/jump_label.h
@@ -0,0 +1,74 @@
+#ifndef _LINUX_JUMP_LABEL_H
+#define _LINUX_JUMP_LABEL_H
+
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_HAVE_ARCH_JUMP_LABEL)
+# include <asm/jump_label.h>
+# define HAVE_JUMP_LABEL
+#endif
+
+enum jump_label_type {
+       JUMP_LABEL_ENABLE,
+       JUMP_LABEL_DISABLE
+};
+
+struct module;
+
+#ifdef HAVE_JUMP_LABEL
+
+extern struct jump_entry __start___jump_table[];
+extern struct jump_entry __stop___jump_table[];
+
+extern void arch_jump_label_transform(struct jump_entry *entry,
+                                     enum jump_label_type type);
+extern void arch_jump_label_text_poke_early(jump_label_t addr);
+extern void jump_label_update(unsigned long key, enum jump_label_type type);
+extern void jump_label_apply_nops(struct module *mod);
+extern int jump_label_text_reserved(void *start, void *end);
+
+#define jump_label_enable(key) \
+       jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE);
+
+#define jump_label_disable(key) \
+       jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE);
+
+#else
+
+#define JUMP_LABEL(key, label)                 \
+do {                                           \
+       if (unlikely(*key))                     \
+               goto label;                     \
+} while (0)
+
+#define jump_label_enable(cond_var)            \
+do {                                           \
+       *(cond_var) = 1;                        \
+} while (0)
+
+#define jump_label_disable(cond_var)           \
+do {                                           \
+       *(cond_var) = 0;                        \
+} while (0)
+
+static inline int jump_label_apply_nops(struct module *mod)
+{
+       return 0;
+}
+
+static inline int jump_label_text_reserved(void *start, void *end)
+{
+       return 0;
+}
+
+#endif
+
+#define COND_STMT(key, stmt)                   \
+do {                                           \
+       __label__ jl_enabled;                   \
+       JUMP_LABEL(key, jl_enabled);            \
+       if (0) {                                \
+jl_enabled:                                    \
+               stmt;                           \
+       }                                       \
+} while (0)
+
+#endif
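With asm goto support, JUMP_LABEL compiles down to a single patchable nop in the straight-line path; the fallback above is a plain conditional branch on the key. The fallback semantics of JUMP_LABEL and COND_STMT can be demonstrated in ordinary GCC C (the key name and printf bodies are illustrative only):

#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

/* Fallback JUMP_LABEL: branch to 'label' when *key is non-zero. */
#define JUMP_LABEL(key, label)                 \
do {                                           \
       if (unlikely(*(key)))                   \
               goto label;                     \
} while (0)

/* COND_STMT: run 'stmt' only when the key is enabled; the if (0)
 * moves the statement out of the fall-through path. */
#define COND_STMT(key, stmt)                   \
do {                                           \
       __label__ jl_enabled;                   \
       JUMP_LABEL(key, jl_enabled);            \
       if (0) {                                \
jl_enabled:                                    \
               stmt;                           \
       }                                       \
} while (0)

static int tracing_on;         /* the "key" in the fallback case */

int main(void)
{
       COND_STMT(&tracing_on, printf("not printed\n"));
       tracing_on = 1;         /* jump_label_enable() in the fallback */
       COND_STMT(&tracing_on, printf("printed\n"));
       return 0;
}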
diff --git a/include/linux/jump_label_ref.h b/include/linux/jump_label_ref.h
new file mode 100644
index 000000000000..e5d012ad92c6
--- /dev/null
+++ b/include/linux/jump_label_ref.h
@@ -0,0 +1,44 @@
+#ifndef _LINUX_JUMP_LABEL_REF_H
+#define _LINUX_JUMP_LABEL_REF_H
+
+#include <linux/jump_label.h>
+#include <asm/atomic.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+static inline void jump_label_inc(atomic_t *key)
+{
+       if (atomic_add_return(1, key) == 1)
+               jump_label_enable(key);
+}
+
+static inline void jump_label_dec(atomic_t *key)
+{
+       if (atomic_dec_and_test(key))
+               jump_label_disable(key);
+}
+
+#else /* !HAVE_JUMP_LABEL */
+
+static inline void jump_label_inc(atomic_t *key)
+{
+       atomic_inc(key);
+}
+
+static inline void jump_label_dec(atomic_t *key)
+{
+       atomic_dec(key);
+}
+
+#undef JUMP_LABEL
+#define JUMP_LABEL(key, label)                                         \
+do {                                                                   \
+       if (unlikely(__builtin_choose_expr(                             \
+             __builtin_types_compatible_p(typeof(key), atomic_t *),    \
+             atomic_read((atomic_t *)(key)), *(key))))                 \
+               goto label;                                             \
+} while (0)
+
+#endif /* HAVE_JUMP_LABEL */
+
+#endif /* _LINUX_JUMP_LABEL_REF_H */
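The atomic_t variant turns the key into a reference count: the branch is patched in on the 0->1 transition and patched out again on 1->0, so multiple independent users can pin it without tracking each other. A sketch of the pattern (the key name and callers are illustrative):

static atomic_t task_events_key;       /* illustrative refcounted key */

static void event_created(void)
{
       jump_label_inc(&task_events_key);       /* 0 -> 1 enables the branch */
}

static void event_destroyed(void)
{
       jump_label_dec(&task_events_key);       /* 1 -> 0 disables it again */
}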
diff --git a/include/linux/module.h b/include/linux/module.h
index aace066bad8f..b29e7458b966 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -350,7 +350,10 @@ struct module
        struct tracepoint *tracepoints;
        unsigned int num_tracepoints;
 #endif
-
+#ifdef HAVE_JUMP_LABEL
+       struct jump_entry *jump_entries;
+       unsigned int num_jump_entries;
+#endif
 #ifdef CONFIG_TRACING
        const char **trace_bprintk_fmt_start;
        unsigned int num_trace_bprintk_fmt;
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 5171639ecf0f..32fb81212fd1 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -15,6 +15,7 @@
 
 #include <linux/types.h>
 #include <linux/spinlock.h>
+#include <linux/init.h>
 #include <asm/atomic.h>
 
 /* Each escaped entry is prefixed by ESCAPE_CODE
@@ -185,4 +186,10 @@ int oprofile_add_data(struct op_entry *entry, unsigned long val);
 int oprofile_add_data64(struct op_entry *entry, u64 val);
 int oprofile_write_commit(struct op_entry *entry);
 
+#ifdef CONFIG_PERF_EVENTS
+int __init oprofile_perf_init(struct oprofile_operations *ops);
+void oprofile_perf_exit(void);
+char *op_name_from_perf_id(void);
+#endif /* CONFIG_PERF_EVENTS */
+
 #endif /* OPROFILE_H */
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 49466b13c5c6..0eb50832aa00 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -39,6 +39,15 @@
        preempt_enable();                               \
 } while (0)
 
+#define get_cpu_ptr(var) ({                            \
+       preempt_disable();                              \
+       this_cpu_ptr(var); })
+
+#define put_cpu_ptr(var) do {                          \
+       (void)(var);                                    \
+       preempt_enable();                               \
+} while (0)
+
 #ifdef CONFIG_SMP
 
 /* minimum unit size, also is the maximum supported allocation size */
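get_cpu_ptr()/put_cpu_ptr() bracket a non-preemptible region around access to the current CPU's instance of a per-cpu pointer, mirroring what get_cpu_var()/put_cpu_var() do for per-cpu variables. A sketch with an illustrative per-cpu allocation:

static struct my_stats __percpu *stats;        /* illustrative; from alloc_percpu() */

static void count_event(void)
{
       struct my_stats *s = get_cpu_ptr(stats);        /* disables preemption */

       s->events++;            /* safe: we cannot migrate between these points */
       put_cpu_ptr(stats);     /* re-enables preemption */
}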
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 716f99b682c1..057bf22a8323 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -486,6 +486,8 @@ struct perf_guest_info_callbacks {
 #include <linux/workqueue.h>
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
+#include <linux/irq_work.h>
+#include <linux/jump_label_ref.h>
 #include <asm/atomic.h>
 #include <asm/local.h>
 
@@ -529,16 +531,22 @@ struct hw_perf_event {
                int             last_cpu;
        };
        struct { /* software */
-               s64             remaining;
                struct hrtimer  hrtimer;
        };
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
        struct { /* breakpoint */
                struct arch_hw_breakpoint       info;
                struct list_head                bp_list;
+               /*
+                * Crufty hack to avoid the chicken and egg
+                * problem hw_breakpoint has with context
+                * creation and event initialization.
+                */
+               struct task_struct              *bp_target;
        };
 #endif
        };
+       int                             state;
        local64_t                       prev_count;
        u64                             sample_period;
        u64                             last_period;
@@ -550,6 +558,13 @@ struct hw_perf_event {
 #endif
 };
 
+/*
+ * hw_perf_event::state flags
+ */
+#define PERF_HES_STOPPED       0x01 /* the counter is stopped */
+#define PERF_HES_UPTODATE      0x02 /* event->count up-to-date */
+#define PERF_HES_ARCH          0x04
+
 struct perf_event;
 
 /*
| @@ -561,36 +576,70 @@ struct perf_event; | |||
| 561 | * struct pmu - generic performance monitoring unit | 576 | * struct pmu - generic performance monitoring unit |
| 562 | */ | 577 | */ |
| 563 | struct pmu { | 578 | struct pmu { |
| 564 | int (*enable) (struct perf_event *event); | 579 | struct list_head entry; |
| 565 | void (*disable) (struct perf_event *event); | 580 | |
| 566 | int (*start) (struct perf_event *event); | 581 | int * __percpu pmu_disable_count; |
| 567 | void (*stop) (struct perf_event *event); | 582 | struct perf_cpu_context * __percpu pmu_cpu_context; |
| 568 | void (*read) (struct perf_event *event); | 583 | int task_ctx_nr; |
| 569 | void (*unthrottle) (struct perf_event *event); | 584 | |
| 585 | /* | ||
| 586 | * Fully disable/enable this PMU, can be used to protect from the PMI | ||
| 587 | * as well as for lazy/batch writing of the MSRs. | ||
| 588 | */ | ||
| 589 | void (*pmu_enable) (struct pmu *pmu); /* optional */ | ||
| 590 | void (*pmu_disable) (struct pmu *pmu); /* optional */ | ||
| 570 | 591 | ||
| 571 | /* | 592 | /* |
| 572 | * Group events scheduling is treated as a transaction, add group | 593 | * Try and initialize the event for this PMU. |
| 573 | * events as a whole and perform one schedulability test. If the test | 594 | * Should return -ENOENT when the @event doesn't match this PMU. |
| 574 | * fails, roll back the whole group | ||
| 575 | */ | 595 | */ |
| 596 | int (*event_init) (struct perf_event *event); | ||
| 597 | |||
| 598 | #define PERF_EF_START 0x01 /* start the counter when adding */ | ||
| 599 | #define PERF_EF_RELOAD 0x02 /* reload the counter when starting */ | ||
| 600 | #define PERF_EF_UPDATE 0x04 /* update the counter when stopping */ | ||
| 576 | 601 | ||
| 577 | /* | 602 | /* |
| 578 | * Start the transaction, after this ->enable() doesn't need | 603 | * Adds/Removes a counter to/from the PMU, can be done inside |
| 579 | * to do schedulability tests. | 604 | * a transaction, see the ->*_txn() methods. |
| 580 | */ | 605 | */ |
| 581 | void (*start_txn) (const struct pmu *pmu); | 606 | int (*add) (struct perf_event *event, int flags); |
| 607 | void (*del) (struct perf_event *event, int flags); | ||
| 608 | |||
| 582 | /* | 609 | /* |
| 583 | * If ->start_txn() disabled the ->enable() schedulability test | 610 | * Starts/Stops a counter present on the PMU. The PMI handler |
| 611 | * should stop the counter when perf_event_overflow() returns | ||
| 612 | * !0. ->start() will be used to continue. | ||
| 613 | */ | ||
| 614 | void (*start) (struct perf_event *event, int flags); | ||
| 615 | void (*stop) (struct perf_event *event, int flags); | ||
| 616 | |||
| 617 | /* | ||
| 618 | * Updates the counter value of the event. | ||
| 619 | */ | ||
| 620 | void (*read) (struct perf_event *event); | ||
| 621 | |||
| 622 | /* | ||
| 623 | * Group events scheduling is treated as a transaction, add | ||
| 624 | * group events as a whole and perform one schedulability test. | ||
| 625 | * If the test fails, roll back the whole group | ||
| 626 | * | ||
| 627 | * Start the transaction, after this ->add() doesn't need to | ||
| 628 | * do schedulability tests. | ||
| 629 | */ | ||
| 630 | void (*start_txn) (struct pmu *pmu); /* optional */ | ||
| 631 | /* | ||
| 632 | * If ->start_txn() disabled the ->add() schedulability test | ||
| 584 | * then ->commit_txn() is required to perform one. On success | 633 | * then ->commit_txn() is required to perform one. On success |
| 585 | * the transaction is closed. On error the transaction is kept | 634 | * the transaction is closed. On error the transaction is kept |
| 586 | * open until ->cancel_txn() is called. | 635 | * open until ->cancel_txn() is called. |
| 587 | */ | 636 | */ |
| 588 | int (*commit_txn) (const struct pmu *pmu); | 637 | int (*commit_txn) (struct pmu *pmu); /* optional */ |
| 589 | /* | 638 | /* |
| 590 | * Will cancel the transaction, assumes ->disable() is called for | 639 | * Will cancel the transaction, assumes ->del() is called |
| 591 | * each successfull ->enable() during the transaction. | 640 | * for each successfull ->add() during the transaction. |
| 592 | */ | 641 | */ |
| 593 | void (*cancel_txn) (const struct pmu *pmu); | 642 | void (*cancel_txn) (struct pmu *pmu); /* optional */ |
| 594 | }; | 643 | }; |
| 595 | 644 | ||
| 596 | /** | 645 | /** |
@@ -631,11 +680,6 @@ struct perf_buffer {
        void                            *data_pages[0];
 };
 
-struct perf_pending_entry {
-       struct perf_pending_entry *next;
-       void (*func)(struct perf_pending_entry *);
-};
-
 struct perf_sample_data;
 
 typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
@@ -656,6 +700,7 @@ struct swevent_hlist {
 
 #define PERF_ATTACH_CONTEXT    0x01
 #define PERF_ATTACH_GROUP      0x02
+#define PERF_ATTACH_TASK       0x04
 
 /**
  * struct perf_event - performance event kernel representation:
@@ -669,7 +714,7 @@ struct perf_event {
        int                             nr_siblings;
        int                             group_flags;
        struct perf_event               *group_leader;
-       const struct pmu                *pmu;
+       struct pmu                      *pmu;
 
        enum perf_event_active_state    state;
        unsigned int                    attach_state;
@@ -743,7 +788,7 @@ struct perf_event {
        int                             pending_wakeup;
        int                             pending_kill;
        int                             pending_disable;
-       struct perf_pending_entry       pending;
+       struct irq_work                 pending;
 
        atomic_t                        event_limit;
 
@@ -763,12 +808,19 @@ struct perf_event {
 #endif /* CONFIG_PERF_EVENTS */
 };
 
+enum perf_event_context_type {
+       task_context,
+       cpu_context,
+};
+
 /**
  * struct perf_event_context - event context structure
  *
  * Used as a container for task events and CPU events as well:
  */
 struct perf_event_context {
+       enum perf_event_context_type    type;
+       struct pmu                      *pmu;
        /*
         * Protect the states of the events in the list,
         * nr_active, and the list:
@@ -808,6 +860,12 @@ struct perf_event_context {
        struct rcu_head                 rcu_head;
 };
 
+/*
+ * Number of contexts where an event can trigger:
+ *     task, softirq, hardirq, nmi.
+ */
+#define PERF_NR_CONTEXTS       4
+
 /**
  * struct perf_event_cpu_context - per cpu event context structure
  */
@@ -815,18 +873,9 @@ struct perf_cpu_context {
        struct perf_event_context       ctx;
        struct perf_event_context       *task_ctx;
        int                             active_oncpu;
-       int                             max_pertask;
        int                             exclusive;
-       struct swevent_hlist            *swevent_hlist;
-       struct mutex                    hlist_mutex;
-       int                             hlist_refcount;
-
-       /*
-        * Recursion avoidance:
-        *
-        * task, softirq, irq, nmi context
-        */
-       int                             recursion[4];
+       struct list_head                rotation_list;
+       int                             jiffies_interval;
 };
 
 struct perf_output_handle {
@@ -842,26 +891,34 @@ struct perf_output_handle {
 
 #ifdef CONFIG_PERF_EVENTS
 
-/*
- * Set by architecture code:
- */
-extern int perf_max_events;
+extern int perf_pmu_register(struct pmu *pmu);
+extern void perf_pmu_unregister(struct pmu *pmu);
+
+extern int perf_num_counters(void);
+extern const char *perf_pmu_name(void);
+extern void __perf_event_task_sched_in(struct task_struct *task);
+extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
 
-extern const struct pmu *hw_perf_event_init(struct perf_event *event);
+extern atomic_t perf_task_events;
+
+static inline void perf_event_task_sched_in(struct task_struct *task)
+{
+       COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
+}
+
+static inline
+void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+{
+       COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
+}
 
-extern void perf_event_task_sched_in(struct task_struct *task);
-extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
-extern void perf_event_task_tick(struct task_struct *task);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
-extern void set_perf_event_pending(void);
-extern void perf_event_do_pending(void);
+extern void perf_event_delayed_put(struct task_struct *task);
 extern void perf_event_print_debug(void);
-extern void __perf_disable(void);
-extern bool __perf_enable(void);
-extern void perf_disable(void);
-extern void perf_enable(void);
+extern void perf_pmu_disable(struct pmu *pmu);
+extern void perf_pmu_enable(struct pmu *pmu);
 extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
 extern void perf_event_update_userpage(struct perf_event *event);
@@ -869,7 +926,7 @@ extern int perf_event_release_kernel(struct perf_event *event);
 extern struct perf_event *
 perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                int cpu,
-                               pid_t pid,
+                               struct task_struct *task,
                                perf_overflow_handler_t callback);
 extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);
@@ -920,14 +977,7 @@ extern int perf_event_overflow(struct perf_event *event, int nmi,
  */
 static inline int is_software_event(struct perf_event *event)
 {
-       switch (event->attr.type) {
-       case PERF_TYPE_SOFTWARE:
-       case PERF_TYPE_TRACEPOINT:
-       /* for now the breakpoint stuff also works as software event */
-       case PERF_TYPE_BREAKPOINT:
-               return 1;
-       }
-       return 0;
+       return event->pmu->task_ctx_nr == perf_sw_context;
 }
 
 extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
@@ -954,18 +1004,20 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
        perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
 }
 
-static inline void
+static __always_inline void
 perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 {
-       if (atomic_read(&perf_swevent_enabled[event_id])) {
-               struct pt_regs hot_regs;
-
-               if (!regs) {
-                       perf_fetch_caller_regs(&hot_regs);
-                       regs = &hot_regs;
-               }
-               __perf_sw_event(event_id, nr, nmi, regs, addr);
+       struct pt_regs hot_regs;
+
+       JUMP_LABEL(&perf_swevent_enabled[event_id], have_event);
+       return;
+
+have_event:
+       if (!regs) {
+               perf_fetch_caller_regs(&hot_regs);
+               regs = &hot_regs;
        }
+       __perf_sw_event(event_id, nr, nmi, regs, addr);
 }
 
 extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -976,7 +1028,21 @@ extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks
 extern void perf_event_comm(struct task_struct *tsk);
 extern void perf_event_fork(struct task_struct *tsk);
 
-extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+/* Callchains */
+DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
+
+extern void perf_callchain_user(struct perf_callchain_entry *entry,
+                               struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
+                                 struct pt_regs *regs);
+
+
+static inline void
+perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+{
+       if (entry->nr < PERF_MAX_STACK_DEPTH)
+               entry->ip[entry->nr++] = ip;
+}
 
 extern int sysctl_perf_event_paranoid;
 extern int sysctl_perf_event_mlock;
@@ -1019,21 +1085,18 @@ extern int perf_swevent_get_recursion_context(void);
 extern void perf_swevent_put_recursion_context(int rctx);
 extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
+extern void perf_event_task_tick(void);
 #else
 static inline void
 perf_event_task_sched_in(struct task_struct *task)                     { }
 static inline void
 perf_event_task_sched_out(struct task_struct *task,
                          struct task_struct *next)                     { }
-static inline void
-perf_event_task_tick(struct task_struct *task)                         { }
 static inline int perf_event_init_task(struct task_struct *child)      { return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)     { }
 static inline void perf_event_free_task(struct task_struct *task)      { }
-static inline void perf_event_do_pending(void)                         { }
+static inline void perf_event_delayed_put(struct task_struct *task)    { }
 static inline void perf_event_print_debug(void)                        { }
-static inline void perf_disable(void)                                  { }
-static inline void perf_enable(void)                                   { }
 static inline int perf_event_task_disable(void)                        { return -EINVAL; }
 static inline int perf_event_task_enable(void)                         { return -EINVAL; }
 
@@ -1056,6 +1119,7 @@ static inline int perf_swevent_get_recursion_context(void) { return -1; }
 static inline void perf_swevent_put_recursion_context(int rctx)        { }
 static inline void perf_event_enable(struct perf_event *event)         { }
 static inline void perf_event_disable(struct perf_event *event)        { }
+static inline void perf_event_task_tick(void)                          { }
 #endif
 
 #define perf_output_put(handle, x) \
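Taken together, the reworked struct pmu splits event scheduling into add()/del() plus start()/stop(), with hw_perf_event::state tracking the PERF_HES_* flags. A sketch, not part of this patch, of a trivial software-style PMU written against that interface; every name here is illustrative and the counting logic is elided:

/* Illustrative-only skeleton of a PMU using the new callbacks above. */
static void nop_read(struct perf_event *event)
{
       /* nothing to fold into event->count for this toy counter */
}

static void nop_start(struct perf_event *event, int flags)
{
       event->hw.state = 0;                    /* counting */
}

static void nop_stop(struct perf_event *event, int flags)
{
       if (!(event->hw.state & PERF_HES_STOPPED))
               event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int nop_add(struct perf_event *event, int flags)
{
       event->hw.state = PERF_HES_STOPPED;     /* scheduled in, not counting */
       if (flags & PERF_EF_START)
               nop_start(event, PERF_EF_RELOAD);
       return 0;
}

static void nop_del(struct perf_event *event, int flags)
{
       nop_stop(event, PERF_EF_UPDATE);
}

static int nop_event_init(struct perf_event *event)
{
       if (event->attr.type != PERF_TYPE_SOFTWARE)
               return -ENOENT;                 /* not ours, try the next PMU */
       return 0;
}

static struct pmu nop_pmu = {
       .task_ctx_nr    = perf_sw_context,
       .event_init     = nop_event_init,
       .add            = nop_add,
       .del            = nop_del,
       .start          = nop_start,
       .stop           = nop_stop,
       .read           = nop_read,
};

/* registered once at init time: perf_pmu_register(&nop_pmu); */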
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e18473f0eb78..61b4ecf1da50 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1160,6 +1160,13 @@
 
 struct rcu_node;
 
+enum perf_event_task_context {
+       perf_invalid_context = -1,
+       perf_hw_context = 0,
+       perf_sw_context,
+       perf_nr_task_contexts,
+};
+
 struct task_struct {
        volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
        void *stack;
@@ -1433,7 +1440,7 @@
        struct futex_pi_state *pi_state_cache;
 #endif
 #ifdef CONFIG_PERF_EVENTS
-       struct perf_event_context *perf_event_ctxp;
+       struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
        struct mutex perf_event_mutex;
        struct list_head perf_event_list;
 #endif
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 6b524a0d02e4..1808960c5059 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -126,8 +126,8 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
 
 #else  /* CONFIG_STOP_MACHINE && CONFIG_SMP */
 
-static inline int stop_machine(int (*fn)(void *), void *data,
-                              const struct cpumask *cpus)
+static inline int __stop_machine(int (*fn)(void *), void *data,
                               const struct cpumask *cpus)
 {
        int ret;
        local_irq_disable();
@@ -136,5 +136,11 @@ static inline int stop_machine(int (*fn)(void *), void *data,
        return ret;
 }
 
+static inline int stop_machine(int (*fn)(void *), void *data,
+                              const struct cpumask *cpus)
+{
+       return __stop_machine(fn, data, cpus);
+}
+
 #endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */
 #endif /* _LINUX_STOP_MACHINE */
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 103d1b61aacb..a4a90b6726ce 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -17,6 +17,7 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/rcupdate.h>
+#include <linux/jump_label.h>
 
 struct module;
 struct tracepoint;
@@ -145,7 +146,9 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
        extern struct tracepoint __tracepoint_##name;                   \
        static inline void trace_##name(proto)                          \
        {                                                               \
-               if (unlikely(__tracepoint_##name.state))                \
+               JUMP_LABEL(&__tracepoint_##name.state, do_trace);       \
+               return;                                                 \
+do_trace:                                                              \
                __DO_TRACE(&__tracepoint_##name,                        \
                        TP_PROTO(data_proto),                           \
                        TP_ARGS(data_args));                            \
