Diffstat (limited to 'include')
-rw-r--r--	include/linux/irq_work.h	20
-rw-r--r--	include/linux/jump_label.h	18
-rw-r--r--	include/linux/jump_label_ref.h	44
-rw-r--r--	include/linux/perf_event.h	57
4 files changed, 115 insertions, 24 deletions
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
new file mode 100644
index 000000000000..4fa09d4d0b71
--- /dev/null
+++ b/include/linux/irq_work.h
@@ -0,0 +1,20 @@
+#ifndef _LINUX_IRQ_WORK_H
+#define _LINUX_IRQ_WORK_H
+
+struct irq_work {
+	struct irq_work *next;
+	void (*func)(struct irq_work *);
+};
+
+static inline
+void init_irq_work(struct irq_work *entry, void (*func)(struct irq_work *))
+{
+	entry->next = NULL;
+	entry->func = func;
+}
+
+bool irq_work_queue(struct irq_work *entry);
+void irq_work_run(void);
+void irq_work_sync(struct irq_work *entry);
+
+#endif /* _LINUX_IRQ_WORK_H */
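
For orientation, a minimal sketch of how this interface is intended to be used; the callback and call sites below are hypothetical and not part of the patch. An entry is initialized once, queued from NMI/hardirq context, and the queued callbacks are later run from a regular hardirq context via irq_work_run().

	/* Hypothetical user of the irq_work API declared above. */
	#include <linux/irq_work.h>

	static void my_deferred_func(struct irq_work *work)
	{
		/* Runs later, from a normal hardirq context. */
	}

	static struct irq_work my_work;

	static void my_setup(void)
	{
		init_irq_work(&my_work, my_deferred_func);
	}

	static void my_nmi_handler(void)
	{
		/* Only links the entry and requests a later callback. */
		irq_work_queue(&my_work);
	}
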
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index b72cd9f92c2e..b67cb180e6e9 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -25,10 +25,10 @@ extern void jump_label_update(unsigned long key, enum jump_label_type type);
 extern void jump_label_apply_nops(struct module *mod);
 extern int jump_label_text_reserved(void *start, void *end);
 
-#define enable_jump_label(key) \
+#define jump_label_enable(key) \
 	jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE);
 
-#define disable_jump_label(key) \
+#define jump_label_disable(key) \
 	jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE);
 
 #else
@@ -39,12 +39,12 @@ do { \
 		goto label; \
 } while (0)
 
-#define enable_jump_label(cond_var) \
+#define jump_label_enable(cond_var) \
 do { \
 	*(cond_var) = 1; \
 } while (0)
 
-#define disable_jump_label(cond_var) \
+#define jump_label_disable(cond_var) \
 do { \
 	*(cond_var) = 0; \
 } while (0)
@@ -61,4 +61,14 @@ static inline int jump_label_text_reserved(void *start, void *end)
 
 #endif
 
+#define COND_STMT(key, stmt) \
+do { \
+	__label__ jl_enabled; \
+	JUMP_LABEL(key, jl_enabled); \
+	if (0) { \
+jl_enabled: \
+		stmt; \
+	} \
+} while (0)
+
 #endif
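
COND_STMT() turns the wrapped statement into a skipped-by-default path: when jump labels are available the straight-line code is just the (patchable) JUMP_LABEL site, and only enabling the key routes execution to the statement. A rough, illustrative use, mirroring how perf_event.h uses it below (the names here are made up):

	static atomic_t my_key;

	static inline void maybe_trace(void)
	{
		/* my_trace_slowpath() only runs once my_key has been enabled. */
		COND_STMT(&my_key, my_trace_slowpath());
	}
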
diff --git a/include/linux/jump_label_ref.h b/include/linux/jump_label_ref.h
new file mode 100644
index 000000000000..e5d012ad92c6
--- /dev/null
+++ b/include/linux/jump_label_ref.h
@@ -0,0 +1,44 @@
+#ifndef _LINUX_JUMP_LABEL_REF_H
+#define _LINUX_JUMP_LABEL_REF_H
+
+#include <linux/jump_label.h>
+#include <asm/atomic.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+static inline void jump_label_inc(atomic_t *key)
+{
+	if (atomic_add_return(1, key) == 1)
+		jump_label_enable(key);
+}
+
+static inline void jump_label_dec(atomic_t *key)
+{
+	if (atomic_dec_and_test(key))
+		jump_label_disable(key);
+}
+
+#else /* !HAVE_JUMP_LABEL */
+
+static inline void jump_label_inc(atomic_t *key)
+{
+	atomic_inc(key);
+}
+
+static inline void jump_label_dec(atomic_t *key)
+{
+	atomic_dec(key);
+}
+
+#undef JUMP_LABEL
+#define JUMP_LABEL(key, label) \
+do { \
+	if (unlikely(__builtin_choose_expr( \
+	      __builtin_types_compatible_p(typeof(key), atomic_t *), \
+	      atomic_read((atomic_t *)(key)), *(key)))) \
+		goto label; \
+} while (0)
+
+#endif /* HAVE_JUMP_LABEL */
+
+#endif /* _LINUX_JUMP_LABEL_REF_H */
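
This pair turns a jump-label key into a reference count: the first jump_label_inc() enables the branch, the last jump_label_dec() disables it, and without HAVE_JUMP_LABEL everything degrades to a plain atomic counter that the fallback JUMP_LABEL() tests at runtime. A hedged sketch of the intended pattern (names are illustrative):

	static atomic_t my_events;		/* key doubles as a refcount */

	static void my_event_init(void)
	{
		jump_label_inc(&my_events);	/* first user flips the branch on */
	}

	static void my_event_destroy(void)
	{
		jump_label_dec(&my_events);	/* last user flips it back off */
	}
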
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index a9227e985207..057bf22a8323 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -486,6 +486,8 @@ struct perf_guest_info_callbacks {
 #include <linux/workqueue.h>
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
+#include <linux/irq_work.h>
+#include <linux/jump_label_ref.h>
 #include <asm/atomic.h>
 #include <asm/local.h>
 
@@ -535,6 +537,12 @@ struct hw_perf_event {
 		struct { /* breakpoint */
 			struct arch_hw_breakpoint info;
 			struct list_head bp_list;
+			/*
+			 * Crufty hack to avoid the chicken and egg
+			 * problem hw_breakpoint has with context
+			 * creation and event initialization.
+			 */
+			struct task_struct *bp_target;
 		};
 #endif
 	};
@@ -672,11 +680,6 @@ struct perf_buffer {
 	void *data_pages[0];
 };
 
-struct perf_pending_entry {
-	struct perf_pending_entry *next;
-	void (*func)(struct perf_pending_entry *);
-};
-
 struct perf_sample_data;
 
 typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
@@ -697,6 +700,7 @@ struct swevent_hlist {
 
 #define PERF_ATTACH_CONTEXT 0x01
 #define PERF_ATTACH_GROUP 0x02
+#define PERF_ATTACH_TASK 0x04
 
 /**
  * struct perf_event - performance event kernel representation:
@@ -784,7 +788,7 @@ struct perf_event {
 	int pending_wakeup;
 	int pending_kill;
 	int pending_disable;
-	struct perf_pending_entry pending;
+	struct irq_work pending;
 
 	atomic_t event_limit;
 
@@ -892,14 +896,26 @@ extern void perf_pmu_unregister(struct pmu *pmu);
 
 extern int perf_num_counters(void);
 extern const char *perf_pmu_name(void);
-extern void perf_event_task_sched_in(struct task_struct *task);
-extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
+extern void __perf_event_task_sched_in(struct task_struct *task);
+extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
+
+extern atomic_t perf_task_events;
+
+static inline void perf_event_task_sched_in(struct task_struct *task)
+{
+	COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
+}
+
+static inline
+void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+{
+	COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
+}
+
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
 extern void perf_event_delayed_put(struct task_struct *task);
-extern void set_perf_event_pending(void);
-extern void perf_event_do_pending(void);
 extern void perf_event_print_debug(void);
 extern void perf_pmu_disable(struct pmu *pmu);
 extern void perf_pmu_enable(struct pmu *pmu);
@@ -988,18 +1004,20 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
 }
 
-static inline void
+static __always_inline void
 perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 {
-	if (atomic_read(&perf_swevent_enabled[event_id])) {
-		struct pt_regs hot_regs;
-
-		if (!regs) {
-			perf_fetch_caller_regs(&hot_regs);
-			regs = &hot_regs;
-		}
-		__perf_sw_event(event_id, nr, nmi, regs, addr);
+	struct pt_regs hot_regs;
+
+	JUMP_LABEL(&perf_swevent_enabled[event_id], have_event);
+	return;
+
+have_event:
+	if (!regs) {
+		perf_fetch_caller_regs(&hot_regs);
+		regs = &hot_regs;
 	}
+	__perf_sw_event(event_id, nr, nmi, regs, addr);
 }
 
 extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -1078,7 +1096,6 @@ static inline int perf_event_init_task(struct task_struct *child) { return 0; }
 static inline void perf_event_exit_task(struct task_struct *child) { }
 static inline void perf_event_free_task(struct task_struct *task) { }
 static inline void perf_event_delayed_put(struct task_struct *task) { }
-static inline void perf_event_do_pending(void) { }
 static inline void perf_event_print_debug(void) { }
 static inline int perf_event_task_disable(void) { return -EINVAL; }
 static inline int perf_event_task_enable(void) { return -EINVAL; }