about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2015-03-05 16:10:19 -0500
committerIngo Molnar <mingo@kernel.org>2015-03-23 05:58:04 -0400
commit50f16a8bf9d7a92c437ed1867d0f7e1dc6a9aca9 (patch)
treeb3794dc4959e7dedbdddfb1e428c4fe67126e45b /kernel
parent4e16ed99416ef569a89782a7234f95007919fadd (diff)
perf: Remove type specific target pointers
The only reason CQM had to use a hard-coded pmu type was so it could use cqm_target in hw_perf_event. Do away with the {tp,bp,cqm}_target pointers and provide a non type specific one. This allows us to do away with that silly pmu type as well. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Vince Weaver <vince@deater.net> Cc: acme@kernel.org Cc: acme@redhat.com Cc: hpa@zytor.com Cc: jolsa@redhat.com Cc: kanaka.d.juvva@intel.com Cc: matt.fleming@intel.com Cc: tglx@linutronix.de Cc: torvalds@linux-foundation.org Cc: vikas.shivappa@linux.intel.com Link: http://lkml.kernel.org/r/20150305211019.GU21418@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/events/core.c14
-rw-r--r--kernel/events/hw_breakpoint.c8
-rw-r--r--kernel/trace/trace_uprobe.c10
3 files changed, 13 insertions, 19 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 71109a045450..525062b6fba1 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7171,18 +7171,12 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
7171 7171
7172 if (task) { 7172 if (task) {
7173 event->attach_state = PERF_ATTACH_TASK; 7173 event->attach_state = PERF_ATTACH_TASK;
7174
7175 if (attr->type == PERF_TYPE_TRACEPOINT)
7176 event->hw.tp_target = task;
7177#ifdef CONFIG_HAVE_HW_BREAKPOINT
7178 /* 7174 /*
7179 * hw_breakpoint is a bit difficult here.. 7175 * XXX pmu::event_init needs to know what task to account to
7176 * and we cannot use the ctx information because we need the
7177 * pmu before we get a ctx.
7180 */ 7178 */
7181 else if (attr->type == PERF_TYPE_BREAKPOINT) 7179 event->hw.target = task;
7182 event->hw.bp_target = task;
7183#endif
7184 else if (attr->type == PERF_TYPE_INTEL_CQM)
7185 event->hw.cqm_target = task;
7186 } 7180 }
7187 7181
7188 if (!overflow_handler && parent_event) { 7182 if (!overflow_handler && parent_event) {
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 9803a6600d49..92ce5f4ccc26 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -116,12 +116,12 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
116 */ 116 */
117static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type) 117static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
118{ 118{
119 struct task_struct *tsk = bp->hw.bp_target; 119 struct task_struct *tsk = bp->hw.target;
120 struct perf_event *iter; 120 struct perf_event *iter;
121 int count = 0; 121 int count = 0;
122 122
123 list_for_each_entry(iter, &bp_task_head, hw.bp_list) { 123 list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
124 if (iter->hw.bp_target == tsk && 124 if (iter->hw.target == tsk &&
125 find_slot_idx(iter) == type && 125 find_slot_idx(iter) == type &&
126 (iter->cpu < 0 || cpu == iter->cpu)) 126 (iter->cpu < 0 || cpu == iter->cpu))
127 count += hw_breakpoint_weight(iter); 127 count += hw_breakpoint_weight(iter);
@@ -153,7 +153,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
153 int nr; 153 int nr;
154 154
155 nr = info->cpu_pinned; 155 nr = info->cpu_pinned;
156 if (!bp->hw.bp_target) 156 if (!bp->hw.target)
157 nr += max_task_bp_pinned(cpu, type); 157 nr += max_task_bp_pinned(cpu, type);
158 else 158 else
159 nr += task_bp_pinned(cpu, bp, type); 159 nr += task_bp_pinned(cpu, bp, type);
@@ -210,7 +210,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
210 weight = -weight; 210 weight = -weight;
211 211
212 /* Pinned counter cpu profiling */ 212 /* Pinned counter cpu profiling */
213 if (!bp->hw.bp_target) { 213 if (!bp->hw.target) {
214 get_bp_info(bp->cpu, type)->cpu_pinned += weight; 214 get_bp_info(bp->cpu, type)->cpu_pinned += weight;
215 return; 215 return;
216 } 216 }
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index b11441321e7a..93fdc7791eaa 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -1005,7 +1005,7 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1005 return true; 1005 return true;
1006 1006
1007 list_for_each_entry(event, &filter->perf_events, hw.tp_list) { 1007 list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
1008 if (event->hw.tp_target->mm == mm) 1008 if (event->hw.target->mm == mm)
1009 return true; 1009 return true;
1010 } 1010 }
1011 1011
@@ -1015,7 +1015,7 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1015static inline bool 1015static inline bool
1016uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event) 1016uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
1017{ 1017{
1018 return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm); 1018 return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
1019} 1019}
1020 1020
1021static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event) 1021static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
@@ -1023,10 +1023,10 @@ static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
1023 bool done; 1023 bool done;
1024 1024
1025 write_lock(&tu->filter.rwlock); 1025 write_lock(&tu->filter.rwlock);
1026 if (event->hw.tp_target) { 1026 if (event->hw.target) {
1027 list_del(&event->hw.tp_list); 1027 list_del(&event->hw.tp_list);
1028 done = tu->filter.nr_systemwide || 1028 done = tu->filter.nr_systemwide ||
1029 (event->hw.tp_target->flags & PF_EXITING) || 1029 (event->hw.target->flags & PF_EXITING) ||
1030 uprobe_filter_event(tu, event); 1030 uprobe_filter_event(tu, event);
1031 } else { 1031 } else {
1032 tu->filter.nr_systemwide--; 1032 tu->filter.nr_systemwide--;
@@ -1046,7 +1046,7 @@ static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
1046 int err; 1046 int err;
1047 1047
1048 write_lock(&tu->filter.rwlock); 1048 write_lock(&tu->filter.rwlock);
1049 if (event->hw.tp_target) { 1049 if (event->hw.target) {
1050 /* 1050 /*
1051 * event->parent != NULL means copy_process(), we can avoid 1051 * event->parent != NULL means copy_process(), we can avoid
1052 * uprobe_apply(). current->mm must be probed and we can rely 1052 * uprobe_apply(). current->mm must be probed and we can rely