Diffstat (limited to 'kernel')
-rw-r--r--   kernel/hw_breakpoint.c | 74
1 file changed, 45 insertions(+), 29 deletions(-)
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index b600fc27f161..02b492504a5a 100644
--- kernel/hw_breakpoint.c
+++ kernel/hw_breakpoint.c
@@ -83,15 +83,51 @@ static unsigned int max_task_bp_pinned(int cpu)
 	return 0;
 }
 
+static int task_bp_pinned(struct task_struct *tsk)
+{
+	struct perf_event_context *ctx = tsk->perf_event_ctxp;
+	struct list_head *list;
+	struct perf_event *bp;
+	unsigned long flags;
+	int count = 0;
+
+	if (WARN_ONCE(!ctx, "No perf context for this task"))
+		return 0;
+
+	list = &ctx->event_list;
+
+	spin_lock_irqsave(&ctx->lock, flags);
+
+	/*
+	 * The current breakpoint counter is not included in the list
+	 * at the open() callback time
+	 */
+	list_for_each_entry(bp, list, event_entry) {
+		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
+			count++;
+	}
+
+	spin_unlock_irqrestore(&ctx->lock, flags);
+
+	return count;
+}
+
 /*
  * Report the number of pinned/un-pinned breakpoints we have in
  * a given cpu (cpu > -1) or in all of them (cpu = -1).
  */
-static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
+static void
+fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
 {
+	int cpu = bp->cpu;
+	struct task_struct *tsk = bp->ctx->task;
+
 	if (cpu >= 0) {
 		slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
-		slots->pinned += max_task_bp_pinned(cpu);
+		if (!tsk)
+			slots->pinned += max_task_bp_pinned(cpu);
+		else
+			slots->pinned += task_bp_pinned(tsk);
 		slots->flexible = per_cpu(nr_bp_flexible, cpu);
 
 		return;
@@ -101,7 +137,10 @@ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
 		unsigned int nr;
 
 		nr = per_cpu(nr_cpu_bp_pinned, cpu);
-		nr += max_task_bp_pinned(cpu);
+		if (!tsk)
+			nr += max_task_bp_pinned(cpu);
+		else
+			nr += task_bp_pinned(tsk);
 
 		if (nr > slots->pinned)
 			slots->pinned = nr;
@@ -118,33 +157,10 @@ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
  */
 static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
 {
-	int count = 0;
-	struct perf_event *bp;
-	struct perf_event_context *ctx = tsk->perf_event_ctxp;
 	unsigned int *tsk_pinned;
-	struct list_head *list;
-	unsigned long flags;
-
-	if (WARN_ONCE(!ctx, "No perf context for this task"))
-		return;
-
-	list = &ctx->event_list;
-
-	spin_lock_irqsave(&ctx->lock, flags);
-
-	/*
-	 * The current breakpoint counter is not included in the list
-	 * at the open() callback time
-	 */
-	list_for_each_entry(bp, list, event_entry) {
-		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
-			count++;
-	}
-
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	int count = 0;
 
-	if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list"))
-		return;
+	count = task_bp_pinned(tsk);
 
 	tsk_pinned = per_cpu(task_bp_pinned, cpu);
 	if (enable) {
@@ -233,7 +249,7 @@ int reserve_bp_slot(struct perf_event *bp)
 
 	mutex_lock(&nr_bp_mutex);
 
-	fetch_bp_busy_slots(&slots, bp->cpu);
+	fetch_bp_busy_slots(&slots, bp);
 
 	/* Flexible counters need to keep at least one slot */
 	if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
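
For readers following the slot-accounting change outside the kernel tree, below is a minimal user-space sketch of the policy the patch introduces: a task-bound breakpoint only competes with breakpoints already owned by the same task (task_bp_pinned()), while a cpu-wide breakpoint must still assume the busiest task on that CPU (max_task_bp_pinned()). This is not kernel code; the two-CPU setup, the HBP_NUM value of 4, the sample counter values, and the helper names fetch_busy_slots() and task_pinned() are stand-ins chosen for illustration. Only the !tsk / tsk branching mirrors the diff, and the flexible-counter handling is simplified to a per-CPU maximum.

/*
 * User-space model of the reworked breakpoint slot accounting.
 * Assumptions (not from the patch): NR_CPUS = 2, HBP_NUM = 4, and the
 * sample per-CPU counters below.
 */
#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 2
#define HBP_NUM 4				/* slots per CPU; arch-dependent in reality */

struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Stand-ins for the kernel's per-CPU counters. */
static unsigned int nr_cpu_bp_pinned[NR_CPUS] = { 1, 0 };
static unsigned int nr_bp_flexible[NR_CPUS]   = { 0, 1 };
static unsigned int max_task_pinned[NR_CPUS]  = { 3, 2 };	/* busiest task per CPU */

/* Stand-in for task_bp_pinned(tsk): breakpoints already bound to this task. */
static int task_pinned(int task_bp_count)
{
	return task_bp_count;
}

/*
 * Mirrors the branching of the new fetch_bp_busy_slots(): task-bound
 * breakpoints count only their own task's breakpoints, cpu-wide ones
 * count the worst task on the CPU.
 */
static void fetch_busy_slots(struct bp_busy_slots *slots, int cpu,
			     bool task_bound, int task_bp_count)
{
	unsigned int nr;
	int c;

	slots->pinned = slots->flexible = 0;

	for (c = 0; c < NR_CPUS; c++) {
		if (cpu >= 0 && c != cpu)
			continue;

		nr = nr_cpu_bp_pinned[c];
		if (!task_bound)
			nr += max_task_pinned[c];
		else
			nr += task_pinned(task_bp_count);

		if (nr > slots->pinned)
			slots->pinned = nr;
		if (nr_bp_flexible[c] > slots->flexible)
			slots->flexible = nr_bp_flexible[c];
	}
}

int main(void)
{
	struct bp_busy_slots slots;

	/* A task-bound breakpoint on CPU 0, for a task that already owns one. */
	fetch_busy_slots(&slots, 0, true, 1);
	printf("task-bound: pinned=%u flexible=%u -> %s\n",
	       slots.pinned, slots.flexible,
	       slots.pinned + !!slots.flexible == HBP_NUM ? "reject" : "ok");

	/* A cpu-wide breakpoint on CPU 0 must assume the busiest task. */
	fetch_busy_slots(&slots, 0, false, 0);
	printf("cpu-wide:   pinned=%u flexible=%u -> %s\n",
	       slots.pinned, slots.flexible,
	       slots.pinned + !!slots.flexible == HBP_NUM ? "reject" : "ok");

	return 0;
}

With these made-up numbers the cpu-wide request hits the HBP_NUM ceiling and is rejected, while the task-bound request on the same CPU still fits, which is the relaxation the branching in the patch provides.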