Diffstat (limited to 'kernel')
 kernel/hw_breakpoint.c | 63 +++++++++++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 45 insertions(+), 18 deletions(-)
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 8ead1345e33b..974498b858fc 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -77,6 +77,11 @@ struct bp_busy_slots {
 /* Serialize accesses to the above constraints */
 static DEFINE_MUTEX(nr_bp_mutex);
 
+__weak int hw_breakpoint_weight(struct perf_event *bp)
+{
+	return 1;
+}
+
 static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
 {
 	if (bp->attr.bp_type & HW_BREAKPOINT_RW)
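The __weak default above gives every breakpoint a slot weight of 1, so the accounting is unchanged on architectures that don't override it. An architecture whose breakpoints can occupy several debug slots can provide its own strong definition. A minimal sketch of such an override, under the purely hypothetical assumption that one slot covers 8 bytes:

	/* Hypothetical arch override (not part of this patch):
	 * charge one slot per 8 bytes covered by the breakpoint. */
	int hw_breakpoint_weight(struct perf_event *bp)
	{
		return (bp->attr.bp_len + 7) / 8;
	}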
@@ -124,7 +129,7 @@ static int task_bp_pinned(struct task_struct *tsk, enum bp_type_idx type)
 	list_for_each_entry(bp, list, event_entry) {
 		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
 			if (find_slot_idx(bp) == type)
-				count++;
+				count += hw_breakpoint_weight(bp);
 	}
 
 	raw_spin_unlock_irqrestore(&ctx->lock, flags);
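With this change, task_bp_pinned() reports the number of slots consumed by a task's pinned breakpoints of the given type rather than the number of events: two weight-1 breakpoints plus one weight-2 breakpoint, for instance, count as 4 slots, not 3 events.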
@@ -174,25 +179,40 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 }
 
 /*
+ * For now, continue to consider flexible as pinned, until we can
+ * ensure no flexible event can ever be scheduled before a pinned event
+ * in a same cpu.
+ */
+static void
+fetch_this_slot(struct bp_busy_slots *slots, int weight)
+{
+	slots->pinned += weight;
+}
+
+/*
  * Add a pinned breakpoint for the given task in our constraint table
  */
 static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable,
-				enum bp_type_idx type)
+				enum bp_type_idx type, int weight)
 {
 	unsigned int *tsk_pinned;
-	int count = 0;
+	int old_count = 0;
+	int old_idx = 0;
+	int idx = 0;
 
-	count = task_bp_pinned(tsk, type);
+	old_count = task_bp_pinned(tsk, type);
+	old_idx = old_count - 1;
+	idx = old_idx + weight;
 
 	tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
 	if (enable) {
-		tsk_pinned[count]++;
-		if (count > 0)
-			tsk_pinned[count-1]--;
+		tsk_pinned[idx]++;
+		if (old_count > 0)
+			tsk_pinned[old_idx]--;
 	} else {
-		tsk_pinned[count]--;
-		if (count > 0)
-			tsk_pinned[count-1]++;
+		tsk_pinned[idx]--;
+		if (old_count > 0)
+			tsk_pinned[old_idx]++;
 	}
 }
 
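In the updated toggle_bp_task_slot(), the per-cpu array tsk_pinned[] acts as a histogram: tsk_pinned[n] counts how many tasks currently pin n+1 slots of this type. Enabling a breakpoint of a given weight therefore moves its task from the bucket for its old slot total into the bucket for the new one, and disabling reverses the move. A standalone sketch of that bucket update, written as ordinary userspace C rather than kernel code:

	#include <stdio.h>

	int main(void)
	{
		/* tsk_pinned[n]: number of tasks pinning n+1 slots */
		unsigned int tsk_pinned[8] = {0};
		int old_count = 2;	/* the task already pins 2 slots   */
		int weight = 2;		/* the new breakpoint costs 2 more */
		int old_idx = old_count - 1;
		int idx = old_idx + weight;

		tsk_pinned[old_idx] = 1;	/* task sits in the 2-slot bucket */

		/* enable: same arithmetic as the kernel code above */
		tsk_pinned[idx]++;
		if (old_count > 0)
			tsk_pinned[old_idx]--;

		/* now tsk_pinned[3] == 1: the task pins 4 slots total */
		for (int i = 0; i < 8; i++)
			printf("tsk_pinned[%d] = %u\n", i, tsk_pinned[i]);
		return 0;
	}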
@@ -200,7 +220,8 @@ static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable,
  * Add/remove the given breakpoint in our constraint table
  */
 static void
-toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type)
+toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
+	       int weight)
 {
 	int cpu = bp->cpu;
 	struct task_struct *tsk = bp->ctx->task;
@@ -208,20 +229,20 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type)
 	/* Pinned counter task profiling */
 	if (tsk) {
 		if (cpu >= 0) {
-			toggle_bp_task_slot(tsk, cpu, enable, type);
+			toggle_bp_task_slot(tsk, cpu, enable, type, weight);
 			return;
 		}
 
 		for_each_online_cpu(cpu)
-			toggle_bp_task_slot(tsk, cpu, enable, type);
+			toggle_bp_task_slot(tsk, cpu, enable, type, weight);
 		return;
 	}
 
 	/* Pinned counter cpu profiling */
 	if (enable)
-		per_cpu(nr_cpu_bp_pinned[type], bp->cpu)++;
+		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
 	else
-		per_cpu(nr_cpu_bp_pinned[type], bp->cpu)--;
+		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
 }
 
 /*
@@ -269,6 +290,7 @@ static int __reserve_bp_slot(struct perf_event *bp)
 {
 	struct bp_busy_slots slots = {0};
 	enum bp_type_idx type;
+	int weight;
 
 	/* Basic checks */
 	if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
@@ -276,13 +298,16 @@ static int __reserve_bp_slot(struct perf_event *bp)
 		return -EINVAL;
 
 	type = find_slot_idx(bp);
+	weight = hw_breakpoint_weight(bp);
+
 	fetch_bp_busy_slots(&slots, bp, type);
+	fetch_this_slot(&slots, weight);
 
 	/* Flexible counters need to keep at least one slot */
-	if (slots.pinned + (!!slots.flexible) == HBP_NUM)
+	if (slots.pinned + (!!slots.flexible) > HBP_NUM)
 		return -ENOSPC;
 
-	toggle_bp_slot(bp, true, type);
+	toggle_bp_slot(bp, true, type, weight);
 
 	return 0;
 }
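Note the check change from == to >: because fetch_this_slot() has already folded the new request's own weight into slots.pinned, a multi-slot request can overshoot HBP_NUM rather than landing on it exactly. With HBP_NUM == 4 (as on x86), three slots already pinned and a weight-2 request, the total reaches 5 and the reservation correctly fails with -ENOSPC.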
@@ -303,9 +328,11 @@ int reserve_bp_slot(struct perf_event *bp)
 static void __release_bp_slot(struct perf_event *bp)
 {
 	enum bp_type_idx type;
+	int weight;
 
 	type = find_slot_idx(bp);
-	toggle_bp_slot(bp, false, type);
+	weight = hw_breakpoint_weight(bp);
+	toggle_bp_slot(bp, false, type, weight);
 }
 
 void release_bp_slot(struct perf_event *bp)
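__release_bp_slot() recomputes the weight from the event instead of caching it at reserve time; the two stay consistent as long as hw_breakpoint_weight() is a pure function of the breakpoint's attributes, so reserve and release always charge and refund the same number of slots.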