Diffstat (limited to 'kernel/hw_breakpoint.c')
-rw-r--r--  kernel/hw_breakpoint.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 02b492504a5a..03a0773ac2b2 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -52,7 +52,7 @@
 static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);
 
 /* Number of pinned task breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int, task_bp_pinned[HBP_NUM]);
+static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[HBP_NUM]);
 
 /* Number of non-pinned cpu/task breakpoints in a cpu */
 static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);
@@ -73,7 +73,7 @@ static DEFINE_MUTEX(nr_bp_mutex);
 static unsigned int max_task_bp_pinned(int cpu)
 {
         int i;
-        unsigned int *tsk_pinned = per_cpu(task_bp_pinned, cpu);
+        unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
 
         for (i = HBP_NUM -1; i >= 0; i--) {
                 if (tsk_pinned[i] > 0)
@@ -162,7 +162,7 @@ static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
 
         count = task_bp_pinned(tsk);
 
-        tsk_pinned = per_cpu(task_bp_pinned, cpu);
+        tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
         if (enable) {
                 tsk_pinned[count]++;
                 if (count > 0)
@@ -209,7 +209,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  * - If attached to a single cpu, check:
  *
  *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
- *            + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM
+ *            + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
  *
  * -> If there are already non-pinned counters in this cpu, it means
  *    there is already a free slot for them.
@@ -220,7 +220,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  * - If attached to every cpus, check:
  *
  *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
- *            + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM
+ *            + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
  *
  * -> This is roughly the same, except we check the number of per cpu
  *    bp for every cpu and we keep the max one. Same for the per tasks
@@ -232,7 +232,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  * - If attached to a single cpu, check:
  *
  *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
- *            + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM
+ *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
  *
  * -> Same checks as before. But now the nr_bp_flexible, if any, must keep
  *    one register at least (or they will never be fed).
@@ -240,7 +240,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  * - If attached to every cpus, check:
  *
  *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
- *            + max(per_cpu(task_bp_pinned, *))) < HBP_NUM
+ *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
  */
 int reserve_bp_slot(struct perf_event *bp)
 {
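
For context on the renamed array, here is a minimal userspace sketch of how nr_task_bp_pinned is indexed and how max_task_bp_pinned() consumes it: slot i on a cpu counts tasks that currently hold i + 1 pinned breakpoints there, so scanning from the top yields the largest per-task pinned count that the reservation formulas above feed into max(). This is an illustration, not kernel code: the plain 2-D array standing in for DEFINE_PER_CPU, the HBP_NUM and NR_CPUS values, the task_gains_bp() helper, and the lines cut off in the hunks above (the "return i + 1" and the tsk_pinned[count - 1] decrement) are all assumptions.

/*
 * Userspace sketch of the nr_task_bp_pinned bookkeeping (illustrative only).
 * A plain 2-D array stands in for DEFINE_PER_CPU; HBP_NUM and NR_CPUS values
 * are made up for the demo.
 */
#include <stdio.h>

#define HBP_NUM 4        /* breakpoint registers per cpu (assumed) */
#define NR_CPUS 2        /* assumed */

/* nr_task_bp_pinned[cpu][i]: number of tasks holding i + 1 pinned bps on cpu */
static unsigned int nr_task_bp_pinned[NR_CPUS][HBP_NUM];

/* Largest number of pinned task breakpoints held by any single task on @cpu */
static unsigned int max_task_bp_pinned(int cpu)
{
        int i;
        unsigned int *tsk_pinned = nr_task_bp_pinned[cpu];

        for (i = HBP_NUM - 1; i >= 0; i--) {
                if (tsk_pinned[i] > 0)
                        return i + 1;   /* assumed tail of the truncated hunk */
        }
        return 0;
}

/* Hypothetical helper mirroring the enable path of toggle_bp_task_slot():
 * a task on @cpu that already had @count pinned breakpoints gains one more. */
static void task_gains_bp(int cpu, unsigned int count)
{
        nr_task_bp_pinned[cpu][count]++;
        if (count > 0)
                nr_task_bp_pinned[cpu][count - 1]--;
}

int main(void)
{
        task_gains_bp(0, 0);    /* task A: 0 -> 1 pinned bp on cpu 0 */
        task_gains_bp(0, 1);    /* task A: 1 -> 2 pinned bps on cpu 0 */
        task_gains_bp(0, 0);    /* task B: 0 -> 1 pinned bp on cpu 0 */

        printf("max pinned per task on cpu 0: %u\n", max_task_bp_pinned(0));
        return 0;
}

Compiled standalone, the demo prints 2: after task A reaches two pinned breakpoints and task B one, the highest occupied slot index on cpu 0 is 1, which is the quantity the renamed per-cpu array tracks for the reservation checks documented in the comment block above.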