Diffstat (limited to 'kernel/hw_breakpoint.c')
 -rw-r--r--  kernel/hw_breakpoint.c | 86
 1 file changed, 59 insertions(+), 27 deletions(-)
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 89e8a050c43a..8ead1345e33b 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -45,18 +45,28 @@
 
 #include <linux/hw_breakpoint.h>
 
+enum bp_type_idx {
+	TYPE_INST = 0,
+#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
+	TYPE_DATA = 0,
+#else
+	TYPE_DATA = 1,
+#endif
+	TYPE_MAX
+};
+
 /*
  * Constraints data
  */
 
 /* Number of pinned cpu breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);
+static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);
 
 /* Number of pinned task breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[HBP_NUM]);
+static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[TYPE_MAX][HBP_NUM]);
 
 /* Number of non-pinned cpu/task breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);
+static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);
 
 /* Gather the number of total pinned and un-pinned bp in a cpuset */
 struct bp_busy_slots {
@@ -67,14 +77,22 @@ struct bp_busy_slots {
 /* Serialize accesses to the above constraints */
 static DEFINE_MUTEX(nr_bp_mutex);
 
+static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
+{
+	if (bp->attr.bp_type & HW_BREAKPOINT_RW)
+		return TYPE_DATA;
+
+	return TYPE_INST;
+}
+
 /*
  * Report the maximum number of pinned breakpoints a task
  * have in this cpu
  */
-static unsigned int max_task_bp_pinned(int cpu)
+static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
 {
 	int i;
-	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
+	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
 
 	for (i = HBP_NUM -1; i >= 0; i--) {
 		if (tsk_pinned[i] > 0)
@@ -84,7 +102,7 @@ static unsigned int max_task_bp_pinned(int cpu)
 	return 0;
 }
 
-static int task_bp_pinned(struct task_struct *tsk)
+static int task_bp_pinned(struct task_struct *tsk, enum bp_type_idx type)
 {
 	struct perf_event_context *ctx = tsk->perf_event_ctxp;
 	struct list_head *list;
@@ -105,7 +123,8 @@ static int task_bp_pinned(struct task_struct *tsk)
 	 */
 	list_for_each_entry(bp, list, event_entry) {
 		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
-			count++;
+			if (find_slot_idx(bp) == type)
+				count++;
 	}
 
 	raw_spin_unlock_irqrestore(&ctx->lock, flags);
@@ -118,18 +137,19 @@ static int task_bp_pinned(struct task_struct *tsk)
  * a given cpu (cpu > -1) or in all of them (cpu = -1).
  */
 static void
-fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
+fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
+		    enum bp_type_idx type)
 {
 	int cpu = bp->cpu;
 	struct task_struct *tsk = bp->ctx->task;
 
 	if (cpu >= 0) {
-		slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
+		slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
 		if (!tsk)
-			slots->pinned += max_task_bp_pinned(cpu);
+			slots->pinned += max_task_bp_pinned(cpu, type);
 		else
-			slots->pinned += task_bp_pinned(tsk);
-		slots->flexible = per_cpu(nr_bp_flexible, cpu);
+			slots->pinned += task_bp_pinned(tsk, type);
+		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);
 
 		return;
 	}
@@ -137,16 +157,16 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
 	for_each_online_cpu(cpu) {
 		unsigned int nr;
 
-		nr = per_cpu(nr_cpu_bp_pinned, cpu);
+		nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
 		if (!tsk)
-			nr += max_task_bp_pinned(cpu);
+			nr += max_task_bp_pinned(cpu, type);
 		else
-			nr += task_bp_pinned(tsk);
+			nr += task_bp_pinned(tsk, type);
 
 		if (nr > slots->pinned)
 			slots->pinned = nr;
 
-		nr = per_cpu(nr_bp_flexible, cpu);
+		nr = per_cpu(nr_bp_flexible[type], cpu);
 
 		if (nr > slots->flexible)
 			slots->flexible = nr;
@@ -156,14 +176,15 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
 /*
  * Add a pinned breakpoint for the given task in our constraint table
  */
-static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
+static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable,
+				enum bp_type_idx type)
 {
 	unsigned int *tsk_pinned;
 	int count = 0;
 
-	count = task_bp_pinned(tsk);
+	count = task_bp_pinned(tsk, type);
 
-	tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
+	tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
 	if (enable) {
 		tsk_pinned[count]++;
 		if (count > 0)
@@ -178,7 +199,8 @@ static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
 /*
  * Add/remove the given breakpoint in our constraint table
  */
-static void toggle_bp_slot(struct perf_event *bp, bool enable)
+static void
+toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type)
 {
 	int cpu = bp->cpu;
 	struct task_struct *tsk = bp->ctx->task;
@@ -186,20 +208,20 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
 	/* Pinned counter task profiling */
 	if (tsk) {
 		if (cpu >= 0) {
-			toggle_bp_task_slot(tsk, cpu, enable);
+			toggle_bp_task_slot(tsk, cpu, enable, type);
 			return;
 		}
 
 		for_each_online_cpu(cpu)
-			toggle_bp_task_slot(tsk, cpu, enable);
+			toggle_bp_task_slot(tsk, cpu, enable, type);
 		return;
 	}
 
 	/* Pinned counter cpu profiling */
 	if (enable)
-		per_cpu(nr_cpu_bp_pinned, bp->cpu)++;
+		per_cpu(nr_cpu_bp_pinned[type], bp->cpu)++;
 	else
-		per_cpu(nr_cpu_bp_pinned, bp->cpu)--;
+		per_cpu(nr_cpu_bp_pinned[type], bp->cpu)--;
 }
 
 /*
@@ -246,14 +268,21 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
 static int __reserve_bp_slot(struct perf_event *bp)
 {
 	struct bp_busy_slots slots = {0};
+	enum bp_type_idx type;
 
-	fetch_bp_busy_slots(&slots, bp);
+	/* Basic checks */
+	if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
+	    bp->attr.bp_type == HW_BREAKPOINT_INVALID)
+		return -EINVAL;
+
+	type = find_slot_idx(bp);
+	fetch_bp_busy_slots(&slots, bp, type);
 
 	/* Flexible counters need to keep at least one slot */
 	if (slots.pinned + (!!slots.flexible) == HBP_NUM)
 		return -ENOSPC;
 
-	toggle_bp_slot(bp, true);
+	toggle_bp_slot(bp, true, type);
 
 	return 0;
 }
@@ -273,7 +302,10 @@ int reserve_bp_slot(struct perf_event *bp)
 
 static void __release_bp_slot(struct perf_event *bp)
 {
-	toggle_bp_slot(bp, false);
+	enum bp_type_idx type;
+
+	type = find_slot_idx(bp);
+	toggle_bp_slot(bp, false, type);
 }
 
 void release_bp_slot(struct perf_event *bp)
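
Note on the change, for readers skimming the raw diff: every constraint counter (nr_cpu_bp_pinned, nr_task_bp_pinned, nr_bp_flexible) gains an extra [TYPE_MAX] dimension indexed by find_slot_idx(), so data and instruction breakpoints are accounted in separate slot pools; on architectures that define CONFIG_HAVE_MIXED_BREAKPOINTS_REGS, TYPE_DATA aliases TYPE_INST and the two pools collapse into one shared register set. The stand-alone C sketch below is not kernel code: the flag values mirror the kernel's HW_BREAKPOINT_* constants but the macros, the HAVE_MIXED_BREAKPOINTS_REGS toggle, and main() are local simplifications used only to illustrate the indexing scheme.

/* Minimal user-space sketch of the bp_type_idx accounting, NOT kernel code. */
#include <stdio.h>

/* Simplified stand-ins for the kernel's HW_BREAKPOINT_* type flags. */
#define HW_BREAKPOINT_R		1
#define HW_BREAKPOINT_W		2
#define HW_BREAKPOINT_RW	(HW_BREAKPOINT_R | HW_BREAKPOINT_W)
#define HW_BREAKPOINT_X		4

/* Define this to model an arch where data and instruction breakpoints
 * share one register set (CONFIG_HAVE_MIXED_BREAKPOINTS_REGS). */
/* #define HAVE_MIXED_BREAKPOINTS_REGS */

enum bp_type_idx {
	TYPE_INST = 0,
#ifdef HAVE_MIXED_BREAKPOINTS_REGS
	TYPE_DATA = 0,
#else
	TYPE_DATA = 1,
#endif
	TYPE_MAX
};

/* Same decision as the patch's find_slot_idx(): any read/write bit means
 * the breakpoint consumes a data slot, otherwise an instruction slot. */
static enum bp_type_idx find_slot_idx(int bp_type)
{
	if (bp_type & HW_BREAKPOINT_RW)
		return TYPE_DATA;
	return TYPE_INST;
}

int main(void)
{
	/* One pinned-slot counter per type, mirroring nr_cpu_bp_pinned[TYPE_MAX]. */
	unsigned int nr_pinned[TYPE_MAX] = { 0 };

	nr_pinned[find_slot_idx(HW_BREAKPOINT_W)]++;	/* data watchpoint */
	nr_pinned[find_slot_idx(HW_BREAKPOINT_X)]++;	/* instruction breakpoint */

	printf("TYPE_MAX=%d inst=%u data=%u\n",
	       TYPE_MAX, nr_pinned[TYPE_INST], nr_pinned[TYPE_DATA]);
	return 0;
}

With the mixed-registers toggle commented out the two counters stay independent (inst=1, data=1, TYPE_MAX=2); defining it collapses both increments onto slot index 0, which is exactly why the per-cpu arrays above are sized by TYPE_MAX rather than a fixed 2.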