author:    Peter Zijlstra <a.p.zijlstra@chello.nl>    2010-06-16 08:37:10 -0400
committer: Ingo Molnar <mingo@elte.hu>    2010-09-09 14:46:30 -0400
commit:    a4eaf7f14675cb512d69f0c928055e73d0c6d252
tree:      e8a0f631fc28d4bd9becd2e9e2c71743c64ee3ec /arch/sh/kernel/perf_event.c
parent:    fa407f35e0298d841e4088f95a7f9cf6e725c6d5
perf: Rework the PMU methods
Replace pmu::{enable,disable,start,stop,unthrottle} with
pmu::{add,del,start,stop}, all of which take a flags argument.

The new interface extends the capability to stop a counter while
keeping it scheduled on the PMU. We replace the throttled state with
the generic stopped state.

This also allows us to efficiently stop/start counters over certain
code paths (like IRQ handlers).

It also allows scheduling a counter without it starting, allowing for
a generic frozen state (useful for rotating stopped counters).

The stopped state is implemented in two different ways, depending on
how the architecture implemented the throttled state:

 1) We disable the counter:
    a) the pmu has per-counter enable bits, we flip that
    b) we program a NOP event, preserving the counter state

 2) We store the counter state and ignore all read/overflow events

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
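As a rough sketch of the lifecycle the new interface implies (illustrative only, not part of this patch; example_lifecycle() is a made-up helper, and in practice the perf core invokes these callbacks with the context locked and the PMU disabled):

/* Illustrative sketch only -- not code from this patch. */
#include <linux/perf_event.h>

static void example_lifecycle(struct pmu *p, struct perf_event *event)
{
        /* Schedule the counter on the PMU but leave it stopped ("frozen"). */
        if (p->add(event, 0))
                return;

        /* Start counting; PERF_EF_RELOAD reprograms the saved sample period
         * (legal here because ->add() leaves the count PERF_HES_UPTODATE,
         * as sh_pmu_add() below does). */
        p->start(event, PERF_EF_RELOAD);

        /* Pause counting; PERF_EF_UPDATE folds the hardware value into
         * event->count while the counter stays scheduled on the PMU. */
        p->stop(event, PERF_EF_UPDATE);

        /* Unschedule the counter; sh_pmu_del() below does a final
         * ->stop(PERF_EF_UPDATE) before releasing it. */
        p->del(event, 0);
}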
Diffstat (limited to 'arch/sh/kernel/perf_event.c')
 arch/sh/kernel/perf_event.c | 75
 1 file changed, 51 insertions(+), 24 deletions(-)
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 4bbe19058a5..cf39c487346 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -206,26 +206,52 @@ again:
         local64_add(delta, &event->count);
 }
 
-static void sh_pmu_disable(struct perf_event *event)
+static void sh_pmu_stop(struct perf_event *event, int flags)
 {
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         struct hw_perf_event *hwc = &event->hw;
         int idx = hwc->idx;
 
-        clear_bit(idx, cpuc->active_mask);
-        sh_pmu->disable(hwc, idx);
+        if (!(event->hw.state & PERF_HES_STOPPED)) {
+                sh_pmu->disable(hwc, idx);
+                cpuc->events[idx] = NULL;
+                event->hw.state |= PERF_HES_STOPPED;
+        }
 
-        barrier();
+        if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
+                sh_perf_event_update(event, &event->hw, idx);
+                event->hw.state |= PERF_HES_UPTODATE;
+        }
+}
 
-        sh_perf_event_update(event, &event->hw, idx);
+static void sh_pmu_start(struct perf_event *event, int flags)
+{
+        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct hw_perf_event *hwc = &event->hw;
+        int idx = hwc->idx;
 
-        cpuc->events[idx] = NULL;
-        clear_bit(idx, cpuc->used_mask);
+        if (WARN_ON_ONCE(idx == -1))
+                return;
+
+        if (flags & PERF_EF_RELOAD)
+                WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
+        cpuc->events[idx] = event;
+        event->hw.state = 0;
+        sh_pmu->enable(hwc, idx);
+}
+
+static void sh_pmu_del(struct perf_event *event, int flags)
+{
+        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+        sh_pmu_stop(event, PERF_EF_UPDATE);
+        __clear_bit(event->hw.idx, cpuc->used_mask);
 
         perf_event_update_userpage(event);
 }
 
-static int sh_pmu_enable(struct perf_event *event)
+static int sh_pmu_add(struct perf_event *event, int flags)
 {
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         struct hw_perf_event *hwc = &event->hw;
@@ -234,21 +260,20 @@ static int sh_pmu_enable(struct perf_event *event)
 
         perf_pmu_disable(event->pmu);
 
-        if (test_and_set_bit(idx, cpuc->used_mask)) {
+        if (__test_and_set_bit(idx, cpuc->used_mask)) {
                 idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
                 if (idx == sh_pmu->num_events)
                         goto out;
 
-                set_bit(idx, cpuc->used_mask);
+                __set_bit(idx, cpuc->used_mask);
                 hwc->idx = idx;
         }
 
         sh_pmu->disable(hwc, idx);
 
-        cpuc->events[idx] = event;
-        set_bit(idx, cpuc->active_mask);
-
-        sh_pmu->enable(hwc, idx);
+        event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+        if (flags & PERF_EF_START)
+                sh_pmu_start(event, PERF_EF_RELOAD);
 
         perf_event_update_userpage(event);
         ret = 0;
@@ -285,7 +310,7 @@ static int sh_pmu_event_init(struct perf_event *event)
         return err;
 }
 
-static void sh_pmu_pmu_enable(struct pmu *pmu)
+static void sh_pmu_enable(struct pmu *pmu)
 {
         if (!sh_pmu_initialized())
                 return;
@@ -293,7 +318,7 @@ static void sh_pmu_pmu_enable(struct pmu *pmu)
         sh_pmu->enable_all();
 }
 
-static void sh_pmu_pmu_disable(struct pmu *pmu)
+static void sh_pmu_disable(struct pmu *pmu)
 {
         if (!sh_pmu_initialized())
                 return;
@@ -302,11 +327,13 @@ static void sh_pmu_pmu_disable(struct pmu *pmu)
 }
 
 static struct pmu pmu = {
-        .pmu_enable     = sh_pmu_pmu_enable,
-        .pmu_disable    = sh_pmu_pmu_disable,
+        .pmu_enable     = sh_pmu_enable,
+        .pmu_disable    = sh_pmu_disable,
         .event_init     = sh_pmu_event_init,
-        .enable         = sh_pmu_enable,
-        .disable        = sh_pmu_disable,
+        .add            = sh_pmu_add,
+        .del            = sh_pmu_del,
+        .start          = sh_pmu_start,
+        .stop           = sh_pmu_stop,
         .read           = sh_pmu_read,
 };
 
@@ -334,15 +361,15 @@ sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
         return NOTIFY_OK;
 }
 
-int __cpuinit register_sh_pmu(struct sh_pmu *pmu)
+int __cpuinit register_sh_pmu(struct sh_pmu *_pmu)
 {
         if (sh_pmu)
                 return -EBUSY;
-        sh_pmu = pmu;
+        sh_pmu = _pmu;
 
-        pr_info("Performance Events: %s support registered\n", pmu->name);
+        pr_info("Performance Events: %s support registered\n", _pmu->name);
 
-        WARN_ON(pmu->num_events > MAX_HWEVENTS);
+        WARN_ON(_pmu->num_events > MAX_HWEVENTS);
 
         perf_pmu_register(&pmu);
         perf_cpu_notifier(sh_pmu_notifier);
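The changelog's point about stopping/starting counters over IRQ paths boils down to the pairing sketched below; throttle_event()/unthrottle_event() are hypothetical names, not kernel API, but the flag usage matches sh_pmu_stop()/sh_pmu_start() in this diff:

/* Illustrative sketch only -- pausing and resuming an event in place. */
#include <linux/perf_event.h>

static void throttle_event(struct perf_event *event)
{
        /* Stop counting and fold the current hardware count into
         * event->count; the event keeps its counter slot on the PMU. */
        event->pmu->stop(event, PERF_EF_UPDATE);
}

static void unthrottle_event(struct perf_event *event)
{
        /* Resume counting from a freshly programmed sample period;
         * no ->del()/->add() round trip is needed. */
        event->pmu->start(event, PERF_EF_RELOAD);
}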