aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/perf_counter.c
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2009-03-19 15:26:19 -0400
committerIngo Molnar <mingo@elte.hu>2009-04-06 03:30:17 -0400
commit0322cd6ec504b0bf08ca7b2c3d7f43bda37d79c9 (patch)
tree8e5487e1a6700eb865ec1f268d51e9d6d3d38a71 /kernel/perf_counter.c
parentb8e83514b64577b48bfb794fe85fcde40a9343ca (diff)
perf_counter: unify irq output code
Impact: cleanup

Having 3 slightly different copies of the same code around does nobody any good. First step in revamping the output format.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Paul Mackerras <paulus@samba.org> Cc: Steven Rostedt <rostedt@goodmis.org> Orig-LKML-Reference: <20090319194233.929962222@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--kernel/perf_counter.c106
1 files changed, 56 insertions, 50 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 68a56a68bc74..f054b8c9bf96 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1354,6 +1354,60 @@ static const struct file_operations perf_fops = {
1354}; 1354};
1355 1355
1356/* 1356/*
1357 * Output
1358 */
1359
1360static void perf_counter_store_irq(struct perf_counter *counter, u64 data)
1361{
1362 struct perf_data *irqdata = counter->irqdata;
1363
1364 if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
1365 irqdata->overrun++;
1366 } else {
1367 u64 *p = (u64 *) &irqdata->data[irqdata->len];
1368
1369 *p = data;
1370 irqdata->len += sizeof(u64);
1371 }
1372}
1373
1374static void perf_counter_handle_group(struct perf_counter *counter)
1375{
1376 struct perf_counter *leader, *sub;
1377
1378 leader = counter->group_leader;
1379 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
1380 if (sub != counter)
1381 sub->hw_ops->read(sub);
1382 perf_counter_store_irq(counter, sub->hw_event.event_config);
1383 perf_counter_store_irq(counter, atomic64_read(&sub->count));
1384 }
1385}
1386
1387void perf_counter_output(struct perf_counter *counter,
1388 int nmi, struct pt_regs *regs)
1389{
1390 switch (counter->hw_event.record_type) {
1391 case PERF_RECORD_SIMPLE:
1392 return;
1393
1394 case PERF_RECORD_IRQ:
1395 perf_counter_store_irq(counter, instruction_pointer(regs));
1396 break;
1397
1398 case PERF_RECORD_GROUP:
1399 perf_counter_handle_group(counter);
1400 break;
1401 }
1402
1403 if (nmi) {
1404 counter->wakeup_pending = 1;
1405 set_perf_counter_pending();
1406 } else
1407 wake_up(&counter->waitq);
1408}
1409
1410/*
1357 * Generic software counter infrastructure 1411 * Generic software counter infrastructure
1358 */ 1412 */
1359 1413
@@ -1395,54 +1449,6 @@ static void perf_swcounter_set_period(struct perf_counter *counter)
1395 atomic64_set(&hwc->count, -left); 1449 atomic64_set(&hwc->count, -left);
1396} 1450}
1397 1451
/*
 * (Removed by this patch — duplicate of the unified store helper.)
 * Append one u64 to the counter's IRQ data buffer, or count an
 * overrun when the buffer has no room for another u64.
 */
static void perf_swcounter_store_irq(struct perf_counter *counter, u64 data)
{
	struct perf_data *irqdata = counter->irqdata;

	if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
		irqdata->overrun++;
	} else {
		u64 *p = (u64 *) &irqdata->data[irqdata->len];

		*p = data;
		irqdata->len += sizeof(u64);
	}
}
1411
/*
 * (Removed by this patch — duplicate of the unified group handler.)
 * Read every counter in the sibling's group and store its
 * (event_config, count) pair into the sibling's IRQ buffer.
 * Note: unlike the unified version, this re-reads every member,
 * including the interrupting counter itself.
 */
static void perf_swcounter_handle_group(struct perf_counter *sibling)
{
	struct perf_counter *counter, *group_leader = sibling->group_leader;

	list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
		counter->hw_ops->read(counter);
		perf_swcounter_store_irq(sibling, counter->hw_event.event_config);
		perf_swcounter_store_irq(sibling, atomic64_read(&counter->count));
	}
}
1422
/*
 * (Removed by this patch — replaced by the unified perf_counter_output().)
 * Record data per hw_event.record_type, then wake any waiter; from NMI
 * context the wakeup is deferred via the pending flag.
 */
static void perf_swcounter_interrupt(struct perf_counter *counter,
				     int nmi, struct pt_regs *regs)
{
	switch (counter->hw_event.record_type) {
	case PERF_RECORD_SIMPLE:
		break;

	case PERF_RECORD_IRQ:
		/* Log the interrupted instruction pointer. */
		perf_swcounter_store_irq(counter, instruction_pointer(regs));
		break;

	case PERF_RECORD_GROUP:
		perf_swcounter_handle_group(counter);
		break;
	}

	if (nmi) {
		counter->wakeup_pending = 1;
		set_perf_counter_pending();
	} else
		wake_up(&counter->waitq);
}
1445
1446static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) 1452static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
1447{ 1453{
1448 struct perf_counter *counter; 1454 struct perf_counter *counter;
@@ -1461,7 +1467,7 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
1461 regs = task_pt_regs(current); 1467 regs = task_pt_regs(current);
1462 1468
1463 if (regs) 1469 if (regs)
1464 perf_swcounter_interrupt(counter, 0, regs); 1470 perf_counter_output(counter, 0, regs);
1465 1471
1466 hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period)); 1472 hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));
1467 1473
@@ -1473,7 +1479,7 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
1473{ 1479{
1474 perf_swcounter_update(counter); 1480 perf_swcounter_update(counter);
1475 perf_swcounter_set_period(counter); 1481 perf_swcounter_set_period(counter);
1476 perf_swcounter_interrupt(counter, nmi, regs); 1482 perf_counter_output(counter, nmi, regs);
1477} 1483}
1478 1484
1479static int perf_swcounter_match(struct perf_counter *counter, 1485static int perf_swcounter_match(struct perf_counter *counter,