-rw-r--r--	arch/powerpc/kernel/perf_counter.c	14
-rw-r--r--	kernel/perf_counter.c	35
2 files changed, 29 insertions, 20 deletions
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index 560dd1e7b524..0a4d14f279ae 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -624,13 +624,13 @@ hw_perf_counter_init(struct perf_counter *counter)
 	int err;
 
 	if (!ppmu)
-		return NULL;
+		return ERR_PTR(-ENXIO);
 	if ((s64)counter->hw_event.irq_period < 0)
-		return NULL;
+		return ERR_PTR(-EINVAL);
 	if (!perf_event_raw(&counter->hw_event)) {
 		ev = perf_event_id(&counter->hw_event);
 		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
-			return NULL;
+			return ERR_PTR(-EOPNOTSUPP);
 		ev = ppmu->generic_events[ev];
 	} else {
 		ev = perf_event_config(&counter->hw_event);
@@ -656,14 +656,14 @@ hw_perf_counter_init(struct perf_counter *counter)
 		n = collect_events(counter->group_leader, ppmu->n_counter - 1,
 				   ctrs, events);
 		if (n < 0)
-			return NULL;
+			return ERR_PTR(-EINVAL);
 	}
 	events[n] = ev;
 	ctrs[n] = counter;
 	if (check_excludes(ctrs, n, 1))
-		return NULL;
+		return ERR_PTR(-EINVAL);
 	if (power_check_constraints(events, n + 1))
-		return NULL;
+		return ERR_PTR(-EINVAL);
 
 	counter->hw.config = events[n];
 	atomic64_set(&counter->hw.period_left, counter->hw_event.irq_period);
@@ -687,7 +687,7 @@ hw_perf_counter_init(struct perf_counter *counter)
 	counter->destroy = hw_perf_counter_destroy;
 
 	if (err)
-		return NULL;
+		return ERR_PTR(err);
 	return &power_perf_ops;
 }
 
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index f35e89e3d6a4..d07b45278b4f 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2453,10 +2453,11 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 {
 	const struct hw_perf_counter_ops *hw_ops;
 	struct perf_counter *counter;
+	long err;
 
 	counter = kzalloc(sizeof(*counter), gfpflags);
 	if (!counter)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	/*
 	 * Single counters are their own group leaders, with an
@@ -2505,12 +2506,18 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 		hw_ops = tp_perf_counter_init(counter);
 		break;
 	}
+done:
+	err = 0;
+	if (!hw_ops)
+		err = -EINVAL;
+	else if (IS_ERR(hw_ops))
+		err = PTR_ERR(hw_ops);
 
-	if (!hw_ops) {
+	if (err) {
 		kfree(counter);
-		return NULL;
+		return ERR_PTR(err);
 	}
-done:
+
 	counter->hw_ops = hw_ops;
 
 	return counter;
@@ -2583,10 +2590,10 @@ SYSCALL_DEFINE5(perf_counter_open,
 		goto err_put_context;
 	}
 
-	ret = -EINVAL;
 	counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
 				     GFP_KERNEL);
-	if (!counter)
+	ret = PTR_ERR(counter);
+	if (IS_ERR(counter))
 		goto err_put_context;
 
 	ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
@@ -2658,8 +2665,8 @@ inherit_counter(struct perf_counter *parent_counter,
 	child_counter = perf_counter_alloc(&parent_counter->hw_event,
 					   parent_counter->cpu, child_ctx,
 					   group_leader, GFP_KERNEL);
-	if (!child_counter)
-		return NULL;
+	if (IS_ERR(child_counter))
+		return child_counter;
 
 	/*
 	 * Link it up in the child's context:
@@ -2710,15 +2717,17 @@ static int inherit_group(struct perf_counter *parent_counter,
 {
 	struct perf_counter *leader;
 	struct perf_counter *sub;
+	struct perf_counter *child_ctr;
 
 	leader = inherit_counter(parent_counter, parent, parent_ctx,
 				 child, NULL, child_ctx);
-	if (!leader)
-		return -ENOMEM;
+	if (IS_ERR(leader))
+		return PTR_ERR(leader);
 	list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
-		if (!inherit_counter(sub, parent, parent_ctx,
-				     child, leader, child_ctx))
-			return -ENOMEM;
+		child_ctr = inherit_counter(sub, parent, parent_ctx,
+					    child, leader, child_ctx);
+		if (IS_ERR(child_ctr))
+			return PTR_ERR(child_ctr);
 	}
 	return 0;
 }
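
The conversion above follows the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention from <linux/err.h>: a small negative errno value is encoded directly in the returned pointer, so a single return value can carry either a valid object or the reason for failure, and callers report EINVAL, ENXIO, EOPNOTSUPP and friends instead of a bare NULL. The sketch below is only a rough userspace illustration of that calling pattern, not kernel code: the macro definitions are simplified stand-ins for the real <linux/err.h> helpers, and alloc_thing() is a hypothetical allocator playing the role of perf_counter_alloc().

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Simplified stand-ins for the kernel's <linux/err.h> helpers; the real
 * macros live in include/linux/err.h.
 */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

/* Hypothetical allocator, standing in for perf_counter_alloc(). */
static int *alloc_thing(int fail)
{
	if (fail)
		return ERR_PTR(-EINVAL);	/* the errno rides in the pointer */

	int *p = malloc(sizeof(*p));
	if (!p)
		return ERR_PTR(-ENOMEM);
	*p = 42;
	return p;
}

int main(void)
{
	int *thing = alloc_thing(1);

	/*
	 * Same shape as the perf_counter_open() hunk: fetch the would-be
	 * error code first, then test the pointer with IS_ERR().
	 */
	long ret = PTR_ERR(thing);
	if (IS_ERR(thing)) {
		fprintf(stderr, "alloc_thing: error %ld\n", ret);
		return 1;
	}

	printf("value: %d\n", *thing);
	free(thing);
	return 0;
}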