about summary refs log tree commit diff stats
path: root/kernel/perf_event.c
diff options
context:
space:
mode:
author Peter Zijlstra <a.p.zijlstra@chello.nl> 2010-09-06 09:51:45 -0400
committer Ingo Molnar <mingo@elte.hu> 2010-09-09 14:46:31 -0400
commit 15ac9a395a753cb28c674e7ea80386ffdff21785 (patch)
tree 63879e3031a6ed8e372ffd254ef97ff703a4d478 /kernel/perf_event.c
parent a4eaf7f14675cb512d69f0c928055e73d0c6d252 (diff)
perf: Remove the sysfs bits
Neither the overcommit nor the reservation sysfs parameter were actually working, remove them as they'll only get in the way. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: paulus <paulus@samba.org> LKML-Reference: <new-submission> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r-- kernel/perf_event.c | 124
1 file changed, 0 insertions(+), 124 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 3bace4fd0355..8462e69409ae 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -39,10 +39,6 @@
39 */ 39 */
40static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context); 40static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
41 41
42int perf_max_events __read_mostly = 1;
43static int perf_reserved_percpu __read_mostly;
44static int perf_overcommit __read_mostly = 1;
45
46static atomic_t nr_events __read_mostly; 42static atomic_t nr_events __read_mostly;
47static atomic_t nr_mmap_events __read_mostly; 43static atomic_t nr_mmap_events __read_mostly;
48static atomic_t nr_comm_events __read_mostly; 44static atomic_t nr_comm_events __read_mostly;
@@ -66,11 +62,6 @@ int sysctl_perf_event_sample_rate __read_mostly = 100000;
66 62
67static atomic64_t perf_event_id; 63static atomic64_t perf_event_id;
68 64
69/*
70 * Lock for (sysadmin-configurable) event reservations:
71 */
72static DEFINE_SPINLOCK(perf_resource_lock);
73
74void __weak perf_event_print_debug(void) { } 65void __weak perf_event_print_debug(void) { }
75 66
76void perf_pmu_disable(struct pmu *pmu) 67void perf_pmu_disable(struct pmu *pmu)
@@ -480,16 +471,6 @@ static void __perf_event_remove_from_context(void *info)
480 471
481 list_del_event(event, ctx); 472 list_del_event(event, ctx);
482 473
483 if (!ctx->task) {
484 /*
485 * Allow more per task events with respect to the
486 * reservation:
487 */
488 cpuctx->max_pertask =
489 min(perf_max_events - ctx->nr_events,
490 perf_max_events - perf_reserved_percpu);
491 }
492
493 raw_spin_unlock(&ctx->lock); 474 raw_spin_unlock(&ctx->lock);
494} 475}
495 476
@@ -823,9 +804,6 @@ static void __perf_install_in_context(void *info)
823 } 804 }
824 } 805 }
825 806
826 if (!err && !ctx->task && cpuctx->max_pertask)
827 cpuctx->max_pertask--;
828
829unlock: 807unlock:
830 raw_spin_unlock(&ctx->lock); 808 raw_spin_unlock(&ctx->lock);
831} 809}
@@ -5930,10 +5908,6 @@ static void __cpuinit perf_event_init_cpu(int cpu)
5930 5908
5931 cpuctx = &per_cpu(perf_cpu_context, cpu); 5909 cpuctx = &per_cpu(perf_cpu_context, cpu);
5932 5910
5933 spin_lock(&perf_resource_lock);
5934 cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
5935 spin_unlock(&perf_resource_lock);
5936
5937 mutex_lock(&cpuctx->hlist_mutex); 5911 mutex_lock(&cpuctx->hlist_mutex);
5938 if (cpuctx->hlist_refcount > 0) { 5912 if (cpuctx->hlist_refcount > 0) {
5939 struct swevent_hlist *hlist; 5913 struct swevent_hlist *hlist;
@@ -6008,101 +5982,3 @@ void __init perf_event_init(void)
6008 perf_tp_register(); 5982 perf_tp_register();
6009 perf_cpu_notifier(perf_cpu_notify); 5983 perf_cpu_notifier(perf_cpu_notify);
6010} 5984}
6011
6012static ssize_t perf_show_reserve_percpu(struct sysdev_class *class,
6013 struct sysdev_class_attribute *attr,
6014 char *buf)
6015{
6016 return sprintf(buf, "%d\n", perf_reserved_percpu);
6017}
6018
6019static ssize_t
6020perf_set_reserve_percpu(struct sysdev_class *class,
6021 struct sysdev_class_attribute *attr,
6022 const char *buf,
6023 size_t count)
6024{
6025 struct perf_cpu_context *cpuctx;
6026 unsigned long val;
6027 int err, cpu, mpt;
6028
6029 err = strict_strtoul(buf, 10, &val);
6030 if (err)
6031 return err;
6032 if (val > perf_max_events)
6033 return -EINVAL;
6034
6035 spin_lock(&perf_resource_lock);
6036 perf_reserved_percpu = val;
6037 for_each_online_cpu(cpu) {
6038 cpuctx = &per_cpu(perf_cpu_context, cpu);
6039 raw_spin_lock_irq(&cpuctx->ctx.lock);
6040 mpt = min(perf_max_events - cpuctx->ctx.nr_events,
6041 perf_max_events - perf_reserved_percpu);
6042 cpuctx->max_pertask = mpt;
6043 raw_spin_unlock_irq(&cpuctx->ctx.lock);
6044 }
6045 spin_unlock(&perf_resource_lock);
6046
6047 return count;
6048}
6049
6050static ssize_t perf_show_overcommit(struct sysdev_class *class,
6051 struct sysdev_class_attribute *attr,
6052 char *buf)
6053{
6054 return sprintf(buf, "%d\n", perf_overcommit);
6055}
6056
6057static ssize_t
6058perf_set_overcommit(struct sysdev_class *class,
6059 struct sysdev_class_attribute *attr,
6060 const char *buf, size_t count)
6061{
6062 unsigned long val;
6063 int err;
6064
6065 err = strict_strtoul(buf, 10, &val);
6066 if (err)
6067 return err;
6068 if (val > 1)
6069 return -EINVAL;
6070
6071 spin_lock(&perf_resource_lock);
6072 perf_overcommit = val;
6073 spin_unlock(&perf_resource_lock);
6074
6075 return count;
6076}
6077
6078static SYSDEV_CLASS_ATTR(
6079 reserve_percpu,
6080 0644,
6081 perf_show_reserve_percpu,
6082 perf_set_reserve_percpu
6083 );
6084
6085static SYSDEV_CLASS_ATTR(
6086 overcommit,
6087 0644,
6088 perf_show_overcommit,
6089 perf_set_overcommit
6090 );
6091
6092static struct attribute *perfclass_attrs[] = {
6093 &attr_reserve_percpu.attr,
6094 &attr_overcommit.attr,
6095 NULL
6096};
6097
6098static struct attribute_group perfclass_attr_group = {
6099 .attrs = perfclass_attrs,
6100 .name = "perf_events",
6101};
6102
6103static int __init perf_event_sysfs_init(void)
6104{
6105 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
6106 &perfclass_attr_group);
6107}
6108device_initcall(perf_event_sysfs_init);