about · summary · refs · log · tree · commit · diff · stats
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2009-06-11 05:25:05 -0400
committerIngo Molnar <mingo@elte.hu>2009-06-11 10:48:38 -0400
commitdf58ab24bf26b166874bfb18b3b5a2e0a8e63179 (patch)
tree388b2fb9d94864c9bd6d6ab9329c31760b7366ae
parent0764771dab80d7b84b9a271bee7f1b21a04a3f0c (diff)
perf_counter: Rename perf_counter_limit sysctl
Rename perf_counter_limit to perf_counter_max_sample_rate and prohibit creation of counters with a known higher sample frequency.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--include/linux/perf_counter.h2
-rw-r--r--kernel/perf_counter.c27
-rw-r--r--kernel/sysctl.c6
3 files changed, 23 insertions, 12 deletions
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 386be915baa1..95c797c480e8 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -650,7 +650,7 @@ extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
 
 extern int sysctl_perf_counter_paranoid;
 extern int sysctl_perf_counter_mlock;
-extern int sysctl_perf_counter_limit;
+extern int sysctl_perf_counter_sample_rate;
 
 extern void perf_counter_init(void);
 
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 63f1987c1c1c..3b2829de5590 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -44,11 +44,12 @@ static atomic_t nr_mmap_counters __read_mostly;
 static atomic_t nr_comm_counters __read_mostly;
 
 /*
+ * perf counter paranoia level:
  * 0 - not paranoid
  * 1 - disallow cpu counters to unpriv
  * 2 - disallow kernel profiling to unpriv
  */
-int sysctl_perf_counter_paranoid __read_mostly; /* do we need to be privileged */
+int sysctl_perf_counter_paranoid __read_mostly;
 
 static inline bool perf_paranoid_cpu(void)
 {
@@ -61,7 +62,11 @@ static inline bool perf_paranoid_kernel(void)
 }
 
 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
-int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */
+
+/*
+ * max perf counter sample rate
+ */
+int sysctl_perf_counter_sample_rate __read_mostly = 100000;
 
 static atomic64_t perf_counter_id;
 
@@ -1244,7 +1249,7 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
 		if (interrupts == MAX_INTERRUPTS) {
 			perf_log_throttle(counter, 1);
 			counter->pmu->unthrottle(counter);
-			interrupts = 2*sysctl_perf_counter_limit/HZ;
+			interrupts = 2*sysctl_perf_counter_sample_rate/HZ;
 		}
 
 		if (!counter->attr.freq || !counter->attr.sample_freq)
@@ -1682,7 +1687,7 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
 
 	spin_lock_irq(&ctx->lock);
 	if (counter->attr.freq) {
-		if (value > sysctl_perf_counter_limit) {
+		if (value > sysctl_perf_counter_sample_rate) {
 			ret = -EINVAL;
 			goto unlock;
 		}
@@ -2979,7 +2984,8 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi,
 	} else {
 		if (hwc->interrupts != MAX_INTERRUPTS) {
 			hwc->interrupts++;
-			if (HZ * hwc->interrupts > (u64)sysctl_perf_counter_limit) {
+			if (HZ * hwc->interrupts >
+					(u64)sysctl_perf_counter_sample_rate) {
 				hwc->interrupts = MAX_INTERRUPTS;
 				perf_log_throttle(counter, 0);
 				ret = 1;
@@ -3639,6 +3645,11 @@ SYSCALL_DEFINE5(perf_counter_open,
 			return -EACCES;
 	}
 
+	if (attr.freq) {
+		if (attr.sample_freq > sysctl_perf_counter_sample_rate)
+			return -EINVAL;
+	}
+
 	/*
 	 * Get the target context (task or percpu):
 	 */
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 344a65981dee..9fd4e436b696 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -932,9 +932,9 @@ static struct ctl_table kern_table[] = {
 	},
 	{
 		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "perf_counter_int_limit",
-		.data		= &sysctl_perf_counter_limit,
-		.maxlen		= sizeof(sysctl_perf_counter_limit),
+		.procname	= "perf_counter_max_sample_rate",
+		.data		= &sysctl_perf_counter_sample_rate,
+		.maxlen		= sizeof(sysctl_perf_counter_sample_rate),
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec,
 	},