author     Arnaldo Carvalho de Melo <acme@redhat.com>   2016-04-21 11:28:50 -0400
committer  Arnaldo Carvalho de Melo <acme@redhat.com>   2016-04-27 09:20:39 -0400
commit     c5dfd78eb79851e278b7973031b9ca363da87a7e (patch)
tree       eb48703a86c059b4de2a13e4c7021232c22e3715
parent     c2a218c63ba36946aca5943c0c8ebd3a42e3dc4b (diff)
perf core: Allow setting up max frame stack depth via sysctl
The default remains 127, which is good for most cases, and not even hit
most of the time, but then for some cases, as reported by Brendan,
1024+ deep frames are appearing on the radar for things like groovy,
ruby.

And in some workloads putting a _lower_ cap on this may make sense. One
that is per event still needs to be put in place though.

The new file is:

 # cat /proc/sys/kernel/perf_event_max_stack
 127

Changing it:

 # echo 256 > /proc/sys/kernel/perf_event_max_stack
 # cat /proc/sys/kernel/perf_event_max_stack
 256

But as soon as there is some event using callchains we get:

 # echo 512 > /proc/sys/kernel/perf_event_max_stack
 -bash: echo: write error: Device or resource busy
 #

Because we only allocate the callchain percpu data structures when there
is a user, which allows for changing the max easily, it's just a matter
of having no callchain users at that point.

Reported-and-Tested-by: Brendan Gregg <brendan.d.gregg@gmail.com>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: David Ahern <dsahern@gmail.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: He Kuang <hekuang@huawei.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Milian Wolff <milian.wolff@kdab.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: Wang Nan <wangnan0@huawei.com>
Cc: Zefan Li <lizefan@huawei.com>
Link: http://lkml.kernel.org/r/20160426002928.GB16708@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-rw-r--r--  Documentation/sysctl/kernel.txt     | 14
-rw-r--r--  arch/arm/kernel/perf_callchain.c    |  2
-rw-r--r--  arch/arm64/kernel/perf_callchain.c  |  4
-rw-r--r--  arch/metag/kernel/perf_callchain.c  |  2
-rw-r--r--  arch/mips/kernel/perf_event.c       |  4
-rw-r--r--  arch/powerpc/perf/callchain.c       |  4
-rw-r--r--  arch/sparc/kernel/perf_event.c      |  6
-rw-r--r--  arch/x86/events/core.c              |  4
-rw-r--r--  arch/xtensa/kernel/perf_event.c     |  4
-rw-r--r--  include/linux/perf_event.h          |  8
-rw-r--r--  kernel/bpf/stackmap.c               |  8
-rw-r--r--  kernel/events/callchain.c           | 35
-rw-r--r--  kernel/sysctl.c                     | 12

13 files changed, 84 insertions, 23 deletions
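
As a quick way to exercise the new knob, a minimal userspace sketch
follows; it assumes a kernel with this patch applied, and the helper
names are illustrative. Note that with stdio the write error typically
surfaces at fclose(), since the buffered data only reaches /proc on
flush:

  /* Sketch: read and raise /proc/sys/kernel/perf_event_max_stack. */
  #include <errno.h>
  #include <stdio.h>
  #include <string.h>

  #define MAX_STACK_SYSCTL "/proc/sys/kernel/perf_event_max_stack"

  static int read_max_stack(void)
  {
          FILE *f = fopen(MAX_STACK_SYSCTL, "r");
          int val = -1;

          if (f && fscanf(f, "%d", &val) != 1)
                  val = -1;
          if (f)
                  fclose(f);
          return val;
  }

  static int write_max_stack(int val)
  {
          FILE *f = fopen(MAX_STACK_SYSCTL, "w");
          int err = 0;

          if (!f)
                  return -errno;
          if (fprintf(f, "%d\n", val) < 0)
                  err = -errno;
          if (fclose(f) != 0 && !err)
                  err = -errno;   /* -EBUSY while callchain events exist */
          return err;
  }

  int main(void)
  {
          int err;

          printf("current max stack: %d\n", read_max_stack()); /* 127 by default */
          err = write_max_stack(256);
          if (err)
                  fprintf(stderr, "write failed: %s\n", strerror(-err));
          return 0;
  }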
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 57653a44b128..260cde08e92e 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -60,6 +60,7 @@ show up in /proc/sys/kernel:
 - panic_on_warn
 - perf_cpu_time_max_percent
 - perf_event_paranoid
+- perf_event_max_stack
 - pid_max
 - powersave-nap               [ PPC only ]
 - printk
@@ -654,6 +655,19 @@ users (without CAP_SYS_ADMIN). The default value is 1.
 
 ==============================================================
 
+perf_event_max_stack:
+
+Controls maximum number of stack frames to copy for (attr.sample_type &
+PERF_SAMPLE_CALLCHAIN) configured events, for instance, when using
+'perf record -g' or 'perf trace --call-graph fp'.
+
+This can only be done when no events are in use that have callchains
+enabled, otherwise writing to this file will return -EBUSY.
+
+The default value is 127.
+
+==============================================================
+
 pid_max:
 
 PID allocation wrap value. When the kernel's next PID value
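
For context on when writes start failing with -EBUSY: any event created
with PERF_SAMPLE_CALLCHAIN in attr.sample_type counts as a callchain
user for as long as its fd is open. A sketch of creating such an event
(error handling trimmed; values illustrative):

  /* An event like this pins the limit; a concurrent write to
   * perf_event_max_stack returns -EBUSY until the fd is closed. */
  #include <string.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <linux/perf_event.h>

  static int open_callchain_event(void)
  {
          struct perf_event_attr attr;

          memset(&attr, 0, sizeof(attr));
          attr.size          = sizeof(attr);
          attr.type          = PERF_TYPE_HARDWARE;
          attr.config        = PERF_COUNT_HW_CPU_CYCLES;
          attr.sample_period = 100000;
          attr.sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_CALLCHAIN;

          /* pid 0 = self, cpu -1 = any, no group, no flags */
          return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
  }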
diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c
index 4e02ae5950ff..27563befa8a2 100644
--- a/arch/arm/kernel/perf_callchain.c
+++ b/arch/arm/kernel/perf_callchain.c
@@ -75,7 +75,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
 	tail = (struct frame_tail __user *)regs->ARM_fp - 1;
 
-	while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+	while ((entry->nr < sysctl_perf_event_max_stack) &&
 	       tail && !((unsigned long)tail & 0x3))
 		tail = user_backtrace(tail, entry);
 }
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index ff4665462a02..32c3c6e70119 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -122,7 +122,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
 
 		tail = (struct frame_tail __user *)regs->regs[29];
 
-		while (entry->nr < PERF_MAX_STACK_DEPTH &&
+		while (entry->nr < sysctl_perf_event_max_stack &&
 		       tail && !((unsigned long)tail & 0xf))
 			tail = user_backtrace(tail, entry);
 	} else {
@@ -132,7 +132,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
 
 		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
 
-		while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+		while ((entry->nr < sysctl_perf_event_max_stack) &&
 			tail && !((unsigned long)tail & 0x3))
 			tail = compat_user_backtrace(tail, entry);
 #endif
diff --git a/arch/metag/kernel/perf_callchain.c b/arch/metag/kernel/perf_callchain.c
index 315633461a94..252abc12a5a3 100644
--- a/arch/metag/kernel/perf_callchain.c
+++ b/arch/metag/kernel/perf_callchain.c
@@ -65,7 +65,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
 	--frame;
 
-	while ((entry->nr < PERF_MAX_STACK_DEPTH) && frame)
+	while ((entry->nr < sysctl_perf_event_max_stack) && frame)
 		frame = user_backtrace(frame, entry);
 }
 
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
index c1cf9c6c3f77..5021c546ad07 100644
--- a/arch/mips/kernel/perf_event.c
+++ b/arch/mips/kernel/perf_event.c
@@ -35,7 +35,7 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
 		addr = *sp++;
 		if (__kernel_text_address(addr)) {
 			perf_callchain_store(entry, addr);
-			if (entry->nr >= PERF_MAX_STACK_DEPTH)
+			if (entry->nr >= sysctl_perf_event_max_stack)
 				break;
 		}
 	}
@@ -59,7 +59,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
 	}
 	do {
 		perf_callchain_store(entry, pc);
-		if (entry->nr >= PERF_MAX_STACK_DEPTH)
+		if (entry->nr >= sysctl_perf_event_max_stack)
 			break;
 		pc = unwind_stack(current, &sp, pc, &ra);
 	} while (pc);
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index e04a6752b399..22d9015c1acc 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -247,7 +247,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
 	sp = regs->gpr[1];
 	perf_callchain_store(entry, next_ip);
 
-	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+	while (entry->nr < sysctl_perf_event_max_stack) {
 		fp = (unsigned long __user *) sp;
 		if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
 			return;
@@ -453,7 +453,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 	sp = regs->gpr[1];
 	perf_callchain_store(entry, next_ip);
 
-	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+	while (entry->nr < sysctl_perf_event_max_stack) {
 		fp = (unsigned int __user *) (unsigned long) sp;
 		if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
 			return;
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 6596f66ce112..a4b8b5aed21c 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1756,7 +1756,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
 			}
 		}
 #endif
-	} while (entry->nr < PERF_MAX_STACK_DEPTH);
+	} while (entry->nr < sysctl_perf_event_max_stack);
 }
 
 static inline int
@@ -1790,7 +1790,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
 		pc = sf.callers_pc;
 		ufp = (unsigned long)sf.fp + STACK_BIAS;
 		perf_callchain_store(entry, pc);
-	} while (entry->nr < PERF_MAX_STACK_DEPTH);
+	} while (entry->nr < sysctl_perf_event_max_stack);
 }
 
 static void perf_callchain_user_32(struct perf_callchain_entry *entry,
@@ -1822,7 +1822,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 			ufp = (unsigned long)sf.fp;
 		}
 		perf_callchain_store(entry, pc);
-	} while (entry->nr < PERF_MAX_STACK_DEPTH);
+	} while (entry->nr < sysctl_perf_event_max_stack);
 }
 
 void
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 041e442a3e28..41d93d0e972b 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2277,7 +2277,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 
 	fp = compat_ptr(ss_base + regs->bp);
 	pagefault_disable();
-	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+	while (entry->nr < sysctl_perf_event_max_stack) {
 		unsigned long bytes;
 		frame.next_frame = 0;
 		frame.return_address = 0;
@@ -2337,7 +2337,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 		return;
 
 	pagefault_disable();
-	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+	while (entry->nr < sysctl_perf_event_max_stack) {
 		unsigned long bytes;
 		frame.next_frame = NULL;
 		frame.return_address = 0;
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
index 54f01188c29c..a6b00b3af429 100644
--- a/arch/xtensa/kernel/perf_event.c
+++ b/arch/xtensa/kernel/perf_event.c
@@ -332,14 +332,14 @@ static int callchain_trace(struct stackframe *frame, void *data)
 void perf_callchain_kernel(struct perf_callchain_entry *entry,
 			   struct pt_regs *regs)
 {
-	xtensa_backtrace_kernel(regs, PERF_MAX_STACK_DEPTH,
+	xtensa_backtrace_kernel(regs, sysctl_perf_event_max_stack,
 				callchain_trace, NULL, entry);
 }
 
 void perf_callchain_user(struct perf_callchain_entry *entry,
 			 struct pt_regs *regs)
 {
-	xtensa_backtrace_user(regs, PERF_MAX_STACK_DEPTH,
+	xtensa_backtrace_user(regs, sysctl_perf_event_max_stack,
 			      callchain_trace, entry);
 }
 
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 85749ae8cb5f..a090700cccca 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -58,7 +58,7 @@ struct perf_guest_info_callbacks {
 
 struct perf_callchain_entry {
 	__u64				nr;
-	__u64				ip[PERF_MAX_STACK_DEPTH];
+	__u64				ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
 };
 
 struct perf_raw_record {
@@ -993,9 +993,11 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
 extern int get_callchain_buffers(void);
 extern void put_callchain_buffers(void);
 
+extern int sysctl_perf_event_max_stack;
+
 static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
 {
-	if (entry->nr < PERF_MAX_STACK_DEPTH) {
+	if (entry->nr < sysctl_perf_event_max_stack) {
 		entry->ip[entry->nr++] = ip;
 		return 0;
 	} else {
@@ -1017,6 +1019,8 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
 				void __user *buffer, size_t *lenp,
 				loff_t *ppos);
 
+int perf_event_max_stack_handler(struct ctl_table *table, int write,
+				 void __user *buffer, size_t *lenp, loff_t *ppos);
 
 static inline bool perf_paranoid_tracepoint_raw(void)
 {
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 499d9e933f8e..f5a19548be12 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -66,7 +66,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 || attr->key_size != 4 ||
 	    value_size < 8 || value_size % 8 ||
-	    value_size / 8 > PERF_MAX_STACK_DEPTH)
+	    value_size / 8 > sysctl_perf_event_max_stack)
 		return ERR_PTR(-EINVAL);
 
 	/* hash table size must be power of 2 */
@@ -124,8 +124,8 @@ static u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
 	struct perf_callchain_entry *trace;
 	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
 	u32 max_depth = map->value_size / 8;
-	/* stack_map_alloc() checks that max_depth <= PERF_MAX_STACK_DEPTH */
-	u32 init_nr = PERF_MAX_STACK_DEPTH - max_depth;
+	/* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
+	u32 init_nr = sysctl_perf_event_max_stack - max_depth;
 	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
 	u32 hash, id, trace_nr, trace_len;
 	bool user = flags & BPF_F_USER_STACK;
@@ -143,7 +143,7 @@ static u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
 		return -EFAULT;
 
 	/* get_perf_callchain() guarantees that trace->nr >= init_nr
-	 * and trace-nr <= PERF_MAX_STACK_DEPTH, so trace_nr <= max_depth
+	 * and trace-nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth
 	 */
 	trace_nr = trace->nr - init_nr;
 
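
The init_nr offset is worth a worked example, since a BPF stack map may
hold fewer slots than the global limit (figures illustrative):

  /* A map whose value holds 64 slots, under the default limit:
   *
   *      sysctl_perf_event_max_stack = 127
   *      max_depth = value_size / 8  = 64
   *      init_nr   = 127 - 64        = 63
   *
   * get_perf_callchain() starts storing at entry->ip[63], so only the
   * 64 innermost frames land in the buffer, and
   *      trace_nr = trace->nr - init_nr
   * can never exceed max_depth.
   */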
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 343c22f5e867..b9325e7dcba1 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -18,6 +18,14 @@ struct callchain_cpus_entries {
 	struct perf_callchain_entry	*cpu_entries[0];
 };
 
+int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
+
+static inline size_t perf_callchain_entry__sizeof(void)
+{
+	return (sizeof(struct perf_callchain_entry) +
+		sizeof(__u64) * sysctl_perf_event_max_stack);
+}
+
 static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
 static atomic_t nr_callchain_events;
 static DEFINE_MUTEX(callchain_mutex);
@@ -73,7 +81,7 @@ static int alloc_callchain_buffers(void)
 	if (!entries)
 		return -ENOMEM;
 
-	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
+	size = perf_callchain_entry__sizeof() * PERF_NR_CONTEXTS;
 
 	for_each_possible_cpu(cpu) {
 		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
@@ -147,7 +155,8 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx)
 
 	cpu = smp_processor_id();
 
-	return &entries->cpu_entries[cpu][*rctx];
+	return (((void *)entries->cpu_entries[cpu]) +
+		(*rctx * perf_callchain_entry__sizeof()));
 }
 
 static void
@@ -215,3 +224,25 @@ exit_put:
 
 	return entry;
 }
+
+int perf_event_max_stack_handler(struct ctl_table *table, int write,
+				 void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	int new_value = sysctl_perf_event_max_stack, ret;
+	struct ctl_table new_table = *table;
+
+	new_table.data = &new_value;
+	ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
+	if (ret || !write)
+		return ret;
+
+	mutex_lock(&callchain_mutex);
+	if (atomic_read(&nr_callchain_events))
+		ret = -EBUSY;
+	else
+		sysctl_perf_event_max_stack = new_value;
+
+	mutex_unlock(&callchain_mutex);
+
+	return ret;
+}
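
Resizing is only permitted while the buffers are unallocated because
every possible CPU holds PERF_NR_CONTEXTS entries sized off the sysctl.
A rough footprint estimate (figures illustrative, assuming 4 contexts)
also shows why bounding the value from above is prudent:

  /* per entry : 8 + max_stack * 8 bytes   (1024 at the default 127)
   * per CPU   : entry_size * PERF_NR_CONTEXTS
   * total     : per-CPU size * nr_possible_cpus
   *
   * e.g. max_stack = 1024, 4 contexts, 8 CPUs:
   *      (8 + 1024 * 8) * 4 * 8 = 262400 bytes, ~256 KiB
   */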
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 725587f10667..c8b318663525 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -130,6 +130,9 @@ static int one_thousand = 1000;
 #ifdef CONFIG_PRINTK
 static int ten_thousand = 10000;
 #endif
+#ifdef CONFIG_PERF_EVENTS
+static int six_hundred_forty_kb = 640 * 1024;
+#endif
 
 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
 static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
@@ -1144,6 +1147,15 @@ static struct ctl_table kern_table[] = {
 		.extra1		= &zero,
 		.extra2		= &one_hundred,
 	},
+	{
+		.procname	= "perf_event_max_stack",
+		.data		= NULL, /* filled in by handler */
+		.maxlen		= sizeof(sysctl_perf_event_max_stack),
+		.mode		= 0644,
+		.proc_handler	= perf_event_max_stack_handler,
+		.extra1		= &zero,
+		.extra2		= &six_hundred_forty_kb,
+	},
 #endif
 #ifdef CONFIG_KMEMCHECK
 	{
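
Putting the pieces together, the expected outcomes of writes to the new
file are roughly as follows (a sketch, not exhaustive):

  /* echo 256     -> accepted while no callchain events exist
   * echo 256     -> -EBUSY from perf_event_max_stack_handler while
   *                 any PERF_SAMPLE_CALLCHAIN event is open
   * echo -1      -> -EINVAL, below .extra1 (&zero)
   * echo 700000  -> -EINVAL, above .extra2 (&six_hundred_forty_kb,
   *                 i.e. 655360)
   */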