about summary refs log tree commit diff stats
path: root/kernel/bpf/stackmap.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/bpf/stackmap.c')
-rw-r--r--  kernel/bpf/stackmap.c  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 499d9e933f8e..f5a19548be12 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -66,7 +66,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 || attr->key_size != 4 ||
 	    value_size < 8 || value_size % 8 ||
-	    value_size / 8 > PERF_MAX_STACK_DEPTH)
+	    value_size / 8 > sysctl_perf_event_max_stack)
 		return ERR_PTR(-EINVAL);
 
 	/* hash table size must be power of 2 */
@@ -124,8 +124,8 @@ static u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
 	struct perf_callchain_entry *trace;
 	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
 	u32 max_depth = map->value_size / 8;
-	/* stack_map_alloc() checks that max_depth <= PERF_MAX_STACK_DEPTH */
-	u32 init_nr = PERF_MAX_STACK_DEPTH - max_depth;
+	/* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
+	u32 init_nr = sysctl_perf_event_max_stack - max_depth;
 	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
 	u32 hash, id, trace_nr, trace_len;
 	bool user = flags & BPF_F_USER_STACK;
@@ -143,7 +143,7 @@ static u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
 		return -EFAULT;
 
 	/* get_perf_callchain() guarantees that trace->nr >= init_nr
-	 * and trace-nr <= PERF_MAX_STACK_DEPTH, so trace_nr <= max_depth
+	 * and trace-nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth
 	 */
 	trace_nr = trace->nr - init_nr;