Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/stackmap.c            8
-rw-r--r--  kernel/bpf/verifier.c           11
-rw-r--r--  kernel/events/core.c            16
-rw-r--r--  kernel/events/ring_buffer.c      2
-rw-r--r--  kernel/signal.c                  7
-rw-r--r--  kernel/trace/trace.c             2
-rw-r--r--  kernel/trace/trace_kprobe.c     10
-rw-r--r--  kernel/trace/trace_probe_tmpl.h  6
8 files changed, 43 insertions, 19 deletions
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index d43b14535827..950ab2f28922 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -44,7 +44,7 @@ static void do_up_read(struct irq_work *entry)
 	struct stack_map_irq_work *work;
 
 	work = container_of(entry, struct stack_map_irq_work, irq_work);
-	up_read(work->sem);
+	up_read_non_owner(work->sem);
 	work->sem = NULL;
 }
 
@@ -338,6 +338,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
 	} else {
 		work->sem = &current->mm->mmap_sem;
 		irq_work_queue(&work->irq_work);
+		/*
+		 * The irq_work will release the mmap_sem with
+		 * up_read_non_owner(). The rwsem_release() is called
+		 * here to release the lock from lockdep's perspective.
+		 */
+		rwsem_release(&current->mm->mmap_sem.dep_map, 1, _RET_IP_);
 	}
 }
 
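The stackmap hunks pair three primitives: the task takes mmap_sem normally, queues an irq_work that later drops it with up_read_non_owner() (which skips the owner check), and immediately calls rwsem_release() so lockdep records the release in the acquiring task rather than flagging a cross-context unlock. A minimal sketch of that hand-off, assuming kernel context; the deferred_release naming is illustrative, while the rwsem, lockdep, and irq_work calls are the real APIs used above:

    #include <linux/irq_work.h>
    #include <linux/kernel.h>
    #include <linux/rwsem.h>

    struct deferred_release {
            struct irq_work irq_work;       /* init_irq_work(..., deferred_release_fn) */
            struct rw_semaphore *sem;
    };

    /* Runs from irq_work context, i.e. not in the task that took the
     * lock, so the _non_owner variant must be used. */
    static void deferred_release_fn(struct irq_work *entry)
    {
            struct deferred_release *d =
                    container_of(entry, struct deferred_release, irq_work);

            up_read_non_owner(d->sem);
            d->sem = NULL;
    }

    static void deferred_up_read(struct deferred_release *d,
                                 struct rw_semaphore *sem)
    {
            d->sem = sem;
            irq_work_queue(&d->irq_work);
            /*
             * The real up_read happens later in deferred_release_fn();
             * tell lockdep here, in the owning task, that the lock is
             * released, so the deferred unlock does not trip it.
             */
            rwsem_release(&sem->dep_map, 1, _RET_IP_);
    }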
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 56674a7c3778..8f295b790297 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1617,12 +1617,13 @@ static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
 	return 0;
 }
 
-static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
-			     int size, enum bpf_access_type t)
+static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
+			     u32 regno, int off, int size,
+			     enum bpf_access_type t)
 {
 	struct bpf_reg_state *regs = cur_regs(env);
 	struct bpf_reg_state *reg = &regs[regno];
-	struct bpf_insn_access_aux info;
+	struct bpf_insn_access_aux info = {};
 
 	if (reg->smin_value < 0) {
 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
@@ -1636,6 +1637,8 @@ static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
 		return -EACCES;
 	}
 
+	env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
+
 	return 0;
 }
 
@@ -2032,7 +2035,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 			verbose(env, "cannot write into socket\n");
 			return -EACCES;
 		}
-		err = check_sock_access(env, regno, off, size, t);
+		err = check_sock_access(env, insn_idx, regno, off, size, t);
 		if (!err && value_regno >= 0)
 			mark_reg_unknown(env, regs, value_regno);
 	} else {
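Two things happen in the verifier hunks: check_sock_access() now receives the instruction index so it can record the context field size in insn_aux_data, and info is zero-initialized because the access-check callbacks only fill in ctx_field_size on some paths. A condensed sketch of why the "= {}" matters; the types are trimmed to the one field of interest (the real bpf_insn_access_aux carries more), and narrow_ok()/check_access() are illustrative stand-ins:

    #include <linux/errno.h>
    #include <linux/types.h>

    struct access_aux {
            u32 ctx_field_size;     /* callee sets this only on some paths */
    };

    static bool narrow_ok(int size, struct access_aux *info)
    {
            if (size < 8)
                    info->ctx_field_size = 8;  /* narrow load of a u64 field */
            return true;    /* full-width loads leave info untouched */
    }

    static int check_access(u32 *aux_slot, int size)
    {
            struct access_aux info = {};    /* zero ctx_field_size up front */

            if (!narrow_ok(size, &info))
                    return -EACCES;

            /* Well-defined even when narrow_ok() never wrote the field;
             * without the initializer this would store stack garbage. */
            *aux_slot = info.ctx_field_size;
            return 0;
    }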
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e5ede6918050..26d6edab051a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4963,6 +4963,11 @@ static void __perf_event_period(struct perf_event *event,
 	}
 }
 
+static int perf_event_check_period(struct perf_event *event, u64 value)
+{
+	return event->pmu->check_period(event, value);
+}
+
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
 	u64 value;
@@ -4979,6 +4984,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
 		return -EINVAL;
 
+	if (perf_event_check_period(event, value))
+		return -EINVAL;
+
 	event_function_call(event, __perf_event_period, &value);
 
 	return 0;
@@ -9391,6 +9399,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
 	return 0;
 }
 
+static int perf_event_nop_int(struct perf_event *event, u64 value)
+{
+	return 0;
+}
+
 static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
 
 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
@@ -9691,6 +9704,9 @@ got_cpu_context:
 		pmu->pmu_disable = perf_pmu_nop_void;
 	}
 
+	if (!pmu->check_period)
+		pmu->check_period = perf_event_nop_int;
+
 	if (!pmu->event_idx)
 		pmu->event_idx = perf_event_idx_default;
 
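The perf/core hunks add an optional check_period callback to struct pmu, defaulted to a nop that accepts everything, and consult it before a new sample period is committed, so a driver can veto values its hardware cannot program. A driver-side sketch, assuming a hypothetical my_pmu with a made-up minimum period; only the .check_period wiring mirrors the patch:

    #include <linux/perf_event.h>

    #define MY_PMU_MIN_PERIOD 16    /* illustrative hardware limit */

    static int my_pmu_check_period(struct perf_event *event, u64 value)
    {
            /* Any non-zero return makes perf_event_period() fail with -EINVAL. */
            if (!event->attr.freq && value < MY_PMU_MIN_PERIOD)
                    return -EINVAL;
            return 0;
    }

    static struct pmu my_pmu = {
            /* .event_init, .add, .del, .start, .stop, .read, ... */
            .check_period   = my_pmu_check_period,
    };

Drivers that leave .check_period NULL keep the old behaviour: the perf_event_nop_int() default accepts every value.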
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 309ef5a64af5..5ab4fe3b1dcc 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -734,7 +734,7 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
 	size = sizeof(struct ring_buffer);
 	size += nr_pages * sizeof(void *);
 
-	if (order_base_2(size) >= MAX_ORDER)
+	if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
 		goto fail;
 
 	rb = kzalloc(size, GFP_KERNEL);
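The ring-buffer fix corrects a units mismatch: order_base_2(size) is the log2 of a size in bytes, while MAX_ORDER bounds the page allocator in pages. With PAGE_SHIFT = 12 and MAX_ORDER = 11 (typical x86-64 values), the largest single allocation is 2^(12 + 11 - 1) = 4 MiB, yet the old test rejected any metadata block of 2^11 = 2 KiB or more, i.e. only a couple hundred ring-buffer page pointers on 64-bit. Comparing against PAGE_SHIFT + MAX_ORDER restores a bytes-vs-bytes check:

    /* Worked with illustrative values PAGE_SHIFT = 12, MAX_ORDER = 11:
     *   allocator limit:  order_base_2(size) <= 12 + 11 - 1   (4 MiB)
     *   old check:        order_base_2(size) <  11            (2 KiB!)
     */
    if (order_base_2(size) >= PAGE_SHIFT + MAX_ORDER)
            goto fail;      /* would exceed the maximum kmalloc size */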
diff --git a/kernel/signal.c b/kernel/signal.c
index 99fa8ff06fd9..57b7771e20d7 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2436,9 +2436,12 @@ relock:
 	}
 
 	/* Has this task already been marked for death? */
-	ksig->info.si_signo = signr = SIGKILL;
-	if (signal_group_exit(signal))
+	if (signal_group_exit(signal)) {
+		ksig->info.si_signo = signr = SIGKILL;
+		sigdelset(&current->pending.signal, SIGKILL);
+		recalc_sigpending();
 		goto fatal;
+	}
 
 	for (;;) {
 		struct k_sigaction *ka;
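The signal.c change does more than restructure the branch: when a group exit is already underway, the synthesized SIGKILL is now also removed from the per-thread pending set and TIF_SIGPENDING is recomputed, so a SIGKILL that has already been acted on is not left visible in the pending mask, where it could confuse later pending-signal checks. A sketch of that consume-by-hand pattern, assuming kernel context; the helper name is illustrative, and ->siglock is held as it is at this point in get_signal():

    #include <linux/sched/signal.h>

    /* current->sighand->siglock must be held, as in get_signal(). */
    static void consume_pending_sigkill(void)
    {
            sigdelset(&current->pending.signal, SIGKILL);   /* per-thread set only */
            recalc_sigpending();    /* drop TIF_SIGPENDING if nothing is left */
    }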
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c521b7347482..c4238b441624 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3384,6 +3384,8 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
 	const char tgid_space[] = " ";
 	const char space[] = " ";
 
+	print_event_info(buf, m);
+
 	seq_printf(m, "# %s _-----=> irqs-off\n",
 		   tgid ? tgid_space : space);
 	seq_printf(m, "# %s / _----=> need-resched\n",
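The tracing fix calls print_event_info() before the column legend is printed; the irq-flags variant of the header had skipped it, so the buffer-statistics line went missing from the output. With the call restored, the header opens with the entries line again, roughly like the following (the counts and CPU number are illustrative):

    # entries-in-buffer/entries-written: 128/128   #P:4
    #
    #                            _-----=> irqs-off
    #                           / _----=> need-resched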
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index d5fb09ebba8b..9eaf07f99212 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -861,22 +861,14 @@ static const struct file_operations kprobe_profile_ops = {
 static nokprobe_inline int
 fetch_store_strlen(unsigned long addr)
 {
-	mm_segment_t old_fs;
 	int ret, len = 0;
 	u8 c;
 
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	pagefault_disable();
-
 	do {
-		ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
+		ret = probe_mem_read(&c, (u8 *)addr + len, 1);
 		len++;
 	} while (c && ret == 0 && len < MAX_STRING_SIZE);
 
-	pagefault_enable();
-	set_fs(old_fs);
-
 	return (ret < 0) ? ret : len;
 }
 
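The kprobe hunk replaces the open-coded set_fs(KERNEL_DS)/pagefault_disable() dance with probe_mem_read(). In this tree that helper is a thin wrapper over probe_kernel_read(), which centralizes the same steps in mm/maccess.c; a sketch of the wrapper as it appears elsewhere in trace_kprobe.c:

    #include <linux/kprobes.h>
    #include <linux/uaccess.h>

    static nokprobe_inline int
    probe_mem_read(void *dest, void *src, size_t size)
    {
            return probe_kernel_read(dest, src, size);
    }

    /* probe_kernel_read() itself performs, roughly:
     *      old_fs = get_fs(); set_fs(KERNEL_DS);
     *      pagefault_disable();
     *      ret = __copy_from_user_inatomic(dst, src, size);
     *      pagefault_enable();
     *      set_fs(old_fs);
     * so fetch_store_strlen() no longer needs to open-code it.
     */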
diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h
index 5c56afc17cf8..4737bb8c07a3 100644
--- a/kernel/trace/trace_probe_tmpl.h
+++ b/kernel/trace/trace_probe_tmpl.h
@@ -180,10 +180,12 @@ store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs,
 		if (unlikely(arg->dynamic))
 			*dl = make_data_loc(maxlen, dyndata - base);
 		ret = process_fetch_insn(arg->code, regs, dl, base);
-		if (unlikely(ret < 0 && arg->dynamic))
+		if (unlikely(ret < 0 && arg->dynamic)) {
 			*dl = make_data_loc(0, dyndata - base);
-		else
+		} else {
 			dyndata += ret;
+			maxlen -= ret;
+		}
 	}
 }
 
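The template fix makes maxlen track the space actually left in the dynamic area: each stored argument advances dyndata, so the capacity offered to the next make_data_loc() must shrink by the same amount, otherwise a later dynamic argument would be told it may use the full original budget and could write past the reserved event. A worked example with illustrative numbers, assuming two dynamic string arguments and a 100-byte dynamic area:

    /*
     * start:       maxlen = 100, dyndata = base + header
     * arg1 stores 30 bytes:
     *      dyndata += 30;
     *      maxlen  -= 30;          <-- the added line
     * arg2 is now offered make_data_loc(70, ...) rather than
     * make_data_loc(100, ...), so it cannot overrun the 70 bytes
     * that actually remain in the reserved event.
     */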