Diffstat (limited to 'kernel')
 kernel/bpf/stackmap.c       |  8 +++++++-
 kernel/events/core.c        | 16 ++++++++++++++++
 kernel/events/ring_buffer.c |  2 +-
 kernel/trace/trace.c        |  2 ++
 kernel/trace/trace_kprobe.c | 10 +---------
 5 files changed, 27 insertions(+), 11 deletions(-)
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index d43b14535827..950ab2f28922 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -44,7 +44,7 @@ static void do_up_read(struct irq_work *entry)
 	struct stack_map_irq_work *work;
 
 	work = container_of(entry, struct stack_map_irq_work, irq_work);
-	up_read(work->sem);
+	up_read_non_owner(work->sem);
 	work->sem = NULL;
 }
 
@@ -338,6 +338,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
 	} else {
 		work->sem = &current->mm->mmap_sem;
 		irq_work_queue(&work->irq_work);
+		/*
+		 * The irq_work will release the mmap_sem with
+		 * up_read_non_owner(). The rwsem_release() is called
+		 * here to release the lock from lockdep's perspective.
+		 */
+		rwsem_release(&current->mm->mmap_sem.dep_map, 1, _RET_IP_);
 	}
 }
 
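Note on the stackmap.c change: the read side of mmap_sem is acquired in one context but released later from irq_work, so lockdep would otherwise complain that a non-owner drops the lock. Below is a minimal sketch of that hand-off pattern; struct deferred_up_read and the two function names are illustrative, only the rwsem/lockdep/irq_work calls are real kernel APIs.

	/* Sketch only: the structure and function names are illustrative. */
	struct deferred_up_read {
		struct irq_work irq_work;
		struct rw_semaphore *sem;
	};

	static void deferred_up_read_fn(struct irq_work *entry)
	{
		struct deferred_up_read *w = container_of(entry, struct deferred_up_read, irq_work);

		/* this context never acquired the lock, so skip the owner check */
		up_read_non_owner(w->sem);
		w->sem = NULL;
	}

	static void hand_off_up_read(struct deferred_up_read *w, struct rw_semaphore *sem)
	{
		w->sem = sem;
		irq_work_queue(&w->irq_work);
		/* tell lockdep the acquiring context is done; the real release happens in irq_work */
		rwsem_release(&sem->dep_map, 1, _RET_IP_);
	}
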
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e5ede6918050..26d6edab051a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4963,6 +4963,11 @@ static void __perf_event_period(struct perf_event *event,
 	}
 }
 
+static int perf_event_check_period(struct perf_event *event, u64 value)
+{
+	return event->pmu->check_period(event, value);
+}
+
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
 	u64 value;
@@ -4979,6 +4984,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
 		return -EINVAL;
 
+	if (perf_event_check_period(event, value))
+		return -EINVAL;
+
 	event_function_call(event, __perf_event_period, &value);
 
 	return 0;
@@ -9391,6 +9399,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
 	return 0;
 }
 
+static int perf_event_nop_int(struct perf_event *event, u64 value)
+{
+	return 0;
+}
+
 static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
 
 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
@@ -9691,6 +9704,9 @@ got_cpu_context:
 		pmu->pmu_disable = perf_pmu_nop_void;
 	}
 
+	if (!pmu->check_period)
+		pmu->check_period = perf_event_nop_int;
+
 	if (!pmu->event_idx)
 		pmu->event_idx = perf_event_idx_default;
 
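Taken together, the core.c hunks add an optional pmu::check_period callback: perf_event_period() now rejects a new sample period whenever the PMU's callback returns non-zero, and PMUs that do not provide one are given the perf_event_nop_int stub at registration time. A hypothetical driver-side implementation might look like the sketch below; my_pmu and the 0x7fffffff limit are illustrative, not taken from any real driver.

	/* Illustrative only: reject periods wider than this fictional PMU's counter. */
	static int my_pmu_check_period(struct perf_event *event, u64 value)
	{
		return value > 0x7fffffffULL ? -EINVAL : 0;
	}

	static struct pmu my_pmu = {
		/* .add, .del, .start, .stop, ... omitted */
		.check_period	= my_pmu_check_period,
	};
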
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 309ef5a64af5..5ab4fe3b1dcc 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -734,7 +734,7 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
 	size = sizeof(struct ring_buffer);
 	size += nr_pages * sizeof(void *);
 
-	if (order_base_2(size) >= MAX_ORDER)
+	if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
 		goto fail;
 
 	rb = kzalloc(size, GFP_KERNEL);
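The ring_buffer.c fix is a units correction: MAX_ORDER limits allocations in pages, while order_base_2(size) here is computed over a size in bytes. Assuming the common values PAGE_SHIFT = 12 and MAX_ORDER = 11 (both are architecture- and config-dependent), the page allocator can serve at most 2^(MAX_ORDER - 1) = 1024 pages, i.e. about 4 MiB, so the old test wrongly failed any metadata buffer larger than 1 KiB. The new test, order_base_2(size) >= PAGE_SHIFT + MAX_ORDER, is equivalent to size > 2^(PAGE_SHIFT + MAX_ORDER - 1) and therefore only rejects sizes that a maximal page allocation could not satisfy anyway.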
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c521b7347482..c4238b441624 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3384,6 +3384,8 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
 	const char tgid_space[] = " ";
 	const char space[] = " ";
 
+	print_event_info(buf, m);
+
 	seq_printf(m, "# %s _-----=> irqs-off\n",
 		   tgid ? tgid_space : space);
 	seq_printf(m, "# %s / _----=> need-resched\n",
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index d5fb09ebba8b..9eaf07f99212 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -861,22 +861,14 @@ static const struct file_operations kprobe_profile_ops = {
 static nokprobe_inline int
 fetch_store_strlen(unsigned long addr)
 {
-	mm_segment_t old_fs;
 	int ret, len = 0;
 	u8 c;
 
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	pagefault_disable();
-
 	do {
-		ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
+		ret = probe_mem_read(&c, (u8 *)addr + len, 1);
 		len++;
 	} while (c && ret == 0 && len < MAX_STRING_SIZE);
 
-	pagefault_enable();
-	set_fs(old_fs);
-
 	return (ret < 0) ? ret : len;
 }
 
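The trace_kprobe.c hunk can drop the open-coded set_fs(KERNEL_DS)/pagefault_disable() sequence because the read now goes through probe_mem_read(), a helper local to this file. Assuming that helper is a thin wrapper around probe_kernel_read(), which already performs a fault-safe access with page faults disabled, a sketch of it would be:

	/* Sketch, assuming probe_mem_read() simply wraps probe_kernel_read();
	 * the real helper lives elsewhere in trace_kprobe.c and may differ. */
	static nokprobe_inline int
	probe_mem_read(void *dest, void *src, size_t size)
	{
		return probe_kernel_read(dest, src, size);
	}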