Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/syscall.c      |  2
-rw-r--r--  kernel/cgroup/cgroup.c    | 10
-rw-r--r--  kernel/compat.c           | 19
-rw-r--r--  kernel/events/core.c      |  4
-rw-r--r--  kernel/fail_function.c    | 10
-rw-r--r--  kernel/jump_label.c       |  3
-rw-r--r--  kernel/locking/rtmutex.c  |  5
-rw-r--r--  kernel/memremap.c         |  1
-rw-r--r--  kernel/module.c           |  2
-rw-r--r--  kernel/panic.c            |  2
-rw-r--r--  kernel/sched/core.c       | 15
-rw-r--r--  kernel/trace/bpf_trace.c  | 68
-rw-r--r--  kernel/workqueue.c        | 10
13 files changed, 82 insertions(+), 69 deletions(-)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 3aeb4ea2a93a..dd172ee16716 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1849,7 +1849,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
 	union bpf_attr attr = {};
 	int err;
 
-	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
+	if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
 	err = check_uarg_tail_zero(uattr, sizeof(attr), size);
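
The only functional content of this hunk is the operand order of the &&. Presumably the intent is that the cheap sysctl_unprivileged_bpf_disabled test short-circuits the call to capable(), which is not a pure predicate (it can set PF_SUPERPRIV on the task and generate audit records), so those side effects no longer occur on every bpf() call when unprivileged bpf is allowed. A minimal userspace sketch of the pattern, with made-up names standing in for the sysctl and for capable():

    #include <stdbool.h>
    #include <stdio.h>

    static bool restricted;            /* stands in for sysctl_unprivileged_bpf_disabled */

    static bool privileged_check(void) /* stands in for capable(CAP_SYS_ADMIN) */
    {
        puts("privilege check ran (has side effects)");
        return true;
    }

    static int do_request(void)
    {
        /* Cheap, side-effect-free test first: the privileged check only
         * runs when the feature is actually restricted. */
        if (restricted && !privileged_check())
            return -1;                 /* -EPERM in the kernel */
        return 0;
    }

    int main(void)
    {
        restricted = false;
        do_request();                  /* privileged check skipped entirely */
        restricted = true;
        do_request();                  /* now the privileged check runs */
        return 0;
    }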
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 8cda3bc3ae22..4bfb2908ec15 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -3183,6 +3183,16 @@ static int cgroup_enable_threaded(struct cgroup *cgrp)
 	if (cgroup_is_threaded(cgrp))
 		return 0;
 
+	/*
+	 * If @cgroup is populated or has domain controllers enabled, it
+	 * can't be switched.  While the below cgroup_can_be_thread_root()
+	 * test can catch the same conditions, that's only when @parent is
+	 * not mixable, so let's check it explicitly.
+	 */
+	if (cgroup_is_populated(cgrp) ||
+	    cgrp->subtree_control & ~cgrp_dfl_threaded_ss_mask)
+		return -EOPNOTSUPP;
+
 	/* we're joining the parent's domain, ensure its validity */
 	if (!cgroup_is_valid_domain(dom_cgrp) ||
 	    !cgroup_can_be_thread_root(dom_cgrp))
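
The added check mirrors its own comment: a cgroup that is already populated, or that has non-threaded (domain) controllers enabled in subtree_control, can no longer be flipped to threaded mode, and the failure is reported as EOPNOTSUPP rather than relying on cgroup_can_be_thread_root() catching it only for non-mixable parents. A small userspace sketch of how that error would surface when writing cgroup.type (the mount point and cgroup name are illustrative):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* Illustrative path; cgroup2 is usually mounted at /sys/fs/cgroup. */
        const char *path = "/sys/fs/cgroup/test/cgroup.type";
        int fd = open(path, O_WRONLY);

        if (fd < 0)
            return 1;

        /* cgroup_enable_threaded() runs in response to this write. */
        if (write(fd, "threaded", 8) < 0 && errno == EOPNOTSUPP)
            fprintf(stderr,
                    "cgroup is populated or has domain controllers enabled\n");

        close(fd);
        return 0;
    }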
diff --git a/kernel/compat.c b/kernel/compat.c
index 3247fe761f60..3f5fa8902e7d 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -488,25 +488,6 @@ get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat)
 }
 EXPORT_SYMBOL_GPL(get_compat_sigset);
 
-int
-put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
-		  unsigned int size)
-{
-	/* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */
-#ifdef __BIG_ENDIAN
-	compat_sigset_t v;
-	switch (_NSIG_WORDS) {
-	case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
-	case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
-	case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
-	case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
-	}
-	return copy_to_user(compat, &v, size) ? -EFAULT : 0;
-#else
-	return copy_to_user(compat, set, size) ? -EFAULT : 0;
-#endif
-}
-
 #ifdef CONFIG_NUMA
 COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
 		       compat_uptr_t __user *, pages32,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 96db9ae5d5af..4b838470fac4 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2246,7 +2246,7 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
 			struct perf_event_context *task_ctx,
 			enum event_type_t event_type)
 {
-	enum event_type_t ctx_event_type = event_type & EVENT_ALL;
+	enum event_type_t ctx_event_type;
 	bool cpu_event = !!(event_type & EVENT_CPU);
 
 	/*
@@ -2256,6 +2256,8 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
 	if (event_type & EVENT_PINNED)
 		event_type |= EVENT_FLEXIBLE;
 
+	ctx_event_type = event_type & EVENT_ALL;
+
 	perf_pmu_disable(cpuctx->ctx.pmu);
 	if (task_ctx)
 		task_ctx_sched_out(cpuctx, task_ctx, event_type);
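
As I read this change, ctx_event_type used to be derived from event_type before the "pinned implies flexible" adjustment, so a resched triggered by a pinned event did not include the flexible part of the context; computing the mask after the adjustment keeps the two in sync. A standalone sketch with simplified flag values:

    #include <stdio.h>

    /* Simplified stand-ins for the perf event_type_t flags (values illustrative). */
    enum {
        EVENT_FLEXIBLE = 0x1,
        EVENT_PINNED   = 0x2,
        EVENT_CPU      = 0x4,
        EVENT_ALL      = EVENT_FLEXIBLE | EVENT_PINNED,
    };

    int main(void)
    {
        int event_type = EVENT_PINNED | EVENT_CPU;
        int before, after;

        before = event_type & EVENT_ALL;    /* old order: PINNED only */

        /* Scheduling in pinned events kicks out flexible ones, so those
         * have to be rescheduled as well. */
        if (event_type & EVENT_PINNED)
            event_type |= EVENT_FLEXIBLE;

        after = event_type & EVENT_ALL;     /* new order: PINNED | FLEXIBLE */

        printf("before=%#x after=%#x\n", before, after);
        return 0;
    }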
diff --git a/kernel/fail_function.c b/kernel/fail_function.c
index 21b0122cb39c..1d5632d8bbcc 100644
--- a/kernel/fail_function.c
+++ b/kernel/fail_function.c
@@ -14,6 +14,15 @@
 
 static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs);
 
+static void fei_post_handler(struct kprobe *kp, struct pt_regs *regs,
+			     unsigned long flags)
+{
+	/*
+	 * A dummy post handler is required to prohibit optimizing, because
+	 * jump optimization does not support execution path overriding.
+	 */
+}
+
 struct fei_attr {
 	struct list_head list;
 	struct kprobe kp;
@@ -56,6 +65,7 @@ static struct fei_attr *fei_attr_new(const char *sym, unsigned long addr)
 			return NULL;
 		}
 		attr->kp.pre_handler = fei_kprobe_handler;
+		attr->kp.post_handler = fei_post_handler;
 		attr->retval = adjust_error_retval(addr, 0);
 		INIT_LIST_HEAD(&attr->list);
 	}
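
The new, empty post handler exists only for its side effect, spelled out in its comment: a kprobe that has a post_handler is excluded from jump optimization, and an optimized probe could not honor the execution-path override that the fault-injection pre handler performs. A minimal kprobes sketch of the same registration pattern (module context assumed; the probed symbol is illustrative):

    #include <linux/kprobes.h>
    #include <linux/module.h>

    static int demo_pre(struct kprobe *kp, struct pt_regs *regs)
    {
        return 0;                       /* 0 = let execution continue normally */
    }

    static void demo_post(struct kprobe *kp, struct pt_regs *regs,
                          unsigned long flags)
    {
        /* intentionally empty: its existence alone disables optimization */
    }

    static struct kprobe demo_probe = {
        .symbol_name  = "do_sys_open",  /* illustrative target */
        .pre_handler  = demo_pre,
        .post_handler = demo_post,
    };

    static int __init demo_init(void)
    {
        return register_kprobe(&demo_probe);
    }

    static void __exit demo_exit(void)
    {
        unregister_kprobe(&demo_probe);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");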
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 52a0a7af8640..e7214093dcd1 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -373,7 +373,8 @@ static void __jump_label_update(struct static_key *key,
 		if (kernel_text_address(entry->code))
 			arch_jump_label_transform(entry, jump_label_type(entry));
 		else
-			WARN_ONCE(1, "can't patch jump_label at %pS", (void *)entry->code);
+			WARN_ONCE(1, "can't patch jump_label at %pS",
+				  (void *)(unsigned long)entry->code);
 		}
 	}
 }
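
Presumably entry->code is a fixed-width integer that is wider than a pointer on some 32-bit configurations, so casting it straight to void * can trigger a "cast to pointer from integer of different size" warning; narrowing through unsigned long first keeps the %pS output while silencing that. A tiny userspace illustration of the same idiom:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t code = 0x1234;         /* stands in for a fixed-width entry->code */

        /* On an ILP32 target, (void *)code warns about an integer-to-pointer
         * size mismatch; narrowing explicitly first is the portable spelling. */
        void *addr = (void *)(unsigned long)code;

        printf("%p\n", addr);
        return 0;
    }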
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 65cc0cb984e6..940633c63254 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1616,11 +1616,12 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
 void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
 {
 	DEFINE_WAKE_Q(wake_q);
+	unsigned long flags;
 	bool postunlock;
 
-	raw_spin_lock_irq(&lock->wait_lock);
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
-	raw_spin_unlock_irq(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	if (postunlock)
 		rt_mutex_postunlock(&wake_q);
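
Switching to the irqsave/irqrestore variants means the unlock path no longer unconditionally re-enables interrupts, so rt_mutex_futex_unlock() stays correct when a caller already runs with interrupts disabled, which is presumably the point of the change. A generic sketch of the pattern with an illustrative lock and counter:

    /* Sketch only: _irqsave/_irqrestore preserve the caller's interrupt
     * state instead of blindly re-enabling interrupts on unlock. */
    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(demo_lock);
    static int demo_counter;

    static void demo_update(void)
    {
        unsigned long flags;

        raw_spin_lock_irqsave(&demo_lock, flags);
        demo_counter++;
        raw_spin_unlock_irqrestore(&demo_lock, flags);
        /* If the caller entered with IRQs off, they are still off here. */
    }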
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 4dd4274cabe2..895e6b76b25e 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -427,7 +427,6 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
  err_pfn_remap:
  err_radix:
 	pgmap_radix_release(res, pgoff);
-	devres_free(pgmap);
 	return ERR_PTR(error);
 }
 EXPORT_SYMBOL(devm_memremap_pages);
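
In this version of the interface the struct dev_pagemap is supplied by the caller (it is the second argument of devm_memremap_pages()), so the error path presumably must not hand it to devres_free(): that call is only valid for objects obtained from devres_alloc(). A sketch of the correct pairing, with hypothetical driver names:

    #include <linux/device.h>
    #include <linux/gfp.h>

    /* Hypothetical per-device state and a stand-in setup step that fails. */
    struct demo_state {
        int value;
    };

    static int demo_hw_init(void)
    {
        return -EIO;
    }

    static void demo_release(struct device *dev, void *res)
    {
        /* teardown for the devres-managed object would go here */
    }

    static int demo_setup(struct device *dev)
    {
        struct demo_state *state;
        int err;

        state = devres_alloc(demo_release, sizeof(*state), GFP_KERNEL);
        if (!state)
            return -ENOMEM;

        err = demo_hw_init();
        if (err) {
            devres_free(state);   /* fine: allocated via devres_alloc() above */
            return err;
        }

        devres_add(dev, state);
        return 0;
    }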
diff --git a/kernel/module.c b/kernel/module.c
index ad2d420024f6..e42764acedb4 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -4228,7 +4228,7 @@ static int modules_open(struct inode *inode, struct file *file)
 		m->private = kallsyms_show_value() ? NULL : (void *)8ul;
 	}
 
-	return 0;
+	return err;
 }
 
 static const struct file_operations proc_modules_operations = {
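
err presumably holds the result of the seq_open() call a few lines above the hunk, so returning 0 unconditionally would report success even when the open failed; returning err propagates the failure. A sketch of the corrected pattern with illustrative names:

    #include <linux/seq_file.h>

    static const struct seq_operations demo_seq_ops;    /* .start/.next/.stop/.show elsewhere */

    static int demo_open(struct inode *inode, struct file *file)
    {
        int err = seq_open(file, &demo_seq_ops);

        if (!err) {
            struct seq_file *m = file->private_data;

            m->private = NULL;          /* per-open state would be set up here */
        }

        return err;                     /* previously: return 0; */
    }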
diff --git a/kernel/panic.c b/kernel/panic.c
index 2cfef408fec9..4b794f1d8561 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -640,7 +640,7 @@ device_initcall(register_warn_debugfs);
  */
 __visible void __stack_chk_fail(void)
 {
-	panic("stack-protector: Kernel stack is corrupted in: %p\n",
+	panic("stack-protector: Kernel stack is corrupted in: %pB\n",
 	      __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__stack_chk_fail);
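
%p prints only a (hashed) pointer value, while %pB symbolizes the address with the backtrace adjustment, i.e. it resolves a return address to the calling symbol+offset, which is what __builtin_return_address(0) provides here. A small sketch of the specifier in use (function names are made up):

    #include <linux/kernel.h>
    #include <linux/printk.h>

    static noinline void demo_report_caller(void)
    {
        /* %pB maps a return address to the call site's symbol+offset;
         * %pS would point just past the call, %p only prints a hashed value. */
        pr_info("called from %pB\n", __builtin_return_address(0));
    }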
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e7c535eee0a6..c94895bc5a2c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6683,13 +6683,18 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
 		parent_quota = parent_b->hierarchical_quota;
 
 		/*
-		 * Ensure max(child_quota) <= parent_quota, inherit when no
+		 * Ensure max(child_quota) <= parent_quota.  On cgroup2,
+		 * always take the min.  On cgroup1, only inherit when no
 		 * limit is set:
 		 */
-		if (quota == RUNTIME_INF)
-			quota = parent_quota;
-		else if (parent_quota != RUNTIME_INF && quota > parent_quota)
-			return -EINVAL;
+		if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
+			quota = min(quota, parent_quota);
+		} else {
+			if (quota == RUNTIME_INF)
+				quota = parent_quota;
+			else if (parent_quota != RUNTIME_INF && quota > parent_quota)
+				return -EINVAL;
+		}
 	}
 	cfs_b->hierarchical_quota = quota;
 
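
The new comment states the rule: on the unified hierarchy (cgroup2) the effective quota is always clamped to the parent's via min(), while cgroup1 keeps the old behaviour of inheriting an unset quota and rejecting a child quota larger than the parent's. A standalone model of the two behaviours (simplified; returns 0 where the kernel would return -EINVAL):

    #include <stdio.h>

    #define RUNTIME_INF (~0ULL)     /* stand-in for the kernel's "no limit" value */

    /* cgroup2 behaviour: the effective quota is always clamped to the parent. */
    static unsigned long long quota_v2(unsigned long long quota,
                                       unsigned long long parent_quota)
    {
        return quota < parent_quota ? quota : parent_quota;
    }

    /* cgroup1 behaviour kept for compatibility: inherit when unset, reject a
     * child quota that exceeds the parent's. */
    static unsigned long long quota_v1(unsigned long long quota,
                                       unsigned long long parent_quota)
    {
        if (quota == RUNTIME_INF)
            return parent_quota;
        if (parent_quota != RUNTIME_INF && quota > parent_quota)
            return 0;               /* the kernel returns -EINVAL here */
        return quota;
    }

    int main(void)
    {
        printf("v2: %llu\n", quota_v2(500000ULL, 300000ULL));  /* 300000 */
        printf("v1: %llu\n", quota_v1(500000ULL, 300000ULL));  /* rejected -> 0 */
        return 0;
    }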
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index c634e093951f..7f9691c86b6e 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -661,7 +661,41 @@ static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
 	.arg3_type	= ARG_ANYTHING,
 };
 
-BPF_CALL_3(bpf_perf_prog_read_value_tp, struct bpf_perf_event_data_kern *, ctx,
+static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
+{
+	switch (func_id) {
+	case BPF_FUNC_perf_event_output:
+		return &bpf_perf_event_output_proto_tp;
+	case BPF_FUNC_get_stackid:
+		return &bpf_get_stackid_proto_tp;
+	default:
+		return tracing_func_proto(func_id);
+	}
+}
+
+static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
+				    struct bpf_insn_access_aux *info)
+{
+	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
+		return false;
+	if (type != BPF_READ)
+		return false;
+	if (off % size != 0)
+		return false;
+
+	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
+	return true;
+}
+
+const struct bpf_verifier_ops tracepoint_verifier_ops = {
+	.get_func_proto  = tp_prog_func_proto,
+	.is_valid_access = tp_prog_is_valid_access,
+};
+
+const struct bpf_prog_ops tracepoint_prog_ops = {
+};
+
+BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
 	   struct bpf_perf_event_value *, buf, u32, size)
 {
 	int err = -EINVAL;
@@ -678,8 +712,8 @@ clear:
 	return err;
 }
 
-static const struct bpf_func_proto bpf_perf_prog_read_value_proto_tp = {
-	.func		= bpf_perf_prog_read_value_tp,
+static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
+	.func		= bpf_perf_prog_read_value,
 	.gpl_only	= true,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_PTR_TO_CTX,
@@ -687,7 +721,7 @@ static const struct bpf_func_proto bpf_perf_prog_read_value_proto_tp = {
 	.arg3_type	= ARG_CONST_SIZE,
 };
 
-static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
+static const struct bpf_func_proto *pe_prog_func_proto(enum bpf_func_id func_id)
 {
 	switch (func_id) {
 	case BPF_FUNC_perf_event_output:
@@ -695,34 +729,12 @@ static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
 	case BPF_FUNC_get_stackid:
 		return &bpf_get_stackid_proto_tp;
 	case BPF_FUNC_perf_prog_read_value:
-		return &bpf_perf_prog_read_value_proto_tp;
+		return &bpf_perf_prog_read_value_proto;
 	default:
 		return tracing_func_proto(func_id);
 	}
 }
 
-static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
-				    struct bpf_insn_access_aux *info)
-{
-	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
-		return false;
-	if (type != BPF_READ)
-		return false;
-	if (off % size != 0)
-		return false;
-
-	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
-	return true;
-}
-
-const struct bpf_verifier_ops tracepoint_verifier_ops = {
-	.get_func_proto  = tp_prog_func_proto,
-	.is_valid_access = tp_prog_is_valid_access,
-};
-
-const struct bpf_prog_ops tracepoint_prog_ops = {
-};
-
 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
 				    struct bpf_insn_access_aux *info)
 {
@@ -791,7 +803,7 @@ static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
 }
 
 const struct bpf_verifier_ops perf_event_verifier_ops = {
-	.get_func_proto		= tp_prog_func_proto,
+	.get_func_proto		= pe_prog_func_proto,
 	.is_valid_access	= pe_prog_is_valid_access,
 	.convert_ctx_access	= pe_prog_convert_ctx_access,
 };
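
Taken together, the bpf_trace.c hunks move the tracepoint helpers above bpf_perf_prog_read_value and split the previously shared callback in two: tp_prog_func_proto for tracepoint programs and pe_prog_func_proto for perf_event programs, with only the latter exposing bpf_perf_prog_read_value and perf_event_verifier_ops now pointing at pe_prog_func_proto. A sketch of a perf_event BPF program that uses that helper (includes and section name follow the usual libbpf conventions and are illustrative):

    #include <linux/bpf.h>
    #include <linux/bpf_perf_event.h>
    #include <bpf/bpf_helpers.h>

    SEC("perf_event")
    int on_sample(struct bpf_perf_event_data *ctx)
    {
        struct bpf_perf_event_value value = {};

        /* Valid for BPF_PROG_TYPE_PERF_EVENT programs; a tracepoint program
         * asking for this helper is now refused by tp_prog_func_proto(). */
        if (bpf_perf_prog_read_value(ctx, &value, sizeof(value)))
            return 0;

        return 0;
    }

    char _license[] SEC("license") = "GPL";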
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index bb9a519cbf50..6ec6ba65127b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3018,14 +3018,6 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
 	return ret;
 }
 
-/*
- * See cancel_delayed_work()
- */
-bool cancel_work(struct work_struct *work)
-{
-	return __cancel_work(work, false);
-}
-
 /**
  * cancel_delayed_work - cancel a delayed work
  * @dwork: delayed_work to cancel
@@ -5337,7 +5329,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
 
 	ret = device_register(&wq_dev->dev);
 	if (ret) {
-		kfree(wq_dev);
+		put_device(&wq_dev->dev);
 		wq->wq_dev = NULL;
 		return ret;
 	}
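
Once device_register() has been called, the embedded kobject is initialized and may hold references, so the documented cleanup for a failed registration is put_device(), which drops the reference and lets the release callback free the object; a bare kfree() can leak the device name or free memory the driver core still tracks. A generic sketch of the pattern with a hypothetical device structure:

    #include <linux/device.h>
    #include <linux/slab.h>

    struct demo_dev {
        struct device dev;
    };

    static void demo_dev_release(struct device *dev)
    {
        kfree(container_of(dev, struct demo_dev, dev));
    }

    static int demo_register(struct device *parent)
    {
        struct demo_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);
        int ret;

        if (!d)
            return -ENOMEM;

        d->dev.parent = parent;
        d->dev.release = demo_dev_release;
        dev_set_name(&d->dev, "demo0");

        ret = device_register(&d->dev);
        if (ret) {
            /* Per device_register() documentation: clean up with put_device(),
             * which ends up in demo_dev_release() and frees 'd'. */
            put_device(&d->dev);
            return ret;
        }
        return 0;
    }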