Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/audit.c | 47
-rw-r--r-- | kernel/audit.h | 3
-rw-r--r-- | kernel/audit_tree.c | 88
-rw-r--r-- | kernel/auditsc.c | 9
-rw-r--r-- | kernel/bpf/verifier.c | 12
-rw-r--r-- | kernel/fork.c | 141
-rw-r--r-- | kernel/gcov/base.c | 5
-rw-r--r-- | kernel/locking/lockdep.c | 16
-rw-r--r-- | kernel/module.c | 9
-rw-r--r-- | kernel/params.c | 4
-rw-r--r-- | kernel/pid.c | 15
-rw-r--r-- | kernel/printk/printk.c | 53
-rw-r--r-- | kernel/ptrace.c | 39
-rw-r--r-- | kernel/signal.c | 14
-rw-r--r-- | kernel/smp.c | 80
-rw-r--r-- | kernel/sys.c | 47
-rw-r--r-- | kernel/sysctl.c | 16
-rw-r--r-- | kernel/time/clockevents.c | 2
-rw-r--r-- | kernel/trace/trace_events.c | 15
-rw-r--r-- | kernel/trace/trace_functions_graph.c | 8
20 files changed, 405 insertions, 218 deletions
diff --git a/kernel/audit.c b/kernel/audit.c
index 72ab759a0b43..ab5745ddf962 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -43,6 +43,7 @@ | |||
43 | 43 | ||
44 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 44 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
45 | 45 | ||
46 | #include <linux/file.h> | ||
46 | #include <linux/init.h> | 47 | #include <linux/init.h> |
47 | #include <linux/types.h> | 48 | #include <linux/types.h> |
48 | #include <linux/atomic.h> | 49 | #include <linux/atomic.h> |
@@ -107,6 +108,7 @@ static u32 audit_rate_limit; | |||
107 | * When set to zero, this means unlimited. */ | 108 | * When set to zero, this means unlimited. */ |
108 | static u32 audit_backlog_limit = 64; | 109 | static u32 audit_backlog_limit = 64; |
109 | #define AUDIT_BACKLOG_WAIT_TIME (60 * HZ) | 110 | #define AUDIT_BACKLOG_WAIT_TIME (60 * HZ) |
111 | static u32 audit_backlog_wait_time_master = AUDIT_BACKLOG_WAIT_TIME; | ||
110 | static u32 audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME; | 112 | static u32 audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME; |
111 | static u32 audit_backlog_wait_overflow = 0; | 113 | static u32 audit_backlog_wait_overflow = 0; |
112 | 114 | ||
@@ -338,13 +340,13 @@ static int audit_set_backlog_limit(u32 limit) | |||
338 | static int audit_set_backlog_wait_time(u32 timeout) | 340 | static int audit_set_backlog_wait_time(u32 timeout) |
339 | { | 341 | { |
340 | return audit_do_config_change("audit_backlog_wait_time", | 342 | return audit_do_config_change("audit_backlog_wait_time", |
341 | &audit_backlog_wait_time, timeout); | 343 | &audit_backlog_wait_time_master, timeout); |
342 | } | 344 | } |
343 | 345 | ||
344 | static int audit_set_enabled(u32 state) | 346 | static int audit_set_enabled(u32 state) |
345 | { | 347 | { |
346 | int rc; | 348 | int rc; |
347 | if (state < AUDIT_OFF || state > AUDIT_LOCKED) | 349 | if (state > AUDIT_LOCKED) |
348 | return -EINVAL; | 350 | return -EINVAL; |
349 | 351 | ||
350 | rc = audit_do_config_change("audit_enabled", &audit_enabled, state); | 352 | rc = audit_do_config_change("audit_enabled", &audit_enabled, state); |
@@ -663,7 +665,7 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type) | |||
663 | case AUDIT_MAKE_EQUIV: | 665 | case AUDIT_MAKE_EQUIV: |
664 | /* Only support auditd and auditctl in initial pid namespace | 666 | /* Only support auditd and auditctl in initial pid namespace |
665 | * for now. */ | 667 | * for now. */ |
666 | if ((task_active_pid_ns(current) != &init_pid_ns)) | 668 | if (task_active_pid_ns(current) != &init_pid_ns) |
667 | return -EPERM; | 669 | return -EPERM; |
668 | 670 | ||
669 | if (!netlink_capable(skb, CAP_AUDIT_CONTROL)) | 671 | if (!netlink_capable(skb, CAP_AUDIT_CONTROL)) |
@@ -834,7 +836,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
834 | s.lost = atomic_read(&audit_lost); | 836 | s.lost = atomic_read(&audit_lost); |
835 | s.backlog = skb_queue_len(&audit_skb_queue); | 837 | s.backlog = skb_queue_len(&audit_skb_queue); |
836 | s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL; | 838 | s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL; |
837 | s.backlog_wait_time = audit_backlog_wait_time; | 839 | s.backlog_wait_time = audit_backlog_wait_time_master; |
838 | audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s)); | 840 | audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s)); |
839 | break; | 841 | break; |
840 | } | 842 | } |
@@ -877,8 +879,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
877 | if (s.mask & AUDIT_STATUS_BACKLOG_WAIT_TIME) { | 879 | if (s.mask & AUDIT_STATUS_BACKLOG_WAIT_TIME) { |
878 | if (sizeof(s) > (size_t)nlh->nlmsg_len) | 880 | if (sizeof(s) > (size_t)nlh->nlmsg_len) |
879 | return -EINVAL; | 881 | return -EINVAL; |
880 | if (s.backlog_wait_time < 0 || | 882 | if (s.backlog_wait_time > 10*AUDIT_BACKLOG_WAIT_TIME) |
881 | s.backlog_wait_time > 10*AUDIT_BACKLOG_WAIT_TIME) | ||
882 | return -EINVAL; | 883 | return -EINVAL; |
883 | err = audit_set_backlog_wait_time(s.backlog_wait_time); | 884 | err = audit_set_backlog_wait_time(s.backlog_wait_time); |
884 | if (err < 0) | 885 | if (err < 0) |
@@ -1385,7 +1386,8 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, | |||
1385 | return NULL; | 1386 | return NULL; |
1386 | } | 1387 | } |
1387 | 1388 | ||
1388 | audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME; | 1389 | if (!reserve) |
1390 | audit_backlog_wait_time = audit_backlog_wait_time_master; | ||
1389 | 1391 | ||
1390 | ab = audit_buffer_alloc(ctx, gfp_mask, type); | 1392 | ab = audit_buffer_alloc(ctx, gfp_mask, type); |
1391 | if (!ab) { | 1393 | if (!ab) { |
@@ -1759,7 +1761,7 @@ void audit_log_name(struct audit_context *context, struct audit_names *n, | |||
1759 | } else | 1761 | } else |
1760 | audit_log_format(ab, " name=(null)"); | 1762 | audit_log_format(ab, " name=(null)"); |
1761 | 1763 | ||
1762 | if (n->ino != (unsigned long)-1) { | 1764 | if (n->ino != (unsigned long)-1) |
1763 | audit_log_format(ab, " inode=%lu" | 1765 | audit_log_format(ab, " inode=%lu" |
1764 | " dev=%02x:%02x mode=%#ho" | 1766 | " dev=%02x:%02x mode=%#ho" |
1765 | " ouid=%u ogid=%u rdev=%02x:%02x", | 1767 | " ouid=%u ogid=%u rdev=%02x:%02x", |
@@ -1771,7 +1773,6 @@ void audit_log_name(struct audit_context *context, struct audit_names *n, | |||
1771 | from_kgid(&init_user_ns, n->gid), | 1773 | from_kgid(&init_user_ns, n->gid), |
1772 | MAJOR(n->rdev), | 1774 | MAJOR(n->rdev), |
1773 | MINOR(n->rdev)); | 1775 | MINOR(n->rdev)); |
1774 | } | ||
1775 | if (n->osid != 0) { | 1776 | if (n->osid != 0) { |
1776 | char *ctx = NULL; | 1777 | char *ctx = NULL; |
1777 | u32 len; | 1778 | u32 len; |
@@ -1838,11 +1839,29 @@ error_path: | |||
1838 | } | 1839 | } |
1839 | EXPORT_SYMBOL(audit_log_task_context); | 1840 | EXPORT_SYMBOL(audit_log_task_context); |
1840 | 1841 | ||
1842 | void audit_log_d_path_exe(struct audit_buffer *ab, | ||
1843 | struct mm_struct *mm) | ||
1844 | { | ||
1845 | struct file *exe_file; | ||
1846 | |||
1847 | if (!mm) | ||
1848 | goto out_null; | ||
1849 | |||
1850 | exe_file = get_mm_exe_file(mm); | ||
1851 | if (!exe_file) | ||
1852 | goto out_null; | ||
1853 | |||
1854 | audit_log_d_path(ab, " exe=", &exe_file->f_path); | ||
1855 | fput(exe_file); | ||
1856 | return; | ||
1857 | out_null: | ||
1858 | audit_log_format(ab, " exe=(null)"); | ||
1859 | } | ||
1860 | |||
1841 | void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk) | 1861 | void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk) |
1842 | { | 1862 | { |
1843 | const struct cred *cred; | 1863 | const struct cred *cred; |
1844 | char comm[sizeof(tsk->comm)]; | 1864 | char comm[sizeof(tsk->comm)]; |
1845 | struct mm_struct *mm = tsk->mm; | ||
1846 | char *tty; | 1865 | char *tty; |
1847 | 1866 | ||
1848 | if (!ab) | 1867 | if (!ab) |
@@ -1878,13 +1897,7 @@ void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk) | |||
1878 | audit_log_format(ab, " comm="); | 1897 | audit_log_format(ab, " comm="); |
1879 | audit_log_untrustedstring(ab, get_task_comm(comm, tsk)); | 1898 | audit_log_untrustedstring(ab, get_task_comm(comm, tsk)); |
1880 | 1899 | ||
1881 | if (mm) { | 1900 | audit_log_d_path_exe(ab, tsk->mm); |
1882 | down_read(&mm->mmap_sem); | ||
1883 | if (mm->exe_file) | ||
1884 | audit_log_d_path(ab, " exe=", &mm->exe_file->f_path); | ||
1885 | up_read(&mm->mmap_sem); | ||
1886 | } else | ||
1887 | audit_log_format(ab, " exe=(null)"); | ||
1888 | audit_log_task_context(ab); | 1901 | audit_log_task_context(ab); |
1889 | } | 1902 | } |
1890 | EXPORT_SYMBOL(audit_log_task_info); | 1903 | EXPORT_SYMBOL(audit_log_task_info); |
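Both audit.c and auditsc.c used to open-code the same mmap_sem-protected walk to mm->exe_file; the new audit_log_d_path_exe() helper keeps that logic in one place and builds on get_mm_exe_file(), which takes its own reference to the file (and becomes RCU-based in the kernel/fork.c hunks further down). A minimal sketch of the take-reference/format/drop-reference shape the helper follows:

	struct file *exe_file = get_mm_exe_file(mm);	/* own reference, no mmap_sem held */

	if (exe_file) {
		audit_log_d_path(ab, " exe=", &exe_file->f_path);
		fput(exe_file);
	} else {
		audit_log_format(ab, " exe=(null)");	/* no mm (kernel thread) or no exe_file */
	}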
diff --git a/kernel/audit.h b/kernel/audit.h
index 1caa0d345d90..d641f9bb3ed0 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -257,6 +257,9 @@ extern struct list_head audit_filter_list[]; | |||
257 | 257 | ||
258 | extern struct audit_entry *audit_dupe_rule(struct audit_krule *old); | 258 | extern struct audit_entry *audit_dupe_rule(struct audit_krule *old); |
259 | 259 | ||
260 | extern void audit_log_d_path_exe(struct audit_buffer *ab, | ||
261 | struct mm_struct *mm); | ||
262 | |||
260 | /* audit watch functions */ | 263 | /* audit watch functions */ |
261 | #ifdef CONFIG_AUDIT_WATCH | 264 | #ifdef CONFIG_AUDIT_WATCH |
262 | extern void audit_put_watch(struct audit_watch *watch); | 265 | extern void audit_put_watch(struct audit_watch *watch); |
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 2e0c97427b33..71fd1f289885 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -37,6 +37,7 @@ struct audit_chunk { | |||
37 | 37 | ||
38 | static LIST_HEAD(tree_list); | 38 | static LIST_HEAD(tree_list); |
39 | static LIST_HEAD(prune_list); | 39 | static LIST_HEAD(prune_list); |
40 | static struct task_struct *prune_thread; | ||
40 | 41 | ||
41 | /* | 42 | /* |
42 | * One struct chunk is attached to each inode of interest. | 43 | * One struct chunk is attached to each inode of interest. |
@@ -651,6 +652,57 @@ static int tag_mount(struct vfsmount *mnt, void *arg) | |||
651 | return tag_chunk(mnt->mnt_root->d_inode, arg); | 652 | return tag_chunk(mnt->mnt_root->d_inode, arg); |
652 | } | 653 | } |
653 | 654 | ||
655 | /* | ||
656 | * That gets run when evict_chunk() ends up needing to kill audit_tree. | ||
657 | * Runs from a separate thread. | ||
658 | */ | ||
659 | static int prune_tree_thread(void *unused) | ||
660 | { | ||
661 | for (;;) { | ||
662 | set_current_state(TASK_INTERRUPTIBLE); | ||
663 | if (list_empty(&prune_list)) | ||
664 | schedule(); | ||
665 | __set_current_state(TASK_RUNNING); | ||
666 | |||
667 | mutex_lock(&audit_cmd_mutex); | ||
668 | mutex_lock(&audit_filter_mutex); | ||
669 | |||
670 | while (!list_empty(&prune_list)) { | ||
671 | struct audit_tree *victim; | ||
672 | |||
673 | victim = list_entry(prune_list.next, | ||
674 | struct audit_tree, list); | ||
675 | list_del_init(&victim->list); | ||
676 | |||
677 | mutex_unlock(&audit_filter_mutex); | ||
678 | |||
679 | prune_one(victim); | ||
680 | |||
681 | mutex_lock(&audit_filter_mutex); | ||
682 | } | ||
683 | |||
684 | mutex_unlock(&audit_filter_mutex); | ||
685 | mutex_unlock(&audit_cmd_mutex); | ||
686 | } | ||
687 | return 0; | ||
688 | } | ||
689 | |||
690 | static int audit_launch_prune(void) | ||
691 | { | ||
692 | if (prune_thread) | ||
693 | return 0; | ||
694 | prune_thread = kthread_create(prune_tree_thread, NULL, | ||
695 | "audit_prune_tree"); | ||
696 | if (IS_ERR(prune_thread)) { | ||
697 | pr_err("cannot start thread audit_prune_tree"); | ||
698 | prune_thread = NULL; | ||
699 | return -ENOMEM; | ||
700 | } else { | ||
701 | wake_up_process(prune_thread); | ||
702 | return 0; | ||
703 | } | ||
704 | } | ||
705 | |||
654 | /* called with audit_filter_mutex */ | 706 | /* called with audit_filter_mutex */ |
655 | int audit_add_tree_rule(struct audit_krule *rule) | 707 | int audit_add_tree_rule(struct audit_krule *rule) |
656 | { | 708 | { |
@@ -674,6 +726,12 @@ int audit_add_tree_rule(struct audit_krule *rule) | |||
674 | /* do not set rule->tree yet */ | 726 | /* do not set rule->tree yet */ |
675 | mutex_unlock(&audit_filter_mutex); | 727 | mutex_unlock(&audit_filter_mutex); |
676 | 728 | ||
729 | if (unlikely(!prune_thread)) { | ||
730 | err = audit_launch_prune(); | ||
731 | if (err) | ||
732 | goto Err; | ||
733 | } | ||
734 | |||
677 | err = kern_path(tree->pathname, 0, &path); | 735 | err = kern_path(tree->pathname, 0, &path); |
678 | if (err) | 736 | if (err) |
679 | goto Err; | 737 | goto Err; |
@@ -811,36 +869,10 @@ int audit_tag_tree(char *old, char *new) | |||
811 | return failed; | 869 | return failed; |
812 | } | 870 | } |
813 | 871 | ||
814 | /* | ||
815 | * That gets run when evict_chunk() ends up needing to kill audit_tree. | ||
816 | * Runs from a separate thread. | ||
817 | */ | ||
818 | static int prune_tree_thread(void *unused) | ||
819 | { | ||
820 | mutex_lock(&audit_cmd_mutex); | ||
821 | mutex_lock(&audit_filter_mutex); | ||
822 | |||
823 | while (!list_empty(&prune_list)) { | ||
824 | struct audit_tree *victim; | ||
825 | |||
826 | victim = list_entry(prune_list.next, struct audit_tree, list); | ||
827 | list_del_init(&victim->list); | ||
828 | |||
829 | mutex_unlock(&audit_filter_mutex); | ||
830 | |||
831 | prune_one(victim); | ||
832 | |||
833 | mutex_lock(&audit_filter_mutex); | ||
834 | } | ||
835 | |||
836 | mutex_unlock(&audit_filter_mutex); | ||
837 | mutex_unlock(&audit_cmd_mutex); | ||
838 | return 0; | ||
839 | } | ||
840 | 872 | ||
841 | static void audit_schedule_prune(void) | 873 | static void audit_schedule_prune(void) |
842 | { | 874 | { |
843 | kthread_run(prune_tree_thread, NULL, "audit_prune_tree"); | 875 | wake_up_process(prune_thread); |
844 | } | 876 | } |
845 | 877 | ||
846 | /* | 878 | /* |
@@ -907,9 +939,9 @@ static void evict_chunk(struct audit_chunk *chunk) | |||
907 | for (n = 0; n < chunk->count; n++) | 939 | for (n = 0; n < chunk->count; n++) |
908 | list_del_init(&chunk->owners[n].list); | 940 | list_del_init(&chunk->owners[n].list); |
909 | spin_unlock(&hash_lock); | 941 | spin_unlock(&hash_lock); |
942 | mutex_unlock(&audit_filter_mutex); | ||
910 | if (need_prune) | 943 | if (need_prune) |
911 | audit_schedule_prune(); | 944 | audit_schedule_prune(); |
912 | mutex_unlock(&audit_filter_mutex); | ||
913 | } | 945 | } |
914 | 946 | ||
915 | static int audit_tree_handle_event(struct fsnotify_group *group, | 947 | static int audit_tree_handle_event(struct fsnotify_group *group, |
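Rather than spawning a fresh kthread every time a chunk eviction needs to prune trees, the code now keeps one long-lived worker: audit_launch_prune() creates it lazily on the first tree rule, and audit_schedule_prune() merely wakes it. A generic sketch of this wake-on-demand kthread idiom, with hypothetical names (the real code drains prune_list under audit_filter_mutex):

static int worker_fn(void *unused)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&todo_list))		/* hypothetical work list */
			schedule();			/* sleep until woken */
		__set_current_state(TASK_RUNNING);

		/* drain todo_list here, under the appropriate lock(s) */
	}
	return 0;
}

/* setup:    worker = kthread_create(worker_fn, NULL, "worker"); wake_up_process(worker);
 * producer: queue an item on todo_list, then wake_up_process(worker); */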
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index dc4ae70a7413..84c74d08c62b 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -2361,7 +2361,6 @@ static void audit_log_task(struct audit_buffer *ab) | |||
2361 | kuid_t auid, uid; | 2361 | kuid_t auid, uid; |
2362 | kgid_t gid; | 2362 | kgid_t gid; |
2363 | unsigned int sessionid; | 2363 | unsigned int sessionid; |
2364 | struct mm_struct *mm = current->mm; | ||
2365 | char comm[sizeof(current->comm)]; | 2364 | char comm[sizeof(current->comm)]; |
2366 | 2365 | ||
2367 | auid = audit_get_loginuid(current); | 2366 | auid = audit_get_loginuid(current); |
@@ -2376,13 +2375,7 @@ static void audit_log_task(struct audit_buffer *ab) | |||
2376 | audit_log_task_context(ab); | 2375 | audit_log_task_context(ab); |
2377 | audit_log_format(ab, " pid=%d comm=", task_pid_nr(current)); | 2376 | audit_log_format(ab, " pid=%d comm=", task_pid_nr(current)); |
2378 | audit_log_untrustedstring(ab, get_task_comm(comm, current)); | 2377 | audit_log_untrustedstring(ab, get_task_comm(comm, current)); |
2379 | if (mm) { | 2378 | audit_log_d_path_exe(ab, current->mm); |
2380 | down_read(&mm->mmap_sem); | ||
2381 | if (mm->exe_file) | ||
2382 | audit_log_d_path(ab, " exe=", &mm->exe_file->f_path); | ||
2383 | up_read(&mm->mmap_sem); | ||
2384 | } else | ||
2385 | audit_log_format(ab, " exe=(null)"); | ||
2386 | } | 2379 | } |
2387 | 2380 | ||
2388 | /** | 2381 | /** |
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 630a7bac1e51..47dcd3aa6e23 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1397,7 +1397,8 @@ peek_stack: | |||
1397 | /* tell verifier to check for equivalent states | 1397 | /* tell verifier to check for equivalent states |
1398 | * after every call and jump | 1398 | * after every call and jump |
1399 | */ | 1399 | */ |
1400 | env->explored_states[t + 1] = STATE_LIST_MARK; | 1400 | if (t + 1 < insn_cnt) |
1401 | env->explored_states[t + 1] = STATE_LIST_MARK; | ||
1401 | } else { | 1402 | } else { |
1402 | /* conditional jump with two edges */ | 1403 | /* conditional jump with two edges */ |
1403 | ret = push_insn(t, t + 1, FALLTHROUGH, env); | 1404 | ret = push_insn(t, t + 1, FALLTHROUGH, env); |
@@ -1636,6 +1637,8 @@ static int do_check(struct verifier_env *env) | |||
1636 | if (err) | 1637 | if (err) |
1637 | return err; | 1638 | return err; |
1638 | 1639 | ||
1640 | src_reg_type = regs[insn->src_reg].type; | ||
1641 | |||
1639 | /* check that memory (src_reg + off) is readable, | 1642 | /* check that memory (src_reg + off) is readable, |
1640 | * the state of dst_reg will be updated by this func | 1643 | * the state of dst_reg will be updated by this func |
1641 | */ | 1644 | */ |
@@ -1645,9 +1648,12 @@ static int do_check(struct verifier_env *env) | |||
1645 | if (err) | 1648 | if (err) |
1646 | return err; | 1649 | return err; |
1647 | 1650 | ||
1648 | src_reg_type = regs[insn->src_reg].type; | 1651 | if (BPF_SIZE(insn->code) != BPF_W) { |
1652 | insn_idx++; | ||
1653 | continue; | ||
1654 | } | ||
1649 | 1655 | ||
1650 | if (insn->imm == 0 && BPF_SIZE(insn->code) == BPF_W) { | 1656 | if (insn->imm == 0) { |
1651 | /* saw a valid insn | 1657 | /* saw a valid insn |
1652 | * dst_reg = *(u32 *)(src_reg + off) | 1658 | * dst_reg = *(u32 *)(src_reg + off) |
1653 | * use reserved 'imm' field to mark this insn | 1659 | * use reserved 'imm' field to mark this insn |
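The verifier's context-load bookkeeping (marking the instruction through the reserved imm field so PTR_TO_CTX accesses can later be rewritten) now runs only for BPF_W-sized loads; other sizes simply advance to the next instruction. For reference, a hedged sketch of the kind of 32-bit context load this path is about, written with the kernel's instruction macros (an illustrative program, not taken from the patch):

#include <linux/bpf.h>		/* struct __sk_buff via uapi */
#include <linux/filter.h>	/* BPF_LDX_MEM(), BPF_EXIT_INSN() */

static const struct bpf_insn example[] = {
	/* r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, len)) */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, len)),
	BPF_EXIT_INSN(),
};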
diff --git a/kernel/fork.c b/kernel/fork.c
index f2c1e7352298..03c1eaaa6ef5 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -74,6 +74,7 @@ | |||
74 | #include <linux/uprobes.h> | 74 | #include <linux/uprobes.h> |
75 | #include <linux/aio.h> | 75 | #include <linux/aio.h> |
76 | #include <linux/compiler.h> | 76 | #include <linux/compiler.h> |
77 | #include <linux/sysctl.h> | ||
77 | 78 | ||
78 | #include <asm/pgtable.h> | 79 | #include <asm/pgtable.h> |
79 | #include <asm/pgalloc.h> | 80 | #include <asm/pgalloc.h> |
@@ -88,6 +89,16 @@ | |||
88 | #include <trace/events/task.h> | 89 | #include <trace/events/task.h> |
89 | 90 | ||
90 | /* | 91 | /* |
92 | * Minimum number of threads to boot the kernel | ||
93 | */ | ||
94 | #define MIN_THREADS 20 | ||
95 | |||
96 | /* | ||
97 | * Maximum number of threads | ||
98 | */ | ||
99 | #define MAX_THREADS FUTEX_TID_MASK | ||
100 | |||
101 | /* | ||
91 | * Protected counters by write_lock_irq(&tasklist_lock) | 102 | * Protected counters by write_lock_irq(&tasklist_lock) |
92 | */ | 103 | */ |
93 | unsigned long total_forks; /* Handle normal Linux uptimes. */ | 104 | unsigned long total_forks; /* Handle normal Linux uptimes. */ |
@@ -253,7 +264,30 @@ EXPORT_SYMBOL_GPL(__put_task_struct); | |||
253 | 264 | ||
254 | void __init __weak arch_task_cache_init(void) { } | 265 | void __init __weak arch_task_cache_init(void) { } |
255 | 266 | ||
256 | void __init fork_init(unsigned long mempages) | 267 | /* |
268 | * set_max_threads | ||
269 | */ | ||
270 | static void set_max_threads(unsigned int max_threads_suggested) | ||
271 | { | ||
272 | u64 threads; | ||
273 | |||
274 | /* | ||
275 | * The number of threads shall be limited such that the thread | ||
276 | * structures may only consume a small part of the available memory. | ||
277 | */ | ||
278 | if (fls64(totalram_pages) + fls64(PAGE_SIZE) > 64) | ||
279 | threads = MAX_THREADS; | ||
280 | else | ||
281 | threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE, | ||
282 | (u64) THREAD_SIZE * 8UL); | ||
283 | |||
284 | if (threads > max_threads_suggested) | ||
285 | threads = max_threads_suggested; | ||
286 | |||
287 | max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS); | ||
288 | } | ||
289 | |||
290 | void __init fork_init(void) | ||
257 | { | 291 | { |
258 | #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR | 292 | #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR |
259 | #ifndef ARCH_MIN_TASKALIGN | 293 | #ifndef ARCH_MIN_TASKALIGN |
@@ -268,18 +302,7 @@ void __init fork_init(unsigned long mempages) | |||
268 | /* do the arch specific task caches init */ | 302 | /* do the arch specific task caches init */ |
269 | arch_task_cache_init(); | 303 | arch_task_cache_init(); |
270 | 304 | ||
271 | /* | 305 | set_max_threads(MAX_THREADS); |
272 | * The default maximum number of threads is set to a safe | ||
273 | * value: the thread structures can take up at most half | ||
274 | * of memory. | ||
275 | */ | ||
276 | max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE); | ||
277 | |||
278 | /* | ||
279 | * we need to allow at least 20 threads to boot a system | ||
280 | */ | ||
281 | if (max_threads < 20) | ||
282 | max_threads = 20; | ||
283 | 306 | ||
284 | init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2; | 307 | init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2; |
285 | init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2; | 308 | init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2; |
@@ -380,6 +403,9 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) | |||
380 | */ | 403 | */ |
381 | down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING); | 404 | down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING); |
382 | 405 | ||
406 | /* No ordering required: file already has been exposed. */ | ||
407 | RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); | ||
408 | |||
383 | mm->total_vm = oldmm->total_vm; | 409 | mm->total_vm = oldmm->total_vm; |
384 | mm->shared_vm = oldmm->shared_vm; | 410 | mm->shared_vm = oldmm->shared_vm; |
385 | mm->exec_vm = oldmm->exec_vm; | 411 | mm->exec_vm = oldmm->exec_vm; |
@@ -505,7 +531,13 @@ static inline void mm_free_pgd(struct mm_struct *mm) | |||
505 | pgd_free(mm, mm->pgd); | 531 | pgd_free(mm, mm->pgd); |
506 | } | 532 | } |
507 | #else | 533 | #else |
508 | #define dup_mmap(mm, oldmm) (0) | 534 | static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) |
535 | { | ||
536 | down_write(&oldmm->mmap_sem); | ||
537 | RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); | ||
538 | up_write(&oldmm->mmap_sem); | ||
539 | return 0; | ||
540 | } | ||
509 | #define mm_alloc_pgd(mm) (0) | 541 | #define mm_alloc_pgd(mm) (0) |
510 | #define mm_free_pgd(mm) | 542 | #define mm_free_pgd(mm) |
511 | #endif /* CONFIG_MMU */ | 543 | #endif /* CONFIG_MMU */ |
@@ -674,34 +706,53 @@ void mmput(struct mm_struct *mm) | |||
674 | } | 706 | } |
675 | EXPORT_SYMBOL_GPL(mmput); | 707 | EXPORT_SYMBOL_GPL(mmput); |
676 | 708 | ||
709 | /** | ||
710 | * set_mm_exe_file - change a reference to the mm's executable file | ||
711 | * | ||
712 | * This changes mm's executable file (shown as symlink /proc/[pid]/exe). | ||
713 | * | ||
714 | * Main users are mmput() and sys_execve(). Callers prevent concurrent | ||
715 | * invocations: in mmput() nobody alive left, in execve task is single | ||
716 | * threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set the | ||
717 | * mm->exe_file, but does so without using set_mm_exe_file() in order | ||
718 | * to do avoid the need for any locks. | ||
719 | */ | ||
677 | void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) | 720 | void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) |
678 | { | 721 | { |
722 | struct file *old_exe_file; | ||
723 | |||
724 | /* | ||
725 | * It is safe to dereference the exe_file without RCU as | ||
726 | * this function is only called if nobody else can access | ||
727 | * this mm -- see comment above for justification. | ||
728 | */ | ||
729 | old_exe_file = rcu_dereference_raw(mm->exe_file); | ||
730 | |||
679 | if (new_exe_file) | 731 | if (new_exe_file) |
680 | get_file(new_exe_file); | 732 | get_file(new_exe_file); |
681 | if (mm->exe_file) | 733 | rcu_assign_pointer(mm->exe_file, new_exe_file); |
682 | fput(mm->exe_file); | 734 | if (old_exe_file) |
683 | mm->exe_file = new_exe_file; | 735 | fput(old_exe_file); |
684 | } | 736 | } |
685 | 737 | ||
738 | /** | ||
739 | * get_mm_exe_file - acquire a reference to the mm's executable file | ||
740 | * | ||
741 | * Returns %NULL if mm has no associated executable file. | ||
742 | * User must release file via fput(). | ||
743 | */ | ||
686 | struct file *get_mm_exe_file(struct mm_struct *mm) | 744 | struct file *get_mm_exe_file(struct mm_struct *mm) |
687 | { | 745 | { |
688 | struct file *exe_file; | 746 | struct file *exe_file; |
689 | 747 | ||
690 | /* We need mmap_sem to protect against races with removal of exe_file */ | 748 | rcu_read_lock(); |
691 | down_read(&mm->mmap_sem); | 749 | exe_file = rcu_dereference(mm->exe_file); |
692 | exe_file = mm->exe_file; | 750 | if (exe_file && !get_file_rcu(exe_file)) |
693 | if (exe_file) | 751 | exe_file = NULL; |
694 | get_file(exe_file); | 752 | rcu_read_unlock(); |
695 | up_read(&mm->mmap_sem); | ||
696 | return exe_file; | 753 | return exe_file; |
697 | } | 754 | } |
698 | 755 | EXPORT_SYMBOL(get_mm_exe_file); | |
699 | static void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm) | ||
700 | { | ||
701 | /* It's safe to write the exe_file pointer without exe_file_lock because | ||
702 | * this is called during fork when the task is not yet in /proc */ | ||
703 | newmm->exe_file = get_mm_exe_file(oldmm); | ||
704 | } | ||
705 | 756 | ||
706 | /** | 757 | /** |
707 | * get_task_mm - acquire a reference to the task's mm | 758 | * get_task_mm - acquire a reference to the task's mm |
@@ -864,8 +915,6 @@ static struct mm_struct *dup_mm(struct task_struct *tsk) | |||
864 | if (!mm_init(mm, tsk)) | 915 | if (!mm_init(mm, tsk)) |
865 | goto fail_nomem; | 916 | goto fail_nomem; |
866 | 917 | ||
867 | dup_mm_exe_file(oldmm, mm); | ||
868 | |||
869 | err = dup_mmap(mm, oldmm); | 918 | err = dup_mmap(mm, oldmm); |
870 | if (err) | 919 | if (err) |
871 | goto free_pt; | 920 | goto free_pt; |
@@ -1403,10 +1452,11 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1403 | goto bad_fork_cleanup_io; | 1452 | goto bad_fork_cleanup_io; |
1404 | 1453 | ||
1405 | if (pid != &init_struct_pid) { | 1454 | if (pid != &init_struct_pid) { |
1406 | retval = -ENOMEM; | ||
1407 | pid = alloc_pid(p->nsproxy->pid_ns_for_children); | 1455 | pid = alloc_pid(p->nsproxy->pid_ns_for_children); |
1408 | if (!pid) | 1456 | if (IS_ERR(pid)) { |
1457 | retval = PTR_ERR(pid); | ||
1409 | goto bad_fork_cleanup_io; | 1458 | goto bad_fork_cleanup_io; |
1459 | } | ||
1410 | } | 1460 | } |
1411 | 1461 | ||
1412 | p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; | 1462 | p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; |
@@ -2000,3 +2050,26 @@ int unshare_files(struct files_struct **displaced) | |||
2000 | task_unlock(task); | 2050 | task_unlock(task); |
2001 | return 0; | 2051 | return 0; |
2002 | } | 2052 | } |
2053 | |||
2054 | int sysctl_max_threads(struct ctl_table *table, int write, | ||
2055 | void __user *buffer, size_t *lenp, loff_t *ppos) | ||
2056 | { | ||
2057 | struct ctl_table t; | ||
2058 | int ret; | ||
2059 | int threads = max_threads; | ||
2060 | int min = MIN_THREADS; | ||
2061 | int max = MAX_THREADS; | ||
2062 | |||
2063 | t = *table; | ||
2064 | t.data = &threads; | ||
2065 | t.extra1 = &min; | ||
2066 | t.extra2 = &max; | ||
2067 | |||
2068 | ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); | ||
2069 | if (ret || !write) | ||
2070 | return ret; | ||
2071 | |||
2072 | set_max_threads(threads); | ||
2073 | |||
2074 | return 0; | ||
2075 | } | ||
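To make the new sizing rule concrete, here is the arithmetic for a machine with 8 GiB of RAM, 4 KiB pages and 16 KiB kernel stacks (x86_64-style numbers, stated as assumptions):

/*
 *   threads = (totalram_pages * PAGE_SIZE) / (THREAD_SIZE * 8)
 *           = (2097152 * 4096) / (16384 * 8)
 *           = 65536
 *
 * i.e. task stacks alone may consume at most 1/8 of RAM. The result is
 * then clamped to [MIN_THREADS, FUTEX_TID_MASK], and the fls64() test
 * falls back to MAX_THREADS outright when the multiplication would not
 * fit in 64 bits.
 */

A write to /proc/sys/kernel/threads-max now goes through sysctl_max_threads(), which feeds the value back into set_max_threads(), so the same memory-based ceiling still applies.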
diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
index b358a802fd18..a744098e4eb7 100644
--- a/kernel/gcov/base.c
+++ b/kernel/gcov/base.c
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/mutex.h> | 20 | #include <linux/mutex.h> |
21 | #include <linux/sched.h> | ||
21 | #include "gcov.h" | 22 | #include "gcov.h" |
22 | 23 | ||
23 | static int gcov_events_enabled; | 24 | static int gcov_events_enabled; |
@@ -107,8 +108,10 @@ void gcov_enable_events(void) | |||
107 | gcov_events_enabled = 1; | 108 | gcov_events_enabled = 1; |
108 | 109 | ||
109 | /* Perform event callback for previously registered entries. */ | 110 | /* Perform event callback for previously registered entries. */ |
110 | while ((info = gcov_info_next(info))) | 111 | while ((info = gcov_info_next(info))) { |
111 | gcov_event(GCOV_ADD, info); | 112 | gcov_event(GCOV_ADD, info); |
113 | cond_resched(); | ||
114 | } | ||
112 | 115 | ||
113 | mutex_unlock(&gcov_lock); | 116 | mutex_unlock(&gcov_lock); |
114 | } | 117 | } |
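gcov_enable_events() may replay the callback for a very large number of profiling records while holding gcov_lock, so the loop now yields periodically. The general idiom, shown here with hypothetical names (the real code iterates with gcov_info_next()):

	list_for_each_entry(rec, &records, node) {	/* hypothetical list walk */
		handle(rec);				/* per-entry work */
		cond_resched();		/* avoid hogging the CPU on huge lists */
	}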
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index ba77ab5f64dd..a0831e1b99f4 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -551,7 +551,21 @@ static void print_lockdep_cache(struct lockdep_map *lock) | |||
551 | 551 | ||
552 | static void print_lock(struct held_lock *hlock) | 552 | static void print_lock(struct held_lock *hlock) |
553 | { | 553 | { |
554 | print_lock_name(hlock_class(hlock)); | 554 | /* |
555 | * We can be called locklessly through debug_show_all_locks() so be | ||
556 | * extra careful, the hlock might have been released and cleared. | ||
557 | */ | ||
558 | unsigned int class_idx = hlock->class_idx; | ||
559 | |||
560 | /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */ | ||
561 | barrier(); | ||
562 | |||
563 | if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) { | ||
564 | printk("<RELEASED>\n"); | ||
565 | return; | ||
566 | } | ||
567 | |||
568 | print_lock_name(lock_classes + class_idx - 1); | ||
555 | printk(", at: "); | 569 | printk(", at: "); |
556 | print_ip_sym(hlock->acquire_ip); | 570 | print_ip_sym(hlock->acquire_ip); |
557 | } | 571 | } |
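print_lock() can be reached locklessly via debug_show_all_locks(), so it now takes a private copy of the class index, stops the compiler from re-reading it, and range-checks the copy before dereferencing anything. A sketch of that snapshot-and-validate idiom on a hypothetical structure (obj, slot_idx, slots[] and MAX_SLOTS are made-up names):

	unsigned int idx = obj->slot_idx;  /* bitfield, so READ_ONCE() is not usable */

	barrier();	/* forbid the compiler from re-loading obj->slot_idx */

	if (!idx || idx - 1 >= MAX_SLOTS) {
		printk("<released>\n");
		return;
	}
	use(&slots[idx - 1]);	/* safe: the range check ran on the private copy */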
diff --git a/kernel/module.c b/kernel/module.c
index 650b038ae520..42a1d2afb217 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -387,9 +387,9 @@ static bool check_symbol(const struct symsearch *syms, | |||
387 | pr_warn("Symbol %s is marked as UNUSED, however this module is " | 387 | pr_warn("Symbol %s is marked as UNUSED, however this module is " |
388 | "using it.\n", fsa->name); | 388 | "using it.\n", fsa->name); |
389 | pr_warn("This symbol will go away in the future.\n"); | 389 | pr_warn("This symbol will go away in the future.\n"); |
390 | pr_warn("Please evalute if this is the right api to use and if " | 390 | pr_warn("Please evaluate if this is the right api to use and " |
391 | "it really is, submit a report the linux kernel " | 391 | "if it really is, submit a report to the linux kernel " |
392 | "mailinglist together with submitting your code for " | 392 | "mailing list together with submitting your code for " |
393 | "inclusion.\n"); | 393 | "inclusion.\n"); |
394 | } | 394 | } |
395 | #endif | 395 | #endif |
@@ -2511,7 +2511,8 @@ static int copy_module_from_user(const void __user *umod, unsigned long len, | |||
2511 | return err; | 2511 | return err; |
2512 | 2512 | ||
2513 | /* Suck in entire file: we'll want most of it. */ | 2513 | /* Suck in entire file: we'll want most of it. */ |
2514 | info->hdr = vmalloc(info->len); | 2514 | info->hdr = __vmalloc(info->len, |
2515 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, PAGE_KERNEL); | ||
2515 | if (!info->hdr) | 2516 | if (!info->hdr) |
2516 | return -ENOMEM; | 2517 | return -ENOMEM; |
2517 | 2518 | ||
diff --git a/kernel/params.c b/kernel/params.c
index 728e05b167de..a22d6a759b1a 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -173,9 +173,9 @@ static char *next_arg(char *args, char **param, char **val) | |||
173 | if (args[i-1] == '"') | 173 | if (args[i-1] == '"') |
174 | args[i-1] = '\0'; | 174 | args[i-1] = '\0'; |
175 | } | 175 | } |
176 | if (quoted && args[i-1] == '"') | ||
177 | args[i-1] = '\0'; | ||
178 | } | 176 | } |
177 | if (quoted && args[i-1] == '"') | ||
178 | args[i-1] = '\0'; | ||
179 | 179 | ||
180 | if (args[i]) { | 180 | if (args[i]) { |
181 | args[i] = '\0'; | 181 | args[i] = '\0'; |
diff --git a/kernel/pid.c b/kernel/pid.c
index cd36a5e0d173..4fd07d5b7baf 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -182,7 +182,7 @@ static int alloc_pidmap(struct pid_namespace *pid_ns) | |||
182 | spin_unlock_irq(&pidmap_lock); | 182 | spin_unlock_irq(&pidmap_lock); |
183 | kfree(page); | 183 | kfree(page); |
184 | if (unlikely(!map->page)) | 184 | if (unlikely(!map->page)) |
185 | break; | 185 | return -ENOMEM; |
186 | } | 186 | } |
187 | if (likely(atomic_read(&map->nr_free))) { | 187 | if (likely(atomic_read(&map->nr_free))) { |
188 | for ( ; ; ) { | 188 | for ( ; ; ) { |
@@ -210,7 +210,7 @@ static int alloc_pidmap(struct pid_namespace *pid_ns) | |||
210 | } | 210 | } |
211 | pid = mk_pid(pid_ns, map, offset); | 211 | pid = mk_pid(pid_ns, map, offset); |
212 | } | 212 | } |
213 | return -1; | 213 | return -EAGAIN; |
214 | } | 214 | } |
215 | 215 | ||
216 | int next_pidmap(struct pid_namespace *pid_ns, unsigned int last) | 216 | int next_pidmap(struct pid_namespace *pid_ns, unsigned int last) |
@@ -301,17 +301,20 @@ struct pid *alloc_pid(struct pid_namespace *ns) | |||
301 | int i, nr; | 301 | int i, nr; |
302 | struct pid_namespace *tmp; | 302 | struct pid_namespace *tmp; |
303 | struct upid *upid; | 303 | struct upid *upid; |
304 | int retval = -ENOMEM; | ||
304 | 305 | ||
305 | pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL); | 306 | pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL); |
306 | if (!pid) | 307 | if (!pid) |
307 | goto out; | 308 | return ERR_PTR(retval); |
308 | 309 | ||
309 | tmp = ns; | 310 | tmp = ns; |
310 | pid->level = ns->level; | 311 | pid->level = ns->level; |
311 | for (i = ns->level; i >= 0; i--) { | 312 | for (i = ns->level; i >= 0; i--) { |
312 | nr = alloc_pidmap(tmp); | 313 | nr = alloc_pidmap(tmp); |
313 | if (nr < 0) | 314 | if (IS_ERR_VALUE(nr)) { |
315 | retval = nr; | ||
314 | goto out_free; | 316 | goto out_free; |
317 | } | ||
315 | 318 | ||
316 | pid->numbers[i].nr = nr; | 319 | pid->numbers[i].nr = nr; |
317 | pid->numbers[i].ns = tmp; | 320 | pid->numbers[i].ns = tmp; |
@@ -339,7 +342,6 @@ struct pid *alloc_pid(struct pid_namespace *ns) | |||
339 | } | 342 | } |
340 | spin_unlock_irq(&pidmap_lock); | 343 | spin_unlock_irq(&pidmap_lock); |
341 | 344 | ||
342 | out: | ||
343 | return pid; | 345 | return pid; |
344 | 346 | ||
345 | out_unlock: | 347 | out_unlock: |
@@ -351,8 +353,7 @@ out_free: | |||
351 | free_pidmap(pid->numbers + i); | 353 | free_pidmap(pid->numbers + i); |
352 | 354 | ||
353 | kmem_cache_free(ns->pid_cachep, pid); | 355 | kmem_cache_free(ns->pid_cachep, pid); |
354 | pid = NULL; | 356 | return ERR_PTR(retval); |
355 | goto out; | ||
356 | } | 357 | } |
357 | 358 | ||
358 | void disable_pid_allocation(struct pid_namespace *ns) | 359 | void disable_pid_allocation(struct pid_namespace *ns) |
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 879edfc5ee52..c099b082cd02 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2017,24 +2017,6 @@ int add_preferred_console(char *name, int idx, char *options) | |||
2017 | return __add_preferred_console(name, idx, options, NULL); | 2017 | return __add_preferred_console(name, idx, options, NULL); |
2018 | } | 2018 | } |
2019 | 2019 | ||
2020 | int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options) | ||
2021 | { | ||
2022 | struct console_cmdline *c; | ||
2023 | int i; | ||
2024 | |||
2025 | for (i = 0, c = console_cmdline; | ||
2026 | i < MAX_CMDLINECONSOLES && c->name[0]; | ||
2027 | i++, c++) | ||
2028 | if (strcmp(c->name, name) == 0 && c->index == idx) { | ||
2029 | strlcpy(c->name, name_new, sizeof(c->name)); | ||
2030 | c->options = options; | ||
2031 | c->index = idx_new; | ||
2032 | return i; | ||
2033 | } | ||
2034 | /* not found */ | ||
2035 | return -1; | ||
2036 | } | ||
2037 | |||
2038 | bool console_suspend_enabled = true; | 2020 | bool console_suspend_enabled = true; |
2039 | EXPORT_SYMBOL(console_suspend_enabled); | 2021 | EXPORT_SYMBOL(console_suspend_enabled); |
2040 | 2022 | ||
@@ -2436,9 +2418,6 @@ void register_console(struct console *newcon) | |||
2436 | if (preferred_console < 0 || bcon || !console_drivers) | 2418 | if (preferred_console < 0 || bcon || !console_drivers) |
2437 | preferred_console = selected_console; | 2419 | preferred_console = selected_console; |
2438 | 2420 | ||
2439 | if (newcon->early_setup) | ||
2440 | newcon->early_setup(); | ||
2441 | |||
2442 | /* | 2421 | /* |
2443 | * See if we want to use this console driver. If we | 2422 | * See if we want to use this console driver. If we |
2444 | * didn't select a console we take the first one | 2423 | * didn't select a console we take the first one |
@@ -2464,23 +2443,27 @@ void register_console(struct console *newcon) | |||
2464 | for (i = 0, c = console_cmdline; | 2443 | for (i = 0, c = console_cmdline; |
2465 | i < MAX_CMDLINECONSOLES && c->name[0]; | 2444 | i < MAX_CMDLINECONSOLES && c->name[0]; |
2466 | i++, c++) { | 2445 | i++, c++) { |
2467 | BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name)); | 2446 | if (!newcon->match || |
2468 | if (strcmp(c->name, newcon->name) != 0) | 2447 | newcon->match(newcon, c->name, c->index, c->options) != 0) { |
2469 | continue; | 2448 | /* default matching */ |
2470 | if (newcon->index >= 0 && | 2449 | BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name)); |
2471 | newcon->index != c->index) | 2450 | if (strcmp(c->name, newcon->name) != 0) |
2472 | continue; | 2451 | continue; |
2473 | if (newcon->index < 0) | 2452 | if (newcon->index >= 0 && |
2474 | newcon->index = c->index; | 2453 | newcon->index != c->index) |
2454 | continue; | ||
2455 | if (newcon->index < 0) | ||
2456 | newcon->index = c->index; | ||
2475 | 2457 | ||
2476 | if (_braille_register_console(newcon, c)) | 2458 | if (_braille_register_console(newcon, c)) |
2477 | return; | 2459 | return; |
2460 | |||
2461 | if (newcon->setup && | ||
2462 | newcon->setup(newcon, c->options) != 0) | ||
2463 | break; | ||
2464 | } | ||
2478 | 2465 | ||
2479 | if (newcon->setup && | ||
2480 | newcon->setup(newcon, console_cmdline[i].options) != 0) | ||
2481 | break; | ||
2482 | newcon->flags |= CON_ENABLED; | 2466 | newcon->flags |= CON_ENABLED; |
2483 | newcon->index = c->index; | ||
2484 | if (i == selected_console) { | 2467 | if (i == selected_console) { |
2485 | newcon->flags |= CON_CONSDEV; | 2468 | newcon->flags |= CON_CONSDEV; |
2486 | preferred_console = selected_console; | 2469 | preferred_console = selected_console; |
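register_console() now gives the driver a chance to claim a console= entry itself through the optional ->match() hook before the default name/index comparison runs. A hedged sketch of what a driver-side implementation might look like (the "myuart" names and setup helper are made up; only the hook's role is taken from the code above):

static int myuart_console_match(struct console *co, char *name, int idx,
				char *options)
{
	if (strcmp(name, "myuart") != 0)
		return -ENODEV;		/* non-zero: fall back to default matching */

	co->index = idx;
	return myuart_setup(co, options);	/* hypothetical driver setup, 0 on success */
}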
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 227fec36b12a..c8e0e050a36a 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -456,8 +456,6 @@ static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p) | |||
456 | 456 | ||
457 | static int ptrace_detach(struct task_struct *child, unsigned int data) | 457 | static int ptrace_detach(struct task_struct *child, unsigned int data) |
458 | { | 458 | { |
459 | bool dead = false; | ||
460 | |||
461 | if (!valid_signal(data)) | 459 | if (!valid_signal(data)) |
462 | return -EIO; | 460 | return -EIO; |
463 | 461 | ||
@@ -467,18 +465,19 @@ static int ptrace_detach(struct task_struct *child, unsigned int data) | |||
467 | 465 | ||
468 | write_lock_irq(&tasklist_lock); | 466 | write_lock_irq(&tasklist_lock); |
469 | /* | 467 | /* |
470 | * This child can be already killed. Make sure de_thread() or | 468 | * We rely on ptrace_freeze_traced(). It can't be killed and |
471 | * our sub-thread doing do_wait() didn't do release_task() yet. | 469 | * untraced by another thread, it can't be a zombie. |
472 | */ | 470 | */ |
473 | if (child->ptrace) { | 471 | WARN_ON(!child->ptrace || child->exit_state); |
474 | child->exit_code = data; | 472 | /* |
475 | dead = __ptrace_detach(current, child); | 473 | * tasklist_lock avoids the race with wait_task_stopped(), see |
476 | } | 474 | * the comment in ptrace_resume(). |
475 | */ | ||
476 | child->exit_code = data; | ||
477 | __ptrace_detach(current, child); | ||
477 | write_unlock_irq(&tasklist_lock); | 478 | write_unlock_irq(&tasklist_lock); |
478 | 479 | ||
479 | proc_ptrace_connector(child, PTRACE_DETACH); | 480 | proc_ptrace_connector(child, PTRACE_DETACH); |
480 | if (unlikely(dead)) | ||
481 | release_task(child); | ||
482 | 481 | ||
483 | return 0; | 482 | return 0; |
484 | } | 483 | } |
@@ -697,6 +696,8 @@ static int ptrace_peek_siginfo(struct task_struct *child, | |||
697 | static int ptrace_resume(struct task_struct *child, long request, | 696 | static int ptrace_resume(struct task_struct *child, long request, |
698 | unsigned long data) | 697 | unsigned long data) |
699 | { | 698 | { |
699 | bool need_siglock; | ||
700 | |||
700 | if (!valid_signal(data)) | 701 | if (!valid_signal(data)) |
701 | return -EIO; | 702 | return -EIO; |
702 | 703 | ||
@@ -724,8 +725,26 @@ static int ptrace_resume(struct task_struct *child, long request, | |||
724 | user_disable_single_step(child); | 725 | user_disable_single_step(child); |
725 | } | 726 | } |
726 | 727 | ||
728 | /* | ||
729 | * Change ->exit_code and ->state under siglock to avoid the race | ||
730 | * with wait_task_stopped() in between; a non-zero ->exit_code will | ||
731 | * wrongly look like another report from tracee. | ||
732 | * | ||
733 | * Note that we need siglock even if ->exit_code == data and/or this | ||
734 | * status was not reported yet, the new status must not be cleared by | ||
735 | * wait_task_stopped() after resume. | ||
736 | * | ||
737 | * If data == 0 we do not care if wait_task_stopped() reports the old | ||
738 | * status and clears the code too; this can't race with the tracee, it | ||
739 | * takes siglock after resume. | ||
740 | */ | ||
741 | need_siglock = data && !thread_group_empty(current); | ||
742 | if (need_siglock) | ||
743 | spin_lock_irq(&child->sighand->siglock); | ||
727 | child->exit_code = data; | 744 | child->exit_code = data; |
728 | wake_up_state(child, __TASK_TRACED); | 745 | wake_up_state(child, __TASK_TRACED); |
746 | if (need_siglock) | ||
747 | spin_unlock_irq(&child->sighand->siglock); | ||
729 | 748 | ||
730 | return 0; | 749 | return 0; |
731 | } | 750 | } |
diff --git a/kernel/signal.c b/kernel/signal.c
index a390499943e4..d51c5ddd855c 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2992,11 +2992,9 @@ static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info) | |||
2992 | * Nor can they impersonate a kill()/tgkill(), which adds source info. | 2992 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
2993 | */ | 2993 | */ |
2994 | if ((info->si_code >= 0 || info->si_code == SI_TKILL) && | 2994 | if ((info->si_code >= 0 || info->si_code == SI_TKILL) && |
2995 | (task_pid_vnr(current) != pid)) { | 2995 | (task_pid_vnr(current) != pid)) |
2996 | /* We used to allow any < 0 si_code */ | ||
2997 | WARN_ON_ONCE(info->si_code < 0); | ||
2998 | return -EPERM; | 2996 | return -EPERM; |
2999 | } | 2997 | |
3000 | info->si_signo = sig; | 2998 | info->si_signo = sig; |
3001 | 2999 | ||
3002 | /* POSIX.1b doesn't mention process groups. */ | 3000 | /* POSIX.1b doesn't mention process groups. */ |
@@ -3041,12 +3039,10 @@ static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) | |||
3041 | /* Not even root can pretend to send signals from the kernel. | 3039 | /* Not even root can pretend to send signals from the kernel. |
3042 | * Nor can they impersonate a kill()/tgkill(), which adds source info. | 3040 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
3043 | */ | 3041 | */ |
3044 | if (((info->si_code >= 0 || info->si_code == SI_TKILL)) && | 3042 | if ((info->si_code >= 0 || info->si_code == SI_TKILL) && |
3045 | (task_pid_vnr(current) != pid)) { | 3043 | (task_pid_vnr(current) != pid)) |
3046 | /* We used to allow any < 0 si_code */ | ||
3047 | WARN_ON_ONCE(info->si_code < 0); | ||
3048 | return -EPERM; | 3044 | return -EPERM; |
3049 | } | 3045 | |
3050 | info->si_signo = sig; | 3046 | info->si_signo = sig; |
3051 | 3047 | ||
3052 | return do_send_specific(tgid, pid, sig, info); | 3048 | return do_send_specific(tgid, pid, sig, info); |
diff --git a/kernel/smp.c b/kernel/smp.c
index f38a1e692259..07854477c164 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -19,7 +19,7 @@ | |||
19 | 19 | ||
20 | enum { | 20 | enum { |
21 | CSD_FLAG_LOCK = 0x01, | 21 | CSD_FLAG_LOCK = 0x01, |
22 | CSD_FLAG_WAIT = 0x02, | 22 | CSD_FLAG_SYNCHRONOUS = 0x02, |
23 | }; | 23 | }; |
24 | 24 | ||
25 | struct call_function_data { | 25 | struct call_function_data { |
@@ -107,7 +107,7 @@ void __init call_function_init(void) | |||
107 | */ | 107 | */ |
108 | static void csd_lock_wait(struct call_single_data *csd) | 108 | static void csd_lock_wait(struct call_single_data *csd) |
109 | { | 109 | { |
110 | while (csd->flags & CSD_FLAG_LOCK) | 110 | while (smp_load_acquire(&csd->flags) & CSD_FLAG_LOCK) |
111 | cpu_relax(); | 111 | cpu_relax(); |
112 | } | 112 | } |
113 | 113 | ||
@@ -121,19 +121,17 @@ static void csd_lock(struct call_single_data *csd) | |||
121 | * to ->flags with any subsequent assignments to other | 121 | * to ->flags with any subsequent assignments to other |
122 | * fields of the specified call_single_data structure: | 122 | * fields of the specified call_single_data structure: |
123 | */ | 123 | */ |
124 | smp_mb(); | 124 | smp_wmb(); |
125 | } | 125 | } |
126 | 126 | ||
127 | static void csd_unlock(struct call_single_data *csd) | 127 | static void csd_unlock(struct call_single_data *csd) |
128 | { | 128 | { |
129 | WARN_ON((csd->flags & CSD_FLAG_WAIT) && !(csd->flags & CSD_FLAG_LOCK)); | 129 | WARN_ON(!(csd->flags & CSD_FLAG_LOCK)); |
130 | 130 | ||
131 | /* | 131 | /* |
132 | * ensure we're all done before releasing data: | 132 | * ensure we're all done before releasing data: |
133 | */ | 133 | */ |
134 | smp_mb(); | 134 | smp_store_release(&csd->flags, 0); |
135 | |||
136 | csd->flags &= ~CSD_FLAG_LOCK; | ||
137 | } | 135 | } |
138 | 136 | ||
139 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data); | 137 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data); |
@@ -144,13 +142,16 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data); | |||
144 | * ->func, ->info, and ->flags set. | 142 | * ->func, ->info, and ->flags set. |
145 | */ | 143 | */ |
146 | static int generic_exec_single(int cpu, struct call_single_data *csd, | 144 | static int generic_exec_single(int cpu, struct call_single_data *csd, |
147 | smp_call_func_t func, void *info, int wait) | 145 | smp_call_func_t func, void *info) |
148 | { | 146 | { |
149 | struct call_single_data csd_stack = { .flags = 0 }; | ||
150 | unsigned long flags; | ||
151 | |||
152 | |||
153 | if (cpu == smp_processor_id()) { | 147 | if (cpu == smp_processor_id()) { |
148 | unsigned long flags; | ||
149 | |||
150 | /* | ||
151 | * We can unlock early even for the synchronous on-stack case, | ||
152 | * since we're doing this from the same CPU.. | ||
153 | */ | ||
154 | csd_unlock(csd); | ||
154 | local_irq_save(flags); | 155 | local_irq_save(flags); |
155 | func(info); | 156 | func(info); |
156 | local_irq_restore(flags); | 157 | local_irq_restore(flags); |
@@ -158,24 +159,14 @@ static int generic_exec_single(int cpu, struct call_single_data *csd, | |||
158 | } | 159 | } |
159 | 160 | ||
160 | 161 | ||
161 | if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) | 162 | if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) { |
163 | csd_unlock(csd); | ||
162 | return -ENXIO; | 164 | return -ENXIO; |
163 | |||
164 | |||
165 | if (!csd) { | ||
166 | csd = &csd_stack; | ||
167 | if (!wait) | ||
168 | csd = this_cpu_ptr(&csd_data); | ||
169 | } | 165 | } |
170 | 166 | ||
171 | csd_lock(csd); | ||
172 | |||
173 | csd->func = func; | 167 | csd->func = func; |
174 | csd->info = info; | 168 | csd->info = info; |
175 | 169 | ||
176 | if (wait) | ||
177 | csd->flags |= CSD_FLAG_WAIT; | ||
178 | |||
179 | /* | 170 | /* |
180 | * The list addition should be visible before sending the IPI | 171 | * The list addition should be visible before sending the IPI |
181 | * handler locks the list to pull the entry off it because of | 172 | * handler locks the list to pull the entry off it because of |
@@ -190,9 +181,6 @@ static int generic_exec_single(int cpu, struct call_single_data *csd, | |||
190 | if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu))) | 181 | if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu))) |
191 | arch_send_call_function_single_ipi(cpu); | 182 | arch_send_call_function_single_ipi(cpu); |
192 | 183 | ||
193 | if (wait) | ||
194 | csd_lock_wait(csd); | ||
195 | |||
196 | return 0; | 184 | return 0; |
197 | } | 185 | } |
198 | 186 | ||
@@ -250,8 +238,17 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline) | |||
250 | } | 238 | } |
251 | 239 | ||
252 | llist_for_each_entry_safe(csd, csd_next, entry, llist) { | 240 | llist_for_each_entry_safe(csd, csd_next, entry, llist) { |
253 | csd->func(csd->info); | 241 | smp_call_func_t func = csd->func; |
254 | csd_unlock(csd); | 242 | void *info = csd->info; |
243 | |||
244 | /* Do we wait until *after* callback? */ | ||
245 | if (csd->flags & CSD_FLAG_SYNCHRONOUS) { | ||
246 | func(info); | ||
247 | csd_unlock(csd); | ||
248 | } else { | ||
249 | csd_unlock(csd); | ||
250 | func(info); | ||
251 | } | ||
255 | } | 252 | } |
256 | 253 | ||
257 | /* | 254 | /* |
@@ -274,6 +271,8 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline) | |||
274 | int smp_call_function_single(int cpu, smp_call_func_t func, void *info, | 271 | int smp_call_function_single(int cpu, smp_call_func_t func, void *info, |
275 | int wait) | 272 | int wait) |
276 | { | 273 | { |
274 | struct call_single_data *csd; | ||
275 | struct call_single_data csd_stack = { .flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS }; | ||
277 | int this_cpu; | 276 | int this_cpu; |
278 | int err; | 277 | int err; |
279 | 278 | ||
@@ -292,7 +291,16 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info, | |||
292 | WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() | 291 | WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() |
293 | && !oops_in_progress); | 292 | && !oops_in_progress); |
294 | 293 | ||
295 | err = generic_exec_single(cpu, NULL, func, info, wait); | 294 | csd = &csd_stack; |
295 | if (!wait) { | ||
296 | csd = this_cpu_ptr(&csd_data); | ||
297 | csd_lock(csd); | ||
298 | } | ||
299 | |||
300 | err = generic_exec_single(cpu, csd, func, info); | ||
301 | |||
302 | if (wait) | ||
303 | csd_lock_wait(csd); | ||
296 | 304 | ||
297 | put_cpu(); | 305 | put_cpu(); |
298 | 306 | ||
@@ -321,7 +329,15 @@ int smp_call_function_single_async(int cpu, struct call_single_data *csd) | |||
321 | int err = 0; | 329 | int err = 0; |
322 | 330 | ||
323 | preempt_disable(); | 331 | preempt_disable(); |
324 | err = generic_exec_single(cpu, csd, csd->func, csd->info, 0); | 332 | |
333 | /* We could deadlock if we have to wait here with interrupts disabled! */ | ||
334 | if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK)) | ||
335 | csd_lock_wait(csd); | ||
336 | |||
337 | csd->flags = CSD_FLAG_LOCK; | ||
338 | smp_wmb(); | ||
339 | |||
340 | err = generic_exec_single(cpu, csd, csd->func, csd->info); | ||
325 | preempt_enable(); | 341 | preempt_enable(); |
326 | 342 | ||
327 | return err; | 343 | return err; |
@@ -433,6 +449,8 @@ void smp_call_function_many(const struct cpumask *mask, | |||
433 | struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu); | 449 | struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu); |
434 | 450 | ||
435 | csd_lock(csd); | 451 | csd_lock(csd); |
452 | if (wait) | ||
453 | csd->flags |= CSD_FLAG_SYNCHRONOUS; | ||
436 | csd->func = func; | 454 | csd->func = func; |
437 | csd->info = info; | 455 | csd->info = info; |
438 | llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)); | 456 | llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)); |
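csd_lock_wait() and csd_unlock() now form an acquire/release pair: once the waiter sees CSD_FLAG_LOCK clear, it is also guaranteed to see every store made before the flag was released. A minimal sketch of that handshake on a hypothetical flag/result pair (not the csd structure itself):

struct handoff { unsigned int flags; int result; };	/* flags starts as 0x01 */

static void producer_finish(struct handoff *h, int result)
{
	h->result = result;			/* ordered before the release... */
	smp_store_release(&h->flags, 0);	/* ...so the waiter sees it */
}

static int consumer_wait(struct handoff *h)
{
	while (smp_load_acquire(&h->flags) & 0x01)
		cpu_relax();
	return h->result;			/* ordered after the acquire */
}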
diff --git a/kernel/sys.c b/kernel/sys.c
index 3be344902316..a4e372b798a5 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1649,14 +1649,13 @@ SYSCALL_DEFINE1(umask, int, mask) | |||
1649 | return mask; | 1649 | return mask; |
1650 | } | 1650 | } |
1651 | 1651 | ||
1652 | static int prctl_set_mm_exe_file_locked(struct mm_struct *mm, unsigned int fd) | 1652 | static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) |
1653 | { | 1653 | { |
1654 | struct fd exe; | 1654 | struct fd exe; |
1655 | struct file *old_exe, *exe_file; | ||
1655 | struct inode *inode; | 1656 | struct inode *inode; |
1656 | int err; | 1657 | int err; |
1657 | 1658 | ||
1658 | VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm); | ||
1659 | |||
1660 | exe = fdget(fd); | 1659 | exe = fdget(fd); |
1661 | if (!exe.file) | 1660 | if (!exe.file) |
1662 | return -EBADF; | 1661 | return -EBADF; |
@@ -1680,15 +1679,22 @@ static int prctl_set_mm_exe_file_locked(struct mm_struct *mm, unsigned int fd) | |||
1680 | /* | 1679 | /* |
1681 | * Forbid mm->exe_file change if old file still mapped. | 1680 | * Forbid mm->exe_file change if old file still mapped. |
1682 | */ | 1681 | */ |
1682 | exe_file = get_mm_exe_file(mm); | ||
1683 | err = -EBUSY; | 1683 | err = -EBUSY; |
1684 | if (mm->exe_file) { | 1684 | if (exe_file) { |
1685 | struct vm_area_struct *vma; | 1685 | struct vm_area_struct *vma; |
1686 | 1686 | ||
1687 | for (vma = mm->mmap; vma; vma = vma->vm_next) | 1687 | down_read(&mm->mmap_sem); |
1688 | if (vma->vm_file && | 1688 | for (vma = mm->mmap; vma; vma = vma->vm_next) { |
1689 | path_equal(&vma->vm_file->f_path, | 1689 | if (!vma->vm_file) |
1690 | &mm->exe_file->f_path)) | 1690 | continue; |
1691 | goto exit; | 1691 | if (path_equal(&vma->vm_file->f_path, |
1692 | &exe_file->f_path)) | ||
1693 | goto exit_err; | ||
1694 | } | ||
1695 | |||
1696 | up_read(&mm->mmap_sem); | ||
1697 | fput(exe_file); | ||
1692 | } | 1698 | } |
1693 | 1699 | ||
1694 | /* | 1700 | /* |
@@ -1702,10 +1708,18 @@ static int prctl_set_mm_exe_file_locked(struct mm_struct *mm, unsigned int fd) | |||
1702 | goto exit; | 1708 | goto exit; |
1703 | 1709 | ||
1704 | err = 0; | 1710 | err = 0; |
1705 | set_mm_exe_file(mm, exe.file); /* this grabs a reference to exe.file */ | 1711 | /* set the new file, lockless */ |
1712 | get_file(exe.file); | ||
1713 | old_exe = xchg(&mm->exe_file, exe.file); | ||
1714 | if (old_exe) | ||
1715 | fput(old_exe); | ||
1706 | exit: | 1716 | exit: |
1707 | fdput(exe); | 1717 | fdput(exe); |
1708 | return err; | 1718 | return err; |
1719 | exit_err: | ||
1720 | up_read(&mm->mmap_sem); | ||
1721 | fput(exe_file); | ||
1722 | goto exit; | ||
1709 | } | 1723 | } |
1710 | 1724 | ||
1711 | #ifdef CONFIG_CHECKPOINT_RESTORE | 1725 | #ifdef CONFIG_CHECKPOINT_RESTORE |
@@ -1840,10 +1854,9 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data | |||
1840 | user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL; | 1854 | user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL; |
1841 | } | 1855 | } |
1842 | 1856 | ||
1843 | down_write(&mm->mmap_sem); | ||
1844 | if (prctl_map.exe_fd != (u32)-1) | 1857 | if (prctl_map.exe_fd != (u32)-1) |
1845 | error = prctl_set_mm_exe_file_locked(mm, prctl_map.exe_fd); | 1858 | error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd); |
1846 | downgrade_write(&mm->mmap_sem); | 1859 | down_read(&mm->mmap_sem); |
1847 | if (error) | 1860 | if (error) |
1848 | goto out; | 1861 | goto out; |
1849 | 1862 | ||
@@ -1909,12 +1922,8 @@ static int prctl_set_mm(int opt, unsigned long addr, | |||
1909 | if (!capable(CAP_SYS_RESOURCE)) | 1922 | if (!capable(CAP_SYS_RESOURCE)) |
1910 | return -EPERM; | 1923 | return -EPERM; |
1911 | 1924 | ||
1912 | if (opt == PR_SET_MM_EXE_FILE) { | 1925 | if (opt == PR_SET_MM_EXE_FILE) |
1913 | down_write(&mm->mmap_sem); | 1926 | return prctl_set_mm_exe_file(mm, (unsigned int)addr); |
1914 | error = prctl_set_mm_exe_file_locked(mm, (unsigned int)addr); | ||
1915 | up_write(&mm->mmap_sem); | ||
1916 | return error; | ||
1917 | } | ||
1918 | 1927 | ||
1919 | if (addr >= TASK_SIZE || addr < mmap_min_addr) | 1928 | if (addr >= TASK_SIZE || addr < mmap_min_addr) |
1920 | return -EINVAL; | 1929 | return -EINVAL; |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 42b7fc2860c1..2082b1a88fb9 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -93,11 +93,9 @@ | |||
93 | #include <linux/nmi.h> | 93 | #include <linux/nmi.h> |
94 | #endif | 94 | #endif |
95 | 95 | ||
96 | |||
97 | #if defined(CONFIG_SYSCTL) | 96 | #if defined(CONFIG_SYSCTL) |
98 | 97 | ||
99 | /* External variables not in a header file. */ | 98 | /* External variables not in a header file. */ |
100 | extern int max_threads; | ||
101 | extern int suid_dumpable; | 99 | extern int suid_dumpable; |
102 | #ifdef CONFIG_COREDUMP | 100 | #ifdef CONFIG_COREDUMP |
103 | extern int core_uses_pid; | 101 | extern int core_uses_pid; |
@@ -710,10 +708,10 @@ static struct ctl_table kern_table[] = { | |||
710 | #endif | 708 | #endif |
711 | { | 709 | { |
712 | .procname = "threads-max", | 710 | .procname = "threads-max", |
713 | .data = &max_threads, | 711 | .data = NULL, |
714 | .maxlen = sizeof(int), | 712 | .maxlen = sizeof(int), |
715 | .mode = 0644, | 713 | .mode = 0644, |
716 | .proc_handler = proc_dointvec, | 714 | .proc_handler = sysctl_max_threads, |
717 | }, | 715 | }, |
718 | { | 716 | { |
719 | .procname = "random", | 717 | .procname = "random", |
@@ -1983,7 +1981,15 @@ static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp, | |||
1983 | int write, void *data) | 1981 | int write, void *data) |
1984 | { | 1982 | { |
1985 | if (write) { | 1983 | if (write) { |
1986 | *valp = *negp ? -*lvalp : *lvalp; | 1984 | if (*negp) { |
1985 | if (*lvalp > (unsigned long) INT_MAX + 1) | ||
1986 | return -EINVAL; | ||
1987 | *valp = -*lvalp; | ||
1988 | } else { | ||
1989 | if (*lvalp > (unsigned long) INT_MAX) | ||
1990 | return -EINVAL; | ||
1991 | *valp = *lvalp; | ||
1992 | } | ||
1987 | } else { | 1993 | } else { |
1988 | int val = *valp; | 1994 | int val = *valp; |
1989 | if (val < 0) { | 1995 | if (val < 0) { |
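The rewritten conversion rejects out-of-range sysctl writes instead of silently truncating them. The bound is one larger on the negative side because -(INT_MAX + 1) is exactly INT_MIN and still representable in an int; the two accepted extremes, assuming a 32-bit int:

/*
 *   written value        *lvalp          result (*valp)
 *   "-2147483648"        2147483648      -2147483648  (INT_MIN, accepted)
 *   "2147483647"         2147483647       2147483647  (INT_MAX, accepted)
 *   "-2147483649" or "2147483648"   ->   -EINVAL instead of wrapping
 */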
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 25d942d1da27..11dc22a6983b 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -440,7 +440,7 @@ int clockevents_unbind_device(struct clock_event_device *ced, int cpu) | |||
440 | mutex_unlock(&clockevents_mutex); | 440 | mutex_unlock(&clockevents_mutex); |
441 | return ret; | 441 | return ret; |
442 | } | 442 | } |
443 | EXPORT_SYMBOL_GPL(clockevents_unbind); | 443 | EXPORT_SYMBOL_GPL(clockevents_unbind_device); |
444 | 444 | ||
445 | /* Sanity check of state transition callbacks */ | 445 | /* Sanity check of state transition callbacks */ |
446 | static int clockevents_sanity_check(struct clock_event_device *dev) | 446 | static int clockevents_sanity_check(struct clock_event_device *dev) |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 7da1dfeb322e..3ab69fb72b85 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -565,6 +565,7 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match, | |||
565 | static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) | 565 | static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) |
566 | { | 566 | { |
567 | char *event = NULL, *sub = NULL, *match; | 567 | char *event = NULL, *sub = NULL, *match; |
568 | int ret; | ||
568 | 569 | ||
569 | /* | 570 | /* |
570 | * The buf format can be <subsystem>:<event-name> | 571 | * The buf format can be <subsystem>:<event-name> |
@@ -590,7 +591,13 @@ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) | |||
590 | event = NULL; | 591 | event = NULL; |
591 | } | 592 | } |
592 | 593 | ||
593 | return __ftrace_set_clr_event(tr, match, sub, event, set); | 594 | ret = __ftrace_set_clr_event(tr, match, sub, event, set); |
595 | |||
596 | /* Put back the colon to allow this to be called again */ | ||
597 | if (buf) | ||
598 | *(buf - 1) = ':'; | ||
599 | |||
600 | return ret; | ||
594 | } | 601 | } |
595 | 602 | ||
596 | /** | 603 | /** |
@@ -1753,6 +1760,8 @@ static void update_event_printk(struct ftrace_event_call *call, | |||
1753 | ptr++; | 1760 | ptr++; |
1754 | /* Check for alpha chars like ULL */ | 1761 | /* Check for alpha chars like ULL */ |
1755 | } while (isalnum(*ptr)); | 1762 | } while (isalnum(*ptr)); |
1763 | if (!*ptr) | ||
1764 | break; | ||
1756 | /* | 1765 | /* |
1757 | * A number must have some kind of delimiter after | 1766 | * A number must have some kind of delimiter after |
1758 | * it, and we can ignore that too. | 1767 | * it, and we can ignore that too. |
@@ -1779,12 +1788,16 @@ static void update_event_printk(struct ftrace_event_call *call, | |||
1779 | do { | 1788 | do { |
1780 | ptr++; | 1789 | ptr++; |
1781 | } while (isalnum(*ptr) || *ptr == '_'); | 1790 | } while (isalnum(*ptr) || *ptr == '_'); |
1791 | if (!*ptr) | ||
1792 | break; | ||
1782 | /* | 1793 | /* |
1783 | * If what comes after this variable is a '.' or | 1794 | * If what comes after this variable is a '.' or |
1784 | * '->' then we can continue to ignore that string. | 1795 | * '->' then we can continue to ignore that string. |
1785 | */ | 1796 | */ |
1786 | if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) { | 1797 | if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) { |
1787 | ptr += *ptr == '.' ? 1 : 2; | 1798 | ptr += *ptr == '.' ? 1 : 2; |
1799 | if (!*ptr) | ||
1800 | break; | ||
1788 | goto skip_more; | 1801 | goto skip_more; |
1789 | } | 1802 | } |
1790 | /* | 1803 | /* |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 9cfea4c6d314..a51e79688455 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -1308,15 +1308,19 @@ void graph_trace_open(struct trace_iterator *iter) | |||
1308 | { | 1308 | { |
1309 | /* pid and depth on the last trace processed */ | 1309 | /* pid and depth on the last trace processed */ |
1310 | struct fgraph_data *data; | 1310 | struct fgraph_data *data; |
1311 | gfp_t gfpflags; | ||
1311 | int cpu; | 1312 | int cpu; |
1312 | 1313 | ||
1313 | iter->private = NULL; | 1314 | iter->private = NULL; |
1314 | 1315 | ||
1315 | data = kzalloc(sizeof(*data), GFP_KERNEL); | 1316 | /* We can be called in atomic context via ftrace_dump() */ |
1317 | gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL; | ||
1318 | |||
1319 | data = kzalloc(sizeof(*data), gfpflags); | ||
1316 | if (!data) | 1320 | if (!data) |
1317 | goto out_err; | 1321 | goto out_err; |
1318 | 1322 | ||
1319 | data->cpu_data = alloc_percpu(struct fgraph_cpu_data); | 1323 | data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags); |
1320 | if (!data->cpu_data) | 1324 | if (!data->cpu_data) |
1321 | goto out_err_free; | 1325 | goto out_err_free; |
1322 | 1326 | ||