Diffstat (limited to 'kernel')
35 files changed, 530 insertions, 335 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 0f8f8b0bc1bf..60c302cfb4d3 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -197,9 +197,9 @@ x509.genkey: | |||
197 | @echo >>x509.genkey "x509_extensions = myexts" | 197 | @echo >>x509.genkey "x509_extensions = myexts" |
198 | @echo >>x509.genkey | 198 | @echo >>x509.genkey |
199 | @echo >>x509.genkey "[ req_distinguished_name ]" | 199 | @echo >>x509.genkey "[ req_distinguished_name ]" |
200 | @echo >>x509.genkey "O = Magrathea" | 200 | @echo >>x509.genkey "#O = Unspecified company" |
201 | @echo >>x509.genkey "CN = Glacier signing key" | 201 | @echo >>x509.genkey "CN = Build time autogenerated kernel key" |
202 | @echo >>x509.genkey "emailAddress = slartibartfast@magrathea.h2g2" | 202 | @echo >>x509.genkey "#emailAddress = unspecified.user@unspecified.company" |
203 | @echo >>x509.genkey | 203 | @echo >>x509.genkey |
204 | @echo >>x509.genkey "[ myexts ]" | 204 | @echo >>x509.genkey "[ myexts ]" |
205 | @echo >>x509.genkey "basicConstraints=critical,CA:FALSE" | 205 | @echo >>x509.genkey "basicConstraints=critical,CA:FALSE" |
diff --git a/kernel/audit.c b/kernel/audit.c
index 72ab759a0b43..1c13e4267de6 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -43,6 +43,7 @@ | |||
43 | 43 | ||
44 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 44 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
45 | 45 | ||
46 | #include <linux/file.h> | ||
46 | #include <linux/init.h> | 47 | #include <linux/init.h> |
47 | #include <linux/types.h> | 48 | #include <linux/types.h> |
48 | #include <linux/atomic.h> | 49 | #include <linux/atomic.h> |
@@ -107,6 +108,7 @@ static u32 audit_rate_limit; | |||
107 | * When set to zero, this means unlimited. */ | 108 | * When set to zero, this means unlimited. */ |
108 | static u32 audit_backlog_limit = 64; | 109 | static u32 audit_backlog_limit = 64; |
109 | #define AUDIT_BACKLOG_WAIT_TIME (60 * HZ) | 110 | #define AUDIT_BACKLOG_WAIT_TIME (60 * HZ) |
111 | static u32 audit_backlog_wait_time_master = AUDIT_BACKLOG_WAIT_TIME; | ||
110 | static u32 audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME; | 112 | static u32 audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME; |
111 | static u32 audit_backlog_wait_overflow = 0; | 113 | static u32 audit_backlog_wait_overflow = 0; |
112 | 114 | ||
@@ -338,13 +340,13 @@ static int audit_set_backlog_limit(u32 limit) | |||
338 | static int audit_set_backlog_wait_time(u32 timeout) | 340 | static int audit_set_backlog_wait_time(u32 timeout) |
339 | { | 341 | { |
340 | return audit_do_config_change("audit_backlog_wait_time", | 342 | return audit_do_config_change("audit_backlog_wait_time", |
341 | &audit_backlog_wait_time, timeout); | 343 | &audit_backlog_wait_time_master, timeout); |
342 | } | 344 | } |
343 | 345 | ||
344 | static int audit_set_enabled(u32 state) | 346 | static int audit_set_enabled(u32 state) |
345 | { | 347 | { |
346 | int rc; | 348 | int rc; |
347 | if (state < AUDIT_OFF || state > AUDIT_LOCKED) | 349 | if (state > AUDIT_LOCKED) |
348 | return -EINVAL; | 350 | return -EINVAL; |
349 | 351 | ||
350 | rc = audit_do_config_change("audit_enabled", &audit_enabled, state); | 352 | rc = audit_do_config_change("audit_enabled", &audit_enabled, state); |
@@ -663,7 +665,7 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type) | |||
663 | case AUDIT_MAKE_EQUIV: | 665 | case AUDIT_MAKE_EQUIV: |
664 | /* Only support auditd and auditctl in initial pid namespace | 666 | /* Only support auditd and auditctl in initial pid namespace |
665 | * for now. */ | 667 | * for now. */ |
666 | if ((task_active_pid_ns(current) != &init_pid_ns)) | 668 | if (task_active_pid_ns(current) != &init_pid_ns) |
667 | return -EPERM; | 669 | return -EPERM; |
668 | 670 | ||
669 | if (!netlink_capable(skb, CAP_AUDIT_CONTROL)) | 671 | if (!netlink_capable(skb, CAP_AUDIT_CONTROL)) |
@@ -834,7 +836,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
834 | s.lost = atomic_read(&audit_lost); | 836 | s.lost = atomic_read(&audit_lost); |
835 | s.backlog = skb_queue_len(&audit_skb_queue); | 837 | s.backlog = skb_queue_len(&audit_skb_queue); |
836 | s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL; | 838 | s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL; |
837 | s.backlog_wait_time = audit_backlog_wait_time; | 839 | s.backlog_wait_time = audit_backlog_wait_time_master; |
838 | audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s)); | 840 | audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s)); |
839 | break; | 841 | break; |
840 | } | 842 | } |
@@ -877,8 +879,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
877 | if (s.mask & AUDIT_STATUS_BACKLOG_WAIT_TIME) { | 879 | if (s.mask & AUDIT_STATUS_BACKLOG_WAIT_TIME) { |
878 | if (sizeof(s) > (size_t)nlh->nlmsg_len) | 880 | if (sizeof(s) > (size_t)nlh->nlmsg_len) |
879 | return -EINVAL; | 881 | return -EINVAL; |
880 | if (s.backlog_wait_time < 0 || | 882 | if (s.backlog_wait_time > 10*AUDIT_BACKLOG_WAIT_TIME) |
881 | s.backlog_wait_time > 10*AUDIT_BACKLOG_WAIT_TIME) | ||
882 | return -EINVAL; | 883 | return -EINVAL; |
883 | err = audit_set_backlog_wait_time(s.backlog_wait_time); | 884 | err = audit_set_backlog_wait_time(s.backlog_wait_time); |
884 | if (err < 0) | 885 | if (err < 0) |
@@ -1385,7 +1386,8 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, | |||
1385 | return NULL; | 1386 | return NULL; |
1386 | } | 1387 | } |
1387 | 1388 | ||
1388 | audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME; | 1389 | if (!reserve) |
1390 | audit_backlog_wait_time = audit_backlog_wait_time_master; | ||
1389 | 1391 | ||
1390 | ab = audit_buffer_alloc(ctx, gfp_mask, type); | 1392 | ab = audit_buffer_alloc(ctx, gfp_mask, type); |
1391 | if (!ab) { | 1393 | if (!ab) { |
@@ -1759,7 +1761,7 @@ void audit_log_name(struct audit_context *context, struct audit_names *n, | |||
1759 | } else | 1761 | } else |
1760 | audit_log_format(ab, " name=(null)"); | 1762 | audit_log_format(ab, " name=(null)"); |
1761 | 1763 | ||
1762 | if (n->ino != (unsigned long)-1) { | 1764 | if (n->ino != (unsigned long)-1) |
1763 | audit_log_format(ab, " inode=%lu" | 1765 | audit_log_format(ab, " inode=%lu" |
1764 | " dev=%02x:%02x mode=%#ho" | 1766 | " dev=%02x:%02x mode=%#ho" |
1765 | " ouid=%u ogid=%u rdev=%02x:%02x", | 1767 | " ouid=%u ogid=%u rdev=%02x:%02x", |
@@ -1771,7 +1773,6 @@ void audit_log_name(struct audit_context *context, struct audit_names *n, | |||
1771 | from_kgid(&init_user_ns, n->gid), | 1773 | from_kgid(&init_user_ns, n->gid), |
1772 | MAJOR(n->rdev), | 1774 | MAJOR(n->rdev), |
1773 | MINOR(n->rdev)); | 1775 | MINOR(n->rdev)); |
1774 | } | ||
1775 | if (n->osid != 0) { | 1776 | if (n->osid != 0) { |
1776 | char *ctx = NULL; | 1777 | char *ctx = NULL; |
1777 | u32 len; | 1778 | u32 len; |
@@ -1838,11 +1839,29 @@ error_path: | |||
1838 | } | 1839 | } |
1839 | EXPORT_SYMBOL(audit_log_task_context); | 1840 | EXPORT_SYMBOL(audit_log_task_context); |
1840 | 1841 | ||
1842 | void audit_log_d_path_exe(struct audit_buffer *ab, | ||
1843 | struct mm_struct *mm) | ||
1844 | { | ||
1845 | struct file *exe_file; | ||
1846 | |||
1847 | if (!mm) | ||
1848 | goto out_null; | ||
1849 | |||
1850 | exe_file = get_mm_exe_file(mm); | ||
1851 | if (!exe_file) | ||
1852 | goto out_null; | ||
1853 | |||
1854 | audit_log_d_path(ab, " exe=", &exe_file->f_path); | ||
1855 | fput(exe_file); | ||
1856 | return; | ||
1857 | out_null: | ||
1858 | audit_log_format(ab, " exe=(null)"); | ||
1859 | } | ||
1860 | |||
1841 | void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk) | 1861 | void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk) |
1842 | { | 1862 | { |
1843 | const struct cred *cred; | 1863 | const struct cred *cred; |
1844 | char comm[sizeof(tsk->comm)]; | 1864 | char comm[sizeof(tsk->comm)]; |
1845 | struct mm_struct *mm = tsk->mm; | ||
1846 | char *tty; | 1865 | char *tty; |
1847 | 1866 | ||
1848 | if (!ab) | 1867 | if (!ab) |
@@ -1878,13 +1897,7 @@ void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk) | |||
1878 | audit_log_format(ab, " comm="); | 1897 | audit_log_format(ab, " comm="); |
1879 | audit_log_untrustedstring(ab, get_task_comm(comm, tsk)); | 1898 | audit_log_untrustedstring(ab, get_task_comm(comm, tsk)); |
1880 | 1899 | ||
1881 | if (mm) { | 1900 | audit_log_d_path_exe(ab, tsk->mm); |
1882 | down_read(&mm->mmap_sem); | ||
1883 | if (mm->exe_file) | ||
1884 | audit_log_d_path(ab, " exe=", &mm->exe_file->f_path); | ||
1885 | up_read(&mm->mmap_sem); | ||
1886 | } else | ||
1887 | audit_log_format(ab, " exe=(null)"); | ||
1888 | audit_log_task_context(ab); | 1901 | audit_log_task_context(ab); |
1889 | } | 1902 | } |
1890 | EXPORT_SYMBOL(audit_log_task_info); | 1903 | EXPORT_SYMBOL(audit_log_task_info); |
@@ -1915,7 +1928,7 @@ void audit_log_link_denied(const char *operation, struct path *link) | |||
1915 | 1928 | ||
1916 | /* Generate AUDIT_PATH record with object. */ | 1929 | /* Generate AUDIT_PATH record with object. */ |
1917 | name->type = AUDIT_TYPE_NORMAL; | 1930 | name->type = AUDIT_TYPE_NORMAL; |
1918 | audit_copy_inode(name, link->dentry, link->dentry->d_inode); | 1931 | audit_copy_inode(name, link->dentry, d_backing_inode(link->dentry)); |
1919 | audit_log_name(current->audit_context, name, link, 0, NULL); | 1932 | audit_log_name(current->audit_context, name, link, 0, NULL); |
1920 | out: | 1933 | out: |
1921 | kfree(name); | 1934 | kfree(name); |
diff --git a/kernel/audit.h b/kernel/audit.h
index 1caa0d345d90..d641f9bb3ed0 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -257,6 +257,9 @@ extern struct list_head audit_filter_list[]; | |||
257 | 257 | ||
258 | extern struct audit_entry *audit_dupe_rule(struct audit_krule *old); | 258 | extern struct audit_entry *audit_dupe_rule(struct audit_krule *old); |
259 | 259 | ||
260 | extern void audit_log_d_path_exe(struct audit_buffer *ab, | ||
261 | struct mm_struct *mm); | ||
262 | |||
260 | /* audit watch functions */ | 263 | /* audit watch functions */ |
261 | #ifdef CONFIG_AUDIT_WATCH | 264 | #ifdef CONFIG_AUDIT_WATCH |
262 | extern void audit_put_watch(struct audit_watch *watch); | 265 | extern void audit_put_watch(struct audit_watch *watch); |
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 2e0c97427b33..b0f9877273fc 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -37,6 +37,7 @@ struct audit_chunk { | |||
37 | 37 | ||
38 | static LIST_HEAD(tree_list); | 38 | static LIST_HEAD(tree_list); |
39 | static LIST_HEAD(prune_list); | 39 | static LIST_HEAD(prune_list); |
40 | static struct task_struct *prune_thread; | ||
40 | 41 | ||
41 | /* | 42 | /* |
42 | * One struct chunk is attached to each inode of interest. | 43 | * One struct chunk is attached to each inode of interest. |
@@ -576,7 +577,7 @@ int audit_remove_tree_rule(struct audit_krule *rule) | |||
576 | 577 | ||
577 | static int compare_root(struct vfsmount *mnt, void *arg) | 578 | static int compare_root(struct vfsmount *mnt, void *arg) |
578 | { | 579 | { |
579 | return mnt->mnt_root->d_inode == arg; | 580 | return d_backing_inode(mnt->mnt_root) == arg; |
580 | } | 581 | } |
581 | 582 | ||
582 | void audit_trim_trees(void) | 583 | void audit_trim_trees(void) |
@@ -648,7 +649,58 @@ void audit_put_tree(struct audit_tree *tree) | |||
648 | 649 | ||
649 | static int tag_mount(struct vfsmount *mnt, void *arg) | 650 | static int tag_mount(struct vfsmount *mnt, void *arg) |
650 | { | 651 | { |
651 | return tag_chunk(mnt->mnt_root->d_inode, arg); | 652 | return tag_chunk(d_backing_inode(mnt->mnt_root), arg); |
653 | } | ||
654 | |||
655 | /* | ||
656 | * That gets run when evict_chunk() ends up needing to kill audit_tree. | ||
657 | * Runs from a separate thread. | ||
658 | */ | ||
659 | static int prune_tree_thread(void *unused) | ||
660 | { | ||
661 | for (;;) { | ||
662 | set_current_state(TASK_INTERRUPTIBLE); | ||
663 | if (list_empty(&prune_list)) | ||
664 | schedule(); | ||
665 | __set_current_state(TASK_RUNNING); | ||
666 | |||
667 | mutex_lock(&audit_cmd_mutex); | ||
668 | mutex_lock(&audit_filter_mutex); | ||
669 | |||
670 | while (!list_empty(&prune_list)) { | ||
671 | struct audit_tree *victim; | ||
672 | |||
673 | victim = list_entry(prune_list.next, | ||
674 | struct audit_tree, list); | ||
675 | list_del_init(&victim->list); | ||
676 | |||
677 | mutex_unlock(&audit_filter_mutex); | ||
678 | |||
679 | prune_one(victim); | ||
680 | |||
681 | mutex_lock(&audit_filter_mutex); | ||
682 | } | ||
683 | |||
684 | mutex_unlock(&audit_filter_mutex); | ||
685 | mutex_unlock(&audit_cmd_mutex); | ||
686 | } | ||
687 | return 0; | ||
688 | } | ||
689 | |||
690 | static int audit_launch_prune(void) | ||
691 | { | ||
692 | if (prune_thread) | ||
693 | return 0; | ||
694 | prune_thread = kthread_create(prune_tree_thread, NULL, | ||
695 | "audit_prune_tree"); | ||
696 | if (IS_ERR(prune_thread)) { | ||
697 | pr_err("cannot start thread audit_prune_tree"); | ||
698 | prune_thread = NULL; | ||
699 | return -ENOMEM; | ||
700 | } else { | ||
701 | wake_up_process(prune_thread); | ||
702 | return 0; | ||
703 | } | ||
652 | } | 704 | } |
653 | 705 | ||
654 | /* called with audit_filter_mutex */ | 706 | /* called with audit_filter_mutex */ |
@@ -674,6 +726,12 @@ int audit_add_tree_rule(struct audit_krule *rule) | |||
674 | /* do not set rule->tree yet */ | 726 | /* do not set rule->tree yet */ |
675 | mutex_unlock(&audit_filter_mutex); | 727 | mutex_unlock(&audit_filter_mutex); |
676 | 728 | ||
729 | if (unlikely(!prune_thread)) { | ||
730 | err = audit_launch_prune(); | ||
731 | if (err) | ||
732 | goto Err; | ||
733 | } | ||
734 | |||
677 | err = kern_path(tree->pathname, 0, &path); | 735 | err = kern_path(tree->pathname, 0, &path); |
678 | if (err) | 736 | if (err) |
679 | goto Err; | 737 | goto Err; |
@@ -811,36 +869,10 @@ int audit_tag_tree(char *old, char *new) | |||
811 | return failed; | 869 | return failed; |
812 | } | 870 | } |
813 | 871 | ||
814 | /* | ||
815 | * That gets run when evict_chunk() ends up needing to kill audit_tree. | ||
816 | * Runs from a separate thread. | ||
817 | */ | ||
818 | static int prune_tree_thread(void *unused) | ||
819 | { | ||
820 | mutex_lock(&audit_cmd_mutex); | ||
821 | mutex_lock(&audit_filter_mutex); | ||
822 | |||
823 | while (!list_empty(&prune_list)) { | ||
824 | struct audit_tree *victim; | ||
825 | |||
826 | victim = list_entry(prune_list.next, struct audit_tree, list); | ||
827 | list_del_init(&victim->list); | ||
828 | |||
829 | mutex_unlock(&audit_filter_mutex); | ||
830 | |||
831 | prune_one(victim); | ||
832 | |||
833 | mutex_lock(&audit_filter_mutex); | ||
834 | } | ||
835 | |||
836 | mutex_unlock(&audit_filter_mutex); | ||
837 | mutex_unlock(&audit_cmd_mutex); | ||
838 | return 0; | ||
839 | } | ||
840 | 872 | ||
841 | static void audit_schedule_prune(void) | 873 | static void audit_schedule_prune(void) |
842 | { | 874 | { |
843 | kthread_run(prune_tree_thread, NULL, "audit_prune_tree"); | 875 | wake_up_process(prune_thread); |
844 | } | 876 | } |
845 | 877 | ||
846 | /* | 878 | /* |
@@ -907,9 +939,9 @@ static void evict_chunk(struct audit_chunk *chunk) | |||
907 | for (n = 0; n < chunk->count; n++) | 939 | for (n = 0; n < chunk->count; n++) |
908 | list_del_init(&chunk->owners[n].list); | 940 | list_del_init(&chunk->owners[n].list); |
909 | spin_unlock(&hash_lock); | 941 | spin_unlock(&hash_lock); |
942 | mutex_unlock(&audit_filter_mutex); | ||
910 | if (need_prune) | 943 | if (need_prune) |
911 | audit_schedule_prune(); | 944 | audit_schedule_prune(); |
912 | mutex_unlock(&audit_filter_mutex); | ||
913 | } | 945 | } |
914 | 946 | ||
915 | static int audit_tree_handle_event(struct fsnotify_group *group, | 947 | static int audit_tree_handle_event(struct fsnotify_group *group, |
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index ad9c1682f616..6e30024d9aac 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -146,7 +146,7 @@ int audit_watch_compare(struct audit_watch *watch, unsigned long ino, dev_t dev) | |||
146 | /* Initialize a parent watch entry. */ | 146 | /* Initialize a parent watch entry. */ |
147 | static struct audit_parent *audit_init_parent(struct path *path) | 147 | static struct audit_parent *audit_init_parent(struct path *path) |
148 | { | 148 | { |
149 | struct inode *inode = path->dentry->d_inode; | 149 | struct inode *inode = d_backing_inode(path->dentry); |
150 | struct audit_parent *parent; | 150 | struct audit_parent *parent; |
151 | int ret; | 151 | int ret; |
152 | 152 | ||
@@ -361,11 +361,11 @@ static int audit_get_nd(struct audit_watch *watch, struct path *parent) | |||
361 | struct dentry *d = kern_path_locked(watch->path, parent); | 361 | struct dentry *d = kern_path_locked(watch->path, parent); |
362 | if (IS_ERR(d)) | 362 | if (IS_ERR(d)) |
363 | return PTR_ERR(d); | 363 | return PTR_ERR(d); |
364 | mutex_unlock(&parent->dentry->d_inode->i_mutex); | 364 | mutex_unlock(&d_backing_inode(parent->dentry)->i_mutex); |
365 | if (d->d_inode) { | 365 | if (d_is_positive(d)) { |
366 | /* update watch filter fields */ | 366 | /* update watch filter fields */ |
367 | watch->dev = d->d_inode->i_sb->s_dev; | 367 | watch->dev = d_backing_inode(d)->i_sb->s_dev; |
368 | watch->ino = d->d_inode->i_ino; | 368 | watch->ino = d_backing_inode(d)->i_ino; |
369 | } | 369 | } |
370 | dput(d); | 370 | dput(d); |
371 | return 0; | 371 | return 0; |
@@ -426,7 +426,7 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list) | |||
426 | return ret; | 426 | return ret; |
427 | 427 | ||
428 | /* either find an old parent or attach a new one */ | 428 | /* either find an old parent or attach a new one */ |
429 | parent = audit_find_parent(parent_path.dentry->d_inode); | 429 | parent = audit_find_parent(d_backing_inode(parent_path.dentry)); |
430 | if (!parent) { | 430 | if (!parent) { |
431 | parent = audit_init_parent(&parent_path); | 431 | parent = audit_init_parent(&parent_path); |
432 | if (IS_ERR(parent)) { | 432 | if (IS_ERR(parent)) { |
@@ -482,7 +482,7 @@ static int audit_watch_handle_event(struct fsnotify_group *group, | |||
482 | 482 | ||
483 | switch (data_type) { | 483 | switch (data_type) { |
484 | case (FSNOTIFY_EVENT_PATH): | 484 | case (FSNOTIFY_EVENT_PATH): |
485 | inode = ((struct path *)data)->dentry->d_inode; | 485 | inode = d_backing_inode(((struct path *)data)->dentry); |
486 | break; | 486 | break; |
487 | case (FSNOTIFY_EVENT_INODE): | 487 | case (FSNOTIFY_EVENT_INODE): |
488 | inode = (struct inode *)data; | 488 | inode = (struct inode *)data; |
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index dc4ae70a7413..9fb9d1cb83ce 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1629,7 +1629,7 @@ retry: | |||
1629 | rcu_read_lock(); | 1629 | rcu_read_lock(); |
1630 | seq = read_seqbegin(&rename_lock); | 1630 | seq = read_seqbegin(&rename_lock); |
1631 | for(;;) { | 1631 | for(;;) { |
1632 | struct inode *inode = d->d_inode; | 1632 | struct inode *inode = d_backing_inode(d); |
1633 | if (inode && unlikely(!hlist_empty(&inode->i_fsnotify_marks))) { | 1633 | if (inode && unlikely(!hlist_empty(&inode->i_fsnotify_marks))) { |
1634 | struct audit_chunk *chunk; | 1634 | struct audit_chunk *chunk; |
1635 | chunk = audit_tree_lookup(inode); | 1635 | chunk = audit_tree_lookup(inode); |
@@ -1754,7 +1754,7 @@ void __audit_inode(struct filename *name, const struct dentry *dentry, | |||
1754 | unsigned int flags) | 1754 | unsigned int flags) |
1755 | { | 1755 | { |
1756 | struct audit_context *context = current->audit_context; | 1756 | struct audit_context *context = current->audit_context; |
1757 | const struct inode *inode = dentry->d_inode; | 1757 | const struct inode *inode = d_backing_inode(dentry); |
1758 | struct audit_names *n; | 1758 | struct audit_names *n; |
1759 | bool parent = flags & AUDIT_INODE_PARENT; | 1759 | bool parent = flags & AUDIT_INODE_PARENT; |
1760 | 1760 | ||
@@ -1853,7 +1853,7 @@ void __audit_inode_child(const struct inode *parent, | |||
1853 | const unsigned char type) | 1853 | const unsigned char type) |
1854 | { | 1854 | { |
1855 | struct audit_context *context = current->audit_context; | 1855 | struct audit_context *context = current->audit_context; |
1856 | const struct inode *inode = dentry->d_inode; | 1856 | const struct inode *inode = d_backing_inode(dentry); |
1857 | const char *dname = dentry->d_name.name; | 1857 | const char *dname = dentry->d_name.name; |
1858 | struct audit_names *n, *found_parent = NULL, *found_child = NULL; | 1858 | struct audit_names *n, *found_parent = NULL, *found_child = NULL; |
1859 | 1859 | ||
@@ -2361,7 +2361,6 @@ static void audit_log_task(struct audit_buffer *ab) | |||
2361 | kuid_t auid, uid; | 2361 | kuid_t auid, uid; |
2362 | kgid_t gid; | 2362 | kgid_t gid; |
2363 | unsigned int sessionid; | 2363 | unsigned int sessionid; |
2364 | struct mm_struct *mm = current->mm; | ||
2365 | char comm[sizeof(current->comm)]; | 2364 | char comm[sizeof(current->comm)]; |
2366 | 2365 | ||
2367 | auid = audit_get_loginuid(current); | 2366 | auid = audit_get_loginuid(current); |
@@ -2376,13 +2375,7 @@ static void audit_log_task(struct audit_buffer *ab) | |||
2376 | audit_log_task_context(ab); | 2375 | audit_log_task_context(ab); |
2377 | audit_log_format(ab, " pid=%d comm=", task_pid_nr(current)); | 2376 | audit_log_format(ab, " pid=%d comm=", task_pid_nr(current)); |
2378 | audit_log_untrustedstring(ab, get_task_comm(comm, current)); | 2377 | audit_log_untrustedstring(ab, get_task_comm(comm, current)); |
2379 | if (mm) { | 2378 | audit_log_d_path_exe(ab, current->mm); |
2380 | down_read(&mm->mmap_sem); | ||
2381 | if (mm->exe_file) | ||
2382 | audit_log_d_path(ab, " exe=", &mm->exe_file->f_path); | ||
2383 | up_read(&mm->mmap_sem); | ||
2384 | } else | ||
2385 | audit_log_format(ab, " exe=(null)"); | ||
2386 | } | 2379 | } |
2387 | 2380 | ||
2388 | /** | 2381 | /** |
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 4139a0f8b558..54f0e7fcd0e2 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -357,8 +357,8 @@ select_insn: | |||
357 | ALU64_MOD_X: | 357 | ALU64_MOD_X: |
358 | if (unlikely(SRC == 0)) | 358 | if (unlikely(SRC == 0)) |
359 | return 0; | 359 | return 0; |
360 | tmp = DST; | 360 | div64_u64_rem(DST, SRC, &tmp); |
361 | DST = do_div(tmp, SRC); | 361 | DST = tmp; |
362 | CONT; | 362 | CONT; |
363 | ALU_MOD_X: | 363 | ALU_MOD_X: |
364 | if (unlikely(SRC == 0)) | 364 | if (unlikely(SRC == 0)) |
@@ -367,8 +367,8 @@ select_insn: | |||
367 | DST = do_div(tmp, (u32) SRC); | 367 | DST = do_div(tmp, (u32) SRC); |
368 | CONT; | 368 | CONT; |
369 | ALU64_MOD_K: | 369 | ALU64_MOD_K: |
370 | tmp = DST; | 370 | div64_u64_rem(DST, IMM, &tmp); |
371 | DST = do_div(tmp, IMM); | 371 | DST = tmp; |
372 | CONT; | 372 | CONT; |
373 | ALU_MOD_K: | 373 | ALU_MOD_K: |
374 | tmp = (u32) DST; | 374 | tmp = (u32) DST; |
@@ -377,7 +377,7 @@ select_insn: | |||
377 | ALU64_DIV_X: | 377 | ALU64_DIV_X: |
378 | if (unlikely(SRC == 0)) | 378 | if (unlikely(SRC == 0)) |
379 | return 0; | 379 | return 0; |
380 | do_div(DST, SRC); | 380 | DST = div64_u64(DST, SRC); |
381 | CONT; | 381 | CONT; |
382 | ALU_DIV_X: | 382 | ALU_DIV_X: |
383 | if (unlikely(SRC == 0)) | 383 | if (unlikely(SRC == 0)) |
@@ -387,7 +387,7 @@ select_insn: | |||
387 | DST = (u32) tmp; | 387 | DST = (u32) tmp; |
388 | CONT; | 388 | CONT; |
389 | ALU64_DIV_K: | 389 | ALU64_DIV_K: |
390 | do_div(DST, IMM); | 390 | DST = div64_u64(DST, IMM); |
391 | CONT; | 391 | CONT; |
392 | ALU_DIV_K: | 392 | ALU_DIV_K: |
393 | tmp = (u32) DST; | 393 | tmp = (u32) DST; |
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 630a7bac1e51..47dcd3aa6e23 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1397,7 +1397,8 @@ peek_stack: | |||
1397 | /* tell verifier to check for equivalent states | 1397 | /* tell verifier to check for equivalent states |
1398 | * after every call and jump | 1398 | * after every call and jump |
1399 | */ | 1399 | */ |
1400 | env->explored_states[t + 1] = STATE_LIST_MARK; | 1400 | if (t + 1 < insn_cnt) |
1401 | env->explored_states[t + 1] = STATE_LIST_MARK; | ||
1401 | } else { | 1402 | } else { |
1402 | /* conditional jump with two edges */ | 1403 | /* conditional jump with two edges */ |
1403 | ret = push_insn(t, t + 1, FALLTHROUGH, env); | 1404 | ret = push_insn(t, t + 1, FALLTHROUGH, env); |
@@ -1636,6 +1637,8 @@ static int do_check(struct verifier_env *env) | |||
1636 | if (err) | 1637 | if (err) |
1637 | return err; | 1638 | return err; |
1638 | 1639 | ||
1640 | src_reg_type = regs[insn->src_reg].type; | ||
1641 | |||
1639 | /* check that memory (src_reg + off) is readable, | 1642 | /* check that memory (src_reg + off) is readable, |
1640 | * the state of dst_reg will be updated by this func | 1643 | * the state of dst_reg will be updated by this func |
1641 | */ | 1644 | */ |
@@ -1645,9 +1648,12 @@ static int do_check(struct verifier_env *env) | |||
1645 | if (err) | 1648 | if (err) |
1646 | return err; | 1649 | return err; |
1647 | 1650 | ||
1648 | src_reg_type = regs[insn->src_reg].type; | 1651 | if (BPF_SIZE(insn->code) != BPF_W) { |
1652 | insn_idx++; | ||
1653 | continue; | ||
1654 | } | ||
1649 | 1655 | ||
1650 | if (insn->imm == 0 && BPF_SIZE(insn->code) == BPF_W) { | 1656 | if (insn->imm == 0) { |
1651 | /* saw a valid insn | 1657 | /* saw a valid insn |
1652 | * dst_reg = *(u32 *)(src_reg + off) | 1658 | * dst_reg = *(u32 *)(src_reg + off) |
1653 | * use reserved 'imm' field to mark this insn | 1659 | * use reserved 'imm' field to mark this insn |
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 81aa3a4ece9f..1a3bf48743ce 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -913,10 +913,30 @@ static void put_ctx(struct perf_event_context *ctx) | |||
913 | * Those places that change perf_event::ctx will hold both | 913 | * Those places that change perf_event::ctx will hold both |
914 | * perf_event_ctx::mutex of the 'old' and 'new' ctx value. | 914 | * perf_event_ctx::mutex of the 'old' and 'new' ctx value. |
915 | * | 915 | * |
916 | * Lock ordering is by mutex address. There is one other site where | 916 | * Lock ordering is by mutex address. There are two other sites where |
917 | * perf_event_context::mutex nests and that is put_event(). But remember that | 917 | * perf_event_context::mutex nests and those are: |
918 | * that is a parent<->child context relation, and migration does not affect | 918 | * |
919 | * children, therefore these two orderings should not interact. | 919 | * - perf_event_exit_task_context() [ child , 0 ] |
920 | * __perf_event_exit_task() | ||
921 | * sync_child_event() | ||
922 | * put_event() [ parent, 1 ] | ||
923 | * | ||
924 | * - perf_event_init_context() [ parent, 0 ] | ||
925 | * inherit_task_group() | ||
926 | * inherit_group() | ||
927 | * inherit_event() | ||
928 | * perf_event_alloc() | ||
929 | * perf_init_event() | ||
930 | * perf_try_init_event() [ child , 1 ] | ||
931 | * | ||
932 | * While it appears there is an obvious deadlock here -- the parent and child | ||
933 | * nesting levels are inverted between the two. This is in fact safe because | ||
934 | * life-time rules separate them. That is an exiting task cannot fork, and a | ||
935 | * spawning task cannot (yet) exit. | ||
936 | * | ||
937 | * But remember that that these are parent<->child context relations, and | ||
938 | * migration does not affect children, therefore these two orderings should not | ||
939 | * interact. | ||
920 | * | 940 | * |
921 | * The change in perf_event::ctx does not affect children (as claimed above) | 941 | * The change in perf_event::ctx does not affect children (as claimed above) |
922 | * because the sys_perf_event_open() case will install a new event and break | 942 | * because the sys_perf_event_open() case will install a new event and break |
@@ -3657,9 +3677,6 @@ static void perf_remove_from_owner(struct perf_event *event) | |||
3657 | } | 3677 | } |
3658 | } | 3678 | } |
3659 | 3679 | ||
3660 | /* | ||
3661 | * Called when the last reference to the file is gone. | ||
3662 | */ | ||
3663 | static void put_event(struct perf_event *event) | 3680 | static void put_event(struct perf_event *event) |
3664 | { | 3681 | { |
3665 | struct perf_event_context *ctx; | 3682 | struct perf_event_context *ctx; |
@@ -3697,6 +3714,9 @@ int perf_event_release_kernel(struct perf_event *event) | |||
3697 | } | 3714 | } |
3698 | EXPORT_SYMBOL_GPL(perf_event_release_kernel); | 3715 | EXPORT_SYMBOL_GPL(perf_event_release_kernel); |
3699 | 3716 | ||
3717 | /* | ||
3718 | * Called when the last reference to the file is gone. | ||
3719 | */ | ||
3700 | static int perf_release(struct inode *inode, struct file *file) | 3720 | static int perf_release(struct inode *inode, struct file *file) |
3701 | { | 3721 | { |
3702 | put_event(file->private_data); | 3722 | put_event(file->private_data); |
@@ -7364,7 +7384,12 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) | |||
7364 | return -ENODEV; | 7384 | return -ENODEV; |
7365 | 7385 | ||
7366 | if (event->group_leader != event) { | 7386 | if (event->group_leader != event) { |
7367 | ctx = perf_event_ctx_lock(event->group_leader); | 7387 | /* |
7388 | * This ctx->mutex can nest when we're called through | ||
7389 | * inheritance. See the perf_event_ctx_lock_nested() comment. | ||
7390 | */ | ||
7391 | ctx = perf_event_ctx_lock_nested(event->group_leader, | ||
7392 | SINGLE_DEPTH_NESTING); | ||
7368 | BUG_ON(!ctx); | 7393 | BUG_ON(!ctx); |
7369 | } | 7394 | } |
7370 | 7395 | ||
diff --git a/kernel/fork.c b/kernel/fork.c
index f2c1e7352298..03c1eaaa6ef5 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -74,6 +74,7 @@ | |||
74 | #include <linux/uprobes.h> | 74 | #include <linux/uprobes.h> |
75 | #include <linux/aio.h> | 75 | #include <linux/aio.h> |
76 | #include <linux/compiler.h> | 76 | #include <linux/compiler.h> |
77 | #include <linux/sysctl.h> | ||
77 | 78 | ||
78 | #include <asm/pgtable.h> | 79 | #include <asm/pgtable.h> |
79 | #include <asm/pgalloc.h> | 80 | #include <asm/pgalloc.h> |
@@ -88,6 +89,16 @@ | |||
88 | #include <trace/events/task.h> | 89 | #include <trace/events/task.h> |
89 | 90 | ||
90 | /* | 91 | /* |
92 | * Minimum number of threads to boot the kernel | ||
93 | */ | ||
94 | #define MIN_THREADS 20 | ||
95 | |||
96 | /* | ||
97 | * Maximum number of threads | ||
98 | */ | ||
99 | #define MAX_THREADS FUTEX_TID_MASK | ||
100 | |||
101 | /* | ||
91 | * Protected counters by write_lock_irq(&tasklist_lock) | 102 | * Protected counters by write_lock_irq(&tasklist_lock) |
92 | */ | 103 | */ |
93 | unsigned long total_forks; /* Handle normal Linux uptimes. */ | 104 | unsigned long total_forks; /* Handle normal Linux uptimes. */ |
@@ -253,7 +264,30 @@ EXPORT_SYMBOL_GPL(__put_task_struct); | |||
253 | 264 | ||
254 | void __init __weak arch_task_cache_init(void) { } | 265 | void __init __weak arch_task_cache_init(void) { } |
255 | 266 | ||
256 | void __init fork_init(unsigned long mempages) | 267 | /* |
268 | * set_max_threads | ||
269 | */ | ||
270 | static void set_max_threads(unsigned int max_threads_suggested) | ||
271 | { | ||
272 | u64 threads; | ||
273 | |||
274 | /* | ||
275 | * The number of threads shall be limited such that the thread | ||
276 | * structures may only consume a small part of the available memory. | ||
277 | */ | ||
278 | if (fls64(totalram_pages) + fls64(PAGE_SIZE) > 64) | ||
279 | threads = MAX_THREADS; | ||
280 | else | ||
281 | threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE, | ||
282 | (u64) THREAD_SIZE * 8UL); | ||
283 | |||
284 | if (threads > max_threads_suggested) | ||
285 | threads = max_threads_suggested; | ||
286 | |||
287 | max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS); | ||
288 | } | ||
289 | |||
290 | void __init fork_init(void) | ||
257 | { | 291 | { |
258 | #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR | 292 | #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR |
259 | #ifndef ARCH_MIN_TASKALIGN | 293 | #ifndef ARCH_MIN_TASKALIGN |
@@ -268,18 +302,7 @@ void __init fork_init(unsigned long mempages) | |||
268 | /* do the arch specific task caches init */ | 302 | /* do the arch specific task caches init */ |
269 | arch_task_cache_init(); | 303 | arch_task_cache_init(); |
270 | 304 | ||
271 | /* | 305 | set_max_threads(MAX_THREADS); |
272 | * The default maximum number of threads is set to a safe | ||
273 | * value: the thread structures can take up at most half | ||
274 | * of memory. | ||
275 | */ | ||
276 | max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE); | ||
277 | |||
278 | /* | ||
279 | * we need to allow at least 20 threads to boot a system | ||
280 | */ | ||
281 | if (max_threads < 20) | ||
282 | max_threads = 20; | ||
283 | 306 | ||
284 | init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2; | 307 | init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2; |
285 | init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2; | 308 | init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2; |
@@ -380,6 +403,9 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) | |||
380 | */ | 403 | */ |
381 | down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING); | 404 | down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING); |
382 | 405 | ||
406 | /* No ordering required: file already has been exposed. */ | ||
407 | RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); | ||
408 | |||
383 | mm->total_vm = oldmm->total_vm; | 409 | mm->total_vm = oldmm->total_vm; |
384 | mm->shared_vm = oldmm->shared_vm; | 410 | mm->shared_vm = oldmm->shared_vm; |
385 | mm->exec_vm = oldmm->exec_vm; | 411 | mm->exec_vm = oldmm->exec_vm; |
@@ -505,7 +531,13 @@ static inline void mm_free_pgd(struct mm_struct *mm) | |||
505 | pgd_free(mm, mm->pgd); | 531 | pgd_free(mm, mm->pgd); |
506 | } | 532 | } |
507 | #else | 533 | #else |
508 | #define dup_mmap(mm, oldmm) (0) | 534 | static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) |
535 | { | ||
536 | down_write(&oldmm->mmap_sem); | ||
537 | RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); | ||
538 | up_write(&oldmm->mmap_sem); | ||
539 | return 0; | ||
540 | } | ||
509 | #define mm_alloc_pgd(mm) (0) | 541 | #define mm_alloc_pgd(mm) (0) |
510 | #define mm_free_pgd(mm) | 542 | #define mm_free_pgd(mm) |
511 | #endif /* CONFIG_MMU */ | 543 | #endif /* CONFIG_MMU */ |
@@ -674,34 +706,53 @@ void mmput(struct mm_struct *mm) | |||
674 | } | 706 | } |
675 | EXPORT_SYMBOL_GPL(mmput); | 707 | EXPORT_SYMBOL_GPL(mmput); |
676 | 708 | ||
709 | /** | ||
710 | * set_mm_exe_file - change a reference to the mm's executable file | ||
711 | * | ||
712 | * This changes mm's executable file (shown as symlink /proc/[pid]/exe). | ||
713 | * | ||
714 | * Main users are mmput() and sys_execve(). Callers prevent concurrent | ||
715 | * invocations: in mmput() nobody alive left, in execve task is single | ||
716 | * threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set the | ||
717 | * mm->exe_file, but does so without using set_mm_exe_file() in order | ||
718 | * to do avoid the need for any locks. | ||
719 | */ | ||
677 | void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) | 720 | void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) |
678 | { | 721 | { |
722 | struct file *old_exe_file; | ||
723 | |||
724 | /* | ||
725 | * It is safe to dereference the exe_file without RCU as | ||
726 | * this function is only called if nobody else can access | ||
727 | * this mm -- see comment above for justification. | ||
728 | */ | ||
729 | old_exe_file = rcu_dereference_raw(mm->exe_file); | ||
730 | |||
679 | if (new_exe_file) | 731 | if (new_exe_file) |
680 | get_file(new_exe_file); | 732 | get_file(new_exe_file); |
681 | if (mm->exe_file) | 733 | rcu_assign_pointer(mm->exe_file, new_exe_file); |
682 | fput(mm->exe_file); | 734 | if (old_exe_file) |
683 | mm->exe_file = new_exe_file; | 735 | fput(old_exe_file); |
684 | } | 736 | } |
685 | 737 | ||
738 | /** | ||
739 | * get_mm_exe_file - acquire a reference to the mm's executable file | ||
740 | * | ||
741 | * Returns %NULL if mm has no associated executable file. | ||
742 | * User must release file via fput(). | ||
743 | */ | ||
686 | struct file *get_mm_exe_file(struct mm_struct *mm) | 744 | struct file *get_mm_exe_file(struct mm_struct *mm) |
687 | { | 745 | { |
688 | struct file *exe_file; | 746 | struct file *exe_file; |
689 | 747 | ||
690 | /* We need mmap_sem to protect against races with removal of exe_file */ | 748 | rcu_read_lock(); |
691 | down_read(&mm->mmap_sem); | 749 | exe_file = rcu_dereference(mm->exe_file); |
692 | exe_file = mm->exe_file; | 750 | if (exe_file && !get_file_rcu(exe_file)) |
693 | if (exe_file) | 751 | exe_file = NULL; |
694 | get_file(exe_file); | 752 | rcu_read_unlock(); |
695 | up_read(&mm->mmap_sem); | ||
696 | return exe_file; | 753 | return exe_file; |
697 | } | 754 | } |
698 | 755 | EXPORT_SYMBOL(get_mm_exe_file); | |
699 | static void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm) | ||
700 | { | ||
701 | /* It's safe to write the exe_file pointer without exe_file_lock because | ||
702 | * this is called during fork when the task is not yet in /proc */ | ||
703 | newmm->exe_file = get_mm_exe_file(oldmm); | ||
704 | } | ||
705 | 756 | ||
706 | /** | 757 | /** |
707 | * get_task_mm - acquire a reference to the task's mm | 758 | * get_task_mm - acquire a reference to the task's mm |
@@ -864,8 +915,6 @@ static struct mm_struct *dup_mm(struct task_struct *tsk) | |||
864 | if (!mm_init(mm, tsk)) | 915 | if (!mm_init(mm, tsk)) |
865 | goto fail_nomem; | 916 | goto fail_nomem; |
866 | 917 | ||
867 | dup_mm_exe_file(oldmm, mm); | ||
868 | |||
869 | err = dup_mmap(mm, oldmm); | 918 | err = dup_mmap(mm, oldmm); |
870 | if (err) | 919 | if (err) |
871 | goto free_pt; | 920 | goto free_pt; |
@@ -1403,10 +1452,11 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1403 | goto bad_fork_cleanup_io; | 1452 | goto bad_fork_cleanup_io; |
1404 | 1453 | ||
1405 | if (pid != &init_struct_pid) { | 1454 | if (pid != &init_struct_pid) { |
1406 | retval = -ENOMEM; | ||
1407 | pid = alloc_pid(p->nsproxy->pid_ns_for_children); | 1455 | pid = alloc_pid(p->nsproxy->pid_ns_for_children); |
1408 | if (!pid) | 1456 | if (IS_ERR(pid)) { |
1457 | retval = PTR_ERR(pid); | ||
1409 | goto bad_fork_cleanup_io; | 1458 | goto bad_fork_cleanup_io; |
1459 | } | ||
1410 | } | 1460 | } |
1411 | 1461 | ||
1412 | p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; | 1462 | p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; |
@@ -2000,3 +2050,26 @@ int unshare_files(struct files_struct **displaced) | |||
2000 | task_unlock(task); | 2050 | task_unlock(task); |
2001 | return 0; | 2051 | return 0; |
2002 | } | 2052 | } |
2053 | |||
2054 | int sysctl_max_threads(struct ctl_table *table, int write, | ||
2055 | void __user *buffer, size_t *lenp, loff_t *ppos) | ||
2056 | { | ||
2057 | struct ctl_table t; | ||
2058 | int ret; | ||
2059 | int threads = max_threads; | ||
2060 | int min = MIN_THREADS; | ||
2061 | int max = MAX_THREADS; | ||
2062 | |||
2063 | t = *table; | ||
2064 | t.data = &threads; | ||
2065 | t.extra1 = &min; | ||
2066 | t.extra2 = &max; | ||
2067 | |||
2068 | ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); | ||
2069 | if (ret || !write) | ||
2070 | return ret; | ||
2071 | |||
2072 | set_max_threads(threads); | ||
2073 | |||
2074 | return 0; | ||
2075 | } | ||
diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
index b358a802fd18..a744098e4eb7 100644
--- a/kernel/gcov/base.c
+++ b/kernel/gcov/base.c
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/mutex.h> | 20 | #include <linux/mutex.h> |
21 | #include <linux/sched.h> | ||
21 | #include "gcov.h" | 22 | #include "gcov.h" |
22 | 23 | ||
23 | static int gcov_events_enabled; | 24 | static int gcov_events_enabled; |
@@ -107,8 +108,10 @@ void gcov_enable_events(void) | |||
107 | gcov_events_enabled = 1; | 108 | gcov_events_enabled = 1; |
108 | 109 | ||
109 | /* Perform event callback for previously registered entries. */ | 110 | /* Perform event callback for previously registered entries. */ |
110 | while ((info = gcov_info_next(info))) | 111 | while ((info = gcov_info_next(info))) { |
111 | gcov_event(GCOV_ADD, info); | 112 | gcov_event(GCOV_ADD, info); |
113 | cond_resched(); | ||
114 | } | ||
112 | 115 | ||
113 | mutex_unlock(&gcov_lock); | 116 | mutex_unlock(&gcov_lock); |
114 | } | 117 | } |
diff --git a/kernel/irq/dummychip.c b/kernel/irq/dummychip.c
index 988dc58e8847..2feb6feca0cc 100644
--- a/kernel/irq/dummychip.c
+++ b/kernel/irq/dummychip.c
@@ -57,5 +57,6 @@ struct irq_chip dummy_irq_chip = { | |||
57 | .irq_ack = noop, | 57 | .irq_ack = noop, |
58 | .irq_mask = noop, | 58 | .irq_mask = noop, |
59 | .irq_unmask = noop, | 59 | .irq_unmask = noop, |
60 | .flags = IRQCHIP_SKIP_SET_WAKE, | ||
60 | }; | 61 | }; |
61 | EXPORT_SYMBOL_GPL(dummy_irq_chip); | 62 | EXPORT_SYMBOL_GPL(dummy_irq_chip); |
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 38c25b1f2fd5..7a36fdcca5bf 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -707,7 +707,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image, | |||
707 | do { | 707 | do { |
708 | unsigned long pfn, epfn, addr, eaddr; | 708 | unsigned long pfn, epfn, addr, eaddr; |
709 | 709 | ||
710 | pages = kimage_alloc_pages(GFP_KERNEL, order); | 710 | pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order); |
711 | if (!pages) | 711 | if (!pages) |
712 | break; | 712 | break; |
713 | pfn = page_to_pfn(pages); | 713 | pfn = page_to_pfn(pages); |
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index ba77ab5f64dd..a0831e1b99f4 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -551,7 +551,21 @@ static void print_lockdep_cache(struct lockdep_map *lock) | |||
551 | 551 | ||
552 | static void print_lock(struct held_lock *hlock) | 552 | static void print_lock(struct held_lock *hlock) |
553 | { | 553 | { |
554 | print_lock_name(hlock_class(hlock)); | 554 | /* |
555 | * We can be called locklessly through debug_show_all_locks() so be | ||
556 | * extra careful, the hlock might have been released and cleared. | ||
557 | */ | ||
558 | unsigned int class_idx = hlock->class_idx; | ||
559 | |||
560 | /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */ | ||
561 | barrier(); | ||
562 | |||
563 | if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) { | ||
564 | printk("<RELEASED>\n"); | ||
565 | return; | ||
566 | } | ||
567 | |||
568 | print_lock_name(lock_classes + class_idx - 1); | ||
555 | printk(", at: "); | 569 | printk(", at: "); |
556 | print_ip_sym(hlock->acquire_ip); | 570 | print_ip_sym(hlock->acquire_ip); |
557 | } | 571 | } |
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index b73279367087..b025295f4966 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -265,15 +265,17 @@ struct task_struct *rt_mutex_get_top_task(struct task_struct *task) | |||
265 | } | 265 | } |
266 | 266 | ||
267 | /* | 267 | /* |
268 | * Called by sched_setscheduler() to check whether the priority change | 268 | * Called by sched_setscheduler() to get the priority which will be |
269 | * is overruled by a possible priority boosting. | 269 | * effective after the change. |
270 | */ | 270 | */ |
271 | int rt_mutex_check_prio(struct task_struct *task, int newprio) | 271 | int rt_mutex_get_effective_prio(struct task_struct *task, int newprio) |
272 | { | 272 | { |
273 | if (!task_has_pi_waiters(task)) | 273 | if (!task_has_pi_waiters(task)) |
274 | return 0; | 274 | return newprio; |
275 | 275 | ||
276 | return task_top_pi_waiter(task)->task->prio <= newprio; | 276 | if (task_top_pi_waiter(task)->task->prio <= newprio) |
277 | return task_top_pi_waiter(task)->task->prio; | ||
278 | return newprio; | ||
277 | } | 279 | } |
278 | 280 | ||
279 | /* | 281 | /* |
diff --git a/kernel/module.c b/kernel/module.c
index 650b038ae520..42a1d2afb217 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -387,9 +387,9 @@ static bool check_symbol(const struct symsearch *syms, | |||
387 | pr_warn("Symbol %s is marked as UNUSED, however this module is " | 387 | pr_warn("Symbol %s is marked as UNUSED, however this module is " |
388 | "using it.\n", fsa->name); | 388 | "using it.\n", fsa->name); |
389 | pr_warn("This symbol will go away in the future.\n"); | 389 | pr_warn("This symbol will go away in the future.\n"); |
390 | pr_warn("Please evalute if this is the right api to use and if " | 390 | pr_warn("Please evaluate if this is the right api to use and " |
391 | "it really is, submit a report the linux kernel " | 391 | "if it really is, submit a report to the linux kernel " |
392 | "mailinglist together with submitting your code for " | 392 | "mailing list together with submitting your code for " |
393 | "inclusion.\n"); | 393 | "inclusion.\n"); |
394 | } | 394 | } |
395 | #endif | 395 | #endif |
@@ -2511,7 +2511,8 @@ static int copy_module_from_user(const void __user *umod, unsigned long len, | |||
2511 | return err; | 2511 | return err; |
2512 | 2512 | ||
2513 | /* Suck in entire file: we'll want most of it. */ | 2513 | /* Suck in entire file: we'll want most of it. */ |
2514 | info->hdr = vmalloc(info->len); | 2514 | info->hdr = __vmalloc(info->len, |
2515 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, PAGE_KERNEL); | ||
2515 | if (!info->hdr) | 2516 | if (!info->hdr) |
2516 | return -ENOMEM; | 2517 | return -ENOMEM; |
2517 | 2518 | ||
diff --git a/kernel/params.c b/kernel/params.c
index 728e05b167de..a22d6a759b1a 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -173,9 +173,9 @@ static char *next_arg(char *args, char **param, char **val) | |||
173 | if (args[i-1] == '"') | 173 | if (args[i-1] == '"') |
174 | args[i-1] = '\0'; | 174 | args[i-1] = '\0'; |
175 | } | 175 | } |
176 | if (quoted && args[i-1] == '"') | ||
177 | args[i-1] = '\0'; | ||
178 | } | 176 | } |
177 | if (quoted && args[i-1] == '"') | ||
178 | args[i-1] = '\0'; | ||
179 | 179 | ||
180 | if (args[i]) { | 180 | if (args[i]) { |
181 | args[i] = '\0'; | 181 | args[i] = '\0'; |
diff --git a/kernel/pid.c b/kernel/pid.c
index cd36a5e0d173..4fd07d5b7baf 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -182,7 +182,7 @@ static int alloc_pidmap(struct pid_namespace *pid_ns) | |||
182 | spin_unlock_irq(&pidmap_lock); | 182 | spin_unlock_irq(&pidmap_lock); |
183 | kfree(page); | 183 | kfree(page); |
184 | if (unlikely(!map->page)) | 184 | if (unlikely(!map->page)) |
185 | break; | 185 | return -ENOMEM; |
186 | } | 186 | } |
187 | if (likely(atomic_read(&map->nr_free))) { | 187 | if (likely(atomic_read(&map->nr_free))) { |
188 | for ( ; ; ) { | 188 | for ( ; ; ) { |
@@ -210,7 +210,7 @@ static int alloc_pidmap(struct pid_namespace *pid_ns) | |||
210 | } | 210 | } |
211 | pid = mk_pid(pid_ns, map, offset); | 211 | pid = mk_pid(pid_ns, map, offset); |
212 | } | 212 | } |
213 | return -1; | 213 | return -EAGAIN; |
214 | } | 214 | } |
215 | 215 | ||
216 | int next_pidmap(struct pid_namespace *pid_ns, unsigned int last) | 216 | int next_pidmap(struct pid_namespace *pid_ns, unsigned int last) |
@@ -301,17 +301,20 @@ struct pid *alloc_pid(struct pid_namespace *ns) | |||
301 | int i, nr; | 301 | int i, nr; |
302 | struct pid_namespace *tmp; | 302 | struct pid_namespace *tmp; |
303 | struct upid *upid; | 303 | struct upid *upid; |
304 | int retval = -ENOMEM; | ||
304 | 305 | ||
305 | pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL); | 306 | pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL); |
306 | if (!pid) | 307 | if (!pid) |
307 | goto out; | 308 | return ERR_PTR(retval); |
308 | 309 | ||
309 | tmp = ns; | 310 | tmp = ns; |
310 | pid->level = ns->level; | 311 | pid->level = ns->level; |
311 | for (i = ns->level; i >= 0; i--) { | 312 | for (i = ns->level; i >= 0; i--) { |
312 | nr = alloc_pidmap(tmp); | 313 | nr = alloc_pidmap(tmp); |
313 | if (nr < 0) | 314 | if (IS_ERR_VALUE(nr)) { |
315 | retval = nr; | ||
314 | goto out_free; | 316 | goto out_free; |
317 | } | ||
315 | 318 | ||
316 | pid->numbers[i].nr = nr; | 319 | pid->numbers[i].nr = nr; |
317 | pid->numbers[i].ns = tmp; | 320 | pid->numbers[i].ns = tmp; |
@@ -339,7 +342,6 @@ struct pid *alloc_pid(struct pid_namespace *ns) | |||
339 | } | 342 | } |
340 | spin_unlock_irq(&pidmap_lock); | 343 | spin_unlock_irq(&pidmap_lock); |
341 | 344 | ||
342 | out: | ||
343 | return pid; | 345 | return pid; |
344 | 346 | ||
345 | out_unlock: | 347 | out_unlock: |
@@ -351,8 +353,7 @@ out_free: | |||
351 | free_pidmap(pid->numbers + i); | 353 | free_pidmap(pid->numbers + i); |
352 | 354 | ||
353 | kmem_cache_free(ns->pid_cachep, pid); | 355 | kmem_cache_free(ns->pid_cachep, pid); |
354 | pid = NULL; | 356 | return ERR_PTR(retval); |
355 | goto out; | ||
356 | } | 357 | } |
357 | 358 | ||
358 | void disable_pid_allocation(struct pid_namespace *ns) | 359 | void disable_pid_allocation(struct pid_namespace *ns) |
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 879edfc5ee52..c099b082cd02 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2017,24 +2017,6 @@ int add_preferred_console(char *name, int idx, char *options) | |||
2017 | return __add_preferred_console(name, idx, options, NULL); | 2017 | return __add_preferred_console(name, idx, options, NULL); |
2018 | } | 2018 | } |
2019 | 2019 | ||
2020 | int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options) | ||
2021 | { | ||
2022 | struct console_cmdline *c; | ||
2023 | int i; | ||
2024 | |||
2025 | for (i = 0, c = console_cmdline; | ||
2026 | i < MAX_CMDLINECONSOLES && c->name[0]; | ||
2027 | i++, c++) | ||
2028 | if (strcmp(c->name, name) == 0 && c->index == idx) { | ||
2029 | strlcpy(c->name, name_new, sizeof(c->name)); | ||
2030 | c->options = options; | ||
2031 | c->index = idx_new; | ||
2032 | return i; | ||
2033 | } | ||
2034 | /* not found */ | ||
2035 | return -1; | ||
2036 | } | ||
2037 | |||
2038 | bool console_suspend_enabled = true; | 2020 | bool console_suspend_enabled = true; |
2039 | EXPORT_SYMBOL(console_suspend_enabled); | 2021 | EXPORT_SYMBOL(console_suspend_enabled); |
2040 | 2022 | ||
@@ -2436,9 +2418,6 @@ void register_console(struct console *newcon) | |||
2436 | if (preferred_console < 0 || bcon || !console_drivers) | 2418 | if (preferred_console < 0 || bcon || !console_drivers) |
2437 | preferred_console = selected_console; | 2419 | preferred_console = selected_console; |
2438 | 2420 | ||
2439 | if (newcon->early_setup) | ||
2440 | newcon->early_setup(); | ||
2441 | |||
2442 | /* | 2421 | /* |
2443 | * See if we want to use this console driver. If we | 2422 | * See if we want to use this console driver. If we |
2444 | * didn't select a console we take the first one | 2423 | * didn't select a console we take the first one |
@@ -2464,23 +2443,27 @@ void register_console(struct console *newcon) | |||
2464 | for (i = 0, c = console_cmdline; | 2443 | for (i = 0, c = console_cmdline; |
2465 | i < MAX_CMDLINECONSOLES && c->name[0]; | 2444 | i < MAX_CMDLINECONSOLES && c->name[0]; |
2466 | i++, c++) { | 2445 | i++, c++) { |
2467 | BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name)); | 2446 | if (!newcon->match || |
2468 | if (strcmp(c->name, newcon->name) != 0) | 2447 | newcon->match(newcon, c->name, c->index, c->options) != 0) { |
2469 | continue; | 2448 | /* default matching */ |
2470 | if (newcon->index >= 0 && | 2449 | BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name)); |
2471 | newcon->index != c->index) | 2450 | if (strcmp(c->name, newcon->name) != 0) |
2472 | continue; | 2451 | continue; |
2473 | if (newcon->index < 0) | 2452 | if (newcon->index >= 0 && |
2474 | newcon->index = c->index; | 2453 | newcon->index != c->index) |
2454 | continue; | ||
2455 | if (newcon->index < 0) | ||
2456 | newcon->index = c->index; | ||
2475 | 2457 | ||
2476 | if (_braille_register_console(newcon, c)) | 2458 | if (_braille_register_console(newcon, c)) |
2477 | return; | 2459 | return; |
2460 | |||
2461 | if (newcon->setup && | ||
2462 | newcon->setup(newcon, c->options) != 0) | ||
2463 | break; | ||
2464 | } | ||
2478 | 2465 | ||
2479 | if (newcon->setup && | ||
2480 | newcon->setup(newcon, console_cmdline[i].options) != 0) | ||
2481 | break; | ||
2482 | newcon->flags |= CON_ENABLED; | 2466 | newcon->flags |= CON_ENABLED; |
2483 | newcon->index = c->index; | ||
2484 | if (i == selected_console) { | 2467 | if (i == selected_console) { |
2485 | newcon->flags |= CON_CONSDEV; | 2468 | newcon->flags |= CON_CONSDEV; |
2486 | preferred_console = selected_console; | 2469 | preferred_console = selected_console; |
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 227fec36b12a..c8e0e050a36a 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -456,8 +456,6 @@ static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p) | |||
456 | 456 | ||
457 | static int ptrace_detach(struct task_struct *child, unsigned int data) | 457 | static int ptrace_detach(struct task_struct *child, unsigned int data) |
458 | { | 458 | { |
459 | bool dead = false; | ||
460 | |||
461 | if (!valid_signal(data)) | 459 | if (!valid_signal(data)) |
462 | return -EIO; | 460 | return -EIO; |
463 | 461 | ||
@@ -467,18 +465,19 @@ static int ptrace_detach(struct task_struct *child, unsigned int data) | |||
467 | 465 | ||
468 | write_lock_irq(&tasklist_lock); | 466 | write_lock_irq(&tasklist_lock); |
469 | /* | 467 | /* |
470 | * This child can be already killed. Make sure de_thread() or | 468 | * We rely on ptrace_freeze_traced(). It can't be killed and |
471 | * our sub-thread doing do_wait() didn't do release_task() yet. | 469 | * untraced by another thread, it can't be a zombie. |
472 | */ | 470 | */ |
473 | if (child->ptrace) { | 471 | WARN_ON(!child->ptrace || child->exit_state); |
474 | child->exit_code = data; | 472 | /* |
475 | dead = __ptrace_detach(current, child); | 473 | * tasklist_lock avoids the race with wait_task_stopped(), see |
476 | } | 474 | * the comment in ptrace_resume(). |
475 | */ | ||
476 | child->exit_code = data; | ||
477 | __ptrace_detach(current, child); | ||
477 | write_unlock_irq(&tasklist_lock); | 478 | write_unlock_irq(&tasklist_lock); |
478 | 479 | ||
479 | proc_ptrace_connector(child, PTRACE_DETACH); | 480 | proc_ptrace_connector(child, PTRACE_DETACH); |
480 | if (unlikely(dead)) | ||
481 | release_task(child); | ||
482 | 481 | ||
483 | return 0; | 482 | return 0; |
484 | } | 483 | } |
@@ -697,6 +696,8 @@ static int ptrace_peek_siginfo(struct task_struct *child, | |||
697 | static int ptrace_resume(struct task_struct *child, long request, | 696 | static int ptrace_resume(struct task_struct *child, long request, |
698 | unsigned long data) | 697 | unsigned long data) |
699 | { | 698 | { |
699 | bool need_siglock; | ||
700 | |||
700 | if (!valid_signal(data)) | 701 | if (!valid_signal(data)) |
701 | return -EIO; | 702 | return -EIO; |
702 | 703 | ||
@@ -724,8 +725,26 @@ static int ptrace_resume(struct task_struct *child, long request, | |||
724 | user_disable_single_step(child); | 725 | user_disable_single_step(child); |
725 | } | 726 | } |
726 | 727 | ||
728 | /* | ||
729 | * Change ->exit_code and ->state under siglock to avoid the race | ||
730 | * with wait_task_stopped() in between; a non-zero ->exit_code will | ||
731 | * wrongly look like another report from tracee. | ||
732 | * | ||
733 | * Note that we need siglock even if ->exit_code == data and/or this | ||
734 | * status was not reported yet, the new status must not be cleared by | ||
735 | * wait_task_stopped() after resume. | ||
736 | * | ||
737 | * If data == 0 we do not care if wait_task_stopped() reports the old | ||
738 | * status and clears the code too; this can't race with the tracee, it | ||
739 | * takes siglock after resume. | ||
740 | */ | ||
741 | need_siglock = data && !thread_group_empty(current); | ||
742 | if (need_siglock) | ||
743 | spin_lock_irq(&child->sighand->siglock); | ||
727 | child->exit_code = data; | 744 | child->exit_code = data; |
728 | wake_up_state(child, __TASK_TRACED); | 745 | wake_up_state(child, __TASK_TRACED); |
746 | if (need_siglock) | ||
747 | spin_unlock_irq(&child->sighand->siglock); | ||
729 | 748 | ||
730 | return 0; | 749 | return 0; |
731 | } | 750 | } |
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 233165da782f..8cf7304b2867 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -162,11 +162,14 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); | |||
162 | static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO; | 162 | static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO; |
163 | module_param(kthread_prio, int, 0644); | 163 | module_param(kthread_prio, int, 0644); |
164 | 164 | ||
165 | /* Delay in jiffies for grace-period initialization delays. */ | 165 | /* Delay in jiffies for grace-period initialization delays, debug only. */ |
166 | static int gp_init_delay = IS_ENABLED(CONFIG_RCU_TORTURE_TEST_SLOW_INIT) | 166 | #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT |
167 | ? CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY | 167 | static int gp_init_delay = CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY; |
168 | : 0; | ||
169 | module_param(gp_init_delay, int, 0644); | 168 | module_param(gp_init_delay, int, 0644); |
169 | #else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */ | ||
170 | static const int gp_init_delay; | ||
171 | #endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */ | ||
172 | #define PER_RCU_NODE_PERIOD 10 /* Number of grace periods between delays. */ | ||
170 | 173 | ||
171 | /* | 174 | /* |
172 | * Track the rcutorture test sequence number and the update version | 175 | * Track the rcutorture test sequence number and the update version |
@@ -1843,9 +1846,8 @@ static int rcu_gp_init(struct rcu_state *rsp) | |||
1843 | raw_spin_unlock_irq(&rnp->lock); | 1846 | raw_spin_unlock_irq(&rnp->lock); |
1844 | cond_resched_rcu_qs(); | 1847 | cond_resched_rcu_qs(); |
1845 | ACCESS_ONCE(rsp->gp_activity) = jiffies; | 1848 | ACCESS_ONCE(rsp->gp_activity) = jiffies; |
1846 | if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_SLOW_INIT) && | 1849 | if (gp_init_delay > 0 && |
1847 | gp_init_delay > 0 && | 1850 | !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD))) |
1848 | !(rsp->gpnum % (rcu_num_nodes * 10))) | ||
1849 | schedule_timeout_uninterruptible(gp_init_delay); | 1851 | schedule_timeout_uninterruptible(gp_init_delay); |
1850 | } | 1852 | } |
1851 | 1853 | ||
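The rcu/tree.c change replaces a runtime IS_ENABLED() test with a compile-time shape: when CONFIG_RCU_TORTURE_TEST_SLOW_INIT is off, gp_init_delay is a const zero, so the "gp_init_delay > 0" branch in rcu_gp_init() folds away without any explicit check at the call site. A self-contained userspace analogue of the same pattern (names are hypothetical, not kernel code):

#include <stdio.h>

/* When the debug knob is not configured, the variable is a const zero and
 * the compiler can discard the slow path below at build time. */
#ifdef DEBUG_INIT_DELAY
static int init_delay = DEBUG_INIT_DELAY;	/* tunable in debug builds */
#else
static const int init_delay;			/* const 0: dead branch */
#endif

int main(void)
{
	if (init_delay > 0)			/* eliminated when const 0 */
		printf("delaying %d ticks\n", init_delay);
	printf("init done\n");
	return 0;
}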
diff --git a/kernel/relay.c b/kernel/relay.c index 5a56d3c8dc03..e9dbaeb8fd65 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
@@ -407,7 +407,7 @@ static inline void relay_set_buf_dentry(struct rchan_buf *buf, | |||
407 | struct dentry *dentry) | 407 | struct dentry *dentry) |
408 | { | 408 | { |
409 | buf->dentry = dentry; | 409 | buf->dentry = dentry; |
410 | buf->dentry->d_inode->i_size = buf->early_bytes; | 410 | d_inode(buf->dentry)->i_size = buf->early_bytes; |
411 | } | 411 | } |
412 | 412 | ||
413 | static struct dentry *relay_create_buf_file(struct rchan *chan, | 413 | static struct dentry *relay_create_buf_file(struct rchan *chan, |
@@ -733,7 +733,7 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length) | |||
733 | buf->padding[old_subbuf] = buf->prev_padding; | 733 | buf->padding[old_subbuf] = buf->prev_padding; |
734 | buf->subbufs_produced++; | 734 | buf->subbufs_produced++; |
735 | if (buf->dentry) | 735 | if (buf->dentry) |
736 | buf->dentry->d_inode->i_size += | 736 | d_inode(buf->dentry)->i_size += |
737 | buf->chan->subbuf_size - | 737 | buf->chan->subbuf_size - |
738 | buf->padding[old_subbuf]; | 738 | buf->padding[old_subbuf]; |
739 | else | 739 | else |
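The relay.c hunks (like the trace.c, trace_events.c and trace_uprobe.c ones further down) stop dereferencing ->d_inode directly and go through the d_inode() accessor instead. In this kernel generation the helper is roughly a thin inline in <linux/dcache.h>:

/* Accessor used by the conversions in this patch set; the point is to have
 * one annotated place that maps a dentry to its inode. */
static inline struct inode *d_inode(const struct dentry *dentry)
{
	return dentry->d_inode;
}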
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f9123a82cbb6..57bd333bc4ab 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -1016,13 +1016,6 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) | |||
1016 | rq_clock_skip_update(rq, true); | 1016 | rq_clock_skip_update(rq, true); |
1017 | } | 1017 | } |
1018 | 1018 | ||
1019 | static ATOMIC_NOTIFIER_HEAD(task_migration_notifier); | ||
1020 | |||
1021 | void register_task_migration_notifier(struct notifier_block *n) | ||
1022 | { | ||
1023 | atomic_notifier_chain_register(&task_migration_notifier, n); | ||
1024 | } | ||
1025 | |||
1026 | #ifdef CONFIG_SMP | 1019 | #ifdef CONFIG_SMP |
1027 | void set_task_cpu(struct task_struct *p, unsigned int new_cpu) | 1020 | void set_task_cpu(struct task_struct *p, unsigned int new_cpu) |
1028 | { | 1021 | { |
@@ -1053,18 +1046,10 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) | |||
1053 | trace_sched_migrate_task(p, new_cpu); | 1046 | trace_sched_migrate_task(p, new_cpu); |
1054 | 1047 | ||
1055 | if (task_cpu(p) != new_cpu) { | 1048 | if (task_cpu(p) != new_cpu) { |
1056 | struct task_migration_notifier tmn; | ||
1057 | |||
1058 | if (p->sched_class->migrate_task_rq) | 1049 | if (p->sched_class->migrate_task_rq) |
1059 | p->sched_class->migrate_task_rq(p, new_cpu); | 1050 | p->sched_class->migrate_task_rq(p, new_cpu); |
1060 | p->se.nr_migrations++; | 1051 | p->se.nr_migrations++; |
1061 | perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0); | 1052 | perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0); |
1062 | |||
1063 | tmn.task = p; | ||
1064 | tmn.from_cpu = task_cpu(p); | ||
1065 | tmn.to_cpu = new_cpu; | ||
1066 | |||
1067 | atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn); | ||
1068 | } | 1053 | } |
1069 | 1054 | ||
1070 | __set_task_cpu(p, new_cpu); | 1055 | __set_task_cpu(p, new_cpu); |
@@ -3315,15 +3300,18 @@ static void __setscheduler_params(struct task_struct *p, | |||
3315 | 3300 | ||
3316 | /* Actually do priority change: must hold pi & rq lock. */ | 3301 | /* Actually do priority change: must hold pi & rq lock. */ |
3317 | static void __setscheduler(struct rq *rq, struct task_struct *p, | 3302 | static void __setscheduler(struct rq *rq, struct task_struct *p, |
3318 | const struct sched_attr *attr) | 3303 | const struct sched_attr *attr, bool keep_boost) |
3319 | { | 3304 | { |
3320 | __setscheduler_params(p, attr); | 3305 | __setscheduler_params(p, attr); |
3321 | 3306 | ||
3322 | /* | 3307 | /* |
3323 | * If we get here, there was no pi waiters boosting the | 3308 | * Keep a potential priority boosting if called from |
3324 | * task. It is safe to use the normal prio. | 3309 | * sched_setscheduler(). |
3325 | */ | 3310 | */ |
3326 | p->prio = normal_prio(p); | 3311 | if (keep_boost) |
3312 | p->prio = rt_mutex_get_effective_prio(p, normal_prio(p)); | ||
3313 | else | ||
3314 | p->prio = normal_prio(p); | ||
3327 | 3315 | ||
3328 | if (dl_prio(p->prio)) | 3316 | if (dl_prio(p->prio)) |
3329 | p->sched_class = &dl_sched_class; | 3317 | p->sched_class = &dl_sched_class; |
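With the new keep_boost parameter, __setscheduler() no longer assumes the caller is deboosting: on the sched_setscheduler() path it asks rt_mutex_get_effective_prio() for the priority that should actually be in effect, so an active PI boost is not thrown away. A compressed sketch of the resulting priority selection (identifiers from the hunks; the scheduler-class selection below it is unchanged):

	/* Sketch: pick the effective priority for the new parameters. */
	if (keep_boost)
		/* sched_setscheduler(): let the rtmutex code factor a
		 * still-pending PI boost into the new priority. */
		p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
	else
		/* normalize_task(): no boost to preserve. */
		p->prio = normal_prio(p);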
@@ -3423,7 +3411,7 @@ static int __sched_setscheduler(struct task_struct *p, | |||
3423 | int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 : | 3411 | int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 : |
3424 | MAX_RT_PRIO - 1 - attr->sched_priority; | 3412 | MAX_RT_PRIO - 1 - attr->sched_priority; |
3425 | int retval, oldprio, oldpolicy = -1, queued, running; | 3413 | int retval, oldprio, oldpolicy = -1, queued, running; |
3426 | int policy = attr->sched_policy; | 3414 | int new_effective_prio, policy = attr->sched_policy; |
3427 | unsigned long flags; | 3415 | unsigned long flags; |
3428 | const struct sched_class *prev_class; | 3416 | const struct sched_class *prev_class; |
3429 | struct rq *rq; | 3417 | struct rq *rq; |
@@ -3605,15 +3593,14 @@ change: | |||
3605 | oldprio = p->prio; | 3593 | oldprio = p->prio; |
3606 | 3594 | ||
3607 | /* | 3595 | /* |
3608 | * Special case for priority boosted tasks. | 3596 | * Take priority boosted tasks into account. If the new |
3609 | * | 3597 | * effective priority is unchanged, we just store the new |
3610 | * If the new priority is lower or equal (user space view) | ||
3611 | * than the current (boosted) priority, we just store the new | ||
3612 | * normal parameters and do not touch the scheduler class and | 3598 | * normal parameters and do not touch the scheduler class and |
3613 | * the runqueue. This will be done when the task deboost | 3599 | * the runqueue. This will be done when the task deboost |
3614 | * itself. | 3600 | * itself. |
3615 | */ | 3601 | */ |
3616 | if (rt_mutex_check_prio(p, newprio)) { | 3602 | new_effective_prio = rt_mutex_get_effective_prio(p, newprio); |
3603 | if (new_effective_prio == oldprio) { | ||
3617 | __setscheduler_params(p, attr); | 3604 | __setscheduler_params(p, attr); |
3618 | task_rq_unlock(rq, p, &flags); | 3605 | task_rq_unlock(rq, p, &flags); |
3619 | return 0; | 3606 | return 0; |
@@ -3627,7 +3614,7 @@ change: | |||
3627 | put_prev_task(rq, p); | 3614 | put_prev_task(rq, p); |
3628 | 3615 | ||
3629 | prev_class = p->sched_class; | 3616 | prev_class = p->sched_class; |
3630 | __setscheduler(rq, p, attr); | 3617 | __setscheduler(rq, p, attr, true); |
3631 | 3618 | ||
3632 | if (running) | 3619 | if (running) |
3633 | p->sched_class->set_curr_task(rq); | 3620 | p->sched_class->set_curr_task(rq); |
@@ -7012,27 +6999,23 @@ static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, | |||
7012 | unsigned long flags; | 6999 | unsigned long flags; |
7013 | long cpu = (long)hcpu; | 7000 | long cpu = (long)hcpu; |
7014 | struct dl_bw *dl_b; | 7001 | struct dl_bw *dl_b; |
7002 | bool overflow; | ||
7003 | int cpus; | ||
7015 | 7004 | ||
7016 | switch (action & ~CPU_TASKS_FROZEN) { | 7005 | switch (action) { |
7017 | case CPU_DOWN_PREPARE: | 7006 | case CPU_DOWN_PREPARE: |
7018 | /* explicitly allow suspend */ | 7007 | rcu_read_lock_sched(); |
7019 | if (!(action & CPU_TASKS_FROZEN)) { | 7008 | dl_b = dl_bw_of(cpu); |
7020 | bool overflow; | ||
7021 | int cpus; | ||
7022 | |||
7023 | rcu_read_lock_sched(); | ||
7024 | dl_b = dl_bw_of(cpu); | ||
7025 | 7009 | ||
7026 | raw_spin_lock_irqsave(&dl_b->lock, flags); | 7010 | raw_spin_lock_irqsave(&dl_b->lock, flags); |
7027 | cpus = dl_bw_cpus(cpu); | 7011 | cpus = dl_bw_cpus(cpu); |
7028 | overflow = __dl_overflow(dl_b, cpus, 0, 0); | 7012 | overflow = __dl_overflow(dl_b, cpus, 0, 0); |
7029 | raw_spin_unlock_irqrestore(&dl_b->lock, flags); | 7013 | raw_spin_unlock_irqrestore(&dl_b->lock, flags); |
7030 | 7014 | ||
7031 | rcu_read_unlock_sched(); | 7015 | rcu_read_unlock_sched(); |
7032 | 7016 | ||
7033 | if (overflow) | 7017 | if (overflow) |
7034 | return notifier_from_errno(-EBUSY); | 7018 | return notifier_from_errno(-EBUSY); |
7035 | } | ||
7036 | cpuset_update_active_cpus(false); | 7019 | cpuset_update_active_cpus(false); |
7037 | break; | 7020 | break; |
7038 | case CPU_DOWN_PREPARE_FROZEN: | 7021 | case CPU_DOWN_PREPARE_FROZEN: |
@@ -7361,7 +7344,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p) | |||
7361 | queued = task_on_rq_queued(p); | 7344 | queued = task_on_rq_queued(p); |
7362 | if (queued) | 7345 | if (queued) |
7363 | dequeue_task(rq, p, 0); | 7346 | dequeue_task(rq, p, 0); |
7364 | __setscheduler(rq, p, &attr); | 7347 | __setscheduler(rq, p, &attr, false); |
7365 | if (queued) { | 7348 | if (queued) { |
7366 | enqueue_task(rq, p, 0); | 7349 | enqueue_task(rq, p, 0); |
7367 | resched_curr(rq); | 7350 | resched_curr(rq); |
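In the hotplug path the deadline-bandwidth check is no longer skipped for suspend: cpuset_cpu_inactive() now switches on the raw action, so CPU_DOWN_PREPARE always runs the overflow test and the _FROZEN variant is handled by its own case label. The core of that test, as it reads after the hunk (names taken from it), is the usual take-the-bandwidth-lock-and-ask pattern:

	/* Would taking this CPU away overcommit the deadline bandwidth? */
	rcu_read_lock_sched();
	dl_b = dl_bw_of(cpu);

	raw_spin_lock_irqsave(&dl_b->lock, flags);
	cpus = dl_bw_cpus(cpu);
	overflow = __dl_overflow(dl_b, cpus, 0, 0);
	raw_spin_unlock_irqrestore(&dl_b->lock, flags);

	rcu_read_unlock_sched();

	if (overflow)
		return notifier_from_errno(-EBUSY);	/* veto the offline */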
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index deef1caa94c6..fefcb1fa5160 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c | |||
@@ -81,7 +81,6 @@ static void cpuidle_idle_call(void) | |||
81 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); | 81 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); |
82 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); | 82 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); |
83 | int next_state, entered_state; | 83 | int next_state, entered_state; |
84 | unsigned int broadcast; | ||
85 | bool reflect; | 84 | bool reflect; |
86 | 85 | ||
87 | /* | 86 | /* |
@@ -150,17 +149,6 @@ static void cpuidle_idle_call(void) | |||
150 | goto exit_idle; | 149 | goto exit_idle; |
151 | } | 150 | } |
152 | 151 | ||
153 | broadcast = drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP; | ||
154 | |||
155 | /* | ||
156 | * Tell the time framework to switch to a broadcast timer | ||
157 | * because our local timer will be shutdown. If a local timer | ||
158 | * is used from another cpu as a broadcast timer, this call may | ||
159 | * fail if it is not available | ||
160 | */ | ||
161 | if (broadcast && tick_broadcast_enter()) | ||
162 | goto use_default; | ||
163 | |||
164 | /* Take note of the planned idle state. */ | 152 | /* Take note of the planned idle state. */ |
165 | idle_set_state(this_rq(), &drv->states[next_state]); | 153 | idle_set_state(this_rq(), &drv->states[next_state]); |
166 | 154 | ||
@@ -174,8 +162,8 @@ static void cpuidle_idle_call(void) | |||
174 | /* The cpu is no longer idle or about to enter idle. */ | 162 | /* The cpu is no longer idle or about to enter idle. */ |
175 | idle_set_state(this_rq(), NULL); | 163 | idle_set_state(this_rq(), NULL); |
176 | 164 | ||
177 | if (broadcast) | 165 | if (entered_state == -EBUSY) |
178 | tick_broadcast_exit(); | 166 | goto use_default; |
179 | 167 | ||
180 | /* | 168 | /* |
181 | * Give the governor an opportunity to reflect on the outcome | 169 | * Give the governor an opportunity to reflect on the outcome |
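The idle-loop hunks delete the open-coded tick_broadcast_enter()/exit() dance: stopping the local timer for deep C-states becomes the cpuidle core's problem, and the idle loop only has to notice that state entry failed. A rough sketch of the simplified flow around the lines shown, assuming (as in this tree) that cpuidle_enter() is the call sitting between them:

	/* Record the state we are about to enter, try it, and fall back to
	 * the default path if the core reports -EBUSY, e.g. because the
	 * broadcast timer could not be engaged. */
	idle_set_state(this_rq(), &drv->states[next_state]);

	entered_state = cpuidle_enter(drv, dev, next_state);

	idle_set_state(this_rq(), NULL);	/* no longer (about to be) idle */

	if (entered_state == -EBUSY)
		goto use_default;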
diff --git a/kernel/signal.c b/kernel/signal.c index a390499943e4..d51c5ddd855c 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -2992,11 +2992,9 @@ static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info) | |||
2992 | * Nor can they impersonate a kill()/tgkill(), which adds source info. | 2992 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
2993 | */ | 2993 | */ |
2994 | if ((info->si_code >= 0 || info->si_code == SI_TKILL) && | 2994 | if ((info->si_code >= 0 || info->si_code == SI_TKILL) && |
2995 | (task_pid_vnr(current) != pid)) { | 2995 | (task_pid_vnr(current) != pid)) |
2996 | /* We used to allow any < 0 si_code */ | ||
2997 | WARN_ON_ONCE(info->si_code < 0); | ||
2998 | return -EPERM; | 2996 | return -EPERM; |
2999 | } | 2997 | |
3000 | info->si_signo = sig; | 2998 | info->si_signo = sig; |
3001 | 2999 | ||
3002 | /* POSIX.1b doesn't mention process groups. */ | 3000 | /* POSIX.1b doesn't mention process groups. */ |
@@ -3041,12 +3039,10 @@ static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) | |||
3041 | /* Not even root can pretend to send signals from the kernel. | 3039 | /* Not even root can pretend to send signals from the kernel. |
3042 | * Nor can they impersonate a kill()/tgkill(), which adds source info. | 3040 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
3043 | */ | 3041 | */ |
3044 | if (((info->si_code >= 0 || info->si_code == SI_TKILL)) && | 3042 | if ((info->si_code >= 0 || info->si_code == SI_TKILL) && |
3045 | (task_pid_vnr(current) != pid)) { | 3043 | (task_pid_vnr(current) != pid)) |
3046 | /* We used to allow any < 0 si_code */ | ||
3047 | WARN_ON_ONCE(info->si_code < 0); | ||
3048 | return -EPERM; | 3044 | return -EPERM; |
3049 | } | 3045 | |
3050 | info->si_signo = sig; | 3046 | info->si_signo = sig; |
3051 | 3047 | ||
3052 | return do_send_specific(tgid, pid, sig, info); | 3048 | return do_send_specific(tgid, pid, sig, info); |
diff --git a/kernel/smp.c b/kernel/smp.c index f38a1e692259..07854477c164 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
@@ -19,7 +19,7 @@ | |||
19 | 19 | ||
20 | enum { | 20 | enum { |
21 | CSD_FLAG_LOCK = 0x01, | 21 | CSD_FLAG_LOCK = 0x01, |
22 | CSD_FLAG_WAIT = 0x02, | 22 | CSD_FLAG_SYNCHRONOUS = 0x02, |
23 | }; | 23 | }; |
24 | 24 | ||
25 | struct call_function_data { | 25 | struct call_function_data { |
@@ -107,7 +107,7 @@ void __init call_function_init(void) | |||
107 | */ | 107 | */ |
108 | static void csd_lock_wait(struct call_single_data *csd) | 108 | static void csd_lock_wait(struct call_single_data *csd) |
109 | { | 109 | { |
110 | while (csd->flags & CSD_FLAG_LOCK) | 110 | while (smp_load_acquire(&csd->flags) & CSD_FLAG_LOCK) |
111 | cpu_relax(); | 111 | cpu_relax(); |
112 | } | 112 | } |
113 | 113 | ||
@@ -121,19 +121,17 @@ static void csd_lock(struct call_single_data *csd) | |||
121 | * to ->flags with any subsequent assignments to other | 121 | * to ->flags with any subsequent assignments to other |
122 | * fields of the specified call_single_data structure: | 122 | * fields of the specified call_single_data structure: |
123 | */ | 123 | */ |
124 | smp_mb(); | 124 | smp_wmb(); |
125 | } | 125 | } |
126 | 126 | ||
127 | static void csd_unlock(struct call_single_data *csd) | 127 | static void csd_unlock(struct call_single_data *csd) |
128 | { | 128 | { |
129 | WARN_ON((csd->flags & CSD_FLAG_WAIT) && !(csd->flags & CSD_FLAG_LOCK)); | 129 | WARN_ON(!(csd->flags & CSD_FLAG_LOCK)); |
130 | 130 | ||
131 | /* | 131 | /* |
132 | * ensure we're all done before releasing data: | 132 | * ensure we're all done before releasing data: |
133 | */ | 133 | */ |
134 | smp_mb(); | 134 | smp_store_release(&csd->flags, 0); |
135 | |||
136 | csd->flags &= ~CSD_FLAG_LOCK; | ||
137 | } | 135 | } |
138 | 136 | ||
139 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data); | 137 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data); |
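csd_lock_wait() and csd_unlock() now form an acquire/release pair: the waiter's acquire load pairs with the owner's release store, which both clears CSD_FLAG_LOCK and guarantees that everything written while the descriptor was held is visible before the waiter proceeds. A self-contained userspace analogue with C11 atomics (illustrative only; build with -pthread):

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

#define FLAG_LOCK 0x01

static _Atomic unsigned int flags = FLAG_LOCK;
static int payload;				/* stands in for csd->func/info */

static void *owner(void *arg)
{
	payload = 42;				/* fill the payload... */
	/* csd_unlock(): release store makes the payload visible first */
	atomic_store_explicit(&flags, 0, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, owner, NULL);

	/* csd_lock_wait(): spin with an acquire load until the bit drops */
	while (atomic_load_explicit(&flags, memory_order_acquire) & FLAG_LOCK)
		;				/* cpu_relax() */

	printf("payload = %d\n", payload);	/* guaranteed to print 42 */
	pthread_join(t, NULL);
	return 0;
}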
@@ -144,13 +142,16 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data); | |||
144 | * ->func, ->info, and ->flags set. | 142 | * ->func, ->info, and ->flags set. |
145 | */ | 143 | */ |
146 | static int generic_exec_single(int cpu, struct call_single_data *csd, | 144 | static int generic_exec_single(int cpu, struct call_single_data *csd, |
147 | smp_call_func_t func, void *info, int wait) | 145 | smp_call_func_t func, void *info) |
148 | { | 146 | { |
149 | struct call_single_data csd_stack = { .flags = 0 }; | ||
150 | unsigned long flags; | ||
151 | |||
152 | |||
153 | if (cpu == smp_processor_id()) { | 147 | if (cpu == smp_processor_id()) { |
148 | unsigned long flags; | ||
149 | |||
150 | /* | ||
151 | * We can unlock early even for the synchronous on-stack case, | ||
152 | * since we're doing this from the same CPU.. | ||
153 | */ | ||
154 | csd_unlock(csd); | ||
154 | local_irq_save(flags); | 155 | local_irq_save(flags); |
155 | func(info); | 156 | func(info); |
156 | local_irq_restore(flags); | 157 | local_irq_restore(flags); |
@@ -158,24 +159,14 @@ static int generic_exec_single(int cpu, struct call_single_data *csd, | |||
158 | } | 159 | } |
159 | 160 | ||
160 | 161 | ||
161 | if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) | 162 | if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) { |
163 | csd_unlock(csd); | ||
162 | return -ENXIO; | 164 | return -ENXIO; |
163 | |||
164 | |||
165 | if (!csd) { | ||
166 | csd = &csd_stack; | ||
167 | if (!wait) | ||
168 | csd = this_cpu_ptr(&csd_data); | ||
169 | } | 165 | } |
170 | 166 | ||
171 | csd_lock(csd); | ||
172 | |||
173 | csd->func = func; | 167 | csd->func = func; |
174 | csd->info = info; | 168 | csd->info = info; |
175 | 169 | ||
176 | if (wait) | ||
177 | csd->flags |= CSD_FLAG_WAIT; | ||
178 | |||
179 | /* | 170 | /* |
180 | * The list addition should be visible before sending the IPI | 171 | * The list addition should be visible before sending the IPI |
181 | * handler locks the list to pull the entry off it because of | 172 | * handler locks the list to pull the entry off it because of |
@@ -190,9 +181,6 @@ static int generic_exec_single(int cpu, struct call_single_data *csd, | |||
190 | if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu))) | 181 | if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu))) |
191 | arch_send_call_function_single_ipi(cpu); | 182 | arch_send_call_function_single_ipi(cpu); |
192 | 183 | ||
193 | if (wait) | ||
194 | csd_lock_wait(csd); | ||
195 | |||
196 | return 0; | 184 | return 0; |
197 | } | 185 | } |
198 | 186 | ||
@@ -250,8 +238,17 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline) | |||
250 | } | 238 | } |
251 | 239 | ||
252 | llist_for_each_entry_safe(csd, csd_next, entry, llist) { | 240 | llist_for_each_entry_safe(csd, csd_next, entry, llist) { |
253 | csd->func(csd->info); | 241 | smp_call_func_t func = csd->func; |
254 | csd_unlock(csd); | 242 | void *info = csd->info; |
243 | |||
244 | /* Do we wait until *after* callback? */ | ||
245 | if (csd->flags & CSD_FLAG_SYNCHRONOUS) { | ||
246 | func(info); | ||
247 | csd_unlock(csd); | ||
248 | } else { | ||
249 | csd_unlock(csd); | ||
250 | func(info); | ||
251 | } | ||
255 | } | 252 | } |
256 | 253 | ||
257 | /* | 254 | /* |
@@ -274,6 +271,8 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline) | |||
274 | int smp_call_function_single(int cpu, smp_call_func_t func, void *info, | 271 | int smp_call_function_single(int cpu, smp_call_func_t func, void *info, |
275 | int wait) | 272 | int wait) |
276 | { | 273 | { |
274 | struct call_single_data *csd; | ||
275 | struct call_single_data csd_stack = { .flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS }; | ||
277 | int this_cpu; | 276 | int this_cpu; |
278 | int err; | 277 | int err; |
279 | 278 | ||
@@ -292,7 +291,16 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info, | |||
292 | WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() | 291 | WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() |
293 | && !oops_in_progress); | 292 | && !oops_in_progress); |
294 | 293 | ||
295 | err = generic_exec_single(cpu, NULL, func, info, wait); | 294 | csd = &csd_stack; |
295 | if (!wait) { | ||
296 | csd = this_cpu_ptr(&csd_data); | ||
297 | csd_lock(csd); | ||
298 | } | ||
299 | |||
300 | err = generic_exec_single(cpu, csd, func, info); | ||
301 | |||
302 | if (wait) | ||
303 | csd_lock_wait(csd); | ||
296 | 304 | ||
297 | put_cpu(); | 305 | put_cpu(); |
298 | 306 | ||
@@ -321,7 +329,15 @@ int smp_call_function_single_async(int cpu, struct call_single_data *csd) | |||
321 | int err = 0; | 329 | int err = 0; |
322 | 330 | ||
323 | preempt_disable(); | 331 | preempt_disable(); |
324 | err = generic_exec_single(cpu, csd, csd->func, csd->info, 0); | 332 | |
333 | /* We could deadlock if we have to wait here with interrupts disabled! */ | ||
334 | if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK)) | ||
335 | csd_lock_wait(csd); | ||
336 | |||
337 | csd->flags = CSD_FLAG_LOCK; | ||
338 | smp_wmb(); | ||
339 | |||
340 | err = generic_exec_single(cpu, csd, csd->func, csd->info); | ||
325 | preempt_enable(); | 341 | preempt_enable(); |
326 | 342 | ||
327 | return err; | 343 | return err; |
@@ -433,6 +449,8 @@ void smp_call_function_many(const struct cpumask *mask, | |||
433 | struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu); | 449 | struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu); |
434 | 450 | ||
435 | csd_lock(csd); | 451 | csd_lock(csd); |
452 | if (wait) | ||
453 | csd->flags |= CSD_FLAG_SYNCHRONOUS; | ||
436 | csd->func = func; | 454 | csd->func = func; |
437 | csd->info = info; | 455 | csd->info = info; |
438 | llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)); | 456 | llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)); |
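The reworked queue flush distinguishes the two ownership models that CSD_FLAG_SYNCHRONOUS encodes. For a synchronous call the descriptor lives on the caller's stack and the caller is spinning in csd_lock_wait(), so the callback must finish before csd_unlock() releases it. For an asynchronous call the descriptor belongs to its owner and may be reused the moment it is unlocked, which is why func and info are copied into locals first and the unlock happens before the callback runs. Condensed from the hunk above, with the reasoning spelled out:

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;	/* copy before unlock */
		void *info = csd->info;

		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			func(info);		/* caller is waiting on this csd */
			csd_unlock(csd);	/* ...so release it afterwards */
		} else {
			csd_unlock(csd);	/* owner may reuse the csd now */
			func(info);
		}
	}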
diff --git a/kernel/sys.c b/kernel/sys.c index 3be344902316..a4e372b798a5 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -1649,14 +1649,13 @@ SYSCALL_DEFINE1(umask, int, mask) | |||
1649 | return mask; | 1649 | return mask; |
1650 | } | 1650 | } |
1651 | 1651 | ||
1652 | static int prctl_set_mm_exe_file_locked(struct mm_struct *mm, unsigned int fd) | 1652 | static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) |
1653 | { | 1653 | { |
1654 | struct fd exe; | 1654 | struct fd exe; |
1655 | struct file *old_exe, *exe_file; | ||
1655 | struct inode *inode; | 1656 | struct inode *inode; |
1656 | int err; | 1657 | int err; |
1657 | 1658 | ||
1658 | VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm); | ||
1659 | |||
1660 | exe = fdget(fd); | 1659 | exe = fdget(fd); |
1661 | if (!exe.file) | 1660 | if (!exe.file) |
1662 | return -EBADF; | 1661 | return -EBADF; |
@@ -1680,15 +1679,22 @@ static int prctl_set_mm_exe_file_locked(struct mm_struct *mm, unsigned int fd) | |||
1680 | /* | 1679 | /* |
1681 | * Forbid mm->exe_file change if old file still mapped. | 1680 | * Forbid mm->exe_file change if old file still mapped. |
1682 | */ | 1681 | */ |
1682 | exe_file = get_mm_exe_file(mm); | ||
1683 | err = -EBUSY; | 1683 | err = -EBUSY; |
1684 | if (mm->exe_file) { | 1684 | if (exe_file) { |
1685 | struct vm_area_struct *vma; | 1685 | struct vm_area_struct *vma; |
1686 | 1686 | ||
1687 | for (vma = mm->mmap; vma; vma = vma->vm_next) | 1687 | down_read(&mm->mmap_sem); |
1688 | if (vma->vm_file && | 1688 | for (vma = mm->mmap; vma; vma = vma->vm_next) { |
1689 | path_equal(&vma->vm_file->f_path, | 1689 | if (!vma->vm_file) |
1690 | &mm->exe_file->f_path)) | 1690 | continue; |
1691 | goto exit; | 1691 | if (path_equal(&vma->vm_file->f_path, |
1692 | &exe_file->f_path)) | ||
1693 | goto exit_err; | ||
1694 | } | ||
1695 | |||
1696 | up_read(&mm->mmap_sem); | ||
1697 | fput(exe_file); | ||
1692 | } | 1698 | } |
1693 | 1699 | ||
1694 | /* | 1700 | /* |
@@ -1702,10 +1708,18 @@ static int prctl_set_mm_exe_file_locked(struct mm_struct *mm, unsigned int fd) | |||
1702 | goto exit; | 1708 | goto exit; |
1703 | 1709 | ||
1704 | err = 0; | 1710 | err = 0; |
1705 | set_mm_exe_file(mm, exe.file); /* this grabs a reference to exe.file */ | 1711 | /* set the new file, lockless */ |
1712 | get_file(exe.file); | ||
1713 | old_exe = xchg(&mm->exe_file, exe.file); | ||
1714 | if (old_exe) | ||
1715 | fput(old_exe); | ||
1706 | exit: | 1716 | exit: |
1707 | fdput(exe); | 1717 | fdput(exe); |
1708 | return err; | 1718 | return err; |
1719 | exit_err: | ||
1720 | up_read(&mm->mmap_sem); | ||
1721 | fput(exe_file); | ||
1722 | goto exit; | ||
1709 | } | 1723 | } |
1710 | 1724 | ||
1711 | #ifdef CONFIG_CHECKPOINT_RESTORE | 1725 | #ifdef CONFIG_CHECKPOINT_RESTORE |
@@ -1840,10 +1854,9 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data | |||
1840 | user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL; | 1854 | user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL; |
1841 | } | 1855 | } |
1842 | 1856 | ||
1843 | down_write(&mm->mmap_sem); | ||
1844 | if (prctl_map.exe_fd != (u32)-1) | 1857 | if (prctl_map.exe_fd != (u32)-1) |
1845 | error = prctl_set_mm_exe_file_locked(mm, prctl_map.exe_fd); | 1858 | error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd); |
1846 | downgrade_write(&mm->mmap_sem); | 1859 | down_read(&mm->mmap_sem); |
1847 | if (error) | 1860 | if (error) |
1848 | goto out; | 1861 | goto out; |
1849 | 1862 | ||
@@ -1909,12 +1922,8 @@ static int prctl_set_mm(int opt, unsigned long addr, | |||
1909 | if (!capable(CAP_SYS_RESOURCE)) | 1922 | if (!capable(CAP_SYS_RESOURCE)) |
1910 | return -EPERM; | 1923 | return -EPERM; |
1911 | 1924 | ||
1912 | if (opt == PR_SET_MM_EXE_FILE) { | 1925 | if (opt == PR_SET_MM_EXE_FILE) |
1913 | down_write(&mm->mmap_sem); | 1926 | return prctl_set_mm_exe_file(mm, (unsigned int)addr); |
1914 | error = prctl_set_mm_exe_file_locked(mm, (unsigned int)addr); | ||
1915 | up_write(&mm->mmap_sem); | ||
1916 | return error; | ||
1917 | } | ||
1918 | 1927 | ||
1919 | if (addr >= TASK_SIZE || addr < mmap_min_addr) | 1928 | if (addr >= TASK_SIZE || addr < mmap_min_addr) |
1920 | return -EINVAL; | 1929 | return -EINVAL; |
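prctl_set_mm_exe_file() no longer needs the caller to hold mmap_sem for writing: the VMA scan only takes it for reading, and the actual replacement of mm->exe_file is done locklessly by pinning the new file and atomically exchanging the pointer. The three lines that carry the trick, with the intent spelled out (identifiers from the hunk):

	get_file(exe.file);			 /* pin the new file first */
	old_exe = xchg(&mm->exe_file, exe.file); /* publish it atomically */
	if (old_exe)
		fput(old_exe);			 /* drop the reference held by
						  * the previous exe_file */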
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 42b7fc2860c1..2082b1a88fb9 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -93,11 +93,9 @@ | |||
93 | #include <linux/nmi.h> | 93 | #include <linux/nmi.h> |
94 | #endif | 94 | #endif |
95 | 95 | ||
96 | |||
97 | #if defined(CONFIG_SYSCTL) | 96 | #if defined(CONFIG_SYSCTL) |
98 | 97 | ||
99 | /* External variables not in a header file. */ | 98 | /* External variables not in a header file. */ |
100 | extern int max_threads; | ||
101 | extern int suid_dumpable; | 99 | extern int suid_dumpable; |
102 | #ifdef CONFIG_COREDUMP | 100 | #ifdef CONFIG_COREDUMP |
103 | extern int core_uses_pid; | 101 | extern int core_uses_pid; |
@@ -710,10 +708,10 @@ static struct ctl_table kern_table[] = { | |||
710 | #endif | 708 | #endif |
711 | { | 709 | { |
712 | .procname = "threads-max", | 710 | .procname = "threads-max", |
713 | .data = &max_threads, | 711 | .data = NULL, |
714 | .maxlen = sizeof(int), | 712 | .maxlen = sizeof(int), |
715 | .mode = 0644, | 713 | .mode = 0644, |
716 | .proc_handler = proc_dointvec, | 714 | .proc_handler = sysctl_max_threads, |
717 | }, | 715 | }, |
718 | { | 716 | { |
719 | .procname = "random", | 717 | .procname = "random", |
@@ -1983,7 +1981,15 @@ static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp, | |||
1983 | int write, void *data) | 1981 | int write, void *data) |
1984 | { | 1982 | { |
1985 | if (write) { | 1983 | if (write) { |
1986 | *valp = *negp ? -*lvalp : *lvalp; | 1984 | if (*negp) { |
1985 | if (*lvalp > (unsigned long) INT_MAX + 1) | ||
1986 | return -EINVAL; | ||
1987 | *valp = -*lvalp; | ||
1988 | } else { | ||
1989 | if (*lvalp > (unsigned long) INT_MAX) | ||
1990 | return -EINVAL; | ||
1991 | *valp = *lvalp; | ||
1992 | } | ||
1987 | } else { | 1993 | } else { |
1988 | int val = *valp; | 1994 | int val = *valp; |
1989 | if (val < 0) { | 1995 | if (val < 0) { |
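The do_proc_dointvec_conv() change closes an overflow hole: the parsed magnitude arrives as an unsigned long, and only values up to INT_MAX (or INT_MAX + 1 for a negative number, i.e. INT_MIN) fit in an int; anything larger is now rejected with -EINVAL instead of being silently truncated. A small userspace check of the same boundary logic (store_int is a hypothetical stand-in that mirrors the kernel's conversion of the negative case):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* A negative magnitude may be as large as (unsigned long)INT_MAX + 1, which
 * maps to INT_MIN; a positive one at most INT_MAX. The negate-then-store
 * matches the kernel code and relies on the usual two's-complement targets. */
static bool store_int(bool neg, unsigned long mag, int *valp)
{
	if (neg) {
		if (mag > (unsigned long)INT_MAX + 1)
			return false;
		*valp = -mag;
	} else {
		if (mag > (unsigned long)INT_MAX)
			return false;
		*valp = mag;
	}
	return true;
}

int main(void)
{
	int v;

	printf("%d\n", store_int(true, 2147483648UL, &v));	/* 1: INT_MIN fits */
	printf("%d\n", store_int(false, 2147483648UL, &v));	/* 0: rejected    */
	return 0;
}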
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 25d942d1da27..637a09461c1d 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c | |||
@@ -117,11 +117,7 @@ static int __clockevents_set_state(struct clock_event_device *dev, | |||
117 | /* Transition with new state-specific callbacks */ | 117 | /* Transition with new state-specific callbacks */ |
118 | switch (state) { | 118 | switch (state) { |
119 | case CLOCK_EVT_STATE_DETACHED: | 119 | case CLOCK_EVT_STATE_DETACHED: |
120 | /* | 120 | /* The clockevent device is getting replaced. Shut it down. */ |
121 | * This is an internal state, which is guaranteed to go from | ||
122 | * SHUTDOWN to DETACHED. No driver interaction required. | ||
123 | */ | ||
124 | return 0; | ||
125 | 121 | ||
126 | case CLOCK_EVT_STATE_SHUTDOWN: | 122 | case CLOCK_EVT_STATE_SHUTDOWN: |
127 | return dev->set_state_shutdown(dev); | 123 | return dev->set_state_shutdown(dev); |
@@ -440,7 +436,7 @@ int clockevents_unbind_device(struct clock_event_device *ced, int cpu) | |||
440 | mutex_unlock(&clockevents_mutex); | 436 | mutex_unlock(&clockevents_mutex); |
441 | return ret; | 437 | return ret; |
442 | } | 438 | } |
443 | EXPORT_SYMBOL_GPL(clockevents_unbind); | 439 | EXPORT_SYMBOL_GPL(clockevents_unbind_device); |
444 | 440 | ||
445 | /* Sanity check of state transition callbacks */ | 441 | /* Sanity check of state transition callbacks */ |
446 | static int clockevents_sanity_check(struct clock_event_device *dev) | 442 | static int clockevents_sanity_check(struct clock_event_device *dev) |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 91eecaaa43e0..05330494a0df 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -6079,7 +6079,7 @@ trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent, | |||
6079 | struct dentry *ret = trace_create_file(name, mode, parent, data, fops); | 6079 | struct dentry *ret = trace_create_file(name, mode, parent, data, fops); |
6080 | 6080 | ||
6081 | if (ret) /* See tracing_get_cpu() */ | 6081 | if (ret) /* See tracing_get_cpu() */ |
6082 | ret->d_inode->i_cdev = (void *)(cpu + 1); | 6082 | d_inode(ret)->i_cdev = (void *)(cpu + 1); |
6083 | return ret; | 6083 | return ret; |
6084 | } | 6084 | } |
6085 | 6085 | ||
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 7da1dfeb322e..c4de47fc5cca 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -494,8 +494,8 @@ static void remove_event_file_dir(struct ftrace_event_file *file) | |||
494 | if (dir) { | 494 | if (dir) { |
495 | spin_lock(&dir->d_lock); /* probably unneeded */ | 495 | spin_lock(&dir->d_lock); /* probably unneeded */ |
496 | list_for_each_entry(child, &dir->d_subdirs, d_child) { | 496 | list_for_each_entry(child, &dir->d_subdirs, d_child) { |
497 | if (child->d_inode) /* probably unneeded */ | 497 | if (d_really_is_positive(child)) /* probably unneeded */ |
498 | child->d_inode->i_private = NULL; | 498 | d_inode(child)->i_private = NULL; |
499 | } | 499 | } |
500 | spin_unlock(&dir->d_lock); | 500 | spin_unlock(&dir->d_lock); |
501 | 501 | ||
@@ -565,6 +565,7 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match, | |||
565 | static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) | 565 | static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) |
566 | { | 566 | { |
567 | char *event = NULL, *sub = NULL, *match; | 567 | char *event = NULL, *sub = NULL, *match; |
568 | int ret; | ||
568 | 569 | ||
569 | /* | 570 | /* |
570 | * The buf format can be <subsystem>:<event-name> | 571 | * The buf format can be <subsystem>:<event-name> |
@@ -590,7 +591,13 @@ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) | |||
590 | event = NULL; | 591 | event = NULL; |
591 | } | 592 | } |
592 | 593 | ||
593 | return __ftrace_set_clr_event(tr, match, sub, event, set); | 594 | ret = __ftrace_set_clr_event(tr, match, sub, event, set); |
595 | |||
596 | /* Put back the colon to allow this to be called again */ | ||
597 | if (buf) | ||
598 | *(buf - 1) = ':'; | ||
599 | |||
600 | return ret; | ||
594 | } | 601 | } |
595 | 602 | ||
596 | /** | 603 | /** |
@@ -1753,6 +1760,8 @@ static void update_event_printk(struct ftrace_event_call *call, | |||
1753 | ptr++; | 1760 | ptr++; |
1754 | /* Check for alpha chars like ULL */ | 1761 | /* Check for alpha chars like ULL */ |
1755 | } while (isalnum(*ptr)); | 1762 | } while (isalnum(*ptr)); |
1763 | if (!*ptr) | ||
1764 | break; | ||
1756 | /* | 1765 | /* |
1757 | * A number must have some kind of delimiter after | 1766 | * A number must have some kind of delimiter after |
1758 | * it, and we can ignore that too. | 1767 | * it, and we can ignore that too. |
@@ -1779,12 +1788,16 @@ static void update_event_printk(struct ftrace_event_call *call, | |||
1779 | do { | 1788 | do { |
1780 | ptr++; | 1789 | ptr++; |
1781 | } while (isalnum(*ptr) || *ptr == '_'); | 1790 | } while (isalnum(*ptr) || *ptr == '_'); |
1791 | if (!*ptr) | ||
1792 | break; | ||
1782 | /* | 1793 | /* |
1783 | * If what comes after this variable is a '.' or | 1794 | * If what comes after this variable is a '.' or |
1784 | * '->' then we can continue to ignore that string. | 1795 | * '->' then we can continue to ignore that string. |
1785 | */ | 1796 | */ |
1786 | if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) { | 1797 | if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) { |
1787 | ptr += *ptr == '.' ? 1 : 2; | 1798 | ptr += *ptr == '.' ? 1 : 2; |
1799 | if (!*ptr) | ||
1800 | break; | ||
1788 | goto skip_more; | 1801 | goto skip_more; |
1789 | } | 1802 | } |
1790 | /* | 1803 | /* |
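Two separate fixes land in trace_events.c: ftrace_set_clr_event() puts back the ':' it overwrote while splitting "<subsystem>:<event>", so the caller's buffer survives for a second parse, and update_event_printk() now bails out when it walks off the end of the format string. The first idea in a self-contained userspace form (split, parse, restore; names are illustrative):

#include <stdio.h>
#include <string.h>

static void parse_once(char *buf)
{
	char *event = NULL, *sub = buf;
	char *match = strchr(buf, ':');

	if (match) {
		*match = '\0';			/* split "<subsystem>:<event>" */
		event = match + 1;
	}
	printf("sub=%s event=%s\n", sub, event ? event : "(all)");

	if (match)
		*match = ':';			/* restore: buffer is reusable */
}

int main(void)
{
	char buf[] = "sched:sched_switch";

	parse_once(buf);
	parse_once(buf);			/* works again thanks to the restore */
	return 0;
}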
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 9cfea4c6d314..a51e79688455 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -1308,15 +1308,19 @@ void graph_trace_open(struct trace_iterator *iter) | |||
1308 | { | 1308 | { |
1309 | /* pid and depth on the last trace processed */ | 1309 | /* pid and depth on the last trace processed */ |
1310 | struct fgraph_data *data; | 1310 | struct fgraph_data *data; |
1311 | gfp_t gfpflags; | ||
1311 | int cpu; | 1312 | int cpu; |
1312 | 1313 | ||
1313 | iter->private = NULL; | 1314 | iter->private = NULL; |
1314 | 1315 | ||
1315 | data = kzalloc(sizeof(*data), GFP_KERNEL); | 1316 | /* We can be called in atomic context via ftrace_dump() */ |
1317 | gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL; | ||
1318 | |||
1319 | data = kzalloc(sizeof(*data), gfpflags); | ||
1316 | if (!data) | 1320 | if (!data) |
1317 | goto out_err; | 1321 | goto out_err; |
1318 | 1322 | ||
1319 | data->cpu_data = alloc_percpu(struct fgraph_cpu_data); | 1323 | data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags); |
1320 | if (!data->cpu_data) | 1324 | if (!data->cpu_data) |
1321 | goto out_err_free; | 1325 | goto out_err_free; |
1322 | 1326 | ||
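graph_trace_open() can now be reached from atomic context via ftrace_dump(), so it picks the allocation class at run time and switches to alloc_percpu_gfp() so the per-cpu buffer honours the same constraint. The rule being applied, pulled out into a tiny helper purely for illustration (the hunk open-codes it, and the helper name is hypothetical):

/* Never request a sleeping allocation from a context that cannot sleep. */
static gfp_t graph_open_gfp(void)
{
	return (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
}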
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 692bf7184c8c..25a086bcb700 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
@@ -178,12 +178,13 @@ ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len) | |||
178 | EXPORT_SYMBOL(ftrace_print_hex_seq); | 178 | EXPORT_SYMBOL(ftrace_print_hex_seq); |
179 | 179 | ||
180 | const char * | 180 | const char * |
181 | ftrace_print_array_seq(struct trace_seq *p, const void *buf, int buf_len, | 181 | ftrace_print_array_seq(struct trace_seq *p, const void *buf, int count, |
182 | size_t el_size) | 182 | size_t el_size) |
183 | { | 183 | { |
184 | const char *ret = trace_seq_buffer_ptr(p); | 184 | const char *ret = trace_seq_buffer_ptr(p); |
185 | const char *prefix = ""; | 185 | const char *prefix = ""; |
186 | void *ptr = (void *)buf; | 186 | void *ptr = (void *)buf; |
187 | size_t buf_len = count * el_size; | ||
187 | 188 | ||
188 | trace_seq_putc(p, '{'); | 189 | trace_seq_putc(p, '{'); |
189 | 190 | ||
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index d60fe62ec4fa..6dd022c7b5bc 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c | |||
@@ -443,7 +443,7 @@ static int create_trace_uprobe(int argc, char **argv) | |||
443 | if (ret) | 443 | if (ret) |
444 | goto fail_address_parse; | 444 | goto fail_address_parse; |
445 | 445 | ||
446 | inode = igrab(path.dentry->d_inode); | 446 | inode = igrab(d_inode(path.dentry)); |
447 | path_put(&path); | 447 | path_put(&path); |
448 | 448 | ||
449 | if (!inode || !S_ISREG(inode->i_mode)) { | 449 | if (!inode || !S_ISREG(inode->i_mode)) { |
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 2316f50b07a4..581a68a04c64 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
@@ -41,6 +41,8 @@ | |||
41 | #define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT) | 41 | #define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT) |
42 | #define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT) | 42 | #define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT) |
43 | 43 | ||
44 | static DEFINE_MUTEX(watchdog_proc_mutex); | ||
45 | |||
44 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | 46 | #ifdef CONFIG_HARDLOCKUP_DETECTOR |
45 | static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED; | 47 | static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED; |
46 | #else | 48 | #else |
@@ -608,26 +610,36 @@ void watchdog_nmi_enable_all(void) | |||
608 | { | 610 | { |
609 | int cpu; | 611 | int cpu; |
610 | 612 | ||
611 | if (!watchdog_user_enabled) | 613 | mutex_lock(&watchdog_proc_mutex); |
612 | return; | 614 | |
615 | if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) | ||
616 | goto unlock; | ||
613 | 617 | ||
614 | get_online_cpus(); | 618 | get_online_cpus(); |
615 | for_each_online_cpu(cpu) | 619 | for_each_online_cpu(cpu) |
616 | watchdog_nmi_enable(cpu); | 620 | watchdog_nmi_enable(cpu); |
617 | put_online_cpus(); | 621 | put_online_cpus(); |
622 | |||
623 | unlock: | ||
624 | mutex_unlock(&watchdog_proc_mutex); | ||
618 | } | 625 | } |
619 | 626 | ||
620 | void watchdog_nmi_disable_all(void) | 627 | void watchdog_nmi_disable_all(void) |
621 | { | 628 | { |
622 | int cpu; | 629 | int cpu; |
623 | 630 | ||
631 | mutex_lock(&watchdog_proc_mutex); | ||
632 | |||
624 | if (!watchdog_running) | 633 | if (!watchdog_running) |
625 | return; | 634 | goto unlock; |
626 | 635 | ||
627 | get_online_cpus(); | 636 | get_online_cpus(); |
628 | for_each_online_cpu(cpu) | 637 | for_each_online_cpu(cpu) |
629 | watchdog_nmi_disable(cpu); | 638 | watchdog_nmi_disable(cpu); |
630 | put_online_cpus(); | 639 | put_online_cpus(); |
640 | |||
641 | unlock: | ||
642 | mutex_unlock(&watchdog_proc_mutex); | ||
631 | } | 643 | } |
632 | #else | 644 | #else |
633 | static int watchdog_nmi_enable(unsigned int cpu) { return 0; } | 645 | static int watchdog_nmi_enable(unsigned int cpu) { return 0; } |
@@ -744,8 +756,6 @@ static int proc_watchdog_update(void) | |||
744 | 756 | ||
745 | } | 757 | } |
746 | 758 | ||
747 | static DEFINE_MUTEX(watchdog_proc_mutex); | ||
748 | |||
749 | /* | 759 | /* |
750 | * common function for watchdog, nmi_watchdog and soft_watchdog parameter | 760 | * common function for watchdog, nmi_watchdog and soft_watchdog parameter |
751 | * | 761 | * |