Diffstat (limited to 'kernel')
36 files changed, 294 insertions, 148 deletions
diff --git a/kernel/audit.c b/kernel/audit.c
index 34c5a2310fbf..3392d3e0254a 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -182,7 +182,7 @@ struct audit_buffer {
 
 struct audit_reply {
 	__u32 portid;
-	pid_t pid;
+	struct net *net;
 	struct sk_buff *skb;
 };
 
@@ -500,7 +500,7 @@ int audit_send_list(void *_dest)
 {
 	struct audit_netlink_list *dest = _dest;
 	struct sk_buff *skb;
-	struct net *net = get_net_ns_by_pid(dest->pid);
+	struct net *net = dest->net;
 	struct audit_net *aunet = net_generic(net, audit_net_id);
 
 	/* wait for parent to finish and send an ACK */
@@ -510,6 +510,7 @@ int audit_send_list(void *_dest)
 	while ((skb = __skb_dequeue(&dest->q)) != NULL)
 		netlink_unicast(aunet->nlsk, skb, dest->portid, 0);
 
+	put_net(net);
 	kfree(dest);
 
 	return 0;
@@ -543,7 +544,7 @@ out_kfree_skb:
 static int audit_send_reply_thread(void *arg)
 {
 	struct audit_reply *reply = (struct audit_reply *)arg;
-	struct net *net = get_net_ns_by_pid(reply->pid);
+	struct net *net = reply->net;
 	struct audit_net *aunet = net_generic(net, audit_net_id);
 
 	mutex_lock(&audit_cmd_mutex);
@@ -552,12 +553,13 @@ static int audit_send_reply_thread(void *arg)
 	/* Ignore failure. It'll only happen if the sender goes away,
 	   because our timeout is set to infinite. */
 	netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0);
+	put_net(net);
 	kfree(reply);
 	return 0;
 }
 /**
  * audit_send_reply - send an audit reply message via netlink
- * @portid: netlink port to which to send reply
+ * @request_skb: skb of request we are replying to (used to target the reply)
  * @seq: sequence number
  * @type: audit message type
  * @done: done (last) flag
@@ -568,9 +570,11 @@ static int audit_send_reply_thread(void *arg)
  * Allocates an skb, builds the netlink message, and sends it to the port id.
  * No failure notifications.
  */
-static void audit_send_reply(__u32 portid, int seq, int type, int done,
+static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int done,
 			     int multi, const void *payload, int size)
 {
+	u32 portid = NETLINK_CB(request_skb).portid;
+	struct net *net = sock_net(NETLINK_CB(request_skb).sk);
 	struct sk_buff *skb;
 	struct task_struct *tsk;
 	struct audit_reply *reply = kmalloc(sizeof(struct audit_reply),
@@ -583,8 +587,8 @@ static void audit_send_reply(__u32 portid, int seq, int type, int done,
 	if (!skb)
 		goto out;
 
+	reply->net = get_net(net);
 	reply->portid = portid;
-	reply->pid = task_pid_vnr(current);
 	reply->skb = skb;
 
 	tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply");
@@ -673,8 +677,7 @@ static int audit_get_feature(struct sk_buff *skb)
 
 	seq = nlmsg_hdr(skb)->nlmsg_seq;
 
-	audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
-			 &af, sizeof(af));
+	audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &af, sizeof(af));
 
 	return 0;
 }
@@ -794,8 +797,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		s.backlog = skb_queue_len(&audit_skb_queue);
 		s.version = AUDIT_VERSION_LATEST;
 		s.backlog_wait_time = audit_backlog_wait_time;
-		audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
-				 &s, sizeof(s));
+		audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s));
 		break;
 	}
 	case AUDIT_SET: {
@@ -905,7 +907,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 					   seq, data, nlmsg_len(nlh));
 		break;
 	case AUDIT_LIST_RULES:
-		err = audit_list_rules_send(NETLINK_CB(skb).portid, seq);
+		err = audit_list_rules_send(skb, seq);
 		break;
 	case AUDIT_TRIM:
 		audit_trim_trees();
@@ -970,8 +972,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 			memcpy(sig_data->ctx, ctx, len);
 			security_release_secctx(ctx, len);
 		}
-		audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_SIGNAL_INFO,
-				 0, 0, sig_data, sizeof(*sig_data) + len);
+		audit_send_reply(skb, seq, AUDIT_SIGNAL_INFO, 0, 0,
+				 sig_data, sizeof(*sig_data) + len);
 		kfree(sig_data);
 		break;
 	case AUDIT_TTY_GET: {
@@ -983,8 +985,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		s.log_passwd = tsk->signal->audit_tty_log_passwd;
 		spin_unlock(&tsk->sighand->siglock);
 
-		audit_send_reply(NETLINK_CB(skb).portid, seq,
-				 AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
+		audit_send_reply(skb, seq, AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
 		break;
 	}
 	case AUDIT_TTY_SET: {
diff --git a/kernel/audit.h b/kernel/audit.h
index 57cc64d67718..8df132214606 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -247,7 +247,7 @@ extern void audit_panic(const char *message);
 
 struct audit_netlink_list {
 	__u32 portid;
-	pid_t pid;
+	struct net *net;
 	struct sk_buff_head q;
 };
 
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 67ccf0e7cca9..135944a7b28a 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -916,7 +916,7 @@ static int audit_tree_handle_event(struct fsnotify_group *group,
 				   struct fsnotify_mark *inode_mark,
 				   struct fsnotify_mark *vfsmount_mark,
 				   u32 mask, void *data, int data_type,
-				   const unsigned char *file_name)
+				   const unsigned char *file_name, u32 cookie)
 {
 	return 0;
 }
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 2596fac5dcb4..70b4554d2fbe 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -471,7 +471,7 @@ static int audit_watch_handle_event(struct fsnotify_group *group,
 				    struct fsnotify_mark *inode_mark,
 				    struct fsnotify_mark *vfsmount_mark,
 				    u32 mask, void *data, int data_type,
-				    const unsigned char *dname)
+				    const unsigned char *dname, u32 cookie)
 {
 	struct inode *inode;
 	struct audit_parent *parent;
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 14a78cca384e..92062fd6cc8c 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -29,6 +29,8 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/security.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
 #include "audit.h"
 
 /*
@@ -1065,11 +1067,13 @@ int audit_rule_change(int type, __u32 portid, int seq, void *data,
 
 /**
  * audit_list_rules_send - list the audit rules
- * @portid: target portid for netlink audit messages
+ * @request_skb: skb of request we are replying to (used to target the reply)
 * @seq: netlink audit message sequence (serial) number
 */
-int audit_list_rules_send(__u32 portid, int seq)
+int audit_list_rules_send(struct sk_buff *request_skb, int seq)
 {
+	u32 portid = NETLINK_CB(request_skb).portid;
+	struct net *net = sock_net(NETLINK_CB(request_skb).sk);
 	struct task_struct *tsk;
 	struct audit_netlink_list *dest;
 	int err = 0;
@@ -1083,8 +1087,8 @@ int audit_list_rules_send(__u32 portid, int seq)
 	dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL);
 	if (!dest)
 		return -ENOMEM;
+	dest->net = get_net(net);
 	dest->portid = portid;
-	dest->pid = task_pid_vnr(current);
 	skb_queue_head_init(&dest->q);
 
 	mutex_lock(&audit_filter_mutex);
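The audit changes above all follow one pattern: instead of re-resolving the requester's network namespace by PID from the reply kthread (get_net_ns_by_pid() can fail or find the wrong namespace once the requester has exited), the request handler pins the namespace of the requesting socket and the kthread drops that reference once the reply has been sent. A condensed, illustrative sketch of the pattern using the same helpers as the patch (not a complete kernel function):

    /* in the netlink request handler, i.e. the requester's own context */
    struct net *net = sock_net(NETLINK_CB(request_skb).sk);
    reply->net = get_net(net);                  /* pin the namespace */
    reply->portid = NETLINK_CB(request_skb).portid;

    /* later, in the audit_send_reply_thread()/audit_send_list() kthread */
    struct audit_net *aunet = net_generic(reply->net, audit_net_id);
    netlink_unicast(aunet->nlsk, reply->skb, reply->portid, 0);
    put_net(reply->net);                        /* drop the reference taken above */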
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 10176cd5956a..7aef2f4b6c64 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1719,7 +1719,7 @@ void audit_putname(struct filename *name)
 	struct audit_context *context = current->audit_context;
 
 	BUG_ON(!context);
-	if (!context->in_syscall) {
+	if (!name->aname || !context->in_syscall) {
 #if AUDIT_DEBUG == 2
 		printk(KERN_ERR "%s:%d(:%d): final_putname(%p)\n",
 		       __FILE__, __LINE__, context->serial, name);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e2f46ba37f72..0c753ddd223b 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -886,7 +886,9 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
 		 * per-subsystem and moved to css->id so that lookups are
 		 * successful until the target css is released.
 		 */
+		mutex_lock(&cgroup_mutex);
 		idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
+		mutex_unlock(&cgroup_mutex);
 		cgrp->id = -1;
 
 		call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
@@ -1566,10 +1568,10 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
 		mutex_lock(&cgroup_mutex);
 		mutex_lock(&cgroup_root_mutex);
 
-		root_cgrp->id = idr_alloc(&root->cgroup_idr, root_cgrp,
-					  0, 1, GFP_KERNEL);
-		if (root_cgrp->id < 0)
+		ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL);
+		if (ret < 0)
 			goto unlock_drop;
+		root_cgrp->id = ret;
 
 		/* Check for name clashes with existing mounts */
 		ret = -EBUSY;
@@ -2763,10 +2765,7 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
 	 */
 	update_before = cgroup_serial_nr_next;
 
-	mutex_unlock(&cgroup_mutex);
-
 	/* add/rm files for all cgroups created before */
-	rcu_read_lock();
 	css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
 		struct cgroup *cgrp = css->cgroup;
 
@@ -2775,23 +2774,19 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
 
 		inode = cgrp->dentry->d_inode;
 		dget(cgrp->dentry);
-		rcu_read_unlock();
-
 		dput(prev);
 		prev = cgrp->dentry;
 
+		mutex_unlock(&cgroup_mutex);
 		mutex_lock(&inode->i_mutex);
 		mutex_lock(&cgroup_mutex);
 		if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp))
 			ret = cgroup_addrm_files(cgrp, cfts, is_add);
-		mutex_unlock(&cgroup_mutex);
 		mutex_unlock(&inode->i_mutex);
-
-		rcu_read_lock();
 		if (ret)
 			break;
 	}
-	rcu_read_unlock();
+	mutex_unlock(&cgroup_mutex);
 	dput(prev);
 	deactivate_super(sb);
 	return ret;
@@ -2910,9 +2905,14 @@ static void cgroup_enable_task_cg_lists(void)
 		 * We should check if the process is exiting, otherwise
 		 * it will race with cgroup_exit() in that the list
 		 * entry won't be deleted though the process has exited.
+		 * Do it while holding siglock so that we don't end up
+		 * racing against cgroup_exit().
 		 */
+		spin_lock_irq(&p->sighand->siglock);
 		if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
 			list_add(&p->cg_list, &task_css_set(p)->tasks);
+		spin_unlock_irq(&p->sighand->siglock);
+
 		task_unlock(p);
 	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
@@ -4112,17 +4112,17 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
 
 	err = percpu_ref_init(&css->refcnt, css_release);
 	if (err)
-		goto err_free;
+		goto err_free_css;
 
 	init_css(css, ss, cgrp);
 
 	err = cgroup_populate_dir(cgrp, 1 << ss->subsys_id);
 	if (err)
-		goto err_free;
+		goto err_free_percpu_ref;
 
 	err = online_css(css);
 	if (err)
-		goto err_free;
+		goto err_clear_dir;
 
 	dget(cgrp->dentry);
 	css_get(css->parent);
@@ -4138,8 +4138,11 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
 
 	return 0;
 
-err_free:
+err_clear_dir:
+	cgroup_clear_dir(css->cgroup, 1 << css->ss->subsys_id);
+err_free_percpu_ref:
 	percpu_ref_cancel_init(&css->refcnt);
+err_free_css:
 	ss->css_free(css);
 	return err;
 }
@@ -4158,7 +4161,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 	struct cgroup *cgrp;
 	struct cgroup_name *name;
 	struct cgroupfs_root *root = parent->root;
-	int ssid, err = 0;
+	int ssid, err;
 	struct cgroup_subsys *ss;
 	struct super_block *sb = root->sb;
 
@@ -4168,19 +4171,13 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 		return -ENOMEM;
 
 	name = cgroup_alloc_name(dentry);
-	if (!name)
+	if (!name) {
+		err = -ENOMEM;
 		goto err_free_cgrp;
+	}
 	rcu_assign_pointer(cgrp->name, name);
 
 	/*
-	 * Temporarily set the pointer to NULL, so idr_find() won't return
-	 * a half-baked cgroup.
-	 */
-	cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
-	if (cgrp->id < 0)
-		goto err_free_name;
-
-	/*
 	 * Only live parents can have children. Note that the liveliness
 	 * check isn't strictly necessary because cgroup_mkdir() and
 	 * cgroup_rmdir() are fully synchronized by i_mutex; however, do it
@@ -4189,7 +4186,17 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 	 */
 	if (!cgroup_lock_live_group(parent)) {
 		err = -ENODEV;
-		goto err_free_id;
+		goto err_free_name;
+	}
+
+	/*
+	 * Temporarily set the pointer to NULL, so idr_find() won't return
+	 * a half-baked cgroup.
+	 */
+	cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
+	if (cgrp->id < 0) {
+		err = -ENOMEM;
+		goto err_unlock;
 	}
 
 	/* Grab a reference on the superblock so the hierarchy doesn't
@@ -4221,7 +4228,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 	 */
 	err = cgroup_create_file(dentry, S_IFDIR | mode, sb);
 	if (err < 0)
-		goto err_unlock;
+		goto err_free_id;
 	lockdep_assert_held(&dentry->d_inode->i_mutex);
 
 	cgrp->serial_nr = cgroup_serial_nr_next++;
@@ -4257,12 +4264,12 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 
 	return 0;
 
-err_unlock:
-	mutex_unlock(&cgroup_mutex);
-	/* Release the reference count that we took on the superblock */
-	deactivate_super(sb);
 err_free_id:
 	idr_remove(&root->cgroup_idr, cgrp->id);
+	/* Release the reference count that we took on the superblock */
+	deactivate_super(sb);
+err_unlock:
+	mutex_unlock(&cgroup_mutex);
 err_free_name:
 	kfree(rcu_dereference_raw(cgrp->name));
 err_free_cgrp:
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 4410ac6a55f1..e6b1b66afe52 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -974,12 +974,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 * Temporarilly set tasks mems_allowed to target nodes of migration,
 * so that the migration code can allocate pages on these nodes.
 *
- * Call holding cpuset_mutex, so current's cpuset won't change
- * during this call, as manage_mutex holds off any cpuset_attach()
- * calls. Therefore we don't need to take task_lock around the
- * call to guarantee_online_mems(), as we know no one is changing
- * our task's cpuset.
- *
 * While the mm_struct we are migrating is typically from some
 * other task, the task_struct mems_allowed that we are hacking
 * is for our current task, which must allocate new pages for that
@@ -996,8 +990,10 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 
 	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
 
+	rcu_read_lock();
 	mems_cs = effective_nodemask_cpuset(task_cs(tsk));
 	guarantee_online_mems(mems_cs, &tsk->mems_allowed);
+	rcu_read_unlock();
 }
 
 /*
@@ -2486,9 +2482,9 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 
 	task_lock(current);
 	cs = nearest_hardwall_ancestor(task_cs(current));
+	allowed = node_isset(node, cs->mems_allowed);
 	task_unlock(current);
 
-	allowed = node_isset(node, cs->mems_allowed);
 	mutex_unlock(&callback_mutex);
 	return allowed;
 }
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 56003c6edfd3..fa0b2d4ad83c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7856,14 +7856,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
 static void __perf_event_exit_context(void *__info)
 {
 	struct perf_event_context *ctx = __info;
-	struct perf_event *event, *tmp;
+	struct perf_event *event;
 
 	perf_pmu_rotate_stop(ctx->pmu);
 
-	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
-		__perf_remove_from_context(event);
-	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
+	rcu_read_lock();
+	list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
 		__perf_remove_from_context(event);
+	rcu_read_unlock();
 }
 
 static void perf_event_exit_cpu_context(int cpu)
@@ -7887,11 +7887,11 @@ static void perf_event_exit_cpu(int cpu)
 {
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
+	perf_event_exit_cpu_context(cpu);
+
 	mutex_lock(&swhash->hlist_mutex);
 	swevent_hlist_release(swhash);
 	mutex_unlock(&swhash->hlist_mutex);
-
-	perf_event_exit_cpu_context(cpu);
 }
 #else
 static inline void perf_event_exit_cpu(int cpu) { }
diff --git a/kernel/futex.c b/kernel/futex.c
index 44a1261cb9ff..08ec814ad9d2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -234,6 +234,7 @@ static const struct futex_q futex_q_init = {
 * waiting on a futex.
 */
 struct futex_hash_bucket {
+	atomic_t waiters;
 	spinlock_t lock;
 	struct plist_head chain;
 } ____cacheline_aligned_in_smp;
@@ -253,22 +254,37 @@ static inline void futex_get_mm(union futex_key *key)
 	smp_mb__after_atomic_inc();
 }
 
-static inline bool hb_waiters_pending(struct futex_hash_bucket *hb)
+/*
+ * Reflects a new waiter being added to the waitqueue.
+ */
+static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
 {
 #ifdef CONFIG_SMP
+	atomic_inc(&hb->waiters);
 	/*
-	 * Tasks trying to enter the critical region are most likely
-	 * potential waiters that will be added to the plist. Ensure
-	 * that wakers won't miss to-be-slept tasks in the window between
-	 * the wait call and the actual plist_add.
+	 * Full barrier (A), see the ordering comment above.
 	 */
-	if (spin_is_locked(&hb->lock))
-		return true;
-	smp_rmb(); /* Make sure we check the lock state first */
+	smp_mb__after_atomic_inc();
+#endif
+}
+
+/*
+ * Reflects a waiter being removed from the waitqueue by wakeup
+ * paths.
+ */
+static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
+{
+#ifdef CONFIG_SMP
+	atomic_dec(&hb->waiters);
+#endif
+}
 
-	return !plist_head_empty(&hb->chain);
+static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
+{
+#ifdef CONFIG_SMP
+	return atomic_read(&hb->waiters);
 #else
-	return true;
+	return 1;
 #endif
 }
 
@@ -954,6 +970,7 @@ static void __unqueue_futex(struct futex_q *q)
 
 	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
 	plist_del(&q->list, &hb->chain);
+	hb_waiters_dec(hb);
 }
 
 /*
@@ -1257,7 +1274,9 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
 	 */
 	if (likely(&hb1->chain != &hb2->chain)) {
 		plist_del(&q->list, &hb1->chain);
+		hb_waiters_dec(hb1);
 		plist_add(&q->list, &hb2->chain);
+		hb_waiters_inc(hb2);
 		q->lock_ptr = &hb2->lock;
 	}
 	get_futex_key_refs(key2);
@@ -1600,6 +1619,17 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
 	struct futex_hash_bucket *hb;
 
 	hb = hash_futex(&q->key);
+
+	/*
+	 * Increment the counter before taking the lock so that
+	 * a potential waker won't miss a to-be-slept task that is
+	 * waiting for the spinlock. This is safe as all queue_lock()
+	 * users end up calling queue_me(). Similarly, for housekeeping,
+	 * decrement the counter at queue_unlock() when some error has
+	 * occurred and we don't end up adding the task to the list.
+	 */
+	hb_waiters_inc(hb);
+
 	q->lock_ptr = &hb->lock;
 
 	spin_lock(&hb->lock); /* implies MB (A) */
@@ -1611,6 +1641,7 @@ queue_unlock(struct futex_hash_bucket *hb)
 	__releases(&hb->lock)
 {
 	spin_unlock(&hb->lock);
+	hb_waiters_dec(hb);
 }
 
 /**
@@ -2342,6 +2373,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
 		 * Unqueue the futex_q and determine which it was.
 		 */
 		plist_del(&q->list, &hb->chain);
+		hb_waiters_dec(hb);
 
 		/* Handle spurious wakeups gracefully */
 		ret = -EWOULDBLOCK;
@@ -2875,6 +2907,7 @@ static int __init futex_init(void)
 		futex_cmpxchg_enabled = 1;
 
 	for (i = 0; i < futex_hashsize; i++) {
+		atomic_set(&futex_queues[i].waiters, 0);
 		plist_head_init(&futex_queues[i].chain);
 		spin_lock_init(&futex_queues[i].lock);
 	}
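The futex hunks above replace the plist/spinlock heuristic with an explicit per-bucket waiter count: a waiter bumps hb->waiters (a full barrier) before taking hb->lock, so a waker that reads zero from hb_waiters_pending() may skip the lock entirely; futex_wake() is assumed to perform that check, it is not part of this diff. The same ordering idea can be shown outside the kernel with C11 atomics and a condition variable; this is a hypothetical stand-alone analog, not kernel code:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int waiters;        /* plays the role of hb->waiters */
    static atomic_int ready;          /* plays the role of the futex word */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;  /* hb->lock  */
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;   /* hb->chain */

    static void *waiter(void *arg)
    {
        atomic_fetch_add(&waiters, 1);     /* counter first: full barrier, like hb_waiters_inc() */
        pthread_mutex_lock(&lock);
        while (!atomic_load(&ready))       /* re-check the condition under the lock */
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        atomic_fetch_sub(&waiters, 1);     /* like hb_waiters_dec() on unqueue */
        return NULL;
    }

    static void waker(void)
    {
        atomic_store(&ready, 1);           /* publish the new state first */
        if (!atomic_load(&waiters))        /* like hb_waiters_pending(): nobody there, skip the lock */
            return;
        pthread_mutex_lock(&lock);
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, waiter, NULL);
        waker();
        pthread_join(t, NULL);
        puts("woken");
        return 0;
    }

Either the waiter's increment is ordered before the waker's read (so the waker takes the lock and broadcasts), or the waker's read comes first, in which case the waiter is guaranteed to see ready == 1 and never sleeps; that is exactly the window the kernel comment in queue_lock() describes.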
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 4a1fef09f658..07cbdfea9ae2 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -40,6 +40,7 @@ config IRQ_EDGE_EOI_HANDLER
 # Generic configurable interrupt chip implementation
 config GENERIC_IRQ_CHIP
 	bool
+	select IRQ_DOMAIN
 
 # Generic irq_domain hw <--> linux irq number translation
 config IRQ_DOMAIN
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
index bd8e788d71e0..1ef0606797c9 100644
--- a/kernel/irq/devres.c
+++ b/kernel/irq/devres.c
@@ -73,6 +73,51 @@ int devm_request_threaded_irq(struct device *dev, unsigned int irq,
 EXPORT_SYMBOL(devm_request_threaded_irq);
 
 /**
+ * devm_request_any_context_irq - allocate an interrupt line for a managed device
+ * @dev: device to request interrupt for
+ * @irq: Interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs
+ * @thread_fn: function to be called in a threaded interrupt context. NULL
+ *             for devices which handle everything in @handler
+ * @irqflags: Interrupt type flags
+ * @devname: An ascii name for the claiming device
+ * @dev_id: A cookie passed back to the handler function
+ *
+ * Except for the extra @dev argument, this function takes the
+ * same arguments and performs the same function as
+ * request_any_context_irq(). IRQs requested with this function will be
+ * automatically freed on driver detach.
+ *
+ * If an IRQ allocated with this function needs to be freed
+ * separately, devm_free_irq() must be used.
+ */
+int devm_request_any_context_irq(struct device *dev, unsigned int irq,
+			      irq_handler_t handler, unsigned long irqflags,
+			      const char *devname, void *dev_id)
+{
+	struct irq_devres *dr;
+	int rc;
+
+	dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres),
+			  GFP_KERNEL);
+	if (!dr)
+		return -ENOMEM;
+
+	rc = request_any_context_irq(irq, handler, irqflags, devname, dev_id);
+	if (rc) {
+		devres_free(dr);
+		return rc;
+	}
+
+	dr->irq = irq;
+	dr->dev_id = dev_id;
+	devres_add(dev, dr);
+
+	return 0;
+}
+EXPORT_SYMBOL(devm_request_any_context_irq);
+
+/**
 * devm_free_irq - free an interrupt
 * @dev: device to free interrupt for
 * @irq: Interrupt line to free
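The new devres helper is called like request_any_context_irq() with the device prepended, and the IRQ is released automatically on driver detach. A minimal sketch of a driver probe using it (the driver, handler and device names here are hypothetical; error handling simply treats a negative return as failure, as with request_any_context_irq()):

    static irqreturn_t foo_isr(int irq, void *dev_id)
    {
        /* acknowledge the hypothetical device here */
        return IRQ_HANDLED;
    }

    static int foo_probe(struct platform_device *pdev)
    {
        int irq = platform_get_irq(pdev, 0);
        int ret;

        if (irq < 0)
            return irq;

        /* May end up as a hardirq or a nested threaded handler depending
         * on the parent irqchip; no explicit cleanup is needed on remove. */
        ret = devm_request_any_context_irq(&pdev->dev, irq, foo_isr, 0,
                                           dev_name(&pdev->dev), pdev);
        if (ret < 0)
            return ret;

        return 0;
    }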
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 192a302d6cfd..8ab8e9390297 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -274,6 +274,7 @@ struct irq_desc *irq_to_desc(unsigned int irq)
 {
 	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
 }
+EXPORT_SYMBOL(irq_to_desc);
 
 static void free_desc(unsigned int irq)
 {
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index cf68bb36fe58..f14033700c25 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -10,6 +10,7 @@
 #include <linux/mutex.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/topology.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 481a13c43b17..d3bf660cb57f 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -802,8 +802,7 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
 
 static void wake_threads_waitq(struct irq_desc *desc)
 {
-	if (atomic_dec_and_test(&desc->threads_active) &&
-	    waitqueue_active(&desc->wait_for_threads))
+	if (atomic_dec_and_test(&desc->threads_active))
 		wake_up(&desc->wait_for_threads);
 }
 
diff --git a/kernel/kmod.c b/kernel/kmod.c
index b086006c59e7..6b375af4958d 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -239,7 +239,7 @@ static int ____call_usermodehelper(void *data)
 
 	commit_creds(new);
 
-	retval = do_execve(sub_info->path,
+	retval = do_execve(getname_kernel(sub_info->path),
 			   (const char __user *const __user *)sub_info->argv,
 			   (const char __user *const __user *)sub_info->envp);
 	if (!retval)
diff --git a/kernel/power/console.c b/kernel/power/console.c
index eacb8bd8cab4..aba9c545a0e3 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -9,6 +9,7 @@
 #include <linux/kbd_kern.h>
 #include <linux/vt.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include "power.h"
 
 #define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index b1d255f04135..4dae9cbe9259 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1076,7 +1076,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 		next_seq = log_next_seq;
 
 		len = 0;
-		prev = 0;
 		while (len >= 0 && seq < next_seq) {
 			struct printk_log *msg = log_from_idx(idx);
 			int textlen;
@@ -2788,7 +2787,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
 	next_idx = idx;
 
 	l = 0;
-	prev = 0;
 	while (seq < dumper->next_seq) {
 		struct printk_log *msg = log_from_idx(idx);
 
diff --git a/kernel/profile.c b/kernel/profile.c
index 6631e1ef55ab..ebdd9c1a86b4 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -549,14 +549,14 @@ static int create_hash_tables(void)
 		struct page *page;
 
 		page = alloc_pages_exact_node(node,
-					      GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+					      GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 					      0);
 		if (!page)
 			goto out_cleanup;
 		per_cpu(cpu_profile_hits, cpu)[1]
 				= (struct profile_hit *)page_address(page);
 		page = alloc_pages_exact_node(node,
-					      GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+					      GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 					      0);
 		if (!page)
 			goto out_cleanup;
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 43c2bcc35761..b30a2924ef14 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -301,14 +301,14 @@ u64 sched_clock_cpu(int cpu)
 	if (unlikely(!sched_clock_running))
 		return 0ull;
 
-	preempt_disable();
+	preempt_disable_notrace();
 	scd = cpu_sdc(cpu);
 
 	if (cpu != smp_processor_id())
 		clock = sched_clock_remote(scd);
 	else
 		clock = sched_clock_local(scd);
-	preempt_enable();
+	preempt_enable_notrace();
 
 	return clock;
 }
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b46131ef6aab..f5c6635b806c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1952,7 +1952,7 @@ static int dl_overflow(struct task_struct *p, int policy,
 {
 
 	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
-	u64 period = attr->sched_period;
+	u64 period = attr->sched_period ?: attr->sched_deadline;
 	u64 runtime = attr->sched_runtime;
 	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
 	int cpus, err = -1;
@@ -3338,6 +3338,15 @@ recheck:
 			return -EPERM;
 		}
 
+		/*
+		 * Can't set/change SCHED_DEADLINE policy at all for now
+		 * (safest behavior); in the future we would like to allow
+		 * unprivileged DL tasks to increase their relative deadline
+		 * or reduce their runtime (both ways reducing utilization)
+		 */
+		if (dl_policy(policy))
+			return -EPERM;
+
 		/*
 		 * Treat SCHED_IDLE as nice 20. Only allow a switch to
 		 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
@@ -3661,13 +3670,14 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 */
-SYSCALL_DEFINE2(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr)
+SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
+			       unsigned int, flags)
 {
 	struct sched_attr attr;
 	struct task_struct *p;
 	int retval;
 
-	if (!uattr || pid < 0)
+	if (!uattr || pid < 0 || flags)
 		return -EINVAL;
 
 	if (sched_copy_attr(uattr, &attr))
@@ -3786,7 +3796,7 @@ static int sched_read_attr(struct sched_attr __user *uattr,
 		attr->size = usize;
 	}
 
-	ret = copy_to_user(uattr, attr, usize);
+	ret = copy_to_user(uattr, attr, attr->size);
 	if (ret)
 		return -EFAULT;
 
@@ -3804,8 +3814,8 @@ err_size:
 * @uattr: structure containing the extended parameters.
 * @size: sizeof(attr) for fwd/bwd comp.
 */
-SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
-		unsigned int, size)
+SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
+		unsigned int, size, unsigned int, flags)
 {
 	struct sched_attr attr = {
 		.size = sizeof(struct sched_attr),
@@ -3814,7 +3824,7 @@ SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
 	int retval;
 
 	if (!uattr || pid < 0 || size > PAGE_SIZE ||
-	    size < SCHED_ATTR_SIZE_VER0)
+	    size < SCHED_ATTR_SIZE_VER0 || flags)
 		return -EINVAL;
 
 	rcu_read_lock();
@@ -7422,6 +7432,7 @@ static int sched_dl_global_constraints(void)
 	u64 period = global_rt_period();
 	u64 new_bw = to_ratio(period, runtime);
 	int cpu, ret = 0;
+	unsigned long flags;
 
 	/*
 	 * Here we want to check the bandwidth not being set to some
@@ -7435,10 +7446,10 @@ static int sched_dl_global_constraints(void)
 	for_each_possible_cpu(cpu) {
 		struct dl_bw *dl_b = dl_bw_of(cpu);
 
-		raw_spin_lock(&dl_b->lock);
+		raw_spin_lock_irqsave(&dl_b->lock, flags);
 		if (new_bw < dl_b->total_bw)
 			ret = -EBUSY;
-		raw_spin_unlock(&dl_b->lock);
+		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 
 		if (ret)
 			break;
@@ -7451,6 +7462,7 @@ static void sched_dl_do_global(void)
 {
 	u64 new_bw = -1;
 	int cpu;
+	unsigned long flags;
 
 	def_dl_bandwidth.dl_period = global_rt_period();
 	def_dl_bandwidth.dl_runtime = global_rt_runtime();
@@ -7464,9 +7476,9 @@ static void sched_dl_do_global(void)
 	for_each_possible_cpu(cpu) {
 		struct dl_bw *dl_b = dl_bw_of(cpu);
 
-		raw_spin_lock(&dl_b->lock);
+		raw_spin_lock_irqsave(&dl_b->lock, flags);
 		dl_b->bw = new_bw;
-		raw_spin_unlock(&dl_b->lock);
+		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 	}
 }
 
@@ -7475,7 +7487,8 @@ static int sched_rt_global_validate(void)
 	if (sysctl_sched_rt_period <= 0)
 		return -EINVAL;
 
-	if (sysctl_sched_rt_runtime > sysctl_sched_rt_period)
+	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
+		(sysctl_sched_rt_runtime > sysctl_sched_rt_period))
 		return -EINVAL;
 
 	return 0;
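From user space the new trailing argument is simply passed as 0; a non-zero flags word is rejected with EINVAL, and per the hunk above an unprivileged SCHED_DEADLINE request now fails with EPERM. A minimal sketch of calling the raw syscalls on the current task, assuming a libc whose headers provide __NR_sched_setattr and __NR_sched_getattr but no wrappers (the local struct sched_attr mirrors include/uapi/linux/sched.h):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    struct sched_attr {
        uint32_t size;
        uint32_t sched_policy;
        uint64_t sched_flags;
        int32_t  sched_nice;        /* SCHED_NORMAL / SCHED_BATCH */
        uint32_t sched_priority;    /* SCHED_FIFO / SCHED_RR */
        uint64_t sched_runtime;     /* SCHED_DEADLINE */
        uint64_t sched_deadline;
        uint64_t sched_period;
    };

    int main(void)
    {
        struct sched_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);

        /* last argument is the new flags word: must be 0 */
        if (syscall(__NR_sched_getattr, 0, &attr, sizeof(attr), 0))
            perror("sched_getattr");
        else
            printf("policy=%u nice=%d\n", attr.sched_policy, attr.sched_nice);

        /* write the (unchanged) attributes back, again with flags == 0 */
        if (syscall(__NR_sched_setattr, 0, &attr, 0))
            perror("sched_setattr");
        return 0;
    }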
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 045fc74e3f09..5b9bb42b2d47 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -70,7 +70,7 @@ static void cpudl_heapify(struct cpudl *cp, int idx)
 
 static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl)
 {
-	WARN_ON(idx > num_present_cpus() || idx == IDX_INVALID);
+	WARN_ON(idx == IDX_INVALID || !cpu_present(idx));
 
 	if (dl_time_before(new_dl, cp->elements[idx].dl)) {
 		cp->elements[idx].dl = new_dl;
@@ -117,7 +117,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 	}
 
 out:
-	WARN_ON(best_cpu > num_present_cpus() && best_cpu != -1);
+	WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
 
 	return best_cpu;
 }
@@ -137,7 +137,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 	int old_idx, new_cpu;
 	unsigned long flags;
 
-	WARN_ON(cpu > num_present_cpus());
+	WARN_ON(!cpu_present(cpu));
 
 	raw_spin_lock_irqsave(&cp->lock, flags);
 	old_idx = cp->cpu_to_idx[cpu];
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 0dd5e0971a07..6e79b3faa4cd 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c | |||
| @@ -121,7 +121,7 @@ static inline void dl_clear_overload(struct rq *rq) | |||
| 121 | 121 | ||
| 122 | static void update_dl_migration(struct dl_rq *dl_rq) | 122 | static void update_dl_migration(struct dl_rq *dl_rq) |
| 123 | { | 123 | { |
| 124 | if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_total > 1) { | 124 | if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) { |
| 125 | if (!dl_rq->overloaded) { | 125 | if (!dl_rq->overloaded) { |
| 126 | dl_set_overload(rq_of_dl_rq(dl_rq)); | 126 | dl_set_overload(rq_of_dl_rq(dl_rq)); |
| 127 | dl_rq->overloaded = 1; | 127 | dl_rq->overloaded = 1; |
| @@ -135,9 +135,7 @@ static void update_dl_migration(struct dl_rq *dl_rq) | |||
| 135 | static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) | 135 | static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) |
| 136 | { | 136 | { |
| 137 | struct task_struct *p = dl_task_of(dl_se); | 137 | struct task_struct *p = dl_task_of(dl_se); |
| 138 | dl_rq = &rq_of_dl_rq(dl_rq)->dl; | ||
| 139 | 138 | ||
| 140 | dl_rq->dl_nr_total++; | ||
| 141 | if (p->nr_cpus_allowed > 1) | 139 | if (p->nr_cpus_allowed > 1) |
| 142 | dl_rq->dl_nr_migratory++; | 140 | dl_rq->dl_nr_migratory++; |
| 143 | 141 | ||
| @@ -147,9 +145,7 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) | |||
| 147 | static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) | 145 | static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) |
| 148 | { | 146 | { |
| 149 | struct task_struct *p = dl_task_of(dl_se); | 147 | struct task_struct *p = dl_task_of(dl_se); |
| 150 | dl_rq = &rq_of_dl_rq(dl_rq)->dl; | ||
| 151 | 148 | ||
| 152 | dl_rq->dl_nr_total--; | ||
| 153 | if (p->nr_cpus_allowed > 1) | 149 | if (p->nr_cpus_allowed > 1) |
| 154 | dl_rq->dl_nr_migratory--; | 150 | dl_rq->dl_nr_migratory--; |
| 155 | 151 | ||
| @@ -566,6 +562,8 @@ int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se) | |||
| 566 | return 1; | 562 | return 1; |
| 567 | } | 563 | } |
| 568 | 564 | ||
| 565 | extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); | ||
| 566 | |||
| 569 | /* | 567 | /* |
| 570 | * Update the current task's runtime statistics (provided it is still | 568 | * Update the current task's runtime statistics (provided it is still |
| 571 | * a -deadline task and has not been removed from the dl_rq). | 569 | * a -deadline task and has not been removed from the dl_rq). |
| @@ -629,11 +627,13 @@ static void update_curr_dl(struct rq *rq) | |||
| 629 | struct rt_rq *rt_rq = &rq->rt; | 627 | struct rt_rq *rt_rq = &rq->rt; |
| 630 | 628 | ||
| 631 | raw_spin_lock(&rt_rq->rt_runtime_lock); | 629 | raw_spin_lock(&rt_rq->rt_runtime_lock); |
| 632 | rt_rq->rt_time += delta_exec; | ||
| 633 | /* | 630 | /* |
| 634 | * We'll let actual RT tasks worry about the overflow here, we | 631 | * We'll let actual RT tasks worry about the overflow here, we |
| 635 | * have our own CBS to keep us inline -- see above. | 632 | * have our own CBS to keep us inline; only account when RT |
| 633 | * bandwidth is relevant. | ||
| 636 | */ | 634 | */ |
| 635 | if (sched_rt_bandwidth_account(rt_rq)) | ||
| 636 | rt_rq->rt_time += delta_exec; | ||
| 637 | raw_spin_unlock(&rt_rq->rt_runtime_lock); | 637 | raw_spin_unlock(&rt_rq->rt_runtime_lock); |
| 638 | } | 638 | } |
| 639 | } | 639 | } |
| @@ -717,6 +717,7 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) | |||
| 717 | 717 | ||
| 718 | WARN_ON(!dl_prio(prio)); | 718 | WARN_ON(!dl_prio(prio)); |
| 719 | dl_rq->dl_nr_running++; | 719 | dl_rq->dl_nr_running++; |
| 720 | inc_nr_running(rq_of_dl_rq(dl_rq)); | ||
| 720 | 721 | ||
| 721 | inc_dl_deadline(dl_rq, deadline); | 722 | inc_dl_deadline(dl_rq, deadline); |
| 722 | inc_dl_migration(dl_se, dl_rq); | 723 | inc_dl_migration(dl_se, dl_rq); |
| @@ -730,6 +731,7 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) | |||
| 730 | WARN_ON(!dl_prio(prio)); | 731 | WARN_ON(!dl_prio(prio)); |
| 731 | WARN_ON(!dl_rq->dl_nr_running); | 732 | WARN_ON(!dl_rq->dl_nr_running); |
| 732 | dl_rq->dl_nr_running--; | 733 | dl_rq->dl_nr_running--; |
| 734 | dec_nr_running(rq_of_dl_rq(dl_rq)); | ||
| 733 | 735 | ||
| 734 | dec_dl_deadline(dl_rq, dl_se->deadline); | 736 | dec_dl_deadline(dl_rq, dl_se->deadline); |
| 735 | dec_dl_migration(dl_se, dl_rq); | 737 | dec_dl_migration(dl_se, dl_rq); |
| @@ -836,8 +838,6 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) | |||
| 836 | 838 | ||
| 837 | if (!task_current(rq, p) && p->nr_cpus_allowed > 1) | 839 | if (!task_current(rq, p) && p->nr_cpus_allowed > 1) |
| 838 | enqueue_pushable_dl_task(rq, p); | 840 | enqueue_pushable_dl_task(rq, p); |
| 839 | |||
| 840 | inc_nr_running(rq); | ||
| 841 | } | 841 | } |
| 842 | 842 | ||
| 843 | static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) | 843 | static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) |
| @@ -850,8 +850,6 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) | |||
| 850 | { | 850 | { |
| 851 | update_curr_dl(rq); | 851 | update_curr_dl(rq); |
| 852 | __dequeue_task_dl(rq, p, flags); | 852 | __dequeue_task_dl(rq, p, flags); |
| 853 | |||
| 854 | dec_nr_running(rq); | ||
| 855 | } | 853 | } |
| 856 | 854 | ||
| 857 | /* | 855 | /* |
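Editor's note on the deadline.c hunks above: the rq-wide nr_running bump is moved out of enqueue_task_dl()/dequeue_task_dl() and into inc_dl_tasks()/dec_dl_tasks(), right next to dl_nr_running, and the now-redundant dl_nr_total counter goes away. A minimal userspace sketch of that bookkeeping idea follows; the struct layout and helper names are illustrative only, not the kernel's.

/* Illustrative sketch (not kernel code): co-locating the two counter
 * updates so the rq-wide count can never drift from the dl count. */
#include <assert.h>
#include <stdio.h>

struct dl_rq { unsigned long dl_nr_running; };
struct rq    { unsigned long nr_running; struct dl_rq dl; };

static void inc_dl_tasks(struct rq *rq)
{
	rq->dl.dl_nr_running++;
	rq->nr_running++;          /* updated in the same place... */
}

static void dec_dl_tasks(struct rq *rq)
{
	assert(rq->dl.dl_nr_running);
	rq->dl.dl_nr_running--;
	rq->nr_running--;          /* ...so the two cannot diverge */
}

int main(void)
{
	struct rq rq = { 0 };

	inc_dl_tasks(&rq);
	inc_dl_tasks(&rq);
	dec_dl_tasks(&rq);
	assert(rq.nr_running == rq.dl.dl_nr_running);
	printf("nr_running=%lu dl_nr_running=%lu\n",
	       rq.nr_running, rq.dl.dl_nr_running);
	return 0;
}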
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 966cc2bfcb77..9b4c4f320130 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
| @@ -1757,6 +1757,8 @@ void task_numa_work(struct callback_head *work) | |||
| 1757 | start = end; | 1757 | start = end; |
| 1758 | if (pages <= 0) | 1758 | if (pages <= 0) |
| 1759 | goto out; | 1759 | goto out; |
| 1760 | |||
| 1761 | cond_resched(); | ||
| 1760 | } while (end != vma->vm_end); | 1762 | } while (end != vma->vm_end); |
| 1761 | } | 1763 | } |
| 1762 | 1764 | ||
| @@ -6999,15 +7001,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p) | |||
| 6999 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | 7001 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 7000 | 7002 | ||
| 7001 | /* | 7003 | /* |
| 7002 | * Ensure the task's vruntime is normalized, so that when its | 7004 | * Ensure the task's vruntime is normalized, so that when it's |
| 7003 | * switched back to the fair class the enqueue_entity(.flags=0) will | 7005 | * switched back to the fair class the enqueue_entity(.flags=0) will |
| 7004 | * do the right thing. | 7006 | * do the right thing. |
| 7005 | * | 7007 | * |
| 7006 | * If it was on_rq, then the dequeue_entity(.flags=0) will already | 7008 | * If it's on_rq, then the dequeue_entity(.flags=0) will already |
| 7007 | * have normalized the vruntime, if it was !on_rq, then only when | 7009 | * have normalized the vruntime, if it's !on_rq, then only when |
| 7008 | * the task is sleeping will it still have non-normalized vruntime. | 7010 | * the task is sleeping will it still have non-normalized vruntime. |
| 7009 | */ | 7011 | */ |
| 7010 | if (!se->on_rq && p->state != TASK_RUNNING) { | 7012 | if (!p->on_rq && p->state != TASK_RUNNING) { |
| 7011 | /* | 7013 | /* |
| 7012 | * Fix up our vruntime so that the current sleep doesn't | 7014 | * Fix up our vruntime so that the current sleep doesn't |
| 7013 | * cause 'unlimited' sleep bonus. | 7015 | * cause 'unlimited' sleep bonus. |
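Editor's note on the fair.c hunks: task_numa_work() gains a cond_resched() between scanned chunks, and switched_from_fair() now tests p->on_rq rather than se->on_rq. The chunk-plus-yield pattern is easy to model in userspace; the sketch below uses sched_yield() as a stand-in for cond_resched() and made-up chunk sizes.

/* Illustrative sketch: break a long scan into chunks and offer the
 * scheduler a chance to run something else between chunks. In the
 * kernel this is cond_resched(); sched_yield() stands in here. */
#include <sched.h>
#include <stdio.h>

#define CHUNK 4096UL

static unsigned long scan_range(unsigned long start, unsigned long end)
{
	unsigned long touched = 0;

	do {
		unsigned long next = start + CHUNK;

		if (next > end)
			next = end;
		touched += next - start;   /* "work" on [start, next) */
		start = next;

		sched_yield();             /* yield point between chunks */
	} while (start != end);

	return touched;
}

int main(void)
{
	printf("touched %lu bytes\n", scan_range(0, 1UL << 20));
	return 0;
}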
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index a2740b775b45..1999021042c7 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c | |||
| @@ -538,6 +538,14 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) | |||
| 538 | 538 | ||
| 539 | #endif /* CONFIG_RT_GROUP_SCHED */ | 539 | #endif /* CONFIG_RT_GROUP_SCHED */ |
| 540 | 540 | ||
| 541 | bool sched_rt_bandwidth_account(struct rt_rq *rt_rq) | ||
| 542 | { | ||
| 543 | struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); | ||
| 544 | |||
| 545 | return (hrtimer_active(&rt_b->rt_period_timer) || | ||
| 546 | rt_rq->rt_time < rt_b->rt_runtime); | ||
| 547 | } | ||
| 548 | |||
| 541 | #ifdef CONFIG_SMP | 549 | #ifdef CONFIG_SMP |
| 542 | /* | 550 | /* |
| 543 | * We ran out of runtime, see if we can borrow some from our neighbours. | 551 | * We ran out of runtime, see if we can borrow some from our neighbours. |
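Editor's note: the new sched_rt_bandwidth_account() helper above gates whether -deadline execution time is charged to rt_time (see the update_curr_dl() hunk earlier). A rough userspace model of the gate follows; the field and function names are simplified stand-ins, not the kernel's types.

/* Illustrative model (not kernel code) of the accounting gate: only
 * charge runtime while the bandwidth period timer is running or the
 * budget has not yet been used up. */
#include <stdbool.h>
#include <stdio.h>

struct rt_bandwidth { bool period_timer_active; unsigned long long rt_runtime; };
struct rt_rq        { unsigned long long rt_time; struct rt_bandwidth *rt_b; };

static bool rt_bandwidth_account(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = rt_rq->rt_b;

	return rt_b->period_timer_active ||
	       rt_rq->rt_time < rt_b->rt_runtime;
}

static void charge(struct rt_rq *rt_rq, unsigned long long delta_exec)
{
	if (rt_bandwidth_account(rt_rq))
		rt_rq->rt_time += delta_exec;   /* otherwise: don't accumulate */
}

int main(void)
{
	struct rt_bandwidth rt_b = { .period_timer_active = false, .rt_runtime = 100 };
	struct rt_rq rt_rq = { .rt_time = 0, .rt_b = &rt_b };

	for (int i = 0; i < 10; i++)
		charge(&rt_rq, 30);
	/* with the timer inactive, rt_time stops growing once the budget is crossed */
	printf("rt_time=%llu\n", rt_rq.rt_time);
	return 0;
}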
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index c2119fd20f8b..f964add50f38 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h | |||
| @@ -462,7 +462,6 @@ struct dl_rq { | |||
| 462 | } earliest_dl; | 462 | } earliest_dl; |
| 463 | 463 | ||
| 464 | unsigned long dl_nr_migratory; | 464 | unsigned long dl_nr_migratory; |
| 465 | unsigned long dl_nr_total; | ||
| 466 | int overloaded; | 465 | int overloaded; |
| 467 | 466 | ||
| 468 | /* | 467 | /* |
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 84571e09c907..01fbae5b97b7 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c | |||
| @@ -293,7 +293,7 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void * | |||
| 293 | */ | 293 | */ |
| 294 | smp_call_function_single(min(cpu1, cpu2), | 294 | smp_call_function_single(min(cpu1, cpu2), |
| 295 | &irq_cpu_stop_queue_work, | 295 | &irq_cpu_stop_queue_work, |
| 296 | &call_args, 0); | 296 | &call_args, 1); |
| 297 | lg_local_unlock(&stop_cpus_lock); | 297 | lg_local_unlock(&stop_cpus_lock); |
| 298 | preempt_enable(); | 298 | preempt_enable(); |
| 299 | 299 | ||
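Editor's note: the stop_machine.c hunk flips the last argument of smp_call_function_single() from 0 to 1, i.e. the caller now blocks until the queued function has run on the target CPU. One consequence of waiting is that the on-stack call_args cannot go out of scope while the remote function still uses it; the pthread analogue below illustrates only that general lifetime point, not this patch's exact motivation, and all names in it are hypothetical.

/* Illustrative userspace analogue: the argument block lives on the
 * caller's stack, so the asynchronous work must be finished (the
 * "wait" case) before the frame is torn down. */
#include <pthread.h>
#include <stdio.h>

struct call_args { int cpu1, cpu2; };

static void *remote_fn(void *arg)
{
	struct call_args *a = arg;    /* only valid while the caller waits */

	printf("queuing work for cpus %d and %d\n", a->cpu1, a->cpu2);
	return NULL;
}

static int stop_two(int cpu1, int cpu2)
{
	struct call_args args = { .cpu1 = cpu1, .cpu2 = cpu2 };  /* on stack */
	pthread_t t;

	if (pthread_create(&t, NULL, remote_fn, &args))
		return -1;
	/*
	 * Joining here (the wait=1 analogue) guarantees remote_fn no longer
	 * touches &args once this function returns; detaching instead
	 * (the wait=0 analogue) would leave a use-after-scope window.
	 */
	return pthread_join(t, NULL);
}

int main(void)
{
	return stop_two(0, 1);
}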
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index 7a925ba456fb..a6a5bf53e86d 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c | |||
| @@ -51,7 +51,13 @@ | |||
| 51 | * HZ shrinks, so values greater than 8 overflow 32bits when | 51 | * HZ shrinks, so values greater than 8 overflow 32bits when |
| 52 | * HZ=100. | 52 | * HZ=100. |
| 53 | */ | 53 | */ |
| 54 | #if HZ < 34 | ||
| 55 | #define JIFFIES_SHIFT 6 | ||
| 56 | #elif HZ < 67 | ||
| 57 | #define JIFFIES_SHIFT 7 | ||
| 58 | #else | ||
| 54 | #define JIFFIES_SHIFT 8 | 59 | #define JIFFIES_SHIFT 8 |
| 60 | #endif | ||
| 55 | 61 | ||
| 56 | static cycle_t jiffies_read(struct clocksource *cs) | 62 | static cycle_t jiffies_read(struct clocksource *cs) |
| 57 | { | 63 | { |
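Editor's note: the jiffies.c hunk scales JIFFIES_SHIFT down for low HZ. The back-of-the-envelope check below assumes the jiffies clocksource mult is roughly (NSEC_PER_SEC / HZ) << JIFFIES_SHIFT and must fit in 32 bits, which is why smaller HZ values need a smaller shift; the helper and table are for illustration only.

/* Sanity-check sketch: pick the shift the same way the #if ladder does
 * and verify the resulting mult still fits in a u32. */
#include <inttypes.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static unsigned int jiffies_shift(unsigned int hz)
{
	if (hz < 34)
		return 6;
	if (hz < 67)
		return 7;
	return 8;
}

int main(void)
{
	unsigned int hz_table[] = { 24, 32, 48, 64, 100, 250, 1000 };

	for (size_t i = 0; i < sizeof(hz_table) / sizeof(hz_table[0]); i++) {
		unsigned int hz = hz_table[i];
		unsigned int shift = jiffies_shift(hz);
		uint64_t mult = (NSEC_PER_SEC / hz) << shift;

		printf("HZ=%-4u shift=%u mult=%" PRIu64 " fits_u32=%s\n",
		       hz, shift, mult, mult <= 0xffffffffULL ? "yes" : "no");
	}
	return 0;
}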
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c index 0abb36464281..4d23dc4d8139 100644 --- a/kernel/time/sched_clock.c +++ b/kernel/time/sched_clock.c | |||
| @@ -116,20 +116,42 @@ static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt) | |||
| 116 | void __init sched_clock_register(u64 (*read)(void), int bits, | 116 | void __init sched_clock_register(u64 (*read)(void), int bits, |
| 117 | unsigned long rate) | 117 | unsigned long rate) |
| 118 | { | 118 | { |
| 119 | u64 res, wrap, new_mask, new_epoch, cyc, ns; | ||
| 120 | u32 new_mult, new_shift; | ||
| 121 | ktime_t new_wrap_kt; | ||
| 119 | unsigned long r; | 122 | unsigned long r; |
| 120 | u64 res, wrap; | ||
| 121 | char r_unit; | 123 | char r_unit; |
| 122 | 124 | ||
| 123 | if (cd.rate > rate) | 125 | if (cd.rate > rate) |
| 124 | return; | 126 | return; |
| 125 | 127 | ||
| 126 | WARN_ON(!irqs_disabled()); | 128 | WARN_ON(!irqs_disabled()); |
| 127 | read_sched_clock = read; | ||
| 128 | sched_clock_mask = CLOCKSOURCE_MASK(bits); | ||
| 129 | cd.rate = rate; | ||
| 130 | 129 | ||
| 131 | /* calculate the mult/shift to convert counter ticks to ns. */ | 130 | /* calculate the mult/shift to convert counter ticks to ns. */ |
| 132 | clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 3600); | 131 | clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600); |
| 132 | |||
| 133 | new_mask = CLOCKSOURCE_MASK(bits); | ||
| 134 | |||
| 135 | /* calculate how many ns until we wrap */ | ||
| 136 | wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask); | ||
| 137 | new_wrap_kt = ns_to_ktime(wrap - (wrap >> 3)); | ||
| 138 | |||
| 139 | /* update epoch for new counter and update epoch_ns from old counter*/ | ||
| 140 | new_epoch = read(); | ||
| 141 | cyc = read_sched_clock(); | ||
| 142 | ns = cd.epoch_ns + cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask, | ||
| 143 | cd.mult, cd.shift); | ||
| 144 | |||
| 145 | raw_write_seqcount_begin(&cd.seq); | ||
| 146 | read_sched_clock = read; | ||
| 147 | sched_clock_mask = new_mask; | ||
| 148 | cd.rate = rate; | ||
| 149 | cd.wrap_kt = new_wrap_kt; | ||
| 150 | cd.mult = new_mult; | ||
| 151 | cd.shift = new_shift; | ||
| 152 | cd.epoch_cyc = new_epoch; | ||
| 153 | cd.epoch_ns = ns; | ||
| 154 | raw_write_seqcount_end(&cd.seq); | ||
| 133 | 155 | ||
| 134 | r = rate; | 156 | r = rate; |
| 135 | if (r >= 4000000) { | 157 | if (r >= 4000000) { |
| @@ -141,22 +163,12 @@ void __init sched_clock_register(u64 (*read)(void), int bits, | |||
| 141 | } else | 163 | } else |
| 142 | r_unit = ' '; | 164 | r_unit = ' '; |
| 143 | 165 | ||
| 144 | /* calculate how many ns until we wrap */ | ||
| 145 | wrap = clocks_calc_max_nsecs(cd.mult, cd.shift, 0, sched_clock_mask); | ||
| 146 | cd.wrap_kt = ns_to_ktime(wrap - (wrap >> 3)); | ||
| 147 | |||
| 148 | /* calculate the ns resolution of this counter */ | 166 | /* calculate the ns resolution of this counter */ |
| 149 | res = cyc_to_ns(1ULL, cd.mult, cd.shift); | 167 | res = cyc_to_ns(1ULL, new_mult, new_shift); |
| 168 | |||
| 150 | pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n", | 169 | pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n", |
| 151 | bits, r, r_unit, res, wrap); | 170 | bits, r, r_unit, res, wrap); |
| 152 | 171 | ||
| 153 | update_sched_clock(); | ||
| 154 | |||
| 155 | /* | ||
| 156 | * Ensure that sched_clock() starts off at 0ns | ||
| 157 | */ | ||
| 158 | cd.epoch_ns = 0; | ||
| 159 | |||
| 160 | /* Enable IRQ time accounting if we have a fast enough sched_clock */ | 172 | /* Enable IRQ time accounting if we have a fast enough sched_clock */ |
| 161 | if (irqtime > 0 || (irqtime == -1 && rate >= 1000000)) | 173 | if (irqtime > 0 || (irqtime == -1 && rate >= 1000000)) |
| 162 | enable_sched_clock_irqtime(); | 174 | enable_sched_clock_irqtime(); |
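Editor's note: the reworked sched_clock_register() above computes every new value (mult, shift, mask, wrap, epoch) up front and only then publishes them inside one raw_write_seqcount section, so a concurrent reader never mixes old and new fields. The sketch below models that publish pattern with C11 atomics as a stand-in for the kernel's seqcount; it assumes a single writer and omits the careful barriers and READ_ONCE a real seqlock needs.

/* Minimal sketch of seqcount-style publication: writer makes the
 * sequence odd, updates all related fields, then makes it even;
 * readers retry if they observed an odd or changed sequence. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct clock_data {
	atomic_uint seq;
	uint32_t mult, shift;
	uint64_t epoch_cyc, epoch_ns;
};

static void clock_update(struct clock_data *cd, uint32_t mult, uint32_t shift,
			 uint64_t epoch_cyc, uint64_t epoch_ns)
{
	atomic_fetch_add(&cd->seq, 1);   /* odd: update in progress */
	cd->mult = mult;
	cd->shift = shift;
	cd->epoch_cyc = epoch_cyc;
	cd->epoch_ns = epoch_ns;
	atomic_fetch_add(&cd->seq, 1);   /* even: consistent again */
}

static uint64_t clock_read_ns(struct clock_data *cd, uint64_t cyc)
{
	unsigned int seq;
	uint64_t ns;

	do {
		seq = atomic_load(&cd->seq);
		ns = cd->epoch_ns +
		     (((cyc - cd->epoch_cyc) * cd->mult) >> cd->shift);
	} while ((seq & 1) || seq != atomic_load(&cd->seq));

	return ns;
}

int main(void)
{
	struct clock_data cd = { .mult = 1, .shift = 0 };

	clock_update(&cd, 4, 2, 1000, 500);     /* 4 >> 2 == 1 ns per cycle */
	printf("%llu ns\n", (unsigned long long)clock_read_ns(&cd, 2000));
	return 0;
}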
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 43780ab5e279..98977a57ac72 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
| @@ -756,6 +756,7 @@ out: | |||
| 756 | static void tick_broadcast_clear_oneshot(int cpu) | 756 | static void tick_broadcast_clear_oneshot(int cpu) |
| 757 | { | 757 | { |
| 758 | cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask); | 758 | cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask); |
| 759 | cpumask_clear_cpu(cpu, tick_broadcast_pending_mask); | ||
| 759 | } | 760 | } |
| 760 | 761 | ||
| 761 | static void tick_broadcast_init_next_event(struct cpumask *mask, | 762 | static void tick_broadcast_init_next_event(struct cpumask *mask, |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 294b8a271a04..fc4da2d97f9b 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
| @@ -2397,6 +2397,13 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
| 2397 | write &= RB_WRITE_MASK; | 2397 | write &= RB_WRITE_MASK; |
| 2398 | tail = write - length; | 2398 | tail = write - length; |
| 2399 | 2399 | ||
| 2400 | /* | ||
| 2401 | * If this is the first commit on the page, then it has the same | ||
| 2402 | * timestamp as the page itself. | ||
| 2403 | */ | ||
| 2404 | if (!tail) | ||
| 2405 | delta = 0; | ||
| 2406 | |||
| 2400 | /* See if we shot pass the end of this buffer page */ | 2407 | /* See if we shot pass the end of this buffer page */ |
| 2401 | if (unlikely(write > BUF_PAGE_SIZE)) | 2408 | if (unlikely(write > BUF_PAGE_SIZE)) |
| 2402 | return rb_move_tail(cpu_buffer, length, tail, | 2409 | return rb_move_tail(cpu_buffer, length, tail, |
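Editor's note: the ring_buffer.c hunk forces delta to 0 when tail == 0, i.e. for the first commit on a page, because that event shares the page's own timestamp. The toy model below shows why: an event's time is reconstructed as the page timestamp plus the deltas of the events before it. All names are invented for the demo; this is not the ring buffer's real layout.

/* Toy model of per-page delta timestamps: the first event on a page
 * must carry delta == 0 or its reconstructed time would be wrong. */
#include <stdint.h>
#include <stdio.h>

#define EVENTS_PER_PAGE 4

struct page_model {
	uint64_t page_ts;                   /* timestamp of the page itself */
	uint64_t delta[EVENTS_PER_PAGE];    /* per-event delta from previous */
	unsigned int nr;
};

static void append(struct page_model *p, uint64_t now, uint64_t *last_ts)
{
	uint64_t delta = now - *last_ts;

	if (p->nr == 0) {         /* first commit on the page */
		p->page_ts = now;
		delta = 0;        /* same timestamp as the page */
	}
	p->delta[p->nr++] = delta;
	*last_ts = now;
}

static uint64_t event_time(const struct page_model *p, unsigned int idx)
{
	uint64_t t = p->page_ts;

	for (unsigned int i = 0; i <= idx; i++)
		t += p->delta[i];
	return t;
}

int main(void)
{
	struct page_model page = { .nr = 0 };
	uint64_t last_ts = 90;    /* last event on some older page */

	append(&page, 100, &last_ts);
	append(&page, 130, &last_ts);
	printf("event0=%llu event1=%llu\n",
	       (unsigned long long)event_time(&page, 0),
	       (unsigned long long)event_time(&page, 1));
	return 0;
}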
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index e71ffd4eccb5..7b16d40bd64d 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
| @@ -27,12 +27,6 @@ | |||
| 27 | 27 | ||
| 28 | DEFINE_MUTEX(event_mutex); | 28 | DEFINE_MUTEX(event_mutex); |
| 29 | 29 | ||
| 30 | DEFINE_MUTEX(event_storage_mutex); | ||
| 31 | EXPORT_SYMBOL_GPL(event_storage_mutex); | ||
| 32 | |||
| 33 | char event_storage[EVENT_STORAGE_SIZE]; | ||
| 34 | EXPORT_SYMBOL_GPL(event_storage); | ||
| 35 | |||
| 36 | LIST_HEAD(ftrace_events); | 30 | LIST_HEAD(ftrace_events); |
| 37 | static LIST_HEAD(ftrace_common_fields); | 31 | static LIST_HEAD(ftrace_common_fields); |
| 38 | 32 | ||
| @@ -1777,6 +1771,16 @@ static void trace_module_add_events(struct module *mod) | |||
| 1777 | { | 1771 | { |
| 1778 | struct ftrace_event_call **call, **start, **end; | 1772 | struct ftrace_event_call **call, **start, **end; |
| 1779 | 1773 | ||
| 1774 | if (!mod->num_trace_events) | ||
| 1775 | return; | ||
| 1776 | |||
| 1777 | /* Don't add infrastructure for mods without tracepoints */ | ||
| 1778 | if (trace_module_has_bad_taint(mod)) { | ||
| 1779 | pr_err("%s: module has bad taint, not creating trace events\n", | ||
| 1780 | mod->name); | ||
| 1781 | return; | ||
| 1782 | } | ||
| 1783 | |||
| 1780 | start = mod->trace_events; | 1784 | start = mod->trace_events; |
| 1781 | end = mod->trace_events + mod->num_trace_events; | 1785 | end = mod->trace_events + mod->num_trace_events; |
| 1782 | 1786 | ||
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index 7c3e3e72e2b6..ee0a5098ac43 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c | |||
| @@ -95,15 +95,12 @@ static void __always_unused ____ftrace_check_##name(void) \ | |||
| 95 | #undef __array | 95 | #undef __array |
| 96 | #define __array(type, item, len) \ | 96 | #define __array(type, item, len) \ |
| 97 | do { \ | 97 | do { \ |
| 98 | char *type_str = #type"["__stringify(len)"]"; \ | ||
| 98 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ | 99 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ |
| 99 | mutex_lock(&event_storage_mutex); \ | 100 | ret = trace_define_field(event_call, type_str, #item, \ |
| 100 | snprintf(event_storage, sizeof(event_storage), \ | ||
| 101 | "%s[%d]", #type, len); \ | ||
| 102 | ret = trace_define_field(event_call, event_storage, #item, \ | ||
| 103 | offsetof(typeof(field), item), \ | 101 | offsetof(typeof(field), item), \ |
| 104 | sizeof(field.item), \ | 102 | sizeof(field.item), \ |
| 105 | is_signed_type(type), filter_type); \ | 103 | is_signed_type(type), filter_type); \ |
| 106 | mutex_unlock(&event_storage_mutex); \ | ||
| 107 | if (ret) \ | 104 | if (ret) \ |
| 108 | return ret; \ | 105 | return ret; \ |
| 109 | } while (0); | 106 | } while (0); |
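Editor's note: the trace_export.c hunk drops the shared event_storage buffer and its mutex in favour of a "type[len]" string built entirely by the preprocessor, via stringification and string-literal concatenation. The standalone demo below shows the same technique with hypothetical macro names; no runtime formatting or locking is involved.

/* Small demo: build "type[len]" at compile time so no shared scratch
 * buffer or lock is needed. */
#include <stdio.h>
#include <string.h>

#define __stringify_1(x) #x
#define __stringify(x)   __stringify_1(x)

#define TYPE_STR(type, len) #type "[" __stringify(len) "]"

int main(void)
{
	const char *type_str = TYPE_STR(unsigned char, 16);

	/* The literal lives in .rodata; nothing is formatted at runtime. */
	printf("%s (%zu chars)\n", type_str, strlen(type_str));
	return 0;
}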
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 29f26540e9c9..031cc5655a51 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c | |||
| @@ -631,6 +631,11 @@ void tracepoint_iter_reset(struct tracepoint_iter *iter) | |||
| 631 | EXPORT_SYMBOL_GPL(tracepoint_iter_reset); | 631 | EXPORT_SYMBOL_GPL(tracepoint_iter_reset); |
| 632 | 632 | ||
| 633 | #ifdef CONFIG_MODULES | 633 | #ifdef CONFIG_MODULES |
| 634 | bool trace_module_has_bad_taint(struct module *mod) | ||
| 635 | { | ||
| 636 | return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)); | ||
| 637 | } | ||
| 638 | |||
| 634 | static int tracepoint_module_coming(struct module *mod) | 639 | static int tracepoint_module_coming(struct module *mod) |
| 635 | { | 640 | { |
| 636 | struct tp_module *tp_mod, *iter; | 641 | struct tp_module *tp_mod, *iter; |
| @@ -641,7 +646,7 @@ static int tracepoint_module_coming(struct module *mod) | |||
| 641 | * module headers (for forced load), to make sure we don't cause a crash. | 646 | * module headers (for forced load), to make sure we don't cause a crash. |
| 642 | * Staging and out-of-tree GPL modules are fine. | 647 | * Staging and out-of-tree GPL modules are fine. |
| 643 | */ | 648 | */ |
| 644 | if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP))) | 649 | if (trace_module_has_bad_taint(mod)) |
| 645 | return 0; | 650 | return 0; |
| 646 | mutex_lock(&tracepoints_mutex); | 651 | mutex_lock(&tracepoints_mutex); |
| 647 | tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL); | 652 | tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL); |
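Editor's note: trace_module_has_bad_taint() above treats a module as unacceptable if it carries any taint bit other than TAINT_OOT_MODULE or TAINT_CRAP. The bitmask check is simple; the sketch below mirrors it with made-up bit numbers purely for illustration.

/* Illustrative subset check: taints are acceptable only if every set
 * bit is inside the allowed mask. Bit values are hypothetical. */
#include <stdbool.h>
#include <stdio.h>

#define TAINT_FORCED_MODULE 1    /* hypothetical bit numbers */
#define TAINT_CRAP          2
#define TAINT_OOT_MODULE    3

static bool has_bad_taint(unsigned long taints)
{
	unsigned long allowed = (1UL << TAINT_OOT_MODULE) | (1UL << TAINT_CRAP);

	return taints & ~allowed;
}

int main(void)
{
	printf("oot only: %s\n", has_bad_taint(1UL << TAINT_OOT_MODULE) ? "bad" : "ok");
	printf("forced  : %s\n", has_bad_taint(1UL << TAINT_FORCED_MODULE) ? "bad" : "ok");
	return 0;
}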
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index 240fb62cf394..dd06439b9c84 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c | |||
| @@ -225,7 +225,7 @@ static u32 map_id_up(struct uid_gid_map *map, u32 id) | |||
| 225 | * | 225 | * |
| 226 | * When there is no mapping defined for the user-namespace uid | 226 | * When there is no mapping defined for the user-namespace uid |
| 227 | * pair INVALID_UID is returned. Callers are expected to test | 227 | * pair INVALID_UID is returned. Callers are expected to test |
| 228 | * for and handle handle INVALID_UID being returned. INVALID_UID | 228 | * for and handle INVALID_UID being returned. INVALID_UID |
| 229 | * may be tested for using uid_valid(). | 229 | * may be tested for using uid_valid(). |
| 230 | */ | 230 | */ |
| 231 | kuid_t make_kuid(struct user_namespace *ns, uid_t uid) | 231 | kuid_t make_kuid(struct user_namespace *ns, uid_t uid) |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 82ef9f3b7473..193e977a10ea 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -1851,6 +1851,12 @@ static void destroy_worker(struct worker *worker) | |||
| 1851 | if (worker->flags & WORKER_IDLE) | 1851 | if (worker->flags & WORKER_IDLE) |
| 1852 | pool->nr_idle--; | 1852 | pool->nr_idle--; |
| 1853 | 1853 | ||
| 1854 | /* | ||
| 1855 | * Once WORKER_DIE is set, the kworker may destroy itself at any | ||
| 1856 | * point. Pin to ensure the task stays until we're done with it. | ||
| 1857 | */ | ||
| 1858 | get_task_struct(worker->task); | ||
| 1859 | |||
| 1854 | list_del_init(&worker->entry); | 1860 | list_del_init(&worker->entry); |
| 1855 | worker->flags |= WORKER_DIE; | 1861 | worker->flags |= WORKER_DIE; |
| 1856 | 1862 | ||
| @@ -1859,6 +1865,7 @@ static void destroy_worker(struct worker *worker) | |||
| 1859 | spin_unlock_irq(&pool->lock); | 1865 | spin_unlock_irq(&pool->lock); |
| 1860 | 1866 | ||
| 1861 | kthread_stop(worker->task); | 1867 | kthread_stop(worker->task); |
| 1868 | put_task_struct(worker->task); | ||
| 1862 | kfree(worker); | 1869 | kfree(worker); |
| 1863 | 1870 | ||
| 1864 | spin_lock_irq(&pool->lock); | 1871 | spin_lock_irq(&pool->lock); |
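Editor's note: the workqueue.c hunk pins the worker task with get_task_struct() before setting WORKER_DIE, because once that flag is set the kworker may free itself at any point; the destroyer drops its reference only after kthread_stop(). The refcount sketch below models just that pin/unpin pattern in userspace; the types and helpers are invented for the demo.

/* Minimal refcount sketch of the pinning pattern: take your own
 * reference before telling the object to die, drop it after the
 * final use. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct task {
	int refs;
	int die;
};

static struct task *get_task(struct task *t) { t->refs++; return t; }

static void put_task(struct task *t)
{
	if (--t->refs == 0) {
		printf("task freed\n");
		free(t);
	}
}

static void worker_exits(struct task *t)
{
	assert(t->die);
	put_task(t);              /* the worker drops its own reference */
}

int main(void)
{
	struct task *t = calloc(1, sizeof(*t));

	t->refs = 1;              /* the worker's own reference */

	get_task(t);              /* destroyer pins the task ... */
	t->die = 1;
	worker_exits(t);          /* ... so this cannot be the last put */
	printf("still safe to look at task, refs=%d\n", t->refs);
	put_task(t);              /* now it really goes away */
	return 0;
}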
